dataset.Dataset Class Reference
Inheritance diagram for dataset.Dataset:
dataset.BaseDataset

Public Member Functions

def __init__
 
def __init__
 
def buildListOfBadFiles
 
def buildListOfFiles
 
def convertTimeToRun
 
def datasetSnippet
 
def dataType
 
def dump_cff
 
def extractFileSizes
 
def fileInfoList
 
def fileList
 
def forcerunrange
 
def getForceRunRangeFunction
 
def getPrimaryDatasetEntries
 
def magneticField
 
def magneticFieldForRun
 
def name
 
def parentDataset
 
def predefined
 
def printInfo
 
def runList
 
- Public Member Functions inherited from dataset.BaseDataset
def __init__
 def __init__(self, name, user, pattern='.*root') More...
 
def buildListOfBadFiles
 
def buildListOfFiles
 
def extractFileSizes
 
def getPrimaryDatasetEntries
 
def listOfFiles
 
def listOfGoodFiles
 
def listOfGoodFilesWithPrescale
 
def printFiles
 
def printInfo
 

Public Attributes

 bad_files
 
 castorDir
 
 files
 
 filesAndSizes
 
 good_files
 
 lfnDir
 
 maskExists
 
 report
 
- Public Attributes inherited from dataset.BaseDataset
 bad_files
 
 dbsInstance
 
 files
 
 filesAndSizes
 
 good_files
 
 name
 
 pattern
 
 primaryDatasetEntries
 
 report
 
 run_range
 
 user
 

Private Member Functions

def __chunks
 
def __createSnippet
 
def __dateString
 
def __datetime
 
def __find_ge
 
def __find_lt
 
def __findInJson
 
def __getData
 
def __getDataType
 
def __getFileInfoList
 
def __getMagneticField
 
def __getMagneticFieldForRun
 
def __getParentDataset
 
def __getRunList
 

Private Attributes

 __alreadyStored
 
 __cmssw
 
 __cmsswrelease
 
 __dasLimit
 
 __dataType
 
 __fileInfoList
 
 __fileList
 
 __filename
 
 __firstusedrun
 
 __lastusedrun
 
 __magneticField
 
 __name
 
 __official
 
 __origName
 
 __parentDataset
 
 __parentFileInfoList
 
 __parentFileList
 
 __predefined
 
 __runList
 

Static Private Attributes

tuple __dummy_source_template
 

Detailed Description

Definition at line 14 of file dataset.py.
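dataset.Dataset represents a single CMS dataset for the all-in-one alignment validation tools. It resolves a dataset either from a predefined Alignment/OfflineValidation _cff.py file or from DAS, and builds CMSSW source snippets (datasetSnippet(), dump_cff()) that can be restricted to a run range, a time window, or a JSON luminosity mask.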

Constructor & Destructor Documentation

def dataset.Dataset.__init__ (   self,
  datasetName,
  dasLimit = 0,
  tryPredefinedFirst = True,
  cmssw = os.environ["CMSSW_BASE"],
  cmsswrelease = os.environ["CMSSW_RELEASE_BASE"] 
)

Definition at line 16 of file dataset.py.

Referenced by dataset.Dataset.__init__().

16  def __init__( self, datasetName, dasLimit = 0, tryPredefinedFirst = True,
17  cmssw = os.environ["CMSSW_BASE"], cmsswrelease = os.environ["CMSSW_RELEASE_BASE"]):
18  self.__name = datasetName
19  self.__origName = datasetName
20  self.__dasLimit = dasLimit
21  self.__fileList = None
22  self.__fileInfoList = None
23  self.__runList = None
24  self.__alreadyStored = False
25  self.__cmssw = cmssw
26  self.__cmsswrelease = cmsswrelease
27  self.__firstusedrun = None
28  self.__lastusedrun = None
29  self.__parentDataset = None
30  self.__parentFileList = None
31  self.__parentFileInfoList = None
32 
33  # check if the dataset name matches the CMS dataset naming scheme
34  if re.match( r'/.+/.+/.+', self.__name ):
35  self.__official = True
36  fileName = "Dataset" + self.__name.replace("/","_") + "_cff.py"
37  else:
38  self.__official = False
39  fileName = self.__name + "_cff.py"
40 
41  searchPath1 = os.path.join( self.__cmssw, "python",
42  "Alignment", "OfflineValidation",
43  fileName )
44  searchPath2 = os.path.join( self.__cmssw, "src",
45  "Alignment", "OfflineValidation",
46  "python", fileName )
47  searchPath3 = os.path.join( self.__cmsswrelease,
48  "python", "Alignment",
49  "OfflineValidation", fileName )
50  if self.__official and not tryPredefinedFirst:
51  self.__predefined = False
52  elif os.path.exists( searchPath1 ):
53  self.__predefined = True
54  self.__filename = searchPath1
55  elif os.path.exists( searchPath2 ):
56  msg = ("The predefined dataset '%s' does exist in '%s', but "
57  "you need to run 'scram b' first."
58  %( self.__name, searchPath2 ))
59  if self.__official:
60  print msg
61  print "Getting the data from DAS again. To go faster next time, run scram b."
62  else:
63  raise AllInOneError( msg )
64  elif os.path.exists( searchPath3 ):
65  self.__predefined = True
66  self.__filename = searchPath3
67  elif self.__official:
68  self.__predefined = False
69  else:
70  msg = ("The predefined dataset '%s' does not exist. Please "
71  "create it first or check for typos."%( self.__name ))
72  raise AllInOneError( msg )
73 
74  if self.__predefined and self.__official:
75  self.__name = "Dataset" + self.__name.replace("/","_")
76 
77  self.__dataType = self.__getDataType()
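For illustration, a minimal construction sketch. The dataset name is made up, and a CMSSW environment is assumed so that os.environ["CMSSW_BASE"] and os.environ["CMSSW_RELEASE_BASE"] are set and dataset.py is importable:

    from dataset import Dataset

    # Hypothetical official dataset; it matches the /.../.../... naming
    # scheme, so a predefined _cff.py is looked for first
    # (tryPredefinedFirst = True by default).
    d = Dataset("/MinimumBias/Run2015B-v1/RECO")
    print d.name(), d.dataType()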
def dataset.Dataset.__init__ (   self,
  name,
  user,
  pattern = '.*root' 
)

Definition at line 264 of file dataset.py.

References dataset.Dataset.__init__().

265  def __init__(self, name, user, pattern='.*root'):
266  self.lfnDir = castorBaseDir(user) + name
267  self.castorDir = castortools.lfnToCastor( self.lfnDir )
268  self.maskExists = False
269  self.report = None
270  super(Dataset, self).__init__(name, user, pattern)

Member Function Documentation

def dataset.Dataset.__chunks (   self,
  theList,
  n 
)
private
Yield successive n-sized chunks from theList.

Definition at line 79 of file dataset.py.

Referenced by dataset.Dataset.__createSnippet().

79 
80  def __chunks( self, theList, n ):
81  """ Yield successive n-sized chunks from theList.
82  """
83  for i in xrange( 0, len( theList ), n ):
84  yield theList[i:i+n]
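A standalone sketch of the same chunking pattern, with made-up file names; the chunk size of 255 used elsewhere in this class (e.g. by __createSnippet()) simply feeds this generator:

    def chunks(theList, n):
        """Yield successive n-sized chunks from theList (same logic as above)."""
        for i in xrange(0, len(theList), n):
            yield theList[i:i+n]

    print list(chunks(["a.root", "b.root", "c.root", "d.root", "e.root"], 2))
    # [['a.root', 'b.root'], ['c.root', 'd.root'], ['e.root']]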
def dataset.Dataset.__createSnippet (   self,
  jsonPath = None,
  begin = None,
  end = None,
  firstRun = None,
  lastRun = None,
  repMap = None,
  crab = False,
  parent = False 
)
private

Definition at line 117 of file dataset.py.

References dataset.Dataset.__chunks(), dataset.Dataset.__dummy_source_template, dataset.Dataset.__findInJson(), dataset.Dataset.__firstusedrun, dataset.Dataset.__getRunList(), dataset.Dataset.__lastusedrun, dataset.Dataset.convertTimeToRun(), dataset.Dataset.fileList(), dataset.Dataset.getForceRunRangeFunction(), join(), list(), bookConverter.max, min(), dataset.Dataset.predefined(), python.rootplot.root2matplotlib.replace(), and split.

Referenced by dataset.Dataset.datasetSnippet(), and dataset.Dataset.dump_cff().

117  def __createSnippet( self, jsonPath = None, begin = None, end = None,
118  firstRun = None, lastRun = None, repMap = None, crab = False, parent = False ):
119  if firstRun:
120  firstRun = int( firstRun )
121  if lastRun:
122  lastRun = int( lastRun )
123  if ( begin and firstRun ) or ( end and lastRun ):
124  msg = ( "The Usage of "
125  + "'begin' & 'firstRun' " * int( bool( begin and
126  firstRun ) )
127  + "and " * int( bool( ( begin and firstRun ) and
128  ( end and lastRun ) ) )
129  + "'end' & 'lastRun' " * int( bool( end and lastRun ) )
130  + "is ambigous." )
131  raise AllInOneError( msg )
132  if begin or end:
133  ( firstRun, lastRun ) = self.convertTimeToRun(
134  begin = begin, end = end, firstRun = firstRun,
135  lastRun = lastRun )
136  if ( firstRun and lastRun ) and ( firstRun > lastRun ):
137  msg = ( "The lower time/runrange limit ('begin'/'firstRun') "
138  "chosen is greater than the upper time/runrange limit "
139  "('end'/'lastRun').")
140  raise AllInOneError( msg )
141  if self.predefined() and (jsonPath or begin or end or firstRun or lastRun):
142  msg = ( "The parameters 'JSON', 'begin', 'end', 'firstRun', and 'lastRun'"
143  "only work for official datasets, not predefined _cff.py files" )
144  raise AllInOneError( msg )
145  goodLumiSecStr = ""
146  lumiStr = ""
147  lumiSecExtend = ""
148  if firstRun or lastRun or jsonPath:
149  goodLumiSecStr = ( "lumiSecs = cms.untracked."
150  "VLuminosityBlockRange()\n" )
151  lumiStr = " lumisToProcess = lumiSecs,\n"
152  if not jsonPath:
153  selectedRunList = self.__getRunList()
154  if firstRun:
155  selectedRunList = [ run for run in selectedRunList \
156  if self.__findInJson(run, "run_number") >= firstRun ]
157  if lastRun:
158  selectedRunList = [ run for run in selectedRunList \
159  if self.__findInJson(run, "run_number") <= lastRun ]
160  lumiList = [ str( self.__findInJson(run, "run_number") ) + ":1-" \
161  + str( self.__findInJson(run, "run_number") ) + ":max" \
162  for run in selectedRunList ]
163  splitLumiList = list( self.__chunks( lumiList, 255 ) )
164  else:
165  theLumiList = None
166  try:
167  theLumiList = LumiList ( filename = jsonPath )
168  except ValueError:
169  pass
170 
171  if theLumiList is not None:
172  allRuns = theLumiList.getRuns()
173  runsToRemove = []
174  for run in allRuns:
175  if firstRun and int( run ) < firstRun:
176  runsToRemove.append( run )
177  if lastRun and int( run ) > lastRun:
178  runsToRemove.append( run )
179  theLumiList.removeRuns( runsToRemove )
180  splitLumiList = list( self.__chunks(
181  theLumiList.getCMSSWString().split(','), 255 ) )
182  if not (splitLumiList and splitLumiList[0] and splitLumiList[0][0]):
183  splitLumiList = None
184  else:
185  with open(jsonPath) as f:
186  jsoncontents = f.read()
187  if "process.source.lumisToProcess" in jsoncontents:
188  msg = "%s is not a json file, but it seems to be a CMSSW lumi selection cff snippet. Trying to use it" % jsonPath
189  if firstRun or lastRun:
190  msg += ("\n (after applying firstRun and/or lastRun)")
191  msg += ".\nPlease note that, depending on the format of this file, it may not work as expected."
192  msg += "\nCheck your config file to make sure that it worked properly."
193  print msg
194 
195  runlist = self.__getRunList()
196  if firstRun or lastRun:
197  self.__firstusedrun = -1
198  self.__lastusedrun = -1
199  jsoncontents = re.sub(r"\d+:(\d+|max)(-\d+:(\d+|max))?", self.getForceRunRangeFunction(firstRun, lastRun), jsoncontents)
200  jsoncontents = (jsoncontents.replace("'',\n","").replace("''\n","")
201  .replace('"",\n','').replace('""\n',''))
202  self.__firstusedrun = max(self.__firstusedrun, int(self.__findInJson(runlist[0],"run_number")))
203  self.__lastusedrun = min(self.__lastusedrun, int(self.__findInJson(runlist[-1],"run_number")))
204  if self.__lastusedrun < self.__firstusedrun:
205  jsoncontents = None
206  else:
207  self.__firstusedrun = int(self.__findInJson(runlist[0],"run_number"))
208  self.__lastusedrun = int(self.__findInJson(runlist[-1],"run_number"))
209  lumiSecExtend = jsoncontents
210  splitLumiList = None
211  else:
212  raise AllInOneError("%s is not a valid json file!" % jsonPath)
213 
214  if splitLumiList and splitLumiList[0] and splitLumiList[0][0]:
215  lumiSecStr = [ "',\n'".join( lumis ) \
216  for lumis in splitLumiList ]
217  lumiSecStr = [ "lumiSecs.extend( [\n'" + lumis + "'\n] )" \
218  for lumis in lumiSecStr ]
219  lumiSecExtend = "\n".join( lumiSecStr )
220  runlist = self.__getRunList()
221  self.__firstusedrun = max(int(splitLumiList[0][0].split(":")[0]), int(self.__findInJson(runlist[0],"run_number")))
222  self.__lastusedrun = min(int(splitLumiList[-1][-1].split(":")[0]), int(self.__findInJson(runlist[-1],"run_number")))
223  elif lumiSecExtend:
224  pass
225  else:
226  msg = "You are trying to run a validation without any runs! Check that:"
227  if firstRun or lastRun:
228  msg += "\n - firstRun and lastRun are correct for this dataset, and there are runs in between containing data"
229  if jsonPath:
230  msg += "\n - your JSON file is correct for this dataset, and the runs contain data"
231  if (firstRun or lastRun) and jsonPath:
232  msg += "\n - firstRun and lastRun are consistent with your JSON file"
233  if begin:
234  msg = msg.replace("firstRun", "begin")
235  if end:
236  msg = msg.replace("lastRun", "end")
237  raise AllInOneError(msg)
238 
239  else:
240  runlist = self.__getRunList()
241  self.__firstusedrun = int(self.__findInJson(self.__getRunList()[0],"run_number"))
242  self.__lastusedrun = int(self.__findInJson(self.__getRunList()[-1],"run_number"))
243 
244  if crab:
245  files = ""
246  else:
247  splitFileList = list( self.__chunks( self.fileList(), 255 ) )
248  fileStr = [ "',\n'".join( files ) for files in splitFileList ]
249  fileStr = [ "readFiles.extend( [\n'" + files + "'\n] )" \
250  for files in fileStr ]
251  files = "\n".join( fileStr )
252 
253  if parent:
254  splitParentFileList = list( self.__chunks( self.fileList(parent = True), 255 ) )
255  parentFileStr = [ "',\n'".join( parentFiles ) for parentFiles in splitParentFileList ]
256  parentFileStr = [ "secFiles.extend( [\n'" + parentFiles + "'\n] )" \
257  for parentFiles in parentFileStr ]
258  parentFiles = "\n".join( parentFileStr )
259  files += "\n\n" + parentFiles
260 
261 
262  theMap = repMap
263  theMap["files"] = files
264  theMap["json"] = jsonPath
265  theMap["lumiStr"] = lumiStr
266  theMap["goodLumiSecStr"] = goodLumiSecStr%( theMap )
267  theMap["lumiSecExtend"] = lumiSecExtend
268  if crab:
269  dataset_snippet = self.__dummy_source_template%( theMap )
270  else:
271  dataset_snippet = self.__source_template%( theMap )
272  return dataset_snippet
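For orientation, a sketch of the kind of source fragment this method assembles from the 255-entry chunks; file names and run numbers are made up, and the exact layout is governed by repMap and the source templates:

    lumiSecs = cms.untracked.VLuminosityBlockRange()
    readFiles.extend( [
    '/store/data/Run2015B/MinimumBias/RECO/v1/000/251/244/00000/file1.root',
    '/store/data/Run2015B/MinimumBias/RECO/v1/000/251/251/00000/file2.root'
    ] )
    lumiSecs.extend( [
    '251244:1-251244:max',
    '251251:1-251251:max'
    ] )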
def dataset.Dataset.__dateString (   self,
  date 
)
private

Definition at line 617 of file dataset.py.

References dataset.Dataset.convertTimeToRun().

Referenced by dataset.Dataset.convertTimeToRun().

618  def __dateString(self, date):
619  return str(date.year) + str(date.month).zfill(2) + str(date.day).zfill(2)
def dataset.Dataset.__datetime (   self,
  stringForDas 
)
private

Definition at line 608 of file dataset.py.

Referenced by dataset.Dataset.convertTimeToRun().

609  def __datetime(self, stringForDas):
610  if len(stringForDas) != 8:
611  raise AllInOneError(stringForDas + " is not a valid date string.\n"
612  + "DAS accepts dates in the form 'yyyymmdd'")
613  year = stringForDas[:4]
614  month = stringForDas[4:6]
615  day = stringForDas[6:8]
616  return datetime.date(int(year), int(month), int(day))
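A round trip through the two date helpers, with a made-up date: __datetime() parses the DAS 'yyyymmdd' form into a datetime.date, and __dateString() writes one back out:

    import datetime

    stringForDas = "20150612"
    d = datetime.date(int(stringForDas[:4]), int(stringForDas[4:6]),
                      int(stringForDas[6:8]))                     # as in __datetime
    s = str(d.year) + str(d.month).zfill(2) + str(d.day).zfill(2) # as in __dateString
    print d, s   # 2015-06-12 20150612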
def dataset.Dataset.__find_ge (   self,
  a,
  x 
)
private

Definition at line 280 of file dataset.py.

Referenced by dataset.Dataset.convertTimeToRun().

281  def __find_ge( self, a, x):
282  'Find leftmost item greater than or equal to x'
283  i = bisect.bisect_left( a, x )
284  if i != len( a ):
285  return i
286  raise ValueError
def dataset.Dataset.__find_lt (   self,
  a,
  x 
)
private

Definition at line 273 of file dataset.py.

Referenced by dataset.Dataset.convertTimeToRun().

274  def __find_lt( self, a, x ):
275  'Find rightmost value less than x'
276  i = bisect.bisect_left( a, x )
277  if i:
278  return i-1
279  raise ValueError
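Both helpers are thin wrappers around bisect on the sorted run list; a sketch with made-up run numbers:

    import bisect

    runList = [273158, 273302, 273404, 273450]
    # __find_ge(runList, 273300): leftmost index i with runList[i] >= 273300
    print bisect.bisect_left(runList, 273300)       # 1
    # __find_lt(runList, 273404): rightmost index i with runList[i] < 273404
    print bisect.bisect_left(runList, 273404) - 1   # 1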
def dataset.Dataset.__findInJson (   self,
  jsondict,
  strings 
)
private

Definition at line 287 of file dataset.py.

References dataset.Dataset.__findInJson().

Referenced by dataset.Dataset.__createSnippet(), dataset.Dataset.__findInJson(), dataset.Dataset.__getData(), dataset.Dataset.__getDataType(), dataset.Dataset.__getFileInfoList(), dataset.Dataset.__getMagneticField(), dataset.Dataset.__getMagneticFieldForRun(), dataset.Dataset.__getParentDataset(), dataset.Dataset.__getRunList(), dataset.Dataset.convertTimeToRun(), and dataset.Dataset.fileList().

288  def __findInJson(self, jsondict, strings):
289  if isinstance(strings, str):
290  strings = [ strings ]
291 
292  if len(strings) == 0:
293  return jsondict
294  if isinstance(jsondict,dict):
295  if strings[0] in jsondict:
296  try:
297  return self.__findInJson(jsondict[strings[0]], strings[1:])
298  except KeyError:
299  pass
300  else:
301  for a in jsondict:
302  if strings[0] in a:
303  try:
304  return self.__findInJson(a[strings[0]], strings[1:])
305  except (TypeError, KeyError): #TypeError because a could be a string and contain strings[0]
306  pass
307  #if it's not found
308  raise KeyError("Can't find " + strings[0])
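A simplified standalone copy of the lookup (without the try/except fallbacks of the listing above), applied to a made-up DAS-style record, showing how it descends through nested dicts and lists of dicts:

    def findInJson(jsondict, strings):
        if isinstance(strings, str):
            strings = [strings]
        if len(strings) == 0:
            return jsondict
        if isinstance(jsondict, dict):
            if strings[0] in jsondict:
                return findInJson(jsondict[strings[0]], strings[1:])
        else:
            for a in jsondict:
                if strings[0] in a:
                    return findInJson(a[strings[0]], strings[1:])
        raise KeyError("Can't find " + strings[0])

    record = {"data": [{"run": {"run_number": 273158}}]}
    print findInJson(record, ["data", "run", "run_number"])   # 273158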
def dataset.Dataset.__getData (   self,
  dasQuery,
  dasLimit = 0 
)
private

Definition at line 339 of file dataset.py.

References dataset.Dataset.__findInJson().

Referenced by dataset.Dataset.__getDataType(), dataset.Dataset.__getFileInfoList(), dataset.Dataset.__getMagneticField(), dataset.Dataset.__getMagneticFieldForRun(), dataset.Dataset.__getParentDataset(), dataset.Dataset.__getRunList(), and dataset.Dataset.convertTimeToRun().

340  def __getData( self, dasQuery, dasLimit = 0 ):
341  dasData = das_client.get_data( 'https://cmsweb.cern.ch',
342  dasQuery, 0, dasLimit, False )
343  if isinstance(dasData, str):
344  jsondict = json.loads( dasData )
345  else:
346  jsondict = dasData
347  # Check whether the DAS query failed
348  try:
349  error = self.__findInJson(jsondict,["data","error"])
350  except KeyError:
351  error = None
352  if error or self.__findInJson(jsondict,"status") != 'ok' or "data" not in jsondict:
353  jsonstr = str(jsondict)
354  if len(jsonstr) > 10000:
355  jsonfile = "das_query_output_%i.txt"
356  i = 0
357  while os.path.lexists(jsonfile % i):
358  i += 1
359  jsonfile = jsonfile % i
360  theFile = open( jsonfile, "w" )
361  theFile.write( jsonstr )
362  theFile.close()
363  msg = "The DAS query returned an error. The output is very long, and has been stored in:\n" + jsonfile
364  else:
365  msg = "The DAS query returned a error. Here is the output\n" + jsonstr
366  msg += "\nIt's possible that this was a server error. If so, it may work if you try again later"
367  raise AllInOneError(msg)
368  return self.__findInJson(jsondict,"data")
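A hypothetical direct use of the same das_client call; the dataset name is made up, and the CMSSW das_client module plus network access to cmsweb.cern.ch are assumed:

    import json
    import das_client

    dasData = das_client.get_data('https://cmsweb.cern.ch',
                                  'dataset dataset=/MinimumBias/Run2015B-v1/RECO',
                                  0, 0, False)
    jsondict = json.loads(dasData) if isinstance(dasData, str) else dasData
    print jsondict["status"]   # 'ok' on success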
def dataset.Dataset.__getDataType (   self)
private

Definition at line 369 of file dataset.py.

References dataset.Dataset.__filename, dataset.Dataset.__findInJson(), dataset.Dataset.__getData(), dataset.Dataset.__name, and dataset.Dataset.__predefined.

Referenced by dataset.Dataset.dataType().

370  def __getDataType( self ):
371  if self.__predefined:
372  with open(self.__filename) as f:
373  datatype = None
374  for line in f.readlines():
375  if line.startswith("#data type: "):
376  if datatype is not None:
377  raise AllInOneError(self.__filename + " has multiple 'data type' lines.")
378  datatype = line.replace("#data type: ", "").replace("\n","")
379  return datatype
380  return "unknown"
381 
382  dasQuery_type = ( 'dataset dataset=%s | grep dataset.datatype,'
383  'dataset.name'%( self.__name ) )
384  data = self.__getData( dasQuery_type )
385 
386  try:
387  return self.__findInJson(data, ["dataset", "datatype"])
388  except KeyError:
389  print ("Cannot find the datatype of the dataset '%s'\n"
390  "It may not be possible to automatically find the magnetic field,\n"
391  "and you will not be able run in CRAB mode"
392  %( self.name() ))
393  return "unknown"
def dataset.Dataset.__getFileInfoList (   self,
  dasLimit,
  parent = False 
)
private

Definition at line 529 of file dataset.py.

References dataset.Dataset.__fileInfoList, dataset.Dataset.__findInJson(), dataset.Dataset.__getData(), dataset.Dataset.__name, dataset.Dataset.__parentFileInfoList, dataset.Dataset.__predefined, and dataset.Dataset.parentDataset().

Referenced by dataset.Dataset.fileInfoList().

530  def __getFileInfoList( self, dasLimit, parent = False ):
531  if self.__predefined:
532  if parent:
533  extendstring = "secFiles.extend"
534  else:
535  extendstring = "readFiles.extend"
536  with open(self.__filename) as f:
537  files = []
538  copy = False
539  for line in f.readlines():
540  if "]" in line:
541  copy = False
542  if copy:
543  files.append({"name": line.translate(None, "', " + '"')})
544  if extendstring in line and "[" in line and "]" not in line:
545  copy = True
546  return files
547 
548  if self.__fileInfoList and not parent:
549  return self.__fileInfoList
550  if self.__parentFileInfoList and parent:
551  return self.__parentFileInfoList
552 
553  if parent:
554  searchdataset = self.parentDataset()
555  else:
556  searchdataset = self.__name
557  dasQuery_files = ( 'file dataset=%s | grep file.name, file.nevents, '
558  'file.creation_time, '
559  'file.modification_time'%( searchdataset ) )
560  print "Requesting file information for '%s' from DAS..."%( searchdataset ),
561  data = self.__getData( dasQuery_files, dasLimit )
562  print "Done."
563  data = [ self.__findInJson(entry,"file") for entry in data ]
564  if len( data ) == 0:
565  msg = ("No files are available for the dataset '%s'. This can be "
566  "due to a typo or due to a DAS problem. Please check the "
567  "spelling of the dataset and/or retry to run "
568  "'validateAlignments.py'."%( self.name() ))
569  raise AllInOneError( msg )
570  fileInformationList = []
571  for file in data:
572  fileName = 'unknown'
573  try:
574  fileName = self.__findInJson(file, "name")
575  fileCreationTime = self.__findInJson(file, "creation_time")
576  fileNEvents = self.__findInJson(file, "nevents")
577  except KeyError:
578  print ("DAS query gives bad output for file '%s'. Skipping it.\n"
579  "It may work if you try again later.") % fileName
580  fileNEvents = 0
581  # select only non-empty files
582  if fileNEvents == 0:
583  continue
584  fileDict = { "name": fileName,
585  "creation_time": fileCreationTime,
586  "nevents": fileNEvents
587  }
588  fileInformationList.append( fileDict )
589  fileInformationList.sort( key=lambda info: self.__findInJson(info,"name") )
590  if parent:
591  self.__parentFileInfoList = fileInformationList
592  else:
593  self.__fileInfoList = fileInformationList
594  return fileInformationList
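Each entry of the returned list is a plain dict; a sketch with made-up values (files with nevents == 0 are skipped before this dict is built):

    fileDict = { "name": "/store/data/Run2015B/MinimumBias/RECO/v1/000/251/244/00000/file1.root",
                 "creation_time": 1435000000,   # as reported by DAS
                 "nevents": 24000 }
    print fileDict["nevents"]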
def dataset.Dataset.__getMagneticField (   self)
private

Definition at line 404 of file dataset.py.

References dataset.Dataset.__cmssw, dataset.Dataset.__cmsswrelease, dataset.Dataset.__dataType, dataset.Dataset.__filename, dataset.Dataset.__findInJson(), dataset.Dataset.__getData(), dataset.Dataset.__name, dataset.Dataset.__predefined, and python.rootplot.root2matplotlib.replace().

Referenced by dataset.Dataset.magneticField().

405  def __getMagneticField( self ):
406  Bfieldlocation = os.path.join( self.__cmssw, "python", "Configuration", "StandardSequences" )
407  if not os.path.isdir(Bfieldlocation):
408  Bfieldlocation = os.path.join( self.__cmsswrelease, "python", "Configuration", "StandardSequences" )
409  Bfieldlist = [ f.replace("_cff.py",'') \
410  for f in os.listdir(Bfieldlocation) \
411  if f.startswith("MagneticField_") and f.endswith("_cff.py") ]
412  Bfieldlist.sort( key = lambda Bfield: -len(Bfield) ) #Put it in order of decreasing length, so that searching in the name gives the longer match
413 
414  if self.__predefined:
415  with open(self.__filename) as f:
416  datatype = None
417  Bfield = None
418  for line in f.readlines():
419  if line.startswith("#data type: "):
420  if datatype is not None:
421  raise AllInOneError(self.__filename + " has multiple 'data type' lines.")
422  datatype = line.replace("#data type: ", "").replace("\n","")
423  datatype = datatype.split("#")[0].strip()
424  if line.startswith("#magnetic field: "):
425  if Bfield is not None:
426  raise AllInOneError(self.__filename + " has multiple 'magnetic field' lines.")
427  Bfield = line.replace("#magnetic field: ", "").replace("\n","")
428  Bfield = Bfield.split("#")[0].strip()
429  if Bfield is not None:
430  Bfield = Bfield.split(",")[0]
431  if Bfield in Bfieldlist or Bfield == "unknown":
432  return Bfield
433  else:
434  print "Your dataset has magnetic field '%s', which does not exist in your CMSSW version!" % Bfield
435  print "Using Bfield='unknown' - this will revert to the default"
436  return "unknown"
437  elif datatype == "data":
438  return "MagneticField" #this should be in the "#magnetic field" line, but for safety in case it got messed up
439  else:
440  return "unknown"
441 
442  if self.__dataType == "data":
443  return "MagneticField"
444 
445  dasQuery_B = ( 'dataset dataset=%s'%( self.__name ) ) #try to find the magnetic field from DAS
446  data = self.__getData( dasQuery_B ) #it seems to be there for the newer (7X) MC samples, except cosmics
447 
448  try:
449  Bfield = self.__findInJson(data, ["dataset", "mcm", "sequences", "magField"])
450  if Bfield in Bfieldlist:
451  return Bfield
452  elif Bfield == "38T" or Bfield == "38T_PostLS1":
453  return "MagneticField"
454  elif "MagneticField_" + Bfield in Bfieldlist:
455  return "MagneticField_" + Bfield
456  elif Bfield == "":
457  pass
458  else:
459  print "Your dataset has magnetic field '%s', which does not exist in your CMSSW version!" % Bfield
460  print "Using Bfield='unknown' - this will revert to the default magnetic field"
461  return "unknown"
462  except KeyError:
463  pass
464 
465  for possibleB in Bfieldlist:
466  if (possibleB != "MagneticField"
467  and possibleB.replace("MagneticField_","") in self.__name.replace("TkAlCosmics0T", "")):
468  #final attempt - try to identify the dataset from the name
469  #all cosmics dataset names contain "TkAlCosmics0T"
470  if possibleB == "MagneticField_38T" or possibleB == "MagneticField_38T_PostLS1":
471  return "MagneticField"
472  return possibleB
473 
474  return "unknown"
def dataset.Dataset.__getMagneticFieldForRun (   self,
  run = -1,
  tolerance = 0.5 
)
private
For MC, this returns the same as __getMagneticField().
   For data, it gets the magnetic field from the runs.  This is important for
   deciding which template to use for offline validation.

Definition at line 475 of file dataset.py.

References dataset.Dataset.__dataType, dataset.Dataset.__filename, dataset.Dataset.__findInJson(), dataset.Dataset.__firstusedrun, dataset.Dataset.__getData(), dataset.Dataset.__getMagneticFieldForRun(), dataset.Dataset.__lastusedrun, dataset.Dataset.__magneticField, dataset.Dataset.__name, dataset.Dataset.__predefined, funct.abs(), python.rootplot.root2matplotlib.replace(), and split.

Referenced by dataset.Dataset.__getMagneticFieldForRun(), dataset.Dataset.dump_cff(), and dataset.Dataset.magneticFieldForRun().

476  def __getMagneticFieldForRun( self, run = -1, tolerance = 0.5 ):
477  """For MC, this returns the same as the previous function.
478  For data, it gets the magnetic field from the runs. This is important for
479  deciding which template to use for offlinevalidation
480  """
481  if self.__dataType == "mc" and self.__magneticField == "MagneticField":
482  return 3.8 #For 3.8T MC the default MagneticField is used
483  if "T" in self.__magneticField:
484  Bfield = self.__magneticField.split("T")[0].replace("MagneticField_","")
485  try:
486  return float(Bfield) / 10.0 #e.g. 38T and 38T_PostLS1 both return 3.8
487  except ValueError:
488  pass
489  if self.__predefined:
490  with open(self.__filename) as f:
491  Bfield = None
492  for line in f.readlines():
493  if line.startswith("#magnetic field: ") and "," in line:
494  if Bfield is not None:
495  raise AllInOneError(self.__filename + " has multiple 'magnetic field' lines.")
496  return float(line.replace("#magnetic field: ", "").split(",")[1].split("#")[0].strip())
497 
498  if run > 0:
499  dasQuery = ('run = %s'%run) #for data
500  data = self.__getData(dasQuery)
501  try:
502  return self.__findInJson(data, ["run","bfield"])
503  except KeyError:
504  return "unknown Can't get the magnetic field for run %s from DAS" % run
505 
506  #run < 0 - find B field for the first and last runs, and make sure they're compatible
507  # (to within tolerance)
508  #NOT FOOLPROOF! The magnetic field might go up and then down, or vice versa
509  if self.__firstusedrun is None or self.__lastusedrun is None:
510  return "unknown Can't get the exact magnetic field for the dataset until data has been retrieved from DAS."
511  firstrunB = self.__getMagneticFieldForRun(self.__firstusedrun)
512  lastrunB = self.__getMagneticFieldForRun(self.__lastusedrun)
513  try:
514  if abs(firstrunB - lastrunB) <= tolerance:
515  return .5*(firstrunB + lastrunB)
516  print firstrunB, lastrunB, tolerance
517  return ("unknown The beginning and end of your run range for %s\n"
518  "have different magnetic fields (%s, %s)!\n"
519  "Try limiting the run range using firstRun, lastRun, begin, end, or JSON,\n"
520  "or increasing the tolerance (in dataset.py) from %s.") % (self.__name, firstrunB, lastrunB, tolerance)
521  except TypeError:
522  try:
523  if "unknown" in firstrunB:
524  return firstrunB
525  else:
526  return lastrunB
527  except TypeError:
528  return lastrunB
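The field-name-to-Tesla conversion used above, on made-up configuration names; '38T' and '38T_PostLS1' both map to 3.8:

    for name in ["MagneticField_38T", "MagneticField_38T_PostLS1"]:
        Bfield = name.split("T")[0].replace("MagneticField_", "")
        print float(Bfield) / 10.0   # 3.8 in both cases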
def dataset.Dataset.__getParentDataset (   self)
private

Definition at line 394 of file dataset.py.

References dataset.Dataset.__findInJson(), dataset.Dataset.__getData(), and dataset.Dataset.__name.

Referenced by dataset.Dataset.parentDataset().

395  def __getParentDataset( self ):
396  dasQuery = "parent dataset=" + self.__name
397  data = self.__getData( dasQuery )
398  try:
399  return self.__findInJson(data, ["parent", "name"])
400  except KeyError:
401  raise AllInOneError("Cannot find the parent of the dataset '" + self.__name + "'\n"
402  "Here is the DAS output:\n" + str(jsondict) +
403  "\nIt's possible that this was a server error. If so, it may work if you try again later")
def dataset.Dataset.__getRunList (   self)
private

Definition at line 595 of file dataset.py.

References dataset.Dataset.__findInJson(), dataset.Dataset.__getData(), dataset.Dataset.__name, and dataset.Dataset.__runList.

Referenced by dataset.Dataset.__createSnippet(), dataset.Dataset.convertTimeToRun(), and dataset.Dataset.runList().

596  def __getRunList( self ):
597  if self.__runList:
598  return self.__runList
599  dasQuery_runs = ( 'run dataset=%s | grep run.run_number,'
600  'run.creation_time'%( self.__name ) )
601  print "Requesting run information for '%s' from DAS..."%( self.__name ),
602  data = self.__getData( dasQuery_runs )
603  print "Done."
604  data = [ self.__findInJson(entry,"run") for entry in data ]
605  data.sort( key = lambda run: self.__findInJson(run, "run_number") )
606  self.__runList = data
607  return data
def dataset.Dataset.buildListOfBadFiles (   self)
fills the list of bad files from the IntegrityCheck log.

When the integrity check file is not available,
files are considered as good.

Definition at line 275 of file dataset.py.

276  def buildListOfBadFiles(self):
277  '''fills the list of bad files from the IntegrityCheck log.
278 
279  When the integrity check file is not available,
280  files are considered as good.'''
281  mask = "IntegrityCheck"
282 
283  self.bad_files = {}
284  self.good_files = []
285 
286  file_mask = castortools.matchingFiles(self.castorDir, '^%s_.*\.txt$' % mask)
287  if file_mask:
288  # here to avoid circular dependency
289  from edmIntegrityCheck import PublishToFileSystem
290  p = PublishToFileSystem(mask)
291  report = p.get(self.castorDir)
292  if report is not None and report:
293  self.maskExists = True
294  self.report = report
295  dup = report.get('ValidDuplicates',{})
296  for name, status in report['Files'].iteritems():
297  # print name, status
298  if not status[0]:
299  self.bad_files[name] = 'MarkedBad'
300  elif dup.has_key(name):
301  self.bad_files[name] = 'ValidDup'
302  else:
303  self.good_files.append( name )
304  else:
305  raise IntegrityCheckError( "ERROR: IntegrityCheck log file IntegrityCheck_XXXXXXXXXX.txt not found" )
def dataset.Dataset.buildListOfFiles (   self,
  pattern = '.*root' 
)
fills list of files, taking all root files matching the pattern in the castor dir

Definition at line 271 of file dataset.py.

272  def buildListOfFiles(self, pattern='.*root'):
273  '''fills list of files, taking all root files matching the pattern in the castor dir'''
274  self.files = castortools.matchingFiles( self.castorDir, pattern )
def dataset.Dataset.convertTimeToRun (   self,
  begin = None,
  end = None,
  firstRun = None,
  lastRun = None,
  shortTuple = True 
)

Definition at line 622 of file dataset.py.

References dataset.Dataset.__dateString(), dataset.Dataset.__datetime(), dataset.Dataset.__find_ge(), dataset.Dataset.__find_lt(), dataset.Dataset.__findInJson(), dataset.Dataset.__getData(), dataset.Dataset.__getRunList(), and dataset.Dataset.__name.

Referenced by dataset.Dataset.__createSnippet(), and dataset.Dataset.__dateString().

622  def convertTimeToRun( self, begin = None, end = None, firstRun = None,
623  lastRun = None, shortTuple = True ):
624  if ( begin and firstRun ) or ( end and lastRun ):
625  msg = ( "The Usage of "
626  + "'begin' & 'firstRun' " * int( bool( begin and
627  firstRun ) )
628  + "and " * int( bool( ( begin and firstRun ) and
629  ( end and lastRun ) ) )
630  + "'end' & 'lastRun' " * int( bool( end and lastRun ) )
631  + "is ambigous." )
632  raise AllInOneError( msg )
633 
634  if begin or end:
635  runList = [ self.__findInJson(run, "run_number") for run in self.__getRunList() ]
636 
637  if begin:
638  lastdate = begin
639  for delta in [ 1, 5, 10, 20, 30 ]: #try searching for about 2 months after begin
640  firstdate = lastdate
641  lastdate = self.__dateString(self.__datetime(firstdate) + datetime.timedelta(delta))
642  dasQuery_begin = "run date between[%s,%s]" % (firstdate, lastdate)
643  begindata = self.__getData(dasQuery_begin)
644  if len(begindata) > 0:
645  begindata.sort(key = lambda run: self.__findInJson(run, ["run", "run_number"]))
646  try:
647  runIndex = self.__find_ge( runList, self.__findInJson(begindata[0], ["run", "run_number"]))
648  except ValueError:
649  msg = ( "Your 'begin' is after the creation time of the last "
650  "run in the dataset\n'%s'"%( self.__name ) )
651  raise AllInOneError( msg )
652  firstRun = runList[runIndex]
653  begin = None
654  break
655 
656  if begin:
657  raise AllInOneError("No runs within a reasonable time interval after your 'begin'."
658  "Try using a 'begin' that has runs soon after it (within 2 months at most)")
659 
660  if end:
661  firstdate = end
662  for delta in [ 1, 5, 10, 20, 30 ]: #try searching for about 2 months before end
663  lastdate = firstdate
664  firstdate = self.__dateString(self.__datetime(lastdate) - datetime.timedelta(delta))
665  dasQuery_end = "run date between[%s,%s]" % (firstdate, lastdate)
666  enddata = self.__getData(dasQuery_end)
667  if len(enddata) > 0:
668  enddata.sort(key = lambda run: self.__findInJson(run, ["run", "run_number"]))
669  try:
670  runIndex = self.__find_lt( runList, self.__findInJson(enddata[-1], ["run", "run_number"]))
671  except ValueError:
672  msg = ( "Your 'end' is before the creation time of the first "
673  "run in the dataset\n'%s'"%( self.__name ) )
674  raise AllInOneError( msg )
675  lastRun = runList[runIndex]
676  end = None
677  break
678 
679  if end:
680  raise AllInOneError("No runs within a reasonable time interval before your 'end'."
681  "Try using an 'end' that has runs soon before it (within 2 months at most)")
682 
683  if shortTuple:
684  return firstRun, lastRun
685  else:
686  return begin, end, firstRun, lastRun
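A hypothetical call mapping a date window to a run range; dataset and dates are made up, dates use the DAS 'yyyymmdd' form, and a CMSSW environment is assumed:

    from dataset import Dataset

    d = Dataset("/MinimumBias/Run2015B-v1/RECO")
    firstRun, lastRun = d.convertTimeToRun(begin="20150612", end="20150720")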
def dataset.Dataset.datasetSnippet (   self,
  jsonPath = None,
  begin = None,
  end = None,
  firstRun = None,
  lastRun = None,
  crab = False,
  parent = False 
)

Definition at line 706 of file dataset.py.

References dataset.Dataset.__createSnippet(), dataset.Dataset.__filename, dataset.Dataset.__name, dataset.Dataset.__official, dataset.Dataset.__origName, dataset.Dataset.__predefined, and dataset.Dataset.dump_cff().

Referenced by dataset.Dataset.parentDataset().

706  def datasetSnippet( self, jsonPath = None, begin = None, end = None,
707  firstRun = None, lastRun = None, crab = False, parent = False ):
708  if self.__predefined and parent:
709  with open(self.__filename) as f:
710  if "secFiles.extend" not in f.read():
711  msg = ("The predefined dataset '%s' does not contain secondary files, "
712  "which your validation requires!") % self.__name
713  if self.__official:
714  self.__name = self.__origName
715  self.__predefined = False
716  print msg
717  print ("Retreiving the files from DAS. You will be asked if you want "
718  "to overwrite the old dataset.\n"
719  "It will still be compatible with validations that don't need secondary files.")
720  else:
721  raise AllInOneError(msg)
722 
723  if self.__predefined:
724  snippet = ("process.load(\"Alignment.OfflineValidation.%s_cff\")\n"
725  "process.maxEvents = cms.untracked.PSet(\n"
726  " input = cms.untracked.int32(.oO[nEvents]Oo. / .oO[parallelJobs]Oo.)\n"
727  ")\n"
728  "process.source.skipEvents=cms.untracked.uint32(.oO[nIndex]Oo.*.oO[nEvents]Oo./.oO[parallelJobs]Oo.)"
729  %(self.__name))
730  if not parent:
731  with open(self.__filename) as f:
732  if "secFiles.extend" in f.read():
733  snippet += "\nprocess.source.secondaryFileNames = cms.untracked.vstring()"
734  return snippet
735  theMap = { "process": "process.",
736  "tab": " " * len( "process." ),
737  "nEvents": ".oO[nEvents]Oo. / .oO[parallelJobs]Oo.",
738  "skipEventsString": "process.source.skipEvents=cms.untracked.uint32(.oO[nIndex]Oo.*.oO[nEvents]Oo./.oO[parallelJobs]Oo.)\n",
739  "importCms": "",
740  "header": ""
741  }
742  datasetSnippet = self.__createSnippet( jsonPath = jsonPath,
743  begin = begin,
744  end = end,
745  firstRun = firstRun,
746  lastRun = lastRun,
747  repMap = theMap,
748  crab = crab,
749  parent = parent )
750  if jsonPath == "" and begin == "" and end == "" and firstRun == "" and lastRun == "":
751  try:
752  self.dump_cff(parent = parent)
753  except AllInOneError, e:
754  print "Can't store the dataset as a cff:"
755  print e
756  print "This may be inconvenient in the future, but will not cause a problem for this validation."
757  return datasetSnippet
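A hypothetical use from a validation configuration; dataset and run numbers are made up, and a CMSSW environment is assumed:

    from dataset import Dataset

    d = Dataset("/MinimumBias/Run2015B-v1/RECO")
    snippet = d.datasetSnippet(firstRun=273158, lastRun=273302)
    # 'snippet' still contains .oO[...]Oo. placeholders for the
    # validation's template substitution.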
def dataset.Dataset.dataType (   self)

Definition at line 687 of file dataset.py.

References dataset.Dataset.__dataType, and dataset.Dataset.__getDataType().

688  def dataType( self ):
689  if not self.__dataType:
690  self.__dataType = self.__getDataType()
691  return self.__dataType
def dataset.Dataset.dump_cff (   self,
  outName = None,
  jsonPath = None,
  begin = None,
  end = None,
  firstRun = None,
  lastRun = None,
  parent = False 
)

Definition at line 759 of file dataset.py.

References dataset.Dataset.__alreadyStored, dataset.Dataset.__cmssw, dataset.Dataset.__createSnippet(), dataset.Dataset.__dataType, dataset.Dataset.__getMagneticFieldForRun(), dataset.Dataset.__magneticField, dataset.Dataset.__name, python.rootplot.root2matplotlib.replace(), and split.

Referenced by dataset.Dataset.datasetSnippet().

759  def dump_cff( self, outName = None, jsonPath = None, begin = None,
760  end = None, firstRun = None, lastRun = None, parent = False ):
761  if self.__alreadyStored:
762  return
763  self.__alreadyStored = True
764  if outName == None:
765  outName = "Dataset" + self.__name.replace("/", "_")
766  packageName = os.path.join( "Alignment", "OfflineValidation" )
767  if not os.path.exists( os.path.join(
768  self.__cmssw, "src", packageName ) ):
769  msg = ("You try to store the predefined dataset'%s'.\n"
770  "For that you need to check out the package '%s' to your "
771  "private relase area in\n"%( outName, packageName )
772  + self.__cmssw )
773  raise AllInOneError( msg )
774  theMap = { "process": "",
775  "tab": "",
776  "nEvents": str( -1 ),
777  "skipEventsString": "",
778  "importCms": "import FWCore.ParameterSet.Config as cms\n",
779  "header": "#Do not delete or (unless you know what you're doing) change these comments\n"
780  "#%(name)s\n"
781  "#data type: %(dataType)s\n"
782  "#magnetic field: .oO[magneticField]Oo.\n" #put in magnetic field later
783  %{"name": self.__name, #need to create the snippet before getting the magnetic field
784  "dataType": self.__dataType} #so that we know the first and last runs
785  }
786  dataset_cff = self.__createSnippet( jsonPath = jsonPath,
787  begin = begin,
788  end = end,
789  firstRun = firstRun,
790  lastRun = lastRun,
791  repMap = theMap,
792  parent = parent)
793  magneticField = self.__magneticField
794  if magneticField == "MagneticField":
795  magneticField = "%s, %s #%s" % (magneticField,
796  str(self.__getMagneticFieldForRun()).replace("\n"," ").split("#")[0].strip(),
797  "Use MagneticField_cff.py; the number is for determining which track selection to use."
798  )
799  dataset_cff = dataset_cff.replace(".oO[magneticField]Oo.",magneticField)
800  filePath = os.path.join( self.__cmssw, "src", packageName,
801  "python", outName + "_cff.py" )
802  if os.path.exists( filePath ):
803  existMsg = "The predefined dataset '%s' already exists.\n"%( outName )
804  askString = "Do you want to overwrite it? [y/n]\n"
805  inputQuery = existMsg + askString
806  while True:
807  userInput = raw_input( inputQuery ).lower()
808  if userInput == "y":
809  break
810  elif userInput == "n":
811  return
812  else:
813  inputQuery = askString
814  print ( "The predefined dataset '%s' will be stored in the file\n"
815  %( outName )
816  + filePath +
817  "\nFor future use you have to do 'scram b'." )
818  print
819  theFile = open( filePath, "w" )
820  theFile.write( dataset_cff )
821  theFile.close()
822  return
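A sketch of the header that dump_cff() writes at the top of the stored _cff.py; the values are made up, and the magnetic field line is substituted in after the snippet has been created:

    #Do not delete or (unless you know what you're doing) change these comments
    #/MinimumBias/Run2015B-v1/RECO
    #data type: data
    #magnetic field: MagneticField, 3.8 #Use MagneticField_cff.py; the number is for determining which track selection to use.
    import FWCore.ParameterSet.Config as cms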
def dataset.Dataset.extractFileSizes (   self)
Get the file size for each file, from the eos ls -l command.

Definition at line 306 of file dataset.py.

References dataset.EOSDataset.castorDir, and dataset.Dataset.castorDir.

307  def extractFileSizes(self):
308  '''Get the file size for each file, from the eos ls -l command.'''
309  # EOS command does not work in tier3
310  lsout = castortools.runXRDCommand(self.castorDir,'dirlist')[0]
311  lsout = lsout.split('\n')
312  self.filesAndSizes = {}
313  for entry in lsout:
314  values = entry.split()
315  if( len(values) != 5):
316  continue
317  # using full abs path as a key.
318  file = '/'.join([self.lfnDir, values[4].split("/")[-1]])
319  size = values[1]
320  self.filesAndSizes[file] = size
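A sketch of the parsing above on one made-up dirlist line; the exact xrd output format is an assumption here, but the code expects five whitespace-separated fields with the size at index 1 and the path at index 4:

    lfnDir = "/store/user/someuser/somedataset"
    entry = "-rw-r--r-- 123456789 cms cms /eos/cms/store/user/someuser/somedataset/file_1.root"
    values = entry.split()
    f = '/'.join([lfnDir, values[4].split("/")[-1]])
    print f, values[1]
    # /store/user/someuser/somedataset/file_1.root 123456789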
def dataset.Dataset.fileInfoList (   self,
  parent = False 
)

Definition at line 838 of file dataset.py.

References dataset.Dataset.__dasLimit, and dataset.Dataset.__getFileInfoList().

Referenced by dataset.Dataset.fileList().

839  def fileInfoList( self, parent = False ):
840  return self.__getFileInfoList( self.__dasLimit, parent )
def dataset.Dataset.fileList (   self,
  parent = False 
)

Definition at line 823 of file dataset.py.

References dataset.Dataset.__fileList, dataset.Dataset.__findInJson(), dataset.Dataset.__parentFileList, and dataset.Dataset.fileInfoList().

Referenced by dataset.Dataset.__createSnippet().

824  def fileList( self, parent = False ):
825  if self.__fileList and not parent:
826  return self.__fileList
827  if self.__parentFileList and parent:
828  return self.__parentFileList
829 
830  fileList = [ self.__findInJson(fileInfo,"name") \
831  for fileInfo in self.fileInfoList(parent) ]
832 
833  if not parent:
834  self.__fileList = fileList
835  else:
836  self.__parentFileList = fileList
837  return fileList
def dataset.Dataset.forcerunrange (   self,
  firstRun,
  lastRun,
  s 
)
s must be in the format run1:lum1-run2:lum2

Definition at line 309 of file dataset.py.

References dataset.Dataset.__firstusedrun, dataset.Dataset.__lastusedrun, and split.

Referenced by dataset.Dataset.getForceRunRangeFunction().

310  def forcerunrange(self, firstRun, lastRun, s):
311  """s must be in the format run1:lum1-run2:lum2"""
312  s = s.group()
313  run1 = s.split("-")[0].split(":")[0]
314  lum1 = s.split("-")[0].split(":")[1]
315  try:
316  run2 = s.split("-")[1].split(":")[0]
317  lum2 = s.split("-")[1].split(":")[1]
318  except IndexError:
319  run2 = run1
320  lum2 = lum1
321  if int(run2) < firstRun or int(run1) > lastRun:
322  return ""
323  if int(run1) < firstRun or firstRun < 0:
324  run1 = firstRun
325  lum1 = 1
326  if int(run2) > lastRun:
327  run2 = lastRun
328  lum2 = "max"
329  if int(run1) < self.__firstusedrun or self.__firstusedrun < 0:
330  self.__firstusedrun = int(run1)
331  if int(run2) > self.__lastusedrun:
332  self.__lastusedrun = int(run2)
333  return "%s:%s-%s:%s" % (run1, lum1, run2, lum2)
def dataset.Dataset.getForceRunRangeFunction (   self,
  firstRun,
  lastRun 
)

Definition at line 334 of file dataset.py.

References dataset.Dataset.forcerunrange().

Referenced by dataset.Dataset.__createSnippet().

335  def getForceRunRangeFunction(self, firstRun, lastRun):
336  def forcerunrangefunction(s):
337  return self.forcerunrange(firstRun, lastRun, s)
338  return forcerunrangefunction
def dataset.Dataset.getPrimaryDatasetEntries (   self)

Definition at line 326 of file dataset.py.

References runall.testit.report, dataset.BaseDataset.report, ALIUtils.report, and WorkFlowRunner.WorkFlowRunner.report.

327  def getPrimaryDatasetEntries(self):
328  if self.report is not None and self.report:
329  return int(self.report.get('PrimaryDatasetEntries',-1))
330  return -1
331 
def dataset.Dataset.magneticField (   self)

Definition at line 692 of file dataset.py.

References dataset.Dataset.__getMagneticField(), and dataset.Dataset.__magneticField.

693  def magneticField( self ):
694  if not self.__magneticField:
695  self.__magneticField = self.__getMagneticField()
696  return self.__magneticField
def dataset.Dataset.magneticFieldForRun (   self,
  run = -1 
)

Definition at line 697 of file dataset.py.

References dataset.Dataset.__getMagneticFieldForRun().

698  def magneticFieldForRun( self, run = -1 ):
699  return self.__getMagneticFieldForRun(run)
def dataset.Dataset.name (   self)

Definition at line 841 of file dataset.py.

References dataset.Dataset.__name.

Referenced by cuy.divideElement.__init__(), cuy.plotElement.__init__(), cuy.additionElement.__init__(), cuy.superimposeElement.__init__(), cuy.graphElement.__init__(), config.CFG.__str__(), validation.Sample.digest(), VIDSelectorBase.VIDSelectorBase.initialize(), and Vispa.Views.PropertyView.Property.valueChanged().

842  def name( self ):
843  return self.__name
def dataset.Dataset.parentDataset (   self)

Definition at line 700 of file dataset.py.

References dataset.Dataset.__getParentDataset(), dataset.Dataset.__parentDataset, and dataset.Dataset.datasetSnippet().

Referenced by dataset.Dataset.__getFileInfoList().

701  def parentDataset( self ):
702  if not self.__parentDataset:
703  self.__parentDataset = self.__getParentDataset()
704  return self.__parentDataset
def dataset.Dataset.predefined (   self)

Definition at line 844 of file dataset.py.

References dataset.Dataset.__predefined.

Referenced by dataset.Dataset.__createSnippet().

845  def predefined( self ):
846  return self.__predefined
def dataset.Dataset.printInfo (   self)

Definition at line 321 of file dataset.py.

References dataset.EOSDataset.castorDir, dataset.Dataset.castorDir, dataset.Dataset.lfnDir, ElectronMVAID.ElectronMVAID.name, counter.Counter.name, average.Average.name, entry.name, histograms.Histograms.name, TmModule.name, cond::persistency::GLOBAL_TAG::NAME.name, core.autovars.NTupleVariable.name, cond::persistency::TAG::NAME.name, cond::persistency::TAG::TIME_TYPE.name, cond::persistency::GLOBAL_TAG::VALIDITY.name, genericValidation.GenericValidation.name, cond::persistency::TAG::OBJECT_TYPE.name, cond::persistency::GLOBAL_TAG::DESCRIPTION.name, preexistingValidation.PreexistingValidation.name, cond::persistency::COND_LOG_TABLE::EXECTIME.name, cond::persistency::TAG::SYNCHRONIZATION.name, cond::persistency::GLOBAL_TAG::RELEASE.name, ora::RecordSpecImpl::Item.name, cond::persistency::COND_LOG_TABLE::IOVTAG.name, cond::persistency::COND_LOG_TABLE::USERTEXT.name, cond::persistency::TAG::END_OF_VALIDITY.name, cond::persistency::GLOBAL_TAG::SNAPSHOT_TIME.name, cond::persistency::GTEditorData.name, cond::persistency::TAG::DESCRIPTION.name, cond::persistency::GLOBAL_TAG::INSERTION_TIME.name, cond::persistency::TAG::LAST_VALIDATED_TIME.name, FWTGeoRecoGeometry::Info.name, Types._Untracked.name, cond::persistency::TAG::INSERTION_TIME.name, cond::persistency::TAG::MODIFICATION_TIME.name, dataset.BaseDataset.name, personalPlayback.Applet.name, ParameterSet.name, PixelDCSObject< class >::Item.name, analyzer.Analyzer.name, DQMRivetClient::LumiOption.name, MagCylinder.name, alignment.Alignment.name, ParSet.name, DQMRivetClient::ScaleFactorOption.name, SingleObjectCondition.name, EgHLTOfflineSummaryClient::SumHistBinData.name, DQMGenericClient::EfficOption.name, cond::persistency::GTProxyData.name, core.autovars.NTupleObjectType.name, XMLHTRZeroSuppressionLoader::_loaderBaseConfig.name, XMLRBXPedestalsLoader::_loaderBaseConfig.name, MyWatcher.name, edm::PathTimingSummary.name, cond::TimeTypeSpecs.name, lumi::TriggerInfo.name, edm::PathSummary.name, PixelEndcapLinkMaker::Item.name, perftools::EdmEventSize::BranchRecord.name, cond::persistency::GLOBAL_TAG_MAP::GLOBAL_TAG_NAME.name, FWTableViewManager::TableEntry.name, cond::persistency::GLOBAL_TAG_MAP::RECORD.name, PixelBarrelLinkMaker::Item.name, EcalLogicID.name, Mapper::definition< ScannerT >.name, cond::persistency::GLOBAL_TAG_MAP::LABEL.name, ExpressionHisto< T >.name, McSelector.name, cond::persistency::GLOBAL_TAG_MAP::TAG_NAME.name, XMLProcessor::_loaderBaseConfig.name, RecoSelector.name, DQMGenericClient::ProfileOption.name, cond::persistency::PAYLOAD::HASH.name, TreeCrawler.Package.name, cond::persistency::PAYLOAD::OBJECT_TYPE.name, cond::persistency::PAYLOAD::DATA.name, cond::persistency::PAYLOAD::STREAMER_INFO.name, MagGeoBuilderFromDDD::volumeHandle.name, cond::persistency::PAYLOAD::VERSION.name, cond::persistency::PAYLOAD::INSERTION_TIME.name, DQMGenericClient::NormOption.name, options.ConnectionHLTMenu.name, DQMGenericClient::CDOption.name, FastHFShowerLibrary.name, h4DSegm.name, PhysicsTools::Calibration::Variable.name, EDMtoMEConverter.name, cond::TagInfo_t.name, looper.Looper.name, MEtoEDM< T >::MEtoEDMObject.name, cond::persistency::IOV::TAG_NAME.name, TrackerSectorStruct.name, cond::persistency::IOV::SINCE.name, cond::persistency::IOV::PAYLOAD_HASH.name, cond::persistency::IOV::INSERTION_TIME.name, MuonGeometrySanityCheckPoint.name, config.Analyzer.name, config.Service.name, h2DSegm.name, options.HLTProcessOptions.name, core.autovars.NTupleSubObject.name, DQMNet::WaitObject.name, AlpgenParameterName.name, SiStripMonitorDigi.name, 
core.autovars.NTupleObject.name, cond::persistency::TAG_MIGRATION::SOURCE_ACCOUNT.name, cond::persistency::TAG_MIGRATION::SOURCE_TAG.name, cond::persistency::TAG_MIGRATION::TAG_NAME.name, cond::persistency::TAG_MIGRATION::STATUS_CODE.name, cond::persistency::TAG_MIGRATION::INSERTION_TIME.name, core.autovars.NTupleCollection.name, FastTimerService::LuminosityDescription.name, cond::persistency::PAYLOAD_MIGRATION::SOURCE_ACCOUNT.name, cond::persistency::PAYLOAD_MIGRATION::SOURCE_TOKEN.name, cond::persistency::PAYLOAD_MIGRATION::PAYLOAD_HASH.name, cond::persistency::PAYLOAD_MIGRATION::INSERTION_TIME.name, conddblib.Tag.name, conddblib.GlobalTag.name, personalPlayback.FrameworkJob.name, plotscripts.SawTeethFunction.name, FastTimerService::ProcessDescription.name, hTMaxCell.name, cscdqm::ParHistoDef.name, BeautifulSoup.Tag.name, TiXmlAttribute.name, and BeautifulSoup.SoupStrainer.name.

322  def printInfo(self):
323  print 'sample : ' + self.name
324  print 'LFN : ' + self.lfnDir
325  print 'Castor path : ' + self.castorDir
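printInfo() prints three lines built from name, lfnDir and castorDir (Python 2 print syntax, as in the source), so lfnDir and castorDir must have been set beforehand. Expected output shape, with placeholder values:

# sample : <dataset name>
# LFN : /store/caf/user/.../MyDataset
# Castor path : /castor/cern.ch/cms/store/caf/user/.../MyDataset
d.printInfo()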
def dataset.Dataset.runList (   self)

Definition at line 847 of file dataset.py.

References dataset.Dataset.__getRunList(), and dataset.Dataset.__runList.

848  def runList( self ):
849  if self.__runList:
850  return self.__runList
851  return self.__getRunList()
852 

Member Data Documentation

dataset.Dataset.__alreadyStored
private

Definition at line 23 of file dataset.py.

Referenced by dataset.Dataset.dump_cff().

dataset.Dataset.__cmssw
private

Definition at line 24 of file dataset.py.

Referenced by dataset.Dataset.__getMagneticField(), and dataset.Dataset.dump_cff().

dataset.Dataset.__cmsswrelease
private

Definition at line 25 of file dataset.py.

Referenced by dataset.Dataset.__getMagneticField().

dataset.Dataset.__dasLimit
private

Definition at line 19 of file dataset.py.

Referenced by dataset.Dataset.fileInfoList().

dataset.Dataset.__dataType
private

Definition at line 76 of file dataset.py.

Referenced by dataset.Dataset.__getMagneticField(), dataset.Dataset.__getMagneticFieldForRun(), dataset.Dataset.dataType(), and dataset.Dataset.dump_cff().

tuple dataset.Dataset.__dummy_source_template
staticprivate
Initial value:
1 = ("readFiles = cms.untracked.vstring()\n"
2  "secFiles = cms.untracked.vstring()\n"
3  "%(process)ssource = cms.Source(\"PoolSource\",\n"
4  "%(tab)s secondaryFileNames ="
5  "secFiles,\n"
6  "%(tab)s fileNames = readFiles\n"
7  ")\n"
8  "readFiles.extend(['dummy_File.root'])\n"
9  "%(process)smaxEvents = cms.untracked.PSet( "
10  "input = cms.untracked.int32(%(nEvents)s) )\n"
11  "%(skipEventsString)s\n")

Definition at line 103 of file dataset.py.

Referenced by dataset.Dataset.__createSnippet().
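The placeholders are filled with %-style string formatting; the key names can be read off the template itself (process, tab, nEvents, skipEventsString). A self-contained sketch on a shortened copy of the template, with invented values:

# Shortened copy of the template above; keys match the %(...)s
# placeholders, values are illustrative.
template = ("%(process)smaxEvents = cms.untracked.PSet( "
            "input = cms.untracked.int32(%(nEvents)s) )\n"
            "%(skipEventsString)s\n")
print(template % {"process": "process.",
                  "nEvents": 100,
                  "skipEventsString": ""})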

dataset.Dataset.__fileInfoList
private

Definition at line 21 of file dataset.py.

Referenced by dataset.Dataset.__getFileInfoList().

dataset.Dataset.__fileList
private

Definition at line 20 of file dataset.py.

Referenced by dataset.Dataset.fileList().

dataset.Dataset.__filename
private

Definition at line 53 of file dataset.py.

Referenced by dataset.Dataset.__getDataType(), dataset.Dataset.__getMagneticField(), dataset.Dataset.__getMagneticFieldForRun(), dataset.Dataset.datasetSnippet(), csvReporter.csvReporter.writeRow(), and csvReporter.csvReporter.writeRows().

dataset.Dataset.__firstusedrun
private

Definition at line 26 of file dataset.py.

Referenced by dataset.Dataset.__createSnippet(), dataset.Dataset.__getMagneticFieldForRun(), and dataset.Dataset.forcerunrange().

dataset.Dataset.__lastusedrun
private

Definition at line 27 of file dataset.py.

Referenced by dataset.Dataset.__createSnippet(), dataset.Dataset.__getMagneticFieldForRun(), and dataset.Dataset.forcerunrange().

dataset.Dataset.__magneticField
private

Definition at line 77 of file dataset.py.

Referenced by dataset.Dataset.__getMagneticFieldForRun(), dataset.Dataset.dump_cff(), and dataset.Dataset.magneticField().

dataset.Dataset.__name
private

Definition at line 17 of file dataset.py.

Referenced by dataset.Dataset.__getDataType(), dataset.Dataset.__getFileInfoList(), dataset.Dataset.__getMagneticField(), dataset.Dataset.__getMagneticFieldForRun(), dataset.Dataset.__getParentDataset(), dataset.Dataset.__getRunList(), dataset.Dataset.convertTimeToRun(), dataset.Dataset.datasetSnippet(), dataset.Dataset.dump_cff(), Config.Process.dumpConfig(), Config.Process.dumpPython(), dataset.Dataset.name(), and Config.Process.name_().

dataset.Dataset.__official
private

Definition at line 34 of file dataset.py.

Referenced by dataset.Dataset.datasetSnippet().

dataset.Dataset.__origName
private

Definition at line 18 of file dataset.py.

Referenced by dataset.Dataset.datasetSnippet().

dataset.Dataset.__parentDataset
private

Definition at line 28 of file dataset.py.

Referenced by dataset.Dataset.parentDataset().

dataset.Dataset.__parentFileInfoList
private

Definition at line 30 of file dataset.py.

Referenced by dataset.Dataset.__getFileInfoList().

dataset.Dataset.__parentFileList
private

Definition at line 29 of file dataset.py.

Referenced by dataset.Dataset.fileList().

dataset.Dataset.__predefined
private

Definition at line 50 of file dataset.py.

Referenced by dataset.Dataset.__getDataType(), dataset.Dataset.__getFileInfoList(), dataset.Dataset.__getMagneticField(), dataset.Dataset.__getMagneticFieldForRun(), dataset.Dataset.datasetSnippet(), and dataset.Dataset.predefined().

dataset.Dataset.__runList
private

Definition at line 22 of file dataset.py.

Referenced by dataset.Dataset.__getRunList(), and dataset.Dataset.runList().

dataset.Dataset.bad_files

Definition at line 282 of file dataset.py.

dataset.Dataset.castorDir

Definition at line 266 of file dataset.py.

Referenced by dataset.Dataset.extractFileSizes(), and dataset.Dataset.printInfo().

dataset.Dataset.files

Definition at line 273 of file dataset.py.

dataset.Dataset.filesAndSizes

Definition at line 311 of file dataset.py.

dataset.Dataset.good_files

Definition at line 283 of file dataset.py.

dataset.Dataset.lfnDir

Definition at line 265 of file dataset.py.

Referenced by dataset.Dataset.printInfo().

dataset.Dataset.maskExists

Definition at line 267 of file dataset.py.

dataset.Dataset.report

Definition at line 268 of file dataset.py.

Referenced by addOnTests.testit.run().