'''Script that submits CMS Tracker Alignment Primary Vertex Validation workflows.

Usage:
  submitPVValidationJobs.py -j TEST -D /HLTPhysics/Run2016C-TkAlMinBias-07Dec2018-v1/ALCARECO -i testPVValidation_Relvals_DATA.ini -r
'''
from __future__ import print_function
from builtins import range

__author__ = 'Marco Musich'
__copyright__ = 'Copyright 2020, CERN CMS'
__credits__ = ['Ernesto Migliore', 'Salvatore Di Guida']
__license__ = 'Unknown'
__maintainer__ = 'Marco Musich'
__email__ = 'marco.musich@cern.ch'

import os
import sys
import copy
import glob
import json
import pickle
import pprint
import shutil
import datetime
import warnings
import collections
import subprocess
import configparser as ConfigParser
from enum import Enum
from optparse import OptionParser
from subprocess import Popen, PIPE
import multiprocessing

# track-refitter flavours used when parsing the [Refit] section below
class RefitType(Enum):
    STANDARD = 1
    COMMON = 2
CopyRights  = '##################################\n'
CopyRights += '#   submitPVValidationJobs.py    #\n'
CopyRights += '#      marco.musich@cern.ch      #\n'
CopyRights += '#           April 2020           #\n'
CopyRights += '##################################\n'
50 """Check if GRID proxy has been initialized."""
53 with open(os.devnull,
"w")
as dump:
54 subprocess.check_call([
"voms-proxy-info",
"--exists"],
55 stdout = dump, stderr = dump)
56 except subprocess.CalledProcessError:
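# Example (illustrative): check_proxy() returns True only when
# `voms-proxy-info --exists` exits with status 0, i.e. when a valid GRID
# proxy is available, so callers can guard submission with
#   if not check_proxy():
#       sys.exit("no valid proxy, run 'voms-proxy-init -voms cms -rfc'")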
63 """Forward proxy to location visible from the batch system.
65 - `rundir`: directory for storing the forwarded proxy
69 print(
"Please create proxy via 'voms-proxy-init -voms cms -rfc'.")
72 local_proxy = subprocess.check_output([
"voms-proxy-info",
"--path"]).
strip()
73 shutil.copyfile(local_proxy, os.path.join(rundir,
".user_proxy"))
78 """Writes 'job.submit' file in `path`.
80 - `path`: job directory
81 - `script`: script to be executed
82 - `proxy_path`: path to proxy (only used in case of requested proxy forward)
85 job_submit_template=
"""\
87 requirements = (OpSysAndVer =?= "CentOS7")
88 executable = {script:s}
89 output = {jobm:s}/{out:s}.out
90 error = {jobm:s}/{out:s}.err
91 log = {jobm:s}/{out:s}.log
92 transfer_output_files = ""
93 +JobFlavour = "{flavour:s}"
96 if proxy_path
is not None:
97 job_submit_template +=
"""\
98 +x509userproxy = "{proxy:s}"
101 job_submit_file = os.path.join(path,
"job_"+name+
".submit")
102 with open(job_submit_file,
"w")
as f:
103 f.write(job_submit_template.format(script = os.path.join(path,name+
"_$(ProcId).sh"),
104 out = name+
"_$(ProcId)",
105 jobm = os.path.abspath(path),
106 flavour =
"tomorrow",
110 return job_submit_file
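# For reference, a submit file rendered from the template above would look
# roughly like this (paths and the number of jobs are illustrative):
#
#   requirements          = (OpSysAndVer =?= "CentOS7")
#   executable            = /path/to/jobdir/myTask_$(ProcId).sh
#   output                = /path/to/jobdir/myTask_$(ProcId).out
#   error                 = /path/to/jobdir/myTask_$(ProcId).err
#   log                   = /path/to/jobdir/myTask_$(ProcId).log
#   transfer_output_files = ""
#   +JobFlavour           = "tomorrow"
#   queue 10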
115 """This function executes `command` and returns it output.
117 - `command`: Shell command to be invoked by this function.
119 child = os.popen(command)
123 print(
'%s failed w/ exit code %d' % (command, err))
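# Example (illustrative): getCommandOutput("echo hello") returns "hello\n";
# on a non-zero exit status the failing command and its code are printed and
# the (possibly partial) output is returned anyway.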
def getFilesForRun(blob):
    """Queries DAS for the list of files of a given (run, dataset) pair."""

    cmd2 = ' dasgoclient -limit=0 -query \'file run='+blob[0][0]+' dataset='+blob[0][1]+ (' instance='+blob[1]+'\'' if (blob[1] is not None) else '\'')
    q = Popen(cmd2, shell=True, stdout=PIPE, stderr=PIPE)
    out, err = q.communicate()
    outputList = out.decode().split('\n')
    outputList.pop()  # drop the empty entry left by the trailing newline
    return outputList
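# Example of the `blob` argument this function expects when dispatched through
# multiprocessing.Pool below (run and dataset values are illustrative):
#   blob = (("316766", "/HLTPhysics/Run2018C-TkAlMinBias-v1/ALCARECO"), None)
# i.e. ((run, dataset), das_instance); the return value is the list of LFNs
# that DAS reports for that run/dataset pair.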
def getNEvents(run, dataset):
    """Returns the number of events in the given run of `dataset`."""

    nEvents = subprocess.check_output(["das_client", "--limit", "0", "--query", "summary run={} dataset={} | grep summary.nevents".format(run, dataset)]).decode()
    return 0 if nEvents == "[]\n" else int(nEvents)
147 """Expects something like
148 +-------+------+--------+--------+-------------------+------------------+
149 | nfill | nrun | nls | ncms | totdelivered(/fb) | totrecorded(/fb) |
150 +-------+------+--------+--------+-------------------+------------------+
151 | 73 | 327 | 142418 | 138935 | 19.562 | 18.036 |
152 +-------+------+--------+--------+-------------------+------------------+
153 And extracts the total recorded luminosity (/b).
162 output = subprocess.check_output([homedir+
"/.local/bin/brilcalc",
"lumi",
"-b",
"STABLE BEAMS",
"-u",
"/pb",
"--begin",
str(minRun),
"--end",
str(maxRun),
"--output-style",
"csv",
"-c",
"web"])
164 warnings.warn(
'ATTENTION! Impossible to query the BRIL DB!')
168 print(
"INSIDE GET LUMINOSITY")
171 for line
in output.decode().
split(
"\n"):
172 if (
"#" not in line):
173 runToCache = line.split(
",")[0].
split(
":")[0]
174 lumiToCache = line.split(
",")[-1].
replace(
"\r",
"")
177 myCachedLumi[runToCache] = lumiToCache
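# Example of a brilcalc CSV line handled by the loop above (illustrative):
#   316766:6934,07/05/18 05:11:48,07/05/18 09:30:58,64,2208,54.634,52.801
# The run number is the first field up to the ':' and the recorded luminosity
# is the last field, so this line yields myCachedLumi["316766"] -> "52.801".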
def isInJSON(run, jsonfile):
    try:
        with open(jsonfile, 'r') as myJSON:
            jsonDATA = json.load(myJSON)
        return (run in jsonDATA)
    except:
        warnings.warn('ATTENTION! Impossible to find lumi mask! All runs will be used.')
        return True
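# Example (illustrative): with a standard certification JSON such as
#   {"316766": [[1, 100], [103, 205]], "316876": [[1, 50]]}
# isInJSON("316766", jsonfile) is True: only the run-level keys are checked,
# the lumisection ranges inside the JSON are not inspected.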
def as_dict(config):
    dictionary = {}
    for section in config.sections():
        dictionary[section] = {}
        for option in config.options(section):
            dictionary[section][option] = config.get(section, option)
    return dictionary
def to_bool(value):
    """
    Converts 'something' to boolean. Raises an exception for invalid formats.
        Possible True  values: 1, True, "1", "TRue", "yes", "y", "t"
        Possible False values: 0, False, None, [], {}, "", "0", "faLse", "no", "n", "f", 0.0, ...
    """
    if str(value).lower() in ("yes", "y", "true",  "t", "1"): return True
    if str(value).lower() in ("no",  "n", "false", "f", "0", "0.0", "", "none", "[]", "{}"): return False
    raise Exception('Invalid value for boolean conversion: ' + str(value))
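# Examples (illustrative):
#   to_bool("TRue")  -> True
#   to_bool(0.0)     -> False   (str(0.0).lower() == "0.0")
#   to_bool("maybe") -> raises Exception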
def updateDB2():
    dbName = "runInfo.pkl"
    infos = {}
    if os.path.exists(dbName):
        with open(dbName, 'rb') as f:
            infos = pickle.load(f)

    for f in glob.glob("root-files/Run*.root"):
        run = runFromFilename(f)
        if run not in infos:
            infos[run] = {}
            infos[run]["start_time"] = getRunStartTime(run)

    with open(dbName, "wb") as f:
        pickle.dump(infos, f)
def updateDB(run, runInfo):
    dbName = "runInfo.pkl"
    infos = {}
    if os.path.exists(dbName):
        with open(dbName, 'rb') as f:
            infos = pickle.load(f)

    infos[run] = runInfo

    with open(dbName, "wb") as f:
        pickle.dump(infos, f)
class BetterConfigParser(ConfigParser.ConfigParser):

    def exists(self, section, option):
        try:
            items = self.items(section)
        except ConfigParser.NoSectionError:
            return False
        for item in items:
            if item[0] == option:
                return True
        return False

    def __updateDict(self, dictionary, section):
        result = dictionary
        try:
            for option in self.options( section ):
                result[option] = self.get( section, option )
            if "local"+section.title() in self.sections():
                for option in self.options( "local"+section.title() ):
                    result[option] = self.get( "local"+section.title(), option )
        except ConfigParser.NoSectionError as section:
            msg = ("%s in configuration files. This section is mandatory."
                   % (str(section).replace(":", "", 1)))
            raise Exception(msg)
        return result
    def getResultingSection(self, section, defaultDict = {}, demandPars = []):
        result = copy.deepcopy(defaultDict)
        for option in demandPars:
            try:
                result[option] = self.get( section, option )
            except ConfigParser.NoOptionError as globalSectionError:
                globalSection = str( globalSectionError ).split( "'" )[-2]
                splittedSectionName = section.split( ":" )
                if len( splittedSectionName ) > 1:
                    localSection = ("local"+section.split( ":" )[0].title()+":"
                                    +section.split(":")[1])
                else:
                    localSection = ("local"+section.split( ":" )[0].title())
                if self.has_section( localSection ):
                    try:
                        result[option] = self.get( localSection, option )
                    except ConfigParser.NoOptionError as option:
                        msg = ("%s. This option is mandatory."
                               % (str(option).replace(":", "", 1).replace(
                                   "section", "section '"+globalSection+"' or", 1)))
                        raise Exception(msg)
                else:
                    msg = ("%s. This option is mandatory."
                           % (str(globalSectionError).replace(":", "", 1)))
                    raise Exception(msg)
        result = self.__updateDict( result, section )
        return result
def ConfigSectionMap(config, section):
    the_dict = {}
    options = config.options(section)
    for option in options:
        try:
            the_dict[option] = config.get(section, option)
            if the_dict[option] == -1:
                DebugPrint("skip: %s" % option)
        except:
            print("exception on %s!" % option)
            the_dict[option] = None
    return the_dict
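# Example (illustrative): for an .ini fragment like
#   [Selection]
#   applyruncontrol = False
#   ptcut = 3
# ConfigSectionMap(config, "Selection") returns
#   {'applyruncontrol': 'False', 'ptcut': '3'}
# (all values come back as strings; to_bool above handles the conversion).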
def mkdir_eos(out_path):
    print("creating", out_path)
    newpath = '/'
    for dir in out_path.split('/'):
        newpath = os.path.join(newpath, dir)
        # do not issue "mkdir" above the 'test_out' level of the tree
        if newpath.find('test_out') > 0:
            command = "/afs/cern.ch/project/eos/installation/cms/bin/eos.select mkdir "+newpath
            p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            (out, err) = p.communicate()

    # now check that the directory exists
    command2 = "/afs/cern.ch/project/eos/installation/cms/bin/eos.select ls "+out_path
    p = subprocess.Popen(command2, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    (out, err) = p.communicate()
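# Example (illustrative):
#   mkdir_eos("/store/group/alca_trackeralign/jdoe/test_out/myTask")
# issues `eos.select mkdir` only once the accumulated path contains
# 'test_out' (so the shared top of the tree is never touched) and then
# verifies the final directory with `eos.select ls`.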
def split(sequence, size):
    """Yields successive `size`-element chunks of `sequence`."""
    for i in range(0, len(sequence), size):
        yield sequence[i:i+size]
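# Example (illustrative): list(split(list(range(25)), 10)) yields
# [[0..9], [10..19], [20..24]]; the file listing below relies on this to
# group at most 10 input files per job.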
class Job:

    def __init__(self, dataset, job_number, job_id, job_name, isDA, isMC, applyBOWS, applyEXTRACOND, extraconditions, runboundary, lumilist, intlumi, maxevents, gt, allFromGT, alignmentDB, alignmentTAG, apeDB, apeTAG, bowDB, bowTAG, vertextype, tracktype, refittertype, ttrhtype, applyruncontrol, ptcut, CMSSW_dir, the_dir):

        theDataSet = dataset.split("/")[1]+"_"+(dataset.split("/")[2]).split("-")[0]
    def getOutputBaseName(self):
        return "PVValidation_"+self.job_name
    def createTheCfgFile(self, lfn):

        global CopyRights
        # write the cfg file

        self.cfg_dir = os.path.join(self.the_dir, "cfg")
        if not os.path.exists(self.cfg_dir):
            os.makedirs(self.cfg_dir)

        self.outputCfgName = self.output_full_name+"_cfg.py"
        fout = open(os.path.join(self.cfg_dir, self.outputCfgName), 'w')

        template_cfg_file = os.path.join(self.the_dir, "PVValidation_T_cfg.py")
        fin = open(template_cfg_file)

        config_txt = '\n\n' + CopyRights + '\n\n'
        config_txt += fin.read()

        config_txt = config_txt.replace("ISDATEMPLATE", self.isDA)
        config_txt = config_txt.replace("ISMCTEMPLATE", self.isMC)
        config_txt = config_txt.replace("APPLYBOWSTEMPLATE", self.applyBOWS)
        config_txt = config_txt.replace("EXTRACONDTEMPLATE", self.applyEXTRACOND)
        config_txt = config_txt.replace("USEFILELISTTEMPLATE", "True")
        config_txt = config_txt.replace("RUNBOUNDARYTEMPLATE", self.runboundary)
        config_txt = config_txt.replace("LUMILISTTEMPLATE", self.lumilist)
        config_txt = config_txt.replace("MAXEVENTSTEMPLATE", self.maxevents)
        config_txt = config_txt.replace("GLOBALTAGTEMPLATE", self.gt)
        config_txt = config_txt.replace("ALLFROMGTTEMPLATE", self.allFromGT)
        config_txt = config_txt.replace("ALIGNOBJTEMPLATE", self.alignmentDB)
        config_txt = config_txt.replace("GEOMTAGTEMPLATE", self.alignmentTAG)
        config_txt = config_txt.replace("APEOBJTEMPLATE", self.apeDB)
        config_txt = config_txt.replace("ERRORTAGTEMPLATE", self.apeTAG)
        config_txt = config_txt.replace("BOWSOBJECTTEMPLATE", self.bowDB)
        config_txt = config_txt.replace("BOWSTAGTEMPLATE", self.bowTAG)
        config_txt = config_txt.replace("VERTEXTYPETEMPLATE", self.vertextype)
        config_txt = config_txt.replace("TRACKTYPETEMPLATE", self.tracktype)
        config_txt = config_txt.replace("REFITTERTEMPLATE", self.refittertype)
        config_txt = config_txt.replace("TTRHBUILDERTEMPLATE", self.ttrhtype)
        config_txt = config_txt.replace("PTCUTTEMPLATE", self.ptcut)
        config_txt = config_txt.replace("INTLUMITEMPLATE", self.intlumi)
        lfn_with_quotes = map(lambda x: "\'"+x+"\'", lfn)
        config_txt = config_txt.replace("FILESOURCETEMPLATE", "["+",".join(lfn_with_quotes)+"]")
        config_txt = config_txt.replace("OUTFILETEMPLATE", self.output_full_name+".root")

        fout.write(config_txt)

        fin.seek(0)  # rewind: fin was fully consumed by read() above
        for line in fin.readlines():
            if 'END OF EXTRA CONDITIONS' in line:
                for element in self.extraCondVect:
                    if("Rcd" in element):
                        params = self.extraCondVect[element].split(',')

                        fout.write("     process.conditionsIn"+element+"= CalibTracker.Configuration.Common.PoolDBESSource_cfi.poolDBESSource.clone( \n")
                        fout.write("          connect = cms.string('"+params[0]+"'), \n")
                        fout.write("          toGet = cms.VPSet(cms.PSet(record = cms.string('"+element+"'), \n")
                        fout.write("                                     tag = cms.string('"+params[1]+"'), \n")
                        if(len(params) > 2):
                            fout.write("                                     label = cms.untracked.string('"+params[2]+"') \n")
                        fout.write("                                     ) \n")
                        fout.write("                            ) \n")
                        fout.write("     ) \n")
                        fout.write("     process.prefer_conditionsIn"+element+" = cms.ESPrefer(\"PoolDBESSource\", \"conditionsIn"+element+"\") \n \n")
        fout.close()
    def createTheLSFFile(self):

        # directory to store the LSF scripts to be submitted
        self.LSF_dir = os.path.join(self.the_dir, "LSF")
        if not os.path.exists(self.LSF_dir):
            os.makedirs(self.LSF_dir)

        self.output_LSF_name = self.output_full_name+".lsf"
        fout = open(os.path.join(self.LSF_dir, self.output_LSF_name), 'w')

        job_name = self.output_full_name

        log_dir = os.path.join(self.the_dir, "log")
        if not os.path.exists(log_dir):
            os.makedirs(log_dir)

        fout.write("#!/bin/sh \n")
        fout.write("#BSUB -L /bin/sh\n")
        fout.write("#BSUB -J "+job_name+"\n")
        fout.write("#BSUB -o "+os.path.join(log_dir, job_name+".log")+"\n")
        fout.write("#BSUB -q cmscaf1nd \n")
        fout.write("JobName="+job_name+" \n")
        fout.write("OUT_DIR="+self.OUTDIR+" \n")
        fout.write("LXBATCH_DIR=`pwd` \n")
        fout.write("cd "+os.path.join(self.CMSSW_dir, "src")+" \n")
        fout.write("eval `scram runtime -sh` \n")
        fout.write("cd $LXBATCH_DIR \n")
        fout.write("ls -lh . \n")
        fout.write("for RootOutputFile in $(ls *root ); do xrdcp -f ${RootOutputFile} root://eoscms//eos/cms${OUT_DIR}/${RootOutputFile} ; done \n")
        fout.write("for TxtOutputFile in $(ls *txt ); do xrdcp -f ${TxtOutputFile} root://eoscms//eos/cms${OUT_DIR}/${TxtOutputFile} ; done \n")

        fout.close()
    def createTheBashFile(self):

        # directory to store the bash scripts to be submitted
        self.BASH_dir = os.path.join(self.the_dir, "BASH")
        if not os.path.exists(self.BASH_dir):
            os.makedirs(self.BASH_dir)

        self.output_BASH_name = self.output_full_name+".sh"
        fout = open(os.path.join(self.BASH_dir, self.output_BASH_name), 'w')

        job_name = self.output_full_name

        log_dir = os.path.join(self.the_dir, "log")
        if not os.path.exists(log_dir):
            os.makedirs(log_dir)

        fout.write("#!/bin/bash \n")
        fout.write("JobName="+job_name+" \n")
        fout.write("echo \"Job started at \" `date` \n")
        fout.write("CMSSW_DIR="+os.path.join(self.CMSSW_dir, "src")+" \n")
        fout.write("export X509_USER_PROXY=$CMSSW_DIR/Alignment/OfflineValidation/test/.user_proxy \n")
        fout.write("OUT_DIR="+self.OUTDIR+" \n")
        fout.write("LXBATCH_DIR=$PWD \n")
        fout.write("cd ${CMSSW_DIR} \n")
        fout.write("eval `scramv1 runtime -sh` \n")
        fout.write("echo \"batch dir: $LXBATCH_DIR release: $CMSSW_DIR release base: $CMSSW_RELEASE_BASE\" \n")
        fout.write("cd $LXBATCH_DIR \n")
        fout.write("echo \"Content of working dir is \"`ls -lh` \n")
        fout.write("for RootOutputFile in $(ls *root ); do xrdcp -f ${RootOutputFile} root://eoscms//eos/cms${OUT_DIR}/${RootOutputFile} ; done \n")
        fout.write("echo \"Job ended at \" `date` \n")
        fout.write("exit 0 \n")

        fout.close()
    def submitTheLSFFile(self):
        submitcommand1 = "bsub < "+os.path.join(self.LSF_dir, self.output_LSF_name)
        child1 = os.system(submitcommand1)
def main():

    ## check first that a valid GRID proxy is available
    if not check_proxy():
        print("Please create proxy via 'voms-proxy-init -voms cms -rfc'.")
        sys.exit(1)

    print('\n'+CopyRights)

    HOME = os.environ.get('HOME')

    # CMSSW section
    input_CMSSW_BASE = os.environ.get('CMSSW_BASE')
    AnalysisStep_dir = os.path.join(input_CMSSW_BASE, "src/Alignment/OfflineValidation/test")
    lib_path = os.path.abspath(AnalysisStep_dir)
    sys.path.append(lib_path)
    desc = """This is a description of %prog."""
    parser = OptionParser(description=desc, version='%prog version 0.1')
    parser.add_option('-s', '--submit',     help='job submitted',                               dest='submit',      action='store_true', default=False)
    parser.add_option('-j', '--jobname',    help='task name',                                   dest='taskname',    action='store',      default='myTask')
    parser.add_option('-D', '--dataset',    help='selected dataset',                            dest='data',        action='store',      default='')
    parser.add_option('-r', '--doRunBased', help='do run-based splitting',                      dest='doRunBased',  action='store_true', default=False)
    parser.add_option('-i', '--input',      help='set input configuration (overrides default)', dest='inputconfig', action='store',      default=None)
    parser.add_option('-b', '--begin',      help='starting point',                              dest='start',       action='store',      default='1')
    parser.add_option('-e', '--end',        help='ending point',                                dest='end',         action='store',      default='999999')
    parser.add_option('-v', '--verbose',    help='verbose output',                              dest='verbose',     action='store_true', default=False)
    parser.add_option('-u', '--unitTest',   help='unit tests?',                                 dest='isUnitTest',  action='store_true', default=False)
    parser.add_option('-I', '--instance',   help='DAS instance to use',                         dest='instance',    action='store',      default=None)
    (opts, args) = parser.parse_args()
    now = datetime.datetime.now()
    t = opts.taskname  # tag for the EOS output folder

    USER = os.environ.get('USER')
    eosdir = os.path.join("/store/group/alca_trackeralign", USER, "test_out", t)

    if opts.submit:
        mkdir_eos(eosdir)
    else:
        print("Not going to create EOS folder. -s option has not been chosen")
    ConfigFile = opts.inputconfig

    if ConfigFile is not None:

        print("********************************************************")
        print("* Parsing from input file:", ConfigFile, " ")

        config = BetterConfigParser()
        config.read(ConfigFile)

        print("Parsed the following configuration \n\n")
        inputDict = as_dict(config)
        pprint.pprint(inputDict)

        if(not bool(inputDict)):
            raise SystemExit("\n\n ERROR! Could not parse any input file, perhaps you are submitting this from the wrong folder? \n\n")
        doRunBased = opts.doRunBased

        listOfValidations = config.getResultingSection("validations")
        for item in listOfValidations:
            if (bool(listOfValidations[item]) == True):

                applyEXTRACOND.append(ConfigSectionMap(config, "Conditions:"+item)['applyextracond'])
                conditions.append(config.getResultingSection("ExtraConditions"))

                alignmentDB.append(ConfigSectionMap(config, "Conditions:"+item)['alignmentdb'])
                alignmentTAG.append(ConfigSectionMap(config, "Conditions:"+item)['alignmenttag'])
                if(config.exists("Refit", "refittertype")):
                    refittertype.append(ConfigSectionMap(config, "Refit")['refittertype'])
                else:
                    refittertype.append(str(RefitType.COMMON))

                if(config.exists("Refit", "ttrhtype")):
                    ttrhtype.append(ConfigSectionMap(config, "Refit")['ttrhtype'])
                else:
                    ttrhtype.append("WithAngleAndTemplate")
                applyruncontrol.append(ConfigSectionMap(config, "Selection")['applyruncontrol'])
    else:

        print("********************************************************")
        print("*             Parsing from command line                *")
        print("********************************************************")

        jobName = ['testing']
        doRunBased = opts.doRunBased
        maxevents = ['10000']

        gt = ['74X_dataRun2_Prompt_v4']
        allFromGT = ['False']
        applyEXTRACOND = ['False']
        conditions = [[('SiPixelTemplateDBObjectRcd',
                        'frontier://FrontierProd/CMS_CONDITIONS',
                        'SiPixelTemplateDBObject_38T_2015_v3_hltvalidation')]]
        alignmentDB = ['frontier://FrontierProd/CMS_CONDITIONS']
        alignmentTAG = ['TrackerAlignment_Prompt']
        apeDB = ['frontier://FrontierProd/CMS_CONDITIONS']
        apeTAG = ['TrackerAlignmentExtendedErr_2009_v2_express_IOVs']

        bowDB = ['frontier://FrontierProd/CMS_CONDITIONS']
        bowTAG = ['TrackerSurafceDeformations_v1_express']

        vertextype = ['offlinePrimaryVertices']
        tracktype = ['ALCARECOTkAlMinBias']

        applyruncontrol = ['False']
    print("********************************************************")
    print("*             Configuration info                       *")
    print("********************************************************")
    print("- submitted   : ", opts.submit)
    print("- taskname    : ", opts.taskname)
    print("- Jobname     : ", jobName)
    print("- use DA      : ", isDA)
    print("- is MC       : ", isMC)
    print("- is run-based: ", doRunBased)
    print("- evts/job    : ", maxevents)
    print("- GlobalTag   : ", gt)
    print("- allFromGT?  : ", allFromGT)
    print("- extraCond?  : ", applyEXTRACOND)
    print("- extraCond   : ", conditions)
    print("- Align db    : ", alignmentDB)
    print("- Align tag   : ", alignmentTAG)
    print("- APE db      : ", apeDB)
    print("- APE tag     : ", apeTAG)
    print("- use bows?   : ", applyBOWS)
    print("- K&B db      : ", bowDB)
    print("- K&B tag     : ", bowTAG)
    print("- VertexColl  : ", vertextype)
    print("- TrackColl   : ", tracktype)
    print("- RefitterSeq : ", refittertype)
    print("- TTRHBuilder : ", ttrhtype)
    print("- RunControl? : ", applyruncontrol)
    print("- Pt>         : ", ptcut)
    print("- run=        : ", runboundary)
    print("- JSON        : ", lumilist)
    print("- Out Dir     : ", eosdir)

    print("********************************************************")
    print("Will run on", len(jobName), "workflows")
    if(doRunBased):

        print(">>>> This is Data!")
        print(">>>> Doing run based selection")
        cmd = 'dasgoclient -limit=0 -query \'run dataset='+opts.data + (' instance='+opts.instance+'\'' if (opts.instance is not None) else '\'')
        p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
        out, err = p.communicate()
        listOfRuns = out.decode().split('\n')
        listOfRuns.pop()  # drop the empty entry left by the trailing newline
        listOfRuns.sort()
        print("Will run on ", len(listOfRuns), "runs: \n", listOfRuns)

        mytuple = []

        print("first run:", opts.start, "last run:", opts.end)

        for run in listOfRuns:
            if (int(run) < int(opts.start) or int(run) > int(opts.end)):
                print("excluding", run)
                continue

            if not isInJSON(run, lumilist[0]):
                continue

            print("======> taking", run)
            mytuple.append((run, opts.data))

        instances = [opts.instance for entry in mytuple]
        pool = multiprocessing.Pool(processes=20)
        count = pool.map(getFilesForRun, zip(mytuple, instances))
        file_info = dict(zip(listOfRuns, count))
        for run in listOfRuns:
            if (int(run) < int(opts.start) or int(run) > int(opts.end)):
                print('rejecting run', run, ' because outside of boundaries')
                continue

            if not isInJSON(run, lumilist[0]):
                print('rejecting run', run, ' because not in JSON')
                continue
        od = collections.OrderedDict(sorted(file_info.items()))

        # check that the list of runs to process is not empty
        if(len(od.keys()) == 0):
            if(opts.isUnitTest):
                print("|| WARNING: won't run on any run, probably DAS returned an empty query,\n|| but that's fine because this is a unit test!")
                sys.exit(0)
            else:
                raise Exception('Will not run on any run.... please check again the configuration')

        # get the integrated luminosity of the runs from the BRIL DB
        myLumiDB = getLuminosity(HOME, list(od.keys())[0], list(od.keys())[-1], doRunBased, opts.verbose)

        if(opts.verbose):
            pprint.pprint(myLumiDB)
    for iConf in range(len(jobName)):
        print("This is Task n.", iConf+1, "of", len(jobName))

        scripts_dir = os.path.join(AnalysisStep_dir, "scripts")
        if not os.path.exists(scripts_dir):
            os.makedirs(scripts_dir)
        hadd_script_file = os.path.join(scripts_dir, jobName[iConf]+"_"+opts.taskname+".sh")
        fout = open(hadd_script_file, 'w')

        output_file_list1 = list()
        output_file_list2 = list()
        output_file_list2.append("hadd ")
        inputFiles = []
        myRuns = []

        if(to_bool(isMC[iConf]) or (not to_bool(doRunBased))):
            if(to_bool(isMC[iConf])):
                print("this is MC")
                cmd = 'dasgoclient -query \'file dataset='+opts.data+ (' instance='+opts.instance+'\'' if (opts.instance is not None) else '\'')
                s = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
                out, err = s.communicate()
                mylist = out.decode().split('\n')
                mylist.pop()  # drop the empty entry left by the trailing newline

                splitList = split(mylist, 10)
                for files in splitList:
                    inputFiles.append(files)
                    myRuns.append(str(1))
            else:
                print("this is DATA (not doing full run-based selection)")
                print(runboundary[iConf])
                cmd = 'dasgoclient -query \'file dataset='+opts.data+' run='+runboundary[iConf]+ (' instance='+opts.instance+'\'' if (opts.instance is not None) else '\'')
                s = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
                out, err = s.communicate()
                mylist = out.decode().split('\n')
                mylist.pop()  # drop the empty entry left by the trailing newline
                print("mylist:", mylist)

                splitList = split(mylist, 10)
                for files in splitList:
                    inputFiles.append(files)
                    myRuns.append(str(runboundary[iConf]))

                myLumiDB = getLuminosity(HOME, myRuns[0], myRuns[-1], True, opts.verbose)
        else:
            for element in od:
                inputFiles.append(od[element])
                myRuns.append(element)

        print("myRuns =====>", myRuns)
        totalJobs = 0
        for jobN, theSrcFiles in enumerate(inputFiles):
            if(opts.verbose):
                print("JOB:", jobN, "run", myRuns[jobN], theSrcFiles)
            else:
                print("JOB:", jobN, "run", myRuns[jobN])

            thejobIndex = None
            theLumi = '1'

            if(doRunBased):
                thejobIndex = myRuns[jobN]
            else:
                thejobIndex = myRuns[jobN]+"_"+str(jobN)

            if (myRuns[jobN]) in myLumiDB:
                theLumi = myLumiDB[myRuns[jobN]]
            else:
                print("=====> COULD NOT FIND LUMI, setting default = 1/pb")
                theLumi = '1'
            print("int. lumi:", theLumi, "/pb")
            runInfo = {}
            runInfo["run"]            = myRuns[jobN]
            runInfo["conf"]           = jobName[iConf]
            runInfo["gt"]             = gt[iConf]
            runInfo["allFromGT"]      = allFromGT[iConf]
            runInfo["alignmentDB"]    = alignmentDB[iConf]
            runInfo["alignmentTag"]   = alignmentTAG[iConf]
            runInfo["apeDB"]          = apeDB[iConf]
            runInfo["apeTag"]         = apeTAG[iConf]
            runInfo["applyBows"]      = applyBOWS[iConf]
            runInfo["bowDB"]          = bowDB[iConf]
            runInfo["bowTag"]         = bowTAG[iConf]
            runInfo["ptCut"]          = ptcut[iConf]
            runInfo["lumilist"]       = lumilist[iConf]
            runInfo["applyEXTRACOND"] = applyEXTRACOND[iConf]
            runInfo["conditions"]     = conditions[iConf]
            runInfo["nfiles"]         = len(theSrcFiles)
            runInfo["srcFiles"]       = theSrcFiles
            runInfo["intLumi"]        = theLumi

            updateDB(((iConf+1)*10)+(jobN+1), runInfo)

            totalJobs = totalJobs+1
            aJob = Job(opts.data,
                       jobN,
                       thejobIndex,
                       jobName[iConf], isDA[iConf], isMC[iConf],
                       applyBOWS[iConf], applyEXTRACOND[iConf], conditions[iConf],
                       myRuns[jobN], lumilist[iConf], theLumi, maxevents[iConf],
                       gt[iConf], allFromGT[iConf],
                       alignmentDB[iConf], alignmentTAG[iConf],
                       apeDB[iConf], apeTAG[iConf],
                       bowDB[iConf], bowTAG[iConf],
                       vertextype[iConf], tracktype[iConf],
                       refittertype[iConf], ttrhtype[iConf],
                       applyruncontrol[iConf],
                       ptcut[iConf], input_CMSSW_BASE, AnalysisStep_dir)

            aJob.setEOSout(eosdir)
            aJob.createTheCfgFile(theSrcFiles)
            aJob.createTheBashFile()

            output_file_list1.append("xrdcp root://eoscms//eos/cms"+aJob.getOutputFileName()+" /tmp/$USER/"+opts.taskname+" \n")
            if jobN == 0:
                theBashDir = aJob.BASH_dir
                theBaseName = aJob.getOutputBaseNameWithData()
                mergedFile = "/tmp/$USER/"+opts.taskname+"/"+aJob.getOutputBaseName()+opts.taskname+".root"
                output_file_list2.append("/tmp/$USER/"+opts.taskname+"/"+aJob.getOutputBaseName()+opts.taskname+".root ")
            output_file_list2.append("/tmp/$USER/"+opts.taskname+"/"+os.path.split(aJob.getOutputFileName())[1]+" ")
        job_submit_file = write_HTCondor_submit_file(theBashDir, theBaseName, totalJobs, None)

        if opts.submit:
            os.system("chmod u+x "+theBashDir+"/*.sh")
            submissionCommand = "condor_submit "+job_submit_file
            submissionOutput = getCommandOutput(submissionCommand)
            print(submissionOutput)
        fout.write("#!/bin/bash \n")
        fout.write("MAIL=$USER@mail.cern.ch \n")
        fout.write("OUT_DIR="+eosdir+"\n")
        fout.write("FILE="+str(mergedFile)+"\n")
        fout.write("echo $HOST | mail -s \"Harvesting job started\" $USER@mail.cern.ch \n")
        fout.write("cd "+os.path.join(input_CMSSW_BASE, "src")+"\n")
        fout.write("eval `scram r -sh` \n")
        fout.write("mkdir -p /tmp/$USER/"+opts.taskname+" \n")
        fout.writelines(output_file_list1)
        fout.writelines(output_file_list2)
        fout.write("\n")
        fout.write("echo \"xrdcp -f $FILE root://eoscms//eos/cms$OUT_DIR\" \n")
        fout.write("xrdcp -f $FILE root://eoscms//eos/cms$OUT_DIR \n")
        fout.write("echo \"Harvesting for "+opts.taskname+" task is complete; please find output at $OUT_DIR \" | mail -s \"Harvesting for "+opts.taskname+" completed\" $MAIL \n")

        os.system("chmod u+x "+hadd_script_file)
        harvest_conditions = '"' + " && ".join(["ended(" + jobId + ")" for jobId in batchJobIds]) + '"'
        print(harvest_conditions)
        lastJobCommand = "bsub -o harvester"+opts.taskname+".tmp -q 1nh -w "+harvest_conditions+" "+hadd_script_file
        print(lastJobCommand)
        if opts.submit:
            lastJobOutput = getCommandOutput(lastJobCommand)
            print(lastJobOutput)
        del output_file_list1
if __name__ == "__main__":
    main()