from __future__ import print_function
import argparse
import re
from math import sqrt
import RecoLuminosity.LumiDB.LumiConstants as LumiConstants
if __name__ == '__main__':
    # ParametersObject is assumed to provide orbitLength and lumiSectionLength.
    parameters = LumiConstants.ParametersObject()
    parser = argparse.ArgumentParser(description="Script to estimate average and RMS pileup using the per-bunch luminosity information provided by brilcalc. The output is a JSON file containing a dictionary by runs with one entry per lumi section.")
    parser.add_argument('inputFile', help='CSV input file as produced from brilcalc')
    parser.add_argument('outputFile', help='Name of JSON output file')
    parser.add_argument('-b', '--selBX', metavar='BXLIST', help='Comma-separated list of BXs to use (will use all by default)')
    parser.add_argument('-n', '--no-threshold', action='store_true', help='By default, to avoid including spurious luminosity from afterglow in the pileup calculation, bunches with luminosity below a given threshold are excluded. This threshold is 8.0/ub/LS for HFOC at nominal energy, 2.0/ub/LS for HFOC at low energy, and 1.2/ub/LS for other luminometers. If the input data has already been preprocessed (e.g. by using the --xingTr argument in brilcalc) to exclude these bunches, or if you are running on special data with low overall luminosity, then use this flag to disable the application of the threshold.')
    args = parser.parse_args()
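    # Typical workflow, as a sketch. Apart from --xing/--xingTr, which the help
    # text above mentions, the brilcalc options below are assumptions; check the
    # brilcalc documentation. Note that the thresholds assume per-bunch
    # luminosity in /ub per lumi section.
    #   brilcalc lumi --byls --xing -u /ub -o perbunch.csv ...
    #   python <this script> perbunch.csv pileup.json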
    output = args.outputFile
    # Build the set of selected bunch crossings; an empty set means "use all BXs".
    sel_bx = set()
    if args.selBX:
        for ibx in args.selBX.split(","):
            try:
                sel_bx.add(int(ibx))
            except ValueError:
                print(ibx, "is not an int")
        print("Processing", args.inputFile, "with selected BXs:", sorted(sel_bx))
    else:
        print("Processing", args.inputFile, "with all BX")
    # brilcalc output mixes CSV fields with bracketed lists, so split each line
    # on brackets, whitespace, commas, semicolons, and colons at once.
    sepRE = re.compile(r'[\]\[\s,;:]+')
    csv_input = open(args.inputFile, 'r')
    last_run = -1
    output_line = '{'

    for line in csv_input:
        if line.startswith('#'):
            continue  # skip comment/header lines
        pieces = sepRE.split(line.strip())
        if len(pieces) < 15:
            raise RuntimeError("Not enough fields in input line; maybe you forgot to include --xing in your brilcalc command?\n"+line)
        try:
            run = int(pieces[0])             # run number (index assumed)
            lumi_section = int(pieces[2])
            beam_energy = float(pieces[10])  # beam energy in GeV (index assumed)
            luminometer = pieces[14]
            if luminometer == "HFOC":
                if beam_energy > 3000:
                    threshold = 8.0
                else:
                    threshold = 2.0
            else:
                threshold = 1.2
            # Keep only selected BXs and, unless --no-threshold is given, bunches
            # with delivered luminosity above the afterglow threshold.
            xing_lumi_array = []
            for bxid, bunch_del_lumi, bunch_rec_lumi in zip(pieces[15::3], pieces[16::3], pieces[17::3]):
                if sel_bx and int(bxid) not in sel_bx:
                    continue
                if args.no_threshold or float(bunch_del_lumi) > threshold:
                    xing_lumi_array.append([int(bxid), float(bunch_del_lumi), float(bunch_rec_lumi)])
        except Exception:
            print("Failed to parse line: check if the input format has changed")
            print(pieces[0], pieces[1], pieces[2], pieces[3], pieces[4], pieces[5], pieces[6], pieces[7], pieces[8], pieces[9])
            continue  # assumption: skip lines that cannot be parsed rather than reusing stale values
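        # When the run number changes, close the previous run's list in the
        # output string and open a new '"<run>": [' entry.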
        if run != last_run:
            if last_run > 0:
                output_line = output_line[:-1] + '], '
            last_run = run
            output_line += ('\n"%d":' % run)
            output_line += ' ['
        # First pass: luminosity-weighted sums for the mean pileup.
        total_lumi = total_int = total_int2 = total_weight2 = 0
        for bxid, bunch_del_lumi, bunch_rec_lumi in xing_lumi_array:
            total_lumi += bunch_rec_lumi
            total_int += bunch_del_lumi*bunch_rec_lumi
        # Convert the summed luminosity into an average number of interactions per crossing.
        total_int *= parameters.orbitLength / parameters.lumiSectionLength
        mean_int = total_int/total_lumi if total_lumi > 0 else 0
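        # In other words, mean_int = sum_i(w_i * x_i) / sum_i(w_i), where
        # w_i is the recorded lumi of bunch i and x_i its delivered lumi
        # converted to a per-crossing pileup via orbitLength/lumiSectionLength.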
        # Second pass: luminosity-weighted spread of the per-bunch pileup around the mean.
        for bxid, bunch_del_lumi, bunch_rec_lumi in xing_lumi_array:
            mean_pileup = bunch_del_lumi * parameters.orbitLength / parameters.lumiSectionLength
            if mean_pileup > 100:
                print("mean number of pileup events > 100 for run %d, lum %d : m %f l %f" % (run, lumi_section, mean_pileup, bunch_del_lumi))
            total_int2 += bunch_rec_lumi*(mean_pileup-mean_int)*(mean_pileup-mean_int)
            total_weight2 += bunch_rec_lumi*bunch_rec_lumi
        # Weighted RMS of the per-bunch pileup; left at zero if there is no luminosity.
        bunch_rms_lumi = 0
        denom = total_lumi*total_lumi - total_weight2
        if total_lumi > 0 and denom > 0:
            bunch_rms_lumi = sqrt(total_lumi*total_int2/denom)
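        # This is the weighted RMS with the unbiased normalisation:
        #   rms^2 = sum_i(w_i) * sum_i(w_i*(x_i - mean)^2) / ((sum_i w_i)^2 - sum_i(w_i^2))
        # with w_i the recorded lumi of bunch i and x_i its per-crossing pileup.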
        # One output entry per lumi section: [LS, total lumi, RMS pileup, mean pileup].
        output_line += "[%d,%2.4e,%2.4e,%2.4e]," % (lumi_section, total_lumi, bunch_rms_lumi, mean_int)
        last_valid_lumi = [lumi_section, total_lumi, bunch_rms_lumi, mean_int]
    # Replace the trailing comma of the last entry and close the JSON structure.
    output_line = output_line[:-1] + ']}'
    csv_input.close()

    try:
        outputfile = open(output, 'w')
    except IOError:
        raise RuntimeError("Could not open '%s' as an output JSON file" % output)

    outputfile.write(output_line)
    outputfile.close()
    print("Output written to", output)