15 LOG_FORMAT=
'%(asctime)s: %(name)-20s - %(levelname)-8s - %(message)s' 16 logging.basicConfig(format=LOG_FORMAT)
17 log = logging.getLogger(
"mbProfile")
18 log.setLevel(logging.INFO)
22 fp = os.path.join(ppath, f)
29 fp = os.path.join(ppath,
"status")
31 for line
in fd.readlines():
34 key, value = line.split(
":", 1)
35 st[key] = value.strip()
42 dct[
"statm"] = read(
"statm").
strip()
43 dct[
"stat"] = read(
"stat").
strip()
44 dct[
"cmdline"] = read(
"cmdline").
strip().
replace(
"\0",
" ")
46 status = read_status()
47 dct[
"status"] = status
48 dct[
"pid"] =
int(status[
"Pid"])
49 dct[
"parent_pid"] =
int(status[
"PPid"])
53 log.warning(
"Exception in read_procfs.", exc_info=
True)
57 lst = os.listdir(
"/proc/")
59 if not f.isdigit():
continue 66 """ Select all processes which are descendant from ppid (exclusive). """ 70 proc[
"_children"] = []
71 pid_dct[proc[
"pid"]] = proc
74 for pid
in list(pid_dct.keys()):
75 parent_pid = pid_dct[pid][
"parent_pid"]
77 if parent_pid
in pid_dct:
78 pid_dct[parent_pid][
"_children"].
append(pid)
81 if ppid
is None or ppid
not in pid_dct:
86 to_accept = collections.deque([ppid, ])
89 head = pid_dct[to_accept.popleft()]
92 if head[
"pid"] != ppid:
95 to_accept.extend(head.get(
"_children", []))
96 head[
"children"] = head[
"_children"]
118 self.
_file = open(self._args.file,
"w")
125 fields_to_subtract = (
126 "ru_utime",
"ru_stime",
"ru_maxrss",
"ru_minflt",
"ru_majflt",
"ru_nswap",
127 "ru_inblock",
"ru_oublock",
"ru_msgsnd",
"ru_msgrcv",
"ru_nsignals",
"ru_nvcsw",
"ru_nivcsw",
130 rusage = resource.getrusage(resource.RUSAGE_CHILDREN)
136 for field
in fields_to_subtract:
137 current = getattr(self.
ru, field)
140 self.
ru_diff[field] = current - base
144 Private, Shared, Pss = 0, 0, 0
146 fp = os.path.join(
"/proc/%d" % proc_dict[
"pid"],
"smaps")
148 for line
in fd.readlines():
149 if line.startswith(
"Shared"):
150 Shared +=
int(line.split()[1])
151 elif line.startswith(
"Private"):
152 Private +=
int(line.split()[1])
153 elif line.startswith(
"Pss"):
154 Pss +=
int(line.split()[1])
156 proc_dict[
"smaps_shared"] = Shared * 1024
157 proc_dict[
"smaps_private"] = Private * 1024
158 proc_dict[
"smaps_pss"] = Pss * 1024
169 log.warning(
"Exception in read_smaps.", exc_info=
True)
172 stopped = set(self.known_pids.keys())
174 proc[
"running"] =
True 186 self.
time = time.time()
193 self._file.write(
"\n")
196 log.info(
"Written profile to: %s, took=%.03f", self._args.file, time.time() - self.
time)
199 dct = collections.OrderedDict()
200 dct[
'time'] = self.
time 201 dct[
'pid'] = self.
pid 202 dct[
'final'] = self.
final 205 dct[
'ru'] =
dict((k, v)
for k, v
in inspect.getmembers(self.
ru)
if k.startswith(
'ru_'))
217 log.info(
"ru_diff: %s", self.
ru_diff)
221 ALARM_P_OBJECT =
None 225 ALARM_P_OBJECT.update()
227 signal.alarm(ALARM_TIMER)
232 proc = subprocess.Popen(args.pargs)
233 profile.pid = proc.pid
235 global ALARM_P_OBJECT
236 ALARM_P_OBJECT = profile
238 signal.signal(signal.SIGALRM, handle_alarm)
239 signal.alarm(ALARM_TIMER)
246 if p
and not os.path.exists(p):
250 os.path.join(os.getenv(
"CMSSW_BASE"),
"src/DQMServices/Components/data/html"),
251 os.path.join(os.getenv(
"CMSSW_RELEASE_BASE"),
"src/DQMServices/Components/data/html"),
257 x = os.path.join(p, f)
258 if os.path.exists(x):
263 log.warning(
"Could not find html file: %s (%s)", f, fails)
265 for f
in [
'mbGraph.js',
'mbGraph.html']:
266 target_fn = os.path.join(p, f)
267 source_fn = find_file(f)
269 log.info(
"Copying %s to %s", source_fn, target_fn)
270 shutil.copyfile(source_fn, target_fn)
273 target_fn = os.path.join(p,
"mbGraph.json")
274 log.info(
"Creating %s", target_fn)
275 with open(target_fn,
"w")
as fp:
277 "file": os.path.basename(args.file),
280 "CMSSW_GIT_HASH": os.getenv(
"CMSSW_GIT_HASH"),
281 "CMSSW_RELEASE_BASE": os.getenv(
"CMSSW_RELEASE_BASE"),
282 "SCRAM_ARCH": os.getenv(
"SCRAM_ARCH"),
286 json.dump(dct, fp, indent=2)
289 if __name__ ==
"__main__":
290 parser = argparse.ArgumentParser(description=
"Profile child processes and produce data for rss and such graphs.")
291 parser.add_argument(
"-f",
"--file", type=str, default=
"performance.json", help=
"Filename to write.", metavar=
"performance.json")
292 parser.add_argument(
"-i", type=int, help=
"Time interval between profiles.", default=15)
293 parser.add_argument(
'-q', action=
'store_true', help=
"Reduce logging.")
294 parser.add_argument(
'-w', action=
'store_true', help=
"Write html helper files for rendering the performance file.")
295 parser.add_argument(
'pargs', nargs=argparse.REMAINDER)
297 args = parser.parse_args()
302 elif args.pargs[0] ==
"--":
304 args.pargs = args.pargs[1:]
309 log.setLevel(logging.WARNING)
312 p = os.path.dirname(args.file)
def replace(string, replacements)
def read_smaps(self, proc_dict)
def handle_alarm(num, frame)
def run_and_monitor(args)
def find_and_write_html(p, args)
def read_procfs(ppath, only_ppid=True)
How EventSelector::AcceptEvent() decides whether to accept an event for output (otherwise it is excluded): For a single or multiple positive criteria, the probing is done and the trigger will pass if any such matching triggers are PASS or EXCEPTION. (A criterion that matches no triggers at all is detected and causes a throw.) For a single negative criterion, with the expectation of appropriate bit checking in the decision, the trigger will pass if any such matching triggers are FAIL or EXCEPTION. A wildcarded negative criterion that matches more than one trigger in the trigger list ("!*", "!HLTx*" if it matches 2 triggers or more) will accept the event if all the matching triggers are FAIL. It will reject the event if any of the triggers are PASS or EXCEPTION (this matches the behavior of "!*" before the partial wildcard feature was incorporated). Triggers which are in the READY state are completely ignored. (READY should never be returned since the trigger paths have been run.)