blob: 5b784cba01cfd00e91b29dd7825b84c5f6dfd95f [file] [log] [blame]
#!/usr/bin/python
2# @lint-avoid-python-3-compatibility-imports
3#
4# profile Profile CPU usage by sampling stack traces at a timed interval.
5# For Linux, uses BCC, BPF, perf_events. Embedded C.
6#
7# This is an efficient profiler, as stack traces are frequency counted in
8# kernel context, rather than passing every stack to user space for frequency
9# counting there. Only the unique stacks and counts are passed to user space
10# at the end of the profile, greatly reducing the kernel<->user transfer.
11#
12# This uses perf_event_open to setup a timer which is instrumented by BPF,
13# and for efficiency it does not initialize the perf ring buffer, so the
14# redundant perf samples are not collected.
15#
# Kernel stacks are post-processed in user-land to skip the interrupt framework
# frames. You can improve efficiency a little by specifying the exact number
# of frames to skip with -S, provided you know what that is. If you get -S
# wrong, note that the first line is the IP, and then the (skipped) stack.
20#
# Note: if another perf-based sampling session is active, the output may become
# polluted with their events. On older kernels, the output may also become
# polluted with tracing sessions (when the kprobe is used instead of the
# tracepoint). If this becomes a problem, logic can be added to filter events.
25#
26# REQUIRES: Linux 4.6+ (BPF_MAP_TYPE_STACK_TRACE support), and the
27# perf:perf_hrtimer tracepoint (currently a kernel patch). If the latter is
28# unavailable, this will try to use kprobes as a fallback, which may work or
29# may instrument nothing, depending on your kernel build.
30#
31# Copyright 2016 Netflix, Inc.
32# Licensed under the Apache License, Version 2.0 (the "License")
33#
34# THANKS: Sasha Goldshtein, Andrew Birchall, and Evgeny Vereshchagin, who wrote
35# much of the code here, borrowed from tracepoint.py and offcputime.py.
36#
37# 15-Jul-2016 Brendan Gregg Created this.
38
39from __future__ import print_function
40from bcc import BPF, Perf
41from sys import stderr
42from time import sleep
43import argparse
44import signal
45import os
46import errno
47import multiprocessing
48import ctypes as ct
49
50#
51# Process Arguments
52#
53
54# arg validation
def positive_int(val):
    """argparse type function: parse val as a non-negative integer.

    Raises argparse.ArgumentTypeError on non-integers and negative values.
    Note that 0 is accepted.
    """
    try:
        parsed = int(val)
    except ValueError:
        raise argparse.ArgumentTypeError("must be an integer")
    if parsed >= 0:
        return parsed
    raise argparse.ArgumentTypeError("must be positive")
64
def positive_nonzero_int(val):
    """argparse type function: parse val as a strictly positive integer.

    Delegates integer/negative validation to positive_int, then rejects 0.
    """
    parsed = positive_int(val)
    if not parsed:
        raise argparse.ArgumentTypeError("must be nonzero")
    return parsed
70
# arguments
examples = """examples:
    ./profile             # profile stack traces at 49 Hertz until Ctrl-C
    ./profile -F 99       # profile stack traces at 99 Hertz
    ./profile 5           # profile at 49 Hertz for 5 seconds only
    ./profile -f 5        # output in folded format for flame graphs
    ./profile -p 185      # only profile threads for PID 185
    ./profile -U          # only show user space stacks (no kernel)
    ./profile -K          # only show kernel space stacks (no user)
    ./profile -S 11       # always skip 11 frames of kernel stack
"""
parser = argparse.ArgumentParser(
    description="Profile CPU stack traces at a timed interval",
    formatter_class=argparse.RawDescriptionHelpFormatter,
    epilog=examples)
thread_group = parser.add_mutually_exclusive_group()
thread_group.add_argument("-p", "--pid", type=positive_int,
    help="profile this PID only")
# TODO: add options for user/kernel threads only
stack_group = parser.add_mutually_exclusive_group()
stack_group.add_argument("-U", "--user-stacks-only", action="store_true",
    help="show stacks from user space only (no kernel space stacks)")
stack_group.add_argument("-K", "--kernel-stacks-only", action="store_true",
    help="show stacks from kernel space only (no user space stacks)")
parser.add_argument("-F", "--frequency", type=positive_int, default=49,
    help="sample frequency, Hertz (default 49)")
parser.add_argument("-d", "--delimited", action="store_true",
    help="insert delimiter between kernel/user stacks")
parser.add_argument("-a", "--annotations", action="store_true",
    help="add _[k] annotations to kernel frames")
parser.add_argument("-f", "--folded", action="store_true",
    help="output folded format, one line per stack (for flame graphs)")
parser.add_argument("--stack-storage-size", default=2048,
    type=positive_nonzero_int,
    help="the number of unique stack traces that can be stored and "
        "displayed (default 2048)")
# fix: help text used to say "(default 3)", but the actual default is 0
# (0 means "auto-detect the skip count via the recorded kernel RIP")
parser.add_argument("-S", "--kernel-skip", type=positive_int, default=0,
    help="skip this many kernel frames (default 0)")
parser.add_argument("duration", nargs="?", default=99999999,
    type=positive_nonzero_int,
    help="duration of trace, in seconds")

# option logic
args = parser.parse_args()
skip = args.kernel_skip
pid = int(args.pid) if args.pid is not None else -1
duration = int(args.duration)
debug = 0
# a kernel/user delimiter only makes sense when both stack types are shown
need_delimiter = args.delimited and not (args.kernel_stacks_only or
    args.user_stacks_only)
# TODO: add stack depth, and interval
122
123#
124# Setup BPF
125#
126
# define BPF program
#
# The C source below contains placeholder tokens (THREAD_FILTER,
# STACK_STORAGE_SIZE, PERF_TRACE_EVENT, USER_STACK_GET, KERNEL_STACK_GET,
# DO_KERNEL_RIP, REGS_LOCATION) that are substituted with concrete code and
# values later in this script, before the text is compiled by BPF().
# NOTE(review): the "start" hash is declared but never referenced elsewhere
# in this file — possibly leftover; confirm before removing.
bpf_text = """
#include <uapi/linux/ptrace.h>
#include <linux/sched.h>

struct key_t {
    u32 pid;
    u64 kernel_ip;
    u64 kernel_ret_ip;
    int user_stack_id;
    int kernel_stack_id;
    char name[TASK_COMM_LEN];
};
BPF_HASH(counts, struct key_t);
BPF_HASH(start, u32);
BPF_STACK_TRACE(stack_traces, STACK_STORAGE_SIZE)

// This code gets a bit complex. Probably not suitable for casual hacking.

PERF_TRACE_EVENT {
    u32 pid = bpf_get_current_pid_tgid();
    if (!(THREAD_FILTER))
        return 0;

    // create map key
    u64 zero = 0, *val;
    struct key_t key = {.pid = pid};
    bpf_get_current_comm(&key.name, sizeof(key.name));

    // get stacks
    key.user_stack_id = USER_STACK_GET;
    key.kernel_stack_id = KERNEL_STACK_GET;

    if (key.kernel_stack_id >= 0) {
        // populate extras to fix the kernel stack
        struct pt_regs regs = {};
        bpf_probe_read(&regs, sizeof(regs), (void *)REGS_LOCATION);
        u64 ip = PT_REGS_IP(&regs);
        // if ip isn't sane, leave key ips as zero for later checking
        if (ip > PAGE_OFFSET) {
            key.kernel_ip = ip;
            if (DO_KERNEL_RIP) {
                /*
                 * User didn't specify a skip value (-s), so we will figure
                 * out how many interrupt framework frames to skip by recording
                 * the kernel rip, then later scanning for it on the stack.
                 * This is likely x86_64 specific; can use -s as a workaround
                 * until this supports your architecture.
                 */
                bpf_probe_read(&key.kernel_ret_ip, sizeof(key.kernel_ret_ip),
                    (void *)(regs.bp + 8));
            }
        }
    }

    val = counts.lookup_or_init(&key, &zero);
    (*val)++;
    return 0;
}
"""
187
# set thread filter: either restrict sampling to one PID, or keep everything
if args.pid is None:
    thread_context = "all threads"
    thread_filter = '1'
    perf_filter = "-a"
else:
    thread_context = "PID %s" % args.pid
    thread_filter = 'pid == %s' % args.pid
    perf_filter = '-p %s' % args.pid
bpf_text = bpf_text.replace('THREAD_FILTER', thread_filter)

# set stack storage size
bpf_text = bpf_text.replace('STACK_STORAGE_SIZE', str(args.stack_storage_size))

# handle stack args: build the get_stackid() expressions, where "-1"
# disables collection of that stack type entirely
kernel_stack_get = "stack_traces.get_stackid(args, " \
    "%d | BPF_F_REUSE_STACKID)" % skip
user_stack_get = \
    "stack_traces.get_stackid(args, BPF_F_REUSE_STACKID | BPF_F_USER_STACK)"
if args.user_stacks_only:
    stack_context, kernel_stack_get = "user", "-1"
elif args.kernel_stacks_only:
    stack_context, user_stack_get = "kernel", "-1"
else:
    stack_context = "user + kernel"
bpf_text = bpf_text.replace('USER_STACK_GET', user_stack_get)
bpf_text = bpf_text.replace('KERNEL_STACK_GET', kernel_stack_get)
# when no fixed skip count was given (-S), record the kernel rip so the
# interrupt infrastructure frames can be located and skipped in user space;
# otherwise skip recording it, as it won't be used
bpf_text = bpf_text.replace('DO_KERNEL_RIP', '0' if skip else '1')
225
# header: describe the sampling session on stdout (suppressed in folded mode,
# which must emit nothing but stack lines)
if not args.folded:
    header = "Sampling at %d Hertz of %s by %s stack" % (
        args.frequency, thread_context, stack_context)
    if duration < 99999999:
        print(header + " for %d secs." % duration)
    else:
        print(header + "... Hit Ctrl-C to end.")
234
# use perf tracepoint if it exists, else kprobe
if os.path.exists("/sys/kernel/debug/tracing/events/perf/perf_hrtimer"):
    # preferred: the perf:perf_hrtimer tracepoint (currently a kernel patch)
    probe_decl = 'TRACEPOINT_PROBE(perf, perf_hrtimer)'
    regs_location = 'args->regs'
else:
    # fallback: kprobe on perf_misc_flags(); per the header comments this
    # may work or may instrument nothing, depending on the kernel build
    if not args.folded:
        print("Tracepoint perf:perf_hrtimer missing. "
            "Trying kprobe of perf_misc_flags()...")
    probe_decl = 'int kprobe__perf_misc_flags(struct pt_regs *args)'
    regs_location = 'PT_REGS_PARM1(args)'
bpf_text = bpf_text.replace('PERF_TRACE_EVENT', probe_decl)
bpf_text = bpf_text.replace('REGS_LOCATION', regs_location)
if debug:
    print(bpf_text)

# initialize BPF
b = BPF(text=bpf_text)
252
# signal handler
def signal_ignore(sig, frame):
    """SIGINT handler installed during cleanup: swallow Ctrl-C, print a
    newline so the prompt isn't left mid-line."""
    print()
256
257#
258# Setup perf_events
259#
260
# use perf_events to sample: open a software CPU-clock event on every CPU at
# args.frequency Hertz. The perf ring buffer is deliberately not initialized
# (see header comments), so no redundant samples are pushed to user space.
try:
    Perf.perf_event_open(0, pid=-1, ptype=Perf.PERF_TYPE_SOFTWARE,
        freq=args.frequency)
except Exception:
    # fix: was a bare "except:", which would also swallow SystemExit and
    # KeyboardInterrupt; narrow to Exception so those still propagate
    print("ERROR: initializing perf_events for sampling.\n"
        "To debug this, try running the following command:\n"
        "    perf record -F 49 -e cpu-clock %s -- sleep 1\n"
        "If that also doesn't work, fix it first." % perf_filter, file=stderr)
    exit(0)
271
272#
273# Output Report
274#
275
# collect samples: block in user space for the trace duration while the BPF
# program counts stacks in kernel context
try:
    sleep(duration)
except KeyboardInterrupt:
    # as cleanup can take some time, trap Ctrl-C:
    # further SIGINTs are ignored (only printing a newline) so the report
    # below is not cut short
    signal.signal(signal.SIGINT, signal_ignore)

# separate the header from the report (suppressed in folded mode)
if not args.folded:
    print()
285
def aksym(addr):
    """Resolve a kernel address to its symbol name, appending the "_[k]"
    annotation when -a/--annotations was given (reads globals b and args)."""
    suffix = "_[k]" if args.annotations else ""
    return b.ksym(addr) + suffix
291
# output stacks
missing_stacks = 0      # stacks that get_stackid() failed to record
has_enomem = False      # any failure was ENOMEM (stack table full)
counts = b.get_table("counts")
stack_traces = b.get_table("stack_traces")
# iterate samples sorted by count, least frequent first
# NOTE: the lambda parameter shadows the "counts" table above; it is just
# the (key, leaf) tuple being sorted
for k, v in sorted(counts.items(), key=lambda counts: counts[1].value):
    # handle get_stackid errors: negative stack ids are -errno codes.
    # -EFAULT is not counted as missing (it is expected when a stack of
    # that type simply doesn't exist for the sampled thread)
    if (not args.user_stacks_only and k.kernel_stack_id < 0 and
            k.kernel_stack_id != -errno.EFAULT) or \
            (not args.kernel_stacks_only and k.user_stack_id < 0 and
            k.user_stack_id != -errno.EFAULT):
        missing_stacks += 1
        # check for an ENOMEM error
        if k.kernel_stack_id == -errno.ENOMEM or \
                k.user_stack_id == -errno.ENOMEM:
            has_enomem = True

    # a negative id means no stack of that type: substitute an empty one
    user_stack = [] if k.user_stack_id < 0 else \
        stack_traces.walk(k.user_stack_id)
    kernel_tmp = [] if k.kernel_stack_id < 0 else \
        stack_traces.walk(k.kernel_stack_id)

    # fix kernel stack: strip the interrupt framework frames from the top
    kernel_stack = []
    if k.kernel_stack_id >= 0:
        if skip:
            # fixed skip count was provided with -S
            for addr in kernel_tmp:
                kernel_stack.append(addr)
            kernel_stack = kernel_stack[skip:]
        else:
            # skip the interrupt framework stack by searching for our RIP
            # (kernel_ret_ip was recorded by the BPF program; frames before
            # the match are interrupt infrastructure and are dropped)
            skipping = 1
            for addr in kernel_tmp:
                if k.kernel_ret_ip == addr:
                    skipping = 0
                if not skipping:
                    kernel_stack.append(addr)
        # prepend the sampled instruction pointer as the true leaf frame
        # (left as zero by the BPF program when it wasn't sane)
        if k.kernel_ip:
            kernel_stack.insert(0, k.kernel_ip)

    # only delimit when there is a kernel stack to separate from
    do_delimiter = need_delimiter and kernel_stack

    if args.folded:
        # print folded stack output: one semicolon-joined line per unique
        # stack (root first), followed by the sample count — the input
        # format expected by flame graph tools
        user_stack = list(user_stack)
        kernel_stack = list(kernel_stack)
        line = [k.name.decode()] + \
            [b.sym(addr, k.pid) for addr in reversed(user_stack)] + \
            (do_delimiter and ["-"] or []) + \
            [aksym(addr) for addr in reversed(kernel_stack)]
        print("%s %d" % (";".join(line), v.value))
    else:
        # print default multi-line stack output.
        for addr in kernel_stack:
            print("    %016x %s" % (addr, aksym(addr)))
        if do_delimiter:
            print("    --")
        for addr in user_stack:
            print("    %016x %s" % (addr, b.sym(addr, k.pid)))
        print("    %-16s %s (%d)" % ("-", k.name, k.pid))
        print("        %d\n" % v.value)
354
# check missing: warn (on stderr, so folded output stays clean) when any
# stack traces could not be recorded or displayed
if missing_stacks > 0:
    advice = " Consider increasing --stack-storage-size." if has_enomem else ""
    print("WARNING: %d stack traces could not be displayed.%s" %
        (missing_stacks, advice), file=stderr)