#!/usr/bin/python
# @lint-avoid-python-3-compatibility-imports
#
# profile  Profile CPU usage by sampling stack traces at a timed interval.
#          For Linux, uses BCC, BPF, perf_events. Embedded C.
#
# This is an efficient profiler, as stack traces are frequency counted in
# kernel context, rather than passing every stack to user space for frequency
# counting there. Only the unique stacks and counts are passed to user space
# at the end of the profile, greatly reducing the kernel<->user transfer.
#
# This uses perf_event_open to set up a timer which is instrumented by BPF,
# and for efficiency it does not initialize the perf ring buffer, so that
# redundant perf samples are not collected.
#
# REQUIRES: Linux 4.9+ (BPF_PROG_TYPE_PERF_EVENT support). Under tools/old is
# a version of this tool that may work on Linux 4.6 - 4.8.
#
# Copyright 2016 Netflix, Inc.
# Licensed under the Apache License, Version 2.0 (the "License")
#
# THANKS: Alexei Starovoitov, who added proper BPF profiling support to Linux;
# Sasha Goldshtein, Andrew Birchall, and Evgeny Vereshchagin, who wrote much
# of the code here, borrowed from tracepoint.py and offcputime.py; and
# Teng Qin, who added perf support in bcc.
#
# 15-Jul-2016   Brendan Gregg   Created this.
# 20-Oct-2016      "      "     Switched to use the new 4.9 support.

from __future__ import print_function
from bcc import BPF, PerfType, PerfSWConfig
from sys import stderr
from time import sleep
import argparse
import signal
import os
import errno
import multiprocessing
import ctypes as ct

#
# Process Arguments
#

# arg validation
def positive_int(val):
    try:
        ival = int(val)
    except ValueError:
        raise argparse.ArgumentTypeError("must be an integer")

    if ival < 0:
        raise argparse.ArgumentTypeError("must be positive")
    return ival

def positive_nonzero_int(val):
    ival = positive_int(val)
    if ival == 0:
        raise argparse.ArgumentTypeError("must be nonzero")
    return ival

# arguments
examples = """examples:
    ./profile             # profile stack traces at 49 Hertz until Ctrl-C
    ./profile -F 99       # profile stack traces at 99 Hertz
    ./profile 5           # profile at 49 Hertz for 5 seconds only
    ./profile -f 5        # output in folded format for flame graphs
    ./profile -p 185      # only profile threads for PID 185
    ./profile -U          # only show user space stacks (no kernel)
    ./profile -K          # only show kernel space stacks (no user)
"""
parser = argparse.ArgumentParser(
    description="Profile CPU stack traces at a timed interval",
    formatter_class=argparse.RawDescriptionHelpFormatter,
    epilog=examples)
thread_group = parser.add_mutually_exclusive_group()
thread_group.add_argument("-p", "--pid", type=positive_int,
    help="profile this PID only")
# TODO: add options for user/kernel threads only
stack_group = parser.add_mutually_exclusive_group()
stack_group.add_argument("-U", "--user-stacks-only", action="store_true",
    help="show stacks from user space only (no kernel space stacks)")
stack_group.add_argument("-K", "--kernel-stacks-only", action="store_true",
    help="show stacks from kernel space only (no user space stacks)")
parser.add_argument("-F", "--frequency", type=positive_int, default=49,
    help="sample frequency, Hertz (default 49)")
parser.add_argument("-d", "--delimited", action="store_true",
    help="insert delimiter between kernel/user stacks")
parser.add_argument("-a", "--annotations", action="store_true",
    help="add _[k] annotations to kernel frames")
parser.add_argument("-f", "--folded", action="store_true",
    help="output folded format, one line per stack (for flame graphs)")
parser.add_argument("--stack-storage-size", default=10240,
    type=positive_nonzero_int,
    help="the number of unique stack traces that can be stored and "
         "displayed (default 10240)")
parser.add_argument("duration", nargs="?", default=99999999,
    type=positive_nonzero_int,
    help="duration of trace, in seconds")

# option logic
args = parser.parse_args()
pid = int(args.pid) if args.pid is not None else -1
duration = int(args.duration)
debug = 0
need_delimiter = args.delimited and not (args.kernel_stacks_only or
    args.user_stacks_only)
# TODO: add stack depth, and interval

#
# Setup BPF
#

# define BPF program
bpf_text = """
#include <uapi/linux/ptrace.h>
#include <uapi/linux/bpf_perf_event.h>
#include <linux/sched.h>

struct key_t {
    u32 pid;
    u64 kernel_ip;
    u64 kernel_ret_ip;
    int user_stack_id;
    int kernel_stack_id;
    char name[TASK_COMM_LEN];
};
BPF_HASH(counts, struct key_t);
BPF_HASH(start, u32);
BPF_STACK_TRACE(stack_traces, STACK_STORAGE_SIZE)
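// counts tallies samples per unique key (comm, pid, and stack ids);
// stack_traces stores the de-duplicated stacks referenced by those ids.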

// This code gets a bit complex. Probably not suitable for casual hacking.

int do_perf_event(struct bpf_perf_event_data *ctx) {
    u32 pid = bpf_get_current_pid_tgid() >> 32;
    if (!(THREAD_FILTER))
        return 0;

    // create map key
    u64 zero = 0, *val;
    struct key_t key = {.pid = pid};
    bpf_get_current_comm(&key.name, sizeof(key.name));

    // get stacks
    key.user_stack_id = USER_STACK_GET;
    key.kernel_stack_id = KERNEL_STACK_GET;

    if (key.kernel_stack_id >= 0) {
        // populate extras to fix the kernel stack
        struct pt_regs regs = {};
        bpf_probe_read(&regs, sizeof(regs), (void *)&ctx->regs);
        u64 ip = PT_REGS_IP(&regs);

        // if ip isn't sane, leave key ips as zero for later checking
#ifdef CONFIG_RANDOMIZE_MEMORY
        if (ip > __PAGE_OFFSET_BASE) {
#else
        if (ip > PAGE_OFFSET) {
#endif
            key.kernel_ip = ip;
        }
    }

    val = counts.lookup_or_init(&key, &zero);
    (*val)++;
    return 0;
}
"""

# set thread filter
thread_context = ""
perf_filter = "-a"
if args.pid is not None:
    thread_context = "PID %s" % args.pid
    thread_filter = 'pid == %s' % args.pid
    perf_filter = '-p %s' % args.pid
else:
    thread_context = "all threads"
    thread_filter = '1'
bpf_text = bpf_text.replace('THREAD_FILTER', thread_filter)

# set stack storage size
bpf_text = bpf_text.replace('STACK_STORAGE_SIZE', str(args.stack_storage_size))

# handle stack args
kernel_stack_get = \
    "stack_traces.get_stackid(&ctx->regs, 0 | BPF_F_REUSE_STACKID)"
user_stack_get = \
    "stack_traces.get_stackid(&ctx->regs, 0 | BPF_F_REUSE_STACKID | " \
    "BPF_F_USER_STACK)"
stack_context = ""
if args.user_stacks_only:
    stack_context = "user"
    kernel_stack_get = "-1"
elif args.kernel_stacks_only:
    stack_context = "kernel"
    user_stack_get = "-1"
else:
    stack_context = "user + kernel"
bpf_text = bpf_text.replace('USER_STACK_GET', user_stack_get)
bpf_text = bpf_text.replace('KERNEL_STACK_GET', kernel_stack_get)
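# when a stack type is disabled, its placeholder becomes the literal "-1", so
# the corresponding *_stack_id in key_t is always negative and is skipped later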

# header
if not args.folded:
    print("Sampling at %d Hertz of %s by %s stack" %
        (args.frequency, thread_context, stack_context), end="")
    if duration < 99999999:
        print(" for %d secs." % duration)
    else:
        print("... Hit Ctrl-C to end.")

if debug:
    print(bpf_text)

# initialize BPF & perf_events
b = BPF(text=bpf_text)
b.attach_perf_event(ev_type=PerfType.SOFTWARE,
    ev_config=PerfSWConfig.CPU_CLOCK, fn_name="do_perf_event",
    sample_period=0, sample_freq=args.frequency)
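# sample_period=0 with sample_freq set selects perf's frequency mode: the
# kernel adjusts the sampling period so each CPU fires about args.frequency
# times per second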

# signal handler
def signal_ignore(signal, frame):
    print()

#
# Output Report
#

# collect samples
try:
    sleep(duration)
except KeyboardInterrupt:
    # as cleanup can take some time, trap Ctrl-C:
    signal.signal(signal.SIGINT, signal_ignore)

if not args.folded:
    print()

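# annotate kernel symbols with a "_[k]" suffix when -a is used; flamegraph.pl
# recognizes this suffix and can color kernel frames separately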
def aksym(addr):
    if args.annotations:
        return b.ksym(addr) + "_[k]"
    else:
        return b.ksym(addr)

# output stacks
missing_stacks = 0
has_enomem = False
counts = b.get_table("counts")
stack_traces = b.get_table("stack_traces")
for k, v in sorted(counts.items(), key=lambda counts: counts[1].value):
    # handle get_stackid errors
    if (not args.user_stacks_only and k.kernel_stack_id < 0 and
            k.kernel_stack_id != -errno.EFAULT) or \
            (not args.kernel_stacks_only and k.user_stack_id < 0 and
            k.user_stack_id != -errno.EFAULT):
        missing_stacks += 1
        # check for an ENOMEM error
        if k.kernel_stack_id == -errno.ENOMEM or \
                k.user_stack_id == -errno.ENOMEM:
            has_enomem = True

    user_stack = [] if k.user_stack_id < 0 else \
        stack_traces.walk(k.user_stack_id)
    kernel_tmp = [] if k.kernel_stack_id < 0 else \
        stack_traces.walk(k.kernel_stack_id)
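    # walk() yields frame addresses starting from the most recently called
    # (leaf) frame, which is why the folded output below reverses the lists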

    # fix kernel stack
    kernel_stack = []
    if k.kernel_stack_id >= 0:
        for addr in kernel_tmp:
            kernel_stack.append(addr)
        # the "later checking" of the IP saved by the BPF program: if it
        # passed the kernel-address test there, prepend it as the top frame
        if k.kernel_ip:
            kernel_stack.insert(0, k.kernel_ip)

    do_delimiter = need_delimiter and kernel_stack

    if args.folded:
        # print folded stack output
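        # each folded line is "comm;frame1;frame2;... count" with the root
        # frame first, e.g. (hypothetical): "dd;[unknown];sys_read 2340";
        # such lines can be fed to FlameGraph's flamegraph.pl to render an SVG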
        user_stack = list(user_stack)
        kernel_stack = list(kernel_stack)
        line = [k.name.decode()] + \
            [b.sym(addr, k.pid) for addr in reversed(user_stack)] + \
            (do_delimiter and ["-"] or []) + \
            [aksym(addr) for addr in reversed(kernel_stack)]
        print("%s %d" % (";".join(line), v.value))
    else:
        # print default multi-line stack output.
        for addr in kernel_stack:
            print("    %s" % aksym(addr))
        if do_delimiter:
            print("    --")
        for addr in user_stack:
            print("    %s" % b.sym(addr, k.pid))
        print("    %-16s %s (%d)" % ("-", k.name.decode(), k.pid))
        print("        %d\n" % v.value)

# check missing
if missing_stacks > 0:
    enomem_str = "" if not has_enomem else \
        " Consider increasing --stack-storage-size."
    print("WARNING: %d stack traces could not be displayed.%s" %
        (missing_stacks, enomem_str),
        file=stderr)