#!/usr/bin/python
#
# offcputime    Summarize off-CPU time by kernel stack trace
#               For Linux, uses BCC, eBPF.
#
# USAGE: offcputime [-h] [-p PID] [-v] [-f] [duration]
#
# The current implementation uses an unrolled loop for x86_64, and was written
# as a proof of concept. This implementation should be replaced in the future
# with an appropriate bpf_ call, when available.
#
# Currently limited to a stack trace depth of 20 (MAXDEPTH).
#
# Copyright 2016 Netflix, Inc.
# Licensed under the Apache License, Version 2.0 (the "License")
#
# 13-Jan-2016   Brendan Gregg   Created this.

from __future__ import print_function
from bcc import BPF
from time import sleep, strftime
import argparse
import signal

# arguments
examples = """examples:
    ./offcputime             # trace off-CPU stack time until Ctrl-C
    ./offcputime 5           # trace for 5 seconds only
    ./offcputime -f 5        # 5 seconds, and output in folded format
    ./offcputime -p 185      # trace for PID 185 only
"""
parser = argparse.ArgumentParser(
    description="Summarize off-CPU time by kernel stack trace",
    formatter_class=argparse.RawDescriptionHelpFormatter,
    epilog=examples)
parser.add_argument("-p", "--pid",
    help="trace this PID only")
parser.add_argument("-v", "--verbose", action="store_true",
    help="show raw addresses")
parser.add_argument("-f", "--folded", action="store_true",
    help="output folded format")
parser.add_argument("duration", nargs="?", default=99999999,
    help="duration of trace, in seconds")
args = parser.parse_args()
folded = args.folded
duration = int(args.duration)
debug = 0
maxdepth = 20    # must match MAXDEPTH in the BPF program below

# signal handler
def signal_ignore(signal, frame):
    print()

# load BPF program
bpf_text = """
#include <uapi/linux/ptrace.h>
#include <linux/sched.h>

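// MAXDEPTH bounds the hand-unrolled stack walk in oncpu(); MINBLOCK_US
// filters out off-CPU intervals shorter than this many microseconds.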
#define MAXDEPTH	20
#define MINBLOCK_US	1

struct key_t {
    char name[TASK_COMM_LEN];
    // Skip saving the ip
    u64 ret[MAXDEPTH];
};
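// counts: summed off-CPU time (us) per unique stack key
// start: per-PID timestamp (ns) taken when the task left the CPU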
BPF_HASH(counts, struct key_t);
BPF_HASH(start, u32);

static u64 get_frame(u64 *bp) {
    if (*bp) {
        // The following stack walker is x86_64 specific
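        // *(bp + 8) holds this frame's return address, and *bp holds the
        // caller's saved frame pointer, so each call climbs one frame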
        u64 ret = 0;
        if (bpf_probe_read(&ret, sizeof(ret), (void *)(*bp + 8)))
            return 0;
        if (bpf_probe_read(bp, sizeof(*bp), (void *)*bp))
            *bp = 0;    // read failed: clear the frame pointer to end the walk
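        // addresses below the kernel text mapping are not valid kernel
        // return addresses, so treat them as the end of the stack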
        if (ret < __START_KERNEL_map)
            return 0;
        return ret;
    }
    return 0;
}

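// offcpu: entry kprobe on schedule(); record the time at which the current
// task is about to leave the CPU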
int offcpu(struct pt_regs *ctx) {
    u32 pid = bpf_get_current_pid_tgid();
    u64 ts = bpf_ktime_get_ns();
    FILTER
    start.update(&pid, &ts);
    return 0;
}

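// oncpu: kprobe on finish_task_switch(); runs in the context of the task
// being switched back on-CPU, so now minus its saved timestamp is the time
// it spent blocked off-CPU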
int oncpu(struct pt_regs *ctx) {
    u32 pid = bpf_get_current_pid_tgid();
    FILTER
    u64 ts = bpf_ktime_get_ns();
    struct key_t key = {};
    u64 zero = 0, *val, bp = 0, *tsp, delta;
    int depth = 0;

    // calculate delta time
    tsp = start.lookup(&pid);
    if (tsp == 0)
        return 0;    // missed start
    delta = ts - *tsp;
    start.delete(&pid);
    delta = delta / 1000;    // ns -> us
    if (delta < MINBLOCK_US)
        return 0;

    bpf_get_current_comm(&key.name, sizeof(key.name));
    bp = ctx->bp;

    // unrolled loop (MAXDEPTH):
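    // the BPF verifier rejects backwards branches, so the frame walk is
    // unrolled by hand: each line captures one return address, stopping
    // early when get_frame() returns 0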
    if (!(key.ret[depth++] = get_frame(&bp))) goto out;
    if (!(key.ret[depth++] = get_frame(&bp))) goto out;
    if (!(key.ret[depth++] = get_frame(&bp))) goto out;
    if (!(key.ret[depth++] = get_frame(&bp))) goto out;
    if (!(key.ret[depth++] = get_frame(&bp))) goto out;
    if (!(key.ret[depth++] = get_frame(&bp))) goto out;
    if (!(key.ret[depth++] = get_frame(&bp))) goto out;
    if (!(key.ret[depth++] = get_frame(&bp))) goto out;
    if (!(key.ret[depth++] = get_frame(&bp))) goto out;
    if (!(key.ret[depth++] = get_frame(&bp))) goto out;

    if (!(key.ret[depth++] = get_frame(&bp))) goto out;
    if (!(key.ret[depth++] = get_frame(&bp))) goto out;
    if (!(key.ret[depth++] = get_frame(&bp))) goto out;
    if (!(key.ret[depth++] = get_frame(&bp))) goto out;
    if (!(key.ret[depth++] = get_frame(&bp))) goto out;
    if (!(key.ret[depth++] = get_frame(&bp))) goto out;
    if (!(key.ret[depth++] = get_frame(&bp))) goto out;
    if (!(key.ret[depth++] = get_frame(&bp))) goto out;
    if (!(key.ret[depth++] = get_frame(&bp))) goto out;
    if (!(key.ret[depth++] = get_frame(&bp))) goto out;

out:
    val = counts.lookup_or_init(&key, &zero);
    (*val) += delta;
    return 0;
}
"""
if args.pid:
    bpf_text = bpf_text.replace('FILTER',
        'if (pid != %s) { return 0; }' % (args.pid))
else:
    bpf_text = bpf_text.replace('FILTER', '')
if debug:
    print(bpf_text)
b = BPF(text=bpf_text)
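# schedule() marks a task going off-CPU; finish_task_switch() marks the
# next task coming back on, completing its off-CPU interval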
b.attach_kprobe(event="schedule", fn_name="offcpu")
b.attach_kprobe(event="finish_task_switch", fn_name="oncpu")
matched = b.num_open_kprobes()
if matched == 0:
    print("0 functions traced. Exiting.")
    exit()

# header
if not folded:
    print("Tracing off-CPU time (us) by kernel stack", end="")
    if duration < 99999999:
        print(" for %d secs." % duration)
    else:
        print("... Hit Ctrl-C to end.")

# output
while (1):
    try:
        sleep(duration)
    except KeyboardInterrupt:
        # as cleanup can take many seconds, trap Ctrl-C:
        signal.signal(signal.SIGINT, signal_ignore)

    if not folded:
        print()
    counts = b.get_table("counts")
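    # print stacks sorted by total off-CPU time, most blocked last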
    for k, v in sorted(counts.items(), key=lambda kv: kv[1].value):
        if folded:
            # print folded stack output
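            # one folded line per stack, "comm;caller;...;leaf total_us",
            # consumable by flame graph tools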
            line = k.name + ";"
            for i in reversed(range(0, maxdepth)):
                if k.ret[i] == 0:
                    continue
                line = line + b.ksym(k.ret[i])
                if i != 0:
                    line = line + ";"
            print("%s %d" % (line, v.value))
        else:
            # print default multi-line stack output
            for i in range(0, maxdepth):
                if k.ret[i] == 0:
                    break
                print("    %-16x %s" % (k.ret[i],
                    b.ksym(k.ret[i])))
            print("    %-16s %s" % ("-", k.name))
            print("        %d\n" % v.value)
    counts.clear()

    if not folded:
        print("Detaching...")
    exit()