Brendan Gregg | 7bf0e49 | 2016-01-27 23:17:40 -0800 | [diff] [blame] | 1 | #!/usr/bin/python |
| 2 | # |
| 3 | # wakeuptime Summarize sleep to wakeup time by waker kernel stack |
| 4 | # For Linux, uses BCC, eBPF. |
| 5 | # |
Brendan Gregg | 62f4c28 | 2016-01-30 11:05:40 -0800 | [diff] [blame] | 6 | # USAGE: wakeuptime [-h] [-u] [-p PID] [-v] [-f] [duration] |
Brendan Gregg | 7bf0e49 | 2016-01-27 23:17:40 -0800 | [diff] [blame] | 7 | # |
| 8 | # The current implementation uses an unrolled loop for x86_64, and was written |
| 9 | # as a proof of concept. This implementation should be replaced in the future |
| 10 | # with an appropriate bpf_ call, when available. |
| 11 | # |
| 12 | # Currently limited to a stack trace depth of 21 (maxdepth + 1). |
| 13 | # |
| 14 | # Copyright 2016 Netflix, Inc. |
| 15 | # Licensed under the Apache License, Version 2.0 (the "License") |
| 16 | # |
| 17 | # 14-Jan-2016 Brendan Gregg Created this. |
| 18 | |
| 19 | from __future__ import print_function |
| 20 | from bcc import BPF |
| 21 | from time import sleep, strftime |
| 22 | import argparse |
| 23 | import signal |
| 24 | |
# arguments
#
# Parses the command line into: folded (flame-graph style output), duration
# (seconds to trace; the huge default means "run until Ctrl-C"), and the
# mutually exclusive -p/-u filters that are compiled into the BPF program.
examples = """examples:
    ./wakeuptime             # trace blocked time with waker stacks
    ./wakeuptime 5           # trace for 5 seconds only
    ./wakeuptime -f 5        # 5 seconds, and output in folded format
    ./wakeuptime -u          # don't include kernel threads (user only)
    ./wakeuptime -p 185      # trace for PID 185 only
"""
parser = argparse.ArgumentParser(
    description="Summarize sleep to wakeup time by waker kernel stack",
    formatter_class=argparse.RawDescriptionHelpFormatter,
    epilog=examples)
parser.add_argument("-u", "--useronly", action="store_true",
    help="user threads only (no kernel threads)")
parser.add_argument("-p", "--pid",
    help="trace this PID only")
parser.add_argument("-v", "--verbose", action="store_true",
    help="show raw addresses")
parser.add_argument("-f", "--folded", action="store_true",
    help="output folded format")
parser.add_argument("duration", nargs="?", default=99999999,
    help="duration of trace, in seconds")
args = parser.parse_args()
folded = args.folded
duration = int(args.duration)
debug = 0
maxdepth = 20    # and MAXDEPTH in the BPF program below
if args.pid and args.useronly:
    # -p and -u are mutually exclusive filters; signal a usage error.
    print("ERROR: use either -p or -u.")
    exit(1)
| 55 | |
# signal handler
def signal_ignore(sig, frame):
    """SIGINT handler installed during cleanup: swallow Ctrl-C so a slow
    symbol dump is not interrupted; just emit a newline for feedback.

    Renamed the first parameter from ``signal`` (which shadowed the imported
    ``signal`` module) to ``sig``; handlers are invoked positionally, so this
    is backward-compatible.
    """
    print()
| 59 | |
# define BPF program
#
# The C program below traces two kernel functions:
#   offcpu (kprobe on schedule): records a timestamp per-PID in `start`
#     when a thread goes off-CPU.
#   waker (kprobe on try_to_wake_up): on wakeup, looks up the target's
#     off-CPU timestamp, computes the blocked time in microseconds, and
#     accumulates it in `counts` keyed by (waker comm, target comm,
#     waker kernel stack).
# The stack is walked manually via frame pointers (x86_64 only) with an
# unrolled loop of MAXDEPTH get_frame() calls, since no bpf_ stack helper
# was available when this was written. `FILTER` is replaced by Python
# below with the -p/-u predicate before compilation.
bpf_text = """
#include <uapi/linux/ptrace.h>
#include <linux/sched.h>

#define MAXDEPTH	20
#define MINBLOCK_US	1

struct key_t {
    char waker[TASK_COMM_LEN];
    char target[TASK_COMM_LEN];
    // Skip saving the ip
    u64 ret[MAXDEPTH];
};
BPF_HASH(counts, struct key_t);
BPF_HASH(start, u32);

static u64 get_frame(u64 *bp) {
    if (*bp) {
        // The following stack walker is x86_64 specific
        u64 ret = 0;
        if (bpf_probe_read(&ret, sizeof(ret), (void *)(*bp+8)))
            return 0;
        if (bpf_probe_read(bp, sizeof(*bp), (void *)*bp))
            bp = 0;
        if (ret < __START_KERNEL_map)
            return 0;
        return ret;
    }
    return 0;
}

int offcpu(struct pt_regs *ctx) {
    u32 pid = bpf_get_current_pid_tgid();
    u64 ts = bpf_ktime_get_ns();
    // XXX: should filter here too, but need task_struct
    start.update(&pid, &ts);
    return 0;
}

int waker(struct pt_regs *ctx, struct task_struct *p) {
    u32 pid = p->pid;
    u64 delta, *tsp, ts;

    tsp = start.lookup(&pid);
    if (tsp == 0)
        return 0;        // missed start
    start.delete(&pid);

    if (FILTER)
        return 0;

    // calculate delta time
    delta = bpf_ktime_get_ns() - *tsp;
    delta = delta / 1000;
    if (delta < MINBLOCK_US)
        return 0;

    struct key_t key = {};
    u64 zero = 0, *val, bp = 0;
    int depth = 0;

    bpf_probe_read(&key.target, sizeof(key.target), p->comm);
    bpf_get_current_comm(&key.waker, sizeof(key.waker));
    bp = ctx->bp;

    // unrolled loop (MAXDEPTH):
    if (!(key.ret[depth++] = get_frame(&bp))) goto out;
    if (!(key.ret[depth++] = get_frame(&bp))) goto out;
    if (!(key.ret[depth++] = get_frame(&bp))) goto out;
    if (!(key.ret[depth++] = get_frame(&bp))) goto out;
    if (!(key.ret[depth++] = get_frame(&bp))) goto out;
    if (!(key.ret[depth++] = get_frame(&bp))) goto out;
    if (!(key.ret[depth++] = get_frame(&bp))) goto out;
    if (!(key.ret[depth++] = get_frame(&bp))) goto out;
    if (!(key.ret[depth++] = get_frame(&bp))) goto out;
    if (!(key.ret[depth++] = get_frame(&bp))) goto out;

    if (!(key.ret[depth++] = get_frame(&bp))) goto out;
    if (!(key.ret[depth++] = get_frame(&bp))) goto out;
    if (!(key.ret[depth++] = get_frame(&bp))) goto out;
    if (!(key.ret[depth++] = get_frame(&bp))) goto out;
    if (!(key.ret[depth++] = get_frame(&bp))) goto out;
    if (!(key.ret[depth++] = get_frame(&bp))) goto out;
    if (!(key.ret[depth++] = get_frame(&bp))) goto out;
    if (!(key.ret[depth++] = get_frame(&bp))) goto out;
    if (!(key.ret[depth++] = get_frame(&bp))) goto out;
    if (!(key.ret[depth++] = get_frame(&bp))) goto out;

out:
    val = counts.lookup_or_init(&key, &zero);
    (*val) += delta;
    return 0;
}
"""
# Build the FILTER predicate that is compiled into the BPF program:
#   -p PID : drop events for every task except PID
#   -u     : drop kernel threads (PF_KTHREAD set on the woken task)
#   neither: '0' (filter nothing)
# Renamed the local from `filter` to `thread_filter` to stop shadowing the
# Python builtin of the same name.
if args.pid:
    thread_filter = 'pid != %s' % args.pid
elif args.useronly:
    thread_filter = 'p->flags & PF_KTHREAD'
else:
    thread_filter = '0'
bpf_text = bpf_text.replace('FILTER', thread_filter)
if debug:
    # dump the final C program for troubleshooting
    print(bpf_text)
| 164 | |
# initialize BPF
# Compile/load the program, then attach:
#   schedule       -> offcpu (timestamp when a thread leaves the CPU)
#   try_to_wake_up -> waker  (measure blocked time, record waker stack)
b = BPF(text=bpf_text)
b.attach_kprobe(event="schedule", fn_name="offcpu")
b.attach_kprobe(event="try_to_wake_up", fn_name="waker")
matched = b.num_open_kprobes()
# Bail out if neither kprobe attached (e.g. symbols missing on this kernel).
if matched == 0:
    print("0 functions traced. Exiting.")
    exit()
| 173 | |
# header
# Announce what is being traced (suppressed for folded output, which must
# contain only stack lines). The sentinel default duration of 99999999
# means "no explicit duration" -> interactive Ctrl-C mode.
if not folded:
    banner = "Tracing blocked time (us) by kernel stack"
    if duration < 99999999:
        banner += " for %d secs." % duration
    else:
        banner += "... Hit Ctrl-C to end."
    print(banner)
| 181 | |
# output
#
# Sleep for the trace duration (or until Ctrl-C), then print the summary.
# BUG FIX: the original wrapped this in `while (1)` with no break, so the
# trailing "Detaching..." / exit() was unreachable and — because the first
# Ctrl-C installs an ignoring SIGINT handler — the tool could never be
# stopped with Ctrl-C again. A single report is produced and the tool exits,
# matching the documented behavior ("trace for 5 seconds only").
try:
    sleep(duration)
except KeyboardInterrupt:
    # as cleanup can take many seconds, trap Ctrl-C:
    signal.signal(signal.SIGINT, signal_ignore)

if not folded:
    print()
counts = b.get_table("counts")
# sort ascending by total blocked time so the heaviest wakers print last
for k, v in sorted(counts.items(), key=lambda counts: counts[1].value):
    if folded:
        # print folded stack output: waker;frame_n;...;frame_0;target total_us
        # frames are stored leaf-first in k.ret, so reversed() emits them
        # root-first as flame graph tools expect; zero entries (unused
        # slots past the end of the stack) are skipped.
        # NOTE(review): written for Python 2 (str concatenation with the
        # ctypes char arrays/ksym results) — confirm before running on py3.
        line = k.waker + ";"
        for i in reversed(range(0, maxdepth)):
            if k.ret[i] == 0:
                continue
            line = line + b.ksym(k.ret[i])
            if i != 0:
                line = line + ";"
        print("%s;%s %d" % (line, k.target, v.value))
    else:
        # print default multi-line stack output: target, waker stack
        # (address + symbol per frame), waker, then total blocked us.
        print("    %-16s %s" % ("target:", k.target))
        for i in range(0, maxdepth):
            if k.ret[i] == 0:
                break
            print("    %-16x %s" % (k.ret[i],
                b.ksym(k.ret[i])))
        print("    %-16s %s" % ("waker:", k.waker))
        print("        %d\n" % v.value)
counts.clear()

if not folded:
    print("Detaching...")
exit()