#!/usr/bin/python
# @lint-avoid-python-3-compatibility-imports
#
# runqslower    Trace long process scheduling delays.
#               For Linux, uses BCC, eBPF.
#
# This script traces high scheduling delays between a task becoming
# ready to run and it actually running on CPU afterwards.
#
# USAGE: runqslower [-p PID] [min_us]
#
# REQUIRES: Linux 4.9+ (BPF_PROG_TYPE_PERF_EVENT support).
#
# This measures the time a task spends waiting on a run queue for a turn
# on-CPU, and shows this time as individual events. This time should be small,
# but a task may need to wait its turn due to CPU load.
#
# This measures two types of run queue latency:
# 1. The time from a task being enqueued on a run queue to its context switch
#    and execution. This traces ttwu_do_wakeup(), wake_up_new_task() ->
#    finish_task_switch() with either raw tracepoints (if supported) or
#    kprobes and instruments the run queue latency after a voluntary context
#    switch.
# 2. The time from when a task was involuntarily context switched and still
#    in the runnable state, to when it next executed. This is instrumented
#    from finish_task_switch() alone.
#
# Copyright 2016 Cloudflare, Inc.
# Licensed under the Apache License, Version 2.0 (the "License")
#
# 02-May-2018   Ivan Babrou    Created this.
# 18-Nov-2019   Gergely Bod    BUG fix: Use bpf_probe_read_str() to extract the
#                              process name from 'task_struct* next' in raw tp
#                              code. bpf_get_current_comm() operates on the
#                              current task, which might already be different
#                              than 'next'.

from __future__ import print_function
from bcc import BPF
import argparse
from time import strftime

# arguments
examples = """examples:
    ./runqslower         # trace run queue latency higher than 10000 us (default)
    ./runqslower 1000    # trace run queue latency higher than 1000 us
    ./runqslower -p 123  # trace pid 123 only
"""
parser = argparse.ArgumentParser(
    description="Trace high run queue latency",
    formatter_class=argparse.RawDescriptionHelpFormatter,
    epilog=examples)
parser.add_argument("-p", "--pid", type=int, metavar="PID", dest="pid",
    help="trace this PID only")
parser.add_argument("min_us", nargs="?", default='10000',
    help="minimum run queue latency to trace, in us (default 10000)")
parser.add_argument("--ebpf", action="store_true",
    help=argparse.SUPPRESS)
args = parser.parse_args()
min_us = int(args.min_us)
debug = 0

# define BPF program
bpf_text = """
#include <uapi/linux/ptrace.h>
#include <linux/sched.h>
#include <linux/nsproxy.h>
#include <linux/pid_namespace.h>

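// enqueue timestamps (ns), keyed by pid: written when a task becomes
// runnable, read back when it next gets on-CPU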
BPF_HASH(start, u32);

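// forward declaration is enough: the ttwu_do_wakeup() kprobe handler receives
// a struct rq * but never dereferences it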
struct rq;

struct data_t {
    u32 pid;
    char task[TASK_COMM_LEN];
    u64 delta_us;
};

BPF_PERF_OUTPUT(events);

// record enqueue timestamp
static int trace_enqueue(u32 tgid, u32 pid)
{
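    // never track pid 0 (the per-CPU idle task) or filtered-out pids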
    if (FILTER_PID || pid == 0)
        return 0;
    u64 ts = bpf_ktime_get_ns();
    start.update(&pid, &ts);
    return 0;
}
"""

bpf_text_kprobe = """
int trace_wake_up_new_task(struct pt_regs *ctx, struct task_struct *p)
{
    return trace_enqueue(p->tgid, p->pid);
}

int trace_ttwu_do_wakeup(struct pt_regs *ctx, struct rq *rq, struct task_struct *p,
    int wake_flags)
{
    return trace_enqueue(p->tgid, p->pid);
}

// calculate latency
int trace_run(struct pt_regs *ctx, struct task_struct *prev)
{
    u32 pid, tgid;

    // ivcsw: treat like an enqueue event and store timestamp
    if (prev->state == TASK_RUNNING) {
        tgid = prev->tgid;
        pid = prev->pid;
        if (!(FILTER_PID || pid == 0)) {
            u64 ts = bpf_ktime_get_ns();
            start.update(&pid, &ts);
        }
    }

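    // bpf_get_current_pid_tgid() packs the tgid in the upper 32 bits and the
    // kernel task id (what user space calls the pid) in the lower 32 bits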
    tgid = bpf_get_current_pid_tgid() >> 32;
    pid = bpf_get_current_pid_tgid();

    u64 *tsp, delta_us;

    // fetch timestamp and calculate delta
    tsp = start.lookup(&pid);
    if (tsp == 0) {
        return 0;   // missed enqueue
    }
    delta_us = (bpf_ktime_get_ns() - *tsp) / 1000;

    if (FILTER_US)
        return 0;

    struct data_t data = {};
    data.pid = pid;
    data.delta_us = delta_us;
    bpf_get_current_comm(&data.task, sizeof(data.task));

    // output
    events.perf_submit(ctx, &data, sizeof(data));

    start.delete(&pid);
    return 0;
}
"""

bpf_text_raw_tp = """
RAW_TRACEPOINT_PROBE(sched_wakeup)
{
    // TP_PROTO(struct task_struct *p)
    struct task_struct *p = (struct task_struct *)ctx->args[0];
    return trace_enqueue(p->tgid, p->pid);
}

RAW_TRACEPOINT_PROBE(sched_wakeup_new)
{
    // TP_PROTO(struct task_struct *p)
    struct task_struct *p = (struct task_struct *)ctx->args[0];
    u32 tgid, pid;

    bpf_probe_read(&tgid, sizeof(tgid), &p->tgid);
    bpf_probe_read(&pid, sizeof(pid), &p->pid);
    return trace_enqueue(tgid, pid);
}

RAW_TRACEPOINT_PROBE(sched_switch)
{
    // TP_PROTO(bool preempt, struct task_struct *prev, struct task_struct *next)
    struct task_struct *prev = (struct task_struct *)ctx->args[1];
    struct task_struct *next = (struct task_struct *)ctx->args[2];
    u32 pid;
    long state;

    // ivcsw: treat like an enqueue event and store timestamp
    bpf_probe_read(&state, sizeof(long), (const void *)&prev->state);
    if (state == TASK_RUNNING) {
        bpf_probe_read(&pid, sizeof(prev->pid), &prev->pid);
        if (!(FILTER_PID || pid == 0)) {
            u64 ts = bpf_ktime_get_ns();
            start.update(&pid, &ts);
        }
    }

    bpf_probe_read(&pid, sizeof(next->pid), &next->pid);

    u64 *tsp, delta_us;

    // fetch timestamp and calculate delta
    tsp = start.lookup(&pid);
    if (tsp == 0) {
        return 0;   // missed enqueue
    }
    delta_us = (bpf_ktime_get_ns() - *tsp) / 1000;

    if (FILTER_US)
        return 0;

    struct data_t data = {};
    data.pid = pid;
    data.delta_us = delta_us;
    bpf_probe_read_str(&data.task, sizeof(data.task), next->comm);

    // output
    events.perf_submit(ctx, &data, sizeof(data));

    start.delete(&pid);
    return 0;
}
"""

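# prefer raw tracepoints (kernel 4.17+): they have lower overhead than kprobes
# and do not depend on the scheduler's internal function signatures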
is_support_raw_tp = BPF.support_raw_tracepoint()
if is_support_raw_tp:
    bpf_text += bpf_text_raw_tp
else:
    bpf_text += bpf_text_kprobe

# code substitutions
if min_us == 0:
    bpf_text = bpf_text.replace('FILTER_US', '0')
else:
    bpf_text = bpf_text.replace('FILTER_US', 'delta_us <= %s' % str(min_us))
if args.pid:
    bpf_text = bpf_text.replace('FILTER_PID', 'pid != %s' % args.pid)
else:
    bpf_text = bpf_text.replace('FILTER_PID', '0')
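# e.g. with -p 123 and the default threshold, the generated C reads
# 'pid != 123' for FILTER_PID and 'delta_us <= 10000' for FILTER_US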
if debug or args.ebpf:
    print(bpf_text)
    if args.ebpf:
        exit()

# process event
def print_event(cpu, data, size):
    event = b["events"].event(data)
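    # note: on Python 3, event.task is bytes and prints with a b'...' prefix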
    print("%-8s %-16s %-6s %14s" % (strftime("%H:%M:%S"), event.task, event.pid, event.delta_us))

# load BPF program
b = BPF(text=bpf_text)
if not is_support_raw_tp:
    b.attach_kprobe(event="ttwu_do_wakeup", fn_name="trace_ttwu_do_wakeup")
    b.attach_kprobe(event="wake_up_new_task", fn_name="trace_wake_up_new_task")
    b.attach_kprobe(event="finish_task_switch", fn_name="trace_run")

print("Tracing run queue latency higher than %d us" % min_us)
print("%-8s %-16s %-6s %14s" % ("TIME", "COMM", "PID", "LAT(us)"))

# read events
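# size the perf ring buffer at 64 pages per CPU to reduce lost events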
b["events"].open_perf_buffer(print_event, page_cnt=64)
while 1:
    try:
        b.perf_buffer_poll()
    except KeyboardInterrupt:
        exit()