Alexey Ivanov | cc01a9c | 2019-01-16 09:50:46 -0800 | [diff] [blame] | 1 | #!/usr/bin/python |
Ivan Babrou | 5c48a3f | 2018-05-08 17:24:19 -0700 | [diff] [blame] | 2 | # @lint-avoid-python-3-compatibility-imports |
| 3 | # |
| 4 | # runqslower Trace long process scheduling delays. |
| 5 | # For Linux, uses BCC, eBPF. |
| 6 | # |
| 7 | # This script traces high scheduling delays between tasks being |
| 8 | # ready to run and them running on CPU after that. |
| 9 | # |
zhenwei pi | 508d969 | 2021-08-12 18:04:17 +0800 | [diff] [blame] | 10 | # USAGE: runqslower [-p PID] [-t TID] [-P] [min_us] |
Ivan Babrou | 5c48a3f | 2018-05-08 17:24:19 -0700 | [diff] [blame] | 11 | # |
| 12 | # REQUIRES: Linux 4.9+ (BPF_PROG_TYPE_PERF_EVENT support). |
| 13 | # |
| 14 | # This measures the time a task spends waiting on a run queue for a turn |
# on-CPU, and shows this time as individual events. This time should be small,
| 16 | # but a task may need to wait its turn due to CPU load. |
| 17 | # |
| 18 | # This measures two types of run queue latency: |
| 19 | # 1. The time from a task being enqueued on a run queue to its context switch |
| 20 | # and execution. This traces ttwu_do_wakeup(), wake_up_new_task() -> |
| 21 | # finish_task_switch() with either raw tracepoints (if supported) or kprobes |
| 22 | # and instruments the run queue latency after a voluntary context switch. |
| 23 | # 2. The time from when a task was involuntary context switched and still |
| 24 | # in the runnable state, to when it next executed. This is instrumented |
| 25 | # from finish_task_switch() alone. |
| 26 | # |
| 27 | # Copyright 2016 Cloudflare, Inc. |
| 28 | # Licensed under the Apache License, Version 2.0 (the "License") |
| 29 | # |
| 30 | # 02-May-2018 Ivan Babrou Created this. |
Sumanth Korikkar | 7f6066d | 2020-05-20 10:49:56 -0500 | [diff] [blame] | 31 | # 18-Nov-2019 Gergely Bod BUG fix: Use bpf_probe_read_kernel_str() to extract the |
Greg Bod | 65885f3 | 2019-11-18 19:35:31 +0000 | [diff] [blame] | 32 | # process name from 'task_struct* next' in raw tp code. |
| 33 | # bpf_get_current_comm() operates on the current task |
| 34 | # which might already be different than 'next'. |
Ivan Babrou | 5c48a3f | 2018-05-08 17:24:19 -0700 | [diff] [blame] | 35 | |
| 36 | from __future__ import print_function |
| 37 | from bcc import BPF |
| 38 | import argparse |
| 39 | from time import strftime |
Ivan Babrou | 5c48a3f | 2018-05-08 17:24:19 -0700 | [diff] [blame] | 40 | |
# arguments
# NOTE: the epilog below is printed verbatim by --help, so it must stay
# exactly as written.
examples = """examples:
./runqslower # trace run queue latency higher than 10000 us (default)
./runqslower 1000 # trace run queue latency higher than 1000 us
./runqslower -p 123 # trace pid 123
./runqslower -t 123 # trace tid 123 (use for threads only)
./runqslower -P # also show previous task comm and TID
"""
parser = argparse.ArgumentParser(
    description="Trace high run queue latency",
    formatter_class=argparse.RawDescriptionHelpFormatter,
    epilog=examples)
# Positional threshold in microseconds. Kept as a string default and
# converted with int() below.
parser.add_argument("min_us", nargs="?", default='10000',
    help="minimum run queue latency to trace, in us (default 10000)")
# Hidden debugging flag: print the generated BPF C program and exit.
parser.add_argument("--ebpf", action="store_true",
    help=argparse.SUPPRESS)

# -p, -t and -P are mutually exclusive: filter by process (tgid), filter by
# thread (pid), or widen the output with the previous task's comm/TID.
thread_group = parser.add_mutually_exclusive_group()
thread_group.add_argument("-p", "--pid", metavar="PID", dest="pid",
    help="trace this PID only", type=int)
thread_group.add_argument("-t", "--tid", metavar="TID", dest="tid",
    help="trace this TID only", type=int)
thread_group.add_argument("-P", "--previous", action="store_true",
    help="also show previous task name and TID")
args = parser.parse_args()

min_us = int(args.min_us)
# Set to 1 to dump the generated BPF program before loading (same effect as
# --ebpf but without exiting).
debug = 0
| 69 | |
# define BPF program
# Common C prologue: the per-task enqueue-timestamp map, the event struct
# emitted to userspace, and the shared trace_enqueue() helper. Either the
# raw-tracepoint or the kprobe probe bodies are appended to this string
# below. The ALL-CAPS placeholders (FILTER_PID, FILTER_TGID, FILTER_US,
# STATE_FIELD) are rewritten via str.replace() before compilation.
bpf_text = """
#include <uapi/linux/ptrace.h>
#include <linux/sched.h>
#include <linux/nsproxy.h>
#include <linux/pid_namespace.h>

BPF_HASH(start, u32);

struct rq;

struct data_t {
    u32 pid;
    u32 prev_pid;
    char task[TASK_COMM_LEN];
    char prev_task[TASK_COMM_LEN];
    u64 delta_us;
};

BPF_PERF_OUTPUT(events);

// record enqueue timestamp
static int trace_enqueue(u32 tgid, u32 pid)
{
    if (FILTER_PID || FILTER_TGID || pid == 0)
        return 0;
    u64 ts = bpf_ktime_get_ns();
    start.update(&pid, &ts);
    return 0;
}
"""
| 101 | |
# Kprobe-based probe bodies, appended to bpf_text when the kernel does not
# support raw tracepoints. trace_wake_up_new_task/trace_ttwu_do_wakeup record
# enqueue timestamps; trace_run (on finish_task_switch) computes the latency.
bpf_text_kprobe = """
int trace_wake_up_new_task(struct pt_regs *ctx, struct task_struct *p)
{
    return trace_enqueue(p->tgid, p->pid);
}

int trace_ttwu_do_wakeup(struct pt_regs *ctx, struct rq *rq, struct task_struct *p,
    int wake_flags)
{
    return trace_enqueue(p->tgid, p->pid);
}

// calculate latency
int trace_run(struct pt_regs *ctx, struct task_struct *prev)
{
    u32 pid, tgid, prev_pid;

    // ivcsw: treat like an enqueue event and store timestamp
    prev_pid = prev->pid;
    if (prev->STATE_FIELD == TASK_RUNNING) {
        tgid = prev->tgid;
        u64 ts = bpf_ktime_get_ns();
        if (prev_pid != 0) {
            if (!(FILTER_PID) && !(FILTER_TGID)) {
                start.update(&prev_pid, &ts);
            }
        }
    }

    pid = bpf_get_current_pid_tgid();

    u64 *tsp, delta_us;

    // fetch timestamp and calculate delta
    tsp = start.lookup(&pid);
    if (tsp == 0) {
        return 0; // missed enqueue
    }
    delta_us = (bpf_ktime_get_ns() - *tsp) / 1000;

    if (FILTER_US)
        return 0;

    struct data_t data = {};
    data.pid = pid;
    data.prev_pid = prev_pid;
    data.delta_us = delta_us;
    bpf_get_current_comm(&data.task, sizeof(data.task));
    bpf_probe_read_kernel_str(&data.prev_task, sizeof(data.prev_task), prev->comm);

    // output
    events.perf_submit(ctx, &data, sizeof(data));

    start.delete(&pid);
    return 0;
}
"""
| 159 | |
# Raw-tracepoint-based probe bodies (preferred: lower overhead), appended to
# bpf_text when BPF.support_raw_tracepoint() reports kernel support.
# sched_wakeup/sched_wakeup_new record enqueue timestamps; sched_switch both
# re-arms the timestamp for involuntarily-switched-but-runnable prev tasks
# and computes the latency for the incoming next task.
bpf_text_raw_tp = """
RAW_TRACEPOINT_PROBE(sched_wakeup)
{
    // TP_PROTO(struct task_struct *p)
    struct task_struct *p = (struct task_struct *)ctx->args[0];
    return trace_enqueue(p->tgid, p->pid);
}

RAW_TRACEPOINT_PROBE(sched_wakeup_new)
{
    // TP_PROTO(struct task_struct *p)
    struct task_struct *p = (struct task_struct *)ctx->args[0];
    u32 tgid, pid;

    bpf_probe_read_kernel(&tgid, sizeof(tgid), &p->tgid);
    bpf_probe_read_kernel(&pid, sizeof(pid), &p->pid);
    return trace_enqueue(tgid, pid);
}

RAW_TRACEPOINT_PROBE(sched_switch)
{
    // TP_PROTO(bool preempt, struct task_struct *prev, struct task_struct *next)
    struct task_struct *prev = (struct task_struct *)ctx->args[1];
    struct task_struct *next= (struct task_struct *)ctx->args[2];
    u32 tgid, pid, prev_pid;
    long state;

    // ivcsw: treat like an enqueue event and store timestamp
    bpf_probe_read_kernel(&state, sizeof(long), (const void *)&prev->STATE_FIELD);
    bpf_probe_read_kernel(&prev_pid, sizeof(prev->pid), &prev->pid);
    if (state == TASK_RUNNING) {
        bpf_probe_read_kernel(&tgid, sizeof(prev->tgid), &prev->tgid);
        u64 ts = bpf_ktime_get_ns();
        if (prev_pid != 0) {
            if (!(FILTER_PID) && !(FILTER_TGID)) {
                start.update(&prev_pid, &ts);
            }
        }

    }

    bpf_probe_read_kernel(&pid, sizeof(next->pid), &next->pid);

    u64 *tsp, delta_us;

    // fetch timestamp and calculate delta
    tsp = start.lookup(&pid);
    if (tsp == 0) {
        return 0; // missed enqueue
    }
    delta_us = (bpf_ktime_get_ns() - *tsp) / 1000;

    if (FILTER_US)
        return 0;

    struct data_t data = {};
    data.pid = pid;
    data.prev_pid = prev_pid;
    data.delta_us = delta_us;
    bpf_probe_read_kernel_str(&data.task, sizeof(data.task), next->comm);
    bpf_probe_read_kernel_str(&data.prev_task, sizeof(data.prev_task), prev->comm);

    // output
    events.perf_submit(ctx, &data, sizeof(data));

    start.delete(&pid);
    return 0;
}
"""
| 229 | |
# Pick the instrumentation flavor: raw tracepoints where the kernel supports
# them, kprobes otherwise.
is_support_raw_tp = BPF.support_raw_tracepoint()
bpf_text += bpf_text_raw_tp if is_support_raw_tp else bpf_text_kprobe

# code substitutions: resolve each ALL-CAPS placeholder to concrete C text.
# task_struct.state was renamed to __state in kernel 5.14.
state_field = ('__state'
    if BPF.kernel_struct_has_field(b'task_struct', b'__state') == 1
    else 'state')
bpf_text = bpf_text.replace('STATE_FIELD', state_field)

# A threshold of 0 disables the latency filter entirely.
filter_us = '0' if min_us == 0 else 'delta_us <= %s' % str(min_us)
bpf_text = bpf_text.replace('FILTER_US', filter_us)

# Thread filter (-t): drop events whose pid differs; '0' means no filter.
filter_pid = 'pid != %s' % args.tid if args.tid else '0'
bpf_text = bpf_text.replace('FILTER_PID', filter_pid)

# Process filter (-p): drop events whose tgid differs; '0' means no filter.
filter_tgid = 'tgid != %s' % args.pid if args.pid else '0'
bpf_text = bpf_text.replace('FILTER_TGID', filter_tgid)

# Dump the finished C program for debugging; --ebpf also exits.
if debug or args.ebpf:
    print(bpf_text)
    if args.ebpf:
        exit()
| 260 | |
Ivan Babrou | 5c48a3f | 2018-05-08 17:24:19 -0700 | [diff] [blame] | 261 | # process event |
| 262 | def print_event(cpu, data, size): |
Xiaozhou Liu | 51d62d3 | 2019-02-15 13:03:05 +0800 | [diff] [blame] | 263 | event = b["events"].event(data) |
zhenwei pi | 508d969 | 2021-08-12 18:04:17 +0800 | [diff] [blame] | 264 | if args.previous: |
| 265 | print("%-8s %-16s %-6s %14s %-16s %-6s" % (strftime("%H:%M:%S"), event.task, event.pid, event.delta_us, event.prev_task, event.prev_pid)) |
| 266 | else: |
| 267 | print("%-8s %-16s %-6s %14s" % (strftime("%H:%M:%S"), event.task, event.pid, event.delta_us)) |
Ivan Babrou | 5c48a3f | 2018-05-08 17:24:19 -0700 | [diff] [blame] | 268 | |
# load BPF program
b = BPF(text=bpf_text)
if not is_support_raw_tp:
    # Raw tracepoints unavailable: fall back to kprobes on the scheduler
    # wakeup and context-switch paths.
    b.attach_kprobe(event="ttwu_do_wakeup", fn_name="trace_ttwu_do_wakeup")
    b.attach_kprobe(event="wake_up_new_task", fn_name="trace_wake_up_new_task")
    # The compiler may rename finish_task_switch to finish_task_switch.isra.N,
    # so match both. Use a raw string so the \. and \d escapes reach the regex
    # engine intact; in a plain string they are invalid escape sequences
    # (DeprecationWarning, and a SyntaxWarning since Python 3.12).
    b.attach_kprobe(event_re=r"^finish_task_switch$|^finish_task_switch\.isra\.\d$",
        fn_name="trace_run")

# Header row; -P/--previous adds the previous task columns, mirroring
# print_event()'s output format.
print("Tracing run queue latency higher than %d us" % min_us)
if args.previous:
    print("%-8s %-16s %-6s %14s %-16s %-6s" % ("TIME", "COMM", "TID", "LAT(us)", "PREV COMM", "PREV TID"))
else:
    print("%-8s %-16s %-6s %14s" % ("TIME", "COMM", "TID", "LAT(us)"))

# read events: poll the perf buffer until interrupted (Ctrl-C exits cleanly).
b["events"].open_perf_buffer(print_event, page_cnt=64)
while True:
    try:
        b.perf_buffer_poll()
    except KeyboardInterrupt:
        exit()