#!/usr/bin/python
# @lint-avoid-python-3-compatibility-imports
#
# runqslower    Trace long process scheduling delays.
#               For Linux, uses BCC, eBPF.
#
# This script traces high scheduling delays: the time between a task
# becoming ready to run and that task actually running on-CPU.
#
# USAGE: runqslower [-p PID] [min_us]
#
# REQUIRES: Linux 4.9+ (BPF_PROG_TYPE_PERF_EVENT support).
#
# This measures the time a task spends waiting on a run queue for a turn
# on-CPU, and shows this time as individual events. This time should be
# small, but a task may need to wait its turn due to CPU load.
#
# This measures two types of run queue latency:
# 1. The time from a task being enqueued on a run queue to its context switch
#    and execution. This traces ttwu_do_wakeup(), wake_up_new_task() ->
#    finish_task_switch() with either raw tracepoints (if supported) or
#    kprobes, and instruments the run queue latency after a voluntary context
#    switch.
# 2. The time from when a task was involuntarily context switched and still
#    in the runnable state, to when it next executed. This is instrumented
#    from finish_task_switch() alone.
#
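# Example output (illustrative values only):
#
#     TIME     COMM             PID           LAT(us)
#     17:31:02 cc1              12924           12928
#     17:31:03 cksum            12950           11724
#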
# Copyright 2016 Cloudflare, Inc.
# Licensed under the Apache License, Version 2.0 (the "License")
#
# 02-May-2018   Ivan Babrou   Created this.

from __future__ import print_function
from bcc import BPF
import argparse
from time import strftime
import ctypes as ct

# arguments
examples = """examples:
    ./runqslower         # trace run queue latency higher than 10000 us (default)
    ./runqslower 1000    # trace run queue latency higher than 1000 us
    ./runqslower -p 123  # trace pid 123 only
"""
parser = argparse.ArgumentParser(
    description="Trace high run queue latency",
    formatter_class=argparse.RawDescriptionHelpFormatter,
    epilog=examples)
parser.add_argument("-p", "--pid", type=int, metavar="PID", dest="pid",
    help="trace this PID only")
parser.add_argument("min_us", nargs="?", default='10000',
    help="minimum run queue latency to trace, in us (default 10000)")
parser.add_argument("--ebpf", action="store_true",
    help=argparse.SUPPRESS)
args = parser.parse_args()
min_us = int(args.min_us)
debug = 0

# define BPF program
bpf_text = """
#include <uapi/linux/ptrace.h>
#include <linux/sched.h>
#include <linux/nsproxy.h>
#include <linux/pid_namespace.h>

// pid -> enqueue timestamp, in ns
BPF_HASH(start, u32);

struct rq;

struct data_t {
    u32 pid;
    char task[TASK_COMM_LEN];
    u64 delta_us;
};

BPF_PERF_OUTPUT(events);

// record enqueue timestamp
static int trace_enqueue(u32 tgid, u32 pid)
{
    if (FILTER_PID || pid == 0)
        return 0;
    u64 ts = bpf_ktime_get_ns();
    start.update(&pid, &ts);
    return 0;
}
"""

bpf_text_kprobe = """
int trace_wake_up_new_task(struct pt_regs *ctx, struct task_struct *p)
{
    return trace_enqueue(p->tgid, p->pid);
}

int trace_ttwu_do_wakeup(struct pt_regs *ctx, struct rq *rq, struct task_struct *p,
    int wake_flags)
{
    return trace_enqueue(p->tgid, p->pid);
}

// calculate latency
int trace_run(struct pt_regs *ctx, struct task_struct *prev)
{
    u32 pid, tgid;

    // ivcsw: treat like an enqueue event and store timestamp
    if (prev->state == TASK_RUNNING) {
        tgid = prev->tgid;
        pid = prev->pid;
        if (!(FILTER_PID || pid == 0)) {
            u64 ts = bpf_ktime_get_ns();
            start.update(&pid, &ts);
        }
    }

    tgid = bpf_get_current_pid_tgid() >> 32;
    pid = bpf_get_current_pid_tgid();

    u64 *tsp, delta_us;

    // fetch timestamp and calculate delta
    tsp = start.lookup(&pid);
    if (tsp == 0) {
        return 0;   // missed enqueue
    }
    delta_us = (bpf_ktime_get_ns() - *tsp) / 1000;

    if (FILTER_US)
        return 0;

    struct data_t data = {};
    data.pid = pid;
    data.delta_us = delta_us;
    bpf_get_current_comm(&data.task, sizeof(data.task));

    // output
    events.perf_submit(ctx, &data, sizeof(data));

    start.delete(&pid);
    return 0;
}
"""

bpf_text_raw_tp = """
RAW_TRACEPOINT_PROBE(sched_wakeup)
{
    // TP_PROTO(struct task_struct *p)
    struct task_struct *p = (struct task_struct *)ctx->args[0];
    return trace_enqueue(p->tgid, p->pid);
}

RAW_TRACEPOINT_PROBE(sched_wakeup_new)
{
    // TP_PROTO(struct task_struct *p)
    struct task_struct *p = (struct task_struct *)ctx->args[0];
    u32 tgid, pid;

    bpf_probe_read(&tgid, sizeof(tgid), &p->tgid);
    bpf_probe_read(&pid, sizeof(pid), &p->pid);
    return trace_enqueue(tgid, pid);
}

RAW_TRACEPOINT_PROBE(sched_switch)
{
    // TP_PROTO(bool preempt, struct task_struct *prev, struct task_struct *next)
    struct task_struct *prev = (struct task_struct *)ctx->args[1];
    struct task_struct *next = (struct task_struct *)ctx->args[2];
    u32 pid, tgid;
    long state;

    // ivcsw: treat like an enqueue event and store timestamp
    bpf_probe_read(&state, sizeof(long), &prev->state);
    if (state == TASK_RUNNING) {
        bpf_probe_read(&tgid, sizeof(prev->tgid), &prev->tgid);
        bpf_probe_read(&pid, sizeof(prev->pid), &prev->pid);
        if (!(FILTER_PID || pid == 0)) {
            u64 ts = bpf_ktime_get_ns();
            start.update(&pid, &ts);
        }
    }

    bpf_probe_read(&tgid, sizeof(next->tgid), &next->tgid);
    bpf_probe_read(&pid, sizeof(next->pid), &next->pid);

    u64 *tsp, delta_us;

    // fetch timestamp and calculate delta
    tsp = start.lookup(&pid);
    if (tsp == 0) {
        return 0;   // missed enqueue
    }
    delta_us = (bpf_ktime_get_ns() - *tsp) / 1000;

    if (FILTER_US)
        return 0;

    struct data_t data = {};
    data.pid = pid;
    data.delta_us = delta_us;
    bpf_get_current_comm(&data.task, sizeof(data.task));

    // output
    events.perf_submit(ctx, &data, sizeof(data));

    start.delete(&pid);
    return 0;
}
"""

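# raw tracepoints (Linux 4.17+) carry less overhead than the equivalent
# kprobes, so prefer them whenever the running kernel supports them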
is_support_raw_tp = BPF.support_raw_tracepoint()
if is_support_raw_tp:
    bpf_text += bpf_text_raw_tp
else:
    bpf_text += bpf_text_kprobe

# code substitutions
if min_us == 0:
    bpf_text = bpf_text.replace('FILTER_US', '0')
else:
    bpf_text = bpf_text.replace('FILTER_US', 'delta_us <= %s' % str(min_us))
if args.pid:
    bpf_text = bpf_text.replace('FILTER_PID', 'pid != %s' % args.pid)
else:
    bpf_text = bpf_text.replace('FILTER_PID', '0')
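# e.g. "-p 123" with min_us=1000 turns the placeholders into
# "pid != 123" and "delta_us <= 1000" in the generated C code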
if debug or args.ebpf:
    print(bpf_text)
    if args.ebpf:
        exit()

# kernel->user event data: struct data_t
DNAME_INLINE_LEN = 32   # linux/dcache.h
TASK_COMM_LEN = 16      # linux/sched.h
class Data(ct.Structure):
    _fields_ = [
        ("pid", ct.c_uint),
        ("task", ct.c_char * TASK_COMM_LEN),
        ("delta_us", ct.c_ulonglong),
    ]
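# (recent BCC releases can generate this class automatically via
# b["events"].event(data); the explicit ctypes mirror keeps this tool
# working with older BCC)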

# process event
def print_event(cpu, data, size):
    event = ct.cast(data, ct.POINTER(Data)).contents
    print("%-8s %-16s %-6s %14s" % (strftime("%H:%M:%S"), event.task,
        event.pid, event.delta_us))

# load BPF program
b = BPF(text=bpf_text)
if not is_support_raw_tp:
    b.attach_kprobe(event="ttwu_do_wakeup", fn_name="trace_ttwu_do_wakeup")
    b.attach_kprobe(event="wake_up_new_task", fn_name="trace_wake_up_new_task")
    b.attach_kprobe(event="finish_task_switch", fn_name="trace_run")

print("Tracing run queue latency higher than %d us" % min_us)
print("%-8s %-16s %-6s %14s" % ("TIME", "COMM", "PID", "LAT(us)"))

# read events
b["events"].open_perf_buffer(print_event, page_cnt=64)
while 1:
    try:
        b.perf_buffer_poll()
    except KeyboardInterrupt:
        exit()