blob: b67853301699804ea3977b1f2d2046276e5f3276 [file] [log] [blame]
Alexey Ivanovcc01a9c2019-01-16 09:50:46 -08001#!/usr/bin/python
Ivan Babrou5c48a3f2018-05-08 17:24:19 -07002# @lint-avoid-python-3-compatibility-imports
3#
4# runqslower Trace long process scheduling delays.
5# For Linux, uses BCC, eBPF.
6#
7# This script traces high scheduling delays between tasks being
8# ready to run and them running on CPU after that.
9#
10# USAGE: runqslower [-p PID] [min_us]
11#
12# REQUIRES: Linux 4.9+ (BPF_PROG_TYPE_PERF_EVENT support).
13#
14# This measures the time a task spends waiting on a run queue for a turn
# on-CPU, and shows this time as individual events. This time should be small,
16# but a task may need to wait its turn due to CPU load.
17#
18# This measures two types of run queue latency:
19# 1. The time from a task being enqueued on a run queue to its context switch
20# and execution. This traces ttwu_do_wakeup(), wake_up_new_task() ->
21# finish_task_switch() with either raw tracepoints (if supported) or kprobes
22# and instruments the run queue latency after a voluntary context switch.
23# 2. The time from when a task was involuntary context switched and still
24# in the runnable state, to when it next executed. This is instrumented
25# from finish_task_switch() alone.
26#
27# Copyright 2016 Cloudflare, Inc.
28# Licensed under the Apache License, Version 2.0 (the "License")
29#
30# 02-May-2018 Ivan Babrou Created this.
Greg Bod65885f32019-11-18 19:35:31 +000031# 18-Nov-2019 Gergely Bod BUG fix: Use bpf_probe_read_str() to extract the
32# process name from 'task_struct* next' in raw tp code.
33# bpf_get_current_comm() operates on the current task
34# which might already be different than 'next'.
Ivan Babrou5c48a3f2018-05-08 17:24:19 -070035
36from __future__ import print_function
37from bcc import BPF
38import argparse
39from time import strftime
Ivan Babrou5c48a3f2018-05-08 17:24:19 -070040
# arguments
examples = """examples:
    ./runqslower         # trace run queue latency higher than 10000 us (default)
    ./runqslower 1000    # trace run queue latency higher than 1000 us
    ./runqslower -p 123  # trace pid 123 only
"""
parser = argparse.ArgumentParser(
    description="Trace high run queue latency",
    formatter_class=argparse.RawDescriptionHelpFormatter,
    epilog=examples)
parser.add_argument("-p", "--pid", type=int, metavar="PID", dest="pid",
    help="trace this PID only")
# BUG fix: the help text previously said "in ms" and misspelled "latency";
# the threshold is interpreted in microseconds (see min_us usage below).
parser.add_argument("min_us", nargs="?", default='10000',
    help="minimum run queue latency to trace, in us (default 10000)")
parser.add_argument("--ebpf", action="store_true",
    help=argparse.SUPPRESS)
args = parser.parse_args()
min_us = int(args.min_us)
debug = 0
60
# Common BPF program text. One of the two probe flavors defined below
# (raw tracepoint or kprobe) is appended to this before compilation.
bpf_text = """
#include <uapi/linux/ptrace.h>
#include <linux/sched.h>
#include <linux/nsproxy.h>
#include <linux/pid_namespace.h>

BPF_HASH(start, u32);

struct rq;

struct data_t {
    u32 pid;
    char task[TASK_COMM_LEN];
    u64 delta_us;
};

BPF_PERF_OUTPUT(events);

// record enqueue timestamp
static int trace_enqueue(u32 tgid, u32 pid)
{
    if (FILTER_PID || pid == 0)
        return 0;
    u64 ts = bpf_ktime_get_ns();
    start.update(&pid, &ts);
    return 0;
}
"""
90
# Kprobe-based probe flavor, used when the kernel lacks raw tracepoint
# support. Instruments wake_up_new_task(), ttwu_do_wakeup() and
# finish_task_switch().
bpf_text_kprobe = """
int trace_wake_up_new_task(struct pt_regs *ctx, struct task_struct *p)
{
    return trace_enqueue(p->tgid, p->pid);
}

int trace_ttwu_do_wakeup(struct pt_regs *ctx, struct rq *rq, struct task_struct *p,
    int wake_flags)
{
    return trace_enqueue(p->tgid, p->pid);
}

// calculate latency
int trace_run(struct pt_regs *ctx, struct task_struct *prev)
{
    u32 pid, tgid;

    // ivcsw: treat like an enqueue event and store timestamp
    if (prev->state == TASK_RUNNING) {
        tgid = prev->tgid;
        pid = prev->pid;
        if (!(FILTER_PID || pid == 0)) {
            u64 ts = bpf_ktime_get_ns();
            start.update(&pid, &ts);
        }
    }

    tgid = bpf_get_current_pid_tgid() >> 32;
    pid = bpf_get_current_pid_tgid();

    u64 *tsp, delta_us;

    // fetch timestamp and calculate delta
    tsp = start.lookup(&pid);
    if (tsp == 0) {
        return 0; // missed enqueue
    }
    delta_us = (bpf_ktime_get_ns() - *tsp) / 1000;

    if (FILTER_US)
        return 0;

    struct data_t data = {};
    data.pid = pid;
    data.delta_us = delta_us;
    bpf_get_current_comm(&data.task, sizeof(data.task));

    // output
    events.perf_submit(ctx, &data, sizeof(data));

    start.delete(&pid);
    return 0;
}
"""
145
# Raw-tracepoint probe flavor (kernel 4.17+): lower overhead than kprobes.
bpf_text_raw_tp = """
RAW_TRACEPOINT_PROBE(sched_wakeup)
{
    // TP_PROTO(struct task_struct *p)
    struct task_struct *p = (struct task_struct *)ctx->args[0];
    u32 tgid, pid;

    // Consistency/BUG fix: read fields with bpf_probe_read(), as
    // sched_wakeup_new below already does. Raw tracepoint arguments are
    // raw kernel pointers; a direct dereference may be rejected by the
    // verifier or fault on some kernels.
    bpf_probe_read(&tgid, sizeof(tgid), &p->tgid);
    bpf_probe_read(&pid, sizeof(pid), &p->pid);
    return trace_enqueue(tgid, pid);
}

RAW_TRACEPOINT_PROBE(sched_wakeup_new)
{
    // TP_PROTO(struct task_struct *p)
    struct task_struct *p = (struct task_struct *)ctx->args[0];
    u32 tgid, pid;

    bpf_probe_read(&tgid, sizeof(tgid), &p->tgid);
    bpf_probe_read(&pid, sizeof(pid), &p->pid);
    return trace_enqueue(tgid, pid);
}

RAW_TRACEPOINT_PROBE(sched_switch)
{
    // TP_PROTO(bool preempt, struct task_struct *prev, struct task_struct *next)
    struct task_struct *prev = (struct task_struct *)ctx->args[1];
    struct task_struct *next = (struct task_struct *)ctx->args[2];
    u32 pid;
    long state;

    // ivcsw: treat like an enqueue event and store timestamp
    bpf_probe_read(&state, sizeof(long), (const void *)&prev->state);
    if (state == TASK_RUNNING) {
        bpf_probe_read(&pid, sizeof(prev->pid), &prev->pid);
        if (!(FILTER_PID || pid == 0)) {
            u64 ts = bpf_ktime_get_ns();
            start.update(&pid, &ts);
        }
    }

    bpf_probe_read(&pid, sizeof(next->pid), &next->pid);

    u64 *tsp, delta_us;

    // fetch timestamp and calculate delta
    tsp = start.lookup(&pid);
    if (tsp == 0) {
        return 0; // missed enqueue
    }
    delta_us = (bpf_ktime_get_ns() - *tsp) / 1000;

    if (FILTER_US)
        return 0;

    struct data_t data = {};
    data.pid = pid;
    data.delta_us = delta_us;
    // Use bpf_probe_read_str() on next->comm rather than
    // bpf_get_current_comm(): "current" may already differ from "next"
    // by the time this probe runs.
    bpf_probe_read_str(&data.task, sizeof(data.task), next->comm);

    // output
    events.perf_submit(ctx, &data, sizeof(data));

    start.delete(&pid);
    return 0;
}
"""
209
# Pick the probe flavor: raw tracepoints when the kernel supports them,
# kprobes otherwise.
is_support_raw_tp = BPF.support_raw_tracepoint()
bpf_text += bpf_text_raw_tp if is_support_raw_tp else bpf_text_kprobe

# code substitutions: inline the latency and PID filters into the C text
filter_us = '0' if min_us == 0 else 'delta_us <= %s' % str(min_us)
bpf_text = bpf_text.replace('FILTER_US', filter_us)

filter_pid = 'pid != %s' % args.pid if args.pid else '0'
bpf_text = bpf_text.replace('FILTER_PID', filter_pid)

if debug or args.ebpf:
    print(bpf_text)
    if args.ebpf:
        exit()
229
# process event: decode one perf-buffer sample and print a formatted row
def print_event(cpu, data, size):
    event = b["events"].event(data)
    timestamp = strftime("%H:%M:%S")
    print("%-8s %-16s %-6s %14s" % (timestamp, event.task, event.pid, event.delta_us))
234
# load BPF program
b = BPF(text=bpf_text)
if not is_support_raw_tp:
    # No raw tracepoints: fall back to attaching the kprobe flavor.
    b.attach_kprobe(event="ttwu_do_wakeup", fn_name="trace_ttwu_do_wakeup")
    b.attach_kprobe(event="wake_up_new_task", fn_name="trace_wake_up_new_task")
    b.attach_kprobe(event="finish_task_switch", fn_name="trace_run")

print("Tracing run queue latency higher than %d us" % min_us)
print("%-8s %-16s %-6s %14s" % ("TIME", "COMM", "PID", "LAT(us)"))

# read events until interrupted
b["events"].open_perf_buffer(print_event, page_cnt=64)
while True:
    try:
        b.perf_buffer_poll()
    except KeyboardInterrupt:
        exit()