#!/usr/bin/python
# @lint-avoid-python-3-compatibility-imports
#
# runqslower    Trace long process scheduling delays.
#               For Linux, uses BCC, eBPF.
#
# This script traces high scheduling delays between a task becoming
# ready to run and it actually running on CPU after that.
#
# USAGE: runqslower [-p PID] [min_us]
#
# REQUIRES: Linux 4.9+ (BPF_PROG_TYPE_PERF_EVENT support).
#
# This measures the time a task spends waiting on a run queue for a turn
# on-CPU, and shows this time as individual events. This time should be small,
# but a task may need to wait its turn due to CPU load.
#
# This measures two types of run queue latency:
# 1. The time from a task being enqueued on a run queue to its context switch
#    and execution. This traces ttwu_do_wakeup(), wake_up_new_task() ->
#    finish_task_switch() with either raw tracepoints (if supported) or kprobes
#    and instruments the run queue latency after a voluntary context switch.
# 2. The time from when a task was involuntarily context switched and still
#    in the runnable state, to when it next executed. This is instrumented
#    from finish_task_switch() alone.
#
# Copyright 2016 Cloudflare, Inc.
# Licensed under the Apache License, Version 2.0 (the "License")
#
# 02-May-2018   Ivan Babrou   Created this.

from __future__ import print_function
from bcc import BPF
import argparse
from time import strftime

# arguments
examples = """examples:
    ./runqslower         # trace run queue latency higher than 10000 us (default)
    ./runqslower 1000    # trace run queue latency higher than 1000 us
    ./runqslower -p 123  # trace pid 123 only
"""
parser = argparse.ArgumentParser(
    description="Trace high run queue latency",
    formatter_class=argparse.RawDescriptionHelpFormatter,
    epilog=examples)
parser.add_argument("-p", "--pid", type=int, metavar="PID", dest="pid",
    help="trace this PID only")
parser.add_argument("min_us", nargs="?", default='10000',
    help="minimum run queue latency to trace, in us (default 10000)")
parser.add_argument("--ebpf", action="store_true",
    help=argparse.SUPPRESS)
args = parser.parse_args()
min_us = int(args.min_us)
debug = 0

# define BPF program
bpf_text = """
#include <uapi/linux/ptrace.h>
#include <linux/sched.h>
#include <linux/nsproxy.h>
#include <linux/pid_namespace.h>

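// start: pid -> enqueue timestamp (ns). Written when a task is woken or
// enqueued, read back and deleted when the task next gets on-CPU.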
BPF_HASH(start, u32);

struct rq;

struct data_t {
    u32 pid;
    char task[TASK_COMM_LEN];
    u64 delta_us;
};

BPF_PERF_OUTPUT(events);

// record enqueue timestamp
static int trace_enqueue(u32 tgid, u32 pid)
{
    if (FILTER_PID || pid == 0)
        return 0;
    u64 ts = bpf_ktime_get_ns();
    start.update(&pid, &ts);
    return 0;
}
"""

bpf_text_kprobe = """
int trace_wake_up_new_task(struct pt_regs *ctx, struct task_struct *p)
{
    return trace_enqueue(p->tgid, p->pid);
}

int trace_ttwu_do_wakeup(struct pt_regs *ctx, struct rq *rq, struct task_struct *p,
    int wake_flags)
{
    return trace_enqueue(p->tgid, p->pid);
}

// calculate latency
int trace_run(struct pt_regs *ctx, struct task_struct *prev)
{
    u32 pid, tgid;

    // ivcsw: treat like an enqueue event and store timestamp
    if (prev->state == TASK_RUNNING) {
        tgid = prev->tgid;
        pid = prev->pid;
        if (!(FILTER_PID || pid == 0)) {
            u64 ts = bpf_ktime_get_ns();
            start.update(&pid, &ts);
        }
    }

    tgid = bpf_get_current_pid_tgid() >> 32;
    pid = bpf_get_current_pid_tgid();
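    // bpf_get_current_pid_tgid() returns tgid << 32 | pid; the u32
    // assignments above keep the thread id (pid) and task group id (tgid)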

    u64 *tsp, delta_us;

    // fetch timestamp and calculate delta
    tsp = start.lookup(&pid);
    if (tsp == 0) {
        return 0;   // missed enqueue
    }
    delta_us = (bpf_ktime_get_ns() - *tsp) / 1000;

    if (FILTER_US)
        return 0;

    struct data_t data = {};
    data.pid = pid;
    data.delta_us = delta_us;
    bpf_get_current_comm(&data.task, sizeof(data.task));

    // output
    events.perf_submit(ctx, &data, sizeof(data));

    start.delete(&pid);
    return 0;
}
"""

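# raw tracepoint variant (preferred): RAW_TRACEPOINT_PROBE() attaches to the
# stable sched:* tracepoints automatically when the program is loaded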
bpf_text_raw_tp = """
RAW_TRACEPOINT_PROBE(sched_wakeup)
{
    // TP_PROTO(struct task_struct *p)
    struct task_struct *p = (struct task_struct *)ctx->args[0];
    return trace_enqueue(p->tgid, p->pid);
}

RAW_TRACEPOINT_PROBE(sched_wakeup_new)
{
    // TP_PROTO(struct task_struct *p)
    struct task_struct *p = (struct task_struct *)ctx->args[0];
    u32 tgid, pid;

    bpf_probe_read(&tgid, sizeof(tgid), &p->tgid);
    bpf_probe_read(&pid, sizeof(pid), &p->pid);
    return trace_enqueue(tgid, pid);
}

RAW_TRACEPOINT_PROBE(sched_switch)
{
    // TP_PROTO(bool preempt, struct task_struct *prev, struct task_struct *next)
    struct task_struct *prev = (struct task_struct *)ctx->args[1];
    struct task_struct *next = (struct task_struct *)ctx->args[2];
    u32 pid, tgid;
    long state;

    // ivcsw: treat like an enqueue event and store timestamp
    bpf_probe_read(&state, sizeof(long), &prev->state);
    if (state == TASK_RUNNING) {
        bpf_probe_read(&tgid, sizeof(prev->tgid), &prev->tgid);
        bpf_probe_read(&pid, sizeof(prev->pid), &prev->pid);
        if (!(FILTER_PID || pid == 0)) {
            u64 ts = bpf_ktime_get_ns();
            start.update(&pid, &ts);
        }
    }

    bpf_probe_read(&tgid, sizeof(next->tgid), &next->tgid);
    bpf_probe_read(&pid, sizeof(next->pid), &next->pid);

    u64 *tsp, delta_us;

    // fetch timestamp and calculate delta
    tsp = start.lookup(&pid);
    if (tsp == 0) {
        return 0;   // missed enqueue
    }
    delta_us = (bpf_ktime_get_ns() - *tsp) / 1000;

    if (FILTER_US)
        return 0;

    struct data_t data = {};
    data.pid = pid;
    data.delta_us = delta_us;
    bpf_get_current_comm(&data.task, sizeof(data.task));

    // output
    events.perf_submit(ctx, &data, sizeof(data));

    start.delete(&pid);
    return 0;
}
"""

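# prefer raw tracepoints (Linux 4.17+) for lower overhead; fall back to
# kprobes on older kernels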
is_support_raw_tp = BPF.support_raw_tracepoint()
if is_support_raw_tp:
    bpf_text += bpf_text_raw_tp
else:
    bpf_text += bpf_text_kprobe

# code substitutions
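# FILTER_US and FILTER_PID are placeholders in the C source above; they are
# rewritten into real predicates (or a constant 0) before compilation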
if min_us == 0:
    bpf_text = bpf_text.replace('FILTER_US', '0')
else:
    bpf_text = bpf_text.replace('FILTER_US', 'delta_us <= %s' % str(min_us))
if args.pid:
    bpf_text = bpf_text.replace('FILTER_PID', 'pid != %s' % args.pid)
else:
    bpf_text = bpf_text.replace('FILTER_PID', '0')
if debug or args.ebpf:
    print(bpf_text)
    if args.ebpf:
        exit()

# process event
def print_event(cpu, data, size):
    event = b["events"].event(data)
    print("%-8s %-16s %-6s %14s" % (strftime("%H:%M:%S"), event.task, event.pid, event.delta_us))

# load BPF program
b = BPF(text=bpf_text)
if not is_support_raw_tp:
    b.attach_kprobe(event="ttwu_do_wakeup", fn_name="trace_ttwu_do_wakeup")
    b.attach_kprobe(event="wake_up_new_task", fn_name="trace_wake_up_new_task")
    b.attach_kprobe(event="finish_task_switch", fn_name="trace_run")

print("Tracing run queue latency higher than %d us" % min_us)
print("%-8s %-16s %-6s %14s" % ("TIME", "COMM", "PID", "LAT(us)"))

# read events
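# page_cnt=64 sizes each per-CPU perf ring buffer (in pages, power of two);
# a larger buffer reduces the chance of dropped events under load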
b["events"].open_perf_buffer(print_event, page_cnt=64)
while 1:
    try:
        b.perf_buffer_poll()
    except KeyboardInterrupt:
        exit()