#!/usr/bin/python
# @lint-avoid-python-3-compatibility-imports
#
# xfsslower  Trace slow XFS operations.
#            For Linux, uses BCC, eBPF.
#
# USAGE: xfsslower [-h] [-j] [-p PID] [min_ms]
#
# This script traces common XFS file operations: reads, writes, opens, and
# syncs. It measures the time spent in these operations, and prints details
# for each that exceeded a threshold.
#
# WARNING: This adds low-overhead instrumentation to these XFS operations,
# including reads and writes from the file system cache. Such reads and writes
# can be very frequent (depending on the workload; eg, 1M/sec), at which
# point the overhead of this tool (even if it prints no "slower" events) can
# begin to become significant.
#
# By default, a minimum millisecond threshold of 10 is used.
#
# Copyright 2016 Netflix, Inc.
# Licensed under the Apache License, Version 2.0 (the "License")
#
# 11-Feb-2016   Brendan Gregg   Created this.

26from __future__ import print_function
27from bcc import BPF
28import argparse
29from time import strftime
30import ctypes as ct
31
32# arguments
33examples = """examples:
34 ./xfsslower # trace operations slower than 10 ms (default)
35 ./xfsslower 1 # trace operations slower than 1 ms
36 ./xfsslower -j 1 # ... 1 ms, parsable output (csv)
37 ./xfsslower 0 # trace all operations (warning: verbose)
38 ./xfsslower -p 185 # trace PID 185 only
39"""
40parser = argparse.ArgumentParser(
41 description="Trace common XFS file operations slower than a threshold",
42 formatter_class=argparse.RawDescriptionHelpFormatter,
43 epilog=examples)
44parser.add_argument("-j", "--csv", action="store_true",
45 help="just print fields: comma-separated values")
46parser.add_argument("-p", "--pid",
47 help="trace this PID only")
48parser.add_argument("min_ms", nargs="?", default='10',
49 help="minimum I/O duration to trace, in ms (default 10)")
50args = parser.parse_args()
51min_ms = int(args.min_ms)
52pid = args.pid
53csv = args.csv
54debug = 0
55
# define BPF program
#
# The C source below is compiled by BCC at runtime and attached to the XFS
# file-operation kernel functions. FILTER_PID and FILTER_US are placeholder
# tokens; the Python code after the string rewrites them into real filter
# expressions before the program is loaded.
bpf_text = """
#include <uapi/linux/ptrace.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/dcache.h>

// XXX: switch these to char's when supported
#define TRACE_READ 0
#define TRACE_WRITE 1
#define TRACE_OPEN 2
#define TRACE_FSYNC 3

struct val_t {
    u64 ts;
    u64 offset;
    struct file *fp;
};

struct data_t {
    // XXX: switch some to u32's when supported
    u64 ts_us;
    u64 type;
    u64 size;
    u64 offset;
    u64 delta_us;
    u64 pid;
    char task[TASK_COMM_LEN];
    char file[DNAME_INLINE_LEN];
};

BPF_HASH(entryinfo, pid_t, struct val_t);
BPF_PERF_OUTPUT(events);

//
// Store timestamp and size on entry
//

// xfs_file_read_iter(), xfs_file_write_iter():
int trace_rw_entry(struct pt_regs *ctx, struct kiocb *iocb)
{
    u32 pid;
    pid = bpf_get_current_pid_tgid();
    if (FILTER_PID)
        return 0;

    // store filep and timestamp by pid
    struct val_t val = {};
    val.ts = bpf_ktime_get_ns();
    val.fp = iocb->ki_filp;
    val.offset = iocb->ki_pos;
    if (val.fp)
        entryinfo.update(&pid, &val);

    return 0;
}

// xfs_file_open():
int trace_open_entry(struct pt_regs *ctx, struct inode *inode,
    struct file *file)
{
    u32 pid;
    pid = bpf_get_current_pid_tgid();
    if (FILTER_PID)
        return 0;

    // store filep and timestamp by pid
    struct val_t val = {};
    val.ts = bpf_ktime_get_ns();
    val.fp = file;
    val.offset = 0;
    if (val.fp)
        entryinfo.update(&pid, &val);

    return 0;
}

// xfs_file_fsync():
int trace_fsync_entry(struct pt_regs *ctx, struct file *file)
{
    u32 pid;
    pid = bpf_get_current_pid_tgid();
    if (FILTER_PID)
        return 0;

    // store filep and timestamp by pid
    struct val_t val = {};
    val.ts = bpf_ktime_get_ns();
    val.fp = file;
    val.offset = 0;
    if (val.fp)
        entryinfo.update(&pid, &val);

    return 0;
}

//
// Output
//

static int trace_return(struct pt_regs *ctx, int type)
{
    struct val_t *valp;
    u32 pid = bpf_get_current_pid_tgid();

    valp = entryinfo.lookup(&pid);
    if (valp == 0) {
        // missed tracing issue or filtered
        return 0;
    }

    // calculate delta
    u64 ts = bpf_ktime_get_ns();
    u64 delta_us = (ts - valp->ts) / 1000;
    entryinfo.delete(&pid);
    if (FILTER_US)
        return 0;

    // workaround (rewriter should handle file to d_iname in one step):
    struct dentry *de = NULL;
    bpf_probe_read(&de, sizeof(de), &valp->fp->f_path.dentry);

    // populate output struct
    u32 size = ctx->ax;
    struct data_t data = {.type = type, .size = size, .delta_us = delta_us,
        .pid = pid};
    data.ts_us = ts / 1000;
    data.offset = valp->offset;
    bpf_probe_read(&data.file, sizeof(data.file), de->d_iname);
    bpf_get_current_comm(&data.task, sizeof(data.task));

    events.perf_submit(ctx, &data, sizeof(data));

    return 0;
}

int trace_read_return(struct pt_regs *ctx)
{
    return trace_return(ctx, TRACE_READ);
}

int trace_write_return(struct pt_regs *ctx)
{
    return trace_return(ctx, TRACE_WRITE);
}

int trace_open_return(struct pt_regs *ctx)
{
    return trace_return(ctx, TRACE_OPEN);
}

int trace_fsync_return(struct pt_regs *ctx)
{
    return trace_return(ctx, TRACE_FSYNC);
}

"""
# substitute the filter placeholders according to the command-line options
if min_ms == 0:
    # threshold of 0: never filter on latency, trace every operation
    bpf_text = bpf_text.replace('FILTER_US', '0')
else:
    # drop events at or below the threshold; min_ms is converted to microseconds
    # to match delta_us in the BPF program
    bpf_text = bpf_text.replace('FILTER_US',
        'delta_us <= %s' % str(min_ms * 1000))
if args.pid:
    # NOTE(review): the BPF `pid` is the low 32 bits of bpf_get_current_pid_tgid(),
    # i.e. the thread ID — presumably acceptable for single-threaded targets;
    # confirm if per-process (TGID) filtering is intended.
    bpf_text = bpf_text.replace('FILTER_PID', 'pid != %s' % pid)
else:
    # no PID filter requested
    bpf_text = bpf_text.replace('FILTER_PID', '0')
if debug:
    print(bpf_text)
224
# kernel->user event data: struct data_t
# These lengths mirror the kernel headers included by the BPF program; if the
# kernel's values differ, the ctypes layout below will not match the records
# the BPF program emits.
DNAME_INLINE_LEN = 32  # linux/dcache.h
TASK_COMM_LEN = 16  # linux/sched.h
class Data(ct.Structure):
    """ctypes mirror of the BPF program's struct data_t.

    Field order and widths must match the C definition exactly: six u64
    fields followed by two fixed-size char arrays.
    """
    _fields_ = [
        ("ts_us", ct.c_ulonglong),      # event (return-probe) timestamp, us
        ("type", ct.c_ulonglong),       # TRACE_READ/WRITE/OPEN/FSYNC
        ("size", ct.c_ulonglong),       # I/O size in bytes (return value)
        ("offset", ct.c_ulonglong),     # file offset in bytes (0 for open/fsync)
        ("delta_us", ct.c_ulonglong),   # operation latency, us
        ("pid", ct.c_ulonglong),
        ("task", ct.c_char * TASK_COMM_LEN),
        ("file", ct.c_char * DNAME_INLINE_LEN)
    ]
239
# single-letter operation codes keyed by the BPF TRACE_* type values
_TYPE_CODES = {0: 'R', 1: 'W', 2: 'O', 3: 'S'}

# process event
def print_event(cpu, data, size):
    """Perf-buffer callback: decode one struct data_t record and print it.

    Prints a comma-separated row when the module-level ``csv`` flag is set,
    otherwise a fixed-width human-readable row (offset shown in KB, latency
    in ms). ``cpu`` and ``size`` are part of the BCC callback signature and
    are unused here.
    """
    event = ct.cast(data, ct.POINTER(Data)).contents

    # map the numeric type to its letter; unknown values fall back to 'R',
    # matching the original if/elif chain's default. (Renamed from `type`,
    # which shadowed the builtin.)
    op = _TYPE_CODES.get(event.type, 'R')

    if csv:
        print("%d,%s,%d,%s,%d,%d,%d,%s" % (
            event.ts_us, event.task, event.pid, op, event.size,
            event.offset, event.delta_us, event.file))
        return
    print("%-8s %-14.14s %-6s %1s %-7s %-8d %7.2f %s" % (strftime("%H:%M:%S"),
        event.task, event.pid, op, event.size, event.offset / 1024,
        float(event.delta_us) / 1000, event.file))
260
# compile and load the BPF program
b = BPF(text=bpf_text)

# attach entry kprobes, then return kretprobes, to the common XFS file
# operation functions (same attachment order as before)
kprobes = [
    ("xfs_file_read_iter", "trace_rw_entry"),
    ("xfs_file_write_iter", "trace_rw_entry"),
    ("xfs_file_open", "trace_open_entry"),
    ("xfs_file_fsync", "trace_fsync_entry"),
]
kretprobes = [
    ("xfs_file_read_iter", "trace_read_return"),
    ("xfs_file_write_iter", "trace_write_return"),
    ("xfs_file_open", "trace_open_return"),
    ("xfs_file_fsync", "trace_fsync_return"),
]
for kernel_fn, bpf_fn in kprobes:
    b.attach_kprobe(event=kernel_fn, fn_name=bpf_fn)
for kernel_fn, bpf_fn in kretprobes:
    b.attach_kretprobe(event=kernel_fn, fn_name=bpf_fn)

# print the column header
if not csv:
    if min_ms:
        print("Tracing XFS operations slower than %d ms" % min_ms)
    else:
        print("Tracing XFS operations")
    print("%-8s %-14s %-6s %1s %-7s %-8s %7s %s" % ("TIME", "COMM", "PID", "T",
        "BYTES", "OFF_KB", "LAT(ms)", "FILENAME"))
else:
    print("ENDTIME_us,TASK,PID,TYPE,BYTES,OFFSET_b,LATENCY_us,FILE")

# consume events until interrupted
b["events"].open_perf_buffer(print_event)
while True:
    b.kprobe_poll()