#undef TRACE_SYSTEM
#define TRACE_SYSTEM writeback

#if !defined(_TRACE_WRITEBACK_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_WRITEBACK_H

#include <linux/backing-dev.h>
#include <linux/device.h>
#include <linux/writeback.h>

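/*
 * show_inode_state() renders an inode->i_state bitmask as a '|'-separated
 * list of symbolic flag names (e.g. "I_DIRTY_PAGES|I_SYNC") in trace output.
 * When tracing is enabled, these events are typically exposed under
 * /sys/kernel/debug/tracing/events/writeback/.
 */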
#define show_inode_state(state) \
	__print_flags(state, "|", \
		{I_DIRTY_SYNC, "I_DIRTY_SYNC"}, \
		{I_DIRTY_DATASYNC, "I_DIRTY_DATASYNC"}, \
		{I_DIRTY_PAGES, "I_DIRTY_PAGES"}, \
		{I_NEW, "I_NEW"}, \
		{I_WILL_FREE, "I_WILL_FREE"}, \
		{I_FREEING, "I_FREEING"}, \
		{I_CLEAR, "I_CLEAR"}, \
		{I_SYNC, "I_SYNC"}, \
		{I_REFERENCED, "I_REFERENCED"} \
	)

struct wb_writeback_work;

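/*
 * writeback_work_class: common template for events that follow a
 * wb_writeback_work item (queueing, execution, completion, waiting) on a
 * backing device.  It records the bdi name plus the work parameters
 * (nr_pages, sync_mode and the kupdate/range_cyclic/background flags).
 */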
DECLARE_EVENT_CLASS(writeback_work_class,
	TP_PROTO(struct backing_dev_info *bdi, struct wb_writeback_work *work),
	TP_ARGS(bdi, work),
	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(long, nr_pages)
		__field(dev_t, sb_dev)
		__field(int, sync_mode)
		__field(int, for_kupdate)
		__field(int, range_cyclic)
		__field(int, for_background)
	),
	TP_fast_assign(
		strncpy(__entry->name, dev_name(bdi->dev), 32);
		__entry->nr_pages = work->nr_pages;
		__entry->sb_dev = work->sb ? work->sb->s_dev : 0;
		__entry->sync_mode = work->sync_mode;
		__entry->for_kupdate = work->for_kupdate;
		__entry->range_cyclic = work->range_cyclic;
		__entry->for_background = work->for_background;
	),
	TP_printk("bdi %s: sb_dev %d:%d nr_pages=%ld sync_mode=%d "
		  "kupdate=%d range_cyclic=%d background=%d",
		  __entry->name,
		  MAJOR(__entry->sb_dev), MINOR(__entry->sb_dev),
		  __entry->nr_pages,
		  __entry->sync_mode,
		  __entry->for_kupdate,
		  __entry->range_cyclic,
		  __entry->for_background
	)
);
#define DEFINE_WRITEBACK_WORK_EVENT(name) \
DEFINE_EVENT(writeback_work_class, name, \
	TP_PROTO(struct backing_dev_info *bdi, struct wb_writeback_work *work), \
	TP_ARGS(bdi, work))
DEFINE_WRITEBACK_WORK_EVENT(writeback_nothread);
DEFINE_WRITEBACK_WORK_EVENT(writeback_queue);
DEFINE_WRITEBACK_WORK_EVENT(writeback_exec);
DEFINE_WRITEBACK_WORK_EVENT(writeback_start);
DEFINE_WRITEBACK_WORK_EVENT(writeback_written);
DEFINE_WRITEBACK_WORK_EVENT(writeback_wait);

TRACE_EVENT(writeback_pages_written,
	TP_PROTO(long pages_written),
	TP_ARGS(pages_written),
	TP_STRUCT__entry(
		__field(long, pages)
	),
	TP_fast_assign(
		__entry->pages = pages_written;
	),
	TP_printk("%ld", __entry->pages)
);

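/*
 * writeback_class: minimal template that records only the bdi name; used
 * for the wakeup, thread start/stop and bdi (un)register events below.
 */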
DECLARE_EVENT_CLASS(writeback_class,
	TP_PROTO(struct backing_dev_info *bdi),
	TP_ARGS(bdi),
	TP_STRUCT__entry(
		__array(char, name, 32)
	),
	TP_fast_assign(
		strncpy(__entry->name, dev_name(bdi->dev), 32);
	),
	TP_printk("bdi %s",
		  __entry->name
	)
);
#define DEFINE_WRITEBACK_EVENT(name) \
DEFINE_EVENT(writeback_class, name, \
	TP_PROTO(struct backing_dev_info *bdi), \
	TP_ARGS(bdi))

DEFINE_WRITEBACK_EVENT(writeback_nowork);
DEFINE_WRITEBACK_EVENT(writeback_wake_background);
DEFINE_WRITEBACK_EVENT(writeback_wake_thread);
DEFINE_WRITEBACK_EVENT(writeback_wake_forker_thread);
DEFINE_WRITEBACK_EVENT(writeback_bdi_register);
DEFINE_WRITEBACK_EVENT(writeback_bdi_unregister);
DEFINE_WRITEBACK_EVENT(writeback_thread_start);
DEFINE_WRITEBACK_EVENT(writeback_thread_stop);

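/*
 * wbc_class: snapshots the state of a struct writeback_control (pages left
 * to write, pages skipped, sync mode, kupdate/background/reclaim flags and
 * the byte range being written) at the instrumented call site.
 */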
DECLARE_EVENT_CLASS(wbc_class,
	TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi),
	TP_ARGS(wbc, bdi),
	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(long, nr_to_write)
		__field(long, pages_skipped)
		__field(int, sync_mode)
		__field(int, for_kupdate)
		__field(int, for_background)
		__field(int, for_reclaim)
		__field(int, range_cyclic)
		__field(long, range_start)
		__field(long, range_end)
	),

	TP_fast_assign(
		strncpy(__entry->name, dev_name(bdi->dev), 32);
		__entry->nr_to_write = wbc->nr_to_write;
		__entry->pages_skipped = wbc->pages_skipped;
		__entry->sync_mode = wbc->sync_mode;
		__entry->for_kupdate = wbc->for_kupdate;
		__entry->for_background = wbc->for_background;
		__entry->for_reclaim = wbc->for_reclaim;
		__entry->range_cyclic = wbc->range_cyclic;
		__entry->range_start = (long)wbc->range_start;
		__entry->range_end = (long)wbc->range_end;
	),

	TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d "
		  "bgrd=%d reclm=%d cyclic=%d "
		  "start=0x%lx end=0x%lx",
		  __entry->name,
		  __entry->nr_to_write,
		  __entry->pages_skipped,
		  __entry->sync_mode,
		  __entry->for_kupdate,
		  __entry->for_background,
		  __entry->for_reclaim,
		  __entry->range_cyclic,
		  __entry->range_start,
		  __entry->range_end)
)

#define DEFINE_WBC_EVENT(name) \
DEFINE_EVENT(wbc_class, name, \
	TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi), \
	TP_ARGS(wbc, bdi))
DEFINE_WBC_EVENT(wbc_writepage);

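/*
 * writeback_queue_io: reports how many dirty inodes were moved onto the
 * per-bdi I/O list in one pass, together with the "older than this"
 * dirtied-before cutoff (absolute jiffies and relative age in ms; 0/-1
 * when no cutoff was supplied).
 */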
TRACE_EVENT(writeback_queue_io,
	TP_PROTO(struct bdi_writeback *wb,
		 unsigned long *older_than_this,
		 int moved),
	TP_ARGS(wb, older_than_this, moved),
	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(unsigned long, older)
		__field(long, age)
		__field(int, moved)
	),
	TP_fast_assign(
		strncpy(__entry->name, dev_name(wb->bdi->dev), 32);
		__entry->older = older_than_this ? *older_than_this : 0;
		__entry->age = older_than_this ?
				(jiffies - *older_than_this) * 1000 / HZ : -1;
		__entry->moved = moved;
	),
	TP_printk("bdi %s: older=%lu age=%ld enqueue=%d",
		  __entry->name,
		  __entry->older,	/* older_than_this in jiffies */
		  __entry->age,		/* older_than_this in relative milliseconds */
		  __entry->moved)
);

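/*
 * global_dirty_state: samples the system-wide dirty page accounting
 * (dirty, under-writeback and unstable-NFS page counts, plus total pages
 * dirtied and written) alongside the background/dirty thresholds and the
 * current global dirty limit.
 */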
TRACE_EVENT(global_dirty_state,

	TP_PROTO(unsigned long background_thresh,
		 unsigned long dirty_thresh
	),

	TP_ARGS(background_thresh,
		dirty_thresh
	),

	TP_STRUCT__entry(
		__field(unsigned long, nr_dirty)
		__field(unsigned long, nr_writeback)
		__field(unsigned long, nr_unstable)
		__field(unsigned long, background_thresh)
		__field(unsigned long, dirty_thresh)
		__field(unsigned long, dirty_limit)
		__field(unsigned long, nr_dirtied)
		__field(unsigned long, nr_written)
	),

	TP_fast_assign(
		__entry->nr_dirty = global_page_state(NR_FILE_DIRTY);
		__entry->nr_writeback = global_page_state(NR_WRITEBACK);
		__entry->nr_unstable = global_page_state(NR_UNSTABLE_NFS);
		__entry->nr_dirtied = global_page_state(NR_DIRTIED);
		__entry->nr_written = global_page_state(NR_WRITTEN);
		__entry->background_thresh = background_thresh;
		__entry->dirty_thresh = dirty_thresh;
		__entry->dirty_limit = global_dirty_limit;
	),

	TP_printk("dirty=%lu writeback=%lu unstable=%lu "
		  "bg_thresh=%lu thresh=%lu limit=%lu "
		  "dirtied=%lu written=%lu",
		  __entry->nr_dirty,
		  __entry->nr_writeback,
		  __entry->nr_unstable,
		  __entry->background_thresh,
		  __entry->dirty_thresh,
		  __entry->dirty_limit,
		  __entry->nr_dirtied,
		  __entry->nr_written
	)
);

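/*
 * KBps() converts a value counted in pages (or pages/second) to KiB
 * (or KiB/s).  The shift assumes PAGE_SHIFT >= 10, i.e. pages of at least
 * 1 KiB, which holds on all Linux architectures.
 */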
#define KBps(x) ((x) << (PAGE_SHIFT - 10))

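/*
 * bdi_dirty_ratelimit: reports the bdi's measured and averaged write
 * bandwidth, the observed dirty rate, and the base/task/balanced dirty
 * ratelimits, all converted to KB/s via KBps().
 */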
TRACE_EVENT(bdi_dirty_ratelimit,

	TP_PROTO(struct backing_dev_info *bdi,
		 unsigned long dirty_rate,
		 unsigned long task_ratelimit),

	TP_ARGS(bdi, dirty_rate, task_ratelimit),

	TP_STRUCT__entry(
		__array(char, bdi, 32)
		__field(unsigned long, write_bw)
		__field(unsigned long, avg_write_bw)
		__field(unsigned long, dirty_rate)
		__field(unsigned long, dirty_ratelimit)
		__field(unsigned long, task_ratelimit)
		__field(unsigned long, balanced_dirty_ratelimit)
	),

	TP_fast_assign(
		strlcpy(__entry->bdi, dev_name(bdi->dev), 32);
		__entry->write_bw = KBps(bdi->write_bandwidth);
		__entry->avg_write_bw = KBps(bdi->avg_write_bandwidth);
		__entry->dirty_rate = KBps(dirty_rate);
		__entry->dirty_ratelimit = KBps(bdi->dirty_ratelimit);
		__entry->task_ratelimit = KBps(task_ratelimit);
		__entry->balanced_dirty_ratelimit =
					KBps(bdi->balanced_dirty_ratelimit);
	),

	TP_printk("bdi %s: "
		  "write_bw=%lu awrite_bw=%lu dirty_rate=%lu "
		  "dirty_ratelimit=%lu task_ratelimit=%lu "
		  "balanced_dirty_ratelimit=%lu",
		  __entry->bdi,
		  __entry->write_bw,		/* write bandwidth */
		  __entry->avg_write_bw,	/* avg write bandwidth */
		  __entry->dirty_rate,		/* bdi dirty rate */
		  __entry->dirty_ratelimit,	/* base ratelimit */
		  __entry->task_ratelimit,	/* ratelimit with position control */
		  __entry->balanced_dirty_ratelimit /* the balanced ratelimit */
	)
);

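/*
 * balance_dirty_pages: records one throttling decision for a dirtying
 * task: the global dirty limit and setpoint, the per-bdi setpoint and
 * dirty count, the ratelimits in KB/s, the pages dirtied, the time elapsed
 * since start_time and the current pause, both in milliseconds.
 */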
TRACE_EVENT(balance_dirty_pages,

	TP_PROTO(struct backing_dev_info *bdi,
		 unsigned long thresh,
		 unsigned long bg_thresh,
		 unsigned long dirty,
		 unsigned long bdi_thresh,
		 unsigned long bdi_dirty,
		 unsigned long dirty_ratelimit,
		 unsigned long task_ratelimit,
		 unsigned long dirtied,
		 long pause,
		 unsigned long start_time),

	TP_ARGS(bdi, thresh, bg_thresh, dirty, bdi_thresh, bdi_dirty,
		dirty_ratelimit, task_ratelimit,
		dirtied, pause, start_time),

	TP_STRUCT__entry(
		__array(char, bdi, 32)
		__field(unsigned long, limit)
		__field(unsigned long, setpoint)
		__field(unsigned long, dirty)
		__field(unsigned long, bdi_setpoint)
		__field(unsigned long, bdi_dirty)
		__field(unsigned long, dirty_ratelimit)
		__field(unsigned long, task_ratelimit)
		__field(unsigned int, dirtied)
		__field(unsigned int, dirtied_pause)
		__field(unsigned long, paused)
		__field(long, pause)
	),

	TP_fast_assign(
		unsigned long freerun = (thresh + bg_thresh) / 2;
		strlcpy(__entry->bdi, dev_name(bdi->dev), 32);

		__entry->limit = global_dirty_limit;
		__entry->setpoint = (global_dirty_limit + freerun) / 2;
		__entry->dirty = dirty;
		__entry->bdi_setpoint = __entry->setpoint *
						bdi_thresh / (thresh + 1);
		__entry->bdi_dirty = bdi_dirty;
		__entry->dirty_ratelimit = KBps(dirty_ratelimit);
		__entry->task_ratelimit = KBps(task_ratelimit);
		__entry->dirtied = dirtied;
		__entry->dirtied_pause = current->nr_dirtied_pause;
		__entry->pause = pause * 1000 / HZ;
		__entry->paused = (jiffies - start_time) * 1000 / HZ;
	),


	TP_printk("bdi %s: "
		  "limit=%lu setpoint=%lu dirty=%lu "
		  "bdi_setpoint=%lu bdi_dirty=%lu "
		  "dirty_ratelimit=%lu task_ratelimit=%lu "
		  "dirtied=%u dirtied_pause=%u "
		  "paused=%lu pause=%ld",
		  __entry->bdi,
		  __entry->limit,
		  __entry->setpoint,
		  __entry->dirty,
		  __entry->bdi_setpoint,
		  __entry->bdi_dirty,
		  __entry->dirty_ratelimit,
		  __entry->task_ratelimit,
		  __entry->dirtied,
		  __entry->dirtied_pause,
		  __entry->paused,	/* ms */
		  __entry->pause	/* ms */
	)
);

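/*
 * writeback_congest_waited_template: records how long a caller was willing
 * to wait for congestion to clear versus how long it actually waited, both
 * in microseconds.
 */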
DECLARE_EVENT_CLASS(writeback_congest_waited_template,

	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

	TP_ARGS(usec_timeout, usec_delayed),

	TP_STRUCT__entry(
		__field(unsigned int, usec_timeout)
		__field(unsigned int, usec_delayed)
	),

	TP_fast_assign(
		__entry->usec_timeout = usec_timeout;
		__entry->usec_delayed = usec_delayed;
	),

	TP_printk("usec_timeout=%u usec_delayed=%u",
		  __entry->usec_timeout,
		  __entry->usec_delayed)
);

DEFINE_EVENT(writeback_congest_waited_template, writeback_congestion_wait,

	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

	TP_ARGS(usec_timeout, usec_delayed)
);

DEFINE_EVENT(writeback_congest_waited_template, writeback_wait_iff_congested,

	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

	TP_ARGS(usec_timeout, usec_delayed)
);

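/*
 * writeback_single_inode_template: traces writeback of a single inode,
 * showing its i_state flags, when it was dirtied (and its age in seconds),
 * the mapping's writeback index, the nr_to_write budget and how many pages
 * were written against that budget (budget before minus budget after).
 */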
DECLARE_EVENT_CLASS(writeback_single_inode_template,

	TP_PROTO(struct inode *inode,
		 struct writeback_control *wbc,
		 unsigned long nr_to_write
	),

	TP_ARGS(inode, wbc, nr_to_write),

	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(unsigned long, ino)
		__field(unsigned long, state)
		__field(unsigned long, dirtied_when)
		__field(unsigned long, writeback_index)
		__field(long, nr_to_write)
		__field(unsigned long, wrote)
	),

	TP_fast_assign(
		strncpy(__entry->name,
			dev_name(inode->i_mapping->backing_dev_info->dev), 32);
		__entry->ino = inode->i_ino;
		__entry->state = inode->i_state;
		__entry->dirtied_when = inode->dirtied_when;
		__entry->writeback_index = inode->i_mapping->writeback_index;
		__entry->nr_to_write = nr_to_write;
		__entry->wrote = nr_to_write - wbc->nr_to_write;
	),

	TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu "
		  "index=%lu to_write=%ld wrote=%lu",
		  __entry->name,
		  __entry->ino,
		  show_inode_state(__entry->state),
		  __entry->dirtied_when,
		  (jiffies - __entry->dirtied_when) / HZ,
		  __entry->writeback_index,
		  __entry->nr_to_write,
		  __entry->wrote
	)
);

DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode_requeue,
	TP_PROTO(struct inode *inode,
		 struct writeback_control *wbc,
		 unsigned long nr_to_write),
	TP_ARGS(inode, wbc, nr_to_write)
);

DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode,
	TP_PROTO(struct inode *inode,
		 struct writeback_control *wbc,
		 unsigned long nr_to_write),
	TP_ARGS(inode, wbc, nr_to_write)
);

#endif /* _TRACE_WRITEBACK_H */

/* This part must be outside protection */
#include <trace/define_trace.h>