#undef TRACE_SYSTEM
#define TRACE_SYSTEM sched

#if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SCHED_H

#include <linux/sched.h>
#include <linux/tracepoint.h>

/*
 * Tracepoint for calling kthread_stop, performed to end a kthread:
 */
TRACE_EVENT(sched_kthread_stop,

	TP_PROTO(struct task_struct *t),

	TP_ARGS(t),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, t->comm, TASK_COMM_LEN);
		__entry->pid	= t->pid;
	),

	TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
);

/*
 * Tracepoint for the return value of the kthread stopping:
 */
TRACE_EVENT(sched_kthread_stop_ret,

	TP_PROTO(int ret),

	TP_ARGS(ret),

	TP_STRUCT__entry(
		__field(	int,	ret	)
	),

	TP_fast_assign(
		__entry->ret	= ret;
	),

	TP_printk("ret=%d", __entry->ret)
);
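
/*
 * Illustrative sketch (an assumption about the caller, not defined in this
 * header): the two kthread events above are meant to bracket a
 * kthread_stop()-style caller, roughly:
 *
 *	trace_sched_kthread_stop(k);
 *	... wake the kthread and wait for it to finish ...
 *	ret = k->exit_code;		(however the caller collects the result)
 *	trace_sched_kthread_stop_ret(ret);
 *	return ret;
 */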

/*
 * Tracepoint for waking up a task:
 */
DECLARE_EVENT_CLASS(sched_wakeup_template,

	TP_PROTO(struct task_struct *p, int success),

	TP_ARGS(p, success),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
		__field(	int,	success			)
		__field(	int,	target_cpu		)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid		= p->pid;
		__entry->prio		= p->prio;
		__entry->success	= success;
		__entry->target_cpu	= task_cpu(p);
	),

	TP_printk("comm=%s pid=%d prio=%d success=%d target_cpu=%03d",
		  __entry->comm, __entry->pid, __entry->prio,
		  __entry->success, __entry->target_cpu)
);

DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
	     TP_PROTO(struct task_struct *p, int success),
	     TP_ARGS(p, success));

/*
 * Tracepoint for waking up a new task:
 */
DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
	     TP_PROTO(struct task_struct *p, int success),
	     TP_ARGS(p, success));
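
/*
 * Usage sketch (an assumption, the call sites live in the scheduler, not in
 * this file): each DEFINE_EVENT() above generates a trace_<name>() hook with
 * the class prototype, so the wakeup path can emit, for example:
 *
 *	trace_sched_wakeup(p, 1);	for an existing task being woken
 *	trace_sched_wakeup_new(p, 1);	for a freshly forked task
 *
 * recording comm/pid/prio, the success flag and task_cpu(p) at wakeup time.
 */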

#ifdef CREATE_TRACE_POINTS
static inline long __trace_sched_switch_state(struct task_struct *p)
{
	long state = p->state;

#ifdef CONFIG_PREEMPT
	/*
	 * For all intents and purposes a preempted task is a running task.
	 */
	if (task_thread_info(p)->preempt_count & PREEMPT_ACTIVE)
		state = TASK_RUNNING | TASK_STATE_MAX;
#endif

	return state;
}
#endif
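
/*
 * Worked example, derived from the helper above: under CONFIG_PREEMPT, a
 * task preempted while runnable (PREEMPT_ACTIVE set in its preempt_count)
 * is reported as
 *
 *	state = TASK_RUNNING | TASK_STATE_MAX;
 *
 * The sched_switch TP_printk() below masks TASK_STATE_MAX off before
 * decoding the state letters and turns the extra bit into a trailing "+",
 * so such a switch shows up as prev_state=R+ rather than prev_state=R.
 */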

/*
 * Tracepoint for task switches, performed by the scheduler:
 */
TRACE_EVENT(sched_switch,

	TP_PROTO(struct task_struct *prev,
		 struct task_struct *next),

	TP_ARGS(prev, next),

	TP_STRUCT__entry(
		__array(	char,	prev_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	prev_pid			)
		__field(	int,	prev_prio			)
		__field(	long,	prev_state			)
		__array(	char,	next_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	next_pid			)
		__field(	int,	next_prio			)
	),

	TP_fast_assign(
		memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
		__entry->prev_pid	= prev->pid;
		__entry->prev_prio	= prev->prio;
		__entry->prev_state	= __trace_sched_switch_state(prev);
		memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
		__entry->next_pid	= next->pid;
		__entry->next_prio	= next->prio;
	),

	TP_printk("prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%s%s ==> next_comm=%s next_pid=%d next_prio=%d",
		__entry->prev_comm, __entry->prev_pid, __entry->prev_prio,
		__entry->prev_state & (TASK_STATE_MAX-1) ?
		  __print_flags(__entry->prev_state & (TASK_STATE_MAX-1), "|",
				{ 1, "S" }, { 2, "D" }, { 4, "T" }, { 8, "t" },
				{ 16, "Z" }, { 32, "X" }, { 64, "x" },
				{ 128, "W" }) : "R",
		__entry->prev_state & TASK_STATE_MAX ? "+" : "",
		__entry->next_comm, __entry->next_pid, __entry->next_prio)
);
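
/*
 * Example of the resulting event payload (trace-buffer header omitted and
 * all values made up for illustration):
 *
 *	sched_switch: prev_comm=bash prev_pid=2143 prev_prio=120 prev_state=S
 *	==> next_comm=swapper/0 next_pid=0 next_prio=120
 */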

/*
 * Tracepoint for a task being migrated:
 */
TRACE_EVENT(sched_migrate_task,

	TP_PROTO(struct task_struct *p, int dest_cpu),

	TP_ARGS(p, dest_cpu),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
		__field(	int,	orig_cpu		)
		__field(	int,	dest_cpu		)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid		= p->pid;
		__entry->prio		= p->prio;
		__entry->orig_cpu	= task_cpu(p);
		__entry->dest_cpu	= dest_cpu;
	),

	TP_printk("comm=%s pid=%d prio=%d orig_cpu=%d dest_cpu=%d",
		  __entry->comm, __entry->pid, __entry->prio,
		  __entry->orig_cpu, __entry->dest_cpu)
);

DECLARE_EVENT_CLASS(sched_process_template,

	TP_PROTO(struct task_struct *p),

	TP_ARGS(p),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid		= p->pid;
		__entry->prio		= p->prio;
	),

	TP_printk("comm=%s pid=%d prio=%d",
		  __entry->comm, __entry->pid, __entry->prio)
);

/*
 * Tracepoint for freeing a task:
 */
DEFINE_EVENT(sched_process_template, sched_process_free,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));


/*
 * Tracepoint for a task exiting:
 */
DEFINE_EVENT(sched_process_template, sched_process_exit,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

/*
 * Tracepoint for waiting on task to unschedule:
 */
DEFINE_EVENT(sched_process_template, sched_wait_task,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));
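
/*
 * User-space usage sketch (assuming debugfs is mounted at /sys/kernel/debug):
 * every event defined in this file appears under the tracing events
 * directory and can be enabled individually or as a whole subsystem, e.g.:
 *
 *	echo 1 > /sys/kernel/debug/tracing/events/sched/sched_process_exit/enable
 *	echo 1 > /sys/kernel/debug/tracing/events/sched/enable
 *	cat /sys/kernel/debug/tracing/trace
 */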

/*
 * Tracepoint for a waiting task:
 */
TRACE_EVENT(sched_process_wait,

	TP_PROTO(struct pid *pid),

	TP_ARGS(pid),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
		__entry->pid		= pid_nr(pid);
		__entry->prio		= current->prio;
	),

	TP_printk("comm=%s pid=%d prio=%d",
		  __entry->comm, __entry->pid, __entry->prio)
);

/*
 * Tracepoint for do_fork:
 */
TRACE_EVENT(sched_process_fork,

	TP_PROTO(struct task_struct *parent, struct task_struct *child),

	TP_ARGS(parent, child),

	TP_STRUCT__entry(
		__array(	char,	parent_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	parent_pid			)
		__array(	char,	child_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	child_pid			)
	),

	TP_fast_assign(
		memcpy(__entry->parent_comm, parent->comm, TASK_COMM_LEN);
		__entry->parent_pid	= parent->pid;
		memcpy(__entry->child_comm, child->comm, TASK_COMM_LEN);
		__entry->child_pid	= child->pid;
	),

	TP_printk("comm=%s pid=%d child_comm=%s child_pid=%d",
		  __entry->parent_comm, __entry->parent_pid,
		  __entry->child_comm, __entry->child_pid)
);
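
/*
 * Call-site sketch (an assumption about the caller, which lives in
 * kernel/fork.c rather than here): do_fork() is expected to fire this event
 * once the child has been set up, along the lines of:
 *
 *	p = copy_process(...);
 *	if (!IS_ERR(p))
 *		trace_sched_process_fork(current, p);
 */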

/*
 * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE
 *     adding sched_stat support to SCHED_FIFO/RR would be welcome.
 */
DECLARE_EVENT_CLASS(sched_stat_template,

	TP_PROTO(struct task_struct *tsk, u64 delay),

	TP_ARGS(tsk, delay),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN	)
		__field( pid_t,	pid			)
		__field( u64,	delay			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid	= tsk->pid;
		__entry->delay	= delay;
	)
	TP_perf_assign(
		__perf_count(delay);
	),

	TP_printk("comm=%s pid=%d delay=%Lu [ns]",
		  __entry->comm, __entry->pid,
		  (unsigned long long)__entry->delay)
);


/*
 * Tracepoint for accounting wait time (time the task is runnable
 * but not actually running due to scheduler contention).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_wait,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting sleep time (time the task is not runnable,
 * including iowait, see below).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_sleep,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting iowait time (time the task is not runnable
 * due to waiting on IO to complete).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_iowait,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting blocked time (time the task spends in
 * uninterruptible sleep).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_blocked,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));
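
/*
 * Note on TP_perf_assign()/__perf_count(delay) in the class above
 * (best-effort explanation, not stated in this file): when one of these
 * events is opened through perf, each hit is expected to add 'delay'
 * (nanoseconds) to the event count instead of 1, so for example
 *
 *	perf stat -e sched:sched_stat_iowait -a sleep 1
 *
 * should report accumulated iowait time rather than a raw hit count.
 */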

/*
 * Tracepoint for accounting runtime (time the task is executing
 * on a CPU).
 */
TRACE_EVENT(sched_stat_runtime,

	TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),

	TP_ARGS(tsk, runtime, vruntime),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN	)
		__field( pid_t,	pid			)
		__field( u64,	runtime			)
		__field( u64,	vruntime		)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid		= tsk->pid;
		__entry->runtime	= runtime;
		__entry->vruntime	= vruntime;
	)
	TP_perf_assign(
		__perf_count(runtime);
	),

	TP_printk("comm=%s pid=%d runtime=%Lu [ns] vruntime=%Lu [ns]",
		  __entry->comm, __entry->pid,
		  (unsigned long long)__entry->runtime,
		  (unsigned long long)__entry->vruntime)
);

#ifdef CREATE_TRACE_POINTS
static inline u64 trace_get_sleeptime(struct task_struct *tsk)
{
#ifdef CONFIG_SCHEDSTATS
	u64 block, sleep;

	block = tsk->se.statistics.block_start;
	sleep = tsk->se.statistics.sleep_start;
	tsk->se.statistics.block_start = 0;
	tsk->se.statistics.sleep_start = 0;

	return block ? block : sleep ? sleep : 0;
#else
	return 0;
#endif
}
#endif
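
/*
 * Worked example for the helper above: it returns the schedstats timestamp
 * at which the task last blocked (block_start) or went to sleep
 * (sleep_start), clearing it in the process; sched_stat_sleeptime below
 * then records "now - timestamp". For instance (made-up values):
 *
 *	sleep_start = 1000000 ns, now = 4000000 ns  ->  sleeptime=3000000 [ns]
 *
 * If neither timestamp is set, or CONFIG_SCHEDSTATS is off, it returns 0
 * and the event reports sleeptime=0.
 */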

/*
 * Tracepoint for accounting sleeptime (time the task is sleeping
 * or waiting for I/O).
 */
TRACE_EVENT(sched_stat_sleeptime,

	TP_PROTO(struct task_struct *tsk, u64 now),

	TP_ARGS(tsk, now),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN	)
		__field( pid_t,	pid			)
		__field( u64,	sleeptime		)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid		= tsk->pid;
		__entry->sleeptime	= trace_get_sleeptime(tsk);
		__entry->sleeptime	= __entry->sleeptime ?
		  now - __entry->sleeptime : 0;
	)
	TP_perf_assign(
		__perf_count(__entry->sleeptime);
	),

	TP_printk("comm=%s pid=%d sleeptime=%Lu [ns]",
		  __entry->comm, __entry->pid,
		  (unsigned long long)__entry->sleeptime)
);

/*
 * Tracepoint for showing priority inheritance modifying a task's
 * priority.
 */
TRACE_EVENT(sched_pi_setprio,

	TP_PROTO(struct task_struct *tsk, int newprio),

	TP_ARGS(tsk, newprio),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN	)
		__field( pid_t,	pid			)
		__field( int,	oldprio			)
		__field( int,	newprio			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid		= tsk->pid;
		__entry->oldprio	= tsk->prio;
		__entry->newprio	= newprio;
	),

	TP_printk("comm=%s pid=%d oldprio=%d newprio=%d",
		  __entry->comm, __entry->pid,
		  __entry->oldprio, __entry->newprio)
);
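
/*
 * Call-site sketch (an assumption; the caller lives in the scheduler's
 * rt-mutex priority-inheritance path, not in this header): the boosting
 * code would emit the event before applying the new priority, e.g.:
 *
 *	trace_sched_pi_setprio(p, prio);
 *	... dequeue p, update p->prio, requeue p ...
 */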

#endif /* _TRACE_SCHED_H */

/* This part must be outside protection */
#include <trace/define_trace.h>