#undef TRACE_SYSTEM
#define TRACE_SYSTEM sched

#if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SCHED_H

#include <linux/sched.h>
#include <linux/tracepoint.h>
#include <linux/binfmts.h>

struct rq;

/*
 * Tracepoint for calling kthread_stop, performed to end a kthread:
 */
TRACE_EVENT(sched_kthread_stop,

	TP_PROTO(struct task_struct *t),

	TP_ARGS(t),

	TP_STRUCT__entry(
		__array( char, comm, TASK_COMM_LEN )
		__field( pid_t, pid )
	),

	TP_fast_assign(
		memcpy(__entry->comm, t->comm, TASK_COMM_LEN);
		__entry->pid = t->pid;
	),

	TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
);

/*
 * Tracepoint for the return value of the kthread stopping:
 */
TRACE_EVENT(sched_kthread_stop_ret,

	TP_PROTO(int ret),

	TP_ARGS(ret),

	TP_STRUCT__entry(
		__field( int, ret )
	),

	TP_fast_assign(
		__entry->ret = ret;
	),

	TP_printk("ret=%d", __entry->ret)
);

/*
 * Tracepoint for task enqueue/dequeue:
 */
TRACE_EVENT(sched_enq_deq_task,

	TP_PROTO(struct task_struct *p, bool enqueue, unsigned int cpus_allowed),

	TP_ARGS(p, enqueue, cpus_allowed),

	TP_STRUCT__entry(
		__array( char, comm, TASK_COMM_LEN )
		__field( pid_t, pid )
		__field( int, prio )
		__field( int, cpu )
		__field( bool, enqueue )
		__field(unsigned int, nr_running )
		__field(unsigned long, cpu_load )
		__field(unsigned int, rt_nr_running )
		__field(unsigned int, cpus_allowed )
#ifdef CONFIG_SCHED_HMP
		__field(unsigned int, demand )
		__field(unsigned int, pred_demand )
#endif
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid = p->pid;
		__entry->prio = p->prio;
		__entry->cpu = task_cpu(p);
		__entry->enqueue = enqueue;
		__entry->nr_running = task_rq(p)->nr_running;
		__entry->cpu_load = task_rq(p)->cpu_load[0];
		__entry->rt_nr_running = task_rq(p)->rt.rt_nr_running;
		__entry->cpus_allowed = cpus_allowed;
#ifdef CONFIG_SCHED_HMP
		__entry->demand = p->ravg.demand;
		__entry->pred_demand = p->ravg.pred_demand;
#endif
	),

	TP_printk("cpu=%d %s comm=%s pid=%d prio=%d nr_running=%u cpu_load=%lu rt_nr_running=%u affine=%x"
#ifdef CONFIG_SCHED_HMP
		" demand=%u pred_demand=%u"
#endif
		, __entry->cpu,
		__entry->enqueue ? "enqueue" : "dequeue",
		__entry->comm, __entry->pid,
		__entry->prio, __entry->nr_running,
		__entry->cpu_load, __entry->rt_nr_running, __entry->cpus_allowed
#ifdef CONFIG_SCHED_HMP
		, __entry->demand, __entry->pred_demand
#endif
		)
);

#ifdef CONFIG_SCHED_HMP

struct group_cpu_time;
struct migration_sum_data;
extern const char *task_event_names[];
extern const char *migrate_type_names[];

TRACE_EVENT(sched_task_load,

	TP_PROTO(struct task_struct *p, bool boost, int reason,
		 bool sync, bool need_idle, u32 flags, int best_cpu),

	TP_ARGS(p, boost, reason, sync, need_idle, flags, best_cpu),

	TP_STRUCT__entry(
		__array( char, comm, TASK_COMM_LEN )
		__field( pid_t, pid )
		__field(unsigned int, demand )
		__field( bool, boost )
		__field( int, reason )
		__field( bool, sync )
		__field( bool, need_idle )
		__field( u32, flags )
		__field( int, best_cpu )
		__field( u64, latency )
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid = p->pid;
		__entry->demand = p->ravg.demand;
		__entry->boost = boost;
		__entry->reason = reason;
		__entry->sync = sync;
		__entry->need_idle = need_idle;
		__entry->flags = flags;
		__entry->best_cpu = best_cpu;
		__entry->latency = p->state == TASK_WAKING ?
					sched_ktime_clock() -
					p->ravg.mark_start : 0;
	),

	TP_printk("%d (%s): demand=%u boost=%d reason=%d sync=%d need_idle=%d flags=%x best_cpu=%d latency=%llu",
		__entry->pid, __entry->comm, __entry->demand,
		__entry->boost, __entry->reason, __entry->sync,
		__entry->need_idle, __entry->flags,
		__entry->best_cpu, __entry->latency)
);

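/*
 * Tracepoint for setting a related_thread_group's preferred cluster,
 * logged as the group id, its total demand and the cluster's first CPU:
 */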
TRACE_EVENT(sched_set_preferred_cluster,

	TP_PROTO(struct related_thread_group *grp, u64 total_demand),

	TP_ARGS(grp, total_demand),

	TP_STRUCT__entry(
		__field( int, id )
		__field( u64, demand )
		__field( int, cluster_first_cpu )
	),

	TP_fast_assign(
		__entry->id = grp->id;
		__entry->demand = total_demand;
		__entry->cluster_first_cpu = grp->preferred_cluster ?
					cluster_first_cpu(grp->preferred_cluster)
					: -1;
	),

	TP_printk("group_id %d total_demand %llu preferred_cluster_first_cpu %d",
			__entry->id, __entry->demand,
			__entry->cluster_first_cpu)
);

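/*
 * Event class for per-CPU load snapshots taken on the wakeup, load-balance
 * and cgroup paths: rq occupancy, big-task count, capacity, irqload, power
 * cost and low-power states:
 */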
DECLARE_EVENT_CLASS(sched_cpu_load,

	TP_PROTO(struct rq *rq, int idle, u64 irqload, unsigned int power_cost, int temp),

	TP_ARGS(rq, idle, irqload, power_cost, temp),

	TP_STRUCT__entry(
		__field(unsigned int, cpu )
		__field(unsigned int, idle )
		__field(unsigned int, nr_running )
		__field(unsigned int, nr_big_tasks )
		__field(unsigned int, load_scale_factor )
		__field(unsigned int, capacity )
		__field( u64, cumulative_runnable_avg )
		__field( u64, irqload )
		__field(unsigned int, max_freq )
		__field(unsigned int, power_cost )
		__field( int, cstate )
		__field( int, dstate )
		__field( int, temp )
	),

	TP_fast_assign(
		__entry->cpu = rq->cpu;
		__entry->idle = idle;
		__entry->nr_running = rq->nr_running;
		__entry->nr_big_tasks = rq->hmp_stats.nr_big_tasks;
		__entry->load_scale_factor = cpu_load_scale_factor(rq->cpu);
		__entry->capacity = cpu_capacity(rq->cpu);
		__entry->cumulative_runnable_avg = rq->hmp_stats.cumulative_runnable_avg;
		__entry->irqload = irqload;
		__entry->max_freq = cpu_max_freq(rq->cpu);
		__entry->power_cost = power_cost;
		__entry->cstate = rq->cstate;
		__entry->dstate = rq->cluster->dstate;
		__entry->temp = temp;
	),

	TP_printk("cpu %u idle %d nr_run %u nr_big %u lsf %u capacity %u cr_avg %llu irqload %llu fmax %u power_cost %u cstate %d dstate %d temp %d",
		__entry->cpu, __entry->idle, __entry->nr_running, __entry->nr_big_tasks,
		__entry->load_scale_factor, __entry->capacity,
		__entry->cumulative_runnable_avg, __entry->irqload,
		__entry->max_freq, __entry->power_cost, __entry->cstate,
		__entry->dstate, __entry->temp)
);

DEFINE_EVENT(sched_cpu_load, sched_cpu_load_wakeup,
	TP_PROTO(struct rq *rq, int idle, u64 irqload, unsigned int power_cost, int temp),
	TP_ARGS(rq, idle, irqload, power_cost, temp)
);

DEFINE_EVENT(sched_cpu_load, sched_cpu_load_lb,
	TP_PROTO(struct rq *rq, int idle, u64 irqload, unsigned int power_cost, int temp),
	TP_ARGS(rq, idle, irqload, power_cost, temp)
);

DEFINE_EVENT(sched_cpu_load, sched_cpu_load_cgroup,
	TP_PROTO(struct rq *rq, int idle, u64 irqload, unsigned int power_cost, int temp),
	TP_ARGS(rq, idle, irqload, power_cost, temp)
);

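/*
 * Tracepoint for changes to the scheduler boost reference count:
 */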
TRACE_EVENT(sched_set_boost,

	TP_PROTO(int ref_count),

	TP_ARGS(ref_count),

	TP_STRUCT__entry(
		__field(unsigned int, ref_count )
	),

	TP_fast_assign(
		__entry->ref_count = ref_count;
	),

	TP_printk("ref_count=%d", __entry->ref_count)
);

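/*
 * Tracepoint for window-based task demand accounting: dumps the task's and
 * runqueue's current/previous window sums each time p->ravg is updated:
 */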
TRACE_EVENT(sched_update_task_ravg,

	TP_PROTO(struct task_struct *p, struct rq *rq, enum task_event evt,
		 u64 wallclock, u64 irqtime, u64 cycles, u64 exec_time,
		 struct group_cpu_time *cpu_time),

	TP_ARGS(p, rq, evt, wallclock, irqtime, cycles, exec_time, cpu_time),

	TP_STRUCT__entry(
		__array( char, comm, TASK_COMM_LEN )
		__field( pid_t, pid )
		__field( pid_t, cur_pid )
		__field(unsigned int, cur_freq )
		__field( u64, wallclock )
		__field( u64, mark_start )
		__field( u64, delta_m )
		__field( u64, win_start )
		__field( u64, delta )
		__field( u64, irqtime )
		__field(enum task_event, evt )
		__field(unsigned int, demand )
		__field(unsigned int, sum )
		__field( int, cpu )
		__field(unsigned int, pred_demand )
		__field( u64, rq_cs )
		__field( u64, rq_ps )
		__field( u64, grp_cs )
		__field( u64, grp_ps )
		__field( u64, grp_nt_cs )
		__field( u64, grp_nt_ps )
		__field( u32, curr_window )
		__field( u32, prev_window )
		__field( u64, nt_cs )
		__field( u64, nt_ps )
		__field( u32, active_windows )
	),

	TP_fast_assign(
		__entry->wallclock = wallclock;
		__entry->win_start = rq->window_start;
		__entry->delta = (wallclock - rq->window_start);
		__entry->evt = evt;
		__entry->cpu = rq->cpu;
		__entry->cur_pid = rq->curr->pid;
		__entry->cur_freq = cpu_cycles_to_freq(cycles, exec_time);
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid = p->pid;
		__entry->mark_start = p->ravg.mark_start;
		__entry->delta_m = (wallclock - p->ravg.mark_start);
		__entry->demand = p->ravg.demand;
		__entry->sum = p->ravg.sum;
		__entry->irqtime = irqtime;
		__entry->pred_demand = p->ravg.pred_demand;
		__entry->rq_cs = rq->curr_runnable_sum;
		__entry->rq_ps = rq->prev_runnable_sum;
		__entry->grp_cs = cpu_time ? cpu_time->curr_runnable_sum : 0;
		__entry->grp_ps = cpu_time ? cpu_time->prev_runnable_sum : 0;
		__entry->grp_nt_cs = cpu_time ? cpu_time->nt_curr_runnable_sum : 0;
		__entry->grp_nt_ps = cpu_time ? cpu_time->nt_prev_runnable_sum : 0;
		__entry->curr_window = p->ravg.curr_window;
		__entry->prev_window = p->ravg.prev_window;
		__entry->nt_cs = rq->nt_curr_runnable_sum;
		__entry->nt_ps = rq->nt_prev_runnable_sum;
		__entry->active_windows = p->ravg.active_windows;
	),

	TP_printk("wc %llu ws %llu delta %llu event %s cpu %d cur_freq %u cur_pid %d task %d (%s) ms %llu delta %llu demand %u sum %u irqtime %llu pred_demand %u rq_cs %llu rq_ps %llu cur_window %u prev_window %u nt_cs %llu nt_ps %llu active_wins %u grp_cs %lld grp_ps %lld, grp_nt_cs %llu, grp_nt_ps: %llu"
		, __entry->wallclock, __entry->win_start, __entry->delta,
		task_event_names[__entry->evt], __entry->cpu,
		__entry->cur_freq, __entry->cur_pid,
		__entry->pid, __entry->comm, __entry->mark_start,
		__entry->delta_m, __entry->demand,
		__entry->sum, __entry->irqtime, __entry->pred_demand,
		__entry->rq_cs, __entry->rq_ps, __entry->curr_window,
		__entry->prev_window, __entry->nt_cs, __entry->nt_ps,
		__entry->active_windows, __entry->grp_cs,
		__entry->grp_ps, __entry->grp_nt_cs, __entry->grp_nt_ps)
);

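/*
 * Tracepoint for cycle-counter based frequency estimation, shown alongside
 * the legacy cpu_cur_freq() value for comparison:
 */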
TRACE_EVENT(sched_get_task_cpu_cycles,

	TP_PROTO(int cpu, int event, u64 cycles, u64 exec_time),

	TP_ARGS(cpu, event, cycles, exec_time),

	TP_STRUCT__entry(
		__field(int, cpu )
		__field(int, event )
		__field(u64, cycles )
		__field(u64, exec_time )
		__field(u32, freq )
		__field(u32, legacy_freq )
	),

	TP_fast_assign(
		__entry->cpu = cpu;
		__entry->event = event;
		__entry->cycles = cycles;
		__entry->exec_time = exec_time;
		__entry->freq = cpu_cycles_to_freq(cycles, exec_time);
		__entry->legacy_freq = cpu_cur_freq(cpu);
	),

	TP_printk("cpu=%d event=%d cycles=%llu exec_time=%llu freq=%u legacy_freq=%u",
		__entry->cpu, __entry->event, __entry->cycles,
		__entry->exec_time, __entry->freq, __entry->legacy_freq)
);

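/*
 * Tracepoint for updates to a task's demand history samples
 * (ravg.sum_history):
 */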
TRACE_EVENT(sched_update_history,

	TP_PROTO(struct rq *rq, struct task_struct *p, u32 runtime, int samples,
			enum task_event evt),

	TP_ARGS(rq, p, runtime, samples, evt),

	TP_STRUCT__entry(
		__array( char, comm, TASK_COMM_LEN )
		__field( pid_t, pid )
		__field(unsigned int, runtime )
		__field( int, samples )
		__field(enum task_event, evt )
		__field(unsigned int, demand )
		__field(unsigned int, pred_demand )
		__array( u32, hist, RAVG_HIST_SIZE_MAX)
		__field(unsigned int, nr_big_tasks )
		__field( int, cpu )
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid = p->pid;
		__entry->runtime = runtime;
		__entry->samples = samples;
		__entry->evt = evt;
		__entry->demand = p->ravg.demand;
		__entry->pred_demand = p->ravg.pred_demand;
		memcpy(__entry->hist, p->ravg.sum_history,
			RAVG_HIST_SIZE_MAX * sizeof(u32));
		__entry->nr_big_tasks = rq->hmp_stats.nr_big_tasks;
		__entry->cpu = rq->cpu;
	),

	TP_printk("%d (%s): runtime %u samples %d event %s demand %u pred_demand %u"
		" (hist: %u %u %u %u %u) cpu %d nr_big %u",
		__entry->pid, __entry->comm,
		__entry->runtime, __entry->samples,
		task_event_names[__entry->evt],
		__entry->demand, __entry->pred_demand,
		__entry->hist[0], __entry->hist[1],
		__entry->hist[2], __entry->hist[3],
		__entry->hist[4], __entry->cpu, __entry->nr_big_tasks)
);

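/*
 * Tracepoint for a reset of all window-based statistics, recording the
 * reason and the old/new value of the tunable that triggered it:
 */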
TRACE_EVENT(sched_reset_all_window_stats,

	TP_PROTO(u64 window_start, u64 window_size, u64 time_taken,
		int reason, unsigned int old_val, unsigned int new_val),

	TP_ARGS(window_start, window_size, time_taken,
		reason, old_val, new_val),

	TP_STRUCT__entry(
		__field( u64, window_start )
		__field( u64, window_size )
		__field( u64, time_taken )
		__field( int, reason )
		__field(unsigned int, old_val )
		__field(unsigned int, new_val )
	),

	TP_fast_assign(
		__entry->window_start = window_start;
		__entry->window_size = window_size;
		__entry->time_taken = time_taken;
		__entry->reason = reason;
		__entry->old_val = old_val;
		__entry->new_val = new_val;
	),

	TP_printk("time_taken %llu window_start %llu window_size %llu reason %s old_val %u new_val %u",
		__entry->time_taken, __entry->window_start,
		__entry->window_size,
		sched_window_reset_reasons[__entry->reason],
		__entry->old_val, __entry->new_val)
);

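/*
 * Tracepoint for updates to a task's predicted demand and its busy buckets:
 */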
TRACE_EVENT(sched_update_pred_demand,

	TP_PROTO(struct rq *rq, struct task_struct *p, u32 runtime, int pct,
		 unsigned int pred_demand),

	TP_ARGS(rq, p, runtime, pct, pred_demand),

	TP_STRUCT__entry(
		__array( char, comm, TASK_COMM_LEN )
		__field( pid_t, pid )
		__field(unsigned int, runtime )
		__field( int, pct )
		__field(unsigned int, pred_demand )
		__array( u8, bucket, NUM_BUSY_BUCKETS)
		__field( int, cpu )
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid = p->pid;
		__entry->runtime = runtime;
		__entry->pct = pct;
		__entry->pred_demand = pred_demand;
		memcpy(__entry->bucket, p->ravg.busy_buckets,
			NUM_BUSY_BUCKETS * sizeof(u8));
		__entry->cpu = rq->cpu;
	),

	TP_printk("%d (%s): runtime %u pct %d cpu %d pred_demand %u (buckets: %u %u %u %u %u %u %u %u %u %u)",
		__entry->pid, __entry->comm,
		__entry->runtime, __entry->pct, __entry->cpu,
		__entry->pred_demand, __entry->bucket[0], __entry->bucket[1],
		__entry->bucket[2], __entry->bucket[3], __entry->bucket[4],
		__entry->bucket[5], __entry->bucket[6], __entry->bucket[7],
		__entry->bucket[8], __entry->bucket[9])
);

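/*
 * Tracepoint for the runnable-sum fixups applied to the source and
 * destination rq (or group cpu_time) when a task migrates:
 */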
TRACE_EVENT(sched_migration_update_sum,

	TP_PROTO(struct task_struct *p, enum migrate_types migrate_type, struct migration_sum_data *d),

	TP_ARGS(p, migrate_type, d),

	TP_STRUCT__entry(
		__field(int, tcpu )
		__field(int, pid )
		__field( u64, cs )
		__field( u64, ps )
		__field( s64, nt_cs )
		__field( s64, nt_ps )
		__field(enum migrate_types, migrate_type )
		__field( s64, src_cs )
		__field( s64, src_ps )
		__field( s64, dst_cs )
		__field( s64, dst_ps )
		__field( s64, src_nt_cs )
		__field( s64, src_nt_ps )
		__field( s64, dst_nt_cs )
		__field( s64, dst_nt_ps )
	),

	TP_fast_assign(
		__entry->tcpu = task_cpu(p);
		__entry->pid = p->pid;
		__entry->migrate_type = migrate_type;
		__entry->src_cs = d->src_rq ?
			d->src_rq->curr_runnable_sum :
			d->src_cpu_time->curr_runnable_sum;
		__entry->src_ps = d->src_rq ?
			d->src_rq->prev_runnable_sum :
			d->src_cpu_time->prev_runnable_sum;
		__entry->dst_cs = d->dst_rq ?
			d->dst_rq->curr_runnable_sum :
			d->dst_cpu_time->curr_runnable_sum;
		__entry->dst_ps = d->dst_rq ?
			d->dst_rq->prev_runnable_sum :
			d->dst_cpu_time->prev_runnable_sum;
		__entry->src_nt_cs = d->src_rq ?
			d->src_rq->nt_curr_runnable_sum :
			d->src_cpu_time->nt_curr_runnable_sum;
		__entry->src_nt_ps = d->src_rq ?
			d->src_rq->nt_prev_runnable_sum :
			d->src_cpu_time->nt_prev_runnable_sum;
		__entry->dst_nt_cs = d->dst_rq ?
			d->dst_rq->nt_curr_runnable_sum :
			d->dst_cpu_time->nt_curr_runnable_sum;
		__entry->dst_nt_ps = d->dst_rq ?
			d->dst_rq->nt_prev_runnable_sum :
			d->dst_cpu_time->nt_prev_runnable_sum;
	),

	TP_printk("pid %d task_cpu %d migrate_type %s src_cs %llu src_ps %llu dst_cs %lld dst_ps %lld src_nt_cs %llu src_nt_ps %llu dst_nt_cs %lld dst_nt_ps %lld",
		__entry->pid, __entry->tcpu, migrate_type_names[__entry->migrate_type],
		__entry->src_cs, __entry->src_ps, __entry->dst_cs, __entry->dst_ps,
		__entry->src_nt_cs, __entry->src_nt_ps, __entry->dst_nt_cs, __entry->dst_nt_ps)
);

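/*
 * Tracepoint for reporting a CPU's busy time: total load, new-task load,
 * predicted load and the early-detection flag:
 */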
TRACE_EVENT(sched_get_busy,

	TP_PROTO(int cpu, u64 load, u64 nload, u64 pload, int early),

	TP_ARGS(cpu, load, nload, pload, early),

	TP_STRUCT__entry(
		__field( int, cpu )
		__field( u64, load )
		__field( u64, nload )
		__field( u64, pload )
		__field( int, early )
	),

	TP_fast_assign(
		__entry->cpu = cpu;
		__entry->load = load;
		__entry->nload = nload;
		__entry->pload = pload;
		__entry->early = early;
	),

	TP_printk("cpu %d load %lld new_task_load %lld predicted_load %lld early %d",
		__entry->cpu, __entry->load, __entry->nload,
		__entry->pload, __entry->early)
);

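/*
 * Tracepoint for load-change alerts: records the notification decision
 * (pd_notif), old busy time, previous runnable sum and old/new predicted
 * load for a CPU:
 */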
TRACE_EVENT(sched_freq_alert,

	TP_PROTO(int cpu, int pd_notif, int check_groups, struct rq *rq,
		u64 new_load),

	TP_ARGS(cpu, pd_notif, check_groups, rq, new_load),

	TP_STRUCT__entry(
		__field( int, cpu )
		__field( int, pd_notif )
		__field( int, check_groups )
		__field( u64, old_busy_time )
		__field( u64, ps )
		__field( u64, new_load )
		__field( u64, old_pred )
		__field( u64, new_pred )
	),

	TP_fast_assign(
		__entry->cpu = cpu;
		__entry->pd_notif = pd_notif;
		__entry->check_groups = check_groups;
		__entry->old_busy_time = rq->old_busy_time;
		__entry->ps = rq->prev_runnable_sum;
		__entry->new_load = new_load;
		__entry->old_pred = rq->old_estimated_time;
		__entry->new_pred = rq->hmp_stats.pred_demands_sum;
	),

	TP_printk("cpu %d pd_notif=%d check_groups %d old_busy_time=%llu prev_sum=%lld new_load=%llu old_pred=%llu new_pred=%llu",
		__entry->cpu, __entry->pd_notif, __entry->check_groups,
		__entry->old_busy_time, __entry->ps, __entry->new_load,
		__entry->old_pred, __entry->new_pred)
);

#endif /* CONFIG_SCHED_HMP */

/*
 * Tracepoint for waking up a task:
 */
DECLARE_EVENT_CLASS(sched_wakeup_template,

	TP_PROTO(struct task_struct *p),

	TP_ARGS(__perf_task(p)),

	TP_STRUCT__entry(
		__array( char, comm, TASK_COMM_LEN )
		__field( pid_t, pid )
		__field( int, prio )
		__field( int, success )
		__field( int, target_cpu )
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid = p->pid;
		__entry->prio = p->prio;
		__entry->success = 1; /* rudiment, kill when possible */
		__entry->target_cpu = task_cpu(p);
	),

	TP_printk("comm=%s pid=%d prio=%d target_cpu=%03d",
		__entry->comm, __entry->pid, __entry->prio,
		__entry->target_cpu)
);

/*
 * Tracepoint called when waking a task; this tracepoint is guaranteed to be
 * called from the waking context.
 */
DEFINE_EVENT(sched_wakeup_template, sched_waking,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

/*
 * Tracepoint called when the task is actually woken; p->state == TASK_RUNNING.
 * It is not always called from the waking context.
 */
DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

/*
 * Tracepoint for waking up a new task:
 */
DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

#ifdef CREATE_TRACE_POINTS
static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p)
{
#ifdef CONFIG_SCHED_DEBUG
	BUG_ON(p != current);
#endif /* CONFIG_SCHED_DEBUG */

	/*
	 * Preemption ignores task state, therefore preempted tasks are always
	 * RUNNING (we will not have dequeued if state != RUNNING).
	 */
	return preempt ? TASK_RUNNING | TASK_STATE_MAX : p->state;
}
#endif /* CREATE_TRACE_POINTS */

/*
 * Tracepoint for task switches, performed by the scheduler:
 */
TRACE_EVENT(sched_switch,

	TP_PROTO(bool preempt,
		 struct task_struct *prev,
		 struct task_struct *next),

	TP_ARGS(preempt, prev, next),

	TP_STRUCT__entry(
		__array( char, prev_comm, TASK_COMM_LEN )
		__field( pid_t, prev_pid )
		__field( int, prev_prio )
		__field( long, prev_state )
		__array( char, next_comm, TASK_COMM_LEN )
		__field( pid_t, next_pid )
		__field( int, next_prio )
	),

	TP_fast_assign(
		memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
		__entry->prev_pid = prev->pid;
		__entry->prev_prio = prev->prio;
		__entry->prev_state = __trace_sched_switch_state(preempt, prev);
		memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
		__entry->next_pid = next->pid;
		__entry->next_prio = next->prio;
	),

	TP_printk("prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%s%s ==> next_comm=%s next_pid=%d next_prio=%d",
		__entry->prev_comm, __entry->prev_pid, __entry->prev_prio,
		__entry->prev_state & (TASK_STATE_MAX-1) ?
		  __print_flags(__entry->prev_state & (TASK_STATE_MAX-1), "|",
				{ 1, "S"} , { 2, "D" }, { 4, "T" }, { 8, "t" },
				{ 16, "Z" }, { 32, "X" }, { 64, "x" },
				{ 128, "K" }, { 256, "W" }, { 512, "P" },
				{ 1024, "N" }) : "R",
		__entry->prev_state & TASK_STATE_MAX ? "+" : "",
		__entry->next_comm, __entry->next_pid, __entry->next_prio)
);

/*
 * Tracepoint for a task being migrated:
 */
TRACE_EVENT(sched_migrate_task,

	TP_PROTO(struct task_struct *p, int dest_cpu, unsigned int load),

	TP_ARGS(p, dest_cpu, load),

	TP_STRUCT__entry(
		__array( char, comm, TASK_COMM_LEN )
		__field( pid_t, pid )
		__field( int, prio )
		__field(unsigned int, load )
		__field( int, orig_cpu )
		__field( int, dest_cpu )
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid = p->pid;
		__entry->prio = p->prio;
		__entry->load = load;
		__entry->orig_cpu = task_cpu(p);
		__entry->dest_cpu = dest_cpu;
	),

	TP_printk("comm=%s pid=%d prio=%d load=%d orig_cpu=%d dest_cpu=%d",
		__entry->comm, __entry->pid, __entry->prio, __entry->load,
		__entry->orig_cpu, __entry->dest_cpu)
);

/*
 * Tracepoint for a CPU going offline/online:
 */
TRACE_EVENT(sched_cpu_hotplug,

	TP_PROTO(int affected_cpu, int error, int status),

	TP_ARGS(affected_cpu, error, status),

	TP_STRUCT__entry(
		__field( int, affected_cpu )
		__field( int, error )
		__field( int, status )
	),

	TP_fast_assign(
		__entry->affected_cpu = affected_cpu;
		__entry->error = error;
		__entry->status = status;
	),

	TP_printk("cpu %d %s error=%d", __entry->affected_cpu,
		__entry->status ? "online" : "offline", __entry->error)
);

/*
 * Tracepoint for load balancing:
 */
#if NR_CPUS > 32
#error "Unsupported NR_CPUS for lb tracepoint."
#endif
TRACE_EVENT(sched_load_balance,

	TP_PROTO(int cpu, enum cpu_idle_type idle, int balance,
		unsigned long group_mask, int busiest_nr_running,
		unsigned long imbalance, unsigned int env_flags, int ld_moved,
		unsigned int balance_interval),

	TP_ARGS(cpu, idle, balance, group_mask, busiest_nr_running,
		imbalance, env_flags, ld_moved, balance_interval),

	TP_STRUCT__entry(
		__field( int, cpu)
		__field( enum cpu_idle_type, idle)
		__field( int, balance)
		__field( unsigned long, group_mask)
		__field( int, busiest_nr_running)
		__field( unsigned long, imbalance)
		__field( unsigned int, env_flags)
		__field( int, ld_moved)
		__field( unsigned int, balance_interval)
	),

	TP_fast_assign(
		__entry->cpu = cpu;
		__entry->idle = idle;
		__entry->balance = balance;
		__entry->group_mask = group_mask;
		__entry->busiest_nr_running = busiest_nr_running;
		__entry->imbalance = imbalance;
		__entry->env_flags = env_flags;
		__entry->ld_moved = ld_moved;
		__entry->balance_interval = balance_interval;
	),

	TP_printk("cpu=%d state=%s balance=%d group=%#lx busy_nr=%d imbalance=%ld flags=%#x ld_moved=%d bal_int=%d",
		__entry->cpu,
		__entry->idle == CPU_IDLE ? "idle" :
		(__entry->idle == CPU_NEWLY_IDLE ? "newly_idle" : "busy"),
		__entry->balance,
		__entry->group_mask, __entry->busiest_nr_running,
		__entry->imbalance, __entry->env_flags, __entry->ld_moved,
		__entry->balance_interval)
);

DECLARE_EVENT_CLASS(sched_process_template,

	TP_PROTO(struct task_struct *p),

	TP_ARGS(p),

	TP_STRUCT__entry(
		__array( char, comm, TASK_COMM_LEN )
		__field( pid_t, pid )
		__field( int, prio )
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid = p->pid;
		__entry->prio = p->prio;
	),

	TP_printk("comm=%s pid=%d prio=%d",
		__entry->comm, __entry->pid, __entry->prio)
);

/*
 * Tracepoint for freeing a task:
 */
DEFINE_EVENT(sched_process_template, sched_process_free,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

/*
 * Tracepoint for a task exiting:
 */
DEFINE_EVENT(sched_process_template, sched_process_exit,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

/*
 * Tracepoint for waiting on task to unschedule:
 */
DEFINE_EVENT(sched_process_template, sched_wait_task,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p));

/*
 * Tracepoint for a waiting task:
 */
TRACE_EVENT(sched_process_wait,

	TP_PROTO(struct pid *pid),

	TP_ARGS(pid),

	TP_STRUCT__entry(
		__array( char, comm, TASK_COMM_LEN )
		__field( pid_t, pid )
		__field( int, prio )
	),

	TP_fast_assign(
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
		__entry->pid = pid_nr(pid);
		__entry->prio = current->prio;
	),

	TP_printk("comm=%s pid=%d prio=%d",
		__entry->comm, __entry->pid, __entry->prio)
);

/*
 * Tracepoint for do_fork:
 */
TRACE_EVENT(sched_process_fork,

	TP_PROTO(struct task_struct *parent, struct task_struct *child),

	TP_ARGS(parent, child),

	TP_STRUCT__entry(
		__array( char, parent_comm, TASK_COMM_LEN )
		__field( pid_t, parent_pid )
		__array( char, child_comm, TASK_COMM_LEN )
		__field( pid_t, child_pid )
	),

	TP_fast_assign(
		memcpy(__entry->parent_comm, parent->comm, TASK_COMM_LEN);
		__entry->parent_pid = parent->pid;
		memcpy(__entry->child_comm, child->comm, TASK_COMM_LEN);
		__entry->child_pid = child->pid;
	),

	TP_printk("comm=%s pid=%d child_comm=%s child_pid=%d",
		__entry->parent_comm, __entry->parent_pid,
		__entry->child_comm, __entry->child_pid)
);

/*
 * Tracepoint for exec:
 */
TRACE_EVENT(sched_process_exec,

	TP_PROTO(struct task_struct *p, pid_t old_pid,
		 struct linux_binprm *bprm),

	TP_ARGS(p, old_pid, bprm),

	TP_STRUCT__entry(
		__string( filename, bprm->filename )
		__field( pid_t, pid )
		__field( pid_t, old_pid )
	),

	TP_fast_assign(
		__assign_str(filename, bprm->filename);
		__entry->pid = p->pid;
		__entry->old_pid = old_pid;
	),

	TP_printk("filename=%s pid=%d old_pid=%d", __get_str(filename),
		__entry->pid, __entry->old_pid)
);

/*
 * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE
 *     adding sched_stat support to SCHED_FIFO/RR would be welcome.
 */
DECLARE_EVENT_CLASS(sched_stat_template,

	TP_PROTO(struct task_struct *tsk, u64 delay),

	TP_ARGS(__perf_task(tsk), __perf_count(delay)),

	TP_STRUCT__entry(
		__array( char, comm, TASK_COMM_LEN )
		__field( pid_t, pid )
		__field( u64, delay )
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid = tsk->pid;
		__entry->delay = delay;
	),

	TP_printk("comm=%s pid=%d delay=%Lu [ns]",
		__entry->comm, __entry->pid,
		(unsigned long long)__entry->delay)
);

/*
 * Tracepoint for accounting wait time (time the task is runnable
 * but not actually running due to scheduler contention).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_wait,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting sleep time (time the task is not runnable,
 * including iowait, see below).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_sleep,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting iowait time (time the task is not runnable
 * due to waiting on IO to complete).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_iowait,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting blocked time (time the task is in uninterruptible sleep).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_blocked,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));

/*
 * Tracepoint for recording the cause of uninterruptible sleep.
 */
TRACE_EVENT(sched_blocked_reason,

	TP_PROTO(struct task_struct *tsk),

	TP_ARGS(tsk),

	TP_STRUCT__entry(
		__field( pid_t, pid )
		__field( void*, caller )
		__field( bool, io_wait )
	),

	TP_fast_assign(
		__entry->pid = tsk->pid;
		__entry->caller = (void*)get_wchan(tsk);
		__entry->io_wait = tsk->in_iowait;
	),

	TP_printk("pid=%d iowait=%d caller=%pS", __entry->pid, __entry->io_wait, __entry->caller)
);

/*
 * Tracepoint for accounting runtime (time the task is executing
 * on a CPU).
 */
DECLARE_EVENT_CLASS(sched_stat_runtime,

	TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),

	TP_ARGS(tsk, __perf_count(runtime), vruntime),

	TP_STRUCT__entry(
		__array( char, comm, TASK_COMM_LEN )
		__field( pid_t, pid )
		__field( u64, runtime )
		__field( u64, vruntime )
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid = tsk->pid;
		__entry->runtime = runtime;
		__entry->vruntime = vruntime;
	),

	TP_printk("comm=%s pid=%d runtime=%Lu [ns] vruntime=%Lu [ns]",
		__entry->comm, __entry->pid,
		(unsigned long long)__entry->runtime,
		(unsigned long long)__entry->vruntime)
);

DEFINE_EVENT(sched_stat_runtime, sched_stat_runtime,
	     TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
	     TP_ARGS(tsk, runtime, vruntime));

/*
 * Tracepoint for showing priority inheritance modifying a task's
 * priority.
 */
TRACE_EVENT(sched_pi_setprio,

	TP_PROTO(struct task_struct *tsk, int newprio),

	TP_ARGS(tsk, newprio),

	TP_STRUCT__entry(
		__array( char, comm, TASK_COMM_LEN )
		__field( pid_t, pid )
		__field( int, oldprio )
		__field( int, newprio )
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid = tsk->pid;
		__entry->oldprio = tsk->prio;
		__entry->newprio = newprio;
	),

	TP_printk("comm=%s pid=%d oldprio=%d newprio=%d",
		__entry->comm, __entry->pid,
		__entry->oldprio, __entry->newprio)
);

#ifdef CONFIG_DETECT_HUNG_TASK
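/*
 * Tracepoint emitted by the hung task detector when a task is detected
 * as hung:
 */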
TRACE_EVENT(sched_process_hang,
	TP_PROTO(struct task_struct *tsk),
	TP_ARGS(tsk),

	TP_STRUCT__entry(
		__array( char, comm, TASK_COMM_LEN )
		__field( pid_t, pid )
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid = tsk->pid;
	),

	TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
);
#endif /* CONFIG_DETECT_HUNG_TASK */

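/*
 * Event class for NUMA balancing task movement: records pid/tgid/numa
 * group id along with source and destination CPU and node:
 */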
DECLARE_EVENT_CLASS(sched_move_task_template,

	TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu),

	TP_ARGS(tsk, src_cpu, dst_cpu),

	TP_STRUCT__entry(
		__field( pid_t, pid )
		__field( pid_t, tgid )
		__field( pid_t, ngid )
		__field( int, src_cpu )
		__field( int, src_nid )
		__field( int, dst_cpu )
		__field( int, dst_nid )
	),

	TP_fast_assign(
		__entry->pid = task_pid_nr(tsk);
		__entry->tgid = task_tgid_nr(tsk);
		__entry->ngid = task_numa_group_id(tsk);
		__entry->src_cpu = src_cpu;
		__entry->src_nid = cpu_to_node(src_cpu);
		__entry->dst_cpu = dst_cpu;
		__entry->dst_nid = cpu_to_node(dst_cpu);
	),

	TP_printk("pid=%d tgid=%d ngid=%d src_cpu=%d src_nid=%d dst_cpu=%d dst_nid=%d",
		__entry->pid, __entry->tgid, __entry->ngid,
		__entry->src_cpu, __entry->src_nid,
		__entry->dst_cpu, __entry->dst_nid)
);

/*
 * Tracks migration of tasks from one runqueue to another. Can be used to
 * detect if automatic NUMA balancing is bouncing between nodes
 */
DEFINE_EVENT(sched_move_task_template, sched_move_numa,
	TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu),

	TP_ARGS(tsk, src_cpu, dst_cpu)
);

DEFINE_EVENT(sched_move_task_template, sched_stick_numa,
	TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu),

	TP_ARGS(tsk, src_cpu, dst_cpu)
);

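/*
 * Tracepoint for NUMA balancing swapping two tasks between CPUs/nodes:
 */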
TRACE_EVENT(sched_swap_numa,

	TP_PROTO(struct task_struct *src_tsk, int src_cpu,
		 struct task_struct *dst_tsk, int dst_cpu),

	TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu),

	TP_STRUCT__entry(
		__field( pid_t, src_pid )
		__field( pid_t, src_tgid )
		__field( pid_t, src_ngid )
		__field( int, src_cpu )
		__field( int, src_nid )
		__field( pid_t, dst_pid )
		__field( pid_t, dst_tgid )
		__field( pid_t, dst_ngid )
		__field( int, dst_cpu )
		__field( int, dst_nid )
	),

	TP_fast_assign(
		__entry->src_pid = task_pid_nr(src_tsk);
		__entry->src_tgid = task_tgid_nr(src_tsk);
		__entry->src_ngid = task_numa_group_id(src_tsk);
		__entry->src_cpu = src_cpu;
		__entry->src_nid = cpu_to_node(src_cpu);
		__entry->dst_pid = task_pid_nr(dst_tsk);
		__entry->dst_tgid = task_tgid_nr(dst_tsk);
		__entry->dst_ngid = task_numa_group_id(dst_tsk);
		__entry->dst_cpu = dst_cpu;
		__entry->dst_nid = cpu_to_node(dst_cpu);
	),

	TP_printk("src_pid=%d src_tgid=%d src_ngid=%d src_cpu=%d src_nid=%d dst_pid=%d dst_tgid=%d dst_ngid=%d dst_cpu=%d dst_nid=%d",
		__entry->src_pid, __entry->src_tgid, __entry->src_ngid,
		__entry->src_cpu, __entry->src_nid,
		__entry->dst_pid, __entry->dst_tgid, __entry->dst_ngid,
		__entry->dst_cpu, __entry->dst_nid)
);

/*
 * Tracepoint for waking a polling cpu without an IPI.
 */
TRACE_EVENT(sched_wake_idle_without_ipi,

	TP_PROTO(int cpu),

	TP_ARGS(cpu),

	TP_STRUCT__entry(
		__field( int, cpu )
	),

	TP_fast_assign(
		__entry->cpu = cpu;
	),

	TP_printk("cpu=%d", __entry->cpu)
);

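/*
 * Tracepoint for reporting the averaged number of runnable tasks (overall,
 * big and iowait) when the running average is sampled:
 */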
TRACE_EVENT(sched_get_nr_running_avg,

	TP_PROTO(int avg, int big_avg, int iowait_avg),

	TP_ARGS(avg, big_avg, iowait_avg),

	TP_STRUCT__entry(
		__field( int, avg )
		__field( int, big_avg )
		__field( int, iowait_avg )
	),

	TP_fast_assign(
		__entry->avg = avg;
		__entry->big_avg = big_avg;
		__entry->iowait_avg = iowait_avg;
	),

	TP_printk("avg=%d big_avg=%d iowait_avg=%d",
		__entry->avg, __entry->big_avg, __entry->iowait_avg)
);
#endif /* _TRACE_SCHED_H */

/* This part must be outside protection */
#include <trace/define_trace.h>