#undef TRACE_SYSTEM
#define TRACE_SYSTEM sched

#if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SCHED_H

#include <linux/sched.h>
#include <linux/tracepoint.h>
#include <linux/binfmts.h>

struct rq;

/*
 * Tracepoint for calling kthread_stop, performed to end a kthread:
 */
TRACE_EVENT(sched_kthread_stop,

	TP_PROTO(struct task_struct *t),

	TP_ARGS(t),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, t->comm, TASK_COMM_LEN);
		__entry->pid	= t->pid;
	),

	TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
);

/*
 * Tracepoint for the return value of the kthread stopping:
 */
TRACE_EVENT(sched_kthread_stop_ret,

	TP_PROTO(int ret),

	TP_ARGS(ret),

	TP_STRUCT__entry(
		__field(	int,	ret	)
	),

	TP_fast_assign(
		__entry->ret	= ret;
	),

	TP_printk("ret=%d", __entry->ret)
);
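
/*
 * Usage sketch (editorial, not part of the original file): TRACE_EVENT()
 * generates a trace_<name>() function per event, so kthread_stop() emits the
 * pair above roughly as:
 *
 *	trace_sched_kthread_stop(k);
 *	ret = ...;	// wait for the kthread to exit
 *	trace_sched_kthread_stop_ret(ret);
 *
 * Either event can be enabled at runtime through tracefs, e.g.:
 *	echo 1 > /sys/kernel/debug/tracing/events/sched/sched_kthread_stop/enable
 */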

/*
 * Tracepoint for task enqueue/dequeue:
 */
TRACE_EVENT(sched_enq_deq_task,

	TP_PROTO(struct task_struct *p, bool enqueue, unsigned int cpus_allowed),

	TP_ARGS(p, enqueue, cpus_allowed),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
		__field(	int,	cpu			)
		__field(	bool,	enqueue			)
		__field(unsigned int,	nr_running		)
		__field(unsigned long,	cpu_load		)
		__field(unsigned int,	rt_nr_running		)
		__field(unsigned int,	cpus_allowed		)
		__field(unsigned int,	demand			)
		__field(unsigned int,	pred_demand		)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid		= p->pid;
		__entry->prio		= p->prio;
		__entry->cpu		= task_cpu(p);
		__entry->enqueue	= enqueue;
		__entry->nr_running	= task_rq(p)->nr_running;
		__entry->cpu_load	= task_rq(p)->cpu_load[0];
		__entry->rt_nr_running	= task_rq(p)->rt.rt_nr_running;
		__entry->cpus_allowed	= cpus_allowed;
		__entry->demand		= task_load(p);
		__entry->pred_demand	= task_pl(p);
	),

	TP_printk("cpu=%d %s comm=%s pid=%d prio=%d nr_running=%u cpu_load=%lu rt_nr_running=%u affine=%x demand=%u pred_demand=%u",
			__entry->cpu,
			__entry->enqueue ? "enqueue" : "dequeue",
			__entry->comm, __entry->pid,
			__entry->prio, __entry->nr_running,
			__entry->cpu_load, __entry->rt_nr_running,
			__entry->cpus_allowed, __entry->demand,
			__entry->pred_demand)
);
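
/*
 * Illustrative output (hypothetical values), rendered per the TP_printk()
 * format above:
 *
 *	sched_enq_deq_task: cpu=1 enqueue comm=kworker/1:0 pid=17 prio=120
 *	    nr_running=2 cpu_load=1024 rt_nr_running=0 affine=ff demand=512
 *	    pred_demand=512
 */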

#ifdef CONFIG_SCHED_WALT
struct group_cpu_time;
extern const char *task_event_names[];

#if defined(CREATE_TRACE_POINTS) && defined(CONFIG_SCHED_WALT)
static inline void __window_data(u32 *dst, u32 *src)
{
	if (src)
		memcpy(dst, src, nr_cpu_ids * sizeof(u32));
	else
		memset(dst, 0, nr_cpu_ids * sizeof(u32));
}

struct trace_seq;
const char *__window_print(struct trace_seq *p, const u32 *buf, int buf_len)
{
	int i;
	const char *ret = p->buffer + seq_buf_used(&p->seq);

	for (i = 0; i < buf_len; i++)
		trace_seq_printf(p, "%u ", buf[i]);

	trace_seq_putc(p, 0);

	return ret;
}
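
/*
 * Editorial note on the helpers above: __window_data() snapshots a task's
 * per-CPU window sums into the event's dynamic array (zero-filling when the
 * source is NULL), and __window_print() renders that array in the trace_seq
 * buffer as a NUL-terminated, space-separated list, e.g. "128 0 512 0 " on
 * a four-CPU system (values hypothetical).
 */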

static inline s64 __rq_update_sum(struct rq *rq, bool curr, bool new)
{
	if (curr)
		if (new)
			return rq->nt_curr_runnable_sum;
		else
			return rq->curr_runnable_sum;
	else
		if (new)
			return rq->nt_prev_runnable_sum;
		else
			return rq->prev_runnable_sum;
}

static inline s64 __grp_update_sum(struct rq *rq, bool curr, bool new)
{
	if (curr)
		if (new)
			return rq->grp_time.nt_curr_runnable_sum;
		else
			return rq->grp_time.curr_runnable_sum;
	else
		if (new)
			return rq->grp_time.nt_prev_runnable_sum;
		else
			return rq->grp_time.prev_runnable_sum;
}

static inline s64
__get_update_sum(struct rq *rq, enum migrate_types migrate_type,
		 bool src, bool new, bool curr)
{
	switch (migrate_type) {
	case RQ_TO_GROUP:
		if (src)
			return __rq_update_sum(rq, curr, new);
		else
			return __grp_update_sum(rq, curr, new);
	case GROUP_TO_RQ:
		if (src)
			return __grp_update_sum(rq, curr, new);
		else
			return __rq_update_sum(rq, curr, new);
	default:
		WARN_ON_ONCE(1);
		return -1;
	}
}
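
/*
 * Selection table for __get_update_sum() (editorial summary of the code
 * above): "new" picks the new-task (nt_*) counters, "curr" picks the
 * current window over the previous one.
 *
 *	migrate_type	src	counters read
 *	RQ_TO_GROUP	true	rq->*_runnable_sum
 *	RQ_TO_GROUP	false	rq->grp_time.*_runnable_sum
 *	GROUP_TO_RQ	true	rq->grp_time.*_runnable_sum
 *	GROUP_TO_RQ	false	rq->*_runnable_sum
 */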
#endif

TRACE_EVENT(sched_update_pred_demand,

	TP_PROTO(struct rq *rq, struct task_struct *p, u32 runtime, int pct,
		 unsigned int pred_demand),

	TP_ARGS(rq, p, runtime, pct, pred_demand),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(unsigned int,	runtime			)
		__field(	int,	pct			)
		__field(unsigned int,	pred_demand		)
		__array(	u8,	bucket, NUM_BUSY_BUCKETS)
		__field(	int,	cpu			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid		= p->pid;
		__entry->runtime	= runtime;
		__entry->pct		= pct;
		__entry->pred_demand	= pred_demand;
		memcpy(__entry->bucket, p->ravg.busy_buckets,
				NUM_BUSY_BUCKETS * sizeof(u8));
		__entry->cpu		= rq->cpu;
	),

	TP_printk("%d (%s): runtime %u pct %d cpu %d pred_demand %u (buckets: %u %u %u %u %u %u %u %u %u %u)",
		__entry->pid, __entry->comm,
		__entry->runtime, __entry->pct, __entry->cpu,
		__entry->pred_demand, __entry->bucket[0], __entry->bucket[1],
		__entry->bucket[2], __entry->bucket[3], __entry->bucket[4],
		__entry->bucket[5], __entry->bucket[6], __entry->bucket[7],
		__entry->bucket[8], __entry->bucket[9])
);

TRACE_EVENT(sched_update_history,

	TP_PROTO(struct rq *rq, struct task_struct *p, u32 runtime, int samples,
		 enum task_event evt),

	TP_ARGS(rq, p, runtime, samples, evt),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(unsigned int,	runtime			)
		__field(	int,	samples			)
		__field(enum task_event,	evt		)
		__field(unsigned int,	demand			)
		__field(unsigned int,	coloc_demand		)
		__field(unsigned int,	pred_demand		)
		__array(	u32,	hist,	RAVG_HIST_SIZE_MAX)
		__field(unsigned int,	nr_big_tasks		)
		__field(	int,	cpu			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid		= p->pid;
		__entry->runtime	= runtime;
		__entry->samples	= samples;
		__entry->evt		= evt;
		__entry->demand		= p->ravg.demand;
		__entry->coloc_demand	= p->ravg.coloc_demand;
		__entry->pred_demand	= p->ravg.pred_demand;
		memcpy(__entry->hist, p->ravg.sum_history,
					RAVG_HIST_SIZE_MAX * sizeof(u32));
		__entry->nr_big_tasks	= rq->walt_stats.nr_big_tasks;
		__entry->cpu		= rq->cpu;
	),

	TP_printk("%d (%s): runtime %u samples %d event %s demand %u coloc_demand %u pred_demand %u"
		" (hist: %u %u %u %u %u) cpu %d nr_big %u",
		__entry->pid, __entry->comm,
		__entry->runtime, __entry->samples,
		task_event_names[__entry->evt],
		__entry->demand, __entry->coloc_demand, __entry->pred_demand,
		__entry->hist[0], __entry->hist[1],
		__entry->hist[2], __entry->hist[3],
		__entry->hist[4], __entry->cpu, __entry->nr_big_tasks)
);

TRACE_EVENT(sched_get_task_cpu_cycles,

	TP_PROTO(int cpu, int event, u64 cycles, u64 exec_time),

	TP_ARGS(cpu, event, cycles, exec_time),

	TP_STRUCT__entry(
		__field(int,	cpu		)
		__field(int,	event		)
		__field(u64,	cycles		)
		__field(u64,	exec_time	)
		__field(u32,	freq		)
		__field(u32,	legacy_freq	)
	),

	TP_fast_assign(
		__entry->cpu		= cpu;
		__entry->event		= event;
		__entry->cycles		= cycles;
		__entry->exec_time	= exec_time;
		__entry->freq		= cpu_cycles_to_freq(cycles, exec_time);
		__entry->legacy_freq	= cpu_cur_freq(cpu);
	),

	TP_printk("cpu=%d event=%d cycles=%llu exec_time=%llu freq=%u legacy_freq=%u",
		  __entry->cpu, __entry->event, __entry->cycles,
		  __entry->exec_time, __entry->freq, __entry->legacy_freq)
);
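
/*
 * Back-of-the-envelope check (assumption: cpu_cycles_to_freq() divides the
 * cycle count by the elapsed execution time): 1,500,000 cycles retired over
 * an exec_time of 1,000,000 ns works out to 1.5 cycles/ns, i.e. roughly a
 * 1.5 GHz effective frequency, which the event lets you compare against the
 * "legacy" cpufreq-reported value.
 */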

TRACE_EVENT(sched_update_task_ravg,

	TP_PROTO(struct task_struct *p, struct rq *rq, enum task_event evt,
		 u64 wallclock, u64 irqtime, u64 cycles, u64 exec_time,
		 struct group_cpu_time *cpu_time),

	TP_ARGS(p, rq, evt, wallclock, irqtime, cycles, exec_time, cpu_time),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	pid_t,	cur_pid			)
		__field(unsigned int,	cur_freq		)
		__field(	u64,	wallclock		)
		__field(	u64,	mark_start		)
		__field(	u64,	delta_m			)
		__field(	u64,	win_start		)
		__field(	u64,	delta			)
		__field(	u64,	irqtime			)
		__field(enum task_event,	evt		)
		__field(unsigned int,	demand			)
		__field(unsigned int,	coloc_demand		)
		__field(unsigned int,	sum			)
		__field(	int,	cpu			)
		__field(unsigned int,	pred_demand		)
		__field(	u64,	rq_cs			)
		__field(	u64,	rq_ps			)
		__field(	u64,	grp_cs			)
		__field(	u64,	grp_ps			)
		__field(	u64,	grp_nt_cs		)
		__field(	u64,	grp_nt_ps		)
		__field(	u32,	curr_window		)
		__field(	u32,	prev_window		)
		__dynamic_array(u32,	curr_sum, nr_cpu_ids	)
		__dynamic_array(u32,	prev_sum, nr_cpu_ids	)
		__field(	u64,	nt_cs			)
		__field(	u64,	nt_ps			)
		__field(	u32,	active_windows		)
		__field(	u8,	curr_top		)
		__field(	u8,	prev_top		)
	),

	TP_fast_assign(
		__entry->wallclock	= wallclock;
		__entry->win_start	= rq->window_start;
		__entry->delta		= (wallclock - rq->window_start);
		__entry->evt		= evt;
		__entry->cpu		= rq->cpu;
		__entry->cur_pid	= rq->curr->pid;
		__entry->cur_freq	= cpu_cycles_to_freq(cycles, exec_time);
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid		= p->pid;
		__entry->mark_start	= p->ravg.mark_start;
		__entry->delta_m	= (wallclock - p->ravg.mark_start);
		__entry->demand		= p->ravg.demand;
		__entry->coloc_demand	= p->ravg.coloc_demand;
		__entry->sum		= p->ravg.sum;
		__entry->irqtime	= irqtime;
		__entry->pred_demand	= p->ravg.pred_demand;
		__entry->rq_cs		= rq->curr_runnable_sum;
		__entry->rq_ps		= rq->prev_runnable_sum;
		__entry->grp_cs		= cpu_time ? cpu_time->curr_runnable_sum : 0;
		__entry->grp_ps		= cpu_time ? cpu_time->prev_runnable_sum : 0;
		__entry->grp_nt_cs	= cpu_time ? cpu_time->nt_curr_runnable_sum : 0;
		__entry->grp_nt_ps	= cpu_time ? cpu_time->nt_prev_runnable_sum : 0;
		__entry->curr_window	= p->ravg.curr_window;
		__entry->prev_window	= p->ravg.prev_window;
		__window_data(__get_dynamic_array(curr_sum), p->ravg.curr_window_cpu);
		__window_data(__get_dynamic_array(prev_sum), p->ravg.prev_window_cpu);
		__entry->nt_cs		= rq->nt_curr_runnable_sum;
		__entry->nt_ps		= rq->nt_prev_runnable_sum;
		__entry->active_windows	= p->ravg.active_windows;
		__entry->curr_top	= rq->curr_top;
		__entry->prev_top	= rq->prev_top;
	),

	TP_printk("wc %llu ws %llu delta %llu event %s cpu %d cur_freq %u cur_pid %d task %d (%s) ms %llu delta %llu demand %u coloc_demand %u sum %u irqtime %llu pred_demand %u rq_cs %llu rq_ps %llu cur_window %u (%s) prev_window %u (%s) nt_cs %llu nt_ps %llu active_wins %u grp_cs %lld grp_ps %lld grp_nt_cs %llu grp_nt_ps %llu curr_top %u prev_top %u",
		__entry->wallclock, __entry->win_start, __entry->delta,
		task_event_names[__entry->evt], __entry->cpu,
		__entry->cur_freq, __entry->cur_pid,
		__entry->pid, __entry->comm, __entry->mark_start,
		__entry->delta_m, __entry->demand, __entry->coloc_demand,
		__entry->sum, __entry->irqtime, __entry->pred_demand,
		__entry->rq_cs, __entry->rq_ps, __entry->curr_window,
		__window_print(p, __get_dynamic_array(curr_sum), nr_cpu_ids),
		__entry->prev_window,
		__window_print(p, __get_dynamic_array(prev_sum), nr_cpu_ids),
		__entry->nt_cs, __entry->nt_ps,
		__entry->active_windows, __entry->grp_cs,
		__entry->grp_ps, __entry->grp_nt_cs, __entry->grp_nt_ps,
		__entry->curr_top, __entry->prev_top)
);
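
/*
 * This event fires on every WALT accounting update, so it is extremely
 * chatty. A common way to keep the ring buffer manageable (standard ftrace
 * mechanics, shown as a usage sketch) is to filter on a field:
 *
 *	cd /sys/kernel/debug/tracing
 *	echo 'cpu == 0' > events/sched/sched_update_task_ravg/filter
 *	echo 1 > events/sched/sched_update_task_ravg/enable
 */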

TRACE_EVENT(sched_update_task_ravg_mini,

	TP_PROTO(struct task_struct *p, struct rq *rq, enum task_event evt,
		 u64 wallclock, u64 irqtime, u64 cycles, u64 exec_time,
		 struct group_cpu_time *cpu_time),

	TP_ARGS(p, rq, evt, wallclock, irqtime, cycles, exec_time, cpu_time),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	u64,	wallclock		)
		__field(	u64,	mark_start		)
		__field(	u64,	delta_m			)
		__field(	u64,	win_start		)
		__field(	u64,	delta			)
		__field(enum task_event,	evt		)
		__field(unsigned int,	demand			)
		__field(	int,	cpu			)
		__field(	u64,	rq_cs			)
		__field(	u64,	rq_ps			)
		__field(	u64,	grp_cs			)
		__field(	u64,	grp_ps			)
		__field(	u32,	curr_window		)
		__field(	u32,	prev_window		)
	),

	TP_fast_assign(
		__entry->wallclock	= wallclock;
		__entry->win_start	= rq->window_start;
		__entry->delta		= (wallclock - rq->window_start);
		__entry->evt		= evt;
		__entry->cpu		= rq->cpu;
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid		= p->pid;
		__entry->mark_start	= p->ravg.mark_start;
		__entry->delta_m	= (wallclock - p->ravg.mark_start);
		__entry->demand		= p->ravg.demand;
		__entry->rq_cs		= rq->curr_runnable_sum;
		__entry->rq_ps		= rq->prev_runnable_sum;
		__entry->grp_cs		= cpu_time ? cpu_time->curr_runnable_sum : 0;
		__entry->grp_ps		= cpu_time ? cpu_time->prev_runnable_sum : 0;
		__entry->curr_window	= p->ravg.curr_window;
		__entry->prev_window	= p->ravg.prev_window;
	),

	TP_printk("wc %llu ws %llu delta %llu event %s cpu %d task %d (%s) ms %llu delta %llu demand %u rq_cs %llu rq_ps %llu cur_window %u prev_window %u grp_cs %lld grp_ps %lld",
		__entry->wallclock, __entry->win_start, __entry->delta,
		task_event_names[__entry->evt], __entry->cpu,
		__entry->pid, __entry->comm, __entry->mark_start,
		__entry->delta_m, __entry->demand,
		__entry->rq_cs, __entry->rq_ps, __entry->curr_window,
		__entry->prev_window,
		__entry->grp_cs,
		__entry->grp_ps)
);

struct migration_sum_data;
extern const char *migrate_type_names[];

TRACE_EVENT(sched_set_preferred_cluster,

	TP_PROTO(struct related_thread_group *grp, u64 total_demand),

	TP_ARGS(grp, total_demand),

	TP_STRUCT__entry(
		__field(	int,	id			)
		__field(	u64,	demand			)
		__field(	int,	cluster_first_cpu	)
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(unsigned int,	task_demand		)
	),

	TP_fast_assign(
		__entry->id			= grp->id;
		__entry->demand			= total_demand;
		__entry->cluster_first_cpu	= grp->preferred_cluster ?
					cluster_first_cpu(grp->preferred_cluster)
					: -1;
	),

	TP_printk("group_id %d total_demand %llu preferred_cluster_first_cpu %d",
			__entry->id, __entry->demand,
			__entry->cluster_first_cpu)
);

TRACE_EVENT(sched_migration_update_sum,

	TP_PROTO(struct task_struct *p, enum migrate_types migrate_type,
		 struct rq *rq),

	TP_ARGS(p, migrate_type, rq),

	TP_STRUCT__entry(
		__field(int,		tcpu			)
		__field(int,		pid			)
		__field(enum migrate_types,	migrate_type	)
		__field(	s64,	src_cs			)
		__field(	s64,	src_ps			)
		__field(	s64,	dst_cs			)
		__field(	s64,	dst_ps			)
		__field(	s64,	src_nt_cs		)
		__field(	s64,	src_nt_ps		)
		__field(	s64,	dst_nt_cs		)
		__field(	s64,	dst_nt_ps		)
	),

	TP_fast_assign(
		__entry->tcpu		= task_cpu(p);
		__entry->pid		= p->pid;
		__entry->migrate_type	= migrate_type;
		__entry->src_cs		= __get_update_sum(rq, migrate_type,
							   true, false, true);
		__entry->src_ps		= __get_update_sum(rq, migrate_type,
							   true, false, false);
		__entry->dst_cs		= __get_update_sum(rq, migrate_type,
							   false, false, true);
		__entry->dst_ps		= __get_update_sum(rq, migrate_type,
							   false, false, false);
		__entry->src_nt_cs	= __get_update_sum(rq, migrate_type,
							   true, true, true);
		__entry->src_nt_ps	= __get_update_sum(rq, migrate_type,
							   true, true, false);
		__entry->dst_nt_cs	= __get_update_sum(rq, migrate_type,
							   false, true, true);
		__entry->dst_nt_ps	= __get_update_sum(rq, migrate_type,
							   false, true, false);
	),

	TP_printk("pid %d task_cpu %d migrate_type %s src_cs %lld src_ps %lld dst_cs %lld dst_ps %lld src_nt_cs %lld src_nt_ps %lld dst_nt_cs %lld dst_nt_ps %lld",
		__entry->pid, __entry->tcpu,
		migrate_type_names[__entry->migrate_type],
		__entry->src_cs, __entry->src_ps,
		__entry->dst_cs, __entry->dst_ps,
		__entry->src_nt_cs, __entry->src_nt_ps,
		__entry->dst_nt_cs, __entry->dst_nt_ps)
);

TRACE_EVENT(sched_set_boost,

	TP_PROTO(int type),

	TP_ARGS(type),

	TP_STRUCT__entry(
		__field(int,	type	)
	),

	TP_fast_assign(
		__entry->type = type;
	),

	TP_printk("type %d", __entry->type)
);

#endif

#ifdef CONFIG_SCHED_WALT
DECLARE_EVENT_CLASS(sched_cpu_load,

	TP_PROTO(struct rq *rq, int idle, u64 irqload, unsigned int power_cost),

	TP_ARGS(rq, idle, irqload, power_cost),

	TP_STRUCT__entry(
		__field(unsigned int,	cpu			)
		__field(unsigned int,	idle			)
		__field(unsigned int,	nr_running		)
		__field(unsigned int,	nr_big_tasks		)
		__field(unsigned int,	load_scale_factor	)
		__field(unsigned int,	capacity		)
		__field(	 u64,	cumulative_runnable_avg	)
		__field(	 u64,	irqload			)
		__field(unsigned int,	max_freq		)
		__field(unsigned int,	power_cost		)
		__field(	 int,	cstate			)
		__field(	 int,	dstate			)
	),

	TP_fast_assign(
		__entry->cpu			= rq->cpu;
		__entry->idle			= idle;
		__entry->nr_running		= rq->nr_running;
		__entry->nr_big_tasks		= rq->walt_stats.nr_big_tasks;
		__entry->load_scale_factor	= cpu_load_scale_factor(rq->cpu);
		__entry->capacity		= cpu_capacity(rq->cpu);
		__entry->cumulative_runnable_avg = rq->walt_stats.cumulative_runnable_avg;
		__entry->irqload		= irqload;
		__entry->max_freq		= cpu_max_freq(rq->cpu);
		__entry->power_cost		= power_cost;
		__entry->cstate			= rq->cstate;
		__entry->dstate			= rq->cluster->dstate;
	),

	TP_printk("cpu %u idle %d nr_run %u nr_big %u lsf %u capacity %u cr_avg %llu irqload %llu fmax %u power_cost %u cstate %d dstate %d",
		__entry->cpu, __entry->idle, __entry->nr_running,
		__entry->nr_big_tasks, __entry->load_scale_factor,
		__entry->capacity, __entry->cumulative_runnable_avg,
		__entry->irqload, __entry->max_freq, __entry->power_cost,
		__entry->cstate, __entry->dstate)
);

DEFINE_EVENT(sched_cpu_load, sched_cpu_load_lb,
	TP_PROTO(struct rq *rq, int idle, u64 irqload, unsigned int power_cost),
	TP_ARGS(rq, idle, irqload, power_cost)
);

TRACE_EVENT(sched_load_to_gov,

	TP_PROTO(struct rq *rq, u64 aggr_grp_load, u32 tt_load,
		 u64 freq_aggr_thresh, u64 load, int policy),
	TP_ARGS(rq, aggr_grp_load, tt_load, freq_aggr_thresh, load, policy),

	TP_STRUCT__entry(
		__field(	int,	cpu			)
		__field(	int,	policy			)
		__field(	int,	ed_task_pid		)
		__field(	u64,	aggr_grp_load		)
		__field(	u64,	freq_aggr_thresh	)
		__field(	u64,	tt_load			)
		__field(	u64,	rq_ps			)
		__field(	u64,	grp_rq_ps		)
		__field(	u64,	nt_ps			)
		__field(	u64,	grp_nt_ps		)
		__field(	u64,	pl			)
		__field(	u64,	load			)
	),

	TP_fast_assign(
		__entry->cpu		= cpu_of(rq);
		__entry->policy		= policy;
		__entry->ed_task_pid	= rq->ed_task ? rq->ed_task->pid : -1;
		__entry->aggr_grp_load	= aggr_grp_load;
		__entry->freq_aggr_thresh = freq_aggr_thresh;
		__entry->tt_load	= tt_load;
		__entry->rq_ps		= rq->prev_runnable_sum;
		__entry->grp_rq_ps	= rq->grp_time.prev_runnable_sum;
		__entry->nt_ps		= rq->nt_prev_runnable_sum;
		__entry->grp_nt_ps	= rq->grp_time.nt_prev_runnable_sum;
		__entry->pl		= rq->walt_stats.pred_demands_sum;
		__entry->load		= load;
	),

	TP_printk("cpu=%d policy=%d ed_task_pid=%d aggr_grp_load=%llu freq_aggr_thresh=%llu tt_load=%llu rq_ps=%llu grp_rq_ps=%llu nt_ps=%llu grp_nt_ps=%llu pl=%llu load=%llu",
		__entry->cpu, __entry->policy, __entry->ed_task_pid,
		__entry->aggr_grp_load, __entry->freq_aggr_thresh,
		__entry->tt_load, __entry->rq_ps, __entry->grp_rq_ps,
		__entry->nt_ps, __entry->grp_nt_ps, __entry->pl, __entry->load)
);
#endif

#ifdef CONFIG_SMP
TRACE_EVENT(sched_cpu_util,

	TP_PROTO(struct task_struct *p, int cpu, int task_util,
		 unsigned long curr_util, unsigned long new_cum_util, int sync),

	TP_ARGS(p, cpu, task_util, curr_util, new_cum_util, sync),

	TP_STRUCT__entry(
		__array(char,	comm,	TASK_COMM_LEN	)
		__field(int,		pid		)
		__field(unsigned int,	cpu		)
		__field(int,		task_util	)
		__field(unsigned int,	nr_running	)
		__field(long,		cpu_util	)
		__field(long,		cpu_util_cum	)
		__field(long,		new_cum_util	)
		__field(unsigned int,	capacity_curr	)
		__field(unsigned int,	capacity	)
		__field(unsigned long,	curr_util	)
		__field(int,		sync		)
		__field(int,		idle_state	)
		__field(unsigned int,	irqload		)
		__field(int,		high_irqload	)
		__field(int,		task_in_cum_demand	)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid			= p->pid;
		__entry->cpu			= cpu;
		__entry->task_util		= task_util;
		__entry->nr_running		= cpu_rq(cpu)->nr_running;
		__entry->cpu_util		= cpu_util(cpu);
		__entry->cpu_util_cum		= cpu_util_cum(cpu, 0);
		__entry->new_cum_util		= new_cum_util;
		__entry->task_in_cum_demand	= task_in_cum_window_demand(cpu_rq(cpu), p);
		__entry->capacity_curr		= capacity_curr_of(cpu);
		__entry->capacity		= capacity_of(cpu);
		__entry->curr_util		= curr_util;
		__entry->sync			= sync;
		__entry->idle_state		= idle_get_state_idx(cpu_rq(cpu));
		__entry->irqload		= sched_irqload(cpu);
		__entry->high_irqload		= sched_cpu_high_irqload(cpu);
	),

	TP_printk("comm=%s pid=%d cpu=%d task_util=%d nr_running=%d cpu_util=%ld cpu_util_cum=%ld new_cum_util=%ld task_in_cum=%d capacity_curr=%u capacity=%u curr_util=%ld sync=%d idle_state=%d irqload=%u high_irqload=%u",
		__entry->comm, __entry->pid, __entry->cpu, __entry->task_util,
		__entry->nr_running, __entry->cpu_util, __entry->cpu_util_cum,
		__entry->new_cum_util, __entry->task_in_cum_demand,
		__entry->capacity_curr, __entry->capacity, __entry->curr_util,
		__entry->sync, __entry->idle_state, __entry->irqload,
		__entry->high_irqload)
);

TRACE_EVENT(sched_energy_diff_packing,

	TP_PROTO(struct task_struct *p, unsigned long task_util,
		 int targeted_cpus, int nrg_pack, int nrg_spread),

	TP_ARGS(p, task_util, targeted_cpus, nrg_pack, nrg_spread),

	TP_STRUCT__entry(
		__array(char,	comm,	TASK_COMM_LEN	)
		__field(int,		pid		)
		__field(unsigned long,	task_util	)
		__field(int,		targeted_cpus	)
		__field(int,		nrg_pack	)
		__field(int,		nrg_spread	)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid		= p->pid;
		__entry->task_util	= task_util;
		__entry->targeted_cpus	= targeted_cpus;
		__entry->nrg_pack	= nrg_pack;
		__entry->nrg_spread	= nrg_spread;
	),

	TP_printk("comm=%s pid=%d task_util=%lu targeted_cpus=%d nrg_pack=%d nrg_spread=%d nrg_diff=%d",
		__entry->comm, __entry->pid, __entry->task_util,
		__entry->targeted_cpus, __entry->nrg_pack,
		__entry->nrg_spread, __entry->nrg_pack - __entry->nrg_spread)
);
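
/*
 * Reading the event (editorial note): nrg_diff is computed at print time as
 * nrg_pack - nrg_spread, so a negative value means the packing placement was
 * estimated to be the cheaper of the two, e.g. nrg_pack=90 nrg_spread=100
 * yields nrg_diff=-10 (values hypothetical).
 */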

DECLARE_EVENT_CLASS(sched_task_util,

	TP_PROTO(struct task_struct *p, int task_cpu, unsigned long task_util,
		 int nominated_cpu, int target_cpu, int ediff, bool need_idle),

	TP_ARGS(p, task_cpu, task_util, nominated_cpu, target_cpu, ediff,
		need_idle),

	TP_STRUCT__entry(
		__array(char,	comm,	TASK_COMM_LEN	)
		__field(int,		pid		)
		__field(int,		task_cpu	)
		__field(unsigned long,	task_util	)
		__field(unsigned long,	cpu_util_freq	)
		__field(int,		nominated_cpu	)
		__field(int,		target_cpu	)
		__field(int,		ediff		)
		__field(bool,		need_idle	)
		__field(u64,		latency		)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid		= p->pid;
		__entry->task_cpu	= task_cpu;
		__entry->task_util	= task_util;
		__entry->cpu_util_freq	= cpu_util_freq(target_cpu, NULL);
		__entry->nominated_cpu	= nominated_cpu;
		__entry->target_cpu	= target_cpu;
		__entry->ediff		= ediff;
		__entry->need_idle	= need_idle;
		__entry->latency	= p->ravg.mark_start ?
						ktime_get_ns() -
						p->ravg.mark_start : 0;
	),

	TP_printk("comm=%s pid=%d task_cpu=%d task_util=%lu nominated_cpu=%d target_cpu=%d energy_diff=%d need_idle=%d latency=%llu",
		__entry->comm, __entry->pid, __entry->task_cpu,
		__entry->task_util, __entry->nominated_cpu,
		__entry->target_cpu, __entry->ediff, __entry->need_idle,
		__entry->latency)
);

DEFINE_EVENT(sched_task_util, sched_task_util_bias_to_waker,
	TP_PROTO(struct task_struct *p, int task_cpu, unsigned long task_util, int nominated_cpu, int target_cpu, int ediff, bool need_idle),
	TP_ARGS(p, task_cpu, task_util, nominated_cpu, target_cpu, ediff, need_idle)
);

DEFINE_EVENT(sched_task_util, sched_task_util_colocated,
	TP_PROTO(struct task_struct *p, int task_cpu, unsigned long task_util, int nominated_cpu, int target_cpu, int ediff, bool need_idle),
	TP_ARGS(p, task_cpu, task_util, nominated_cpu, target_cpu, ediff, need_idle)
);

DEFINE_EVENT(sched_task_util, sched_task_util_boosted,
	TP_PROTO(struct task_struct *p, int task_cpu, unsigned long task_util, int nominated_cpu, int target_cpu, int ediff, bool need_idle),
	TP_ARGS(p, task_cpu, task_util, nominated_cpu, target_cpu, ediff, need_idle)
);

DEFINE_EVENT(sched_task_util, sched_task_util_overutilzed,
	TP_PROTO(struct task_struct *p, int task_cpu, unsigned long task_util, int nominated_cpu, int target_cpu, int ediff, bool need_idle),
	TP_ARGS(p, task_cpu, task_util, nominated_cpu, target_cpu, ediff, need_idle)
);

DEFINE_EVENT(sched_task_util, sched_task_util_energy_diff,
	TP_PROTO(struct task_struct *p, int task_cpu, unsigned long task_util, int nominated_cpu, int target_cpu, int ediff, bool need_idle),
	TP_ARGS(p, task_cpu, task_util, nominated_cpu, target_cpu, ediff, need_idle)
);

DEFINE_EVENT(sched_task_util, sched_task_util_energy_aware,
	TP_PROTO(struct task_struct *p, int task_cpu, unsigned long task_util, int nominated_cpu, int target_cpu, int ediff, bool need_idle),
	TP_ARGS(p, task_cpu, task_util, nominated_cpu, target_cpu, ediff, need_idle)
);

DEFINE_EVENT(sched_task_util, sched_task_util_imbalance,
	TP_PROTO(struct task_struct *p, int task_cpu, unsigned long task_util, int nominated_cpu, int target_cpu, int ediff, bool need_idle),
	TP_ARGS(p, task_cpu, task_util, nominated_cpu, target_cpu, ediff, need_idle)
);
#endif

/*
 * Tracepoint for waking up a task:
 */
DECLARE_EVENT_CLASS(sched_wakeup_template,

	TP_PROTO(struct task_struct *p),

	TP_ARGS(__perf_task(p)),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
		__field(	int,	success			)
		__field(	int,	target_cpu		)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid		= p->pid;
		__entry->prio		= p->prio;
		__entry->success	= 1; /* rudiment, kill when possible */
		__entry->target_cpu	= task_cpu(p);
	),

	TP_printk("comm=%s pid=%d prio=%d target_cpu=%03d",
		  __entry->comm, __entry->pid, __entry->prio,
		  __entry->target_cpu)
);

/*
 * Tracepoint called when waking a task; this tracepoint is guaranteed to be
 * called from the waking context.
 */
DEFINE_EVENT(sched_wakeup_template, sched_waking,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

/*
 * Tracepoint called when the task is actually woken; p->state == TASK_RUNNING.
 * It is not always called from the waking context.
 */
DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

/*
 * Tracepoint for waking up a new task:
 */
DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

#ifdef CREATE_TRACE_POINTS
static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p)
{
#ifdef CONFIG_SCHED_DEBUG
	BUG_ON(p != current);
#endif /* CONFIG_SCHED_DEBUG */

	/*
	 * Preemption ignores task state, therefore preempted tasks are always
	 * RUNNING (we will not have dequeued if state != RUNNING).
	 */
	return preempt ? TASK_RUNNING | TASK_STATE_MAX : p->state;
}
#endif /* CREATE_TRACE_POINTS */

/*
 * Tracepoint for task switches, performed by the scheduler:
 */
TRACE_EVENT(sched_switch,

	TP_PROTO(bool preempt,
		 struct task_struct *prev,
		 struct task_struct *next),

	TP_ARGS(preempt, prev, next),

	TP_STRUCT__entry(
		__array(	char,	prev_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	prev_pid			)
		__field(	int,	prev_prio			)
		__field(	long,	prev_state			)
		__array(	char,	next_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	next_pid			)
		__field(	int,	next_prio			)
	),

	TP_fast_assign(
		memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
		__entry->prev_pid	= prev->pid;
		__entry->prev_prio	= prev->prio;
		__entry->prev_state	= __trace_sched_switch_state(preempt, prev);
		memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
		__entry->next_pid	= next->pid;
		__entry->next_prio	= next->prio;
	),

	TP_printk("prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%s%s ==> next_comm=%s next_pid=%d next_prio=%d",
		__entry->prev_comm, __entry->prev_pid, __entry->prev_prio,
		__entry->prev_state & (TASK_STATE_MAX-1) ?
		  __print_flags(__entry->prev_state & (TASK_STATE_MAX-1), "|",
				{ 1, "S"} , { 2, "D" }, { 4, "T" }, { 8, "t" },
				{ 16, "Z" }, { 32, "X" }, { 64, "x" },
				{ 128, "K" }, { 256, "W" }, { 512, "P" },
				{ 1024, "N" }) : "R",
		__entry->prev_state & TASK_STATE_MAX ? "+" : "",
		__entry->next_comm, __entry->next_pid, __entry->next_prio)
);
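
/*
 * Decoding prev_state (summary of the flag table above): S=interruptible
 * sleep, D=uninterruptible, T=stopped, t=ptrace-stopped, Z=zombie, X=dead,
 * x=task-dead, K=wakekill, W=waking, P=parked, N=noload; "R" means the task
 * was still runnable, and a trailing "+" marks a task that was preempted
 * while running (the TASK_STATE_MAX bit set by __trace_sched_switch_state()).
 */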

/*
 * Tracepoint for a task being migrated:
 */
TRACE_EVENT(sched_migrate_task,

	TP_PROTO(struct task_struct *p, int dest_cpu, unsigned int load),

	TP_ARGS(p, dest_cpu, load),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
		__field(unsigned int,	load			)
		__field(	int,	orig_cpu		)
		__field(	int,	dest_cpu		)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid		= p->pid;
		__entry->prio		= p->prio;
		__entry->load		= load;
		__entry->orig_cpu	= task_cpu(p);
		__entry->dest_cpu	= dest_cpu;
	),

	TP_printk("comm=%s pid=%d prio=%d load=%u orig_cpu=%d dest_cpu=%d",
		  __entry->comm, __entry->pid, __entry->prio, __entry->load,
		  __entry->orig_cpu, __entry->dest_cpu)
);

/*
 * Tracepoint for a CPU going offline/online:
 */
TRACE_EVENT(sched_cpu_hotplug,

	TP_PROTO(int affected_cpu, int error, int status),

	TP_ARGS(affected_cpu, error, status),

	TP_STRUCT__entry(
		__field(	int,	affected_cpu	)
		__field(	int,	error		)
		__field(	int,	status		)
	),

	TP_fast_assign(
		__entry->affected_cpu	= affected_cpu;
		__entry->error		= error;
		__entry->status		= status;
	),

	TP_printk("cpu %d %s error=%d", __entry->affected_cpu,
		  __entry->status ? "online" : "offline", __entry->error)
);

/*
 * Tracepoint for load balancing:
 */
#if NR_CPUS > 32
#error "Unsupported NR_CPUS for lb tracepoint."
#endif
TRACE_EVENT(sched_load_balance,

	TP_PROTO(int cpu, enum cpu_idle_type idle, int balance,
		 unsigned long group_mask, int busiest_nr_running,
		 unsigned long imbalance, unsigned int env_flags, int ld_moved,
		 unsigned int balance_interval),

	TP_ARGS(cpu, idle, balance, group_mask, busiest_nr_running,
		imbalance, env_flags, ld_moved, balance_interval),

	TP_STRUCT__entry(
		__field(	int,			cpu)
		__field(	enum cpu_idle_type,	idle)
		__field(	int,			balance)
		__field(	unsigned long,		group_mask)
		__field(	int,			busiest_nr_running)
		__field(	unsigned long,		imbalance)
		__field(	unsigned int,		env_flags)
		__field(	int,			ld_moved)
		__field(	unsigned int,		balance_interval)
	),

	TP_fast_assign(
		__entry->cpu			= cpu;
		__entry->idle			= idle;
		__entry->balance		= balance;
		__entry->group_mask		= group_mask;
		__entry->busiest_nr_running	= busiest_nr_running;
		__entry->imbalance		= imbalance;
		__entry->env_flags		= env_flags;
		__entry->ld_moved		= ld_moved;
		__entry->balance_interval	= balance_interval;
	),

	TP_printk("cpu=%d state=%s balance=%d group=%#lx busy_nr=%d imbalance=%lu flags=%#x ld_moved=%d bal_int=%d",
		__entry->cpu,
		__entry->idle == CPU_IDLE ? "idle" :
		(__entry->idle == CPU_NEWLY_IDLE ? "newly_idle" : "busy"),
		__entry->balance,
		__entry->group_mask, __entry->busiest_nr_running,
		__entry->imbalance, __entry->env_flags, __entry->ld_moved,
		__entry->balance_interval)
);
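
/*
 * Editorial note: group_mask packs the sched-group cpumask into a single
 * unsigned long, which is why the #error above rejects NR_CPUS > 32 builds;
 * the narrowest unsigned long this code targets is 32 bits.
 */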

DECLARE_EVENT_CLASS(sched_process_template,

	TP_PROTO(struct task_struct *p),

	TP_ARGS(p),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid		= p->pid;
		__entry->prio		= p->prio;
	),

	TP_printk("comm=%s pid=%d prio=%d",
		  __entry->comm, __entry->pid, __entry->prio)
);

/*
 * Tracepoint for freeing a task:
 */
DEFINE_EVENT(sched_process_template, sched_process_free,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

/*
 * Tracepoint for a task exiting:
 */
DEFINE_EVENT(sched_process_template, sched_process_exit,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

/*
 * Tracepoint for waiting on task to unschedule:
 */
DEFINE_EVENT(sched_process_template, sched_wait_task,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

/*
 * Tracepoint for a waiting task:
 */
TRACE_EVENT(sched_process_wait,

	TP_PROTO(struct pid *pid),

	TP_ARGS(pid),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
		__entry->pid		= pid_nr(pid);
		__entry->prio		= current->prio;
	),

	TP_printk("comm=%s pid=%d prio=%d",
		  __entry->comm, __entry->pid, __entry->prio)
);

/*
 * Tracepoint for do_fork:
 */
TRACE_EVENT(sched_process_fork,

	TP_PROTO(struct task_struct *parent, struct task_struct *child),

	TP_ARGS(parent, child),

	TP_STRUCT__entry(
		__array(	char,	parent_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	parent_pid			)
		__array(	char,	child_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	child_pid			)
	),

	TP_fast_assign(
		memcpy(__entry->parent_comm, parent->comm, TASK_COMM_LEN);
		__entry->parent_pid	= parent->pid;
		memcpy(__entry->child_comm, child->comm, TASK_COMM_LEN);
		__entry->child_pid	= child->pid;
	),

	TP_printk("comm=%s pid=%d child_comm=%s child_pid=%d",
		__entry->parent_comm, __entry->parent_pid,
		__entry->child_comm, __entry->child_pid)
);

/*
 * Tracepoint for exec:
 */
TRACE_EVENT(sched_process_exec,

	TP_PROTO(struct task_struct *p, pid_t old_pid,
		 struct linux_binprm *bprm),

	TP_ARGS(p, old_pid, bprm),

	TP_STRUCT__entry(
		__string(	filename,	bprm->filename	)
		__field(	pid_t,		pid		)
		__field(	pid_t,		old_pid		)
	),

	TP_fast_assign(
		__assign_str(filename, bprm->filename);
		__entry->pid		= p->pid;
		__entry->old_pid	= old_pid;
	),

	TP_printk("filename=%s pid=%d old_pid=%d", __get_str(filename),
		  __entry->pid, __entry->old_pid)
);

/*
 * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE;
 *     adding sched_stat support to SCHED_FIFO/RR would be welcome.
 */
DECLARE_EVENT_CLASS(sched_stat_template,

	TP_PROTO(struct task_struct *tsk, u64 delay),

	TP_ARGS(__perf_task(tsk), __perf_count(delay)),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN	)
		__field( pid_t,	pid			)
		__field( u64,	delay			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid	= tsk->pid;
		__entry->delay	= delay;
	),

	TP_printk("comm=%s pid=%d delay=%Lu [ns]",
			__entry->comm, __entry->pid,
			(unsigned long long)__entry->delay)
);

/*
 * Tracepoint for accounting wait time (time the task is runnable
 * but not actually running due to scheduler contention).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_wait,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting sleep time (time the task is not runnable,
 * including iowait, see below).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_sleep,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting iowait time (time the task is not runnable
 * due to waiting on IO to complete).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_iowait,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting blocked time (time the task is in
 * uninterruptible sleep).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_blocked,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));
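
/*
 * Usage sketch (standard perf mechanics, not specific to this file): these
 * events carry __perf_count(delay), so perf can aggregate the delay values
 * directly, e.g.:
 *
 *	perf record -e sched:sched_stat_sleep -e sched:sched_stat_iowait -a sleep 10
 *	perf report
 */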

/*
 * Tracepoint for recording the cause of uninterruptible sleep.
 */
TRACE_EVENT(sched_blocked_reason,

	TP_PROTO(struct task_struct *tsk),

	TP_ARGS(tsk),

	TP_STRUCT__entry(
		__field( pid_t,	pid	)
		__field( void*,	caller	)
		__field( bool,	io_wait	)
	),

	TP_fast_assign(
		__entry->pid		= tsk->pid;
		__entry->caller		= (void *)get_wchan(tsk);
		__entry->io_wait	= tsk->in_iowait;
	),

	TP_printk("pid=%d iowait=%d caller=%pS", __entry->pid, __entry->io_wait, __entry->caller)
);

/*
 * Tracepoint for accounting runtime (time the task is executing
 * on a CPU).
 */
DECLARE_EVENT_CLASS(sched_stat_runtime,

	TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),

	TP_ARGS(tsk, __perf_count(runtime), vruntime),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN	)
		__field( pid_t,	pid			)
		__field( u64,	runtime			)
		__field( u64,	vruntime		)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid		= tsk->pid;
		__entry->runtime	= runtime;
		__entry->vruntime	= vruntime;
	),

	TP_printk("comm=%s pid=%d runtime=%Lu [ns] vruntime=%Lu [ns]",
			__entry->comm, __entry->pid,
			(unsigned long long)__entry->runtime,
			(unsigned long long)__entry->vruntime)
);

DEFINE_EVENT(sched_stat_runtime, sched_stat_runtime,
	     TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
	     TP_ARGS(tsk, runtime, vruntime));

/*
 * Tracepoint for showing priority inheritance modifying a task's
 * priority.
 */
TRACE_EVENT(sched_pi_setprio,

	TP_PROTO(struct task_struct *tsk, int newprio),

	TP_ARGS(tsk, newprio),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN	)
		__field( pid_t,	pid			)
		__field( int,	oldprio			)
		__field( int,	newprio			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid		= tsk->pid;
		__entry->oldprio	= tsk->prio;
		__entry->newprio	= newprio;
	),

	TP_printk("comm=%s pid=%d oldprio=%d newprio=%d",
			__entry->comm, __entry->pid,
			__entry->oldprio, __entry->newprio)
);

#ifdef CONFIG_DETECT_HUNG_TASK
TRACE_EVENT(sched_process_hang,
	TP_PROTO(struct task_struct *tsk),
	TP_ARGS(tsk),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN	)
		__field( pid_t,	pid			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid = tsk->pid;
	),

	TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
);
#endif /* CONFIG_DETECT_HUNG_TASK */

DECLARE_EVENT_CLASS(sched_move_task_template,

	TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu),

	TP_ARGS(tsk, src_cpu, dst_cpu),

	TP_STRUCT__entry(
		__field( pid_t,	pid	)
		__field( pid_t,	tgid	)
		__field( pid_t,	ngid	)
		__field( int,	src_cpu	)
		__field( int,	src_nid	)
		__field( int,	dst_cpu	)
		__field( int,	dst_nid	)
	),

	TP_fast_assign(
		__entry->pid		= task_pid_nr(tsk);
		__entry->tgid		= task_tgid_nr(tsk);
		__entry->ngid		= task_numa_group_id(tsk);
		__entry->src_cpu	= src_cpu;
		__entry->src_nid	= cpu_to_node(src_cpu);
		__entry->dst_cpu	= dst_cpu;
		__entry->dst_nid	= cpu_to_node(dst_cpu);
	),

	TP_printk("pid=%d tgid=%d ngid=%d src_cpu=%d src_nid=%d dst_cpu=%d dst_nid=%d",
			__entry->pid, __entry->tgid, __entry->ngid,
			__entry->src_cpu, __entry->src_nid,
			__entry->dst_cpu, __entry->dst_nid)
);

/*
 * Tracks migration of tasks from one runqueue to another. Can be used to
 * detect if automatic NUMA balancing is bouncing between nodes.
 */
DEFINE_EVENT(sched_move_task_template, sched_move_numa,
	TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu),

	TP_ARGS(tsk, src_cpu, dst_cpu)
);

DEFINE_EVENT(sched_move_task_template, sched_stick_numa,
	TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu),

	TP_ARGS(tsk, src_cpu, dst_cpu)
);

TRACE_EVENT(sched_swap_numa,

	TP_PROTO(struct task_struct *src_tsk, int src_cpu,
		 struct task_struct *dst_tsk, int dst_cpu),

	TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu),

	TP_STRUCT__entry(
		__field( pid_t,	src_pid		)
		__field( pid_t,	src_tgid	)
		__field( pid_t,	src_ngid	)
		__field( int,	src_cpu		)
		__field( int,	src_nid		)
		__field( pid_t,	dst_pid		)
		__field( pid_t,	dst_tgid	)
		__field( pid_t,	dst_ngid	)
		__field( int,	dst_cpu		)
		__field( int,	dst_nid		)
	),

	TP_fast_assign(
		__entry->src_pid	= task_pid_nr(src_tsk);
		__entry->src_tgid	= task_tgid_nr(src_tsk);
		__entry->src_ngid	= task_numa_group_id(src_tsk);
		__entry->src_cpu	= src_cpu;
		__entry->src_nid	= cpu_to_node(src_cpu);
		__entry->dst_pid	= task_pid_nr(dst_tsk);
		__entry->dst_tgid	= task_tgid_nr(dst_tsk);
		__entry->dst_ngid	= task_numa_group_id(dst_tsk);
		__entry->dst_cpu	= dst_cpu;
		__entry->dst_nid	= cpu_to_node(dst_cpu);
	),

	TP_printk("src_pid=%d src_tgid=%d src_ngid=%d src_cpu=%d src_nid=%d dst_pid=%d dst_tgid=%d dst_ngid=%d dst_cpu=%d dst_nid=%d",
			__entry->src_pid, __entry->src_tgid, __entry->src_ngid,
			__entry->src_cpu, __entry->src_nid,
			__entry->dst_pid, __entry->dst_tgid, __entry->dst_ngid,
			__entry->dst_cpu, __entry->dst_nid)
);

/*
 * Tracepoint for waking a polling cpu without an IPI.
 */
TRACE_EVENT(sched_wake_idle_without_ipi,

	TP_PROTO(int cpu),

	TP_ARGS(cpu),

	TP_STRUCT__entry(
		__field( int,	cpu	)
	),

	TP_fast_assign(
		__entry->cpu	= cpu;
	),

	TP_printk("cpu=%d", __entry->cpu)
);

TRACE_EVENT(sched_contrib_scale_f,

	TP_PROTO(int cpu, unsigned long freq_scale_factor,
		 unsigned long cpu_scale_factor),

	TP_ARGS(cpu, freq_scale_factor, cpu_scale_factor),

	TP_STRUCT__entry(
		__field(int, cpu)
		__field(unsigned long, freq_scale_factor)
		__field(unsigned long, cpu_scale_factor)
	),

	TP_fast_assign(
		__entry->cpu = cpu;
		__entry->freq_scale_factor = freq_scale_factor;
		__entry->cpu_scale_factor = cpu_scale_factor;
	),

	TP_printk("cpu=%d freq_scale_factor=%lu cpu_scale_factor=%lu",
		  __entry->cpu, __entry->freq_scale_factor,
		  __entry->cpu_scale_factor)
);
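
/*
 * Example output (illustrative values; both factors are typically
 * expressed relative to SCHED_CAPACITY_SCALE, i.e. 1024):
 *
 *	sched_contrib_scale_f: cpu=1 freq_scale_factor=512 cpu_scale_factor=1024
 */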

#ifdef CONFIG_SMP
/*
 * Tracepoint for accounting sched averages for tasks.
 */
TRACE_EVENT(sched_load_avg_task,

	TP_PROTO(struct task_struct *tsk, struct sched_avg *avg),

	TP_ARGS(tsk, avg),

	TP_STRUCT__entry(
		__array( char, comm, TASK_COMM_LEN )
		__field( pid_t, pid )
		__field( int, cpu )
		__field( unsigned long, load_avg )
		__field( unsigned long, util_avg )
		__field( u64, load_sum )
		__field( u32, util_sum )
		__field( u32, period_contrib )
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid = tsk->pid;
		__entry->cpu = task_cpu(tsk);
		__entry->load_avg = avg->load_avg;
		__entry->util_avg = avg->util_avg;
		__entry->load_sum = avg->load_sum;
		__entry->util_sum = avg->util_sum;
		__entry->period_contrib = avg->period_contrib;
	),

	TP_printk("comm=%s pid=%d cpu=%d load_avg=%lu util_avg=%lu load_sum=%llu"
		  " util_sum=%u period_contrib=%u",
		  __entry->comm,
		  __entry->pid,
		  __entry->cpu,
		  __entry->load_avg,
		  __entry->util_avg,
		  (u64)__entry->load_sum,
		  (u32)__entry->util_sum,
		  (u32)__entry->period_contrib)
);
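
/*
 * Illustrative sketch: a fair-class PELT update path would emit this for
 * task entities only, e.g.
 *
 *	if (entity_is_task(se))
 *		trace_sched_load_avg_task(task_of(se), &se->avg);
 *
 * since task_of() is only valid for non-group entities.
 */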

/*
 * Tracepoint for accounting sched averages for cpus.
 */
TRACE_EVENT(sched_load_avg_cpu,

	TP_PROTO(int cpu, struct cfs_rq *cfs_rq),

	TP_ARGS(cpu, cfs_rq),

	TP_STRUCT__entry(
		__field( int, cpu )
		__field( unsigned long, load_avg )
		__field( unsigned long, util_avg )
	),

	TP_fast_assign(
		__entry->cpu = cpu;
		__entry->load_avg = cfs_rq->avg.load_avg;
		__entry->util_avg = cfs_rq->avg.util_avg;
	),

	TP_printk("cpu=%d load_avg=%lu util_avg=%lu",
		  __entry->cpu, __entry->load_avg, __entry->util_avg)
);
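
/*
 * Example output (illustrative values), pairing with the per-task event
 * above:
 *
 *	sched_load_avg_cpu: cpu=2 load_avg=345 util_avg=123
 */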

/*
 * Tracepoint for sched_tune_config settings
 */
TRACE_EVENT(sched_tune_config,

	TP_PROTO(int boost),

	TP_ARGS(boost),

	TP_STRUCT__entry(
		__field( int, boost )
	),

	TP_fast_assign(
		__entry->boost = boost;
	),

	TP_printk("boost=%d", __entry->boost)
);

/*
 * Tracepoint for accounting CPU boosted utilization
 */
TRACE_EVENT(sched_boost_cpu,

	TP_PROTO(int cpu, unsigned long util, long margin),

	TP_ARGS(cpu, util, margin),

	TP_STRUCT__entry(
		__field( int, cpu )
		__field( unsigned long, util )
		__field( long, margin )
	),

	TP_fast_assign(
		__entry->cpu = cpu;
		__entry->util = util;
		__entry->margin = margin;
	),

	TP_printk("cpu=%d util=%lu margin=%ld",
		  __entry->cpu,
		  __entry->util,
		  __entry->margin)
);
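
/*
 * Note: margin is signed; schedtune's boosted CPU utilization is
 * util + margin, so a negative boost shrinks the apparent demand.
 * Example output (illustrative values):
 *
 *	sched_boost_cpu: cpu=0 util=200 margin=56
 */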

/*
 * Tracepoint for schedtune_tasks_update
 */
TRACE_EVENT(sched_tune_tasks_update,

	TP_PROTO(struct task_struct *tsk, int cpu, int tasks, int idx,
		 int boost, int max_boost),

	TP_ARGS(tsk, cpu, tasks, idx, boost, max_boost),

	TP_STRUCT__entry(
		__array( char, comm, TASK_COMM_LEN )
		__field( pid_t, pid )
		__field( int, cpu )
		__field( int, tasks )
		__field( int, idx )
		__field( int, boost )
		__field( int, max_boost )
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid = tsk->pid;
		__entry->cpu = cpu;
		__entry->tasks = tasks;
		__entry->idx = idx;
		__entry->boost = boost;
		__entry->max_boost = max_boost;
	),

	TP_printk("pid=%d comm=%s "
		  "cpu=%d tasks=%d idx=%d boost=%d max_boost=%d",
		  __entry->pid, __entry->comm,
		  __entry->cpu, __entry->tasks, __entry->idx,
		  __entry->boost, __entry->max_boost)
);

/*
 * Tracepoint for schedtune_boostgroup_update
 */
TRACE_EVENT(sched_tune_boostgroup_update,

	TP_PROTO(int cpu, int variation, int max_boost),

	TP_ARGS(cpu, variation, max_boost),

	TP_STRUCT__entry(
		__field( int, cpu )
		__field( int, variation )
		__field( int, max_boost )
	),

	TP_fast_assign(
		__entry->cpu = cpu;
		__entry->variation = variation;
		__entry->max_boost = max_boost;
	),

	TP_printk("cpu=%d variation=%d max_boost=%d",
		  __entry->cpu, __entry->variation, __entry->max_boost)
);

/*
 * Tracepoint for accounting task boosted utilization
 */
TRACE_EVENT(sched_boost_task,

	TP_PROTO(struct task_struct *tsk, unsigned long util, long margin),

	TP_ARGS(tsk, util, margin),

	TP_STRUCT__entry(
		__array( char, comm, TASK_COMM_LEN )
		__field( pid_t, pid )
		__field( unsigned long, util )
		__field( long, margin )
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid = tsk->pid;
		__entry->util = util;
		__entry->margin = margin;
	),

	TP_printk("comm=%s pid=%d util=%lu margin=%ld",
		  __entry->comm, __entry->pid,
		  __entry->util,
		  __entry->margin)
);
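
/*
 * Illustrative sketch: the per-task counterpart of sched_boost_cpu,
 * typically emitted where the boosted task utilization is computed, e.g.
 *
 *	trace_sched_boost_task(task, util, margin);
 */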

/*
 * Tracepoint for accounting sched group energy
 */
TRACE_EVENT(sched_energy_diff,

	TP_PROTO(struct task_struct *tsk, int scpu, int dcpu, int udelta,
		 int nrgb, int nrga, int nrgd, int capb, int capa, int capd,
		 int nrgn, int nrgp),

	TP_ARGS(tsk, scpu, dcpu, udelta,
		nrgb, nrga, nrgd, capb, capa, capd,
		nrgn, nrgp),

	TP_STRUCT__entry(
		__array( char, comm, TASK_COMM_LEN )
		__field( pid_t, pid )
		__field( int, scpu )
		__field( int, dcpu )
		__field( int, udelta )
		__field( int, nrgb )
		__field( int, nrga )
		__field( int, nrgd )
		__field( int, capb )
		__field( int, capa )
		__field( int, capd )
		__field( int, nrgn )
		__field( int, nrgp )
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid = tsk->pid;
		__entry->scpu = scpu;
		__entry->dcpu = dcpu;
		__entry->udelta = udelta;
		__entry->nrgb = nrgb;
		__entry->nrga = nrga;
		__entry->nrgd = nrgd;
		__entry->capb = capb;
		__entry->capa = capa;
		__entry->capd = capd;
		__entry->nrgn = nrgn;
		__entry->nrgp = nrgp;
	),

	TP_printk("pid=%d comm=%s "
		  "src_cpu=%d dst_cpu=%d usage_delta=%d "
		  "nrg_before=%d nrg_after=%d nrg_diff=%d "
		  "cap_before=%d cap_after=%d cap_delta=%d "
		  "nrg_delta=%d nrg_payoff=%d",
		  __entry->pid, __entry->comm,
		  __entry->scpu, __entry->dcpu, __entry->udelta,
		  __entry->nrgb, __entry->nrga, __entry->nrgd,
		  __entry->capb, __entry->capa, __entry->capd,
		  __entry->nrgn, __entry->nrgp)
);
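
/*
 * The terse field names expand in the output: nrgb/nrga/nrgd are energy
 * before/after/diff, capb/capa/capd the capacity equivalents. Example
 * line (illustrative values):
 *
 *	sched_energy_diff: pid=321 comm=foo src_cpu=0 dst_cpu=4
 *	usage_delta=10 nrg_before=120 nrg_after=100 nrg_diff=-20 ...
 */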

TRACE_EVENT(sched_group_energy,

	TP_PROTO(int cpu, long group_util, u64 total_nrg,
		 int busy_nrg, int idle_nrg, int grp_idle_idx,
		 int new_capacity),

	TP_ARGS(cpu, group_util, total_nrg,
		busy_nrg, idle_nrg, grp_idle_idx,
		new_capacity),

	TP_STRUCT__entry(
		__field(int, cpu)
		__field(long, group_util)
		__field(u64, total_nrg)
		__field(int, busy_nrg)
		__field(int, idle_nrg)
		__field(int, grp_idle_idx)
		__field(int, new_capacity)
	),

	TP_fast_assign(
		__entry->cpu = cpu;
		__entry->group_util = group_util;
		__entry->total_nrg = total_nrg;
		__entry->busy_nrg = busy_nrg;
		__entry->idle_nrg = idle_nrg;
		__entry->grp_idle_idx = grp_idle_idx;
		__entry->new_capacity = new_capacity;
	),

	TP_printk("cpu=%d group_util=%ld total_nrg=%llu busy_nrg=%d idle_nrg=%d grp_idle_idx=%d new_capacity=%d",
		  __entry->cpu, __entry->group_util,
		  __entry->total_nrg, __entry->busy_nrg, __entry->idle_nrg,
		  __entry->grp_idle_idx, __entry->new_capacity)
);

/*
 * Tracepoint for the schedtune energy/capacity filter (the payoff
 * decision on a proposed task placement)
 */
TRACE_EVENT(sched_tune_filter,

	TP_PROTO(int nrg_delta, int cap_delta,
		 int nrg_gain, int cap_gain,
		 int payoff, int region),

	TP_ARGS(nrg_delta, cap_delta, nrg_gain, cap_gain, payoff, region),

	TP_STRUCT__entry(
		__field( int, nrg_delta )
		__field( int, cap_delta )
		__field( int, nrg_gain )
		__field( int, cap_gain )
		__field( int, payoff )
		__field( int, region )
	),

	TP_fast_assign(
		__entry->nrg_delta = nrg_delta;
		__entry->cap_delta = cap_delta;
		__entry->nrg_gain = nrg_gain;
		__entry->cap_gain = cap_gain;
		__entry->payoff = payoff;
		__entry->region = region;
	),

	TP_printk("nrg_delta=%d cap_delta=%d nrg_gain=%d cap_gain=%d payoff=%d region=%d",
		  __entry->nrg_delta, __entry->cap_delta,
		  __entry->nrg_gain, __entry->cap_gain,
		  __entry->payoff, __entry->region)
);

/*
 * Tracepoint for system overutilized flag
 */
TRACE_EVENT(sched_overutilized,

	TP_PROTO(bool overutilized),

	TP_ARGS(overutilized),

	TP_STRUCT__entry(
		__field( bool, overutilized )
	),

	TP_fast_assign(
		__entry->overutilized = overutilized;
	),

	TP_printk("overutilized=%d",
		  __entry->overutilized ? 1 : 0)
);
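
/*
 * Illustrative sketch: emitted when the root domain's overutilized
 * state flips, e.g.
 *
 *	if (!rd->overutilized && cpu_overutilized(cpu)) {
 *		rd->overutilized = true;
 *		trace_sched_overutilized(true);
 *	}
 */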
#endif /* CONFIG_SMP */

TRACE_EVENT(sched_get_nr_running_avg,

	TP_PROTO(int avg, int big_avg, int iowait_avg,
		 unsigned int max_nr, unsigned int big_max_nr),

	TP_ARGS(avg, big_avg, iowait_avg, max_nr, big_max_nr),

	TP_STRUCT__entry(
		__field( int, avg )
		__field( int, big_avg )
		__field( int, iowait_avg )
		__field( unsigned int, max_nr )
		__field( unsigned int, big_max_nr )
	),

	TP_fast_assign(
		__entry->avg = avg;
		__entry->big_avg = big_avg;
		__entry->iowait_avg = iowait_avg;
		__entry->max_nr = max_nr;
		__entry->big_max_nr = big_max_nr;
	),

	TP_printk("avg=%d big_avg=%d iowait_avg=%d max_nr=%u big_max_nr=%u",
		  __entry->avg, __entry->big_avg, __entry->iowait_avg,
		  __entry->max_nr, __entry->big_max_nr)
);
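
/*
 * Example output (illustrative values): run-queue occupancy averages
 * plus the peak concurrency seen over the sampling period:
 *
 *	sched_get_nr_running_avg: avg=2 big_avg=1 iowait_avg=0 max_nr=4 big_max_nr=2
 */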

TRACE_EVENT(core_ctl_eval_need,

	TP_PROTO(unsigned int cpu, unsigned int old_need,
		 unsigned int new_need, unsigned int updated),

	TP_ARGS(cpu, old_need, new_need, updated),

	TP_STRUCT__entry(
		__field(u32, cpu)
		__field(u32, old_need)
		__field(u32, new_need)
		__field(u32, updated)
	),

	TP_fast_assign(
		__entry->cpu = cpu;
		__entry->old_need = old_need;
		__entry->new_need = new_need;
		__entry->updated = updated;
	),

	TP_printk("cpu=%u, old_need=%u, new_need=%u, updated=%u", __entry->cpu,
		  __entry->old_need, __entry->new_need, __entry->updated)
);

TRACE_EVENT(core_ctl_set_busy,

	TP_PROTO(unsigned int cpu, unsigned int busy,
		 unsigned int old_is_busy, unsigned int is_busy),

	TP_ARGS(cpu, busy, old_is_busy, is_busy),

	TP_STRUCT__entry(
		__field(u32, cpu)
		__field(u32, busy)
		__field(u32, old_is_busy)
		__field(u32, is_busy)
	),

	TP_fast_assign(
		__entry->cpu = cpu;
		__entry->busy = busy;
		__entry->old_is_busy = old_is_busy;
		__entry->is_busy = is_busy;
	),

	TP_printk("cpu=%u, busy=%u, old_is_busy=%u, new_is_busy=%u",
		  __entry->cpu, __entry->busy, __entry->old_is_busy,
		  __entry->is_busy)
);

TRACE_EVENT(core_ctl_set_boost,

	TP_PROTO(u32 refcount, s32 ret),

	TP_ARGS(refcount, ret),

	TP_STRUCT__entry(
		__field(u32, refcount)
		__field(s32, ret)
	),

	TP_fast_assign(
		__entry->refcount = refcount;
		__entry->ret = ret;
	),

	TP_printk("refcount=%u, ret=%d", __entry->refcount, __entry->ret)
);
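
/*
 * Illustrative note: core_ctl boosting is refcounted, so paired calls
 * such as
 *
 *	core_ctl_set_boost(true);	// refcount=1, ret=0
 *	core_ctl_set_boost(false);	// refcount=0, ret=0
 *
 * bracket a boost period; a negative ret reports a rejected request.
 */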

/*
 * sched_isolate - called when cores are isolated/unisolated
 *
 * @requested_cpu: the cpu requested to be isolated/unisolated
 * @isolated_cpus: mask of cores currently isolated
 * @start_time: sched_clock() timestamp taken when the operation started,
 *		used to report how long it took in us
 * @isolate: 1 if isolating, 0 if unisolating
 */
TRACE_EVENT(sched_isolate,

	TP_PROTO(unsigned int requested_cpu, unsigned int isolated_cpus,
		 u64 start_time, unsigned char isolate),

	TP_ARGS(requested_cpu, isolated_cpus, start_time, isolate),

	TP_STRUCT__entry(
		__field(u32, requested_cpu)
		__field(u32, isolated_cpus)
		__field(u32, time)
		__field(unsigned char, isolate)
	),

	TP_fast_assign(
		__entry->requested_cpu = requested_cpu;
		__entry->isolated_cpus = isolated_cpus;
		__entry->time = div64_u64(sched_clock() - start_time, 1000);
		__entry->isolate = isolate;
	),

	TP_printk("iso cpu=%u cpus=0x%x time=%u us isolated=%d",
		  __entry->requested_cpu, __entry->isolated_cpus,
		  __entry->time, __entry->isolate)
);
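
/*
 * Example (illustrative values): isolating CPU 2 while CPU 3 is already
 * isolated might log
 *
 *	sched_isolate: iso cpu=2 cpus=0xc time=20 us isolated=1
 *
 * time is computed in the event itself, scaling the sched_clock() delta
 * from ns to us with div64_u64().
 */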

#endif /* _TRACE_SCHED_H */

/* This part must be outside protection */
#include <trace/define_trace.h>