/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM sched

#if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SCHED_H

#include <linux/sched/numa_balancing.h>
#include <linux/tracepoint.h>
#include <linux/binfmts.h>
#include <linux/sched/idle.h>

/*
 * Tracepoint for calling kthread_stop, performed to end a kthread:
 */
TRACE_EVENT(sched_kthread_stop,

	TP_PROTO(struct task_struct *t),

	TP_ARGS(t),

	TP_STRUCT__entry(
		__array( char, comm, TASK_COMM_LEN )
		__field( pid_t, pid )
	),

	TP_fast_assign(
		memcpy(__entry->comm, t->comm, TASK_COMM_LEN);
		__entry->pid = t->pid;
	),

	TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
);

/*
 * Tracepoint for the return value of the kthread stopping:
 */
TRACE_EVENT(sched_kthread_stop_ret,

	TP_PROTO(int ret),

	TP_ARGS(ret),

	TP_STRUCT__entry(
		__field( int, ret )
	),

	TP_fast_assign(
		__entry->ret = ret;
	),

	TP_printk("ret=%d", __entry->ret)
);
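
/*
 * Each TRACE_EVENT() above generates a trace_<name>() hook. As a rough
 * sketch (not the verbatim call sites), kernel/kthread.c emits the pair
 * around stopping a kthread like so:
 *
 *	trace_sched_kthread_stop(k);
 *	...wait for the kthread to exit and collect its return value...
 *	trace_sched_kthread_stop_ret(ret);
 */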

/*
 * Tracepoint for task enqueue/dequeue:
 */
TRACE_EVENT(sched_enq_deq_task,

	TP_PROTO(struct task_struct *p, bool enqueue,
		 unsigned int cpus_allowed),

	TP_ARGS(p, enqueue, cpus_allowed),

	TP_STRUCT__entry(
		__array(char, comm, TASK_COMM_LEN)
		__field(pid_t, pid)
		__field(int, prio)
		__field(int, cpu)
		__field(bool, enqueue)
		__field(unsigned int, nr_running)
		__field(unsigned long, cpu_load)
		__field(unsigned int, rt_nr_running)
		__field(unsigned int, cpus_allowed)
		__field(unsigned int, demand)
		__field(unsigned int, pred_demand)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid = p->pid;
		__entry->prio = p->prio;
		__entry->cpu = task_cpu(p);
		__entry->enqueue = enqueue;
		__entry->nr_running = task_rq(p)->nr_running;
		__entry->cpu_load = task_rq(p)->cpu_load[0];
		__entry->rt_nr_running = task_rq(p)->rt.rt_nr_running;
		__entry->cpus_allowed = cpus_allowed;
		__entry->demand = task_load(p);
		__entry->pred_demand = task_pl(p);
	),

	TP_printk("cpu=%d %s comm=%s pid=%d prio=%d nr_running=%u cpu_load=%lu rt_nr_running=%u affine=%x demand=%u pred_demand=%u",
		  __entry->cpu,
		  __entry->enqueue ? "enqueue" : "dequeue",
		  __entry->comm, __entry->pid,
		  __entry->prio, __entry->nr_running,
		  __entry->cpu_load, __entry->rt_nr_running,
		  __entry->cpus_allowed, __entry->demand,
		  __entry->pred_demand)
);

/*
 * Tracepoint for waking up a task:
 */
DECLARE_EVENT_CLASS(sched_wakeup_template,

	TP_PROTO(struct task_struct *p),

	TP_ARGS(__perf_task(p)),

	TP_STRUCT__entry(
		__array( char, comm, TASK_COMM_LEN )
		__field( pid_t, pid )
		__field( int, prio )
		__field( int, success )
		__field( int, target_cpu )
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid = p->pid;
		__entry->prio = p->prio; /* XXX SCHED_DEADLINE */
		__entry->success = 1; /* rudiment, kill when possible */
		__entry->target_cpu = task_cpu(p);
	),

	TP_printk("comm=%s pid=%d prio=%d target_cpu=%03d",
		  __entry->comm, __entry->pid, __entry->prio,
		  __entry->target_cpu)
);

/*
 * Tracepoint called when waking a task; this tracepoint is guaranteed to be
 * called from the waking context.
 */
DEFINE_EVENT(sched_wakeup_template, sched_waking,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

/*
 * Tracepoint called when the task is actually woken; p->state == TASK_RUNNING.
 * It is not always called from the waking context.
 */
DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

/*
 * Tracepoint for waking up a new task:
 */
DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));
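
/*
 * Each DEFINE_EVENT() instantiates the class as a separate trace_<name>()
 * hook sharing the class's record layout; e.g. the scheduler core calls
 * trace_sched_waking(p) from try_to_wake_up() and trace_sched_wakeup(p)
 * once the task has actually transitioned to TASK_RUNNING.
 */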

#ifdef CREATE_TRACE_POINTS
static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p)
{
	unsigned int state;

#ifdef CONFIG_SCHED_DEBUG
	BUG_ON(p != current);
#endif /* CONFIG_SCHED_DEBUG */

	/*
	 * Preemption ignores task state, therefore preempted tasks are always
	 * RUNNING (we will not have dequeued if state != RUNNING).
	 */
	if (preempt)
		return TASK_REPORT_MAX;

	/*
	 * task_state_index() uses fls() and returns a value from the 0-8
	 * range. Decrement it by 1 (except for the TASK_RUNNING state, i.e.
	 * 0) before using it for the left-shift operation to get the correct
	 * task->state mapping.
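	 *
	 * E.g. TASK_UNINTERRUPTIBLE (0x0002): task_state_index() returns 2,
	 * and 1 << (2 - 1) == 0x0002, which the sched_switch TP_printk()
	 * below decodes as "D".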
	 */
	state = task_state_index(p);

	return state ? (1 << (state - 1)) : state;
}
#endif /* CREATE_TRACE_POINTS */

/*
 * Tracepoint for task switches, performed by the scheduler:
 */
TRACE_EVENT(sched_switch,

	TP_PROTO(bool preempt,
		 struct task_struct *prev,
		 struct task_struct *next),

	TP_ARGS(preempt, prev, next),

	TP_STRUCT__entry(
		__array( char, prev_comm, TASK_COMM_LEN )
		__field( pid_t, prev_pid )
		__field( int, prev_prio )
		__field( long, prev_state )
		__array( char, next_comm, TASK_COMM_LEN )
		__field( pid_t, next_pid )
		__field( int, next_prio )
	),

	TP_fast_assign(
		memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
		__entry->prev_pid = prev->pid;
		__entry->prev_prio = prev->prio;
		__entry->prev_state = __trace_sched_switch_state(preempt, prev);
		memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
		__entry->next_pid = next->pid;
		__entry->next_prio = next->prio;
		/* XXX SCHED_DEADLINE */
	),

	TP_printk("prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%s%s ==> next_comm=%s next_pid=%d next_prio=%d",
		__entry->prev_comm, __entry->prev_pid, __entry->prev_prio,

		(__entry->prev_state & (TASK_REPORT_MAX - 1)) ?
		  __print_flags(__entry->prev_state & (TASK_REPORT_MAX - 1), "|",
				{ 0x01, "S" }, { 0x02, "D" }, { 0x04, "T" },
				{ 0x08, "t" }, { 0x10, "X" }, { 0x20, "Z" },
				{ 0x40, "P" }, { 0x80, "I" }) :
		  "R",

		__entry->prev_state & TASK_REPORT_MAX ? "+" : "",
		__entry->next_comm, __entry->next_pid, __entry->next_prio)
);
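
/*
 * A rendered line then looks like, for example (pids illustrative):
 *
 *   prev_comm=bash prev_pid=2143 prev_prio=120 prev_state=S ==>
 *   next_comm=swapper/0 next_pid=0 next_prio=120
 *
 * with prev_state=R+ for a task that was preempted while runnable.
 */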

/*
 * Tracepoint for a task being migrated:
 */
TRACE_EVENT(sched_migrate_task,

	TP_PROTO(struct task_struct *p, int dest_cpu),

	TP_ARGS(p, dest_cpu),

	TP_STRUCT__entry(
		__array( char, comm, TASK_COMM_LEN )
		__field( pid_t, pid )
		__field( int, prio )
		__field( int, orig_cpu )
		__field( int, dest_cpu )
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid = p->pid;
		__entry->prio = p->prio; /* XXX SCHED_DEADLINE */
		__entry->orig_cpu = task_cpu(p);
		__entry->dest_cpu = dest_cpu;
	),

	TP_printk("comm=%s pid=%d prio=%d orig_cpu=%d dest_cpu=%d",
		  __entry->comm, __entry->pid, __entry->prio,
		  __entry->orig_cpu, __entry->dest_cpu)
);

/*
 * Tracepoint for load balancing:
 */
#ifdef CONFIG_SMP
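/*
 * These events report a scheduling group as a raw unsigned long bitmask
 * (group_mask), which only works while every possible CPU fits in one
 * long; on larger configurations the events are stubbed out so the
 * trace_*() call sites compile away.
 */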
#if NR_CPUS > BITS_PER_LONG
#define trace_sched_load_balance_sg_stats(...)
#define trace_sched_load_balance_stats(...)
#define trace_sched_load_balance(...)
#define trace_sched_load_balance_nohz_kick(...)
#else
TRACE_EVENT(sched_load_balance,

	TP_PROTO(int cpu, enum cpu_idle_type idle, int balance,
		 unsigned long group_mask, int busiest_nr_running,
		 unsigned long imbalance, unsigned int env_flags, int ld_moved,
		 unsigned int balance_interval, int active_balance),

	TP_ARGS(cpu, idle, balance, group_mask, busiest_nr_running,
		imbalance, env_flags, ld_moved, balance_interval,
		active_balance),

	TP_STRUCT__entry(
		__field(int, cpu)
		__field(enum cpu_idle_type, idle)
		__field(int, balance)
		__field(unsigned long, group_mask)
		__field(int, busiest_nr_running)
		__field(unsigned long, imbalance)
		__field(unsigned int, env_flags)
		__field(int, ld_moved)
		__field(unsigned int, balance_interval)
		__field(int, active_balance)
	),

	TP_fast_assign(
		__entry->cpu = cpu;
		__entry->idle = idle;
		__entry->balance = balance;
		__entry->group_mask = group_mask;
		__entry->busiest_nr_running = busiest_nr_running;
		__entry->imbalance = imbalance;
		__entry->env_flags = env_flags;
		__entry->ld_moved = ld_moved;
		__entry->balance_interval = balance_interval;
		__entry->active_balance = active_balance;
	),

	TP_printk("cpu=%d state=%s balance=%d group=%#lx busy_nr=%d imbalance=%ld flags=%#x ld_moved=%d bal_int=%d active_balance=%d",
		  __entry->cpu,
		  __entry->idle == CPU_IDLE ? "idle" :
		  (__entry->idle == CPU_NEWLY_IDLE ? "newly_idle" : "busy"),
		  __entry->balance,
		  __entry->group_mask, __entry->busiest_nr_running,
		  __entry->imbalance, __entry->env_flags, __entry->ld_moved,
		  __entry->balance_interval, __entry->active_balance)
);

TRACE_EVENT(sched_load_balance_nohz_kick,

	TP_PROTO(int cpu, int kick_cpu),

	TP_ARGS(cpu, kick_cpu),

	TP_STRUCT__entry(
		__field(int, cpu)
		__field(unsigned int, cpu_nr)
		__field(unsigned long, misfit_task_load)
		__field(int, cpu_overutil)
		__field(int, kick_cpu)
		__field(unsigned long, nohz_flags)
	),

	TP_fast_assign(
		__entry->cpu = cpu;
		__entry->cpu_nr = cpu_rq(cpu)->nr_running;
		__entry->misfit_task_load = cpu_rq(cpu)->misfit_task_load;
		__entry->cpu_overutil = cpu_overutilized(cpu);
		__entry->kick_cpu = kick_cpu;
		__entry->nohz_flags = atomic_read(nohz_flags(kick_cpu));
	),

	TP_printk("cpu=%d nr_run=%u misfit_task_load=%lu overutilized=%d kick_cpu=%d nohz_flags=0x%lx",
		  __entry->cpu, __entry->cpu_nr,
		  __entry->misfit_task_load, __entry->cpu_overutil,
		  __entry->kick_cpu, __entry->nohz_flags)
);

TRACE_EVENT(sched_load_balance_sg_stats,

	TP_PROTO(unsigned long sg_cpus, int group_type, unsigned int idle_cpus,
		 unsigned int sum_nr_running, unsigned long group_load,
		 unsigned long group_capacity, unsigned long group_util,
		 int group_no_capacity, unsigned long load_per_task,
		 unsigned long misfit_load, unsigned long busiest),

	TP_ARGS(sg_cpus, group_type, idle_cpus, sum_nr_running, group_load,
		group_capacity, group_util, group_no_capacity, load_per_task,
		misfit_load, busiest),

	TP_STRUCT__entry(
		__field(unsigned long, group_mask)
		__field(int, group_type)
		__field(unsigned int, group_idle_cpus)
		__field(unsigned int, sum_nr_running)
		__field(unsigned long, group_load)
		__field(unsigned long, group_capacity)
		__field(unsigned long, group_util)
		__field(int, group_no_capacity)
		__field(unsigned long, load_per_task)
		__field(unsigned long, misfit_task_load)
		__field(unsigned long, busiest)
	),

	TP_fast_assign(
		__entry->group_mask = sg_cpus;
		__entry->group_type = group_type;
		__entry->group_idle_cpus = idle_cpus;
		__entry->sum_nr_running = sum_nr_running;
		__entry->group_load = group_load;
		__entry->group_capacity = group_capacity;
		__entry->group_util = group_util;
		__entry->group_no_capacity = group_no_capacity;
		__entry->load_per_task = load_per_task;
		__entry->misfit_task_load = misfit_load;
		__entry->busiest = busiest;
	),

	TP_printk("sched_group=%#lx type=%d idle_cpus=%u sum_nr_run=%u group_load=%lu capacity=%lu util=%lu no_capacity=%d lpt=%lu misfit_tload=%lu busiest_group=%#lx",
		  __entry->group_mask, __entry->group_type,
		  __entry->group_idle_cpus, __entry->sum_nr_running,
		  __entry->group_load, __entry->group_capacity,
		  __entry->group_util, __entry->group_no_capacity,
		  __entry->load_per_task, __entry->misfit_task_load,
		  __entry->busiest)
);

TRACE_EVENT(sched_load_balance_stats,

	TP_PROTO(unsigned long busiest, int bgroup_type,
		 unsigned long bavg_load, unsigned long bload_per_task,
		 unsigned long local, int lgroup_type, unsigned long lavg_load,
		 unsigned long lload_per_task, unsigned long sds_avg_load,
		 unsigned long imbalance),

	TP_ARGS(busiest, bgroup_type, bavg_load, bload_per_task, local,
		lgroup_type, lavg_load, lload_per_task, sds_avg_load,
		imbalance),

	TP_STRUCT__entry(
		__field(unsigned long, busiest)
		__field(int, bgp_type)
		__field(unsigned long, bavg_load)
		__field(unsigned long, blpt)
		__field(unsigned long, local)
		__field(int, lgp_type)
		__field(unsigned long, lavg_load)
		__field(unsigned long, llpt)
		__field(unsigned long, sds_avg)
		__field(unsigned long, imbalance)
	),

	TP_fast_assign(
		__entry->busiest = busiest;
		__entry->bgp_type = bgroup_type;
		__entry->bavg_load = bavg_load;
		__entry->blpt = bload_per_task;
		__entry->local = local;
		__entry->lgp_type = lgroup_type;
		__entry->lavg_load = lavg_load;
		__entry->llpt = lload_per_task;
		__entry->sds_avg = sds_avg_load;
		__entry->imbalance = imbalance;
	),

	TP_printk("busiest_group=%#lx busiest_type=%d busiest_avg_load=%ld busiest_lpt=%ld local_group=%#lx local_type=%d local_avg_load=%ld local_lpt=%ld domain_avg_load=%ld imbalance=%ld",
		  __entry->busiest, __entry->bgp_type, __entry->bavg_load,
		  __entry->blpt, __entry->local, __entry->lgp_type,
		  __entry->lavg_load, __entry->llpt, __entry->sds_avg,
		  __entry->imbalance)
);
#endif /* NR_CPUS > BITS_PER_LONG */
#endif /* CONFIG_SMP */
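
/*
 * Like any other event in this file, the load-balance events can be
 * enabled at runtime through tracefs, e.g. (mount point may vary):
 *
 *	echo 1 > /sys/kernel/debug/tracing/events/sched/sched_load_balance/enable
 *	cat /sys/kernel/debug/tracing/trace_pipe
 */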

DECLARE_EVENT_CLASS(sched_process_template,

	TP_PROTO(struct task_struct *p),

	TP_ARGS(p),

	TP_STRUCT__entry(
		__array( char, comm, TASK_COMM_LEN )
		__field( pid_t, pid )
		__field( int, prio )
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid = p->pid;
		__entry->prio = p->prio; /* XXX SCHED_DEADLINE */
	),

	TP_printk("comm=%s pid=%d prio=%d",
		  __entry->comm, __entry->pid, __entry->prio)
);

/*
 * Tracepoint for freeing a task:
 */
DEFINE_EVENT(sched_process_template, sched_process_free,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

/*
 * Tracepoint for a task exiting:
 */
DEFINE_EVENT(sched_process_template, sched_process_exit,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

/*
 * Tracepoint for waiting on task to unschedule:
 */
DEFINE_EVENT(sched_process_template, sched_wait_task,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

/*
 * Tracepoint for a waiting task:
 */
TRACE_EVENT(sched_process_wait,

	TP_PROTO(struct pid *pid),

	TP_ARGS(pid),

	TP_STRUCT__entry(
		__array( char, comm, TASK_COMM_LEN )
		__field( pid_t, pid )
		__field( int, prio )
	),

	TP_fast_assign(
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
		__entry->pid = pid_nr(pid);
		__entry->prio = current->prio; /* XXX SCHED_DEADLINE */
	),

	TP_printk("comm=%s pid=%d prio=%d",
		  __entry->comm, __entry->pid, __entry->prio)
);

/*
 * Tracepoint for do_fork:
 */
TRACE_EVENT(sched_process_fork,

	TP_PROTO(struct task_struct *parent, struct task_struct *child),

	TP_ARGS(parent, child),

	TP_STRUCT__entry(
		__array( char, parent_comm, TASK_COMM_LEN )
		__field( pid_t, parent_pid )
		__array( char, child_comm, TASK_COMM_LEN )
		__field( pid_t, child_pid )
	),

	TP_fast_assign(
		memcpy(__entry->parent_comm, parent->comm, TASK_COMM_LEN);
		__entry->parent_pid = parent->pid;
		memcpy(__entry->child_comm, child->comm, TASK_COMM_LEN);
		__entry->child_pid = child->pid;
	),

	TP_printk("comm=%s pid=%d child_comm=%s child_pid=%d",
		  __entry->parent_comm, __entry->parent_pid,
		  __entry->child_comm, __entry->child_pid)
);

/*
 * Tracepoint for exec:
 */
TRACE_EVENT(sched_process_exec,

	TP_PROTO(struct task_struct *p, pid_t old_pid,
		 struct linux_binprm *bprm),

	TP_ARGS(p, old_pid, bprm),

	TP_STRUCT__entry(
		__string( filename, bprm->filename )
		__field( pid_t, pid )
		__field( pid_t, old_pid )
	),

	TP_fast_assign(
		__assign_str(filename, bprm->filename);
		__entry->pid = p->pid;
		__entry->old_pid = old_pid;
	),

	TP_printk("filename=%s pid=%d old_pid=%d", __get_str(filename),
		  __entry->pid, __entry->old_pid)
);

/*
 * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE;
 * adding sched_stat support to SCHED_FIFO/RR would be welcome.
 */
DECLARE_EVENT_CLASS(sched_stat_template,

	TP_PROTO(struct task_struct *tsk, u64 delay),

	TP_ARGS(__perf_task(tsk), __perf_count(delay)),

	TP_STRUCT__entry(
		__array( char, comm, TASK_COMM_LEN )
		__field( pid_t, pid )
		__field( u64, delay )
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid = tsk->pid;
		__entry->delay = delay;
	),

	TP_printk("comm=%s pid=%d delay=%Lu [ns]",
		  __entry->comm, __entry->pid,
		  (unsigned long long)__entry->delay)
);
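
/*
 * The __perf_task()/__perf_count() annotations above do not change the
 * traced record; they mark which argument perf should attribute the event
 * to and which one to use as the sample weight when these tracepoints
 * back a perf event.
 */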

/*
 * Tracepoint for accounting wait time (time the task is runnable
 * but not actually running due to scheduler contention).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_wait,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting sleep time (time the task is not runnable,
 * including iowait, see below).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_sleep,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting iowait time (time the task is not runnable
 * due to waiting on IO to complete).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_iowait,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting blocked time (time the task is in
 * uninterruptible sleep).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_blocked,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));

/*
 * Tracepoint for recording the cause of uninterruptible sleep.
 */
TRACE_EVENT(sched_blocked_reason,

	TP_PROTO(struct task_struct *tsk),

	TP_ARGS(tsk),

	TP_STRUCT__entry(
		__field( pid_t, pid )
		__field( void *, caller )
		__field( bool, io_wait )
	),

	TP_fast_assign(
		__entry->pid = tsk->pid;
		__entry->caller = (void *)get_wchan(tsk);
		__entry->io_wait = tsk->in_iowait;
	),

	TP_printk("pid=%d iowait=%d caller=%pS", __entry->pid, __entry->io_wait, __entry->caller)
);

/*
 * Tracepoint for accounting runtime (time the task is executing
 * on a CPU).
 */
DECLARE_EVENT_CLASS(sched_stat_runtime,

	TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),

	TP_ARGS(tsk, __perf_count(runtime), vruntime),

	TP_STRUCT__entry(
		__array( char, comm, TASK_COMM_LEN )
		__field( pid_t, pid )
		__field( u64, runtime )
		__field( u64, vruntime )
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid = tsk->pid;
		__entry->runtime = runtime;
		__entry->vruntime = vruntime;
	),

	TP_printk("comm=%s pid=%d runtime=%Lu [ns] vruntime=%Lu [ns]",
		  __entry->comm, __entry->pid,
		  (unsigned long long)__entry->runtime,
		  (unsigned long long)__entry->vruntime)
);

DEFINE_EVENT(sched_stat_runtime, sched_stat_runtime,
	     TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
	     TP_ARGS(tsk, runtime, vruntime));

/*
 * Tracepoint for showing priority inheritance modifying a task's
 * priority.
 */
TRACE_EVENT(sched_pi_setprio,

	TP_PROTO(struct task_struct *tsk, struct task_struct *pi_task),

	TP_ARGS(tsk, pi_task),

	TP_STRUCT__entry(
		__array( char, comm, TASK_COMM_LEN )
		__field( pid_t, pid )
		__field( int, oldprio )
		__field( int, newprio )
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid = tsk->pid;
		__entry->oldprio = tsk->prio;
		__entry->newprio = pi_task ?
				min(tsk->normal_prio, pi_task->prio) :
				tsk->normal_prio;
		/* XXX SCHED_DEADLINE bits missing */
	),

	TP_printk("comm=%s pid=%d oldprio=%d newprio=%d",
		  __entry->comm, __entry->pid,
		  __entry->oldprio, __entry->newprio)
);

#ifdef CONFIG_DETECT_HUNG_TASK
TRACE_EVENT(sched_process_hang,
	TP_PROTO(struct task_struct *tsk),
	TP_ARGS(tsk),

	TP_STRUCT__entry(
		__array( char, comm, TASK_COMM_LEN )
		__field( pid_t, pid )
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid = tsk->pid;
	),

	TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
);
#endif /* CONFIG_DETECT_HUNG_TASK */

DECLARE_EVENT_CLASS(sched_move_task_template,

	TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu),

	TP_ARGS(tsk, src_cpu, dst_cpu),

	TP_STRUCT__entry(
		__field( pid_t, pid )
		__field( pid_t, tgid )
		__field( pid_t, ngid )
		__field( int, src_cpu )
		__field( int, src_nid )
		__field( int, dst_cpu )
		__field( int, dst_nid )
	),

	TP_fast_assign(
		__entry->pid = task_pid_nr(tsk);
		__entry->tgid = task_tgid_nr(tsk);
		__entry->ngid = task_numa_group_id(tsk);
		__entry->src_cpu = src_cpu;
		__entry->src_nid = cpu_to_node(src_cpu);
		__entry->dst_cpu = dst_cpu;
		__entry->dst_nid = cpu_to_node(dst_cpu);
	),

	TP_printk("pid=%d tgid=%d ngid=%d src_cpu=%d src_nid=%d dst_cpu=%d dst_nid=%d",
		  __entry->pid, __entry->tgid, __entry->ngid,
		  __entry->src_cpu, __entry->src_nid,
		  __entry->dst_cpu, __entry->dst_nid)
);

/*
 * Tracks migration of tasks from one runqueue to another. Can be used to
 * detect if automatic NUMA balancing is bouncing between nodes.
 */
DEFINE_EVENT(sched_move_task_template, sched_move_numa,
	TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu),

	TP_ARGS(tsk, src_cpu, dst_cpu)
);

DEFINE_EVENT(sched_move_task_template, sched_stick_numa,
	TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu),

	TP_ARGS(tsk, src_cpu, dst_cpu)
);

TRACE_EVENT(sched_swap_numa,

	TP_PROTO(struct task_struct *src_tsk, int src_cpu,
		 struct task_struct *dst_tsk, int dst_cpu),

	TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu),

	TP_STRUCT__entry(
		__field( pid_t, src_pid )
		__field( pid_t, src_tgid )
		__field( pid_t, src_ngid )
		__field( int, src_cpu )
		__field( int, src_nid )
		__field( pid_t, dst_pid )
		__field( pid_t, dst_tgid )
		__field( pid_t, dst_ngid )
		__field( int, dst_cpu )
		__field( int, dst_nid )
	),

	TP_fast_assign(
		__entry->src_pid = task_pid_nr(src_tsk);
		__entry->src_tgid = task_tgid_nr(src_tsk);
		__entry->src_ngid = task_numa_group_id(src_tsk);
		__entry->src_cpu = src_cpu;
		__entry->src_nid = cpu_to_node(src_cpu);
		__entry->dst_pid = task_pid_nr(dst_tsk);
		__entry->dst_tgid = task_tgid_nr(dst_tsk);
		__entry->dst_ngid = task_numa_group_id(dst_tsk);
		__entry->dst_cpu = dst_cpu;
		__entry->dst_nid = cpu_to_node(dst_cpu);
	),

	TP_printk("src_pid=%d src_tgid=%d src_ngid=%d src_cpu=%d src_nid=%d dst_pid=%d dst_tgid=%d dst_ngid=%d dst_cpu=%d dst_nid=%d",
		  __entry->src_pid, __entry->src_tgid, __entry->src_ngid,
		  __entry->src_cpu, __entry->src_nid,
		  __entry->dst_pid, __entry->dst_tgid, __entry->dst_ngid,
		  __entry->dst_cpu, __entry->dst_nid)
);

/*
 * Tracepoint for waking a polling cpu without an IPI.
 */
TRACE_EVENT(sched_wake_idle_without_ipi,

	TP_PROTO(int cpu),

	TP_ARGS(cpu),

	TP_STRUCT__entry(
		__field( int, cpu )
	),

	TP_fast_assign(
		__entry->cpu = cpu;
	),

	TP_printk("cpu=%d", __entry->cpu)
);

#ifdef CONFIG_SMP
#ifdef CREATE_TRACE_POINTS
static inline
int __trace_sched_cpu(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
#ifdef CONFIG_FAIR_GROUP_SCHED
	struct rq *rq = cfs_rq ? cfs_rq->rq : NULL;
#else
	struct rq *rq = cfs_rq ? container_of(cfs_rq, struct rq, cfs) : NULL;
#endif
	return rq ? cpu_of(rq)
		  : task_cpu((container_of(se, struct task_struct, se)));
}

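/*
 * Called twice per event: once with a NULL buffer, purely to size the
 * __dynamic_array() path field, and then again from TP_fast_assign() to
 * fill it with the cgroup (or autogroup) path of the cfs_rq's task_group.
 */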
static inline
int __trace_sched_path(struct cfs_rq *cfs_rq, char *path, int len)
{
#ifdef CONFIG_FAIR_GROUP_SCHED
	int l = path ? len : 0;

	if (cfs_rq && task_group_is_autogroup(cfs_rq->tg))
		return autogroup_path(cfs_rq->tg, path, l) + 1;
	else if (cfs_rq && cfs_rq->tg->css.cgroup)
		return cgroup_path(cfs_rq->tg->css.cgroup, path, l) + 1;
#endif
	if (path)
		strcpy(path, "(null)");

	return strlen("(null)");
}

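/*
 * For a group entity this returns the cfs_rq it owns (se->my_q); for a
 * plain task entity there is no owned queue, so it returns NULL.
 */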
static inline
struct cfs_rq *__trace_sched_group_cfs_rq(struct sched_entity *se)
{
#ifdef CONFIG_FAIR_GROUP_SCHED
	return se->my_q;
#else
	return NULL;
#endif
}
#endif /* CREATE_TRACE_POINTS */

/*
 * Tracepoint for cfs_rq load tracking:
 */
TRACE_EVENT(sched_load_cfs_rq,

	TP_PROTO(struct cfs_rq *cfs_rq),

	TP_ARGS(cfs_rq),

	TP_STRUCT__entry(
		__field( int, cpu )
		__dynamic_array( char, path,
				 __trace_sched_path(cfs_rq, NULL, 0) )
		__field( unsigned long, load )
		__field( unsigned long, rbl_load )
		__field( unsigned long, util )
	),

	TP_fast_assign(
		__entry->cpu = __trace_sched_cpu(cfs_rq, NULL);
		__trace_sched_path(cfs_rq, __get_dynamic_array(path),
				   __get_dynamic_array_len(path));
		__entry->load = cfs_rq->avg.load_avg;
		__entry->rbl_load = cfs_rq->avg.runnable_load_avg;
		__entry->util = cfs_rq->avg.util_avg;
	),

	TP_printk("cpu=%d path=%s load=%lu rbl_load=%lu util=%lu",
		  __entry->cpu, __get_str(path), __entry->load,
		  __entry->rbl_load, __entry->util)
);

/*
 * Tracepoint for rt_rq load tracking:
 */
struct rq;
TRACE_EVENT(sched_load_rt_rq,

	TP_PROTO(struct rq *rq),

	TP_ARGS(rq),

	TP_STRUCT__entry(
		__field( int, cpu )
		__field( unsigned long, util )
	),

	TP_fast_assign(
		__entry->cpu = rq->cpu;
		__entry->util = rq->avg_rt.util_avg;
	),

	TP_printk("cpu=%d util=%lu", __entry->cpu,
		  __entry->util)
);

#ifdef CONFIG_SCHED_WALT
extern unsigned int sysctl_sched_use_walt_cpu_util;
extern unsigned int sysctl_sched_use_walt_task_util;
extern unsigned int sched_ravg_window;
extern unsigned int walt_disabled;
#endif
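
/*
 * With WALT enabled, the *_walt utilization reported below is the window
 * demand normalized to capacity units: demand is in ns per window, and
 * dividing by (sched_ravg_window >> SCHED_CAPACITY_SHIFT) rescales a
 * fully busy window to SCHED_CAPACITY_SCALE (1024).
 */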
| 948 | |
| 949 | /* |
| 950 | * Tracepoint for accounting cpu root cfs_rq |
| 951 | */ |
| 952 | TRACE_EVENT(sched_load_avg_cpu, |
| 953 | |
| 954 | TP_PROTO(int cpu, struct cfs_rq *cfs_rq), |
| 955 | |
| 956 | TP_ARGS(cpu, cfs_rq), |
| 957 | |
| 958 | TP_STRUCT__entry( |
| 959 | __field(int, cpu) |
| 960 | __field(unsigned long, load_avg) |
| 961 | __field(unsigned long, util_avg) |
| 962 | __field(unsigned long, util_avg_pelt) |
Pavankumar Kondeti | 596bca7 | 2018-02-23 09:53:22 +0530 | [diff] [blame] | 963 | __field(u32, util_avg_walt) |
Satya Durga Srinivasu Prabhala | 7ebdf76 | 2018-11-07 13:55:58 -0800 | [diff] [blame] | 964 | ), |
| 965 | |
| 966 | TP_fast_assign( |
| 967 | __entry->cpu = cpu; |
| 968 | __entry->load_avg = cfs_rq->avg.load_avg; |
| 969 | __entry->util_avg = cfs_rq->avg.util_avg; |
| 970 | __entry->util_avg_pelt = cfs_rq->avg.util_avg; |
| 971 | __entry->util_avg_walt = 0; |
| 972 | #ifdef CONFIG_SCHED_WALT |
Pavankumar Kondeti | 596bca7 | 2018-02-23 09:53:22 +0530 | [diff] [blame] | 973 | __entry->util_avg_walt = div64_ul(cpu_rq(cpu)->prev_runnable_sum, |
| 974 | sched_ravg_window >> SCHED_CAPACITY_SHIFT); |
| 975 | |
Satya Durga Srinivasu Prabhala | 7ebdf76 | 2018-11-07 13:55:58 -0800 | [diff] [blame] | 976 | if (!walt_disabled && sysctl_sched_use_walt_cpu_util) |
| 977 | __entry->util_avg = __entry->util_avg_walt; |
| 978 | #endif |
| 979 | ), |
| 980 | |
Pavankumar Kondeti | 596bca7 | 2018-02-23 09:53:22 +0530 | [diff] [blame] | 981 | TP_printk("cpu=%d load_avg=%lu util_avg=%lu util_avg_pelt=%lu util_avg_walt=%u", |
Satya Durga Srinivasu Prabhala | 7ebdf76 | 2018-11-07 13:55:58 -0800 | [diff] [blame] | 982 | __entry->cpu, __entry->load_avg, __entry->util_avg, |
| 983 | __entry->util_avg_pelt, __entry->util_avg_walt) |
| 984 | ); |
| 985 | |
| 986 | |
Chris Redpath | 3124a5b | 2017-10-27 16:52:17 +0100 | [diff] [blame] | 987 | /* |
Dietmar Eggemann | 4290369 | 2017-03-20 17:26:47 +0000 | [diff] [blame] | 988 | * Tracepoint for sched_entity load tracking: |
| 989 | */ |
| 990 | TRACE_EVENT(sched_load_se, |
| 991 | |
| 992 | TP_PROTO(struct sched_entity *se), |
| 993 | |
| 994 | TP_ARGS(se), |
| 995 | |
| 996 | TP_STRUCT__entry( |
| 997 | __field( int, cpu ) |
| 998 | __dynamic_array(char, path, |
| 999 | __trace_sched_path(__trace_sched_group_cfs_rq(se), NULL, 0) ) |
| 1000 | __array( char, comm, TASK_COMM_LEN ) |
| 1001 | __field( pid_t, pid ) |
| 1002 | __field( unsigned long, load ) |
| 1003 | __field( unsigned long, rbl_load ) |
| 1004 | __field( unsigned long, util ) |
Pavankumar Kondeti | 596bca7 | 2018-02-23 09:53:22 +0530 | [diff] [blame] | 1005 | __field( unsigned long, util_pelt ) |
| 1006 | __field( u32, util_walt ) |
Dietmar Eggemann | 4290369 | 2017-03-20 17:26:47 +0000 | [diff] [blame] | 1007 | ), |
| 1008 | |
| 1009 | TP_fast_assign( |
| 1010 | struct cfs_rq *gcfs_rq = __trace_sched_group_cfs_rq(se); |
| 1011 | struct task_struct *p = gcfs_rq ? NULL |
| 1012 | : container_of(se, struct task_struct, se); |
| 1013 | |
| 1014 | __entry->cpu = __trace_sched_cpu(gcfs_rq, se); |
| 1015 | __trace_sched_path(gcfs_rq, __get_dynamic_array(path), |
| 1016 | __get_dynamic_array_len(path)); |
Quentin Perret | a359bef | 2018-12-14 09:05:20 +0000 | [diff] [blame] | 1017 | memcpy(__entry->comm, p ? p->comm : "(null)", |
| 1018 | p ? TASK_COMM_LEN : sizeof("(null)")); |
Dietmar Eggemann | 4290369 | 2017-03-20 17:26:47 +0000 | [diff] [blame] | 1019 | __entry->pid = p ? p->pid : -1; |
| 1020 | __entry->load = se->avg.load_avg; |
| 1021 | __entry->rbl_load = se->avg.runnable_load_avg; |
| 1022 | __entry->util = se->avg.util_avg; |
Pavankumar Kondeti | 596bca7 | 2018-02-23 09:53:22 +0530 | [diff] [blame] | 1023 | __entry->util_pelt = __entry->util; |
| 1024 | __entry->util_walt = 0; |
| 1025 | #ifdef CONFIG_SCHED_WALT |
| 1026 | if (!se->my_q) { |
| 1027 | struct task_struct *p = container_of(se, struct task_struct, se); |
| 1028 | __entry->util_walt = p->ravg.demand / (sched_ravg_window >> SCHED_CAPACITY_SHIFT); |
| 1029 | if (!walt_disabled && sysctl_sched_use_walt_task_util) |
| 1030 | __entry->util = __entry->util_walt; |
| 1031 | } |
| 1032 | #endif |
Dietmar Eggemann | 4290369 | 2017-03-20 17:26:47 +0000 | [diff] [blame] | 1033 | ), |
| 1034 | |
Pavankumar Kondeti | 596bca7 | 2018-02-23 09:53:22 +0530 | [diff] [blame] | 1035 | TP_printk("cpu=%d path=%s comm=%s pid=%d load=%lu rbl_load=%lu util=%lu util_pelt=%lu util_walt=%u", |
Dietmar Eggemann | 4290369 | 2017-03-20 17:26:47 +0000 | [diff] [blame] | 1036 | __entry->cpu, __get_str(path), __entry->comm, __entry->pid, |
Pavankumar Kondeti | 596bca7 | 2018-02-23 09:53:22 +0530 | [diff] [blame] | 1037 | __entry->load, __entry->rbl_load, __entry->util, |
| 1038 | __entry->util_pelt, __entry->util_walt) |
Dietmar Eggemann | 4290369 | 2017-03-20 17:26:47 +0000 | [diff] [blame] | 1039 | ); |
Dietmar Eggemann | 9156793 | 2017-03-17 21:23:35 +0000 | [diff] [blame] | 1040 | |
| 1041 | /* |
| 1042 | * Tracepoint for task_group load tracking: |
| 1043 | */ |
| 1044 | #ifdef CONFIG_FAIR_GROUP_SCHED |
| 1045 | TRACE_EVENT(sched_load_tg, |
| 1046 | |
| 1047 | TP_PROTO(struct cfs_rq *cfs_rq), |
| 1048 | |
| 1049 | TP_ARGS(cfs_rq), |
| 1050 | |
| 1051 | TP_STRUCT__entry( |
| 1052 | __field( int, cpu ) |
| 1053 | __dynamic_array(char, path, |
| 1054 | __trace_sched_path(cfs_rq, NULL, 0) ) |
| 1055 | __field( long, load ) |
| 1056 | ), |
| 1057 | |
| 1058 | TP_fast_assign( |
| 1059 | __entry->cpu = cfs_rq->rq->cpu; |
| 1060 | __trace_sched_path(cfs_rq, __get_dynamic_array(path), |
| 1061 | __get_dynamic_array_len(path)); |
| 1062 | __entry->load = atomic_long_read(&cfs_rq->tg->load_avg); |
| 1063 | ), |
| 1064 | |
| 1065 | TP_printk("cpu=%d path=%s load=%ld", __entry->cpu, __get_str(path), |
| 1066 | __entry->load) |
| 1067 | ); |
| 1068 | #endif /* CONFIG_FAIR_GROUP_SCHED */ |
Patrick Bellasi | 8eb64d5 | 2017-10-27 16:12:51 +0100 | [diff] [blame] | 1069 | |
| 1070 | /* |
| 1071 | * Tracepoint for tasks' estimated utilization. |
| 1072 | */ |
| 1073 | TRACE_EVENT(sched_util_est_task, |
| 1074 | |
| 1075 | TP_PROTO(struct task_struct *tsk, struct sched_avg *avg), |
| 1076 | |
| 1077 | TP_ARGS(tsk, avg), |
| 1078 | |
| 1079 | TP_STRUCT__entry( |
| 1080 | __array( char, comm, TASK_COMM_LEN ) |
| 1081 | __field( pid_t, pid ) |
| 1082 | __field( int, cpu ) |
| 1083 | __field( unsigned int, util_avg ) |
| 1084 | __field( unsigned int, est_enqueued ) |
| 1085 | __field( unsigned int, est_ewma ) |
| 1086 | |
| 1087 | ), |
| 1088 | |
| 1089 | TP_fast_assign( |
| 1090 | memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN); |
| 1091 | __entry->pid = tsk->pid; |
| 1092 | __entry->cpu = task_cpu(tsk); |
| 1093 | __entry->util_avg = avg->util_avg; |
| 1094 | __entry->est_enqueued = avg->util_est.enqueued; |
| 1095 | __entry->est_ewma = avg->util_est.ewma; |
| 1096 | ), |
| 1097 | |
| 1098 | TP_printk("comm=%s pid=%d cpu=%d util_avg=%u util_est_ewma=%u util_est_enqueued=%u", |
| 1099 | __entry->comm, |
| 1100 | __entry->pid, |
| 1101 | __entry->cpu, |
| 1102 | __entry->util_avg, |
| 1103 | __entry->est_ewma, |
| 1104 | __entry->est_enqueued) |
| 1105 | ); |
| 1106 | |
| 1107 | /* |
| 1108 | * Tracepoint for root cfs_rq's estimated utilization. |
| 1109 | */ |
| 1110 | TRACE_EVENT(sched_util_est_cpu, |
| 1111 | |
| 1112 | TP_PROTO(int cpu, struct cfs_rq *cfs_rq), |
| 1113 | |
| 1114 | TP_ARGS(cpu, cfs_rq), |
| 1115 | |
| 1116 | TP_STRUCT__entry( |
Satya Durga Srinivasu Prabhala | 39d60ed | 2018-11-16 15:12:05 -0800 | [diff] [blame] | 1117 | __field(int, cpu) |
| 1118 | __field(unsigned int, util_avg) |
| 1119 | __field(unsigned int, util_est_enqueued) |
Patrick Bellasi | 8eb64d5 | 2017-10-27 16:12:51 +0100 | [diff] [blame] | 1120 | ), |
| 1121 | |
| 1122 | TP_fast_assign( |
| 1123 | __entry->cpu = cpu; |
| 1124 | __entry->util_avg = cfs_rq->avg.util_avg; |
| 1125 | __entry->util_est_enqueued = cfs_rq->avg.util_est.enqueued; |
| 1126 | ), |
| 1127 | |
| 1128 | TP_printk("cpu=%d util_avg=%u util_est_enqueued=%u", |
| 1129 | __entry->cpu, |
| 1130 | __entry->util_avg, |
| 1131 | __entry->util_est_enqueued) |
| 1132 | ); |
Quentin Perret | 171db7d | 2018-05-31 11:15:26 +0100 | [diff] [blame] | 1133 | |
Satya Durga Srinivasu Prabhala | 39d60ed | 2018-11-16 15:12:05 -0800 | [diff] [blame] | 1134 | TRACE_EVENT(sched_cpu_util, |
| 1135 | |
| 1136 | TP_PROTO(int cpu), |
| 1137 | |
| 1138 | TP_ARGS(cpu), |
| 1139 | |
| 1140 | TP_STRUCT__entry( |
| 1141 | __field(unsigned int, cpu) |
| 1142 | __field(unsigned int, nr_running) |
| 1143 | __field(long, cpu_util) |
| 1144 | __field(long, cpu_util_cum) |
| 1145 | __field(unsigned int, capacity_curr) |
| 1146 | __field(unsigned int, capacity) |
| 1147 | __field(unsigned int, capacity_orig) |
| 1148 | __field(int, idle_state) |
| 1149 | __field(u64, irqload) |
| 1150 | __field(int, online) |
| 1151 | __field(int, isolated) |
| 1152 | __field(int, reserved) |
| 1153 | __field(int, high_irq_load) |
| 1154 | ), |
| 1155 | |
| 1156 | TP_fast_assign( |
| 1157 | __entry->cpu = cpu; |
| 1158 | __entry->nr_running = cpu_rq(cpu)->nr_running; |
| 1159 | __entry->cpu_util = cpu_util(cpu); |
| 1160 | __entry->cpu_util_cum = cpu_util_cum(cpu, 0); |
| 1161 | __entry->capacity_curr = capacity_curr_of(cpu); |
| 1162 | __entry->capacity = capacity_of(cpu); |
| 1163 | __entry->capacity_orig = capacity_orig_of(cpu); |
| 1164 | __entry->idle_state = idle_get_state_idx(cpu_rq(cpu)); |
| 1165 | __entry->irqload = sched_irqload(cpu); |
| 1166 | __entry->online = cpu_online(cpu); |
| 1167 | __entry->isolated = cpu_isolated(cpu); |
| 1168 | __entry->reserved = is_reserved(cpu); |
| 1169 | __entry->high_irq_load = sched_cpu_high_irqload(cpu); |
| 1170 | ), |
| 1171 | |
| 1172 | TP_printk("cpu=%d nr_running=%d cpu_util=%ld cpu_util_cum=%ld capacity_curr=%u capacity=%u capacity_orig=%u idle_state=%d irqload=%llu online=%u, isolated=%u, reserved=%u, high_irq_load=%u", |
| 1173 | __entry->cpu, __entry->nr_running, __entry->cpu_util, |
| 1174 | __entry->cpu_util_cum, __entry->capacity_curr, |
| 1175 | __entry->capacity, __entry->capacity_orig, |
| 1176 | __entry->idle_state, __entry->irqload, __entry->online, |
| 1177 | __entry->isolated, __entry->reserved, __entry->high_irq_load) |
| 1178 | ); |

TRACE_EVENT(sched_task_util,

	TP_PROTO(struct task_struct *p, int best_energy_cpu,
		bool sync, bool need_idle, int fastpath,
		bool placement_boost, int rtg_cpu, u64 start_t,
		bool stune_boosted),

	TP_ARGS(p, best_energy_cpu, sync, need_idle, fastpath,
		placement_boost, rtg_cpu, start_t, stune_boosted),

	TP_STRUCT__entry(
		__field(int, pid)
		__array(char, comm, TASK_COMM_LEN)
		__field(unsigned long, util)
		__field(int, prev_cpu)
		__field(int, best_energy_cpu)
		__field(bool, sync)
		__field(bool, need_idle)
		__field(int, fastpath)
		__field(int, placement_boost)
		__field(int, rtg_cpu)
		__field(u64, latency)
		__field(bool, stune_boosted)
	),

	TP_fast_assign(
		__entry->pid = p->pid;
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->util = task_util(p);
		__entry->prev_cpu = task_cpu(p);
		__entry->best_energy_cpu = best_energy_cpu;
		__entry->sync = sync;
		__entry->need_idle = need_idle;
		__entry->fastpath = fastpath;
		__entry->placement_boost = placement_boost;
		__entry->rtg_cpu = rtg_cpu;
		__entry->latency = (sched_clock() - start_t);
		__entry->stune_boosted = stune_boosted;
	),

	TP_printk("pid=%d comm=%s util=%lu prev_cpu=%d best_energy_cpu=%d sync=%d need_idle=%d fastpath=%d placement_boost=%d rtg_cpu=%d latency=%llu stune_boosted=%d",
		__entry->pid, __entry->comm, __entry->util, __entry->prev_cpu,
		__entry->best_energy_cpu, __entry->sync, __entry->need_idle,
		__entry->fastpath, __entry->placement_boost, __entry->rtg_cpu,
		__entry->latency, __entry->stune_boosted)
);
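
/*
 * A typical emission point for sched_task_util (an illustrative sketch,
 * not taken from this file): the energy-aware wake-up path captures a
 * sched_clock() timestamp on entry and passes it as start_t, so the
 * event's latency field measures the cost of the placement decision:
 *
 *	u64 start_t = sched_clock();
 *	... placement decision ...
 *	trace_sched_task_util(p, best_energy_cpu, sync, need_idle,
 *			      fastpath, placement_boost, rtg_cpu,
 *			      start_t, stune_boosted);
 */
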
/*
 * Tracepoint for find_best_target
 */
TRACE_EVENT(sched_find_best_target,

	TP_PROTO(struct task_struct *tsk, bool prefer_idle,
		unsigned long min_util, int start_cpu,
		int best_idle, int best_active, int most_spare_cap,
		int target, int backup),

	TP_ARGS(tsk, prefer_idle, min_util, start_cpu,
		best_idle, best_active, most_spare_cap,
		target, backup),

	TP_STRUCT__entry(
		__array(char, comm, TASK_COMM_LEN)
		__field(pid_t, pid)
		__field(unsigned long, min_util)
		__field(bool, prefer_idle)
		__field(int, start_cpu)
		__field(int, best_idle)
		__field(int, best_active)
		__field(int, most_spare_cap)
		__field(int, target)
		__field(int, backup)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid = tsk->pid;
		__entry->min_util = min_util;
		__entry->prefer_idle = prefer_idle;
		__entry->start_cpu = start_cpu;
		__entry->best_idle = best_idle;
		__entry->best_active = best_active;
		__entry->most_spare_cap = most_spare_cap;
		__entry->target = target;
		__entry->backup = backup;
	),

	TP_printk("pid=%d comm=%s min_util=%lu prefer_idle=%d start_cpu=%d best_idle=%d best_active=%d most_spare_cap=%d target=%d backup=%d",
		__entry->pid, __entry->comm, __entry->min_util,
		__entry->prefer_idle, __entry->start_cpu,
		__entry->best_idle, __entry->best_active,
		__entry->most_spare_cap,
		__entry->target, __entry->backup)
);
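
/*
 * Illustrative meaning of the candidates reported above (an assumption
 * about the caller, which is defined outside this file): the target
 * search scans CPUs starting at start_cpu, tracking the best idle
 * candidate, the best active (non-idle) candidate, and the CPU with the
 * most spare capacity, then reports the final choice plus a backup:
 *
 *	trace_sched_find_best_target(p, prefer_idle, min_util, start_cpu,
 *				     best_idle_cpu, best_active_cpu,
 *				     most_spare_cap_cpu, target_cpu,
 *				     backup_cpu);
 */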

/*
 * Tracepoint for accounting CPU boosted utilization
 */
TRACE_EVENT(sched_boost_cpu,

	TP_PROTO(int cpu, unsigned long util, long margin),

	TP_ARGS(cpu, util, margin),

	TP_STRUCT__entry(
		__field( int, cpu )
		__field( unsigned long, util )
		__field( long, margin )
	),

	TP_fast_assign(
		__entry->cpu = cpu;
		__entry->util = util;
		__entry->margin = margin;
	),

	TP_printk("cpu=%d util=%lu margin=%ld",
		__entry->cpu,
		__entry->util,
		__entry->margin)
);
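
/*
 * Hedged sketch of how the margin above is typically derived, following
 * SchedTune's Signal Proportional Compensation (an assumption about the
 * caller; the actual helper lives outside this file): a positive boost
 * percentage claims a proportional share of the remaining capacity
 * headroom, so roughly
 *
 *	margin = boost * (SCHED_CAPACITY_SCALE - util) / 100;
 *
 * and the boosted utilization seen by consumers is util + margin.
 */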

TRACE_EVENT(core_ctl_eval_need,

	TP_PROTO(unsigned int cpu, unsigned int old_need,
		 unsigned int new_need, unsigned int updated),
	TP_ARGS(cpu, old_need, new_need, updated),
	TP_STRUCT__entry(
		__field(u32, cpu)
		__field(u32, old_need)
		__field(u32, new_need)
		__field(u32, updated)
	),
	TP_fast_assign(
		__entry->cpu = cpu;
		__entry->old_need = old_need;
		__entry->new_need = new_need;
		__entry->updated = updated;
	),
	TP_printk("cpu=%u old_need=%u new_need=%u updated=%u", __entry->cpu,
		  __entry->old_need, __entry->new_need, __entry->updated)
);
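
/*
 * All events in this file belong to the "sched" trace system, so the
 * core_ctl evaluation above can be observed from userspace via tracefs
 * (the standard mount point is shown here as an example):
 *
 *	echo 1 > /sys/kernel/tracing/events/sched/core_ctl_eval_need/enable
 *	cat /sys/kernel/tracing/trace_pipe
 */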

TRACE_EVENT(core_ctl_set_busy,

	TP_PROTO(unsigned int cpu, unsigned int busy,
		 unsigned int old_is_busy, unsigned int is_busy),
	TP_ARGS(cpu, busy, old_is_busy, is_busy),
	TP_STRUCT__entry(
		__field(u32, cpu)
		__field(u32, busy)
		__field(u32, old_is_busy)
		__field(u32, is_busy)
		__field(bool, high_irqload)
	),
	TP_fast_assign(
		__entry->cpu = cpu;
		__entry->busy = busy;
		__entry->old_is_busy = old_is_busy;
		__entry->is_busy = is_busy;
		__entry->high_irqload = sched_cpu_high_irqload(cpu);
	),
	TP_printk("cpu=%u busy=%u old_is_busy=%u new_is_busy=%u high_irqload=%d",
		  __entry->cpu, __entry->busy, __entry->old_is_busy,
		  __entry->is_busy, __entry->high_irqload)
);

TRACE_EVENT(core_ctl_set_boost,

	TP_PROTO(u32 refcount, s32 ret),
	TP_ARGS(refcount, ret),
	TP_STRUCT__entry(
		__field(u32, refcount)
		__field(s32, ret)
	),
	TP_fast_assign(
		__entry->refcount = refcount;
		__entry->ret = ret;
	),
	TP_printk("refcount=%u ret=%d", __entry->refcount, __entry->ret)
);

TRACE_EVENT(core_ctl_update_nr_need,

	TP_PROTO(int cpu, int nr_need, int prev_misfit_need,
		 int nrrun, int max_nr, int nr_prev_assist),

	TP_ARGS(cpu, nr_need, prev_misfit_need, nrrun, max_nr, nr_prev_assist),

	TP_STRUCT__entry(
		__field(int, cpu)
		__field(int, nr_need)
		__field(int, prev_misfit_need)
		__field(int, nrrun)
		__field(int, max_nr)
		__field(int, nr_prev_assist)
	),

	TP_fast_assign(
		__entry->cpu = cpu;
		__entry->nr_need = nr_need;
		__entry->prev_misfit_need = prev_misfit_need;
		__entry->nrrun = nrrun;
		__entry->max_nr = max_nr;
		__entry->nr_prev_assist = nr_prev_assist;
	),

	TP_printk("cpu=%d nr_need=%d prev_misfit_need=%d nrrun=%d max_nr=%d nr_prev_assist=%d",
		  __entry->cpu, __entry->nr_need, __entry->prev_misfit_need,
		  __entry->nrrun, __entry->max_nr, __entry->nr_prev_assist)
);

/*
 * Tracepoint for schedtune_tasks_update
 */
TRACE_EVENT(sched_tune_tasks_update,

	TP_PROTO(struct task_struct *tsk, int cpu, int tasks, int idx,
		int boost, int max_boost, u64 group_ts),

	TP_ARGS(tsk, cpu, tasks, idx, boost, max_boost, group_ts),

	TP_STRUCT__entry(
		__array( char, comm, TASK_COMM_LEN )
		__field( pid_t, pid )
		__field( int, cpu )
		__field( int, tasks )
		__field( int, idx )
		__field( int, boost )
		__field( int, max_boost )
		__field( u64, group_ts )
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid = tsk->pid;
		__entry->cpu = cpu;
		__entry->tasks = tasks;
		__entry->idx = idx;
		__entry->boost = boost;
		__entry->max_boost = max_boost;
		__entry->group_ts = group_ts;
	),

	TP_printk("pid=%d comm=%s "
		"cpu=%d tasks=%d idx=%d boost=%d max_boost=%d timeout=%llu",
		__entry->pid, __entry->comm,
		__entry->cpu, __entry->tasks, __entry->idx,
		__entry->boost, __entry->max_boost,
		__entry->group_ts)
);
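
/*
 * Plausible emission point (an assumption; schedtune_tasks_update() is
 * defined outside this file): whenever a task enters or leaves boost
 * group idx on a CPU, the group's active-task count (tasks) and the
 * resulting maximum boost across groups are re-evaluated, with group_ts
 * carrying the group's holdover timestamp printed as timeout= above:
 *
 *	trace_sched_tune_tasks_update(p, cpu, tasks, idx, boost,
 *				      max_boost, group_ts);
 */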

/*
 * Tracepoint for schedtune_boostgroup_update
 */
TRACE_EVENT(sched_tune_boostgroup_update,

	TP_PROTO(int cpu, int variation, int max_boost),

	TP_ARGS(cpu, variation, max_boost),

	TP_STRUCT__entry(
		__field( int, cpu )
		__field( int, variation )
		__field( int, max_boost )
	),

	TP_fast_assign(
		__entry->cpu = cpu;
		__entry->variation = variation;
		__entry->max_boost = max_boost;
	),

	TP_printk("cpu=%d variation=%d max_boost=%d",
		__entry->cpu, __entry->variation, __entry->max_boost)
);

/*
 * Tracepoint for accounting task boosted utilization
 */
TRACE_EVENT(sched_boost_task,

	TP_PROTO(struct task_struct *tsk, unsigned long util, long margin),

	TP_ARGS(tsk, util, margin),

	TP_STRUCT__entry(
		__array( char, comm, TASK_COMM_LEN )
		__field( pid_t, pid )
		__field( unsigned long, util )
		__field( long, margin )
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid = tsk->pid;
		__entry->util = util;
		__entry->margin = margin;
	),

	TP_printk("comm=%s pid=%d util=%lu margin=%ld",
		__entry->comm, __entry->pid,
		__entry->util,
		__entry->margin)
);
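
/*
 * Illustrative relation between the two boost events (an assumption, not
 * defined in this file): sched_boost_cpu reports the margin added to a
 * CPU's aggregate utilization, while sched_boost_task reports the margin
 * added to a single task's utilization signal, e.g.:
 *
 *	trace_sched_boost_task(p, util, margin);
 *	boosted_task_util = util + margin;
 */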

/*
 * Tracepoint for system overutilized flag
 */
TRACE_EVENT(sched_overutilized,

	TP_PROTO(int overutilized),

	TP_ARGS(overutilized),

	TP_STRUCT__entry(
		__field( int, overutilized )
	),

	TP_fast_assign(
		__entry->overutilized = overutilized;
	),

	TP_printk("overutilized=%d",
		__entry->overutilized)
);
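
/*
 * A sketch of when the flag above would flip (an assumption about the
 * caller): the load balancer marks the system overutilized once some
 * CPU's utilization no longer fits its capacity with the conventional
 * ~20% headroom (capacity_margin of 1280/1024):
 *
 *	if (cpu_util(cpu) * 1280 > capacity_of(cpu) * 1024)
 *		trace_sched_overutilized(1);
 */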

/*
 * Tracepoint for sched_get_nr_running_avg
 */
TRACE_EVENT(sched_get_nr_running_avg,

	TP_PROTO(int cpu, int nr, int nr_misfit, int nr_max),

	TP_ARGS(cpu, nr, nr_misfit, nr_max),

	TP_STRUCT__entry(
		__field(int, cpu)
		__field(int, nr)
		__field(int, nr_misfit)
		__field(int, nr_max)
	),

	TP_fast_assign(
		__entry->cpu = cpu;
		__entry->nr = nr;
		__entry->nr_misfit = nr_misfit;
		__entry->nr_max = nr_max;
	),

	TP_printk("cpu=%d nr=%d nr_misfit=%d nr_max=%d",
		__entry->cpu, __entry->nr, __entry->nr_misfit, __entry->nr_max)
);

/*
 * sched_isolate - called when cores are isolated/unisolated
 *
 * @requested_cpu: CPU requested to be isolated/unisolated
 * @isolated_cpus: mask of CPUs currently isolated
 * @start_time: sched_clock() timestamp taken when the operation started
 * @isolate: 1 if isolating, 0 if unisolating
 *
 * The traced time field is the elapsed time of the operation in us.
 */
TRACE_EVENT(sched_isolate,

	TP_PROTO(unsigned int requested_cpu, unsigned int isolated_cpus,
		 u64 start_time, unsigned char isolate),

	TP_ARGS(requested_cpu, isolated_cpus, start_time, isolate),

	TP_STRUCT__entry(
		__field(u32, requested_cpu)
		__field(u32, isolated_cpus)
		__field(u32, time)
		__field(unsigned char, isolate)
	),

	TP_fast_assign(
		__entry->requested_cpu = requested_cpu;
		__entry->isolated_cpus = isolated_cpus;
		__entry->time = div64_u64(sched_clock() - start_time, 1000);
		__entry->isolate = isolate;
	),

	TP_printk("iso cpu=%u cpus=0x%x time=%u us isolated=%d",
		  __entry->requested_cpu, __entry->isolated_cpus,
		  __entry->time, __entry->isolate)
);
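
/*
 * Example call site (illustrative; the isolation code lives outside this
 * file, and cpu_isolated_mask is an assumed name for the isolation
 * cpumask): the caller snapshots sched_clock() before starting so the
 * event can convert the delta to microseconds via div64_u64() as above:
 *
 *	u64 start_time = sched_clock();
 *	... migrate work off the CPU ...
 *	trace_sched_isolate(cpu, cpumask_bits(cpu_isolated_mask)[0],
 *			    start_time, 1);
 */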

TRACE_EVENT(sched_preempt_disable,

	TP_PROTO(u64 delta, bool irqs_disabled,
		 unsigned long caddr0, unsigned long caddr1,
		 unsigned long caddr2, unsigned long caddr3),

	TP_ARGS(delta, irqs_disabled, caddr0, caddr1, caddr2, caddr3),

	TP_STRUCT__entry(
		__field(u64, delta)
		__field(bool, irqs_disabled)
		__field(void*, caddr0)
		__field(void*, caddr1)
		__field(void*, caddr2)
		__field(void*, caddr3)
	),

	TP_fast_assign(
		__entry->delta = delta;
		__entry->irqs_disabled = irqs_disabled;
		__entry->caddr0 = (void *)caddr0;
		__entry->caddr1 = (void *)caddr1;
		__entry->caddr2 = (void *)caddr2;
		__entry->caddr3 = (void *)caddr3;
	),

	TP_printk("delta=%llu(ns) irqs_d=%d Callers:(%ps<-%ps<-%ps<-%ps)",
		  __entry->delta, __entry->irqs_disabled,
		  __entry->caddr0, __entry->caddr1,
		  __entry->caddr2, __entry->caddr3)
);
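
/*
 * Sketch of how the caller addresses would be captured (an assumption;
 * the instrumentation sits in the preempt_enable path outside this file),
 * using the standard CALLER_ADDRn helpers from <linux/ftrace.h>, which
 * the %ps specifiers above then symbolize:
 *
 *	trace_sched_preempt_disable(delta, irqs_disabled(),
 *				    CALLER_ADDR0, CALLER_ADDR1,
 *				    CALLER_ADDR2, CALLER_ADDR3);
 */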

#include "walt.h"

#endif /* CONFIG_SMP */
#endif /* _TRACE_SCHED_H */

/* This part must be outside protection */
#include <trace/define_trace.h>