#undef TRACE_SYSTEM
#define TRACE_SYSTEM sched

#if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SCHED_H

#include <linux/sched.h>
#include <linux/tracepoint.h>
#include <linux/binfmts.h>

/*
 * Tracepoint for calling kthread_stop, performed to end a kthread:
 */
TRACE_EVENT(sched_kthread_stop,

	TP_PROTO(struct task_struct *t),

	TP_ARGS(t),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, t->comm, TASK_COMM_LEN);
		__entry->pid	= t->pid;
	),

	TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
);

/*
 * Tracepoint for the return value of the kthread stopping:
 */
TRACE_EVENT(sched_kthread_stop_ret,

	TP_PROTO(int ret),

	TP_ARGS(ret),

	TP_STRUCT__entry(
		__field(	int,	ret	)
	),

	TP_fast_assign(
		__entry->ret	= ret;
	),

	TP_printk("ret=%d", __entry->ret)
);

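/*
 * Usage sketch (illustrative): TRACE_EVENT() expands each definition above
 * into a trace_<name>() call that kernel code can invoke, so a kthread
 * teardown path would be expected to emit the pair of events roughly as
 * follows; the surrounding logic is elided because it lives outside this
 * header:
 *
 *	trace_sched_kthread_stop(k);
 *	... wait for the kthread to exit and collect ret ...
 *	trace_sched_kthread_stop_ret(ret);
 */
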
/*
 * Tracepoint for waking up a task:
 */
DECLARE_EVENT_CLASS(sched_wakeup_template,

	TP_PROTO(struct task_struct *p),

	TP_ARGS(__perf_task(p)),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
		__field(	int,	success			)
		__field(	int,	target_cpu		)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid		= p->pid;
		__entry->prio		= p->prio;
		__entry->success	= 1; /* rudiment, kill when possible */
		__entry->target_cpu	= task_cpu(p);
	),

	TP_printk("comm=%s pid=%d prio=%d target_cpu=%03d",
		  __entry->comm, __entry->pid, __entry->prio,
		  __entry->target_cpu)
);

/*
 * Tracepoint called when waking a task; this tracepoint is guaranteed to be
 * called from the waking context.
 */
DEFINE_EVENT(sched_wakeup_template, sched_waking,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

/*
 * Tracepoint called when the task is actually woken; p->state == TASK_RUNNING.
 * It is not always called from the waking context.
 */
DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

/*
 * Tracepoint for waking up a new task:
 */
DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

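/*
 * Usage sketch (illustrative): each DEFINE_EVENT() above reuses the
 * sched_wakeup_template layout but is exposed as its own event under tracefs.
 * Assuming the conventional mount point (it may instead be
 * /sys/kernel/debug/tracing on some systems), enabling and reading one of
 * them from user space looks like:
 *
 *	# echo 1 > /sys/kernel/tracing/events/sched/sched_wakeup/enable
 *	# cat /sys/kernel/tracing/trace_pipe
 */
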
#ifdef CREATE_TRACE_POINTS
static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p)
{
#ifdef CONFIG_SCHED_DEBUG
	BUG_ON(p != current);
#endif /* CONFIG_SCHED_DEBUG */

	/*
	 * Preemption ignores task state, therefore preempted tasks are always
	 * RUNNING (we will not have dequeued if state != RUNNING).
	 */
	return preempt ? TASK_RUNNING | TASK_STATE_MAX : p->state;
}
#endif /* CREATE_TRACE_POINTS */

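/*
 * Note (illustrative): the helper above is guarded by CREATE_TRACE_POINTS so
 * it is only compiled into the single translation unit that instantiates
 * these tracepoints, typically along the lines of:
 *
 *	#define CREATE_TRACE_POINTS
 *	#include <trace/events/sched.h>
 *
 * Every other includer of this header sees only the event declarations and
 * does not pick up the static function.
 */
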
/*
 * Tracepoint for task switches, performed by the scheduler:
 */
TRACE_EVENT(sched_switch,

	TP_PROTO(bool preempt,
		 struct task_struct *prev,
		 struct task_struct *next),

	TP_ARGS(preempt, prev, next),

	TP_STRUCT__entry(
		__array(	char,	prev_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	prev_pid			)
		__field(	int,	prev_prio			)
		__field(	long,	prev_state			)
		__array(	char,	next_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	next_pid			)
		__field(	int,	next_prio			)
	),

	TP_fast_assign(
		memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
		__entry->prev_pid	= prev->pid;
		__entry->prev_prio	= prev->prio;
		__entry->prev_state	= __trace_sched_switch_state(preempt, prev);
		memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
		__entry->next_pid	= next->pid;
		__entry->next_prio	= next->prio;
	),

	TP_printk("prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%s%s ==> next_comm=%s next_pid=%d next_prio=%d",
		__entry->prev_comm, __entry->prev_pid, __entry->prev_prio,
		__entry->prev_state & (TASK_STATE_MAX-1) ?
		  __print_flags(__entry->prev_state & (TASK_STATE_MAX-1), "|",
				{ 1, "S" }, { 2, "D" }, { 4, "T" }, { 8, "t" },
				{ 16, "Z" }, { 32, "X" }, { 64, "x" },
				{ 128, "K" }, { 256, "W" }, { 512, "P" },
				{ 1024, "N" }) : "R",
		__entry->prev_state & TASK_STATE_MAX ? "+" : "",
		__entry->next_comm, __entry->next_pid, __entry->next_prio)
);

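/*
 * Illustrative output (derived from the TP_printk() format above; the values
 * are made up): a task preempted while still runnable carries the extra
 * TASK_STATE_MAX bit and is rendered with a "+" suffix, e.g.
 *
 *	prev_comm=bash prev_pid=1234 prev_prio=120 prev_state=R+ ==> next_comm=swapper/0 next_pid=0 next_prio=120
 *
 * whereas a task that blocked voluntarily shows one of the state letters from
 * the flag table (S, D, T, ...) instead of R.
 */
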
/*
 * Tracepoint for a task being migrated:
 */
TRACE_EVENT(sched_migrate_task,

	TP_PROTO(struct task_struct *p, int dest_cpu),

	TP_ARGS(p, dest_cpu),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
		__field(	int,	orig_cpu		)
		__field(	int,	dest_cpu		)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid		= p->pid;
		__entry->prio		= p->prio;
		__entry->orig_cpu	= task_cpu(p);
		__entry->dest_cpu	= dest_cpu;
	),

	TP_printk("comm=%s pid=%d prio=%d orig_cpu=%d dest_cpu=%d",
		  __entry->comm, __entry->pid, __entry->prio,
		  __entry->orig_cpu, __entry->dest_cpu)
);

DECLARE_EVENT_CLASS(sched_process_template,

	TP_PROTO(struct task_struct *p),

	TP_ARGS(p),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid		= p->pid;
		__entry->prio		= p->prio;
	),

	TP_printk("comm=%s pid=%d prio=%d",
		  __entry->comm, __entry->pid, __entry->prio)
);

/*
 * Tracepoint for freeing a task:
 */
DEFINE_EVENT(sched_process_template, sched_process_free,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

/*
 * Tracepoint for a task exiting:
 */
DEFINE_EVENT(sched_process_template, sched_process_exit,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

/*
 * Tracepoint for waiting on task to unschedule:
 */
DEFINE_EVENT(sched_process_template, sched_wait_task,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p));

/*
 * Tracepoint for a waiting task:
 */
TRACE_EVENT(sched_process_wait,

	TP_PROTO(struct pid *pid),

	TP_ARGS(pid),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid			)
		__field(	int,	prio			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
		__entry->pid		= pid_nr(pid);
		__entry->prio		= current->prio;
	),

	TP_printk("comm=%s pid=%d prio=%d",
		  __entry->comm, __entry->pid, __entry->prio)
);

/*
 * Tracepoint for do_fork:
 */
TRACE_EVENT(sched_process_fork,

	TP_PROTO(struct task_struct *parent, struct task_struct *child),

	TP_ARGS(parent, child),

	TP_STRUCT__entry(
		__array(	char,	parent_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	parent_pid			)
		__array(	char,	child_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	child_pid			)
	),

	TP_fast_assign(
		memcpy(__entry->parent_comm, parent->comm, TASK_COMM_LEN);
		__entry->parent_pid	= parent->pid;
		memcpy(__entry->child_comm, child->comm, TASK_COMM_LEN);
		__entry->child_pid	= child->pid;
	),

	TP_printk("comm=%s pid=%d child_comm=%s child_pid=%d",
		__entry->parent_comm, __entry->parent_pid,
		__entry->child_comm, __entry->child_pid)
);

/*
 * Tracepoint for exec:
 */
TRACE_EVENT(sched_process_exec,

	TP_PROTO(struct task_struct *p, pid_t old_pid,
		 struct linux_binprm *bprm),

	TP_ARGS(p, old_pid, bprm),

	TP_STRUCT__entry(
		__string(	filename,	bprm->filename	)
		__field(	pid_t,		pid		)
		__field(	pid_t,		old_pid		)
	),

	TP_fast_assign(
		__assign_str(filename, bprm->filename);
		__entry->pid		= p->pid;
		__entry->old_pid	= old_pid;
	),

	TP_printk("filename=%s pid=%d old_pid=%d", __get_str(filename),
		  __entry->pid, __entry->old_pid)
);

/*
 * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE;
 *     adding sched_stat support to SCHED_FIFO/RR would be welcome.
 */
DECLARE_EVENT_CLASS(sched_stat_template,

	TP_PROTO(struct task_struct *tsk, u64 delay),

	TP_ARGS(__perf_task(tsk), __perf_count(delay)),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN	)
		__field( pid_t,	pid			)
		__field( u64,	delay			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid	= tsk->pid;
		__entry->delay	= delay;
	),

	TP_printk("comm=%s pid=%d delay=%Lu [ns]",
			__entry->comm, __entry->pid,
			(unsigned long long)__entry->delay)
);

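/*
 * Usage sketch (illustrative): because the class routes the delay through
 * __perf_count(), perf accumulates the reported delay as the event count
 * rather than counting one per occurrence, so the events defined below can
 * be recorded e.g. as:
 *
 *	# perf record -e sched:sched_stat_sleep -a -- sleep 10
 *	# perf report
 *
 * The exact command line is only an example.
 */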

/*
 * Tracepoint for accounting wait time (time the task is runnable
 * but not actually running due to scheduler contention).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_wait,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting sleep time (time the task is not runnable,
 * including iowait, see below).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_sleep,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting iowait time (time the task is not runnable
 * due to waiting on IO to complete).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_iowait,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting blocked time (time the task is in uninterruptible
 * sleep).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_blocked,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay));

/*
 * Tracepoint for recording the cause of uninterruptible sleep.
 */
TRACE_EVENT(sched_blocked_reason,

	TP_PROTO(struct task_struct *tsk),

	TP_ARGS(tsk),

	TP_STRUCT__entry(
		__field( pid_t,	pid		)
		__field( void*,	caller		)
		__field( bool,	io_wait		)
	),

	TP_fast_assign(
		__entry->pid	= tsk->pid;
		__entry->caller	= (void *)get_wchan(tsk);
		__entry->io_wait = tsk->in_iowait;
	),

	TP_printk("pid=%d iowait=%d caller=%pS", __entry->pid, __entry->io_wait, __entry->caller)
);

/*
 * Tracepoint for accounting runtime (time the task is executing
 * on a CPU).
 */
DECLARE_EVENT_CLASS(sched_stat_runtime,

	TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),

	TP_ARGS(tsk, __perf_count(runtime), vruntime),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN	)
		__field( pid_t,	pid			)
		__field( u64,	runtime			)
		__field( u64,	vruntime		)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid		= tsk->pid;
		__entry->runtime	= runtime;
		__entry->vruntime	= vruntime;
	),

	TP_printk("comm=%s pid=%d runtime=%Lu [ns] vruntime=%Lu [ns]",
			__entry->comm, __entry->pid,
			(unsigned long long)__entry->runtime,
			(unsigned long long)__entry->vruntime)
);

DEFINE_EVENT(sched_stat_runtime, sched_stat_runtime,
	     TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
	     TP_ARGS(tsk, runtime, vruntime));

/*
 * Tracepoint for showing priority inheritance modifying a task's
 * priority.
 */
TRACE_EVENT(sched_pi_setprio,

	TP_PROTO(struct task_struct *tsk, int newprio),

	TP_ARGS(tsk, newprio),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN	)
		__field( pid_t,	pid			)
		__field( int,	oldprio			)
		__field( int,	newprio			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid		= tsk->pid;
		__entry->oldprio	= tsk->prio;
		__entry->newprio	= newprio;
	),

	TP_printk("comm=%s pid=%d oldprio=%d newprio=%d",
			__entry->comm, __entry->pid,
			__entry->oldprio, __entry->newprio)
);

#ifdef CONFIG_DETECT_HUNG_TASK
TRACE_EVENT(sched_process_hang,
	TP_PROTO(struct task_struct *tsk),
	TP_ARGS(tsk),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN	)
		__field( pid_t,	pid			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid = tsk->pid;
	),

	TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
);
#endif /* CONFIG_DETECT_HUNG_TASK */

DECLARE_EVENT_CLASS(sched_move_task_template,

	TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu),

	TP_ARGS(tsk, src_cpu, dst_cpu),

	TP_STRUCT__entry(
		__field( pid_t,	pid			)
		__field( pid_t,	tgid			)
		__field( pid_t,	ngid			)
		__field( int,	src_cpu			)
		__field( int,	src_nid			)
		__field( int,	dst_cpu			)
		__field( int,	dst_nid			)
	),

	TP_fast_assign(
		__entry->pid		= task_pid_nr(tsk);
		__entry->tgid		= task_tgid_nr(tsk);
		__entry->ngid		= task_numa_group_id(tsk);
		__entry->src_cpu	= src_cpu;
		__entry->src_nid	= cpu_to_node(src_cpu);
		__entry->dst_cpu	= dst_cpu;
		__entry->dst_nid	= cpu_to_node(dst_cpu);
	),

	TP_printk("pid=%d tgid=%d ngid=%d src_cpu=%d src_nid=%d dst_cpu=%d dst_nid=%d",
			__entry->pid, __entry->tgid, __entry->ngid,
			__entry->src_cpu, __entry->src_nid,
			__entry->dst_cpu, __entry->dst_nid)
);

/*
 * Tracks migration of tasks from one runqueue to another. Can be used to
 * detect if automatic NUMA balancing is bouncing between nodes.
 */
DEFINE_EVENT(sched_move_task_template, sched_move_numa,
	TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu),

	TP_ARGS(tsk, src_cpu, dst_cpu)
);

DEFINE_EVENT(sched_move_task_template, sched_stick_numa,
	TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu),

	TP_ARGS(tsk, src_cpu, dst_cpu)
);

TRACE_EVENT(sched_swap_numa,

	TP_PROTO(struct task_struct *src_tsk, int src_cpu,
		 struct task_struct *dst_tsk, int dst_cpu),

	TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu),

	TP_STRUCT__entry(
		__field( pid_t,	src_pid			)
		__field( pid_t,	src_tgid		)
		__field( pid_t,	src_ngid		)
		__field( int,	src_cpu			)
		__field( int,	src_nid			)
		__field( pid_t,	dst_pid			)
		__field( pid_t,	dst_tgid		)
		__field( pid_t,	dst_ngid		)
		__field( int,	dst_cpu			)
		__field( int,	dst_nid			)
	),

	TP_fast_assign(
		__entry->src_pid	= task_pid_nr(src_tsk);
		__entry->src_tgid	= task_tgid_nr(src_tsk);
		__entry->src_ngid	= task_numa_group_id(src_tsk);
		__entry->src_cpu	= src_cpu;
		__entry->src_nid	= cpu_to_node(src_cpu);
		__entry->dst_pid	= task_pid_nr(dst_tsk);
		__entry->dst_tgid	= task_tgid_nr(dst_tsk);
		__entry->dst_ngid	= task_numa_group_id(dst_tsk);
		__entry->dst_cpu	= dst_cpu;
		__entry->dst_nid	= cpu_to_node(dst_cpu);
	),

	TP_printk("src_pid=%d src_tgid=%d src_ngid=%d src_cpu=%d src_nid=%d dst_pid=%d dst_tgid=%d dst_ngid=%d dst_cpu=%d dst_nid=%d",
			__entry->src_pid, __entry->src_tgid, __entry->src_ngid,
			__entry->src_cpu, __entry->src_nid,
			__entry->dst_pid, __entry->dst_tgid, __entry->dst_ngid,
			__entry->dst_cpu, __entry->dst_nid)
);

/*
 * Tracepoint for waking a polling cpu without an IPI.
 */
TRACE_EVENT(sched_wake_idle_without_ipi,

	TP_PROTO(int cpu),

	TP_ARGS(cpu),

	TP_STRUCT__entry(
		__field( int,	cpu	)
	),

	TP_fast_assign(
		__entry->cpu	= cpu;
	),

	TP_printk("cpu=%d", __entry->cpu)
);

TRACE_EVENT(sched_contrib_scale_f,

	TP_PROTO(int cpu, unsigned long freq_scale_factor,
		 unsigned long cpu_scale_factor),

	TP_ARGS(cpu, freq_scale_factor, cpu_scale_factor),

	TP_STRUCT__entry(
		__field(int, cpu)
		__field(unsigned long, freq_scale_factor)
		__field(unsigned long, cpu_scale_factor)
	),

	TP_fast_assign(
		__entry->cpu = cpu;
		__entry->freq_scale_factor = freq_scale_factor;
		__entry->cpu_scale_factor = cpu_scale_factor;
	),

	TP_printk("cpu=%d freq_scale_factor=%lu cpu_scale_factor=%lu",
		  __entry->cpu, __entry->freq_scale_factor,
		  __entry->cpu_scale_factor)
);

#ifdef CONFIG_SMP

#ifdef CONFIG_SCHED_WALT
extern unsigned int sysctl_sched_use_walt_cpu_util;
extern unsigned int sysctl_sched_use_walt_task_util;
extern unsigned int walt_ravg_window;
extern bool walt_disabled;
#endif

/*
 * Tracepoint for accounting sched averages for tasks.
 */
TRACE_EVENT(sched_load_avg_task,

	TP_PROTO(struct task_struct *tsk, struct sched_avg *avg, void *_ravg),

	TP_ARGS(tsk, avg, _ravg),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN		)
		__field( pid_t,	pid				)
		__field( int,	cpu				)
		__field( unsigned long,	load_avg		)
		__field( unsigned long,	util_avg		)
		__field( unsigned long,	util_avg_pelt		)
		__field( u32,		util_avg_walt		)
		__field( u64,		load_sum		)
		__field( u32,		util_sum		)
		__field( u32,		period_contrib		)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid			= tsk->pid;
		__entry->cpu			= task_cpu(tsk);
		__entry->load_avg		= avg->load_avg;
		__entry->util_avg		= avg->util_avg;
		__entry->load_sum		= avg->load_sum;
		__entry->util_sum		= avg->util_sum;
		__entry->period_contrib		= avg->period_contrib;
		__entry->util_avg_pelt		= avg->util_avg;
		__entry->util_avg_walt		= 0;
#ifdef CONFIG_SCHED_WALT
		__entry->util_avg_walt = ((struct ravg *)_ravg)->demand /
					 (walt_ravg_window >> SCHED_CAPACITY_SHIFT);
		if (!walt_disabled && sysctl_sched_use_walt_task_util)
			__entry->util_avg = __entry->util_avg_walt;
#endif
	),
	TP_printk("comm=%s pid=%d cpu=%d load_avg=%lu util_avg=%lu "
		  "util_avg_pelt=%lu util_avg_walt=%u load_sum=%llu"
		  " util_sum=%u period_contrib=%u",
		  __entry->comm,
		  __entry->pid,
		  __entry->cpu,
		  __entry->load_avg,
		  __entry->util_avg,
		  __entry->util_avg_pelt,
		  __entry->util_avg_walt,
		  (u64)__entry->load_sum,
		  (u32)__entry->util_sum,
		  (u32)__entry->period_contrib)
);
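
/*
 * Worked example (illustrative, assuming the default 20ms WALT window):
 * walt_ravg_window >> SCHED_CAPACITY_SHIFT is 20000000 >> 10, i.e. roughly
 * 19531 ns per capacity unit, so a task with a demand of 10000000 ns reports
 * util_avg_walt of about 512, around half of SCHED_CAPACITY_SCALE (1024).
 */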

/*
 * Tracepoint for accounting sched averages for cpus.
 */
TRACE_EVENT(sched_load_avg_cpu,

	TP_PROTO(int cpu, struct cfs_rq *cfs_rq),

	TP_ARGS(cpu, cfs_rq),

	TP_STRUCT__entry(
		__field( int,	cpu				)
		__field( unsigned long,	load_avg		)
		__field( unsigned long,	util_avg		)
		__field( unsigned long,	util_avg_pelt		)
		__field( u32,		util_avg_walt		)
	),

	TP_fast_assign(
		__entry->cpu			= cpu;
		__entry->load_avg		= cfs_rq->avg.load_avg;
		__entry->util_avg		= cfs_rq->avg.util_avg;
		__entry->util_avg_pelt		= cfs_rq->avg.util_avg;
		__entry->util_avg_walt		= 0;
#ifdef CONFIG_SCHED_WALT
		__entry->util_avg_walt = div64_ul(cpu_rq(cpu)->prev_runnable_sum,
						  walt_ravg_window >> SCHED_CAPACITY_SHIFT);
		if (!walt_disabled && sysctl_sched_use_walt_cpu_util)
			__entry->util_avg = __entry->util_avg_walt;
#endif
	),

	TP_printk("cpu=%d load_avg=%lu util_avg=%lu "
		  "util_avg_pelt=%lu util_avg_walt=%u",
		  __entry->cpu, __entry->load_avg, __entry->util_avg,
		  __entry->util_avg_pelt, __entry->util_avg_walt)
);

/*
 * Tracepoint for sched_tune_config settings
 */
TRACE_EVENT(sched_tune_config,

	TP_PROTO(int boost),

	TP_ARGS(boost),

	TP_STRUCT__entry(
		__field( int,	boost	)
	),

	TP_fast_assign(
		__entry->boost	= boost;
	),

	TP_printk("boost=%d ", __entry->boost)
);

/*
 * Tracepoint for accounting CPU boosted utilization
 */
TRACE_EVENT(sched_boost_cpu,

	TP_PROTO(int cpu, unsigned long util, long margin),

	TP_ARGS(cpu, util, margin),

	TP_STRUCT__entry(
		__field( int,		cpu	)
		__field( unsigned long,	util	)
		__field( long,		margin	)
	),

	TP_fast_assign(
		__entry->cpu	= cpu;
		__entry->util	= util;
		__entry->margin	= margin;
	),

	TP_printk("cpu=%d util=%lu margin=%ld",
		  __entry->cpu,
		  __entry->util,
		  __entry->margin)
);

/*
 * Tracepoint for schedtune_tasks_update
 */
TRACE_EVENT(sched_tune_tasks_update,

	TP_PROTO(struct task_struct *tsk, int cpu, int tasks, int idx,
		 int boost, int max_boost),

	TP_ARGS(tsk, cpu, tasks, idx, boost, max_boost),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN	)
		__field( pid_t,	pid		)
		__field( int,	cpu		)
		__field( int,	tasks		)
		__field( int,	idx		)
		__field( int,	boost		)
		__field( int,	max_boost	)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid		= tsk->pid;
		__entry->cpu		= cpu;
		__entry->tasks		= tasks;
		__entry->idx		= idx;
		__entry->boost		= boost;
		__entry->max_boost	= max_boost;
	),

	TP_printk("pid=%d comm=%s "
		  "cpu=%d tasks=%d idx=%d boost=%d max_boost=%d",
		  __entry->pid, __entry->comm,
		  __entry->cpu, __entry->tasks, __entry->idx,
		  __entry->boost, __entry->max_boost)
);

/*
 * Tracepoint for schedtune_boostgroup_update
 */
TRACE_EVENT(sched_tune_boostgroup_update,

	TP_PROTO(int cpu, int variation, int max_boost),

	TP_ARGS(cpu, variation, max_boost),

	TP_STRUCT__entry(
		__field( int,	cpu		)
		__field( int,	variation	)
		__field( int,	max_boost	)
	),

	TP_fast_assign(
		__entry->cpu		= cpu;
		__entry->variation	= variation;
		__entry->max_boost	= max_boost;
	),

	TP_printk("cpu=%d variation=%d max_boost=%d",
		  __entry->cpu, __entry->variation, __entry->max_boost)
);

/*
 * Tracepoint for accounting task boosted utilization
 */
TRACE_EVENT(sched_boost_task,

	TP_PROTO(struct task_struct *tsk, unsigned long util, long margin),

	TP_ARGS(tsk, util, margin),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN	)
		__field( pid_t,		pid	)
		__field( unsigned long,	util	)
		__field( long,		margin	)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid	= tsk->pid;
		__entry->util	= util;
		__entry->margin	= margin;
	),

	TP_printk("comm=%s pid=%d util=%lu margin=%ld",
		  __entry->comm, __entry->pid,
		  __entry->util,
		  __entry->margin)
);

/*
 * Tracepoint for find_best_target
 */
TRACE_EVENT(sched_find_best_target,

	TP_PROTO(struct task_struct *tsk, bool prefer_idle,
		 unsigned long min_util, int start_cpu,
		 int best_idle, int best_active, int target),

	TP_ARGS(tsk, prefer_idle, min_util, start_cpu,
		best_idle, best_active, target),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN	)
		__field( pid_t,	pid			)
		__field( unsigned long,	min_util	)
		__field( bool,	prefer_idle		)
		__field( int,	start_cpu		)
		__field( int,	best_idle		)
		__field( int,	best_active		)
		__field( int,	target			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid		= tsk->pid;
		__entry->min_util	= min_util;
		__entry->prefer_idle	= prefer_idle;
		__entry->start_cpu	= start_cpu;
		__entry->best_idle	= best_idle;
		__entry->best_active	= best_active;
		__entry->target		= target;
	),

	TP_printk("pid=%d comm=%s prefer_idle=%d start_cpu=%d "
		  "best_idle=%d best_active=%d target=%d",
		  __entry->pid, __entry->comm,
		  __entry->prefer_idle, __entry->start_cpu,
		  __entry->best_idle, __entry->best_active,
		  __entry->target)
);

/*
 * Tracepoint for the schedtune energy/capacity payoff filter
 */
TRACE_EVENT(sched_tune_filter,

	TP_PROTO(int nrg_delta, int cap_delta,
		 int nrg_gain, int cap_gain,
		 int payoff, int region),

	TP_ARGS(nrg_delta, cap_delta, nrg_gain, cap_gain, payoff, region),

	TP_STRUCT__entry(
		__field( int,	nrg_delta	)
		__field( int,	cap_delta	)
		__field( int,	nrg_gain	)
		__field( int,	cap_gain	)
		__field( int,	payoff		)
		__field( int,	region		)
	),

	TP_fast_assign(
		__entry->nrg_delta	= nrg_delta;
		__entry->cap_delta	= cap_delta;
		__entry->nrg_gain	= nrg_gain;
		__entry->cap_gain	= cap_gain;
		__entry->payoff		= payoff;
		__entry->region		= region;
	),

	TP_printk("nrg_delta=%d cap_delta=%d nrg_gain=%d cap_gain=%d payoff=%d region=%d",
		  __entry->nrg_delta, __entry->cap_delta,
		  __entry->nrg_gain, __entry->cap_gain,
		  __entry->payoff, __entry->region)
);

/*
 * Tracepoint for system overutilized flag
 */
TRACE_EVENT(sched_overutilized,

	TP_PROTO(bool overutilized),

	TP_ARGS(overutilized),

	TP_STRUCT__entry(
		__field( bool,	overutilized	)
	),

	TP_fast_assign(
		__entry->overutilized	= overutilized;
	),

	TP_printk("overutilized=%d",
		  __entry->overutilized ? 1 : 0)
);

#ifdef CONFIG_SCHED_WALT
struct rq;

TRACE_EVENT(walt_update_task_ravg,

	TP_PROTO(struct task_struct *p, struct rq *rq, int evt,
		 u64 wallclock, u64 irqtime),

	TP_ARGS(p, rq, evt, wallclock, irqtime),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN	)
		__field( pid_t,	pid			)
		__field( pid_t,	cur_pid			)
		__field( u64,	wallclock		)
		__field( u64,	mark_start		)
		__field( u64,	delta_m			)
		__field( u64,	win_start		)
		__field( u64,	delta			)
		__field( u64,	irqtime			)
		__field( int,	evt			)
		__field( unsigned int,	demand		)
		__field( unsigned int,	sum		)
		__field( int,	cpu			)
		__field( u64,	cs			)
		__field( u64,	ps			)
		__field( unsigned long,	util		)
		__field( u32,	curr_window		)
		__field( u32,	prev_window		)
		__field( u64,	nt_cs			)
		__field( u64,	nt_ps			)
		__field( u32,	active_windows		)
	),

	TP_fast_assign(
		__entry->wallclock	= wallclock;
		__entry->win_start	= rq->window_start;
		__entry->delta		= (wallclock - rq->window_start);
		__entry->evt		= evt;
		__entry->cpu		= rq->cpu;
		__entry->cur_pid	= rq->curr->pid;
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid		= p->pid;
		__entry->mark_start	= p->ravg.mark_start;
		__entry->delta_m	= (wallclock - p->ravg.mark_start);
		__entry->demand		= p->ravg.demand;
		__entry->sum		= p->ravg.sum;
		__entry->irqtime	= irqtime;
		__entry->cs		= rq->curr_runnable_sum;
		__entry->ps		= rq->prev_runnable_sum;
		__entry->util		= rq->prev_runnable_sum << SCHED_CAPACITY_SHIFT;
		do_div(__entry->util, walt_ravg_window);
		__entry->curr_window	= p->ravg.curr_window;
		__entry->prev_window	= p->ravg.prev_window;
		__entry->nt_cs		= rq->nt_curr_runnable_sum;
		__entry->nt_ps		= rq->nt_prev_runnable_sum;
		__entry->active_windows	= p->ravg.active_windows;
	),

	TP_printk("wc %llu ws %llu delta %llu event %d cpu %d cur_pid %d task %d (%s) ms %llu delta %llu demand %u sum %u irqtime %llu"
		" cs %llu ps %llu util %lu cur_window %u prev_window %u active_wins %u",
		__entry->wallclock, __entry->win_start, __entry->delta,
		__entry->evt, __entry->cpu, __entry->cur_pid,
		__entry->pid, __entry->comm, __entry->mark_start,
		__entry->delta_m, __entry->demand,
		__entry->sum, __entry->irqtime,
		__entry->cs, __entry->ps, __entry->util,
		__entry->curr_window, __entry->prev_window,
		__entry->active_windows
	)
);

TRACE_EVENT(walt_update_history,

	TP_PROTO(struct rq *rq, struct task_struct *p, u32 runtime, int samples,
		 int evt),

	TP_ARGS(rq, p, runtime, samples, evt),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN	)
		__field( pid_t,	pid			)
		__field( unsigned int,	runtime		)
		__field( int,	samples			)
		__field( int,	evt			)
		__field( u64,	demand			)
		__field( u64,	walt_avg		)
		__field( unsigned int,	pelt_avg	)
		__array( u32,	hist, RAVG_HIST_SIZE_MAX)
		__field( int,	cpu			)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid		= p->pid;
		__entry->runtime	= runtime;
		__entry->samples	= samples;
		__entry->evt		= evt;
		__entry->demand		= p->ravg.demand;
		__entry->walt_avg	= (__entry->demand << SCHED_CAPACITY_SHIFT);
		__entry->walt_avg	= div_u64(__entry->walt_avg,
						  walt_ravg_window);
		__entry->pelt_avg	= p->se.avg.util_avg;
		memcpy(__entry->hist, p->ravg.sum_history,
		       RAVG_HIST_SIZE_MAX * sizeof(u32));
		__entry->cpu		= rq->cpu;
	),

	TP_printk("%d (%s): runtime %u samples %d event %d demand %llu"
		  " walt %llu pelt %u (hist: %u %u %u %u %u) cpu %d",
		  __entry->pid, __entry->comm,
		  __entry->runtime, __entry->samples, __entry->evt,
		  __entry->demand,
		  __entry->walt_avg,
		  __entry->pelt_avg,
		  __entry->hist[0], __entry->hist[1],
		  __entry->hist[2], __entry->hist[3],
		  __entry->hist[4], __entry->cpu)
);

TRACE_EVENT(walt_migration_update_sum,

	TP_PROTO(struct rq *rq, struct task_struct *p),

	TP_ARGS(rq, p),

	TP_STRUCT__entry(
		__field( int,	cpu	)
		__field( int,	pid	)
		__field( u64,	cs	)
		__field( u64,	ps	)
		__field( s64,	nt_cs	)
		__field( s64,	nt_ps	)
	),

	TP_fast_assign(
		__entry->cpu	= cpu_of(rq);
		__entry->cs	= rq->curr_runnable_sum;
		__entry->ps	= rq->prev_runnable_sum;
		__entry->nt_cs	= (s64)rq->nt_curr_runnable_sum;
		__entry->nt_ps	= (s64)rq->nt_prev_runnable_sum;
		__entry->pid	= p->pid;
	),

	TP_printk("cpu %d: cs %llu ps %llu nt_cs %lld nt_ps %lld pid %d",
		  __entry->cpu, __entry->cs, __entry->ps,
		  __entry->nt_cs, __entry->nt_ps, __entry->pid)
);
#endif /* CONFIG_SCHED_WALT */

#endif /* CONFIG_SMP */

#endif /* _TRACE_SCHED_H */

/* This part must be outside protection */
#include <trace/define_trace.h>