#ifdef CONFIG_SCHEDSTATS

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{
	if (rq) {
		rq->rq_sched_info.run_delay += delta;
		rq->rq_sched_info.pcount++;
	}
}

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_cpu_time += delta;
}

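/*
 * Expects runqueue lock to be held for atomicity of update
 */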
static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_sched_info.run_delay += delta;
}

#define schedstat_enabled()		static_branch_unlikely(&sched_schedstats)
#define schedstat_inc(var)		do { if (schedstat_enabled()) { var++; } } while (0)
#define schedstat_add(var, amt)		do { if (schedstat_enabled()) { var += (amt); } } while (0)
#define schedstat_set(var, val)		do { if (schedstat_enabled()) { var = (val); } } while (0)
#define schedstat_val(var)		(var)
#define schedstat_val_or_zero(var)	((schedstat_enabled()) ? (var) : 0)
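
/*
 * Illustrative usage (a sketch, not code from this file): callers wrap
 * statistics updates in these helpers so that, with schedstats disabled,
 * each update compiles down to a single static-branch check, e.g.
 *
 *	schedstat_inc(rq->yld_count);
 *	schedstat_add(p->se.statistics.wait_sum, delta);
 */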

#else /* !CONFIG_SCHEDSTATS */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{}
#define schedstat_enabled()		0
#define schedstat_inc(var)		do { } while (0)
#define schedstat_add(var, amt)		do { } while (0)
#define schedstat_set(var, val)		do { } while (0)
#define schedstat_val(var)		0
#define schedstat_val_or_zero(var)	0
#endif /* CONFIG_SCHEDSTATS */

#ifdef CONFIG_PSI
/*
 * PSI tracks state that persists across sleeps, such as iowaits and
 * memory stalls. As a result, it has to distinguish between sleeps,
 * where a task's runnable state changes, and requeues, where a task
 * and its state are being moved between CPUs and runqueues.
 */
static inline void psi_enqueue(struct task_struct *p, bool wakeup)
{
	int clear = 0, set = TSK_RUNNING;

	if (static_branch_likely(&psi_disabled))
		return;

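	/*
	 * A requeue (a migration, or a wakeup already moved by
	 * psi_ttwu_dequeue()) must re-assert the task's sleep-persistent
	 * memstall state on the new runqueue; a genuine same-CPU wakeup
	 * instead ends any iowait period.
	 */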
	if (!wakeup || p->sched_psi_wake_requeue) {
		if (p->flags & PF_MEMSTALL)
			set |= TSK_MEMSTALL;
		if (p->sched_psi_wake_requeue)
			p->sched_psi_wake_requeue = 0;
	} else {
		if (p->in_iowait)
			clear |= TSK_IOWAIT;
	}

	psi_task_change(p, clear, set);
}

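/*
 * A dequeue for a real sleep clears the task's runnable state and, if it
 * is in iowait, begins iowait accounting; a non-sleep dequeue (e.g. a
 * migration) also drops any memstall state so psi_enqueue() can re-assert
 * it on the destination runqueue.
 */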
static inline void psi_dequeue(struct task_struct *p, bool sleep)
{
	int clear = TSK_RUNNING, set = 0;

	if (static_branch_likely(&psi_disabled))
		return;

	if (!sleep) {
		if (p->flags & PF_MEMSTALL)
			clear |= TSK_MEMSTALL;
	} else {
		if (p->in_iowait)
			set |= TSK_IOWAIT;
	}

	psi_task_change(p, clear, set);
}

static inline void psi_ttwu_dequeue(struct task_struct *p)
{
	if (static_branch_likely(&psi_disabled))
		return;
	/*
	 * Is the task being migrated during a wakeup? Make sure to
	 * deregister its sleep-persistent psi states from the old
	 * queue, and let psi_enqueue() know it has to requeue.
	 */
	if (unlikely(p->in_iowait || (p->flags & PF_MEMSTALL))) {
		struct rq_flags rf;
		struct rq *rq;
		int clear = 0;

		if (p->in_iowait)
			clear |= TSK_IOWAIT;
		if (p->flags & PF_MEMSTALL)
			clear |= TSK_MEMSTALL;

		rq = __task_rq_lock(p, &rf);
		psi_task_change(p, clear, 0);
		p->sched_psi_wake_requeue = 1;
		__task_rq_unlock(rq, &rf);
	}
}

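/*
 * Called from the scheduler tick path: if the task currently running on
 * this CPU is in a memory stall, let psi account the ongoing stall time.
 */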
static inline void psi_task_tick(struct rq *rq)
{
	if (static_branch_likely(&psi_disabled))
		return;

	if (unlikely(rq->curr->flags & PF_MEMSTALL))
		psi_memstall_tick(rq->curr, cpu_of(rq));
}
#else /* CONFIG_PSI */
static inline void psi_enqueue(struct task_struct *p, bool wakeup) {}
static inline void psi_dequeue(struct task_struct *p, bool sleep) {}
static inline void psi_ttwu_dequeue(struct task_struct *p) {}
static inline void psi_task_tick(struct rq *rq) {}
#endif /* CONFIG_PSI */

#ifdef CONFIG_SCHED_INFO
static inline void sched_info_reset_dequeued(struct task_struct *t)
{
	t->sched_info.last_queued = 0;
}

/*
 * We are interested in knowing how long it was from the *first* time a
 * task was queued to the time that it finally hit a CPU. We call this
 * routine from dequeue_task() to account for possible rq->clock skew
 * across CPUs; the delta taken on each CPU annuls the skew.
 */
static inline void sched_info_dequeued(struct rq *rq, struct task_struct *t)
{
	unsigned long long now = rq_clock(rq), delta = 0;

	if (unlikely(sched_info_on()))
		if (t->sched_info.last_queued)
			delta = now - t->sched_info.last_queued;
	sched_info_reset_dequeued(t);
	t->sched_info.run_delay += delta;

	rq_sched_info_dequeued(rq, delta);
}

/*
 * Called when a task finally hits the cpu. We can now calculate how
 * long it was waiting to run. We also note when it began so that we
 * can keep stats on how long its timeslice is.
 */
static void sched_info_arrive(struct rq *rq, struct task_struct *t)
{
	unsigned long long now = rq_clock(rq), delta = 0;

	if (t->sched_info.last_queued)
		delta = now - t->sched_info.last_queued;
	sched_info_reset_dequeued(t);
	t->sched_info.run_delay += delta;
	t->sched_info.last_arrival = now;
	t->sched_info.pcount++;

	rq_sched_info_arrive(rq, delta);
}

/*
 * This function is only called from enqueue_task(), but it also only
 * updates the timestamp if it is not already set. It's assumed that
 * sched_info_dequeued() will clear that stamp when appropriate.
 */
static inline void sched_info_queued(struct rq *rq, struct task_struct *t)
{
	if (unlikely(sched_info_on()))
		if (!t->sched_info.last_queued)
			t->sched_info.last_queued = rq_clock(rq);
}

/*
 * Called when a process ceases being the currently running process,
 * typically involuntarily due to expiring its time slice (this may also
 * be called when switching to the idle task). Now we can calculate how
 * long we ran. Also, if the process is still in the TASK_RUNNING state,
 * call sched_info_queued() to mark that it has started waiting on the
 * runqueue again.
 */
static inline void sched_info_depart(struct rq *rq, struct task_struct *t)
{
	unsigned long long delta = rq_clock(rq) -
					t->sched_info.last_arrival;

	rq_sched_info_depart(rq, delta);

	if (t->state == TASK_RUNNING)
		sched_info_queued(rq, t);
}

/*
 * Called when tasks are switched, typically involuntarily due to expiring
 * their time slice (this may also be called when switching to or from
 * the idle task). We are only called when prev != next.
 */
static inline void
__sched_info_switch(struct rq *rq,
		    struct task_struct *prev, struct task_struct *next)
{
	/*
	 * prev now departs the cpu. It's not interesting to record
	 * stats about how efficient we were at scheduling the idle
	 * process, however.
	 */
	if (prev != rq->idle)
		sched_info_depart(rq, prev);

	if (next != rq->idle)
		sched_info_arrive(rq, next);
}
static inline void
sched_info_switch(struct rq *rq,
		  struct task_struct *prev, struct task_struct *next)
{
	if (unlikely(sched_info_on()))
		__sched_info_switch(rq, prev, next);
}
#else
#define sched_info_queued(rq, t)	do { } while (0)
#define sched_info_reset_dequeued(t)	do { } while (0)
#define sched_info_dequeued(rq, t)	do { } while (0)
#define sched_info_depart(rq, t)	do { } while (0)
#define sched_info_arrive(rq, next)	do { } while (0)
#define sched_info_switch(rq, t, next)	do { } while (0)
#endif /* CONFIG_SCHED_INFO */

/*
 * The following are functions that support scheduler-internal time accounting.
 * These functions are generally called at the timer tick. None of this depends
 * on CONFIG_SCHEDSTATS.
 */

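/*
 * Illustrative callers (a sketch of the surrounding kernel, not part of
 * this file): the tick-time accounting paths feed these helpers roughly
 * as follows
 *
 *	account_user_time()   -> account_group_user_time(p, cputime);
 *	account_system_time() -> account_group_system_time(p, cputime);
 *	update_curr()         -> account_group_exec_runtime(curtask, delta_exec);
 */
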
/**
 * cputimer_running - return true if cputimer is running
 *
 * @tsk: Pointer to target task.
 */
static inline bool cputimer_running(struct task_struct *tsk)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	/* Check if cputimer isn't running. This is accessed without locking. */
	if (!READ_ONCE(cputimer->running))
		return false;

	/*
	 * After we flush the task's sum_exec_runtime to sig->sum_sched_runtime
	 * in __exit_signal(), we won't account to the signal struct further
	 * cputime consumed by that task, even though the task can still be
	 * ticking after __exit_signal().
	 *
	 * In order to keep a consistent behaviour between thread group cputime
	 * and thread group cputimer accounting, let's also ignore the cputime
	 * elapsing after __exit_signal() in any thread group timer running.
	 *
	 * This makes sure that POSIX CPU clocks and timers are synchronized, so
	 * that a POSIX CPU timer won't expire while the corresponding POSIX CPU
	 * clock delta is behind the expiring timer value.
	 */
	if (unlikely(!tsk->sighand))
		return false;

	return true;
}

/**
 * account_group_user_time - Maintain utime for a thread group.
 *
 * @tsk: Pointer to task structure.
 * @cputime: Time value by which to increment the utime field of the
 *	     thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the utime field there.
 */
static inline void account_group_user_time(struct task_struct *tsk,
					   u64 cputime)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	if (!cputimer_running(tsk))
		return;

	atomic64_add(cputime, &cputimer->cputime_atomic.utime);
}

/**
 * account_group_system_time - Maintain stime for a thread group.
 *
 * @tsk: Pointer to task structure.
 * @cputime: Time value by which to increment the stime field of the
 *	     thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the stime field there.
 */
static inline void account_group_system_time(struct task_struct *tsk,
					     u64 cputime)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	if (!cputimer_running(tsk))
		return;

	atomic64_add(cputime, &cputimer->cputime_atomic.stime);
}

/**
 * account_group_exec_runtime - Maintain exec runtime for a thread group.
 *
 * @tsk: Pointer to task structure.
 * @ns: Time value by which to increment the sum_exec_runtime field
 *	of the thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the sum_exec_runtime field there.
 */
static inline void account_group_exec_runtime(struct task_struct *tsk,
					      unsigned long long ns)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	if (!cputimer_running(tsk))
		return;

	atomic64_add(ns, &cputimer->cputime_atomic.sum_exec_runtime);
}