#include <linux/export.h>
#include <linux/sched.h>
#include <linux/tsacct_kern.h>
#include <linux/kernel_stat.h>
#include <linux/static_key.h>
#include <linux/context_tracking.h>
#include "sched.h"


#ifdef CONFIG_IRQ_TIME_ACCOUNTING

/*
 * There are no locks covering percpu hardirq/softirq time.
 * They are only modified in vtime_account, on the corresponding CPU
 * with interrupts disabled, so writes are safe.
 * They are read and saved off onto struct rq in update_rq_clock().
 * This may result in another CPU reading this CPU's irq time and
 * racing with irq/vtime_account on this CPU. We would either get the
 * old or the new value, with a side effect of accounting a slice of
 * irq time to the wrong task when an irq is in progress while we read
 * rq->clock. That is a worthy compromise in place of having locks on
 * each irq in account_system_time.
 */
DEFINE_PER_CPU(u64, cpu_hardirq_time);
DEFINE_PER_CPU(u64, cpu_softirq_time);

static DEFINE_PER_CPU(u64, irq_start_time);
static int sched_clock_irqtime;

void enable_sched_clock_irqtime(void)
{
	sched_clock_irqtime = 1;
}

void disable_sched_clock_irqtime(void)
{
	sched_clock_irqtime = 0;
}

#ifndef CONFIG_64BIT
DEFINE_PER_CPU(seqcount_t, irq_time_seq);
#endif /* CONFIG_64BIT */
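
/*
 * The irq_time_write_begin()/irq_time_write_end() helpers used below are
 * defined in sched.h, not here.  On 32-bit, where a u64 store is not
 * atomic, they presumably bracket the updates with the seqcount above so
 * a reader sees a consistent cpu_hardirq_time + cpu_softirq_time sum.
 * A minimal sketch of what such helpers look like (not the authoritative
 * definitions):
 *
 *	static inline void irq_time_write_begin(void)
 *	{
 *		__this_cpu_inc(irq_time_seq.sequence);
 *		smp_wmb();
 *	}
 *
 *	static inline void irq_time_write_end(void)
 *	{
 *		smp_wmb();
 *		__this_cpu_inc(irq_time_seq.sequence);
 *	}
 */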

/*
 * Called before incrementing preempt_count on {soft,}irq_enter
 * and before decrementing preempt_count on {soft,}irq_exit.
 */
void irqtime_account_irq(struct task_struct *curr)
{
	unsigned long flags;
	s64 delta;
	int cpu;

	if (!sched_clock_irqtime)
		return;

	local_irq_save(flags);

	cpu = smp_processor_id();
	delta = sched_clock_cpu(cpu) - __this_cpu_read(irq_start_time);
	__this_cpu_add(irq_start_time, delta);

	irq_time_write_begin();
	/*
	 * We do not account for softirq time from ksoftirqd here.
	 * We want to continue accounting softirq time to the ksoftirqd thread
	 * in that case, so as not to confuse the scheduler with a special task
	 * that does not consume any time, but still wants to run.
	 */
	if (hardirq_count())
		__this_cpu_add(cpu_hardirq_time, delta);
	else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
		__this_cpu_add(cpu_softirq_time, delta);

	irq_time_write_end();
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(irqtime_account_irq);
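
/*
 * In this era of the kernel, irqtime_account_irq() is reached via the
 * account_irq_enter_time()/account_irq_exit_time() wrappers in
 * <linux/vtime.h>, which irq_enter()/irq_exit() and the softirq
 * entry/exit paths invoke on each transition.
 */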

static int irqtime_account_hi_update(void)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;
	unsigned long flags;
	u64 latest_ns;
	int ret = 0;

	local_irq_save(flags);
	latest_ns = this_cpu_read(cpu_hardirq_time);
	if (nsecs_to_cputime64(latest_ns) > cpustat[CPUTIME_IRQ])
		ret = 1;
	local_irq_restore(flags);
	return ret;
}

static int irqtime_account_si_update(void)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;
	unsigned long flags;
	u64 latest_ns;
	int ret = 0;

	local_irq_save(flags);
	latest_ns = this_cpu_read(cpu_softirq_time);
	if (nsecs_to_cputime64(latest_ns) > cpustat[CPUTIME_SOFTIRQ])
		ret = 1;
	local_irq_restore(flags);
	return ret;
}
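
/*
 * The two helpers above report whether the percpu irq time has advanced
 * past what cpustat already shows; irqtime_account_process_tick() below
 * uses that to decide whether a tick should be charged to CPUTIME_IRQ or
 * CPUTIME_SOFTIRQ rather than to the interrupted task.
 */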

#else /* CONFIG_IRQ_TIME_ACCOUNTING */

#define sched_clock_irqtime	(0)

#endif /* !CONFIG_IRQ_TIME_ACCOUNTING */

static inline void task_group_account_field(struct task_struct *p, int index,
					    u64 tmp)
{
	/*
	 * Since all updates are sure to touch the root cgroup, we
	 * get ourselves ahead and touch it first. If the root cgroup
	 * is the only cgroup, then nothing else should be necessary.
	 */
	__this_cpu_add(kernel_cpustat.cpustat[index], tmp);

	cpuacct_account_field(p, index, tmp);
}
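
/*
 * Note: cpuacct_account_field() charges the same delta to the task's
 * cpuacct cgroup hierarchy (kernel/sched/cpuacct.c), while the
 * __this_cpu_add() above feeds the root view exported via /proc/stat.
 */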

/*
 * Account user cpu time to a process.
 * @p: the process that the cpu time gets accounted to
 * @cputime: the cpu time spent in user space since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 */
void account_user_time(struct task_struct *p, cputime_t cputime,
		       cputime_t cputime_scaled)
{
	int index;

	/* Add user time to process. */
	p->utime += cputime;
	p->utimescaled += cputime_scaled;
	account_group_user_time(p, cputime);

	index = (task_nice(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;

	/* Add user time to cpustat. */
	task_group_account_field(p, index, (__force u64) cputime);

	/* Account for user time used */
	acct_account_cputime(p);
}

/*
 * Account guest cpu time to a process.
 * @p: the process that the cpu time gets accounted to
 * @cputime: the cpu time spent in virtual machine since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 */
static void account_guest_time(struct task_struct *p, cputime_t cputime,
			       cputime_t cputime_scaled)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;

	/* Add guest time to process. */
	p->utime += cputime;
	p->utimescaled += cputime_scaled;
	account_group_user_time(p, cputime);
	p->gtime += cputime;

	/* Add guest time to cpustat. */
	if (task_nice(p) > 0) {
		cpustat[CPUTIME_NICE] += (__force u64) cputime;
		cpustat[CPUTIME_GUEST_NICE] += (__force u64) cputime;
	} else {
		cpustat[CPUTIME_USER] += (__force u64) cputime;
		cpustat[CPUTIME_GUEST] += (__force u64) cputime;
	}
}

/*
 * Account system cpu time to a process and desired cpustat field.
 * @p: the process that the cpu time gets accounted to
 * @cputime: the cpu time spent in kernel space since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 * @index: index of the cpustat field that has to be updated
 */
static inline
void __account_system_time(struct task_struct *p, cputime_t cputime,
			   cputime_t cputime_scaled, int index)
{
	/* Add system time to process. */
	p->stime += cputime;
	p->stimescaled += cputime_scaled;
	account_group_system_time(p, cputime);

	/* Add system time to cpustat. */
	task_group_account_field(p, index, (__force u64) cputime);

	/* Account for system time used */
	acct_account_cputime(p);
}

/*
 * Account system cpu time to a process.
 * @p: the process that the cpu time gets accounted to
 * @hardirq_offset: the offset to subtract from hardirq_count()
 * @cputime: the cpu time spent in kernel space since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 */
void account_system_time(struct task_struct *p, int hardirq_offset,
			 cputime_t cputime, cputime_t cputime_scaled)
{
	int index;

	if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
		account_guest_time(p, cputime, cputime_scaled);
		return;
	}

	if (hardirq_count() - hardirq_offset)
		index = CPUTIME_IRQ;
	else if (in_serving_softirq())
		index = CPUTIME_SOFTIRQ;
	else
		index = CPUTIME_SYSTEM;

	__account_system_time(p, cputime, cputime_scaled, index);
}

/*
 * Account for involuntary wait time.
 * @cputime: the cpu time spent in involuntary wait
 */
void account_steal_time(cputime_t cputime)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;

	cpustat[CPUTIME_STEAL] += (__force u64) cputime;
}

/*
 * Account for idle time.
 * @cputime: the cpu time spent in idle wait
 */
void account_idle_time(cputime_t cputime)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;
	struct rq *rq = this_rq();

	if (atomic_read(&rq->nr_iowait) > 0)
		cpustat[CPUTIME_IOWAIT] += (__force u64) cputime;
	else
		cpustat[CPUTIME_IDLE] += (__force u64) cputime;
}

static __always_inline bool steal_account_process_tick(void)
{
#ifdef CONFIG_PARAVIRT
	if (static_key_false(&paravirt_steal_enabled)) {
		u64 steal;
		cputime_t steal_ct;

		steal = paravirt_steal_clock(smp_processor_id());
		steal -= this_rq()->prev_steal_time;

		/*
		 * cputime_t may be less precise than nsecs (e.g. if it's
		 * based on jiffies). Let's cast the result to cputime
		 * granularity and account the rest on the next rounds.
		 */
		steal_ct = nsecs_to_cputime(steal);
		this_rq()->prev_steal_time += cputime_to_nsecs(steal_ct);

		account_steal_time(steal_ct);
		return steal_ct;
	}
#endif
	return false;
}
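
/*
 * Worked example with hypothetical numbers: assume a jiffies-based
 * cputime_t and HZ=250 (one jiffy = 4ms).  If paravirt_steal_clock()
 * reports 5.7ms of new steal, nsecs_to_cputime() rounds that down to
 * one jiffy, prev_steal_time advances by only the accounted 4ms, and
 * the remaining 1.7ms is carried over to the next round.
 */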

/*
 * Accumulate raw cputime values of dead tasks (sig->[us]time) and live
 * tasks (sum on group iteration) belonging to @tsk's group.
 */
void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
{
	struct signal_struct *sig = tsk->signal;
	cputime_t utime, stime;
	struct task_struct *t;
	unsigned int seq, nextseq;
	unsigned long flags;

	rcu_read_lock();
	/* Attempt a lockless read on the first round. */
	nextseq = 0;
	do {
		seq = nextseq;
		flags = read_seqbegin_or_lock_irqsave(&sig->stats_lock, &seq);
		times->utime = sig->utime;
		times->stime = sig->stime;
		times->sum_exec_runtime = sig->sum_sched_runtime;

		for_each_thread(tsk, t) {
			task_cputime(t, &utime, &stime);
			times->utime += utime;
			times->stime += stime;
			times->sum_exec_runtime += task_sched_runtime(t);
		}
		/* If lockless access failed, take the lock. */
		nextseq = 1;
	} while (need_seqretry(&sig->stats_lock, seq));
	done_seqretry_irqrestore(&sig->stats_lock, seq, flags);
	rcu_read_unlock();
}
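
/*
 * The loop above relies on the seqlock "or lock" pattern: with an even
 * seq (nextseq == 0), read_seqbegin_or_lock_irqsave() starts a lockless
 * seqcount read; if that raced with a writer, need_seqretry() forces a
 * second round with an odd seq (nextseq == 1), which takes
 * sig->stats_lock so the retry cannot fail again.
 */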

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
/*
 * Account a tick to a process and cpustat.
 * @p: the process that the cpu time gets accounted to
 * @user_tick: is the tick from userspace
 * @rq: the pointer to rq
 * @ticks: number of ticks to account
 *
 * Tick demultiplexing follows the order
 * - pending hardirq update
 * - pending softirq update
 * - user_time
 * - idle_time
 * - system time
 *   - check for guest_time
 *   - else account as system_time
 *
 * The check for hardirq is done both for system and user time as there is
 * no timer going off while we are on hardirq and hence we may never get an
 * opportunity to update it solely in system time.
 * p->stime and friends are only updated on system time and not on irq
 * softirq as those do not count in task exec_runtime any more.
 */
static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
					 struct rq *rq, int ticks)
{
	cputime_t scaled = cputime_to_scaled(cputime_one_jiffy);
	u64 cputime = (__force u64) cputime_one_jiffy;
	u64 *cpustat = kcpustat_this_cpu->cpustat;

	if (steal_account_process_tick())
		return;

	cputime *= ticks;
	scaled *= ticks;

	if (irqtime_account_hi_update()) {
		cpustat[CPUTIME_IRQ] += cputime;
	} else if (irqtime_account_si_update()) {
		cpustat[CPUTIME_SOFTIRQ] += cputime;
	} else if (this_cpu_ksoftirqd() == p) {
		/*
		 * ksoftirqd time does not get accounted in cpu_softirq_time.
		 * So, we have to handle it separately here.
		 * Also, p->stime needs to be updated for ksoftirqd.
		 */
		__account_system_time(p, cputime, scaled, CPUTIME_SOFTIRQ);
	} else if (user_tick) {
		account_user_time(p, cputime, scaled);
	} else if (p == rq->idle) {
		account_idle_time(cputime);
	} else if (p->flags & PF_VCPU) { /* System time or guest time */
		account_guest_time(p, cputime, scaled);
	} else {
		__account_system_time(p, cputime, scaled, CPUTIME_SYSTEM);
	}
}

static void irqtime_account_idle_ticks(int ticks)
{
	struct rq *rq = this_rq();

	irqtime_account_process_tick(current, 0, rq, ticks);
}
#else /* CONFIG_IRQ_TIME_ACCOUNTING */
static inline void irqtime_account_idle_ticks(int ticks) {}
static inline void irqtime_account_process_tick(struct task_struct *p, int user_tick,
						struct rq *rq, int nr_ticks) {}
#endif /* CONFIG_IRQ_TIME_ACCOUNTING */

/*
 * Use precise platform statistics if available:
 */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING

#ifndef __ARCH_HAS_VTIME_TASK_SWITCH
void vtime_common_task_switch(struct task_struct *prev)
{
	if (is_idle_task(prev))
		vtime_account_idle(prev);
	else
		vtime_account_system(prev);

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	vtime_account_user(prev);
#endif
	arch_vtime_task_switch(prev);
}
#endif

/*
 * Archs that account the whole time spent in the idle task
 * (outside irq) as idle time can rely on this and just implement
 * vtime_account_system() and vtime_account_idle(). Archs that
 * assign another meaning to idle time (s390 only includes the
 * time spent by the CPU when it's in low power mode) must override
 * vtime_account().
 */
#ifndef __ARCH_HAS_VTIME_ACCOUNT
void vtime_common_account_irq_enter(struct task_struct *tsk)
{
	if (!in_interrupt()) {
		/*
		 * If we interrupted user, context_tracking_in_user()
		 * is 1 because context tracking doesn't hook into
		 * irq entry/exit. This way we know whether we need
		 * to flush user time on kernel entry.
		 */
		if (context_tracking_in_user()) {
			vtime_account_user(tsk);
			return;
		}

		if (is_idle_task(tsk)) {
			vtime_account_idle(tsk);
			return;
		}
	}
	vtime_account_system(tsk);
}
EXPORT_SYMBOL_GPL(vtime_common_account_irq_enter);
#endif /* __ARCH_HAS_VTIME_ACCOUNT */
#endif /* CONFIG_VIRT_CPU_ACCOUNTING */


#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
	*ut = p->utime;
	*st = p->stime;
}

void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
	struct task_cputime cputime;

	thread_group_cputime(p, &cputime);

	*ut = cputime.utime;
	*st = cputime.stime;
}
#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
/*
 * Account a single tick of cpu time.
 * @p: the process that the cpu time gets accounted to
 * @user_tick: indicates if the tick is a user or a system tick
 */
void account_process_tick(struct task_struct *p, int user_tick)
{
	cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
	struct rq *rq = this_rq();

	if (vtime_accounting_enabled())
		return;

	if (sched_clock_irqtime) {
		irqtime_account_process_tick(p, user_tick, rq, 1);
		return;
	}

	if (steal_account_process_tick())
		return;

	if (user_tick)
		account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
	else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
		account_system_time(p, HARDIRQ_OFFSET, cputime_one_jiffy,
				    one_jiffy_scaled);
	else
		account_idle_time(cputime_one_jiffy);
}

/*
 * Account multiple ticks of steal time.
 * @ticks: number of stolen ticks
 */
void account_steal_ticks(unsigned long ticks)
{
	account_steal_time(jiffies_to_cputime(ticks));
}

/*
 * Account multiple ticks of idle time.
 * @ticks: number of ticks of idle time
 */
void account_idle_ticks(unsigned long ticks)
{
	if (sched_clock_irqtime) {
		irqtime_account_idle_ticks(ticks);
		return;
	}

	account_idle_time(jiffies_to_cputime(ticks));
}

/*
 * Perform (stime * rtime) / total, but avoid multiplication overflow by
 * losing precision when the numbers are big.
 */
static cputime_t scale_stime(u64 stime, u64 rtime, u64 total)
{
	u64 scaled;

	for (;;) {
		/* Make sure "rtime" is the bigger of stime/rtime */
		if (stime > rtime)
			swap(rtime, stime);

		/* Make sure 'total' fits in 32 bits */
		if (total >> 32)
			goto drop_precision;

		/* Does rtime (and thus stime) fit in 32 bits? */
		if (!(rtime >> 32))
			break;

		/* Can we just balance rtime/stime rather than dropping bits? */
		if (stime >> 31)
			goto drop_precision;

		/* We can grow stime and shrink rtime and try to make them both fit */
		stime <<= 1;
		rtime >>= 1;
		continue;

drop_precision:
		/* We drop from rtime, it has more bits than stime */
		rtime >>= 1;
		total >>= 1;
	}

	/*
	 * Make sure gcc understands that this is a 32x32->64 multiply,
	 * followed by a 64/32->64 divide.
	 */
	scaled = div_u64((u64) (u32) stime * (u64) (u32) rtime, (u32)total);
	return (__force cputime_t) scaled;
}
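
/*
 * Worked example with hypothetical values: stime = 2^20, rtime = 2^40,
 * total = 2^30.  'total' already fits in 32 bits but rtime does not, so
 * the loop doubles stime while halving rtime nine times, giving
 * stime = 2^29 and rtime = 2^31.  The final 32x32->64 multiply and
 * 64/32->64 divide yield (2^29 * 2^31) / 2^30 = 2^30, which is exactly
 * stime * rtime / total.
 */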

/*
 * Atomically advance counter to the new value. Interrupts, vcpu
 * scheduling, and scaling inaccuracies can cause cputime_advance
 * to be occasionally called with a new value smaller than counter.
 * Let's enforce atomicity.
 *
 * Normally a caller will only go through this loop once, or not
 * at all in case a previous caller updated the counter in the same jiffy.
 */
static void cputime_advance(cputime_t *counter, cputime_t new)
{
	cputime_t old;

	while (new > (old = READ_ONCE(*counter)))
		cmpxchg_cputime(counter, old, new);
}
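
/*
 * If two CPUs race here, the counter still never moves backwards: a stale
 * cmpxchg_cputime() simply fails and the while condition re-checks against
 * the freshly read value.  cmpxchg_cputime() presumably resolves to
 * cmpxchg() or cmpxchg64() depending on the cputime_t representation (see
 * the asm-generic cputime headers).
 */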

/*
 * Adjust tick based cputime random precision against scheduler
 * runtime accounting.
 */
static void cputime_adjust(struct task_cputime *curr,
			   struct cputime *prev,
			   cputime_t *ut, cputime_t *st)
{
	cputime_t rtime, stime, utime;

	/*
	 * Tick based cputime accounting depends on whether the random
	 * scheduling timeslices of a task happen to be interrupted by the
	 * timer. Depending on these circumstances, the number of these
	 * interrupts may be over- or under-estimated, matching the real
	 * user and system cputime with a variable precision.
	 *
	 * Fix this by scaling these tick based values against the total
	 * runtime accounted by the CFS scheduler.
	 */
	rtime = nsecs_to_cputime(curr->sum_exec_runtime);

	/*
	 * Update userspace visible utime/stime values only if actual execution
	 * time is bigger than already exported. Note that this can happen:
	 * we may have provided bigger values due to scaling inaccuracy on
	 * big numbers.
	 */
	if (prev->stime + prev->utime >= rtime)
		goto out;

	stime = curr->stime;
	utime = curr->utime;

	if (utime == 0) {
		stime = rtime;
	} else if (stime == 0) {
		utime = rtime;
	} else {
		cputime_t total = stime + utime;

		stime = scale_stime((__force u64)stime,
				    (__force u64)rtime, (__force u64)total);
		utime = rtime - stime;
	}

	cputime_advance(&prev->stime, stime);
	cputime_advance(&prev->utime, utime);

out:
	*ut = prev->utime;
	*st = prev->stime;
}

void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
	struct task_cputime cputime = {
		.sum_exec_runtime = p->se.sum_exec_runtime,
	};

	task_cputime(p, &cputime.utime, &cputime.stime);
	cputime_adjust(&cputime, &p->prev_cputime, ut, st);
}

void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
	struct task_cputime cputime;

	thread_group_cputime(p, &cputime);
	cputime_adjust(&cputime, &p->signal->prev_cputime, ut, st);
}
#endif /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
static unsigned long long vtime_delta(struct task_struct *tsk)
{
	unsigned long long clock;

	clock = local_clock();
	if (clock < tsk->vtime_snap)
		return 0;

	return clock - tsk->vtime_snap;
}

static cputime_t get_vtime_delta(struct task_struct *tsk)
{
	unsigned long long delta = vtime_delta(tsk);

	WARN_ON_ONCE(tsk->vtime_snap_whence == VTIME_SLEEPING);
	tsk->vtime_snap += delta;

	/* CHECKME: always safe to convert nsecs to cputime? */
	return nsecs_to_cputime(delta);
}

static void __vtime_account_system(struct task_struct *tsk)
{
	cputime_t delta_cpu = get_vtime_delta(tsk);

	account_system_time(tsk, irq_count(), delta_cpu, cputime_to_scaled(delta_cpu));
}

void vtime_account_system(struct task_struct *tsk)
{
	write_seqlock(&tsk->vtime_seqlock);
	__vtime_account_system(tsk);
	write_sequnlock(&tsk->vtime_seqlock);
}

void vtime_gen_account_irq_exit(struct task_struct *tsk)
{
	write_seqlock(&tsk->vtime_seqlock);
	__vtime_account_system(tsk);
	if (context_tracking_in_user())
		tsk->vtime_snap_whence = VTIME_USER;
	write_sequnlock(&tsk->vtime_seqlock);
}

void vtime_account_user(struct task_struct *tsk)
{
	cputime_t delta_cpu;

	write_seqlock(&tsk->vtime_seqlock);
	delta_cpu = get_vtime_delta(tsk);
	tsk->vtime_snap_whence = VTIME_SYS;
	account_user_time(tsk, delta_cpu, cputime_to_scaled(delta_cpu));
	write_sequnlock(&tsk->vtime_seqlock);
}

void vtime_user_enter(struct task_struct *tsk)
{
	write_seqlock(&tsk->vtime_seqlock);
	__vtime_account_system(tsk);
	tsk->vtime_snap_whence = VTIME_USER;
	write_sequnlock(&tsk->vtime_seqlock);
}

void vtime_guest_enter(struct task_struct *tsk)
{
	/*
	 * The flags must be updated under the lock, together with
	 * the vtime_snap flush and update.
	 * That enforces the right ordering and synchronizes the update
	 * sequence against the reader (task_gtime()), which can thus
	 * safely catch up with a tickless delta.
	 */
	write_seqlock(&tsk->vtime_seqlock);
	__vtime_account_system(tsk);
	current->flags |= PF_VCPU;
	write_sequnlock(&tsk->vtime_seqlock);
}
EXPORT_SYMBOL_GPL(vtime_guest_enter);

void vtime_guest_exit(struct task_struct *tsk)
{
	write_seqlock(&tsk->vtime_seqlock);
	__vtime_account_system(tsk);
	current->flags &= ~PF_VCPU;
	write_sequnlock(&tsk->vtime_seqlock);
}
EXPORT_SYMBOL_GPL(vtime_guest_exit);
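
/*
 * In this era of the kernel, vtime_guest_enter()/vtime_guest_exit() are
 * presumably reached from the guest_enter()/guest_exit() helpers in
 * <linux/context_tracking.h>, which KVM wraps around guest mode
 * entry/exit when vtime accounting is enabled; this is what keeps
 * tsk->gtime accruing without a periodic tick.
 */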

void vtime_account_idle(struct task_struct *tsk)
{
	cputime_t delta_cpu = get_vtime_delta(tsk);

	account_idle_time(delta_cpu);
}

void arch_vtime_task_switch(struct task_struct *prev)
{
	write_seqlock(&prev->vtime_seqlock);
	prev->vtime_snap_whence = VTIME_SLEEPING;
	write_sequnlock(&prev->vtime_seqlock);

	write_seqlock(&current->vtime_seqlock);
	current->vtime_snap_whence = VTIME_SYS;
	current->vtime_snap = sched_clock_cpu(smp_processor_id());
	write_sequnlock(&current->vtime_seqlock);
}

void vtime_init_idle(struct task_struct *t, int cpu)
{
	unsigned long flags;

	write_seqlock_irqsave(&t->vtime_seqlock, flags);
	t->vtime_snap_whence = VTIME_SYS;
	t->vtime_snap = sched_clock_cpu(cpu);
	write_sequnlock_irqrestore(&t->vtime_seqlock, flags);
}

cputime_t task_gtime(struct task_struct *t)
{
	unsigned int seq;
	cputime_t gtime;

	do {
		seq = read_seqbegin(&t->vtime_seqlock);

		gtime = t->gtime;
		if (t->flags & PF_VCPU)
			gtime += vtime_delta(t);

	} while (read_seqretry(&t->vtime_seqlock, seq));

	return gtime;
}

/*
 * Fetch cputime raw values from fields of task_struct and
 * add up the pending nohz execution time since the last
 * cputime snapshot.
 */
static void
fetch_task_cputime(struct task_struct *t,
		   cputime_t *u_dst, cputime_t *s_dst,
		   cputime_t *u_src, cputime_t *s_src,
		   cputime_t *udelta, cputime_t *sdelta)
{
	unsigned int seq;
	unsigned long long delta;

	do {
		*udelta = 0;
		*sdelta = 0;

		seq = read_seqbegin(&t->vtime_seqlock);

		if (u_dst)
			*u_dst = *u_src;
		if (s_dst)
			*s_dst = *s_src;

		/* Task is sleeping, nothing to add */
		if (t->vtime_snap_whence == VTIME_SLEEPING ||
		    is_idle_task(t))
			continue;

		delta = vtime_delta(t);

		/*
		 * Task runs either in user or kernel space, add pending nohz
		 * time to the right place.
		 */
		if (t->vtime_snap_whence == VTIME_USER || t->flags & PF_VCPU) {
			*udelta = delta;
		} else {
			if (t->vtime_snap_whence == VTIME_SYS)
				*sdelta = delta;
		}
	} while (read_seqretry(&t->vtime_seqlock, seq));
}


void task_cputime(struct task_struct *t, cputime_t *utime, cputime_t *stime)
{
	cputime_t udelta, sdelta;

	fetch_task_cputime(t, utime, stime, &t->utime,
			   &t->stime, &udelta, &sdelta);
	if (utime)
		*utime += udelta;
	if (stime)
		*stime += sdelta;
}

void task_cputime_scaled(struct task_struct *t,
			 cputime_t *utimescaled, cputime_t *stimescaled)
{
	cputime_t udelta, sdelta;

	fetch_task_cputime(t, utimescaled, stimescaled,
			   &t->utimescaled, &t->stimescaled, &udelta, &sdelta);
	if (utimescaled)
		*utimescaled += cputime_to_scaled(udelta);
	if (stimescaled)
		*stimescaled += cputime_to_scaled(sdelta);
}
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */