#ifndef _LINUX_SCHED_CLOCK_H
#define _LINUX_SCHED_CLOCK_H

#include <linux/smp.h>

/*
 * Do not use outside of architecture code which knows its limitations.
 *
 * sched_clock() makes no promise of monotonicity or bounded drift between
 * CPUs; its use (which you should not attempt anyway) requires IRQs to be
 * disabled.
 *
 * Please use one of the three interfaces below.
 */
extern unsigned long long notrace sched_clock(void);

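/*
 * Illustrative sketch, not part of this header's API: architecture code
 * that knows its sched_clock() limitations and must read it directly
 * keeps IRQs disabled around the read, per the warning above. The helper
 * name is hypothetical, and local_irq_save()/local_irq_restore() from
 * <linux/irqflags.h> are assumed to be available here.
 */
static inline unsigned long long example_raw_sched_clock_read(void)
{
	unsigned long flags;
	unsigned long long now;

	local_irq_save(flags);		/* raw sched_clock() use wants IRQs off */
	now = sched_clock();
	local_irq_restore(flags);

	return now;
}
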
/*
 * See the comment in kernel/sched/clock.c
 */
extern u64 running_clock(void);
extern u64 sched_clock_cpu(int cpu);

extern void sched_clock_init(void);

#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
static inline void sched_clock_init_late(void)
{
}

static inline void sched_clock_tick(void)
{
}

static inline void clear_sched_clock_stable(void)
{
}

static inline void sched_clock_idle_sleep_event(void)
{
}

static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
{
}

static inline u64 cpu_clock(int cpu)
{
	return sched_clock();
}

static inline u64 local_clock(void)
{
	return sched_clock();
}
#else
extern void sched_clock_init_late(void);
extern int sched_clock_stable(void);
extern void clear_sched_clock_stable(void);

/*
 * When sched_clock_stable(), __sched_clock_offset provides the offset
 * between local_clock() and sched_clock().
 */
extern u64 __sched_clock_offset;

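/*
 * Illustrative sketch (hypothetical helper, not part of this header):
 * while sched_clock_stable() holds, local_clock() equals
 * sched_clock() + __sched_clock_offset, so a raw sched_clock() value
 * can be shifted into the local_clock() timebase like so:
 */
static inline u64 example_sched_clock_to_local(u64 raw_sched_clock_ns)
{
	/* Only meaningful while the clock is stable. */
	return raw_sched_clock_ns + __sched_clock_offset;
}
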
extern void sched_clock_tick(void);
extern void sched_clock_tick_stable(void);
extern void sched_clock_idle_sleep_event(void);
extern void sched_clock_idle_wakeup_event(u64 delta_ns);

/*
 * As outlined in kernel/sched/clock.c, this provides a fast, high
 * resolution, nanosecond time source that is monotonic per cpu argument
 * and has bounded drift between cpus.
 *
 * ######################### BIG FAT WARNING ##########################
 * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
 * # go backwards !!                                                  #
 * ####################################################################
 */
static inline u64 cpu_clock(int cpu)
{
	return sched_clock_cpu(cpu);
}

static inline u64 local_clock(void)
{
	return sched_clock_cpu(raw_smp_processor_id());
}
#endif

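/*
 * Illustrative sketch (hypothetical helper): a duration is measured by
 * comparing two local_clock() readings. Per the BIG FAT WARNING above,
 * the start timestamp must come from the same CPU's clock; comparing
 * cpu_clock(i) against cpu_clock(j) for i != j can observe time going
 * backwards.
 */
static inline u64 example_elapsed_ns(u64 start_ns)
{
	/* start_ns is assumed to be a local_clock() value from this CPU. */
	return local_clock() - start_ns;
}
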
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
/*
 * An interface for runtime opt-in to IRQ time accounting, based off of
 * sched_clock. The opt-in is explicit so that architectures with slow
 * sched_clock() implementations do not pay the performance penalty.
 */
extern void enable_sched_clock_irqtime(void);
extern void disable_sched_clock_irqtime(void);
#else
static inline void enable_sched_clock_irqtime(void) {}
static inline void disable_sched_clock_irqtime(void) {}
#endif
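
/*
 * Illustrative sketch (hypothetical arch init hook, not part of this
 * header): an architecture with a fast sched_clock() opts in to IRQ
 * time accounting at boot. With CONFIG_IRQ_TIME_ACCOUNTING=n the call
 * resolves to the empty stub above and compiles away.
 */
static inline void example_arch_enable_irqtime(void)
{
	enable_sched_clock_irqtime();
}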

#endif /* _LINUX_SCHED_CLOCK_H */