/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_CLOCK_H
#define _LINUX_SCHED_CLOCK_H

#include <linux/smp.h>

/*
 * Do not use outside of architecture code which knows its limitations.
 *
 * sched_clock() has no promise of monotonicity or bounded drift between
 * CPUs, use (which you should not) requires disabling IRQs.
 *
 * Please use one of the three interfaces below.
 */
extern unsigned long long notrace sched_clock(void);

/*
 * See the comment in kernel/sched/clock.c
 */
extern u64 running_clock(void);
extern u64 sched_clock_cpu(int cpu);


extern void sched_clock_init(void);

#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
/* Stable-clock configs need no per-tick clock maintenance. */
static inline void sched_clock_tick(void) { }

/* The clock is taken as stable here; there is nothing to clear. */
static inline void clear_sched_clock_stable(void) { }

/* Idle entry requires no clock bookkeeping on stable-clock configs. */
static inline void sched_clock_idle_sleep_event(void) { }

/* Idle exit requires no clock bookkeeping on stable-clock configs. */
static inline void sched_clock_idle_wakeup_event(void) { }

43static inline u64 cpu_clock(int cpu)
44{
45 return sched_clock();
46}
47
48static inline u64 local_clock(void)
49{
50 return sched_clock();
51}
#else
extern int sched_clock_stable(void);
extern void clear_sched_clock_stable(void);

/*
 * When sched_clock_stable(), __sched_clock_offset provides the offset
 * between local_clock() and sched_clock().
 */
extern u64 __sched_clock_offset;

extern void sched_clock_tick(void);
extern void sched_clock_tick_stable(void);
extern void sched_clock_idle_sleep_event(void);
extern void sched_clock_idle_wakeup_event(void);

67/*
68 * As outlined in clock.c, provides a fast, high resolution, nanosecond
69 * time source that is monotonic per cpu argument and has bounded drift
70 * between cpus.
71 *
72 * ######################### BIG FAT WARNING ##########################
73 * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
74 * # go backwards !! #
75 * ####################################################################
76 */
77static inline u64 cpu_clock(int cpu)
78{
79 return sched_clock_cpu(cpu);
80}
81
82static inline u64 local_clock(void)
83{
84 return sched_clock_cpu(raw_smp_processor_id());
85}
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
/*
 * An i/f to runtime opt-in for irq time accounting based off of sched_clock.
 * The reason for this explicit opt-in is not to have perf penalty with
 * slow sched_clocks.
 */
extern void enable_sched_clock_irqtime(void);
extern void disable_sched_clock_irqtime(void);
#else
/* IRQ time accounting is compiled out: opting in is a no-op. */
static inline void enable_sched_clock_irqtime(void)
{
}
/* IRQ time accounting is compiled out: opting out is a no-op. */
static inline void disable_sched_clock_irqtime(void)
{
}
#endif

#endif /* _LINUX_SCHED_CLOCK_H */