#ifndef _LINUX_SCHED_CLOCK_H
#define _LINUX_SCHED_CLOCK_H

#include <linux/smp.h>

/*
 * Do not use outside of architecture code which knows its limitations.
 *
 * sched_clock() has no promise of monotonicity or bounded drift between
 * CPUs, and using it directly (which you should not do) requires IRQs to
 * be disabled.
 *
 * Please use one of the three interfaces below.
 */
extern unsigned long long notrace sched_clock(void);
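
/*
 * Illustrative sketch only (not part of the original header): per the
 * comment above, a direct caller in architecture code should at least
 * disable IRQs around the read, e.g.:
 *
 *      unsigned long flags;
 *      u64 now;
 *
 *      local_irq_save(flags);
 *      now = sched_clock();
 *      local_irq_restore(flags);
 *
 * Generic code should prefer cpu_clock()/local_clock() further below.
 */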

/*
 * See the comment in kernel/sched/clock.c
 */
extern u64 running_clock(void);
extern u64 sched_clock_cpu(int cpu);


extern void sched_clock_init(void);

#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
static inline void sched_clock_init_late(void)
{
}

static inline void sched_clock_tick(void)
{
}

static inline void clear_sched_clock_stable(void)
{
}

static inline void sched_clock_idle_sleep_event(void)
{
}

static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
{
}

static inline u64 cpu_clock(int cpu)
{
        return sched_clock();
}

static inline u64 local_clock(void)
{
        return sched_clock();
}
#else
extern void sched_clock_init_late(void);
/*
 * Architectures can set this to 1 if they have specified
 * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
 * but then during bootup it turns out that sched_clock()
 * is reliable after all:
 */
extern int sched_clock_stable(void);
extern void clear_sched_clock_stable(void);
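
/*
 * Illustrative sketch only (hypothetical arch code, not part of this
 * header): an architecture that discovers during bringup that its clock
 * is not reliable can drop into the unstable handling with:
 *
 *      if (!my_arch_clock_is_reliable())
 *              clear_sched_clock_stable();
 *
 * where my_arch_clock_is_reliable() is a made-up placeholder for the
 * architecture's own reliability check; sched_clock_stable() can be
 * queried afterwards to see whether the clock is still considered stable.
 */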

extern void sched_clock_tick(void);
extern void sched_clock_idle_sleep_event(void);
extern void sched_clock_idle_wakeup_event(u64 delta_ns);

/*
 * As outlined in clock.c, provides a fast, high resolution, nanosecond
 * time source that is monotonic per cpu argument and has bounded drift
 * between cpus.
 *
 * ######################### BIG FAT WARNING ##########################
 * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
 * # go backwards !!                                                  #
 * ####################################################################
 */
static inline u64 cpu_clock(int cpu)
{
        return sched_clock_cpu(cpu);
}

static inline u64 local_clock(void)
{
        return sched_clock_cpu(raw_smp_processor_id());
}
#endif
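
/*
 * Illustrative sketch only (not part of the original header): measuring a
 * short duration on the local CPU with local_clock().  Both reads should
 * come from the same CPU (e.g. with preemption disabled, or by using
 * cpu_clock() with the same cpu argument), otherwise the warning above
 * applies and the delta can be negative:
 *
 *      u64 t0, t1;
 *
 *      t0 = local_clock();
 *      do_some_work();         <- made-up placeholder for the timed code
 *      t1 = local_clock();
 *      pr_debug("took %llu ns\n", t1 - t0);
 */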

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
/*
 * An interface for runtime opt-in to IRQ time accounting based off of
 * sched_clock.  The reason for this explicit opt-in is to avoid the
 * performance penalty of slow sched_clock() implementations.
 */
extern void enable_sched_clock_irqtime(void);
extern void disable_sched_clock_irqtime(void);
#else
static inline void enable_sched_clock_irqtime(void) {}
static inline void disable_sched_clock_irqtime(void) {}
#endif
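
/*
 * Illustrative sketch only (hypothetical arch boot code, not part of this
 * header): an architecture with a cheap sched_clock() can opt in to IRQ
 * time accounting once its clock is up, for example:
 *
 *      void __init my_arch_time_init(void)
 *      {
 *              my_arch_setup_sched_clock();
 *              enable_sched_clock_irqtime();
 *      }
 *
 * my_arch_time_init() and my_arch_setup_sched_clock() are made-up
 * placeholders for the architecture's own clock bringup.
 */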

#endif /* _LINUX_SCHED_CLOCK_H */