#ifndef _ASM_X86_VGTOD_H
#define _ASM_X86_VGTOD_H

#include <linux/compiler.h>
#include <linux/clocksource.h>

#ifdef BUILD_VDSO32_64
typedef u64 gtod_long_t;
#else
typedef unsigned long gtod_long_t;
#endif
/*
 * vsyscall_gtod_data will be accessed by 32- and 64-bit code at the same
 * time, so be careful when modifying this structure.
 */
struct vsyscall_gtod_data {
	unsigned seq;

	int vclock_mode;
	cycle_t	cycle_last;
	cycle_t	mask;
	u32	mult;
	u32	shift;

	/* open coded 'struct timespec' */
	u64		wall_time_snsec;
	gtod_long_t	wall_time_sec;
	gtod_long_t	monotonic_time_sec;
	u64		monotonic_time_snsec;
	gtod_long_t	wall_time_coarse_sec;
	gtod_long_t	wall_time_coarse_nsec;
	gtod_long_t	monotonic_time_coarse_sec;
	gtod_long_t	monotonic_time_coarse_nsec;

	int		tz_minuteswest;
	int		tz_dsttime;
};
extern struct vsyscall_gtod_data vsyscall_gtod_data;

extern int vclocks_used;
static inline bool vclock_was_used(int vclock)
{
	return READ_ONCE(vclocks_used) & (1 << vclock);
}
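
/*
 * vclocks_used keeps one bit per VCLOCK_* mode, so vclock_was_used() reports
 * whether the given mode has been enabled at some point.  A minimal,
 * illustrative check (assuming VCLOCK_TSC from <asm/clocksource.h>; the
 * helper name is hypothetical):
 *
 *	if (vclock_was_used(VCLOCK_TSC))
 *		handle_tsc_backed_vdso_clock();
 */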

static inline unsigned gtod_read_begin(const struct vsyscall_gtod_data *s)
{
	unsigned ret;

repeat:
	ret = ACCESS_ONCE(s->seq);
	if (unlikely(ret & 1)) {
		cpu_relax();
		goto repeat;
	}
	smp_rmb();
	return ret;
}

static inline int gtod_read_retry(const struct vsyscall_gtod_data *s,
				  unsigned start)
{
	smp_rmb();
	return unlikely(s->seq != start);
}

static inline void gtod_write_begin(struct vsyscall_gtod_data *s)
{
	++s->seq;
	smp_wmb();
}

static inline void gtod_write_end(struct vsyscall_gtod_data *s)
{
	smp_wmb();
	++s->seq;
}
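
/*
 * Together these helpers form a seqcount-style protocol: gtod_write_begin()
 * makes 'seq' odd while an update is in flight, and readers spin in
 * gtod_read_begin() / gtod_read_retry() until they observe a stable, even
 * value.  A minimal reader sketch, assuming a pointer 'gtod' to the shared
 * vsyscall_gtod_data (illustrative only; the real fast-path readers live in
 * the vDSO time functions):
 *
 *	unsigned seq;
 *	gtod_long_t sec;
 *	u64 snsec;
 *
 *	do {
 *		seq = gtod_read_begin(gtod);
 *		sec = gtod->wall_time_sec;
 *		snsec = gtod->wall_time_snsec;
 *	} while (unlikely(gtod_read_retry(gtod, seq)));
 */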

#ifdef CONFIG_X86_64

#define VGETCPU_CPU_MASK 0xfff

static inline unsigned int __getcpu(void)
{
	unsigned int p;

	/*
	 * Load per CPU data from GDT.  LSL is faster than RDTSCP and
	 * works on all CPUs.  This is volatile so that it orders
	 * correctly wrt barrier() and to keep gcc from cleverly
	 * hoisting it out of the calling function.
	 */
	asm volatile ("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));

	return p;
}
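
/*
 * __getcpu() returns the segment limit of the per-CPU GDT entry, which packs
 * the CPU number into the low 12 bits (hence VGETCPU_CPU_MASK).  A caller
 * such as the vDSO getcpu() implementation can split the value roughly as
 * below; treating the upper bits as the NUMA node is an assumption based on
 * how the GDT entry is set up elsewhere in the kernel:
 *
 *	unsigned int p = __getcpu();
 *	unsigned int cpu  = p & VGETCPU_CPU_MASK;
 *	unsigned int node = p >> 12;
 */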

#endif /* CONFIG_X86_64 */

#endif /* _ASM_X86_VGTOD_H */