#ifndef _ASM_X86_VGTOD_H
#define _ASM_X86_VGTOD_H

#include <linux/compiler.h>
#include <linux/clocksource.h>

#ifdef BUILD_VDSO32_64
typedef u64 gtod_long_t;
#else
typedef unsigned long gtod_long_t;
#endif
/*
 * vsyscall_gtod_data will be accessed by 32- and 64-bit code at the same
 * time, so be careful when modifying this structure.
 */
struct vsyscall_gtod_data {
	unsigned seq;

	int vclock_mode;
	cycle_t	cycle_last;
	cycle_t	mask;
	u32	mult;
	u32	shift;

	/* open coded 'struct timespec' */
	u64		wall_time_snsec;
	gtod_long_t	wall_time_sec;
	gtod_long_t	monotonic_time_sec;
	u64		monotonic_time_snsec;
	gtod_long_t	wall_time_coarse_sec;
	gtod_long_t	wall_time_coarse_nsec;
	gtod_long_t	monotonic_time_coarse_sec;
	gtod_long_t	monotonic_time_coarse_nsec;

	int		tz_minuteswest;
	int		tz_dsttime;
};
extern struct vsyscall_gtod_data vsyscall_gtod_data;

extern int vclocks_used;
static inline bool vclock_was_used(int vclock)
{
	return READ_ONCE(vclocks_used) & (1 << vclock);
}
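
/*
 * Minimal usage sketch (illustrative only; the VCLOCK_* constants come
 * from asm/clocksource.h, not this header):
 *
 *	if (vclock_was_used(VCLOCK_TSC)) {
 *		...	the TSC-based vclock has been enabled at some point
 *	}
 */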

static inline unsigned gtod_read_begin(const struct vsyscall_gtod_data *s)
{
	unsigned ret;

repeat:
	ret = ACCESS_ONCE(s->seq);
	if (unlikely(ret & 1)) {
		cpu_relax();
		goto repeat;
	}
	smp_rmb();
	return ret;
}

static inline int gtod_read_retry(const struct vsyscall_gtod_data *s,
				  unsigned start)
{
	smp_rmb();
	return unlikely(s->seq != start);
}
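
/*
 * The helpers above and below implement a simple seqcount: readers spin
 * while a write is in progress (odd sequence number) and retry if the
 * sequence changed under them.  A minimal read-side sketch (the fields
 * read and the variable names are illustrative, not the actual vDSO code):
 *
 *	unsigned seq;
 *	u64 sec, snsec;
 *
 *	do {
 *		seq = gtod_read_begin(&vsyscall_gtod_data);
 *		sec = vsyscall_gtod_data.wall_time_sec;
 *		snsec = vsyscall_gtod_data.wall_time_snsec;
 *	} while (gtod_read_retry(&vsyscall_gtod_data, seq));
 */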

static inline void gtod_write_begin(struct vsyscall_gtod_data *s)
{
	++s->seq;
	smp_wmb();
}

static inline void gtod_write_end(struct vsyscall_gtod_data *s)
{
	smp_wmb();
	++s->seq;
}

#ifdef CONFIG_X86_64

#define VGETCPU_CPU_MASK 0xfff

static inline unsigned int __getcpu(void)
{
	unsigned int p;

	/*
	 * Load per CPU data from GDT.  LSL is faster than RDTSCP and
	 * works on all CPUs.  This is volatile so that it orders
	 * correctly wrt barrier() and to keep gcc from cleverly
	 * hoisting it out of the calling function.
	 */
	asm volatile ("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));

	return p;
}
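
/*
 * The segment limit loaded by LSL encodes the CPU number in the low 12
 * bits and the NUMA node in the bits above.  A caller can split the
 * result roughly like this (a sketch, not the actual vDSO getcpu code):
 *
 *	unsigned int p = __getcpu();
 *	unsigned int cpu  = p & VGETCPU_CPU_MASK;
 *	unsigned int node = p >> 12;
 */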

#endif /* CONFIG_X86_64 */

#endif /* _ASM_X86_VGTOD_H */