/*
 * Copyright 2006 Andi Kleen, SUSE Labs.
 * Subject to the GNU Public License, v.2
 *
 * Fast user context implementation of clock_gettime, gettimeofday, and time.
 *
 * 32-bit compat layer by Stefani Seibold <stefani@seibold.net>
 *  sponsored by Rohde & Schwarz GmbH & Co. KG Munich/Germany
 *
 * The code should have no internal unresolved relocations.
 * Check with readelf after changing.
 */

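/*
 * A quick way to verify that rule after a rebuild (a sketch; the exact
 * image path is an assumption and varies by tree and config):
 *
 *	readelf -r arch/x86/vdso/vdso.so
 *
 * should report that there are no relocations in the file.
 */
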
#include <uapi/linux/time.h>
#include <asm/vgtod.h>
#include <asm/hpet.h>
#include <asm/vvar.h>
#include <asm/unistd.h>
#include <asm/msr.h>
#include <linux/math64.h>
#include <linux/time.h>

#define gtod (&VVAR(vsyscall_gtod_data))

extern int __vdso_clock_gettime(clockid_t clock, struct timespec *ts);
extern int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz);
extern time_t __vdso_time(time_t *t);

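/*
 * Userspace normally reaches these entry points through the C library,
 * but they can also be resolved by hand.  A minimal userspace sketch,
 * assuming glibc (which lets the already-mapped vDSO be found via
 * dlopen() with RTLD_NOLOAD; error handling elided):
 *
 *	#include <dlfcn.h>
 *	#include <time.h>
 *
 *	typedef int (*vgettime_t)(clockid_t, struct timespec *);
 *	void *vdso = dlopen("linux-vdso.so.1", RTLD_LAZY | RTLD_NOLOAD);
 *	vgettime_t f = (vgettime_t)dlsym(vdso, "__vdso_clock_gettime");
 *	struct timespec ts;
 *	f(CLOCK_MONOTONIC, &ts);
 */
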
#ifdef CONFIG_HPET_TIMER
extern u8 hpet_page
	__attribute__((visibility("hidden")));

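/*
 * hpet_page is the vDSO's hidden mapping of the HPET MMIO region;
 * HPET_COUNTER is the byte offset of the main counter register within
 * that page, so the read below is a single 32-bit MMIO load.
 */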
static notrace cycle_t vread_hpet(void)
{
	return *(const volatile u32 *)(&hpet_page + HPET_COUNTER);
}
#endif

#ifndef BUILD_VDSO32

#include <linux/kernel.h>
#include <asm/vsyscall.h>
#include <asm/fixmap.h>
#include <asm/pvclock.h>

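/*
 * Syscall fallbacks for when the wanted clock has no vDSO fast path.
 * Per the x86-64 syscall ABI: "0"/"=a" is the syscall number and the
 * return value in %rax, "D" and "S" are the first two arguments in
 * %rdi and %rsi.
 */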
notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
{
	long ret;
	asm("syscall" : "=a" (ret) :
	    "0" (__NR_clock_gettime), "D" (clock), "S" (ts) : "memory");
	return ret;
}

notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
{
	long ret;

	asm("syscall" : "=a" (ret) :
	    "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
	return ret;
}

#ifdef CONFIG_PARAVIRT_CLOCK

static notrace const struct pvclock_vsyscall_time_info *get_pvti(int cpu)
{
	const struct pvclock_vsyscall_time_info *pvti_base;
	int idx = cpu / (PAGE_SIZE/PVTI_SIZE);
	int offset = cpu % (PAGE_SIZE/PVTI_SIZE);

	BUG_ON(PVCLOCK_FIXMAP_BEGIN + idx > PVCLOCK_FIXMAP_END);

	pvti_base = (struct pvclock_vsyscall_time_info *)
		    __fix_to_virt(PVCLOCK_FIXMAP_BEGIN+idx);

	return &pvti_base[offset];
}

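/*
 * Worked example for the index math above (assuming 4096-byte pages
 * and a 64-byte pvti entry, i.e. 64 entries per fixmap page): for
 * cpu 70, idx = 70 / 64 = 1 and offset = 70 % 64 = 6, so the result
 * is entry 6 of the second pvclock fixmap page.
 */
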
static notrace cycle_t vread_pvclock(int *mode)
{
	const struct pvclock_vsyscall_time_info *pvti;
	cycle_t ret;
	u64 last;
	u32 version;
	u8 flags;
	unsigned cpu, cpu1;

	/*
	 * Note: the hypervisor must guarantee that:
	 * 1. the CPU ID number maps 1:1 to per-CPU pvclock time info;
	 * 2. the per-CPU pvclock time info is updated if the
	 *    underlying CPU changes;
	 * 3. the version is increased whenever the underlying CPU
	 *    changes.
	 */
	do {
		cpu = __getcpu() & VGETCPU_CPU_MASK;
		/* TODO: We can put vcpu id into higher bits of pvti.version.
		 * This will save a couple of cycles by getting rid of
		 * __getcpu() calls (Gleb).
		 */

		pvti = get_pvti(cpu);

		version = __pvclock_read_cycles(&pvti->pvti, &ret, &flags);

		/*
		 * Test that we are still on the same cpu as well as the
		 * version.  We could have been migrated just after the
		 * first __getcpu() but before fetching the version, so
		 * we would not notice a version change.
		 */
		cpu1 = __getcpu() & VGETCPU_CPU_MASK;
	} while (unlikely(cpu != cpu1 ||
			  (pvti->pvti.version & 1) ||
			  pvti->pvti.version != version));

	if (unlikely(!(flags & PVCLOCK_TSC_STABLE_BIT)))
		*mode = VCLOCK_NONE;

	/* refer to tsc.c read_tsc() comment for rationale */
	last = gtod->cycle_last;

	if (likely(ret >= last))
		return ret;

	return last;
}
#endif

#else

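/*
 * The 32-bit fallbacks route through the kernel's __kernel_vsyscall
 * entry.  The syscall arguments belong in %ebx and %ecx, but %ebx is
 * the GOT pointer in PIC code and gcc will not let it appear in the
 * input or clobber lists, so it is saved in %edx by hand around the
 * call.
 */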
notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
{
	long ret;

	asm(
		"mov %%ebx, %%edx \n"
		"mov %2, %%ebx \n"
		"call __kernel_vsyscall \n"
		"mov %%edx, %%ebx \n"
		: "=a" (ret)
		: "0" (__NR_clock_gettime), "g" (clock), "c" (ts)
		: "memory", "edx");
	return ret;
}

notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
{
	long ret;

	asm(
		"mov %%ebx, %%edx \n"
		"mov %2, %%ebx \n"
		"call __kernel_vsyscall \n"
		"mov %%edx, %%ebx \n"
		: "=a" (ret)
		: "0" (__NR_gettimeofday), "g" (tv), "c" (tz)
		: "memory", "edx");
	return ret;
}

#ifdef CONFIG_PARAVIRT_CLOCK

static notrace cycle_t vread_pvclock(int *mode)
{
	*mode = VCLOCK_NONE;
	return 0;
}
#endif

#endif

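/*
 * A note on the fence mentioned below: rdtsc_barrier() is patched at
 * boot (via the alternatives mechanism) into whatever serializing
 * instruction the CPU needs before rdtsc, lfence or mfence, so the
 * TSC read cannot be speculated ahead of earlier loads.
 */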
notrace static cycle_t vread_tsc(void)
{
	cycle_t ret;
	u64 last;

	/*
	 * Empirically, a fence (of type that depends on the CPU)
	 * before rdtsc is enough to ensure that rdtsc is ordered
	 * with respect to loads.  The various CPU manuals are unclear
	 * as to whether rdtsc can be reordered with later loads,
	 * but no one has ever seen it happen.
	 */
	rdtsc_barrier();
	ret = (cycle_t)__native_read_tsc();

	last = gtod->cycle_last;

	if (likely(ret >= last))
		return ret;

	/*
	 * GCC likes to generate cmov here, but this branch is extremely
	 * predictable (it's just a function of time and the likely is
	 * very likely) and there's a data dependence, so force GCC
	 * to generate a branch instead.  I don't barrier() because
	 * we don't actually need a barrier, and if this function
	 * ever gets inlined it will generate worse code.
	 */
	asm volatile ("");
	return last;
}

notrace static inline u64 vgetsns(int *mode)
{
	u64 v;
	cycles_t cycles;

	if (gtod->vclock_mode == VCLOCK_TSC)
		cycles = vread_tsc();
#ifdef CONFIG_HPET_TIMER
	else if (gtod->vclock_mode == VCLOCK_HPET)
		cycles = vread_hpet();
#endif
#ifdef CONFIG_PARAVIRT_CLOCK
	else if (gtod->vclock_mode == VCLOCK_PVCLOCK)
		cycles = vread_pvclock(mode);
#endif
	else
		return 0;
	v = (cycles - gtod->cycle_last) & gtod->mask;
	return v * gtod->mult;
}

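/*
 * vgetsns() returns *shifted* nanoseconds: the callers add it to the
 * pre-shifted base time and only then divide by 2^shift.  A worked
 * example with made-up but representative numbers: for a 2.5 GHz TSC
 * (0.4 ns/cycle) and shift = 24, mult would be about
 * 0.4 * 2^24 ~= 6710886, so a delta of 2,500,000,000 cycles (one
 * second) yields (2500000000 * 6710886) >> 24 ~= 10^9 ns.
 */
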
/* Code size doesn't matter (vdso is 4k anyway) and this is faster. */
notrace static int __always_inline do_realtime(struct timespec *ts)
{
	unsigned long seq;
	u64 ns;
	int mode;

	do {
		seq = gtod_read_begin(gtod);
		mode = gtod->vclock_mode;
		ts->tv_sec = gtod->wall_time_sec;
		ns = gtod->wall_time_snsec;
		ns += vgetsns(&mode);
		ns >>= gtod->shift;
	} while (unlikely(gtod_read_retry(gtod, seq)));

	ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
	ts->tv_nsec = ns;

	return mode;
}

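/*
 * The gtod_read_begin()/gtod_read_retry() pair is a lockless seqcount
 * read: the loop re-runs if the kernel updated vsyscall_gtod_data in
 * the middle of the reads.  __iter_div_u64_rem() is used rather than
 * a real division because ns holds at most a few seconds' worth of
 * nanoseconds here, so an iterative subtract beats a 64-bit divide.
 */
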
notrace static int __always_inline do_monotonic(struct timespec *ts)
{
	unsigned long seq;
	u64 ns;
	int mode;

	do {
		seq = gtod_read_begin(gtod);
		mode = gtod->vclock_mode;
		ts->tv_sec = gtod->monotonic_time_sec;
		ns = gtod->monotonic_time_snsec;
		ns += vgetsns(&mode);
		ns >>= gtod->shift;
	} while (unlikely(gtod_read_retry(gtod, seq)));

	ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
	ts->tv_nsec = ns;

	return mode;
}

notrace static void do_realtime_coarse(struct timespec *ts)
{
	unsigned long seq;
	do {
		seq = gtod_read_begin(gtod);
		ts->tv_sec = gtod->wall_time_coarse_sec;
		ts->tv_nsec = gtod->wall_time_coarse_nsec;
	} while (unlikely(gtod_read_retry(gtod, seq)));
}

notrace static void do_monotonic_coarse(struct timespec *ts)
{
	unsigned long seq;
	do {
		seq = gtod_read_begin(gtod);
		ts->tv_sec = gtod->monotonic_time_coarse_sec;
		ts->tv_nsec = gtod->monotonic_time_coarse_nsec;
	} while (unlikely(gtod_read_retry(gtod, seq)));
}

notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
{
	switch (clock) {
	case CLOCK_REALTIME:
		if (do_realtime(ts) == VCLOCK_NONE)
			goto fallback;
		break;
	case CLOCK_MONOTONIC:
		if (do_monotonic(ts) == VCLOCK_NONE)
			goto fallback;
		break;
	case CLOCK_REALTIME_COARSE:
		do_realtime_coarse(ts);
		break;
	case CLOCK_MONOTONIC_COARSE:
		do_monotonic_coarse(ts);
		break;
	default:
		goto fallback;
	}

	return 0;
fallback:
	return vdso_fallback_gettime(clock, ts);
}
int clock_gettime(clockid_t, struct timespec *)
	__attribute__((weak, alias("__vdso_clock_gettime")));

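/*
 * do_realtime() can fill a timeval as if it were a timespec because
 * the two structs share the same two-word layout; the nanoseconds
 * written into tv_usec are scaled down to microseconds afterwards.
 */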
notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
{
	if (likely(tv != NULL)) {
		if (unlikely(do_realtime((struct timespec *)tv) == VCLOCK_NONE))
			return vdso_fallback_gtod(tv, tz);
		tv->tv_usec /= 1000;
	}
	if (unlikely(tz != NULL)) {
		tz->tz_minuteswest = gtod->tz_minuteswest;
		tz->tz_dsttime = gtod->tz_dsttime;
	}

	return 0;
}
int gettimeofday(struct timeval *, struct timezone *)
	__attribute__((weak, alias("__vdso_gettimeofday")));

/*
 * This will break when the xtime seconds get inaccurate, but that is
 * unlikely.
 */
notrace time_t __vdso_time(time_t *t)
{
	/* This is atomic on x86 so we don't need any locks. */
	time_t result = ACCESS_ONCE(gtod->wall_time_sec);

	if (t)
		*t = result;
	return result;
}
int time(time_t *t)
	__attribute__((weak, alias("__vdso_time")));