blob: 6eea70b8f384f4967f5f93345a067788f1746986 [file] [log] [blame]
Andi Kleen2aae9502007-07-21 17:10:01 +02001/*
2 * Copyright 2006 Andi Kleen, SUSE Labs.
3 * Subject to the GNU Public License, v.2
4 *
Andy Lutomirskif144a6b2011-05-23 09:31:30 -04005 * Fast user context implementation of clock_gettime, gettimeofday, and time.
Andi Kleen2aae9502007-07-21 17:10:01 +02006 *
7 * The code should have no internal unresolved relocations.
8 * Check with readelf after changing.
Andi Kleen2aae9502007-07-21 17:10:01 +02009 */
10
Ingo Molnar2b7d0392008-11-12 13:17:38 +010011/* Disable profiling for userspace code: */
Steven Rostedt2ed84ee2008-11-12 15:24:24 -050012#define DISABLE_BRANCH_PROFILING
Ingo Molnar2b7d0392008-11-12 13:17:38 +010013
Andi Kleen2aae9502007-07-21 17:10:01 +020014#include <linux/kernel.h>
15#include <linux/posix-timers.h>
16#include <linux/time.h>
17#include <linux/string.h>
18#include <asm/vsyscall.h>
Andy Lutomirski98d0ac32011-07-14 06:47:22 -040019#include <asm/fixmap.h>
Andi Kleen2aae9502007-07-21 17:10:01 +020020#include <asm/vgtod.h>
21#include <asm/timex.h>
22#include <asm/hpet.h>
23#include <asm/unistd.h>
24#include <asm/io.h>
Andi Kleen2aae9502007-07-21 17:10:01 +020025
Andy Lutomirski8c49d9a2011-05-23 09:31:24 -040026#define gtod (&VVAR(vsyscall_gtod_data))
Andi Kleen2aae9502007-07-21 17:10:01 +020027
/*
 * Read the TSC from userspace for use as a clocksource.
 *
 * Returns max(rdtsc, cycle_last): the sampled TSC is clamped below by
 * the last value the kernel's timekeeping recorded, so a TSC that is
 * slightly behind on this CPU can never make time appear to go
 * backwards relative to the last kernel update.
 */
notrace static cycle_t vread_tsc(void)
{
	cycle_t ret;
	u64 last;

	/*
	 * Empirically, a fence (of type that depends on the CPU)
	 * before rdtsc is enough to ensure that rdtsc is ordered
	 * with respect to loads. The various CPU manuals are unclear
	 * as to whether rdtsc can be reordered with later loads,
	 * but no one has ever seen it happen.
	 */
	rdtsc_barrier();
	ret = (cycle_t)vget_cycles();

	last = VVAR(vsyscall_gtod_data).clock.cycle_last;

	if (likely(ret >= last))
		return ret;

	/*
	 * GCC likes to generate cmov here, but this branch is extremely
	 * predictable (it's just a function of time and the likely is
	 * very likely) and there's a data dependence, so force GCC
	 * to generate a branch instead. I don't barrier() because
	 * we don't actually need a barrier, and if this function
	 * ever gets inlined it will generate worse code.
	 */
	asm volatile ("");
	return last;
}
59
60static notrace cycle_t vread_hpet(void)
61{
62 return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
63}
64
Steven Rostedt23adec52008-05-12 21:20:41 +020065notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
Andi Kleen2aae9502007-07-21 17:10:01 +020066{
67 long ret;
68 asm("syscall" : "=a" (ret) :
69 "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "memory");
70 return ret;
71}
72
John Stultza939e812012-03-01 22:11:09 -080073notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
74{
75 long ret;
76
77 asm("syscall" : "=a" (ret) :
78 "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
79 return ret;
80}
81
82
Steven Rostedt23adec52008-05-12 21:20:41 +020083notrace static inline long vgetns(void)
Andi Kleen2aae9502007-07-21 17:10:01 +020084{
Andi Kleen95b08672007-09-11 14:02:09 +020085 long v;
Andy Lutomirski98d0ac32011-07-14 06:47:22 -040086 cycles_t cycles;
87 if (gtod->clock.vclock_mode == VCLOCK_TSC)
88 cycles = vread_tsc();
John Stultza939e812012-03-01 22:11:09 -080089 else if (gtod->clock.vclock_mode == VCLOCK_HPET)
Andy Lutomirski98d0ac32011-07-14 06:47:22 -040090 cycles = vread_hpet();
John Stultza939e812012-03-01 22:11:09 -080091 else
92 return 0;
Andy Lutomirski98d0ac32011-07-14 06:47:22 -040093 v = (cycles - gtod->clock.cycle_last) & gtod->clock.mask;
Andi Kleen95b08672007-09-11 14:02:09 +020094 return (v * gtod->clock.mult) >> gtod->clock.shift;
Andi Kleen2aae9502007-07-21 17:10:01 +020095}
96
/*
 * Read CLOCK_REALTIME without entering the kernel.
 *
 * Loops on the vsyscall_gtod_data seqcount so that the seconds,
 * nanoseconds and clocksource delta all come from one consistent
 * timekeeping update.  Returns the vclock mode in effect during the
 * read; VCLOCK_NONE tells the caller to fall back to the syscall.
 */
notrace static noinline int do_realtime(struct timespec *ts)
{
	unsigned long seq, ns;
	int mode;

	do {
		seq = read_seqcount_begin(&gtod->seq);
		mode = gtod->clock.vclock_mode;
		ts->tv_sec = gtod->wall_time_sec;
		ts->tv_nsec = gtod->wall_time_nsec;
		ns = vgetns();	/* ns elapsed since the last update */
	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));

	/* Fold the elapsed ns in; normalizes tv_nsec overflow into tv_sec. */
	timespec_add_ns(ts, ns);
	return mode;
}
113
/*
 * Read CLOCK_MONOTONIC without entering the kernel.
 *
 * Same seqcount-protected scheme as do_realtime(), but starting from
 * the kernel-maintained monotonic base time.  Returns the vclock mode
 * in effect; VCLOCK_NONE means the caller must use the syscall.
 */
notrace static noinline int do_monotonic(struct timespec *ts)
{
	unsigned long seq, ns;
	int mode;

	do {
		seq = read_seqcount_begin(&gtod->seq);
		mode = gtod->clock.vclock_mode;
		ts->tv_sec = gtod->monotonic_time_sec;
		ts->tv_nsec = gtod->monotonic_time_nsec;
		ns = vgetns();	/* ns elapsed since the last update */
	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
	timespec_add_ns(ts, ns);

	return mode;
}
130
john stultzda15cfd2009-08-19 19:13:34 -0700131notrace static noinline int do_realtime_coarse(struct timespec *ts)
132{
133 unsigned long seq;
134 do {
Thomas Gleixner2ab51652012-02-28 19:46:04 +0000135 seq = read_seqcount_begin(&gtod->seq);
john stultzda15cfd2009-08-19 19:13:34 -0700136 ts->tv_sec = gtod->wall_time_coarse.tv_sec;
137 ts->tv_nsec = gtod->wall_time_coarse.tv_nsec;
Thomas Gleixner2ab51652012-02-28 19:46:04 +0000138 } while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
john stultzda15cfd2009-08-19 19:13:34 -0700139 return 0;
140}
141
142notrace static noinline int do_monotonic_coarse(struct timespec *ts)
143{
Andy Lutomirski91ec87d2012-03-22 21:15:51 -0700144 unsigned long seq;
john stultzda15cfd2009-08-19 19:13:34 -0700145 do {
Thomas Gleixner2ab51652012-02-28 19:46:04 +0000146 seq = read_seqcount_begin(&gtod->seq);
Andy Lutomirski91ec87d2012-03-22 21:15:51 -0700147 ts->tv_sec = gtod->monotonic_time_coarse.tv_sec;
148 ts->tv_nsec = gtod->monotonic_time_coarse.tv_nsec;
Thomas Gleixner2ab51652012-02-28 19:46:04 +0000149 } while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
Andy Lutomirski0f51f282011-05-23 09:31:27 -0400150
john stultzda15cfd2009-08-19 19:13:34 -0700151 return 0;
152}
153
Steven Rostedt23adec52008-05-12 21:20:41 +0200154notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
Andi Kleen2aae9502007-07-21 17:10:01 +0200155{
John Stultza939e812012-03-01 22:11:09 -0800156 int ret = VCLOCK_NONE;
157
Andy Lutomirski0d7b8542011-06-05 13:50:20 -0400158 switch (clock) {
159 case CLOCK_REALTIME:
John Stultza939e812012-03-01 22:11:09 -0800160 ret = do_realtime(ts);
Andy Lutomirski0d7b8542011-06-05 13:50:20 -0400161 break;
162 case CLOCK_MONOTONIC:
John Stultza939e812012-03-01 22:11:09 -0800163 ret = do_monotonic(ts);
Andy Lutomirski0d7b8542011-06-05 13:50:20 -0400164 break;
165 case CLOCK_REALTIME_COARSE:
166 return do_realtime_coarse(ts);
167 case CLOCK_MONOTONIC_COARSE:
168 return do_monotonic_coarse(ts);
169 }
170
John Stultza939e812012-03-01 22:11:09 -0800171 if (ret == VCLOCK_NONE)
172 return vdso_fallback_gettime(clock, ts);
173 return 0;
Andi Kleen2aae9502007-07-21 17:10:01 +0200174}
175int clock_gettime(clockid_t, struct timespec *)
176 __attribute__((weak, alias("__vdso_clock_gettime")));
177
/*
 * vDSO entry point for gettimeofday().
 *
 * Fills tv by running do_realtime() directly on it: the BUILD_BUG_ON
 * proves struct timeval and struct timespec have the same size and
 * that tv_usec sits at tv_nsec's offset, so the cast is layout-safe;
 * the nanoseconds are then scaled down to microseconds in place.
 * The timezone is copied from the cached sys_tz.  Falls back to the
 * real syscall when the clocksource is not userspace-readable.
 */
notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
{
	long ret = VCLOCK_NONE;

	if (likely(tv != NULL)) {
		BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
			     offsetof(struct timespec, tv_nsec) ||
			     sizeof(*tv) != sizeof(struct timespec));
		ret = do_realtime((struct timespec *)tv);
		tv->tv_usec /= 1000;	/* do_realtime wrote ns; callers expect us */
	}
	if (unlikely(tz != NULL)) {
		/* Avoid memcpy. Some old compilers fail to inline it */
		tz->tz_minuteswest = gtod->sys_tz.tz_minuteswest;
		tz->tz_dsttime = gtod->sys_tz.tz_dsttime;
	}

	/* tv == NULL (or VCLOCK_NONE) also takes the syscall path. */
	if (ret == VCLOCK_NONE)
		return vdso_fallback_gtod(tv, tz);
	return 0;
}
int gettimeofday(struct timeval *, struct timezone *)
	__attribute__((weak, alias("__vdso_gettimeofday")));
Andy Lutomirskif144a6b2011-05-23 09:31:30 -0400201
Andy Lutomirski0d7b8542011-06-05 13:50:20 -0400202/*
203 * This will break when the xtime seconds get inaccurate, but that is
204 * unlikely
205 */
Andy Lutomirskif144a6b2011-05-23 09:31:30 -0400206notrace time_t __vdso_time(time_t *t)
207{
Andy Lutomirski973aa812011-05-23 09:31:31 -0400208 /* This is atomic on x86_64 so we don't need any locks. */
Andy Lutomirski0d7b8542011-06-05 13:50:20 -0400209 time_t result = ACCESS_ONCE(VVAR(vsyscall_gtod_data).wall_time_sec);
Andy Lutomirskif144a6b2011-05-23 09:31:30 -0400210
211 if (t)
212 *t = result;
213 return result;
214}
215int time(time_t *t)
216 __attribute__((weak, alias("__vdso_time")));