/*
 * Copyright 2006 Andi Kleen, SUSE Labs.
 * Subject to the GNU Public License, v.2
 *
 * Fast user context implementation of clock_gettime, gettimeofday, and time.
 *
 * The code should have no internal unresolved relocations.
 * Check with readelf after changing.
 */

/* Disable profiling for userspace code: */
#define DISABLE_BRANCH_PROFILING

#include <linux/kernel.h>
#include <linux/posix-timers.h>
#include <linux/time.h>
#include <linux/string.h>
#include <asm/vsyscall.h>
#include <asm/fixmap.h>
#include <asm/vgtod.h>
#include <asm/timex.h>
#include <asm/hpet.h>
#include <asm/unistd.h>
#include <asm/io.h>

#define gtod (&VVAR(vsyscall_gtod_data))

notrace static cycle_t vread_tsc(void)
{
	cycle_t ret;
	u64 last;

	/*
	 * Empirically, a fence (of a type that depends on the CPU)
	 * before rdtsc is enough to ensure that rdtsc is ordered
	 * with respect to loads. The various CPU manuals are unclear
	 * as to whether rdtsc can be reordered with later loads,
	 * but no one has ever seen it happen.
	 */
	rdtsc_barrier();
	ret = (cycle_t)vget_cycles();

	last = VVAR(vsyscall_gtod_data).clock.cycle_last;

	if (likely(ret >= last))
		return ret;

	/*
	 * GCC likes to generate cmov here, but this branch is extremely
	 * predictable (it's just a function of time and the likely is
	 * very likely) and there's a data dependence, so force GCC
	 * to generate a branch instead. I don't barrier() because
	 * we don't actually need a barrier, and if this function
	 * ever gets inlined it will generate worse code.
	 */
	asm volatile ("");
	return last;
}
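
/*
 * Illustration (not part of this file): the same ordered-TSC-read
 * pattern as a standalone userspace sketch. It assumes lfence is a
 * sufficient rdtsc barrier, which holds on Intel CPUs; rdtsc_barrier()
 * above selects lfence or mfence per CPU vendor via alternatives.
 */
#if 0
#include <stdint.h>

static inline uint64_t rdtsc_ordered_sketch(void)
{
	uint32_t lo, hi;

	/* The lfence keeps earlier loads from passing the rdtsc. */
	asm volatile("lfence; rdtsc" : "=a" (lo), "=d" (hi) : : "memory");
	return ((uint64_t)hi << 32) | lo;
}
#endif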

static notrace cycle_t vread_hpet(void)
{
	return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
}
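
/*
 * Note: 0xf0 is the offset of the HPET main counter register
 * (HPET_COUNTER in <asm/hpet.h>). A rough userspace analogue, assuming
 * the platform exposes /dev/hpet and allows mapping its register page
 * (both configuration-dependent), could look like this sketch (error
 * handling omitted):
 */
#if 0
#include <fcntl.h>
#include <stdint.h>
#include <sys/mman.h>

static uint32_t read_hpet_counter_sketch(void)
{
	int fd = open("/dev/hpet", O_RDONLY);
	volatile uint32_t *regs = mmap(NULL, 4096, PROT_READ,
				       MAP_SHARED, fd, 0);

	return regs[0xf0 / 4];		/* main counter, low 32 bits */
}
#endif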

notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
{
	long ret;
	asm("syscall" : "=a" (ret) :
	    "0" (__NR_clock_gettime), "D" (clock), "S" (ts) :
	    "memory", "rcx", "r11");	/* syscall clobbers rcx and r11 */
	return ret;
}

notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
{
	long ret;

	asm("syscall" : "=a" (ret) :
	    "0" (__NR_gettimeofday), "D" (tv), "S" (tz) :
	    "memory", "rcx", "r11");	/* syscall clobbers rcx and r11 */
	return ret;
}
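
/*
 * Illustration (not part of this file): when no vclock is usable, the
 * functions above take the slow path through a real system call, which
 * is equivalent to this libc-level sketch:
 */
#if 0
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

static long fallback_gettime_sketch(clockid_t clock, struct timespec *ts)
{
	return syscall(SYS_clock_gettime, clock, ts);
}
#endif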

notrace static inline long vgetns(void)
{
	long v;
	cycles_t cycles;
	if (gtod->clock.vclock_mode == VCLOCK_TSC)
		cycles = vread_tsc();
	else if (gtod->clock.vclock_mode == VCLOCK_HPET)
		cycles = vread_hpet();
	else
		return 0;
	v = (cycles - gtod->clock.cycle_last) & gtod->clock.mask;
	return (v * gtod->clock.mult) >> gtod->clock.shift;
}
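
/*
 * Illustration (not part of this file): the mult/shift pair converts
 * cycles to nanoseconds without a division:
 * ns = (cycles * mult) >> shift. The constants below are hypothetical;
 * the kernel derives the real ones in clocks_calc_mult_shift().
 */
#if 0
#include <stdint.h>

static uint64_t cycles_to_ns_sketch(uint64_t cycles)
{
	/* For a 2.5 GHz TSC with shift = 22:
	 * mult = (10^9 << 22) / 2.5e9 ~= 1677721, i.e. 0.4 ns/cycle
	 * in fixed point.
	 */
	const uint32_t mult = 1677721;
	const uint32_t shift = 22;

	return (cycles * mult) >> shift;
}
#endif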

notrace static noinline int do_realtime(struct timespec *ts)
{
	unsigned long seq, ns;
	int mode;

	do {
		seq = read_seqbegin(&gtod->lock);
		mode = gtod->clock.vclock_mode;
		ts->tv_sec = gtod->wall_time_sec;
		ts->tv_nsec = gtod->wall_time_nsec;
		ns = vgetns();
	} while (unlikely(read_seqretry(&gtod->lock, seq)));

	timespec_add_ns(ts, ns);
	return mode;
}
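
/*
 * Illustration (not part of this file): read_seqbegin()/read_seqretry()
 * above form the reader half of a seqlock. A minimal sketch of the
 * protocol, with a hypothetical counter the writer increments before
 * and after each update (odd value = update in progress); the real
 * kernel versions use smp_rmb() where this sketch has plain compiler
 * barriers:
 */
#if 0
static unsigned read_seqbegin_sketch(const volatile unsigned *seq)
{
	unsigned s;

	while ((s = *seq) & 1)		/* spin while a write is in flight */
		;
	asm volatile("" : : : "memory");  /* read the data after the counter */
	return s;
}

static int read_seqretry_sketch(const volatile unsigned *seq, unsigned start)
{
	asm volatile("" : : : "memory");  /* read the counter after the data */
	return *seq != start;		/* nonzero: a writer intervened, retry */
}
#endif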

notrace static noinline int do_monotonic(struct timespec *ts)
{
	unsigned long seq, ns, secs;
	int mode;

	do {
		seq = read_seqbegin(&gtod->lock);
		mode = gtod->clock.vclock_mode;
		secs = gtod->wall_time_sec;
		ns = gtod->wall_time_nsec + vgetns();
		secs += gtod->wall_to_monotonic.tv_sec;
		ns += gtod->wall_to_monotonic.tv_nsec;
	} while (unlikely(read_seqretry(&gtod->lock, seq)));

	/* wall_time_nsec, vgetns(), and wall_to_monotonic.tv_nsec
	 * are all guaranteed to be nonnegative.
	 */
	while (ns >= NSEC_PER_SEC) {
		ns -= NSEC_PER_SEC;
		++secs;
	}
	ts->tv_sec = secs;
	ts->tv_nsec = ns;

	return mode;
}

notrace static noinline int do_realtime_coarse(struct timespec *ts)
{
	unsigned long seq;
	do {
		seq = read_seqbegin(&gtod->lock);
		ts->tv_sec = gtod->wall_time_coarse.tv_sec;
		ts->tv_nsec = gtod->wall_time_coarse.tv_nsec;
	} while (unlikely(read_seqretry(&gtod->lock, seq)));
	return 0;
}

notrace static noinline int do_monotonic_coarse(struct timespec *ts)
{
	unsigned long seq, ns, secs;
	do {
		seq = read_seqbegin(&gtod->lock);
		secs = gtod->wall_time_coarse.tv_sec;
		ns = gtod->wall_time_coarse.tv_nsec;
		secs += gtod->wall_to_monotonic.tv_sec;
		ns += gtod->wall_to_monotonic.tv_nsec;
	} while (unlikely(read_seqretry(&gtod->lock, seq)));

	/* wall_time_coarse.tv_nsec and wall_to_monotonic.tv_nsec are
	 * guaranteed to be between 0 and NSEC_PER_SEC.
	 */
	if (ns >= NSEC_PER_SEC) {
		ns -= NSEC_PER_SEC;
		++secs;
	}
	ts->tv_sec = secs;
	ts->tv_nsec = ns;

	return 0;
}
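
/*
 * Illustration (not part of this file): the coarse clocks trade
 * accuracy for speed: they return the timestamp of the last tick and
 * never read the TSC or HPET. Userspace can observe the coarser
 * resolution like this:
 */
#if 0
#include <stdio.h>
#include <time.h>

static void show_coarse_res_sketch(void)
{
	struct timespec res;

	/* Typically the tick length (1-10 ms), versus the 1 ns
	 * reported for CLOCK_REALTIME with high-resolution timers.
	 */
	clock_getres(CLOCK_REALTIME_COARSE, &res);
	printf("coarse resolution: %ld ns\n", res.tv_nsec);
}
#endif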

notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
{
	int ret = VCLOCK_NONE;

	switch (clock) {
	case CLOCK_REALTIME:
		ret = do_realtime(ts);
		break;
	case CLOCK_MONOTONIC:
		ret = do_monotonic(ts);
		break;
	case CLOCK_REALTIME_COARSE:
		return do_realtime_coarse(ts);
	case CLOCK_MONOTONIC_COARSE:
		return do_monotonic_coarse(ts);
	}

	if (ret == VCLOCK_NONE)
		return vdso_fallback_gettime(clock, ts);
	return 0;
}
int clock_gettime(clockid_t, struct timespec *)
	__attribute__((weak, alias("__vdso_clock_gettime")));
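
/*
 * Illustration (not part of this file): the weak alias above is what
 * exports clock_gettime from linux-vdso.so.1. An ordinary program just
 * calls clock_gettime() and glibc dispatches to the vDSO copy, so the
 * fast path involves no kernel entry:
 */
#if 0
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);	/* no syscall on the fast path */
	printf("%ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);
	return 0;
}
#endif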

notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
{
	long ret = VCLOCK_NONE;

	if (likely(tv != NULL)) {
		BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
			     offsetof(struct timespec, tv_nsec) ||
			     sizeof(*tv) != sizeof(struct timespec));
		ret = do_realtime((struct timespec *)tv);
		tv->tv_usec /= 1000;
	}
	if (unlikely(tz != NULL)) {
		/* Avoid memcpy. Some old compilers fail to inline it */
		tz->tz_minuteswest = gtod->sys_tz.tz_minuteswest;
		tz->tz_dsttime = gtod->sys_tz.tz_dsttime;
	}

	if (ret == VCLOCK_NONE)
		return vdso_fallback_gtod(tv, tz);
	return 0;
}
int gettimeofday(struct timeval *, struct timezone *)
	__attribute__((weak, alias("__vdso_gettimeofday")));
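
/*
 * Note on the cast above: struct timeval and struct timespec have the
 * same layout on x86_64 (two longs), which the BUILD_BUG_ON checks at
 * compile time. do_realtime() therefore writes nanoseconds into
 * tv_usec, and the division by 1000 afterwards turns them into
 * microseconds, e.g. tv_usec = 123456789 ns / 1000 = 123456 us.
 */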

/*
 * This will break when the xtime seconds get inaccurate, but that is
 * unlikely.
 */
notrace time_t __vdso_time(time_t *t)
{
	/* This is atomic on x86_64 so we don't need any locks. */
	time_t result = ACCESS_ONCE(VVAR(vsyscall_gtod_data).wall_time_sec);

	if (t)
		*t = result;
	return result;
}
int time(time_t *t)
	__attribute__((weak, alias("__vdso_time")));
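
/*
 * Illustration (not part of this file): ACCESS_ONCE() forces a single,
 * untorn load by going through a volatile-qualified pointer; since an
 * aligned 64-bit load is atomic on x86_64, no seqlock is needed above.
 * The kernel defines the macro roughly as:
 */
#if 0
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
#endif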