/*
 * Copyright 2006 Andi Kleen, SUSE Labs.
 * Subject to the GNU Public License, v.2
 *
 * Fast user context implementation of clock_gettime, gettimeofday, and time.
 *
 * The code should have no internal unresolved relocations.
 * Check with readelf after changing.
 * Also alternative() doesn't work.
 */

/* Disable profiling for userspace code: */
#define DISABLE_BRANCH_PROFILING

#include <linux/kernel.h>
#include <linux/posix-timers.h>
#include <linux/time.h>
#include <linux/string.h>
#include <asm/vsyscall.h>
#include <asm/vgtod.h>
#include <asm/timex.h>
#include <asm/hpet.h>
#include <asm/unistd.h>
#include <asm/io.h>

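/*
 * All of the fast paths below read vsyscall_gtod_data, a structure the
 * kernel's timekeeping code keeps up to date and exposes read-only to
 * userspace, so no system call is needed in the common case.
 */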
#define gtod (&VVAR(vsyscall_gtod_data))

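/*
 * Fallback path: issue the real clock_gettime system call when the
 * fast userspace path cannot be used (no usable clocksource vread,
 * an unsupported clock id, or vsyscall disabled via sysctl).
 */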
notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
{
	long ret;
	asm("syscall" : "=a" (ret) :
	    "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "memory");
	return ret;
}

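/*
 * Read the clocksource through its userspace vread hook and convert the
 * cycle delta since cycle_last to nanoseconds using the mult/shift
 * factors published by the timekeeping code.
 */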
notrace static inline long vgetns(void)
{
	long v;
	cycles_t (*vread)(void);
	vread = gtod->clock.vread;
	v = (vread() - gtod->clock.cycle_last) & gtod->clock.mask;
	return (v * gtod->clock.mult) >> gtod->clock.shift;
}

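/*
 * CLOCK_REALTIME: snapshot the wall time under the gtod seqlock, add the
 * nanoseconds elapsed since the last timekeeping update, and retry if an
 * update raced with the read.
 */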
notrace static noinline int do_realtime(struct timespec *ts)
{
	unsigned long seq, ns;
	do {
		seq = read_seqbegin(&gtod->lock);
		ts->tv_sec = gtod->wall_time_sec;
		ts->tv_nsec = gtod->wall_time_nsec;
		ns = vgetns();
	} while (unlikely(read_seqretry(&gtod->lock, seq)));
	timespec_add_ns(ts, ns);
	return 0;
}

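/*
 * CLOCK_MONOTONIC: same as do_realtime() but with the wall_to_monotonic
 * offset applied; the accumulated nanoseconds are folded back into the
 * 0..NSEC_PER_SEC range by hand afterwards.
 */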
notrace static noinline int do_monotonic(struct timespec *ts)
{
	unsigned long seq, ns, secs;
	do {
		seq = read_seqbegin(&gtod->lock);
		secs = gtod->wall_time_sec;
		ns = gtod->wall_time_nsec + vgetns();
		secs += gtod->wall_to_monotonic.tv_sec;
		ns += gtod->wall_to_monotonic.tv_nsec;
	} while (unlikely(read_seqretry(&gtod->lock, seq)));

	/* wall_time_nsec, vgetns(), and wall_to_monotonic.tv_nsec
	 * are all guaranteed to be nonnegative.
	 */
	while (ns >= NSEC_PER_SEC) {
		ns -= NSEC_PER_SEC;
		++secs;
	}
	ts->tv_sec = secs;
	ts->tv_nsec = ns;

	return 0;
}

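/*
 * CLOCK_REALTIME_COARSE: return the wall time as of the last timekeeping
 * update; no clocksource read, so it is cheaper but coarser.
 */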
notrace static noinline int do_realtime_coarse(struct timespec *ts)
{
	unsigned long seq;
	do {
		seq = read_seqbegin(&gtod->lock);
		ts->tv_sec = gtod->wall_time_coarse.tv_sec;
		ts->tv_nsec = gtod->wall_time_coarse.tv_nsec;
	} while (unlikely(read_seqretry(&gtod->lock, seq)));
	return 0;
}

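/*
 * CLOCK_MONOTONIC_COARSE: coarse wall time plus the wall_to_monotonic
 * offset; a single conditional suffices to normalize because both
 * nanosecond fields are already below NSEC_PER_SEC.
 */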
notrace static noinline int do_monotonic_coarse(struct timespec *ts)
{
	unsigned long seq, ns, secs;
	do {
		seq = read_seqbegin(&gtod->lock);
		secs = gtod->wall_time_coarse.tv_sec;
		ns = gtod->wall_time_coarse.tv_nsec;
		secs += gtod->wall_to_monotonic.tv_sec;
		ns += gtod->wall_to_monotonic.tv_nsec;
	} while (unlikely(read_seqretry(&gtod->lock, seq)));

	/* wall_time_nsec and wall_to_monotonic.tv_nsec are
	 * guaranteed to be between 0 and NSEC_PER_SEC.
	 */
	if (ns >= NSEC_PER_SEC) {
		ns -= NSEC_PER_SEC;
		++secs;
	}
	ts->tv_sec = secs;
	ts->tv_nsec = ns;

	return 0;
}

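/*
 * Entry point for clock_gettime(): dispatch to the fast helpers for the
 * clocks handled here and fall back to the real system call for
 * everything else or when vsyscall support is disabled.
 */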
notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
{
	if (likely(gtod->sysctl_enabled))
		switch (clock) {
		case CLOCK_REALTIME:
			if (likely(gtod->clock.vread))
				return do_realtime(ts);
			break;
		case CLOCK_MONOTONIC:
			if (likely(gtod->clock.vread))
				return do_monotonic(ts);
			break;
		case CLOCK_REALTIME_COARSE:
			return do_realtime_coarse(ts);
		case CLOCK_MONOTONIC_COARSE:
			return do_monotonic_coarse(ts);
		}
	return vdso_fallback_gettime(clock, ts);
}
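
/*
 * The weak alias exports the same code under the plain name as well, so
 * the vDSO provides both clock_gettime and __vdso_clock_gettime. The
 * same pattern is used for gettimeofday() and time() below.
 */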
int clock_gettime(clockid_t, struct timespec *)
	__attribute__((weak, alias("__vdso_clock_gettime")));

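/*
 * gettimeofday() reuses do_realtime(): struct timeval and struct
 * timespec have the same layout (checked by the BUILD_BUG_ON below), so
 * the result is written in place and the nanoseconds are then scaled
 * down to microseconds. The timezone, if requested, is copied field by
 * field from the kernel's sys_tz.
 */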
notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
{
	long ret;
	if (likely(gtod->sysctl_enabled && gtod->clock.vread)) {
		if (likely(tv != NULL)) {
			BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
				     offsetof(struct timespec, tv_nsec) ||
				     sizeof(*tv) != sizeof(struct timespec));
			do_realtime((struct timespec *)tv);
			tv->tv_usec /= 1000;
		}
		if (unlikely(tz != NULL)) {
			/* Avoid memcpy. Some old compilers fail to inline it */
			tz->tz_minuteswest = gtod->sys_tz.tz_minuteswest;
			tz->tz_dsttime = gtod->sys_tz.tz_dsttime;
		}
		return 0;
	}
	asm("syscall" : "=a" (ret) :
	    "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
	return ret;
}
int gettimeofday(struct timeval *, struct timezone *)
	__attribute__((weak, alias("__vdso_gettimeofday")));

/* This will break when the xtime seconds get inaccurate, but that is
 * unlikely */

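/*
 * Direct system call helper for time(); the syscall instruction
 * clobbers rcx and r11, hence the explicit clobber list.
 */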
static __always_inline long time_syscall(long *t)
{
	long secs;
	asm volatile("syscall"
		     : "=a" (secs)
		     : "0" (__NR_time), "D" (t) : "cc", "r11", "cx", "memory");
	return secs;
}

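/*
 * time() fast path: wall_time_sec is a single naturally aligned word,
 * so one ACCESS_ONCE() read is atomic on x86_64 and no seqlock is
 * needed.
 */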
notrace time_t __vdso_time(time_t *t)
{
	time_t result;

	if (unlikely(!VVAR(vsyscall_gtod_data).sysctl_enabled))
		return time_syscall(t);

	/* This is atomic on x86_64 so we don't need any locks. */
	result = ACCESS_ONCE(VVAR(vsyscall_gtod_data).wall_time_sec);

	if (t)
		*t = result;
	return result;
}
int time(time_t *t)
	__attribute__((weak, alias("__vdso_time")));