/*
 * Copyright 2006 Andi Kleen, SUSE Labs.
 * Subject to the GNU Public License, v.2
 *
 * Fast user context implementation of clock_gettime, gettimeofday, and time.
 *
 * The code should have no internal unresolved relocations.
 * Check with readelf after changing.
 */

/* Disable profiling for userspace code: */
#define DISABLE_BRANCH_PROFILING

#include <linux/kernel.h>
#include <linux/posix-timers.h>
#include <linux/time.h>
#include <linux/string.h>
#include <asm/vsyscall.h>
#include <asm/fixmap.h>
#include <asm/vgtod.h>
#include <asm/timex.h>
#include <asm/hpet.h>
#include <asm/unistd.h>
#include <asm/io.h>

#define gtod (&VVAR(vsyscall_gtod_data))

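/*
 * All of the data read below comes in through VVAR(vsyscall_gtod_data),
 * a kernel-maintained timekeeping structure exported read-only into
 * every process, which is what lets these functions run to completion
 * without entering the kernel.
 */
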
notrace static cycle_t vread_tsc(void)
{
	cycle_t ret;
	u64 last;

	/*
	 * Empirically, a fence (of type that depends on the CPU)
	 * before rdtsc is enough to ensure that rdtsc is ordered
	 * with respect to loads. The various CPU manuals are unclear
	 * as to whether rdtsc can be reordered with later loads,
	 * but no one has ever seen it happen.
	 */
	rdtsc_barrier();
	ret = (cycle_t)vget_cycles();

	last = VVAR(vsyscall_gtod_data).clock.cycle_last;

	if (likely(ret >= last))
		return ret;

	/*
	 * GCC likes to generate cmov here, but this branch is extremely
	 * predictable (it's just a function of time and the likely is
	 * very likely) and there's a data dependence, so force GCC
	 * to generate a branch instead. I don't barrier() because
	 * we don't actually need a barrier, and if this function
	 * ever gets inlined it will generate worse code.
	 */
	asm volatile ("");
	return last;
}

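/*
 * 0xf0 is the offset of the HPET main counter register within the
 * memory-mapped HPET register block that the VSYSCALL_HPET fixmap
 * exposes to user mode.
 */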
static notrace cycle_t vread_hpet(void)
{
	return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
}

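/*
 * Fallback paths: when the clocksource in use has no usable vclock
 * mode, the current time can only be obtained from the kernel, so
 * these issue the real system calls directly.
 */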
notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
{
	long ret;
	asm("syscall" : "=a" (ret) :
	    "0" (__NR_clock_gettime), "D" (clock), "S" (ts) : "memory");
	return ret;
}

notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
{
	long ret;

	asm("syscall" : "=a" (ret) :
	    "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
	return ret;
}

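/*
 * Convert the cycles elapsed since the last timekeeping update into
 * "shifted nanoseconds":
 *
 *	snsec = ((cycles - cycle_last) & mask) * mult
 *
 * The callers add this to a base value and then shift right by
 * gtod->clock.shift to obtain nanoseconds.
 */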
notrace static inline u64 vgetsns(void)
{
	long v;
	cycles_t cycles;
	if (gtod->clock.vclock_mode == VCLOCK_TSC)
		cycles = vread_tsc();
	else if (gtod->clock.vclock_mode == VCLOCK_HPET)
		cycles = vread_hpet();
	else
		return 0;
	v = (cycles - gtod->clock.cycle_last) & gtod->clock.mask;
	return v * gtod->clock.mult;
}

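/*
 * The kernel bumps gtod->seq around every timekeeping update; readers
 * that race with an update simply retry, so they can never observe a
 * torn, half-updated set of fields.
 */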
/* Code size doesn't matter (vdso is 4k anyway) and this is faster. */
notrace static int __always_inline do_realtime(struct timespec *ts)
{
	unsigned long seq;
	u64 ns;
	int mode;

	ts->tv_nsec = 0;
	do {
		seq = read_seqcount_begin(&gtod->seq);
		mode = gtod->clock.vclock_mode;
		ts->tv_sec = gtod->wall_time_sec;
		ns = gtod->wall_time_snsec;
		ns += vgetsns();
		ns >>= gtod->clock.shift;
	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));

	timespec_add_ns(ts, ns);
	return mode;
}

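/* Identical to do_realtime(), but against the monotonic time base. */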
notrace static int do_monotonic(struct timespec *ts)
{
	unsigned long seq;
	u64 ns;
	int mode;

	ts->tv_nsec = 0;
	do {
		seq = read_seqcount_begin(&gtod->seq);
		mode = gtod->clock.vclock_mode;
		ts->tv_sec = gtod->monotonic_time_sec;
		ns = gtod->monotonic_time_snsec;
		ns += vgetsns();
		ns >>= gtod->clock.shift;
	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
	timespec_add_ns(ts, ns);

	return mode;
}

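/*
 * The coarse clocks never read a hardware counter: they copy the
 * timestamp of the last timekeeping update, trading resolution for
 * speed, and therefore never need the syscall fallback.
 */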
notrace static int do_realtime_coarse(struct timespec *ts)
{
	unsigned long seq;
	do {
		seq = read_seqcount_begin(&gtod->seq);
		ts->tv_sec = gtod->wall_time_coarse.tv_sec;
		ts->tv_nsec = gtod->wall_time_coarse.tv_nsec;
	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
	return 0;
}

notrace static int do_monotonic_coarse(struct timespec *ts)
{
	unsigned long seq;
	do {
		seq = read_seqcount_begin(&gtod->seq);
		ts->tv_sec = gtod->monotonic_time_coarse.tv_sec;
		ts->tv_nsec = gtod->monotonic_time_coarse.tv_nsec;
	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));

	return 0;
}

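/*
 * Exported entry points. Each returns 0 on success; if the fast path
 * reports VCLOCK_NONE, the work is redone via the real system call,
 * which also overwrites any partial result.
 */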
notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
{
	int ret = VCLOCK_NONE;

	switch (clock) {
	case CLOCK_REALTIME:
		ret = do_realtime(ts);
		break;
	case CLOCK_MONOTONIC:
		ret = do_monotonic(ts);
		break;
	case CLOCK_REALTIME_COARSE:
		return do_realtime_coarse(ts);
	case CLOCK_MONOTONIC_COARSE:
		return do_monotonic_coarse(ts);
	}

	if (ret == VCLOCK_NONE)
		return vdso_fallback_gettime(clock, ts);
	return 0;
}
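/*
 * The weak alias exports the function under its standard name as well;
 * being weak, any strong definition of the same symbol still wins.
 */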
int clock_gettime(clockid_t, struct timespec *)
	__attribute__((weak, alias("__vdso_clock_gettime")));

notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
{
	long ret = VCLOCK_NONE;

	if (likely(tv != NULL)) {
		/*
		 * struct timeval and struct timespec have the same
		 * layout, so do_realtime() can write nanoseconds into
		 * the tv_usec slot, which is then scaled to microseconds.
		 */
		BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
			     offsetof(struct timespec, tv_nsec) ||
			     sizeof(*tv) != sizeof(struct timespec));
		ret = do_realtime((struct timespec *)tv);
		tv->tv_usec /= 1000;
	}
	if (unlikely(tz != NULL)) {
		/* Avoid memcpy. Some old compilers fail to inline it */
		tz->tz_minuteswest = gtod->sys_tz.tz_minuteswest;
		tz->tz_dsttime = gtod->sys_tz.tz_dsttime;
	}

	if (ret == VCLOCK_NONE)
		return vdso_fallback_gtod(tv, tz);
	return 0;
}
int gettimeofday(struct timeval *, struct timezone *)
	__attribute__((weak, alias("__vdso_gettimeofday")));

/*
 * This will break when the xtime seconds get inaccurate, but that is
 * unlikely
 */
notrace time_t __vdso_time(time_t *t)
{
	/* This is atomic on x86_64 so we don't need any locks. */
	time_t result = ACCESS_ONCE(VVAR(vsyscall_gtod_data).wall_time_sec);

	if (t)
		*t = result;
	return result;
}
int time(time_t *t)
	__attribute__((weak, alias("__vdso_time")));