/*
 * Userland implementation of gettimeofday() for 32-bit processes in a
 * ppc64 kernel for use in the vDSO
 *
 * Copyright (C) 2004 Benjamin Herrenschmidt (benh@kernel.crashing.org,
 *                    IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/vdso.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>

/* Offset for the low 32-bit part of a field of long type */
#ifdef CONFIG_PPC64
#define LOPART	4
#define TSPEC_TV_SEC	TSPC64_TV_SEC+LOPART
#else
#define LOPART	0
#define TSPEC_TV_SEC	TSPC32_TV_SEC
#endif
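
/* Note: LOPART = 4 relies on the big-endian layout this vDSO is built
 * for, where the low 32 bits of a 64-bit long sit at byte offset 4.
 */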

	.text
/*
 * Exact prototype of gettimeofday
 *
 * int __kernel_gettimeofday(struct timeval *tv, struct timezone *tz);
 *
 */
V_FUNCTION_BEGIN(__kernel_gettimeofday)
  .cfi_startproc
	mflr	r12
  .cfi_register lr,r12

	mr	r10,r3			/* r10 saves tv */
	mr	r11,r4			/* r11 saves tz */
	bl	__get_datapage@local	/* get data page */
	mr	r9, r3			/* datapage ptr in r9 */
	cmplwi	r10,0			/* check if tv is NULL */
	beq	3f
	lis	r7,1000000@ha		/* load up USEC_PER_SEC */
	addi	r7,r7,1000000@l		/* so we get microseconds in r4 */
	bl	__do_get_tspec@local	/* get sec/usec from tb & kernel */
	stw	r3,TVAL32_TV_SEC(r10)
	stw	r4,TVAL32_TV_USEC(r10)

3:	cmplwi	r11,0			/* check if tz is NULL */
	beq	1f
	lwz	r4,CFG_TZ_MINUTEWEST(r9)/* fill tz */
	lwz	r5,CFG_TZ_DSTTIME(r9)
	stw	r4,TZONE_TZ_MINWEST(r11)
	stw	r5,TZONE_TZ_DSTTIME(r11)

1:	mtlr	r12
	crclr	cr0*4+so
	li	r3,0
	blr
  .cfi_endproc
V_FUNCTION_END(__kernel_gettimeofday)

/*
 * Exact prototype of clock_gettime()
 *
 * int __kernel_clock_gettime(clockid_t clock_id, struct timespec *tp);
 *
 */
V_FUNCTION_BEGIN(__kernel_clock_gettime)
  .cfi_startproc
	/* Check for supported clock IDs */
	cmpli	cr0,r3,CLOCK_REALTIME
	cmpli	cr1,r3,CLOCK_MONOTONIC
	cror	cr0*4+eq,cr0*4+eq,cr1*4+eq
	bne	cr0,99f

	mflr	r12			/* r12 saves lr */
  .cfi_register lr,r12
	mr	r11,r4			/* r11 saves tp */
	bl	__get_datapage@local	/* get data page */
	mr	r9,r3			/* datapage ptr in r9 */
	lis	r7,NSEC_PER_SEC@h	/* want nanoseconds */
	ori	r7,r7,NSEC_PER_SEC@l
50:	bl	__do_get_tspec@local	/* get sec/nsec from tb & kernel */
	bne	cr1,80f			/* not monotonic -> all done */

	/*
	 * CLOCK_MONOTONIC
	 */

	/* Now we must fix up using wall to monotonic. We need to snapshot
	 * that value and do the counter trick again. Fortunately, we still
	 * have the counter value in r8 that was returned by __do_get_tspec.
	 * At this point, r3,r4 contain our sec/nsec values, r5 and r6
	 * can be used, r7 contains NSEC_PER_SEC.
	 */

	lwz	r5,WTOM_CLOCK_SEC(r9)
	lwz	r6,WTOM_CLOCK_NSEC(r9)

	/* We now have our offset in r5,r6. We create a fake dependency
	 * on that value and re-check the counter
	 */
	or	r0,r6,r5
	xor	r0,r0,r0
	add	r9,r9,r0
	lwz	r0,(CFG_TB_UPDATE_COUNT+LOPART)(r9)
	cmpl	cr0,r8,r0		/* check if updated */
	bne-	50b

	/* Calculate and store result. Note that this mimics the C code,
	 * which may cause funny results if nsec goes negative... is that
	 * possible at all?
	 */
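	/* For reference, a rough C sketch of the normalization done below
	 * (illustrative only; the variable names are made up, r7 holds
	 * NSEC_PER_SEC at this point):
	 *
	 *	sec  += wtom_sec;
	 *	nsec += wtom_nsec;
	 *	if (nsec >= NSEC_PER_SEC) {
	 *		nsec -= NSEC_PER_SEC;
	 *		sec++;
	 *	} else if (nsec < 0) {
	 *		nsec += NSEC_PER_SEC;
	 *		sec--;
	 *	}
	 */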
	add	r3,r3,r5
	add	r4,r4,r6
	cmpw	cr0,r4,r7
	cmpwi	cr1,r4,0
	blt	1f
	subf	r4,r7,r4
	addi	r3,r3,1
1:	bge	cr1,80f
	addi	r3,r3,-1
	add	r4,r4,r7

80:	stw	r3,TSPC32_TV_SEC(r11)
	stw	r4,TSPC32_TV_NSEC(r11)

	mtlr	r12
	crclr	cr0*4+so
	li	r3,0
	blr

	/*
	 * syscall fallback
	 */
99:
	li	r0,__NR_clock_gettime
  .cfi_restore lr
	sc
	blr
  .cfi_endproc
V_FUNCTION_END(__kernel_clock_gettime)


/*
 * Exact prototype of clock_getres()
 *
 * int __kernel_clock_getres(clockid_t clock_id, struct timespec *res);
 *
 */
V_FUNCTION_BEGIN(__kernel_clock_getres)
  .cfi_startproc
	/* Check for supported clock IDs */
	cmpwi	cr0,r3,CLOCK_REALTIME
	cmpwi	cr1,r3,CLOCK_MONOTONIC
	cror	cr0*4+eq,cr0*4+eq,cr1*4+eq
	bne	cr0,99f

	li	r3,0
	cmpli	cr0,r4,0
	crclr	cr0*4+so
	beqlr
	lis	r5,CLOCK_REALTIME_RES@h
	ori	r5,r5,CLOCK_REALTIME_RES@l
	stw	r3,TSPC32_TV_SEC(r4)
	stw	r5,TSPC32_TV_NSEC(r4)
	blr

	/*
	 * syscall fallback
	 */
99:
	li	r0,__NR_clock_getres
	sc
	blr
  .cfi_endproc
V_FUNCTION_END(__kernel_clock_getres)


/*
 * Exact prototype of time()
 *
 * time_t time(time_t *t);
 *
 */
V_FUNCTION_BEGIN(__kernel_time)
  .cfi_startproc
	mflr	r12
  .cfi_register lr,r12

	mr	r11,r3			/* r11 holds t */
	bl	__get_datapage@local
	mr	r9, r3			/* datapage ptr in r9 */

	lwz	r3,STAMP_XTIME+TSPEC_TV_SEC(r9)

	cmplwi	r11,0			/* check if t is NULL */
	beq	2f
	stw	r3,0(r11)		/* store result at *t */
2:	mtlr	r12
	crclr	cr0*4+so
	blr
  .cfi_endproc
V_FUNCTION_END(__kernel_time)

/*
 * This is the core of clock_gettime() and gettimeofday();
 * it returns the current time in r3 (seconds) and r4.
 * On entry, r7 gives the resolution of r4, either USEC_PER_SEC
 * or NSEC_PER_SEC, giving r4 in microseconds or nanoseconds.
 * It expects the datapage ptr in r9 and doesn't clobber it.
 * It clobbers r0, r5 and r6.
 * On return, r8 contains the counter value that can be reused.
 * This clobbers cr0 but not any other cr field.
 */
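/*
 * For orientation, a rough C sketch of what this routine computes.
 * This is illustrative only -- the names below are made up, the real
 * fields are the CFG_* / STAMP_* offsets in the vDSO datapage, and
 * mul_hi64() stands for "top 64 bits of the widening product":
 *
 *	do {
 *		seq = tb_update_count;               // low word; spin while odd
 *		tb  = read_timebase() - tb_orig_stamp;
 *		// seconds since the xtime stamp as a 32.32 fixed-point value,
 *		// using only the high 32 bits of the tb_to_xs scale factor
 *		fp   = mul_hi64(tb << 12, tb_to_xs >> 32);
 *		frac = (u32)fp + stamp_sec_frac;     // addc: may carry out
 *		sec  = (fp >> 32) + stamp_xtime_sec + carry;   // adde
 *	} while (seq != tb_update_count);
 *	subsec = ((u64)frac * scale) >> 32;          // scale = r7 (usec/nsec per sec)
 */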
__do_get_tspec:
  .cfi_startproc
	/* Check for update count & load values. We use the low
	 * order 32 bits of the update count
	 */
1:	lwz	r8,(CFG_TB_UPDATE_COUNT+LOPART)(r9)
	andi.	r0,r8,1			/* pending update ? loop */
	bne-	1b
	xor	r0,r8,r8		/* create dependency */
	add	r9,r9,r0

	/* Load orig stamp (offset to TB) */
	lwz	r5,CFG_TB_ORIG_STAMP(r9)
	lwz	r6,(CFG_TB_ORIG_STAMP+4)(r9)

	/* Get a stable TB value */
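	/* The 64-bit timebase is read as upper, lower, then upper again;
	 * if the two upper reads differ, the lower half wrapped during
	 * the read and the whole sequence is retried.
	 */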
#ifdef CONFIG_8xx
2:	mftbu	r3
	mftbl	r4
	mftbu	r0
#else
2:	mfspr	r3, SPRN_TBRU
	mfspr	r4, SPRN_TBRL
	mfspr	r0, SPRN_TBRU
#endif
	cmplw	cr0,r3,r0
	bne-	2b

	/* Subtract tb orig stamp and shift left 12 bits.
	 */
	subfc	r4,r6,r4
	subfe	r0,r5,r3
	slwi	r0,r0,12
	rlwimi.	r0,r4,12,20,31
	slwi	r4,r4,12

	/*
	 * Load scale factor & do multiplication.
	 * We only use the high 32 bits of the tb_to_xs value.
	 * Even with a 1GHz timebase clock, the high 32 bits of
	 * tb_to_xs will be at least 4 million, so the error from
	 * ignoring the low 32 bits will be no more than 0.25ppm.
	 * The error will just make the clock run very very slightly
	 * slow until the next time the kernel updates the VDSO data,
	 * at which point the clock will catch up to the kernel's value,
	 * so there is no long-term error accumulation.
	 */
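	/* Back-of-the-envelope check (assuming a 1 GHz timebase, for
	 * illustration): with the scaling used here, seconds roughly
	 * equal tb_delta * tb_to_xs / 2^84, so tb_to_xs ~= 2^84 / 1e9
	 * ~= 1.9e16 and its high 32 bits are ~4.5 million.  Dropping the
	 * low 32 bits then costs at most 1 part in ~4.5e6, about
	 * 0.22 ppm, consistent with the 0.25 ppm bound quoted above.
	 */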
	lwz	r5,CFG_TB_TO_XS(r9)	/* load values */
	mulhwu	r4,r4,r5
	li	r3,0

	beq+	4f			/* skip high part computation if 0 */
	mulhwu	r3,r0,r5
	mullw	r5,r0,r5
	addc	r4,r4,r5
	addze	r3,r3
4:
	/* At this point, we have seconds since the xtime stamp
	 * as a 32.32 fixed-point number in r3 and r4.
	 * Load & add the xtime stamp.
	 */
	lwz	r5,STAMP_XTIME+TSPEC_TV_SEC(r9)
	lwz	r6,STAMP_SEC_FRAC(r9)
	addc	r4,r4,r6
	adde	r3,r3,r5

	/* We create a fake dependency on the result in r3/r4
	 * and re-check the counter
	 */
	or	r6,r4,r3
	xor	r0,r6,r6
	add	r9,r9,r0
	lwz	r0,(CFG_TB_UPDATE_COUNT+LOPART)(r9)
	cmplw	cr0,r8,r0		/* check if updated */
	bne-	1b

	mulhwu	r4,r4,r7		/* convert to micro or nanoseconds */

	blr
  .cfi_endproc