#ifndef _ARCH_POWERPC_UACCESS_H
#define _ARCH_POWERPC_UACCESS_H

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

#include <linux/sched.h>
#include <linux/errno.h>
#include <asm/processor.h>
#include <asm/page.h>

#define VERIFY_READ	0
#define VERIFY_WRITE	1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; if
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 *
 * The fs/ds values are now the highest legal address in the "segment".
 * This simplifies the checking in the routines below.
 */

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(~0UL)
#ifdef __powerpc64__
/* We use TASK_SIZE_USER64 as TASK_SIZE is not constant */
#define USER_DS		MAKE_MM_SEG(TASK_SIZE_USER64 - 1)
#else
#define USER_DS		MAKE_MM_SEG(TASK_SIZE - 1)
#endif

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current->thread.fs)
#define set_fs(val)	(current->thread.fs = (val))

#define segment_eq(a, b)	((a).seg == (b).seg)

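/*
 * Illustrative sketch (not part of this header): the classic pattern for
 * temporarily widening the access limit so a kernel buffer can be passed
 * to code that expects a user pointer.  do_read() and its arguments are
 * hypothetical.
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);			bypass address checking
 *	ret = do_read(fd, (char __user *)kbuf, len);
 *	set_fs(old_fs);				restore the previous limit
 */
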
#ifdef __powerpc64__
/*
 * This check is sufficient because there is a large enough
 * gap between user addresses and kernel addresses.
 */
#define __access_ok(addr, size, segment)	\
	(((addr) <= (segment).seg) && ((size) <= (segment).seg))

#else

#define __access_ok(addr, size, segment)	\
	(((addr) <= (segment).seg) &&		\
	 (((size) == 0) || (((size) - 1) <= ((segment).seg - (addr)))))

#endif

#define access_ok(type, addr, size)		\
	(__chk_user_ptr(addr),			\
	 __access_ok((__force unsigned long)(addr), (size), get_fs()))
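
/*
 * Illustrative sketch (not part of this header): validate a user range once
 * with access_ok() and then use the unchecked __get_user() inside the loop.
 * The buffer and count names are hypothetical.
 *
 *	if (!access_ok(VERIFY_READ, uptr, count * sizeof(u32)))
 *		return -EFAULT;
 *	for (i = 0; i < count; i++)
 *		if (__get_user(buf[i], uptr + i))
 *			return -EFAULT;
 */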

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry {
	unsigned long insn;
	unsigned long fixup;
};
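
/*
 * Illustrative sketch (not part of this header): on a faulting user access
 * the fault handler conceptually does something like the following (the
 * real lookup lives in the generic and arch fault-handling code).
 *
 *	const struct exception_table_entry *fix;
 *
 *	fix = search_exception_tables(regs->nip);
 *	if (fix)
 *		regs->nip = fix->fixup;		resume at the fixup stub
 */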

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly.  We want to return _two_ values in "get_user()"
 * and yet we don't want to pass pointers around, because that is too much
 * of a performance impact.  Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space; that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 *
 * As we use the same address space for kernel and user data on
 * PowerPC, we can just do these as direct assignments.  (Of course, the
 * exception handling means that it's no longer "just"...)
 *
 * The "user64" versions of the user access functions (32-bit only) allow
 * access of 64-bit data.  The 32-bit "get_user" functions do not properly
 * handle 64-bit data because the value gets downcast to a long.  The
 * "put_user" functions already handle 64-bit data properly, but we add
 * "user64" versions for completeness.
 */
#define get_user(x, ptr) \
	__get_user_check((x), (ptr), sizeof(*(ptr)))
#define put_user(x, ptr) \
	__put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user(x, ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
#define __put_user(x, ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
#ifndef __powerpc64__
#define __get_user64(x, ptr) \
	__get_user64_nocheck((x), (ptr), sizeof(*(ptr)))
#define __put_user64(x, ptr) __put_user(x, ptr)
#endif
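
/*
 * Illustrative sketch (not part of this header): typical use of the checked
 * single-value transfer macros in an ioctl-style handler.  Both macros
 * return 0 on success and -EFAULT on a bad user address.  The structure,
 * field, and flag names are hypothetical.
 *
 *	u32 val;
 *
 *	if (get_user(val, &uarg->flags))
 *		return -EFAULT;
 *	val |= MY_FLAG;
 *	if (put_user(val, &uarg->flags))
 *		return -EFAULT;
 */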

#define __get_user_unaligned	__get_user
#define __put_user_unaligned	__put_user

extern long __put_user_bad(void);

/*
 * We don't tell gcc that we are accessing memory, but this is OK
 * because we do not write to any memory gcc knows about, so there
 * are no aliasing issues.
 */
#define __put_user_asm(x, addr, err, op)			\
	__asm__ __volatile__(					\
		"1:	" op " %1,0(%2)	# put_user\n"		\
		"2:\n"						\
		".section .fixup,\"ax\"\n"			\
		"3:	li %0,%3\n"				\
		"	b 2b\n"					\
		".previous\n"					\
		".section __ex_table,\"a\"\n"			\
		"	.balign %5\n"				\
			PPC_LONG "1b,3b\n"			\
		".previous"					\
		: "=r" (err)					\
		: "r" (x), "b" (addr), "i" (-EFAULT), "0" (err),\
		  "i"(sizeof(unsigned long)))

#ifdef __powerpc64__
#define __put_user_asm2(x, ptr, retval)				\
	__put_user_asm(x, ptr, retval, "std")
#else /* __powerpc64__ */
#define __put_user_asm2(x, addr, err)				\
	__asm__ __volatile__(					\
		"1:	stw %1,0(%2)\n"				\
		"2:	stw %1+1,4(%2)\n"			\
		"3:\n"						\
		".section .fixup,\"ax\"\n"			\
		"4:	li %0,%3\n"				\
		"	b 3b\n"					\
		".previous\n"					\
		".section __ex_table,\"a\"\n"			\
		"	.balign %5\n"				\
			PPC_LONG "1b,4b\n"			\
			PPC_LONG "2b,4b\n"			\
		".previous"					\
		: "=r" (err)					\
		: "r" (x), "b" (addr), "i" (-EFAULT), "0" (err),\
		  "i"(sizeof(unsigned long)))
#endif /* __powerpc64__ */

#define __put_user_size(x, ptr, size, retval)			\
do {								\
	retval = 0;						\
	switch (size) {						\
	  case 1: __put_user_asm(x, ptr, retval, "stb"); break;	\
	  case 2: __put_user_asm(x, ptr, retval, "sth"); break;	\
	  case 4: __put_user_asm(x, ptr, retval, "stw"); break;	\
	  case 8: __put_user_asm2(x, ptr, retval); break;	\
	  default: __put_user_bad();				\
	}							\
} while (0)

#define __put_user_nocheck(x, ptr, size)			\
({								\
	long __pu_err;						\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
	if (!is_kernel_addr((unsigned long)__pu_addr))		\
		might_sleep();					\
	__chk_user_ptr(ptr);					\
	__put_user_size((x), __pu_addr, (size), __pu_err);	\
	__pu_err;						\
})

#define __put_user_check(x, ptr, size)				\
({								\
	long __pu_err = -EFAULT;				\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
	might_sleep();						\
	if (access_ok(VERIFY_WRITE, __pu_addr, size))		\
		__put_user_size((x), __pu_addr, (size), __pu_err);\
	__pu_err;						\
})

extern long __get_user_bad(void);

#define __get_user_asm(x, addr, err, op)		\
	__asm__ __volatile__(				\
		"1:	"op" %1,0(%2)	# get_user\n"	\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li %0,%3\n"			\
		"	li %1,0\n"			\
		"	b 2b\n"				\
		".previous\n"				\
		".section __ex_table,\"a\"\n"		\
		"	.balign %5\n"			\
			PPC_LONG "1b,3b\n"		\
		".previous"				\
		: "=r" (err), "=r" (x)			\
		: "b" (addr), "i" (-EFAULT), "0" (err),	\
		  "i"(sizeof(unsigned long)))

#ifdef __powerpc64__
#define __get_user_asm2(x, addr, err)			\
	__get_user_asm(x, addr, err, "ld")
#else /* __powerpc64__ */
#define __get_user_asm2(x, addr, err)			\
	__asm__ __volatile__(				\
		"1:	lwz %1,0(%2)\n"			\
		"2:	lwz %1+1,4(%2)\n"		\
		"3:\n"					\
		".section .fixup,\"ax\"\n"		\
		"4:	li %0,%3\n"			\
		"	li %1,0\n"			\
		"	li %1+1,0\n"			\
		"	b 3b\n"				\
		".previous\n"				\
		".section __ex_table,\"a\"\n"		\
		"	.balign %5\n"			\
			PPC_LONG "1b,4b\n"		\
			PPC_LONG "2b,4b\n"		\
		".previous"				\
		: "=r" (err), "=&r" (x)			\
		: "b" (addr), "i" (-EFAULT), "0" (err),	\
		  "i"(sizeof(unsigned long)))
#endif /* __powerpc64__ */

#define __get_user_size(x, ptr, size, retval)			\
do {								\
	retval = 0;						\
	__chk_user_ptr(ptr);					\
	if (size > sizeof(x))					\
		(x) = __get_user_bad();				\
	switch (size) {						\
	case 1: __get_user_asm(x, ptr, retval, "lbz"); break;	\
	case 2: __get_user_asm(x, ptr, retval, "lhz"); break;	\
	case 4: __get_user_asm(x, ptr, retval, "lwz"); break;	\
	case 8: __get_user_asm2(x, ptr, retval); break;		\
	default: (x) = __get_user_bad();			\
	}							\
} while (0)

#define __get_user_nocheck(x, ptr, size)			\
({								\
	long __gu_err;						\
	unsigned long __gu_val;					\
	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);	\
	__chk_user_ptr(ptr);					\
	if (!is_kernel_addr((unsigned long)__gu_addr))		\
		might_sleep();					\
	__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
	(x) = (__typeof__(*(ptr)))__gu_val;			\
	__gu_err;						\
})

#ifndef __powerpc64__
#define __get_user64_nocheck(x, ptr, size)			\
({								\
	long __gu_err;						\
	long long __gu_val;					\
	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);	\
	__chk_user_ptr(ptr);					\
	if (!is_kernel_addr((unsigned long)__gu_addr))		\
		might_sleep();					\
	__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
	(x) = (__typeof__(*(ptr)))__gu_val;			\
	__gu_err;						\
})
#endif /* __powerpc64__ */

#define __get_user_check(x, ptr, size)				\
({								\
	long __gu_err = -EFAULT;				\
	unsigned long __gu_val = 0;				\
	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);	\
	might_sleep();						\
	if (access_ok(VERIFY_READ, __gu_addr, (size)))		\
		__get_user_size(__gu_val, __gu_addr, (size), __gu_err);\
	(x) = (__typeof__(*(ptr)))__gu_val;			\
	__gu_err;						\
})

/* more complex routines */

extern unsigned long __copy_tofrom_user(void __user *to,
		const void __user *from, unsigned long size);

#ifndef __powerpc64__

extern inline unsigned long copy_from_user(void *to,
		const void __user *from, unsigned long n)
{
	unsigned long over;

	if (access_ok(VERIFY_READ, from, n))
		return __copy_tofrom_user((__force void __user *)to, from, n);
	if ((unsigned long)from < TASK_SIZE) {
		over = (unsigned long)from + n - TASK_SIZE;
		return __copy_tofrom_user((__force void __user *)to, from,
				n - over) + over;
	}
	return n;
}

extern inline unsigned long copy_to_user(void __user *to,
		const void *from, unsigned long n)
{
	unsigned long over;

	if (access_ok(VERIFY_WRITE, to, n))
		return __copy_tofrom_user(to, (__force void __user *)from, n);
	if ((unsigned long)to < TASK_SIZE) {
		over = (unsigned long)to + n - TASK_SIZE;
		return __copy_tofrom_user(to, (__force void __user *)from,
				n - over) + over;
	}
	return n;
}

#else /* __powerpc64__ */

#define __copy_in_user(to, from, size) \
	__copy_tofrom_user((to), (from), (size))

extern unsigned long copy_from_user(void *to, const void __user *from,
				    unsigned long n);
extern unsigned long copy_to_user(void __user *to, const void *from,
				  unsigned long n);
extern unsigned long copy_in_user(void __user *to, const void __user *from,
				  unsigned long n);

#endif /* __powerpc64__ */

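/*
 * Illustrative sketch (not part of this header): copy_from_user() and
 * copy_to_user() return the number of bytes that could NOT be copied,
 * so 0 means success.  The buffer and length names are hypothetical.
 *
 *	if (copy_from_user(kbuf, ubuf, len))
 *		return -EFAULT;
 *	process(kbuf, len);
 *	if (copy_to_user(ubuf, kbuf, len))
 *		return -EFAULT;
 */
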
static inline unsigned long __copy_from_user_inatomic(void *to,
		const void __user *from, unsigned long n)
{
	if (__builtin_constant_p(n) && (n <= 8)) {
		unsigned long ret = 1;	/* sizes without a case fall through below */

		switch (n) {
		case 1:
			__get_user_size(*(u8 *)to, from, 1, ret);
			break;
		case 2:
			__get_user_size(*(u16 *)to, from, 2, ret);
			break;
		case 4:
			__get_user_size(*(u32 *)to, from, 4, ret);
			break;
		case 8:
			__get_user_size(*(u64 *)to, from, 8, ret);
			break;
		}
		if (ret == 0)
			return 0;
	}
	return __copy_tofrom_user((__force void __user *)to, from, n);
}

static inline unsigned long __copy_to_user_inatomic(void __user *to,
		const void *from, unsigned long n)
{
	if (__builtin_constant_p(n) && (n <= 8)) {
		unsigned long ret = 1;	/* sizes without a case fall through below */

		switch (n) {
		case 1:
			__put_user_size(*(u8 *)from, (u8 __user *)to, 1, ret);
			break;
		case 2:
			__put_user_size(*(u16 *)from, (u16 __user *)to, 2, ret);
			break;
		case 4:
			__put_user_size(*(u32 *)from, (u32 __user *)to, 4, ret);
			break;
		case 8:
			__put_user_size(*(u64 *)from, (u64 __user *)to, 8, ret);
			break;
		}
		if (ret == 0)
			return 0;
	}
	return __copy_tofrom_user(to, (__force const void __user *)from, n);
}

static inline unsigned long __copy_from_user(void *to,
		const void __user *from, unsigned long size)
{
	might_sleep();
	return __copy_from_user_inatomic(to, from, size);
}

static inline unsigned long __copy_to_user(void __user *to,
		const void *from, unsigned long size)
{
	might_sleep();
	return __copy_to_user_inatomic(to, from, size);
}

extern unsigned long __clear_user(void __user *addr, unsigned long size);

static inline unsigned long clear_user(void __user *addr, unsigned long size)
{
	might_sleep();
	if (likely(access_ok(VERIFY_WRITE, addr, size)))
		return __clear_user(addr, size);
	if ((unsigned long)addr < TASK_SIZE) {
		unsigned long over = (unsigned long)addr + size - TASK_SIZE;
		return __clear_user(addr, size - over) + over;
	}
	return size;
}

extern int __strncpy_from_user(char *dst, const char __user *src, long count);

static inline long strncpy_from_user(char *dst, const char __user *src,
		long count)
{
	might_sleep();
	if (likely(access_ok(VERIFY_READ, src, 1)))
		return __strncpy_from_user(dst, src, count);
	return -EFAULT;
}

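/*
 * Illustrative sketch (not part of this header): copying a user-supplied
 * string into a fixed kernel buffer.  A negative return means the source
 * was not accessible; the copied string may need explicit termination if
 * it was truncated.  The buffer names are hypothetical.
 *
 *	char name[32];
 *	long len = strncpy_from_user(name, uname, sizeof(name));
 *
 *	if (len < 0)
 *		return len;
 *	name[sizeof(name) - 1] = '\0';
 */
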
/*
 * Return the size of a string (including the terminating 0).
 *
 * Return 0 for error.
 */
extern int __strnlen_user(const char __user *str, long len, unsigned long top);

/*
 * Returns the length of the string at str (including the null byte),
 * or 0 if we hit a page we can't access,
 * or something > len if we didn't find a null byte.
 *
 * The `top' parameter to __strnlen_user is to make sure that
 * we can never overflow from the user area into kernel space.
 */
static inline int strnlen_user(const char __user *str, long len)
{
	unsigned long top = current->thread.fs.seg;

	if ((unsigned long)str > top)
		return 0;
	return __strnlen_user(str, len, top);
}

#define strlen_user(str)	strnlen_user((str), 0x7ffffffe)
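
/*
 * Illustrative sketch (not part of this header): validating a user string
 * before copying it.  A return of 0 means the string was not accessible;
 * a return greater than the limit means no terminating null was found
 * within it.  MAX_ARG_LEN and the buffer names are hypothetical.
 *
 *	long n = strnlen_user(ustr, MAX_ARG_LEN);
 *
 *	if (n == 0 || n > MAX_ARG_LEN)
 *		return -EFAULT;
 *	if (copy_from_user(kstr, ustr, n))
 *		return -EFAULT;
 */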

#endif	/* __ASSEMBLY__ */
#endif	/* __KERNEL__ */

#endif	/* _ARCH_POWERPC_UACCESS_H */