#ifndef _ARCH_POWERPC_UACCESS_H
#define _ARCH_POWERPC_UACCESS_H

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

#include <linux/sched.h>
#include <linux/errno.h>
#include <asm/asm-compat.h>
#include <asm/processor.h>
#include <asm/page.h>

#define VERIFY_READ	0
#define VERIFY_WRITE	1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 *
 * The fs/ds values are now the highest legal address in the "segment".
 * This simplifies the checking in the routines below.
 */

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(~0UL)
#ifdef __powerpc64__
/* We use TASK_SIZE_USER64 as TASK_SIZE is not constant */
#define USER_DS		MAKE_MM_SEG(TASK_SIZE_USER64 - 1)
#else
#define USER_DS		MAKE_MM_SEG(TASK_SIZE - 1)
#endif

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current->thread.fs)
#define set_fs(val)	(current->thread.fs = (val))

#define segment_eq(a, b)	((a).seg == (b).seg)

#define user_addr_max()	(get_fs().seg)

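/*
 * Illustrative sketch, not part of this header: the classic pattern for
 * temporarily lifting the access limit so that the user-access routines
 * below will accept kernel addresses.  The surrounding caller is
 * hypothetical.
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	...call copy_from_user()/copy_to_user() on kernel pointers...
 *	set_fs(old_fs);
 */
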
#ifdef __powerpc64__
/*
 * This check is sufficient because there is a large enough gap
 * between user addresses and kernel addresses: even though we never
 * check addr + size directly, two values that each pass the check
 * cannot sum to an address in the kernel region.
 */
#define __access_ok(addr, size, segment)	\
	(((addr) <= (segment).seg) && ((size) <= (segment).seg))

#else

#define __access_ok(addr, size, segment)	\
	(((addr) <= (segment).seg) &&		\
	 (((size) == 0) || (((size) - 1) <= ((segment).seg - (addr)))))

#endif

#define access_ok(type, addr, size)		\
	(__chk_user_ptr(addr),			\
	 __access_ok((__force unsigned long)(addr), (size), get_fs()))

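/*
 * Illustrative sketch, not part of this header: a typical caller checks
 * a whole user range once with access_ok() and then uses the unchecked
 * __copy/__get/__put variants.  "kbuf", "ubuf" and "len" are
 * hypothetical.
 *
 *	if (!access_ok(VERIFY_READ, ubuf, len))
 *		return -EFAULT;
 *	if (__copy_from_user(kbuf, ubuf, len))
 *		return -EFAULT;
 */
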
/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry {
	unsigned long insn;
	unsigned long fixup;
};

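/*
 * A sketch of how the inline assembly below feeds this table: for an
 * access at local label "1:" with fixup code at "3:", the entry
 * { 1b, 3b } makes the fault handler resume at "3:", which loads the
 * error code and branches back to the main path.
 */
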
/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly.  We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact.  Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space; that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 *
 * As we use the same address space for kernel and user data on the
 * PowerPC, we can just do these as direct assignments.  (Of course, the
 * exception handling means that it's no longer "just"...)
 *
 * The "user64" versions of the user access functions are versions that
 * allow access to 64-bit data.  The "get_user" functions do not
 * properly handle 64-bit data because the value gets downcast to a long.
 * The "put_user" functions already handle 64-bit data properly, but we
 * add "user64" versions for completeness.
 */
#define get_user(x, ptr) \
	__get_user_check((x), (ptr), sizeof(*(ptr)))
#define put_user(x, ptr) \
	__put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user(x, ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
#define __put_user(x, ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

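/*
 * Illustrative sketch, not part of this header: get_user() and put_user()
 * evaluate to 0 on success and -EFAULT on a faulting access.  "uptr" is a
 * hypothetical int __user * argument.
 *
 *	int val;
 *
 *	if (get_user(val, uptr))
 *		return -EFAULT;
 *	if (put_user(val + 1, uptr))
 *		return -EFAULT;
 */
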
#ifndef __powerpc64__
#define __get_user64(x, ptr) \
	__get_user64_nocheck((x), (ptr), sizeof(*(ptr)))
#define __put_user64(x, ptr) __put_user(x, ptr)
#endif

#define __get_user_inatomic(x, ptr) \
	__get_user_nosleep((x), (ptr), sizeof(*(ptr)))
#define __put_user_inatomic(x, ptr) \
	__put_user_nosleep((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

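/*
 * The "_inatomic" forms map to the "_nosleep" helpers below, which skip
 * the might_sleep() debug check; they are the variants to use where
 * sleeping is forbidden.  A sketch, assuming a hypothetical caller that
 * has already done access_ok() on "uptr":
 *
 *	pagefault_disable();
 *	err = __get_user_inatomic(val, uptr);
 *	pagefault_enable();
 */
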
#define __get_user_unaligned	__get_user
#define __put_user_unaligned	__put_user

extern long __put_user_bad(void);

/*
 * We don't tell gcc that we are accessing memory, but this is OK
 * because we do not write to any memory gcc knows about, so there
 * are no aliasing issues.
 */
#define __put_user_asm(x, addr, err, op)			\
	__asm__ __volatile__(					\
		"1:	" op " %1,0(%2)	# put_user\n"		\
		"2:\n"						\
		".section .fixup,\"ax\"\n"			\
		"3:	li %0,%3\n"				\
		"	b 2b\n"					\
		".previous\n"					\
		".section __ex_table,\"a\"\n"			\
			PPC_LONG_ALIGN "\n"			\
			PPC_LONG "1b,3b\n"			\
		".previous"					\
		: "=r" (err)					\
		: "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))

#ifdef __powerpc64__
#define __put_user_asm2(x, ptr, retval)				\
	__put_user_asm(x, ptr, retval, "std")
#else /* __powerpc64__ */
#define __put_user_asm2(x, addr, err)				\
	__asm__ __volatile__(					\
		"1:	stw %1,0(%2)\n"				\
		"2:	stw %1+1,4(%2)\n"			\
		"3:\n"						\
		".section .fixup,\"ax\"\n"			\
		"4:	li %0,%3\n"				\
		"	b 3b\n"					\
		".previous\n"					\
		".section __ex_table,\"a\"\n"			\
			PPC_LONG_ALIGN "\n"			\
			PPC_LONG "1b,4b\n"			\
			PPC_LONG "2b,4b\n"			\
		".previous"					\
		: "=r" (err)					\
		: "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))
#endif /* __powerpc64__ */

#define __put_user_size(x, ptr, size, retval)			\
do {								\
	retval = 0;						\
	switch (size) {						\
	case 1: __put_user_asm(x, ptr, retval, "stb"); break;	\
	case 2: __put_user_asm(x, ptr, retval, "sth"); break;	\
	case 4: __put_user_asm(x, ptr, retval, "stw"); break;	\
	case 8: __put_user_asm2(x, ptr, retval); break;		\
	default: __put_user_bad();				\
	}							\
} while (0)

#define __put_user_nocheck(x, ptr, size)			\
({								\
	long __pu_err;						\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
	if (!is_kernel_addr((unsigned long)__pu_addr))		\
		might_sleep();					\
	__chk_user_ptr(ptr);					\
	__put_user_size((x), __pu_addr, (size), __pu_err);	\
	__pu_err;						\
})

#define __put_user_check(x, ptr, size)				\
({								\
	long __pu_err = -EFAULT;				\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
	might_sleep();						\
	if (access_ok(VERIFY_WRITE, __pu_addr, size))		\
		__put_user_size((x), __pu_addr, (size), __pu_err); \
	__pu_err;						\
})

#define __put_user_nosleep(x, ptr, size)			\
({								\
	long __pu_err;						\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
	__chk_user_ptr(ptr);					\
	__put_user_size((x), __pu_addr, (size), __pu_err);	\
	__pu_err;						\
})


extern long __get_user_bad(void);

#define __get_user_asm(x, addr, err, op)		\
	__asm__ __volatile__(				\
		"1:	" op " %1,0(%2)	# get_user\n"	\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li %0,%3\n"			\
		"	li %1,0\n"			\
		"	b 2b\n"				\
		".previous\n"				\
		".section __ex_table,\"a\"\n"		\
			PPC_LONG_ALIGN "\n"		\
			PPC_LONG "1b,3b\n"		\
		".previous"				\
		: "=r" (err), "=r" (x)			\
		: "b" (addr), "i" (-EFAULT), "0" (err))

#ifdef __powerpc64__
#define __get_user_asm2(x, addr, err)			\
	__get_user_asm(x, addr, err, "ld")
#else /* __powerpc64__ */
#define __get_user_asm2(x, addr, err)			\
	__asm__ __volatile__(				\
		"1:	lwz %1,0(%2)\n"			\
		"2:	lwz %1+1,4(%2)\n"		\
		"3:\n"					\
		".section .fixup,\"ax\"\n"		\
		"4:	li %0,%3\n"			\
		"	li %1,0\n"			\
		"	li %1+1,0\n"			\
		"	b 3b\n"				\
		".previous\n"				\
		".section __ex_table,\"a\"\n"		\
			PPC_LONG_ALIGN "\n"		\
			PPC_LONG "1b,4b\n"		\
			PPC_LONG "2b,4b\n"		\
		".previous"				\
		: "=r" (err), "=&r" (x)			\
		: "b" (addr), "i" (-EFAULT), "0" (err))
#endif /* __powerpc64__ */

#define __get_user_size(x, ptr, size, retval)			\
do {								\
	retval = 0;						\
	__chk_user_ptr(ptr);					\
	if (size > sizeof(x))					\
		(x) = __get_user_bad();				\
	switch (size) {						\
	case 1: __get_user_asm(x, ptr, retval, "lbz"); break;	\
	case 2: __get_user_asm(x, ptr, retval, "lhz"); break;	\
	case 4: __get_user_asm(x, ptr, retval, "lwz"); break;	\
	case 8: __get_user_asm2(x, ptr, retval); break;		\
	default: (x) = __get_user_bad();			\
	}							\
} while (0)

#define __get_user_nocheck(x, ptr, size)			\
({								\
	long __gu_err;						\
	unsigned long __gu_val;					\
	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);	\
	__chk_user_ptr(ptr);					\
	if (!is_kernel_addr((unsigned long)__gu_addr))		\
		might_sleep();					\
	__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
	(x) = (__typeof__(*(ptr)))__gu_val;			\
	__gu_err;						\
})

#ifndef __powerpc64__
#define __get_user64_nocheck(x, ptr, size)			\
({								\
	long __gu_err;						\
	long long __gu_val;					\
	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);	\
	__chk_user_ptr(ptr);					\
	if (!is_kernel_addr((unsigned long)__gu_addr))		\
		might_sleep();					\
	__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
	(x) = (__typeof__(*(ptr)))__gu_val;			\
	__gu_err;						\
})
#endif /* __powerpc64__ */

#define __get_user_check(x, ptr, size)				\
({								\
	long __gu_err = -EFAULT;				\
	unsigned long __gu_val = 0;				\
	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);	\
	might_sleep();						\
	if (access_ok(VERIFY_READ, __gu_addr, (size)))		\
		__get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
	(x) = (__typeof__(*(ptr)))__gu_val;			\
	__gu_err;						\
})

#define __get_user_nosleep(x, ptr, size)			\
({								\
	long __gu_err;						\
	unsigned long __gu_val;					\
	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);	\
	__chk_user_ptr(ptr);					\
	__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
	(x) = (__typeof__(*(ptr)))__gu_val;			\
	__gu_err;						\
})


/* more complex routines */

extern unsigned long __copy_tofrom_user(void __user *to,
		const void __user *from, unsigned long size);

#ifndef __powerpc64__

static inline unsigned long copy_from_user(void *to,
		const void __user *from, unsigned long n)
{
	unsigned long over;

	if (access_ok(VERIFY_READ, from, n))
		return __copy_tofrom_user((__force void __user *)to, from, n);
	if ((unsigned long)from < TASK_SIZE) {
		over = (unsigned long)from + n - TASK_SIZE;
		return __copy_tofrom_user((__force void __user *)to, from,
				n - over) + over;
	}
	return n;
}

static inline unsigned long copy_to_user(void __user *to,
		const void *from, unsigned long n)
{
	unsigned long over;

	if (access_ok(VERIFY_WRITE, to, n))
		return __copy_tofrom_user(to, (__force void __user *)from, n);
	if ((unsigned long)to < TASK_SIZE) {
		over = (unsigned long)to + n - TASK_SIZE;
		return __copy_tofrom_user(to, (__force void __user *)from,
				n - over) + over;
	}
	return n;
}

#else /* __powerpc64__ */

#define __copy_in_user(to, from, size) \
	__copy_tofrom_user((to), (from), (size))

extern unsigned long copy_from_user(void *to, const void __user *from,
				    unsigned long n);
extern unsigned long copy_to_user(void __user *to, const void *from,
				  unsigned long n);
extern unsigned long copy_in_user(void __user *to, const void __user *from,
				  unsigned long n);

#endif /* __powerpc64__ */

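/*
 * Illustrative sketch, not part of this header: as with the generic
 * kernel interface, these return the number of bytes that could not be
 * copied, so zero means success.  "kbuf", "ubuf" and "len" are
 * hypothetical.
 *
 *	if (copy_from_user(kbuf, ubuf, len))
 *		return -EFAULT;
 */
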
static inline unsigned long __copy_from_user_inatomic(void *to,
		const void __user *from, unsigned long n)
{
	/*
	 * For small constant sizes, use a single inline load so the
	 * out-of-line copy loop can be avoided entirely; fall back to
	 * __copy_tofrom_user() for other sizes or on a fault.
	 */
	if (__builtin_constant_p(n) && (n <= 8)) {
		unsigned long ret = 1;

		switch (n) {
		case 1:
			__get_user_size(*(u8 *)to, from, 1, ret);
			break;
		case 2:
			__get_user_size(*(u16 *)to, from, 2, ret);
			break;
		case 4:
			__get_user_size(*(u32 *)to, from, 4, ret);
			break;
		case 8:
			__get_user_size(*(u64 *)to, from, 8, ret);
			break;
		}
		if (ret == 0)
			return 0;
	}
	return __copy_tofrom_user((__force void __user *)to, from, n);
}

static inline unsigned long __copy_to_user_inatomic(void __user *to,
		const void *from, unsigned long n)
{
	/* Same small-constant-size fast path as __copy_from_user_inatomic(). */
	if (__builtin_constant_p(n) && (n <= 8)) {
		unsigned long ret = 1;

		switch (n) {
		case 1:
			__put_user_size(*(u8 *)from, (u8 __user *)to, 1, ret);
			break;
		case 2:
			__put_user_size(*(u16 *)from, (u16 __user *)to, 2, ret);
			break;
		case 4:
			__put_user_size(*(u32 *)from, (u32 __user *)to, 4, ret);
			break;
		case 8:
			__put_user_size(*(u64 *)from, (u64 __user *)to, 8, ret);
			break;
		}
		if (ret == 0)
			return 0;
	}
	return __copy_tofrom_user(to, (__force const void __user *)from, n);
}

static inline unsigned long __copy_from_user(void *to,
		const void __user *from, unsigned long size)
{
	might_sleep();
	return __copy_from_user_inatomic(to, from, size);
}

static inline unsigned long __copy_to_user(void __user *to,
		const void *from, unsigned long size)
{
	might_sleep();
	return __copy_to_user_inatomic(to, from, size);
}

extern unsigned long __clear_user(void __user *addr, unsigned long size);

static inline unsigned long clear_user(void __user *addr, unsigned long size)
{
	might_sleep();
	if (likely(access_ok(VERIFY_WRITE, addr, size)))
		return __clear_user(addr, size);
	if ((unsigned long)addr < TASK_SIZE) {
		unsigned long over = (unsigned long)addr + size - TASK_SIZE;
		return __clear_user(addr, size - over) + over;
	}
	return size;
}

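/*
 * Illustrative sketch, not part of this header: a non-zero return from
 * clear_user() is the number of bytes left unzeroed.  "ubuf" and "len"
 * are hypothetical.
 *
 *	if (clear_user(ubuf, len))
 *		return -EFAULT;
 */
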
extern long strncpy_from_user(char *dst, const char __user *src, long count);
extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);

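/*
 * Illustrative sketch, not part of this header: strncpy_from_user()
 * returns the length of the string copied (excluding the NUL), the
 * count if the buffer was filled without finding a NUL, or -EFAULT on
 * a fault.  "uname" is hypothetical.
 *
 *	char name[32];
 *	long len = strncpy_from_user(name, uname, sizeof(name));
 *
 *	if (len < 0)
 *		return len;
 *	if (len == sizeof(name))
 *		return -ENAMETOOLONG;	(buffer filled, no NUL)
 */
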
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */

#endif /* _ARCH_POWERPC_UACCESS_H */