#ifndef _ARCH_POWERPC_UACCESS_H
#define _ARCH_POWERPC_UACCESS_H

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

#include <linux/sched.h>
#include <linux/errno.h>
#include <asm/asm-compat.h>
#include <asm/processor.h>
#include <asm/page.h>

#define VERIFY_READ	0
#define VERIFY_WRITE	1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed;
 * with get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 *
 * The fs/ds values are now the highest legal address in the "segment".
 * This simplifies the checking in the routines below.
 */

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(~0UL)
#ifdef __powerpc64__
/* We use TASK_SIZE_USER64 as TASK_SIZE is not constant */
#define USER_DS		MAKE_MM_SEG(TASK_SIZE_USER64 - 1)
#else
#define USER_DS		MAKE_MM_SEG(TASK_SIZE - 1)
#endif

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current->thread.fs)
#define set_fs(val)	(current->thread.fs = (val))

#define segment_eq(a, b)	((a).seg == (b).seg)

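/*
 * A sketch of the classic pattern for temporarily widening the segment
 * so a kernel buffer can be passed to a routine that expects a user
 * pointer (hypothetical caller, not part of this header; the old
 * segment must always be restored):
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	err = some_user_ptr_fn((void __user *)kernel_buf);
 *	set_fs(old_fs);
 */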
#ifdef __powerpc64__
/*
 * This check is sufficient because there is a large enough gap
 * between user addresses and kernel addresses.
 */
#define __access_ok(addr, size, segment)	\
	(((addr) <= (segment).seg) && ((size) <= (segment).seg))

#else

#define __access_ok(addr, size, segment)	\
	(((addr) <= (segment).seg) && \
	 (((size) == 0) || (((size) - 1) <= ((segment).seg - (addr)))))

#endif

#define access_ok(type, addr, size)		\
	(__chk_user_ptr(addr),			\
	 __access_ok((__force unsigned long)(addr), (size), get_fs()))

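/*
 * Usage sketch (hypothetical caller): validate a range once, then use
 * the unchecked __get_user()/__put_user() forms on it.
 *
 *	if (!access_ok(VERIFY_READ, uptr, sizeof(*uptr)))
 *		return -EFAULT;
 *	err = __get_user(val, uptr);
 */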
/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means that when everything goes
 * well, we don't even have to jump over them.  Further, they do not
 * intrude on our cache or tlb entries.
 */

struct exception_table_entry {
	unsigned long insn;
	unsigned long fixup;
};

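/*
 * Sketch of the shape the accessor macros below generate (register
 * names are illustrative; the real ones are chosen by the compiler):
 *
 *	1:	lwz	rX,0(rY)	# the access that may fault
 *	2:	...			# normal path continues here
 *	.section .fixup,"ax"
 *	3:	li	rE,-EFAULT	# out-of-line recovery code
 *		b	2b
 *	.previous
 *	.section __ex_table,"a"
 *		PPC_LONG 1b,3b		# { .insn = 1b, .fixup = 3b }
 *	.previous
 */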
/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly.  We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact.  Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space; that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 *
 * As we use the same address space for kernel and user data on the
 * PowerPC, we can just do these as direct assignments.  (Of course, the
 * exception handling means that it's no longer "just"...)
 *
 * The "user64" versions of the user access functions are versions that
 * allow access of 64-bit data.  The "get_user" functions do not
 * properly handle 64-bit data because the value gets downcast to a long.
 * The "put_user" functions already handle 64-bit data properly, but we
 * add "user64" versions for completeness.
 */
#define get_user(x, ptr) \
	__get_user_check((x), (ptr), sizeof(*(ptr)))
#define put_user(x, ptr) \
	__put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user(x, ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
#define __put_user(x, ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

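/*
 * Usage sketch (hypothetical caller): both checked forms return 0 on
 * success and -EFAULT on a faulting or inaccessible address.
 *
 *	int __user *uptr = ...;
 *	int val;
 *
 *	if (get_user(val, uptr))
 *		return -EFAULT;
 *	if (put_user(val + 1, uptr))
 *		return -EFAULT;
 */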
#ifndef __powerpc64__
#define __get_user64(x, ptr) \
	__get_user64_nocheck((x), (ptr), sizeof(*(ptr)))
#define __put_user64(x, ptr) __put_user(x, ptr)
#endif

#define __get_user_inatomic(x, ptr) \
	__get_user_nosleep((x), (ptr), sizeof(*(ptr)))
#define __put_user_inatomic(x, ptr) \
	__put_user_nosleep((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

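/*
 * The _inatomic forms map to the _nosleep helpers, which skip the
 * might_sleep() debug check, so they are usable from atomic context.
 * A sketch, assuming the caller disables page faults around the access:
 *
 *	pagefault_disable();
 *	err = __get_user_inatomic(val, uptr);
 *	pagefault_enable();
 */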
#define __get_user_unaligned	__get_user
#define __put_user_unaligned	__put_user

extern long __put_user_bad(void);

/*
 * We don't tell gcc that we are accessing memory, but this is OK
 * because we do not write to any memory gcc knows about, so there
 * are no aliasing issues.
 */
#define __put_user_asm(x, addr, err, op)			\
	__asm__ __volatile__(					\
		"1:	" op " %1,0(%2)	# put_user\n"		\
		"2:\n"						\
		".section .fixup,\"ax\"\n"			\
		"3:	li %0,%3\n"				\
		"	b 2b\n"					\
		".previous\n"					\
		".section __ex_table,\"a\"\n"			\
			PPC_LONG_ALIGN "\n"			\
			PPC_LONG "1b,3b\n"			\
		".previous"					\
		: "=r" (err)					\
		: "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))

#ifdef __powerpc64__
#define __put_user_asm2(x, ptr, retval)				\
	__put_user_asm(x, ptr, retval, "std")
#else /* __powerpc64__ */
#define __put_user_asm2(x, addr, err)				\
	__asm__ __volatile__(					\
		"1:	stw %1,0(%2)\n"				\
		"2:	stw %1+1,4(%2)\n"			\
		"3:\n"						\
		".section .fixup,\"ax\"\n"			\
		"4:	li %0,%3\n"				\
		"	b 3b\n"					\
		".previous\n"					\
		".section __ex_table,\"a\"\n"			\
			PPC_LONG_ALIGN "\n"			\
			PPC_LONG "1b,4b\n"			\
			PPC_LONG "2b,4b\n"			\
		".previous"					\
		: "=r" (err)					\
		: "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))
#endif /* __powerpc64__ */

#define __put_user_size(x, ptr, size, retval)			\
do {								\
	retval = 0;						\
	switch (size) {						\
	case 1: __put_user_asm(x, ptr, retval, "stb"); break;	\
	case 2: __put_user_asm(x, ptr, retval, "sth"); break;	\
	case 4: __put_user_asm(x, ptr, retval, "stw"); break;	\
	case 8: __put_user_asm2(x, ptr, retval); break;		\
	default: __put_user_bad();				\
	}							\
} while (0)

#define __put_user_nocheck(x, ptr, size)			\
({								\
	long __pu_err;						\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
	if (!is_kernel_addr((unsigned long)__pu_addr))		\
		might_sleep();					\
	__chk_user_ptr(ptr);					\
	__put_user_size((x), __pu_addr, (size), __pu_err);	\
	__pu_err;						\
})

#define __put_user_check(x, ptr, size)				\
({								\
	long __pu_err = -EFAULT;				\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
	might_sleep();						\
	if (access_ok(VERIFY_WRITE, __pu_addr, size))		\
		__put_user_size((x), __pu_addr, (size), __pu_err); \
	__pu_err;						\
})

#define __put_user_nosleep(x, ptr, size)			\
({								\
	long __pu_err;						\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
	__chk_user_ptr(ptr);					\
	__put_user_size((x), __pu_addr, (size), __pu_err);	\
	__pu_err;						\
})


extern long __get_user_bad(void);

#define __get_user_asm(x, addr, err, op)		\
	__asm__ __volatile__(				\
		"1:	"op" %1,0(%2)	# get_user\n"	\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li %0,%3\n"			\
		"	li %1,0\n"			\
		"	b 2b\n"				\
		".previous\n"				\
		".section __ex_table,\"a\"\n"		\
			PPC_LONG_ALIGN "\n"		\
			PPC_LONG "1b,3b\n"		\
		".previous"				\
		: "=r" (err), "=r" (x)			\
		: "b" (addr), "i" (-EFAULT), "0" (err))

#ifdef __powerpc64__
#define __get_user_asm2(x, addr, err)			\
	__get_user_asm(x, addr, err, "ld")
#else /* __powerpc64__ */
#define __get_user_asm2(x, addr, err)			\
	__asm__ __volatile__(				\
		"1:	lwz %1,0(%2)\n"			\
		"2:	lwz %1+1,4(%2)\n"		\
		"3:\n"					\
		".section .fixup,\"ax\"\n"		\
		"4:	li %0,%3\n"			\
		"	li %1,0\n"			\
		"	li %1+1,0\n"			\
		"	b 3b\n"				\
		".previous\n"				\
		".section __ex_table,\"a\"\n"		\
			PPC_LONG_ALIGN "\n"		\
			PPC_LONG "1b,4b\n"		\
			PPC_LONG "2b,4b\n"		\
		".previous"				\
		: "=r" (err), "=&r" (x)			\
		: "b" (addr), "i" (-EFAULT), "0" (err))
#endif /* __powerpc64__ */

#define __get_user_size(x, ptr, size, retval)			\
do {								\
	retval = 0;						\
	__chk_user_ptr(ptr);					\
	if (size > sizeof(x))					\
		(x) = __get_user_bad();				\
	switch (size) {						\
	case 1: __get_user_asm(x, ptr, retval, "lbz"); break;	\
	case 2: __get_user_asm(x, ptr, retval, "lhz"); break;	\
	case 4: __get_user_asm(x, ptr, retval, "lwz"); break;	\
	case 8: __get_user_asm2(x, ptr, retval); break;		\
	default: (x) = __get_user_bad();			\
	}							\
} while (0)

#define __get_user_nocheck(x, ptr, size)			\
({								\
	long __gu_err;						\
	unsigned long __gu_val;					\
	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);	\
	__chk_user_ptr(ptr);					\
	if (!is_kernel_addr((unsigned long)__gu_addr))		\
		might_sleep();					\
	__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
	(x) = (__typeof__(*(ptr)))__gu_val;			\
	__gu_err;						\
})

#ifndef __powerpc64__
#define __get_user64_nocheck(x, ptr, size)			\
({								\
	long __gu_err;						\
	long long __gu_val;					\
	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);	\
	__chk_user_ptr(ptr);					\
	if (!is_kernel_addr((unsigned long)__gu_addr))		\
		might_sleep();					\
	__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
	(x) = (__typeof__(*(ptr)))__gu_val;			\
	__gu_err;						\
})
#endif /* __powerpc64__ */

#define __get_user_check(x, ptr, size)				\
({								\
	long __gu_err = -EFAULT;				\
	unsigned long __gu_val = 0;				\
	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);	\
	might_sleep();						\
	if (access_ok(VERIFY_READ, __gu_addr, (size)))		\
		__get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
	(x) = (__typeof__(*(ptr)))__gu_val;			\
	__gu_err;						\
})

#define __get_user_nosleep(x, ptr, size)			\
({								\
	long __gu_err;						\
	unsigned long __gu_val;					\
	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);	\
	__chk_user_ptr(ptr);					\
	__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
	(x) = (__typeof__(*(ptr)))__gu_val;			\
	__gu_err;						\
})


/* more complex routines */

extern unsigned long __copy_tofrom_user(void __user *to,
		const void __user *from, unsigned long size);

#ifndef __powerpc64__

static inline unsigned long copy_from_user(void *to,
		const void __user *from, unsigned long n)
{
	unsigned long over;

	if (access_ok(VERIFY_READ, from, n))
		return __copy_tofrom_user((__force void __user *)to, from, n);
	if ((unsigned long)from < TASK_SIZE) {
		over = (unsigned long)from + n - TASK_SIZE;
		return __copy_tofrom_user((__force void __user *)to, from,
				n - over) + over;
	}
	return n;
}

static inline unsigned long copy_to_user(void __user *to,
		const void *from, unsigned long n)
{
	unsigned long over;

	if (access_ok(VERIFY_WRITE, to, n))
		return __copy_tofrom_user(to, (__force void __user *)from, n);
	if ((unsigned long)to < TASK_SIZE) {
		over = (unsigned long)to + n - TASK_SIZE;
		return __copy_tofrom_user(to, (__force void __user *)from,
				n - over) + over;
	}
	return n;
}

#else /* __powerpc64__ */

#define __copy_in_user(to, from, size) \
	__copy_tofrom_user((to), (from), (size))

extern unsigned long copy_from_user(void *to, const void __user *from,
				    unsigned long n);
extern unsigned long copy_to_user(void __user *to, const void *from,
				  unsigned long n);
extern unsigned long copy_in_user(void __user *to, const void __user *from,
				  unsigned long n);

#endif /* __powerpc64__ */

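/*
 * Usage sketch: copy_from_user() returns the number of bytes that could
 * NOT be copied, so zero means success (hypothetical caller):
 *
 *	struct foo kbuf;
 *
 *	if (copy_from_user(&kbuf, ubuf, sizeof(kbuf)))
 *		return -EFAULT;
 */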
static inline unsigned long __copy_from_user_inatomic(void *to,
		const void __user *from, unsigned long n)
{
	if (__builtin_constant_p(n) && (n <= 8)) {
		unsigned long ret = 1;

		switch (n) {
		case 1:
			__get_user_size(*(u8 *)to, from, 1, ret);
			break;
		case 2:
			__get_user_size(*(u16 *)to, from, 2, ret);
			break;
		case 4:
			__get_user_size(*(u32 *)to, from, 4, ret);
			break;
		case 8:
			__get_user_size(*(u64 *)to, from, 8, ret);
			break;
		}
		if (ret == 0)
			return 0;
	}
	return __copy_tofrom_user((__force void __user *)to, from, n);
}

static inline unsigned long __copy_to_user_inatomic(void __user *to,
		const void *from, unsigned long n)
{
	if (__builtin_constant_p(n) && (n <= 8)) {
		unsigned long ret = 1;

		switch (n) {
		case 1:
			__put_user_size(*(u8 *)from, (u8 __user *)to, 1, ret);
			break;
		case 2:
			__put_user_size(*(u16 *)from, (u16 __user *)to, 2, ret);
			break;
		case 4:
			__put_user_size(*(u32 *)from, (u32 __user *)to, 4, ret);
			break;
		case 8:
			__put_user_size(*(u64 *)from, (u64 __user *)to, 8, ret);
			break;
		}
		if (ret == 0)
			return 0;
	}
	return __copy_tofrom_user(to, (__force const void __user *)from, n);
}

static inline unsigned long __copy_from_user(void *to,
		const void __user *from, unsigned long size)
{
	might_sleep();
	return __copy_from_user_inatomic(to, from, size);
}

static inline unsigned long __copy_to_user(void __user *to,
		const void *from, unsigned long size)
{
	might_sleep();
	return __copy_to_user_inatomic(to, from, size);
}

extern unsigned long __clear_user(void __user *addr, unsigned long size);

static inline unsigned long clear_user(void __user *addr, unsigned long size)
{
	might_sleep();
	if (likely(access_ok(VERIFY_WRITE, addr, size)))
		return __clear_user(addr, size);
	if ((unsigned long)addr < TASK_SIZE) {
		unsigned long over = (unsigned long)addr + size - TASK_SIZE;
		return __clear_user(addr, size - over) + over;
	}
	return size;
}

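/*
 * Sketch: like the copy routines, clear_user() returns the number of
 * bytes left unzeroed, so zero means success (hypothetical caller):
 *
 *	if (clear_user(ubuf + len, size - len))
 *		return -EFAULT;
 */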
extern int __strncpy_from_user(char *dst, const char __user *src, long count);

static inline long strncpy_from_user(char *dst, const char __user *src,
		long count)
{
	might_sleep();
	if (likely(access_ok(VERIFY_READ, src, 1)))
		return __strncpy_from_user(dst, src, count);
	return -EFAULT;
}

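/*
 * Sketch (hypothetical caller): on success the return value is the
 * length of the copied string, and -EFAULT signals an inaccessible src;
 * the result may not be NUL-terminated if the buffer filled up.
 *
 *	char kname[64];
 *	long n = strncpy_from_user(kname, uname, sizeof(kname) - 1);
 *
 *	if (n < 0)
 *		return n;
 *	kname[n] = '\0';
 */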
/*
 * Return the size of a string (including the terminating 0).
 *
 * Return 0 on error.
 */
extern int __strnlen_user(const char __user *str, long len, unsigned long top);

/*
 * Returns the length of the string at str (including the null byte),
 * or 0 if we hit a page we can't access,
 * or something > len if we didn't find a null byte.
 *
 * The `top' parameter to __strnlen_user is to make sure that
 * we can never overflow from the user area into kernel space.
 */
static inline int strnlen_user(const char __user *str, long len)
{
	unsigned long top = current->thread.fs.seg;

	if ((unsigned long)str > top)
		return 0;
	return __strnlen_user(str, len, top);
}

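/*
 * Sketch: because the count includes the terminating NUL, a zero result
 * means the string was unreadable, and a result greater than the limit
 * means it was unterminated (hypothetical caller):
 *
 *	long len = strnlen_user(ustr, PATH_MAX);
 *
 *	if (len == 0 || len > PATH_MAX)
 *		return -EFAULT;
 */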
#define strlen_user(str)	strnlen_user((str), 0x7ffffffe)

#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */

#endif	/* _ARCH_POWERPC_UACCESS_H */