/*
 * User space memory access functions
 *
 * Copyright (C) 1999, 2002  Niibe Yutaka
 * Copyright (C) 2003 - 2008  Paul Mundt
 *
 * Based on:
 *     MIPS implementation version 1.15 by
 *         Copyright (C) 1996, 1997, 1998 by Ralf Baechle
 *     and i386 version.
 */
#ifndef __ASM_SH_UACCESS_32_H
#define __ASM_SH_UACCESS_32_H

#include <linux/errno.h>
#include <linux/sched.h>
#include <asm/segment.h>

#define VERIFY_READ	0
#define VERIFY_WRITE	1

#if !defined(CONFIG_MMU)
/* NOMMU is always true */
#define __addr_ok(addr)	(1)

/*
 * __access_ok: Check if address with size is OK or not.
 *
 * If we don't have an MMU (or if it's disabled), the only thing we really
 * have to look out for is whether the address resides somewhere outside
 * of what available RAM we have.
 */
static inline int __access_ok(unsigned long addr, unsigned long size)
{
        return 1;
}
#else /* CONFIG_MMU */
#define __addr_ok(addr) \
        ((unsigned long)(addr) < (current_thread_info()->addr_limit.seg))

/*
 * __access_ok: Check if address with size is OK or not.
 *
 * Uhhuh, this needs 33-bit arithmetic. We have a carry..
 *
 * sum := addr + size;  carry? --> flag = true;
 * if (sum >= addr_limit) flag = true;
 */
static inline int __access_ok(unsigned long addr, unsigned long size)
{
        unsigned long flag, sum;

        __asm__("clrt\n\t"
                "addc   %3, %1\n\t"
                "movt   %0\n\t"
                "cmp/hi %4, %1\n\t"
                "rotcl  %0"
                : "=&r" (flag), "=r" (sum)
                : "1" (addr), "r" (size),
                  "r" (current_thread_info()->addr_limit.seg)
                : "t");
        return flag == 0;
}
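
/*
 * Worked example of the check above (illustrative; assumes the usual
 * 0x80000000 user space limit, i.e. addr_limit.seg == 0x80000000):
 *
 *   addr = 0x7ffffff0, size = 0x08: sum = 0x7ffffff8, no carry, and
 *      sum is below the limit                  -> flag == 0, OK.
 *   addr = 0x7ffffff0, size = 0x20: sum = 0x80000010, no carry, but
 *      cmp/hi sees sum above the limit         -> flag != 0, rejected.
 *   addr = 0xfffffff0, size = 0x20: addc overflows; the carry (the
 *      33rd bit) is rotated into flag          -> flag != 0, rejected.
 */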
#endif /* CONFIG_MMU */

#define access_ok(type, addr, size)     \
        (__chk_user_ptr(addr),          \
         __access_ok((unsigned long __force)(addr), (size)))
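
/*
 * Typical use (an illustrative sketch; 'ubuf' and 'len' are made-up
 * names):
 *
 *      if (!access_ok(VERIFY_WRITE, ubuf, len))
 *              return -EFAULT;
 *      ... ubuf may now be accessed with the unchecked routines ...
 */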

/*
 * These should become the main single-value transfer routines ...
 * They automatically use the right size if we just have the right
 * pointer type ...
 *
 * As SuperH uses the same address space for kernel and user data, we
 * can just do these as direct assignments.
 *
 * Be careful not to
 * (a) re-use the arguments for side effects (sizeof is ok)
 * (b) require any knowledge of processes at this stage
 */
#define put_user(x,ptr)         __put_user_check((x), (ptr), sizeof(*(ptr)))
#define get_user(x,ptr)         __get_user_check((x), (ptr), sizeof(*(ptr)))
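
/*
 * Example (illustrative only; 'val' and 'uptr' are made-up names):
 *
 *      int val;
 *
 *      if (get_user(val, uptr))        (checked read of the user word)
 *              return -EFAULT;
 *      if (put_user(val + 1, uptr))    (checked write back)
 *              return -EFAULT;
 */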

/*
 * The "__xxx" versions do not do address space checking, useful when
 * doing multiple accesses to the same area (the user has to do the
 * checks by hand with "access_ok()")
 */
#define __put_user(x,ptr)       __put_user_nocheck((x), (ptr), sizeof(*(ptr)))
#define __get_user(x,ptr)       __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
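
/*
 * Intended pattern (an illustrative sketch; 'uarr', 'a', 'b' are
 * made-up names): one access_ok() check up front, then repeated
 * unchecked accesses to the same area.
 *
 *      int a, b, err;
 *
 *      if (!access_ok(VERIFY_READ, uarr, 2 * sizeof(int)))
 *              return -EFAULT;
 *      err  = __get_user(a, &uarr[0]);
 *      err |= __get_user(b, &uarr[1]);
 *      if (err)
 *              return -EFAULT;
 */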

/*
 * Dummy type used by the "m" constraints below; it makes GCC treat the
 * access as potentially covering a large region around the address
 * rather than a single word.
 */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

#define __get_user_size(x,ptr,size,retval)                      \
do {                                                            \
        retval = 0;                                             \
        switch (size) {                                         \
        case 1:                                                 \
                __get_user_asm(x, ptr, retval, "b");            \
                break;                                          \
        case 2:                                                 \
                __get_user_asm(x, ptr, retval, "w");            \
                break;                                          \
        case 4:                                                 \
                __get_user_asm(x, ptr, retval, "l");            \
                break;                                          \
        default:                                                \
                __get_user_unknown();                           \
                break;                                          \
        }                                                       \
} while (0)

#define __get_user_nocheck(x,ptr,size)                          \
({                                                              \
        long __gu_err;                                          \
        unsigned long __gu_val;                                 \
        const __typeof__(*(ptr)) __user *__gu_addr = (ptr);     \
        __chk_user_ptr(ptr);                                    \
        __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
        (x) = (__typeof__(*(ptr)))__gu_val;                     \
        __gu_err;                                               \
})

#define __get_user_check(x,ptr,size)                            \
({                                                              \
        long __gu_err = -EFAULT;                                \
        unsigned long __gu_val = 0;                             \
        const __typeof__(*(ptr)) *__gu_addr = (ptr);            \
        if (likely(access_ok(VERIFY_READ, __gu_addr, (size))))  \
                __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
        (x) = (__typeof__(*(ptr)))__gu_val;                     \
        __gu_err;                                               \
})

#define __get_user_asm(x, addr, err, insn) \
({ \
__asm__ __volatile__( \
        "1:\n\t" \
        "mov." insn "   %2, %1\n\t" \
        "2:\n" \
        ".section       .fixup,\"ax\"\n" \
        "3:\n\t" \
        "mov    #0, %1\n\t" \
        "mov.l  4f, %0\n\t" \
        "jmp    @%0\n\t" \
        " mov   %3, %0\n\t" \
        ".balign        4\n" \
        "4:     .long   2b\n\t" \
        ".previous\n" \
        ".section       __ex_table,\"a\"\n\t" \
        ".long  1b, 3b\n\t" \
        ".previous" \
        : "=&r" (err), "=&r" (x) \
        : "m" (__m(addr)), "i" (-EFAULT), "0" (err)); })
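
/*
 * How the fixup above works: if the load at label 1: faults, the
 * __ex_table entry (1b, 3b) makes the fault handler resume at label
 * 3:, which zeroes the destination, sets err to -EFAULT (loaded in
 * the delay slot of the jmp), and jumps back to label 2: to continue.
 */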

extern void __get_user_unknown(void);

#define __put_user_size(x,ptr,size,retval)              \
do {                                                    \
        retval = 0;                                     \
        switch (size) {                                 \
        case 1:                                         \
                __put_user_asm(x, ptr, retval, "b");    \
                break;                                  \
        case 2:                                         \
                __put_user_asm(x, ptr, retval, "w");    \
                break;                                  \
        case 4:                                         \
                __put_user_asm(x, ptr, retval, "l");    \
                break;                                  \
        case 8:                                         \
                __put_user_u64(x, ptr, retval);         \
                break;                                  \
        default:                                        \
                __put_user_unknown();                   \
        }                                               \
} while (0)

#define __put_user_nocheck(x,ptr,size)                  \
({                                                      \
        long __pu_err;                                  \
        __typeof__(*(ptr)) __user *__pu_addr = (ptr);   \
        __chk_user_ptr(ptr);                            \
        __put_user_size((x), __pu_addr, (size), __pu_err); \
        __pu_err;                                       \
})

#define __put_user_check(x,ptr,size)                    \
({                                                      \
        long __pu_err = -EFAULT;                        \
        __typeof__(*(ptr)) __user *__pu_addr = (ptr);   \
        if (likely(access_ok(VERIFY_WRITE, __pu_addr, size))) \
                __put_user_size((x), __pu_addr, (size), __pu_err); \
        __pu_err;                                       \
})

#define __put_user_asm(x, addr, err, insn) \
({ \
__asm__ __volatile__( \
        "1:\n\t" \
        "mov." insn "   %1, %2\n\t" \
        "2:\n" \
        ".section       .fixup,\"ax\"\n" \
        "3:\n\t" \
        "mov.l  4f, %0\n\t" \
        "jmp    @%0\n\t" \
        " mov   %3, %0\n\t" \
        ".balign        4\n" \
        "4:     .long   2b\n\t" \
        ".previous\n" \
        ".section       __ex_table,\"a\"\n\t" \
        ".long  1b, 3b\n\t" \
        ".previous" \
        : "=&r" (err) \
        : "r" (x), "m" (__m(addr)), "i" (-EFAULT), "0" (err) \
        : "memory"); })

#if defined(CONFIG_CPU_LITTLE_ENDIAN)
#define __put_user_u64(val,addr,retval) \
({ \
__asm__ __volatile__( \
        "1:\n\t" \
        "mov.l  %R1, %2\n\t" \
        "mov.l  %S1, %T2\n\t" \
        "2:\n" \
        ".section       .fixup,\"ax\"\n" \
        "3:\n\t" \
        "mov.l  4f, %0\n\t" \
        "jmp    @%0\n\t" \
        " mov   %3, %0\n\t" \
        ".balign        4\n" \
        "4:     .long   2b\n\t" \
        ".previous\n" \
        ".section       __ex_table,\"a\"\n\t" \
        ".long  1b, 3b\n\t" \
        ".previous" \
        : "=r" (retval) \
        : "r" (val), "m" (__m(addr)), "i" (-EFAULT), "0" (retval) \
        : "memory"); })
#else
#define __put_user_u64(val,addr,retval) \
({ \
__asm__ __volatile__( \
        "1:\n\t" \
        "mov.l  %S1, %2\n\t" \
        "mov.l  %R1, %T2\n\t" \
        "2:\n" \
        ".section       .fixup,\"ax\"\n" \
        "3:\n\t" \
        "mov.l  4f, %0\n\t" \
        "jmp    @%0\n\t" \
        " mov   %3, %0\n\t" \
        ".balign        4\n" \
        "4:     .long   2b\n\t" \
        ".previous\n" \
        ".section       __ex_table,\"a\"\n\t" \
        ".long  1b, 3b\n\t" \
        ".previous" \
        : "=r" (retval) \
        : "r" (val), "m" (__m(addr)), "i" (-EFAULT), "0" (retval) \
        : "memory"); })
#endif
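
/*
 * A note on the operand modifiers above (our reading of the SH GCC
 * backend): %R1 and %S1 select the low and high 32-bit halves of the
 * 64-bit 'val' register pair, and %T2 names the memory word following
 * %2. The two branches of the #ifdef therefore store the halves in
 * whichever order matches the CPU's byte order.
 */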

extern void __put_user_unknown(void);

/*
 * Generic arbitrary-sized copy. Returns the number of bytes NOT copied.
 */
__kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n);

static __always_inline unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
        return __copy_user(to, (__force void *)from, n);
}

static __always_inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
        return __copy_user((__force void *)to, from, n);
}

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
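
/*
 * Typical caller pattern (an illustrative sketch; 'karg' and 'uarg'
 * are made-up names, and the explicit access_ok() check stands in for
 * what a checked copy_from_user() wrapper would normally supply):
 *
 *      struct some_args karg;
 *
 *      if (!access_ok(VERIFY_READ, uarg, sizeof(karg)) ||
 *          __copy_from_user(&karg, uarg, sizeof(karg)))
 *              return -EFAULT;
 */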

/*
 * Clear the area and return the number of bytes NOT cleared
 * (on failure; usually 0).
 */
extern __kernel_size_t __clear_user(void *addr, __kernel_size_t size);

#define clear_user(addr,n) ({                           \
        void *__cl_addr = (addr);                       \
        unsigned long __cl_size = (n);                  \
        if (__cl_size && __access_ok(((unsigned long)(__cl_addr)), __cl_size)) \
                __cl_size = __clear_user(__cl_addr, __cl_size); \
        __cl_size; })
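
/*
 * Example (illustrative; 'ubuf' and 'len' are made-up names): zero a
 * user buffer, failing with -EFAULT if any of it could not be cleared.
 *
 *      if (clear_user(ubuf, len))
 *              return -EFAULT;
 */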

static __inline__ int
__strncpy_from_user(unsigned long __dest, unsigned long __user __src, int __count)
{
        __kernel_size_t res;
        unsigned long __dummy, _d, _s, _c;

        __asm__ __volatile__(
                "9:\n"
                "mov.b  @%2+, %1\n\t"
                "cmp/eq #0, %1\n\t"
                "bt/s   2f\n"
                "1:\n"
                "mov.b  %1, @%3\n\t"
                "dt     %4\n\t"
                "bf/s   9b\n\t"
                " add   #1, %3\n\t"
                "2:\n\t"
                "sub    %4, %0\n"
                "3:\n"
                ".section .fixup,\"ax\"\n"
                "4:\n\t"
                "mov.l  5f, %1\n\t"
                "jmp    @%1\n\t"
                " mov   %9, %0\n\t"
                ".balign 4\n"
                "5:     .long 3b\n"
                ".previous\n"
                ".section __ex_table,\"a\"\n"
                "       .balign 4\n"
                "       .long 9b,4b\n"
                ".previous"
                : "=r" (res), "=&z" (__dummy), "=r" (_s), "=r" (_d), "=r" (_c)
                : "0" (__count), "2" (__src), "3" (__dest), "4" (__count),
                  "i" (-EFAULT)
                : "memory", "t");

        return res;
}

/**
 * strncpy_from_user: - Copy a NUL terminated string from userspace.
 * @dest:  Destination address, in kernel space.  This buffer must be at
 *         least @count bytes long.
 * @src:   Source address, in user space.
 * @count: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from userspace to kernel space.
 *
 * On success, returns the length of the string (not including the trailing
 * NUL).
 *
 * If access to userspace fails, returns -EFAULT (some data may have been
 * copied).
 *
 * If @count is smaller than the length of the string, copies @count bytes
 * and returns @count.
 */
#define strncpy_from_user(dest,src,count) ({                    \
        unsigned long __sfu_src = (unsigned long)(src);         \
        int __sfu_count = (int)(count);                         \
        long __sfu_res = -EFAULT;                               \
        if (__access_ok(__sfu_src, __sfu_count)) {              \
                __sfu_res = __strncpy_from_user((unsigned long)(dest), \
                                                __sfu_src, __sfu_count); \
        }                                                       \
        __sfu_res; })
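
/*
 * Example (illustrative; 'name' and 'uname' are made-up names):
 *
 *      char name[32];
 *      long len = strncpy_from_user(name, uname, sizeof(name));
 *
 *      if (len < 0)
 *              return len;             (fault: len == -EFAULT)
 *      if (len == sizeof(name))
 *              return -ENAMETOOLONG;   (no NUL seen within the buffer)
 */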

/*
 * Return the size of a string (including the ending 0 even when we have
 * exceeded the maximum string length).
 */
static __inline__ long __strnlen_user(const char __user *__s, long __n)
{
        unsigned long res;
        unsigned long __dummy;

        __asm__ __volatile__(
                "1:\t"
                "mov.b  @(%0,%3), %1\n\t"
                "cmp/eq %4, %0\n\t"
                "bt/s   2f\n\t"
                " add   #1, %0\n\t"
                "tst    %1, %1\n\t"
                "bf     1b\n\t"
                "2:\n"
                ".section .fixup,\"ax\"\n"
                "3:\n\t"
                "mov.l  4f, %1\n\t"
                "jmp    @%1\n\t"
                " mov   #0, %0\n"
                ".balign 4\n"
                "4:     .long 2b\n"
                ".previous\n"
                ".section __ex_table,\"a\"\n"
                "       .balign 4\n"
                "       .long 1b,3b\n"
                ".previous"
                : "=z" (res), "=&r" (__dummy)
                : "0" (0), "r" (__s), "r" (__n)
                : "t");
        return res;
}

/**
 * strnlen_user: - Get the size of a string in user space.
 * @s: The string to measure.
 * @n: The maximum valid length
 *
 * Context: User context only. This function may sleep.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 * If the string is too long, returns a value greater than @n.
 */
static __inline__ long strnlen_user(const char __user *s, long n)
{
        if (!__addr_ok(s))
                return 0;
        else
                return __strnlen_user(s, n);
}
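
/*
 * Example (illustrative; 'ustr' is a made-up name): size a user string
 * before acting on it.
 *
 *      long len = strnlen_user(ustr, PATH_MAX);
 *
 *      if (!len)
 *              return -EFAULT;         (faulted)
 *      if (len > PATH_MAX)
 *              return -ENAMETOOLONG;   (no NUL within PATH_MAX bytes)
 */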

/**
 * strlen_user: - Get the size of a string in user space.
 * @str: The string to measure.
 *
 * Context: User context only. This function may sleep.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 *
 * If there is a limit on the length of a valid string, you may wish to
 * consider using strnlen_user() instead.
 */
#define strlen_user(str)        strnlen_user(str, ~0UL >> 1)

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue. No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines above use bits of fixup code that are out of line
 * with the main instruction path. This means when everything is well,
 * we don't even have to jump over them. Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry
{
        unsigned long insn, fixup;
};

extern int fixup_exception(struct pt_regs *regs);
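
/*
 * Conceptually, fixup_exception() does something like the sketch below
 * (the real implementation lives in the arch fault-handling code): look
 * the faulting PC up in the exception table and, if an entry exists,
 * resume at its fixup address.
 *
 *      const struct exception_table_entry *e;
 *
 *      e = search_exception_tables(regs->pc);
 *      if (e) {
 *              regs->pc = e->fixup;
 *              return 1;
 *      }
 *      return 0;
 */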

#endif /* __ASM_SH_UACCESS_32_H */