/* MN10300 userspace access functions
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/thread_info.h>
#include <asm/page.h>
#include <asm/errno.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */
#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_XDS	MAKE_MM_SEG(0xBFFFFFFF)
#define KERNEL_DS	MAKE_MM_SEG(0x9FFFFFFF)
#define USER_DS		MAKE_MM_SEG(TASK_SIZE)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))
#define __kernel_ds_p()	(current_thread_info()->addr_limit.seg == 0x9FFFFFFF)
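
/*
 * Usage sketch (illustrative only; only get_fs()/set_fs() and the segment
 * constants come from this header): code that must pass a kernel buffer
 * through an interface expecting a user pointer conventionally widens the
 * limit around the access and restores it afterwards:
 *
 *	mm_segment_t old_fs = get_fs();
 *	set_fs(KERNEL_DS);
 *	...perform the access with the helpers defined below...
 *	set_fs(old_fs);
 */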

#define segment_eq(a, b)	((a).seg == (b).seg)

#define __addr_ok(addr) \
	((unsigned long)(addr) < (current_thread_info()->addr_limit.seg))

/*
 * check that a range of addresses falls within the current address limit
 */
static inline int ___range_ok(unsigned long addr, unsigned int size)
{
	int flag = 1, tmp;

	asm("	add %3,%1	\n"	/* set C-flag if addr + size > 4Gb */
	    "	bcs 0f		\n"
	    "	cmp %4,%1	\n"	/* jump if addr+size>limit (error) */
	    "	bhi 0f		\n"
	    "	clr %0		\n"	/* mark okay */
	    "0:			\n"
	    : "=r"(flag), "=&r"(tmp)
	    : "1"(addr), "ir"(size),
	      "r"(current_thread_info()->addr_limit.seg), "0"(flag)
	    : "cc"
	    );

	return flag;
}

#define __range_ok(addr, size) ___range_ok((unsigned long)(addr), (u32)(size))

#define access_ok(type, addr, size) (__range_ok((addr), (size)) == 0)
#define __access_ok(addr, size)     (__range_ok((addr), (size)) == 0)
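
/*
 * Note that ___range_ok() follows the asm above and yields 0 on success,
 * which access_ok()/__access_ok() invert into the usual boolean.  A minimal
 * (illustrative) use, with "buf" and "len" standing in for a caller's user
 * pointer and length:
 *
 *	if (!access_ok(VERIFY_READ, buf, len))
 *		return -EFAULT;
 */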

static inline int verify_area(int type, const void *addr, unsigned long size)
{
	return access_ok(type, addr, size) ? 0 : -EFAULT;
}


/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue. No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path. This means when everything is well,
 * we don't even have to jump over them. Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry
{
	unsigned long insn, fixup;
};

/* Returns 0 if exception not found and fixup otherwise.  */
extern int fixup_exception(struct pt_regs *regs);
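
/*
 * The architecture's fault handler is expected to consult the table via
 * fixup_exception(); a typical call site (shown here only as an
 * illustration, the handler itself lives outside this header) is:
 *
 *	if (fixup_exception(regs))
 *		return;
 */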

#define put_user(x, ptr) __put_user_check((x), (ptr), sizeof(*(ptr)))
#define get_user(x, ptr) __get_user_check((x), (ptr), sizeof(*(ptr)))

/*
 * The "__xxx" versions do not do address space checking, useful when
 * doing multiple accesses to the same area (the user has to do the
 * checks by hand with "access_ok()")
 */
#define __put_user(x, ptr) __put_user_nocheck((x), (ptr), sizeof(*(ptr)))
#define __get_user(x, ptr) __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
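
/*
 * Illustrative use of the unchecked variants, where one access_ok() call
 * covers several adjacent accesses ("uptr", "a" and "b" are hypothetical
 * caller variables):
 *
 *	if (!access_ok(VERIFY_READ, uptr, 2 * sizeof(int)))
 *		return -EFAULT;
 *	err  = __get_user(a, &uptr[0]);
 *	err |= __get_user(b, &uptr[1]);
 */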

/*
 * The "xxx_ret" versions return the constant specified in the third
 * argument if something bad happens. These macros can be optimized for
 * the case where xxx_ret is used in a function that simply returns.
 */

#define put_user_ret(x, ptr, ret) \
	({ if (put_user((x), (ptr))) return (ret); })
#define get_user_ret(x, ptr, ret) \
	({ if (get_user((x), (ptr))) return (ret); })
#define __put_user_ret(x, ptr, ret) \
	({ if (__put_user((x), (ptr))) return (ret); })
#define __get_user_ret(x, ptr, ret) \
	({ if (__get_user((x), (ptr))) return (ret); })
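
/*
 * Because these expand to a bare "return", they may only be used inside a
 * function returning a compatible type.  A hypothetical caller:
 *
 *	int get_flags(int __user *uptr, int *kflags)
 *	{
 *		get_user_ret(*kflags, uptr, -EFAULT);
 *		return 0;
 *	}
 */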

struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct *)(x))
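/*
 * __m() above casts a user address to a "large struct" lvalue so that the
 * "m" constraints in the asm below tell GCC that memory at that address is
 * really read or written (the struct is never instantiated; its size is
 * simply "big enough").  This rationale is assumed here, following the same
 * idiom used on other architectures.
 */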

#define __get_user_nocheck(x, ptr, size)			\
({								\
	unsigned long __gu_addr;				\
	int __gu_err;						\
	__gu_addr = (unsigned long) (ptr);			\
	switch (size) {						\
	case 1: {						\
		unsigned char __gu_val;				\
		__get_user_asm("bu");				\
		(x) = *(__force __typeof__(*(ptr))*) &__gu_val;	\
		break;						\
	}							\
	case 2: {						\
		unsigned short __gu_val;			\
		__get_user_asm("hu");				\
		(x) = *(__force __typeof__(*(ptr))*) &__gu_val;	\
		break;						\
	}							\
	case 4: {						\
		unsigned int __gu_val;				\
		__get_user_asm("");				\
		(x) = *(__force __typeof__(*(ptr))*) &__gu_val;	\
		break;						\
	}							\
	default:						\
		__get_user_unknown();				\
		break;						\
	}							\
	__gu_err;						\
})

#define __get_user_check(x, ptr, size)				\
({								\
	const __typeof__(ptr) __guc_ptr = (ptr);		\
	int _e;							\
	if (likely(__access_ok((unsigned long) __guc_ptr, (size)))) \
		_e = __get_user_nocheck((x), __guc_ptr, (size)); \
	else {							\
		_e = -EFAULT;					\
		(x) = (__typeof__(x))0;				\
	}							\
	_e;							\
})

#define __get_user_asm(INSN)					\
({								\
	asm volatile(						\
		"1:\n"						\
		"	mov"INSN"	%2,%1\n"		\
		"	mov		0,%0\n"			\
		"2:\n"						\
		"	.section	.fixup,\"ax\"\n"	\
		"3:\n\t"					\
		"	mov		%3,%0\n"		\
		"	jmp		2b\n"			\
		"	.previous\n"				\
		"	.section	__ex_table,\"a\"\n"	\
		"	.balign		4\n"			\
		"	.long		1b, 3b\n"		\
		"	.previous"				\
		: "=&r" (__gu_err), "=&r" (__gu_val)		\
		: "m" (__m(__gu_addr)), "i" (-EFAULT));		\
})

extern int __get_user_unknown(void);

#define __put_user_nocheck(x, ptr, size)			\
({								\
	union {							\
		__typeof__(*(ptr)) val;				\
		u32 bits[2];					\
	} __pu_val;						\
	unsigned long __pu_addr;				\
	int __pu_err;						\
	__pu_val.val = (x);					\
	__pu_addr = (unsigned long) (ptr);			\
	switch (size) {						\
	case 1:  __put_user_asm("bu"); break;			\
	case 2:  __put_user_asm("hu"); break;			\
	case 4:  __put_user_asm("" ); break;			\
	case 8:  __put_user_asm8();   break;			\
	default: __pu_err = __put_user_unknown(); break;	\
	}							\
	__pu_err;						\
})

#define __put_user_check(x, ptr, size)				\
({								\
	union {							\
		__typeof__(*(ptr)) val;				\
		u32 bits[2];					\
	} __pu_val;						\
	unsigned long __pu_addr;				\
	int __pu_err;						\
	__pu_val.val = (x);					\
	__pu_addr = (unsigned long) (ptr);			\
	if (likely(__access_ok(__pu_addr, size))) {		\
		switch (size) {					\
		case 1:  __put_user_asm("bu"); break;		\
		case 2:  __put_user_asm("hu"); break;		\
		case 4:  __put_user_asm("" ); break;		\
		case 8:  __put_user_asm8();   break;		\
		default: __pu_err = __put_user_unknown(); break; \
		}						\
	}							\
	else {							\
		__pu_err = -EFAULT;				\
	}							\
	__pu_err;						\
})

#define __put_user_asm(INSN)					\
({								\
	asm volatile(						\
		"1:\n"						\
		"	mov"INSN"	%1,%2\n"		\
		"	mov		0,%0\n"			\
		"2:\n"						\
		"	.section	.fixup,\"ax\"\n"	\
		"3:\n"						\
		"	mov		%3,%0\n"		\
		"	jmp		2b\n"			\
		"	.previous\n"				\
		"	.section	__ex_table,\"a\"\n"	\
		"	.balign		4\n"			\
		"	.long		1b, 3b\n"		\
		"	.previous"				\
		: "=&r" (__pu_err)				\
		: "r" (__pu_val.val), "m" (__m(__pu_addr)),	\
		  "i" (-EFAULT)					\
		);						\
})

#define __put_user_asm8()						\
({									\
	asm volatile(							\
		"1:	mov		%1,%3		\n"		\
		"2:	mov		%2,%4		\n"		\
		"	mov		0,%0		\n"		\
		"3:					\n"		\
		"	.section	.fixup,\"ax\"	\n"		\
		"4:					\n"		\
		"	mov		%5,%0		\n"		\
		"	jmp		3b		\n"		\
		"	.previous			\n"		\
		"	.section	__ex_table,\"a\"\n"		\
		"	.balign		4		\n"		\
		"	.long		1b, 4b		\n"		\
		"	.long		2b, 4b		\n"		\
		"	.previous			\n"		\
		: "=&r" (__pu_err)					\
		: "r" (__pu_val.bits[0]), "r" (__pu_val.bits[1]),	\
		  "m" (__m(__pu_addr)), "m" (__m(__pu_addr+4)),		\
		  "i" (-EFAULT)						\
		);							\
})

extern int __put_user_unknown(void);


/*
 * Copy To/From Userspace
 */
/* Generic arbitrary sized copy. */
#define __copy_user(to, from, size)					\
do {									\
	if (size) {							\
		void *__to = to;					\
		const void *__from = from;				\
		int w;							\
		asm volatile(						\
			"0:     movbu	(%0),%3;\n"			\
			"1:     movbu	%3,(%1);\n"			\
			"	inc	%0;\n"				\
			"	inc	%1;\n"				\
			"	add	-1,%2;\n"			\
			"	bne	0b;\n"				\
			"2:\n"						\
			"	.section .fixup,\"ax\"\n"		\
			"3:	jmp	2b\n"				\
			"	.previous\n"				\
			"	.section __ex_table,\"a\"\n"		\
			"	.balign	4\n"				\
			"	.long	0b,3b\n"			\
			"	.long	1b,3b\n"			\
			"	.previous\n"				\
			: "=a"(__from), "=a"(__to), "=r"(size), "=&r"(w)\
			: "0"(__from), "1"(__to), "2"(size)		\
			: "cc", "memory");				\
	}								\
} while (0)

#define __copy_user_zeroing(to, from, size)				\
do {									\
	if (size) {							\
		void *__to = to;					\
		const void *__from = from;				\
		int w;							\
		asm volatile(						\
			"0:     movbu	(%0),%3;\n"			\
			"1:     movbu	%3,(%1);\n"			\
			"	inc	%0;\n"				\
			"	inc	%1;\n"				\
			"	add	-1,%2;\n"			\
			"	bne	0b;\n"				\
			"2:\n"						\
			"	.section .fixup,\"ax\"\n"		\
			"3:\n"						\
			"	mov	%2,%0\n"			\
			"	clr	%3\n"				\
			"4:     movbu	%3,(%1);\n"			\
			"	inc	%1;\n"				\
			"	add	-1,%2;\n"			\
			"	bne	4b;\n"				\
			"	mov	%0,%2\n"			\
			"	jmp	2b\n"				\
			"	.previous\n"				\
			"	.section __ex_table,\"a\"\n"		\
			"	.balign	4\n"				\
			"	.long	0b,3b\n"			\
			"	.long	1b,3b\n"			\
			"	.previous\n"				\
			: "=a"(__from), "=a"(__to), "=r"(size), "=&r"(w)\
			: "0"(__from), "1"(__to), "2"(size)		\
			: "cc", "memory");				\
	}								\
} while (0)
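
/*
 * As the fixup code above shows, __copy_user_zeroing() pads the remainder
 * of the destination with zeroes when the source faults, and both macros
 * leave "size" holding the number of bytes that could not be copied
 * (0 on success).
 */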

/* We let the __ versions of copy_from/to_user be inlined, because they're
 * often used in fast paths and have only a small space overhead.
 */
static inline
unsigned long __generic_copy_from_user_nocheck(void *to, const void *from,
					       unsigned long n)
{
	__copy_user_zeroing(to, from, n);
	return n;
}

static inline
unsigned long __generic_copy_to_user_nocheck(void *to, const void *from,
					     unsigned long n)
{
	__copy_user(to, from, n);
	return n;
}
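
/*
 * Like copy_to_user()/copy_from_user() further below, these return the
 * number of bytes that could NOT be copied, so 0 means success.  A typical
 * (illustrative) caller, with "karg"/"uarg" standing in for the caller's
 * kernel and user objects:
 *
 *	if (copy_from_user(&karg, uarg, sizeof(karg)))
 *		return -EFAULT;
 */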


#if 0
#error "don't use - these macros don't increment to & from pointers"
/* Optimize just a little bit when we know the size of the move. */
#define __constant_copy_user(to, from, size)	\
do {						\
	asm volatile(				\
		"	mov %0,a0;\n"		\
		"0:	movbu (%1),d3;\n"	\
		"1:	movbu d3,(%2);\n"	\
		"	add -1,a0;\n"		\
		"	bne 0b;\n"		\
		"2:;"				\
		".section .fixup,\"ax\"\n"	\
		"3:	jmp 2b\n"		\
		".previous\n"			\
		".section __ex_table,\"a\"\n"	\
		"	.balign 4\n"		\
		"	.long 0b,3b\n"		\
		"	.long 1b,3b\n"		\
		".previous"			\
		:				\
		: "d"(size), "d"(to), "d"(from)	\
		: "d3", "a0");			\
} while (0)

/* Optimize just a little bit when we know the size of the move. */
#define __constant_copy_user_zeroing(to, from, size)	\
do {							\
	asm volatile(					\
		"	mov %0,a0;\n"			\
		"0:	movbu (%1),d3;\n"		\
		"1:	movbu d3,(%2);\n"		\
		"	add -1,a0;\n"			\
		"	bne 0b;\n"			\
		"2:;"					\
		".section .fixup,\"ax\"\n"		\
		"3:	jmp 2b\n"			\
		".previous\n"				\
		".section __ex_table,\"a\"\n"		\
		"	.balign 4\n"			\
		"	.long 0b,3b\n"			\
		"	.long 1b,3b\n"			\
		".previous"				\
		:					\
		: "d"(size), "d"(to), "d"(from)		\
		: "d3", "a0");				\
} while (0)

static inline
unsigned long __constant_copy_to_user(void *to, const void *from,
				      unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		__constant_copy_user(to, from, n);
	return n;
}

static inline
unsigned long __constant_copy_from_user(void *to, const void *from,
					unsigned long n)
{
	if (access_ok(VERIFY_READ, from, n))
		__constant_copy_user_zeroing(to, from, n);
	return n;
}

static inline
unsigned long __constant_copy_to_user_nocheck(void *to, const void *from,
					      unsigned long n)
{
	__constant_copy_user(to, from, n);
	return n;
}

static inline
unsigned long __constant_copy_from_user_nocheck(void *to, const void *from,
						unsigned long n)
{
	__constant_copy_user_zeroing(to, from, n);
	return n;
}
#endif

extern unsigned long __generic_copy_to_user(void __user *, const void *,
					    unsigned long);
extern unsigned long __generic_copy_from_user(void *, const void __user *,
					      unsigned long);

#define __copy_to_user_inatomic(to, from, n) \
	__generic_copy_to_user_nocheck((to), (from), (n))
#define __copy_from_user_inatomic(to, from, n) \
	__generic_copy_from_user_nocheck((to), (from), (n))

#define __copy_to_user(to, from, n)			\
({							\
	might_sleep();					\
	__copy_to_user_inatomic((to), (from), (n));	\
})

#define __copy_from_user(to, from, n)			\
({							\
	might_sleep();					\
	__copy_from_user_inatomic((to), (from), (n));	\
})


#define copy_to_user(to, from, n)   __generic_copy_to_user((to), (from), (n))
#define copy_from_user(to, from, n) __generic_copy_from_user((to), (from), (n))

extern long strncpy_from_user(char *dst, const char __user *src, long count);
extern long __strncpy_from_user(char *dst, const char __user *src, long count);
extern long strnlen_user(const char __user *str, long n);
#define strlen_user(str) strnlen_user(str, ~0UL >> 1)
extern unsigned long clear_user(void __user *mem, unsigned long len);
extern unsigned long __clear_user(void __user *mem, unsigned long len);

#endif /* _ASM_UACCESS_H */
494#endif /* _ASM_UACCESS_H */