/*
 * include/asm-xtensa/uaccess.h
 *
 * User space memory access functions
 *
 * These routines provide basic accessing functions to the user memory
 * space for the kernel. This header file provides functions such as
 * get_user(), put_user(), copy_to_user(), copy_from_user(), and clear_user().
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 */

#ifndef _XTENSA_UACCESS_H
#define _XTENSA_UACCESS_H

#include <linux/errno.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

#ifdef __ASSEMBLY__

#include <asm/current.h>
#include <asm/asm-offsets.h>
#include <asm/processor.h>

/*
 * These assembly macros mirror the C macros that follow below. They
 * should always have identical functionality. See
 * arch/xtensa/kernel/sys.S for usage.
 */

#define KERNEL_DS 0
#define USER_DS 1

#define get_ds (KERNEL_DS)

/*
 * get_fs reads current->thread.current_ds into a register.
 * On Entry:
 *      <ad>    anything
 *      <sp>    stack
 * On Exit:
 *      <ad>    contains current->thread.current_ds
 */
        .macro  get_fs  ad, sp
        GET_CURRENT(\ad,\sp)
        l32i    \ad, \ad, THREAD_CURRENT_DS
        .endm

/*
 * set_fs sets current->thread.current_ds to some value.
 * On Entry:
 *      <at>    anything (temp register)
 *      <av>    value to write
 *      <sp>    stack
 * On Exit:
 *      <at>    destroyed (actually, current)
 *      <av>    preserved, value to write
 */
        .macro  set_fs  at, av, sp
        GET_CURRENT(\at,\sp)
        s32i    \av, \at, THREAD_CURRENT_DS
        .endm

/*
 * kernel_ok determines whether we should bypass addr/size checking.
 * See the equivalent C-macro version below for clarity.
 * On success, kernel_ok branches to a label indicated by parameter
 * <success>. This implies that the macro falls through to the next
 * instruction on an error.
 *
 * Note that while this macro can be used independently, we designed
 * it for optimal use in the access_ok macro below (i.e., we fall
 * through on error).
 *
 * On Entry:
 *      <at>            anything (temp register)
 *      <success>       label to branch to on success; implies
 *                      fall-through macro on error
 *      <sp>            stack pointer
 * On Exit:
 *      <at>            destroyed (actually, current->thread.current_ds)
 */

#if ((KERNEL_DS != 0) || (USER_DS == 0))
# error Assembly macro kernel_ok fails
#endif
        .macro  kernel_ok  at, sp, success
        get_fs  \at, \sp
        beqz    \at, \success
        .endm

/*
 * user_ok determines whether the access to user-space memory is allowed.
 * See the equivalent C-macro version below for clarity.
 *
 * On error, user_ok branches to a label indicated by parameter
 * <error>. This implies that the macro falls through to the next
 * instruction on success.
 *
 * Note that while this macro can be used independently, we designed
 * it for optimal use in the access_ok macro below (i.e., we fall
 * through on success).
 *
 * On Entry:
 *      <aa>    register containing memory address
 *      <as>    register containing memory size
 *      <at>    temp register
 *      <error> label to branch to on error; implies fall-through
 *              macro on success
 * On Exit:
 *      <aa>    preserved
 *      <as>    preserved
 *      <at>    destroyed (actually, (TASK_SIZE + 1 - size))
 */
        .macro  user_ok  aa, as, at, error
        movi    \at, (TASK_SIZE+1)
        bgeu    \as, \at, \error
        sub     \at, \at, \as
        bgeu    \aa, \at, \error
        .endm

/*
 * access_ok determines whether a memory access is allowed. See the
 * equivalent C-macro version below for clarity.
 *
 * On error, access_ok branches to a label indicated by parameter
 * <error>. This implies that the macro falls through to the next
 * instruction on success.
 *
 * Note that we assume success is the common case, and we optimize the
 * branch fall-through case on success.
 *
 * On Entry:
 *      <aa>    register containing memory address
 *      <as>    register containing memory size
 *      <at>    temp register
 *      <sp>    stack pointer
 *      <error> label to branch to on error; implies fall-through
 *              macro on success
 * On Exit:
 *      <aa>    preserved
 *      <as>    preserved
 *      <at>    destroyed
 */
        .macro  access_ok  aa, as, at, sp, error
        kernel_ok  \at, \sp, .Laccess_ok_\@
        user_ok    \aa, \as, \at, \error
.Laccess_ok_\@:
        .endm

#else /* __ASSEMBLY__ not defined */

#include <linux/sched.h>
#include <asm/types.h>

/*
 * The fs value determines whether argument validity checking should
 * be performed or not. If get_fs() == USER_DS, checking is
 * performed; with get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons (Data Segment Register?), these macros are
 * grossly misnamed.
 */

#define KERNEL_DS ((mm_segment_t) { 0 })
#define USER_DS ((mm_segment_t) { 1 })

#define get_ds() (KERNEL_DS)
#define get_fs() (current->thread.current_ds)
#define set_fs(val) (current->thread.current_ds = (val))

#define segment_eq(a,b) ((a).seg == (b).seg)

#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
#define __user_ok(addr,size) (((size) <= TASK_SIZE)&&((addr) <= TASK_SIZE-(size)))
#define __access_ok(addr,size) (__kernel_ok || __user_ok((addr),(size)))
#define access_ok(type,addr,size) __access_ok((unsigned long)(addr),(size))

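/*
 * Worked example (illustrative only; assume TASK_SIZE == 0x40000000
 * just for the numbers below):
 *
 *      __user_ok(0x3ffffff8, 8) -> 8 <= TASK_SIZE
 *                                  && 0x3ffffff8 <= 0x40000000 - 8  -> true
 *      __user_ok(0x3ffffffc, 8) -> 0x3ffffffc >  0x40000000 - 8    -> false
 *
 * Writing the test as (addr <= TASK_SIZE - size) rather than
 * (addr + size <= TASK_SIZE) avoids computing addr + size, which a
 * hostile address/size pair could make wrap around.  Note that
 * access_ok() only validates the range; it never touches the memory.
 */
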
/*
 * These are the main single-value transfer routines. They
 * automatically use the right size if we just have the right pointer
 * type.
 *
 * This gets kind of ugly. We want to return _two_ values in
 * "get_user()" and yet we don't want to do any pointers, because that
 * is too much of a performance impact. Thus we have a few rather ugly
 * macros here, and hide all the ugliness from the user.
 *
 * Be careful not to
 * (a) re-use the arguments for side effects (sizeof is ok)
 * (b) require any knowledge of processes at this stage
 */
#define put_user(x,ptr) __put_user_check((x),(ptr),sizeof(*(ptr)))
#define get_user(x,ptr) __get_user_check((x),(ptr),sizeof(*(ptr)))

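/*
 * Usage sketch (illustrative only, not part of this header; the
 * function name is made up).  get_user()/put_user() do the access_ok()
 * check themselves and return 0 on success or -EFAULT on failure:
 *
 *      long example_get_flag(int *uptr)
 *      {
 *              int val;
 *
 *              if (get_user(val, uptr))
 *                      return -EFAULT;
 *              return val;
 *      }
 */
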
/*
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space; that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */
#define __put_user(x,ptr) __put_user_nocheck((x),(ptr),sizeof(*(ptr)))
#define __get_user(x,ptr) __get_user_nocheck((x),(ptr),sizeof(*(ptr)))

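/*
 * Usage sketch for the unchecked variants (illustrative only; the
 * struct and function names are made up): validate the whole user
 * range once with access_ok(), then use __get_user()/__put_user()
 * for the individual fields:
 *
 *      struct example_pair { int a; int b; };
 *
 *      long example_read_pair(struct example_pair *upair, int *a, int *b)
 *      {
 *              if (!access_ok(VERIFY_READ, upair, sizeof(*upair)))
 *                      return -EFAULT;
 *              if (__get_user(*a, &upair->a) || __get_user(*b, &upair->b))
 *                      return -EFAULT;
 *              return 0;
 *      }
 */
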
extern long __put_user_bad(void);

#define __put_user_nocheck(x,ptr,size) \
({ \
        long __pu_err; \
        __put_user_size((x),(ptr),(size),__pu_err); \
        __pu_err; \
})

#define __put_user_check(x,ptr,size) \
({ \
        long __pu_err = -EFAULT; \
        __typeof__(*(ptr)) *__pu_addr = (ptr); \
        if (access_ok(VERIFY_WRITE,__pu_addr,size)) \
                __put_user_size((x),__pu_addr,(size),__pu_err); \
        __pu_err; \
})

#define __put_user_size(x,ptr,size,retval) \
do { \
        retval = 0; \
        switch (size) { \
        case 1: __put_user_asm(x,ptr,retval,1,"s8i"); break; \
        case 2: __put_user_asm(x,ptr,retval,2,"s16i"); break; \
        case 4: __put_user_asm(x,ptr,retval,4,"s32i"); break; \
        case 8: { \
                        __typeof__(*ptr) __v64 = x; \
                        retval = __copy_to_user(ptr,&__v64,8); \
                        break; \
                } \
        default: __put_user_bad(); \
        } \
} while (0)


/*
 * Consider the case where a single user load/store would cause both an
 * unaligned exception and an MMU-related exception (unaligned
 * exceptions happen first):
 *
 * User code passes a bad variable ptr to a system call.
 * Kernel tries to access the variable.
 * Unaligned exception occurs.
 * Unaligned exception handler tries to make aligned accesses.
 * Double exception occurs for MMU-related cause (e.g., page not mapped).
 * do_page_fault() thinks the fault address belongs to the kernel, not the
 * user, and panics.
 *
 * The kernel currently prohibits user unaligned accesses. We use the
 * __check_align_* macros to check for unaligned addresses before
 * accessing user space so we don't crash the kernel. Both
 * __put_user_asm and __get_user_asm use these alignment macros, so
 * macro-specific labels such as 0f, 1f, %0, %2, and %3 must stay in
 * sync.
 */

#define __check_align_1 ""

#define __check_align_2 \
        " _bbci.l %2, 0, 1f \n" \
        " movi %0, %3 \n" \
        " _j 2f \n"

#define __check_align_4 \
        " _bbsi.l %2, 0, 0f \n" \
        " _bbci.l %2, 1, 1f \n" \
        "0: movi %0, %3 \n" \
        " _j 2f \n"

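/*
 * The checks above are equivalent to the following C-level sketch:
 * for a 2-byte access, fault if bit 0 of the address is set; for a
 * 4-byte access, fault if either of the two low address bits is set:
 *
 *      if (addr & 1)           // __check_align_2
 *              err = -EFAULT;
 *      if (addr & 3)           // __check_align_4
 *              err = -EFAULT;
 *
 * On a misaligned address the access at label 1 is skipped and
 * control continues at label 2 with the error code set to -EFAULT.
 */
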

/*
 * We don't tell gcc that we are accessing memory, but this is OK
 * because we do not write to any memory gcc knows about, so there
 * are no aliasing issues.
 *
 * WARNING: If you modify this macro at all, verify that the
 * __check_align_* macros still work.
 */
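
/*
 * Fixup flow, for reference: if the access at label 1 faults, the
 * __ex_table entry (1b, 5b) sends the exception handler to label 5 in
 * the .fixup section.  The code there loads the address of label 2
 * (stored as a literal at label 4), sets the error code to -EFAULT,
 * and jumps back to label 2, i.e. just past the faulting instruction.
 * __get_user_asm below additionally zeroes the destination register.
 */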
#define __put_user_asm(x, addr, err, align, insn) \
        __asm__ __volatile__( \
                __check_align_##align \
                "1: "insn" %1, %2, 0 \n" \
                "2: \n" \
                " .section .fixup,\"ax\" \n" \
                " .align 4 \n" \
                "4: \n" \
                " .long 2b \n" \
                "5: \n" \
                " l32r %2, 4b \n" \
                " movi %0, %3 \n" \
                " jx %2 \n" \
                " .previous \n" \
                " .section __ex_table,\"a\" \n" \
                " .long 1b, 5b \n" \
                " .previous" \
                :"=r" (err) \
                :"r" ((int)(x)), "r" (addr), "i" (-EFAULT), "0" (err))

#define __get_user_nocheck(x,ptr,size) \
({ \
        long __gu_err, __gu_val; \
        __get_user_size(__gu_val,(ptr),(size),__gu_err); \
        (x) = (__typeof__(*(ptr)))__gu_val; \
        __gu_err; \
})

#define __get_user_check(x,ptr,size) \
({ \
        long __gu_err = -EFAULT, __gu_val = 0; \
        const __typeof__(*(ptr)) *__gu_addr = (ptr); \
        if (access_ok(VERIFY_READ,__gu_addr,size)) \
                __get_user_size(__gu_val,__gu_addr,(size),__gu_err); \
        (x) = (__typeof__(*(ptr)))__gu_val; \
        __gu_err; \
})

extern long __get_user_bad(void);

#define __get_user_size(x,ptr,size,retval) \
do { \
        retval = 0; \
        switch (size) { \
        case 1: __get_user_asm(x,ptr,retval,1,"l8ui"); break; \
        case 2: __get_user_asm(x,ptr,retval,2,"l16ui"); break; \
        case 4: __get_user_asm(x,ptr,retval,4,"l32i"); break; \
        case 8: retval = __copy_from_user(&x,ptr,8); break; \
        default: (x) = __get_user_bad(); \
        } \
} while (0)


/*
 * WARNING: If you modify this macro at all, verify that the
 * __check_align_* macros still work.
 */
#define __get_user_asm(x, addr, err, align, insn) \
        __asm__ __volatile__( \
                __check_align_##align \
                "1: "insn" %1, %2, 0 \n" \
                "2: \n" \
                " .section .fixup,\"ax\" \n" \
                " .align 4 \n" \
                "4: \n" \
                " .long 2b \n" \
                "5: \n" \
                " l32r %2, 4b \n" \
                " movi %1, 0 \n" \
                " movi %0, %3 \n" \
                " jx %2 \n" \
                " .previous \n" \
                " .section __ex_table,\"a\" \n" \
                " .long 1b, 5b \n" \
                " .previous" \
                :"=r" (err), "=r" (x) \
                :"r" (addr), "i" (-EFAULT), "0" (err))


/*
 * Copy to/from user space
 */

/*
 * We use a generic, arbitrary-sized copy subroutine. The Xtensa
 * architecture would cause heavy code bloat if we tried to inline
 * these functions and provide __constant_copy_* equivalents like the
 * i386 versions. __xtensa_copy_user is quite efficient. See the
 * .fixup section of __xtensa_copy_user for a discussion on the
 * X_zeroing equivalents for Xtensa.
 */

extern unsigned __xtensa_copy_user(void *to, const void *from, unsigned n);
#define __copy_user(to,from,size) __xtensa_copy_user(to,from,size)


static inline unsigned long
__generic_copy_from_user_nocheck(void *to, const void *from, unsigned long n)
{
        return __copy_user(to,from,n);
}

static inline unsigned long
__generic_copy_to_user_nocheck(void *to, const void *from, unsigned long n)
{
        return __copy_user(to,from,n);
}

static inline unsigned long
__generic_copy_to_user(void *to, const void *from, unsigned long n)
{
        prefetch(from);
        if (access_ok(VERIFY_WRITE, to, n))
                return __copy_user(to,from,n);
        return n;
}

static inline unsigned long
__generic_copy_from_user(void *to, const void *from, unsigned long n)
{
        prefetchw(to);
        if (access_ok(VERIFY_READ, from, n))
                return __copy_user(to,from,n);
        else
                memset(to, 0, n);
        return n;
}

#define copy_to_user(to,from,n) __generic_copy_to_user((to),(from),(n))
#define copy_from_user(to,from,n) __generic_copy_from_user((to),(from),(n))
#define __copy_to_user(to,from,n) __generic_copy_to_user_nocheck((to),(from),(n))
#define __copy_from_user(to,from,n) __generic_copy_from_user_nocheck((to),(from),(n))
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

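/*
 * Usage sketch (illustrative only; names are made up).  Both routines
 * return the number of bytes that could NOT be copied, so zero means
 * success:
 *
 *      long example_fetch_buf(void *dst, const void *usrc, unsigned long len)
 *      {
 *              if (copy_from_user(dst, usrc, len))
 *                      return -EFAULT;
 *              return 0;
 *      }
 */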

/*
 * We need to return the number of bytes not cleared. Our memset()
 * returns zero if a problem occurs while accessing user-space memory.
 * In that event, we report that no memory was cleared (i.e., we return
 * the full size). Otherwise, we return zero for success.
 */

static inline unsigned long
__xtensa_clear_user(void *addr, unsigned long size)
{
        if ( ! memset(addr, 0, size) )
                return size;
        return 0;
}

static inline unsigned long
clear_user(void *addr, unsigned long size)
{
        if (access_ok(VERIFY_WRITE, addr, size))
                return __xtensa_clear_user(addr, size);
        return size ? -EFAULT : 0;
}

#define __clear_user __xtensa_clear_user

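/*
 * Usage sketch (illustrative only): clear_user() returns 0 on success
 * and a non-zero value if the user range could not be written, e.g.:
 *
 *      if (clear_user(ubuf, len))
 *              return -EFAULT;
 */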

extern long __strncpy_user(char *, const char *, long);
#define __strncpy_from_user __strncpy_user

static inline long
strncpy_from_user(char *dst, const char *src, long count)
{
        if (access_ok(VERIFY_READ, src, 1))
                return __strncpy_from_user(dst, src, count);
        return -EFAULT;
}

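/*
 * Usage sketch (illustrative only; buffer name made up), assuming the
 * usual strncpy_from_user() return convention of "length copied, not
 * counting the terminating NUL, or a negative error":
 *
 *      char name[32];
 *      long len = strncpy_from_user(name, uname, sizeof(name) - 1);
 *      if (len < 0)
 *              return -EFAULT;
 *      name[len] = '\0';
 */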

#define strlen_user(str) strnlen_user((str), TASK_SIZE - 1)

/*
 * Return the size of a string (including the ending 0!)
 */
extern long __strnlen_user(const char *, long);

static inline long strnlen_user(const char *str, long len)
{
        unsigned long top = __kernel_ok ? ~0UL : TASK_SIZE - 1;

        if ((unsigned long)str > top)
                return 0;
        return __strnlen_user(str, len);
}

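/*
 * Usage sketch (illustrative only): strnlen_user() returns the string
 * length *including* the terminating NUL, or 0 if the user pointer is
 * not accessible:
 *
 *      long n = strnlen_user(ustr, PATH_MAX);
 *      if (n == 0)
 *              return -EFAULT;
 */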

struct exception_table_entry
{
        unsigned long insn, fixup;
};

/* Returns 0 if exception not found and fixup.unit otherwise.  */

extern unsigned long search_exception_table(unsigned long addr);
extern void sort_exception_table(void);

/* Returns the new pc */
#define fixup_exception(map_reg, fixup_unit, pc) \
({ \
        fixup_unit; \
})

#endif /* __ASSEMBLY__ */
#endif /* _XTENSA_UACCESS_H */