blob: 4353b2267a0247d593b92e53cbb3c66ac1e3b601 [file] [log] [blame]
Glauber Costaca233862008-06-13 14:39:25 -03001#ifndef _ASM_UACCES_H_
2#define _ASM_UACCES_H_
3/*
4 * User space memory access functions
5 */
6#include <linux/errno.h>
7#include <linux/compiler.h>
8#include <linux/thread_info.h>
9#include <linux/prefetch.h>
10#include <linux/string.h>
11#include <asm/asm.h>
12#include <asm/page.h>
13
/*
 * Access-type arguments for access_ok().  Per the kernel-doc below,
 * VERIFY_WRITE is a superset of VERIFY_READ: if a block is safe to
 * write, it is safe to read.
 */
#define VERIFY_READ 0
#define VERIFY_WRITE 1
16
17/*
18 * The fs value determines whether argument validity checking should be
19 * performed or not. If get_fs() == USER_DS, checking is performed, with
20 * get_fs() == KERNEL_DS, checking is bypassed.
21 *
22 * For historical reasons, these macros are grossly misnamed.
23 */
24
/* Wrap a raw address-limit value in an mm_segment_t. */
#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })

/* KERNEL_DS: no limit (all ones); USER_DS: addresses below PAGE_OFFSET. */
#define KERNEL_DS MAKE_MM_SEG(-1UL)
#define USER_DS MAKE_MM_SEG(PAGE_OFFSET)

#define get_ds() (KERNEL_DS)
/* The per-task limit lives in thread_info->addr_limit. */
#define get_fs() (current_thread_info()->addr_limit)
#define set_fs(x) (current_thread_info()->addr_limit = (x))

/* Compare two mm_segment_t values for equality. */
#define segment_eq(a, b) ((a).seg == (b).seg)
35
/*
 * True if @addr lies strictly below the current task's address limit.
 * Note this checks only the start address, not a length - use
 * __range_not_ok()/access_ok() for ranges.
 */
#define __addr_ok(addr) \
	((unsigned long __force)(addr) < \
	 (current_thread_info()->addr_limit.seg))
39
/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 *
 * This is equivalent to the following test:
 * (u33)addr + (u33)size >= (u33)current->addr_limit.seg (u65 for x86_64)
 *
 * This needs 33-bit (65-bit for x86_64) arithmetic. We have a carry...
 *
 * How the asm below does it: "add %3,%1" computes addr+size into
 * roksum, setting the carry flag on wrap-around; "sbb %0,%0" turns
 * that carry into flag = -1 (all ones) or 0.  "cmp %1,%4" then sets
 * carry when addr_limit.seg < roksum, and the final "sbb $0,%0"
 * subtracts one more in that case.  So flag ends up 0 exactly when
 * the sum neither wrapped nor exceeded the limit.
 */

#define __range_not_ok(addr, size)					\
({									\
	unsigned long flag, roksum;					\
	__chk_user_ptr(addr);						\
	asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0"		\
	    : "=&r" (flag), "=r" (roksum)				\
	    : "1" (addr), "g" ((long)(size)),				\
	      "rm" (current_thread_info()->addr_limit.seg));		\
	flag;								\
})
60
/**
 * access_ok: - Checks if a user space pointer is valid
 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE. Note that
 * %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
 * to write to a block, it is always safe to read from it.
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only. This function may sleep.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 *
 * Implementation note: on x86 @type is not used at all - the expansion
 * below applies the same range check for reads and writes.
 */
#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
81
82/*
83 * The exception table consists of pairs of addresses: the first is the
84 * address of an instruction that is allowed to fault, and the second is
85 * the address at which the program should continue. No registers are
86 * modified, so it is entirely up to the continuation code to figure out
87 * what to do.
88 *
89 * All the routines below use bits of fixup code that are out of line
90 * with the main instruction path. This means when everything is well,
91 * we don't even have to jump over them. Further, they do not intrude
92 * on our cache or tlb entries.
93 */
94
struct exception_table_entry {
	/* insn: address allowed to fault; fixup: where to resume. */
	unsigned long insn, fixup;
};

/*
 * Look up regs->ip in the exception table and apply its fixup if one
 * exists; presumably returns nonzero when a fixup was applied - see
 * the arch's extable/fault code for the exact contract.
 */
extern int fixup_exception(struct pt_regs *regs);
100
101/*
102 * These are the main single-value transfer routines. They automatically
103 * use the right size if we just have the right pointer type.
104 *
105 * This gets kind of ugly. We want to return _two_ values in "get_user()"
106 * and yet we don't want to do any pointers, because that is too much
107 * of a performance impact. Thus we have a few rather ugly macros here,
108 * and hide all the ugliness from the user.
109 *
110 * The "__xxx" versions of the user access functions are versions that
111 * do not verify the address space, that must have been done previously
112 * with a separate "access_ok()" call (this is used when we do multiple
113 * accesses to the same area of user memory).
114 */
115
/*
 * Out-of-line get_user helpers, one per operand width.  Per the
 * __get_user_x() call below: the user pointer is passed in %eax/%rax
 * ("0" ties it to ret's register), the error code comes back in %eax
 * and the value in %edx.  __get_user_bad() is presumably left
 * undefined so that unsupported sizes fail at link time - TODO confirm
 * against the helpers' assembly implementation.
 */
extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_bad(void);
121
/*
 * Dispatch to the out-of-line __get_user_<size> helper: pointer in
 * %eax/%rax on entry (input "0" aliases output %0), error code
 * returned in @ret (%eax), fetched value in @x (%edx).
 *
 * Fix: dropped the stray trailing '\' after the closing paren - line
 * splicing made the macro definition absorb the comment that follows
 * it in the file.  Also added the conventional space after the comma
 * between output operands.
 */
#define __get_user_x(size, ret, x, ptr)					\
	asm volatile("call __get_user_" #size				\
		     : "=a" (ret), "=d" (x)				\
		     : "0" (ptr))
126
Glauber Costa865e5b72008-06-25 11:05:11 -0300127/* Careful: we have to cast the result to the type of the pointer
128 * for sign reasons */
129
130/**
131 * get_user: - Get a simple variable from user space.
132 * @x: Variable to store result.
133 * @ptr: Source address, in user space.
134 *
135 * Context: User context only. This function may sleep.
136 *
137 * This macro copies a single simple variable from user space to kernel
138 * space. It supports simple types like char and int, but not larger
139 * data types like structures or arrays.
140 *
141 * @ptr must have pointer-to-simple-variable type, and the result of
142 * dereferencing @ptr must be assignable to @x without a cast.
143 *
144 * Returns zero on success, or -EFAULT on error.
145 * On error, the variable @x is set to zero.
146 */
#ifdef CONFIG_X86_32
/*
 * 32-bit: no 8-byte helper is wired up here; "X" expands to
 * "call __get_user_X", a symbol that presumably does not exist, so
 * misuse is caught at build/link time instead of silently truncating.
 */
#define __get_user_8(__ret_gu, __val_gu, ptr) \
	__get_user_x(X, __ret_gu, __val_gu, ptr)
#else
/* 64-bit: dispatch to the real 8-byte helper. */
#define __get_user_8(__ret_gu, __val_gu, ptr) \
	__get_user_x(8, __ret_gu, __val_gu, ptr)
#endif
154
/*
 * Implementation of the kernel-doc'd get_user() above: select the
 * out-of-line helper matching sizeof(*ptr).  The raw value travels
 * through an unsigned long (__val_gu) and is cast back to the pointee
 * type at the end - hence the sign/cast caveats in the kernel-doc.
 * Unsupported sizes hit the default case and expand to a call to
 * __get_user_X (presumably unresolvable, failing the build).
 * Evaluates to 0 on success or the helper's error code (-EFAULT).
 */
#define get_user(x, ptr)						\
({									\
	int __ret_gu;							\
	unsigned long __val_gu;						\
	__chk_user_ptr(ptr);						\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__get_user_x(1, __ret_gu, __val_gu, ptr);		\
		break;							\
	case 2:								\
		__get_user_x(2, __ret_gu, __val_gu, ptr);		\
		break;							\
	case 4:								\
		__get_user_x(4, __ret_gu, __val_gu, ptr);		\
		break;							\
	case 8:								\
		__get_user_8(__ret_gu, __val_gu, ptr);			\
		break;							\
	default:							\
		__get_user_x(X, __ret_gu, __val_gu, ptr);		\
		break;							\
	}								\
	(x) = (__typeof__(*(ptr)))__val_gu;				\
	__ret_gu;							\
})
180
#ifdef CONFIG_X86_32
/*
 * 8-byte store to user space on 32-bit: the 64-bit value arrives in
 * %edx:%eax (constraint "A") and is written as two movl's.  Either
 * store may fault; both exception-table entries route to label 4,
 * which loads -EFAULT into @err and resumes at 3 (after the stores).
 */
#define __put_user_u64(x, addr, err) \
	asm volatile("1: movl %%eax,0(%2)\n" \
		     "2: movl %%edx,4(%2)\n" \
		     "3:\n" \
		     ".section .fixup,\"ax\"\n" \
		     "4: movl %3,%0\n" \
		     " jmp 3b\n" \
		     ".previous\n" \
		     _ASM_EXTABLE(1b, 4b) \
		     _ASM_EXTABLE(2b, 4b) \
		     : "=r" (err) \
		     : "A" (x), "r" (addr), "i" (-EFAULT), "0" (err))
#else
/* 64-bit: a single guarded quadword store does the job. */
#define __put_user_u64(x, ptr, retval) \
	__put_user_asm(x, ptr, retval, "q", "", "Zr", -EFAULT)
#endif
198
#ifdef CONFIG_X86_WP_WORKS_OK

/*
 * __put_user_size: store @x (of width @size) at user pointer @ptr,
 * setting @retval to 0 on success or @errret on fault.  Used when the
 * WP bit protects supervisor-mode writes (per the config symbol's
 * name), so we can store directly and rely on the exception table.
 * (__put_user_bad() is presumably declared in the width-specific
 * headers included at the bottom of this file - verify.)
 *
 * Fix: parenthesized the macro argument in __typeof__(*(ptr)) - the
 * old __typeof__(*ptr) mis-parses for arguments such as "p + 1", and
 * the non-WP variant below already spells it *(ptr).
 */
#define __put_user_size(x, ptr, size, retval, errret)			\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_asm(x, ptr, retval, "b", "b", "iq", errret);	\
		break;							\
	case 2:								\
		__put_user_asm(x, ptr, retval, "w", "w", "ir", errret);	\
		break;							\
	case 4:								\
		__put_user_asm(x, ptr, retval, "l", "k", "ir", errret);	\
		break;							\
	case 8:								\
		__put_user_u64((__typeof__(*(ptr)))(x), ptr, retval);	\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

#else

/*
 * Fallback for CPUs where WP does not protect supervisor-mode writes:
 * bounce the value through a kernel temporary and let
 * __copy_to_user_ll() do the checked copy.
 */
#define __put_user_size(x, ptr, size, retval, errret)			\
do {									\
	__typeof__(*(ptr))__pus_tmp = x;				\
	retval = 0;							\
									\
	if (unlikely(__copy_to_user_ll(ptr, &__pus_tmp, size) != 0))	\
		retval = errret;					\
} while (0)

#endif
235
#ifdef CONFIG_X86_32
/*
 * No 8-byte inline user load on 32-bit: assigning the result of
 * __get_user_bad() (presumably undefined) breaks the build for this
 * case instead of silently mis-loading.
 */
#define __get_user_asm_u64(x, ptr, retval, errret) (x) = __get_user_bad()
#else
/* 64-bit: one guarded quadword load. */
#define __get_user_asm_u64(x, ptr, retval, errret) \
	__get_user_asm(x, ptr, retval, "q", "", "=r", errret)
#endif
242
/*
 * Inline (unchecked) load dispatcher: pick a guarded mov of the right
 * width for @size.  @retval becomes 0 on success or @errret on fault.
 * Unsupported sizes assign the result of __get_user_bad() (presumably
 * undefined) so the build cannot silently succeed.
 */
#define __get_user_size(x, ptr, size, retval, errret)			\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm(x, ptr, retval, "b", "b", "=q", errret);	\
		break;							\
	case 2:								\
		__get_user_asm(x, ptr, retval, "w", "w", "=r", errret);	\
		break;							\
	case 4:								\
		__get_user_asm(x, ptr, retval, "l", "k", "=r", errret);	\
		break;							\
	case 8:								\
		__get_user_asm_u64(x, ptr, retval, errret);		\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)
264
/*
 * One guarded mov from user memory into a register.
 * @itype: instruction width suffix (b/w/l/q); @rtype: register-width
 * prefix for operand 1; @ltype: constraint string for destination @x.
 * On a fault the fixup at 3: stores @errret into @err and zeroes the
 * destination register - callers never see stale data - then resumes
 * after the load.
 */
#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
	asm volatile("1: mov"itype" %2,%"rtype"1\n"			\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3: mov %3,%0\n"					\
		     " xor"itype" %"rtype"1,%"rtype"1\n"		\
		     " jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : "=r" (err), ltype(x)				\
		     : "m" (__m(addr)), "i" (errret), "0" (err))
276
/*
 * Store @x at user pointer @ptr without an access_ok() check - the
 * caller must have validated the range already.  Evaluates to 0 on
 * success or -EFAULT.
 */
#define __put_user_nocheck(x, ptr, size)				\
({									\
	long __pu_err;							\
	__put_user_size((x), (ptr), (size), __pu_err, -EFAULT);		\
	__pu_err;							\
})
283
/*
 * Fetch from user pointer @ptr without an access_ok() check.  The raw
 * bits travel through an unsigned long and are force-cast back to the
 * pointee type.  Evaluates to 0 on success or -EFAULT.
 */
#define __get_user_nocheck(x, ptr, size)				\
({									\
	long __gu_err;							\
	unsigned long __gu_val;						\
	__get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT);	\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
	__gu_err;							\
})
Glauber Costadc70ddf2008-06-25 11:48:29 -0300292
/* FIXME: this hack is definitely wrong -AK */
/*
 * __m(x) re-types a user address as a pointer to a 100-word struct so
 * that "m" constraints in the asm above describe a sizeable memory
 * region rather than a single word - presumably to keep gcc's alias
 * analysis honest about what the asm may touch (see FIXME).
 */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))
296
/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
/*
 * One guarded mov from a register to user memory.  On a fault the
 * fixup at 3: loads @errret into @err and resumes after the store
 * (which is simply skipped).  @itype/@rtype select the operand width,
 * @ltype is the constraint string for source operand @x.
 */
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
	asm volatile("1: mov"itype" %"rtype"1,%2\n"			\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3: mov %3,%0\n"					\
		     " jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : "=r"(err)					\
		     : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
312
Glauber Costa865e5b72008-06-25 11:05:11 -0300313
Thomas Gleixner96a388d2007-10-11 11:20:03 +0200314#ifdef CONFIG_X86_32
315# include "uaccess_32.h"
316#else
317# include "uaccess_64.h"
318#endif
Glauber Costaca233862008-06-13 14:39:25 -0300319
320#endif