#ifndef _ASM_X86_UACCESS_H
#define _ASM_X86_UACCESS_H
/*
 * User space memory access functions
 */
#include <linux/errno.h>
#include <linux/compiler.h>
#include <linux/thread_info.h>
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>
#include <asm/smap.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not. If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(-1UL)
#define USER_DS		MAKE_MM_SEG(TASK_SIZE_MAX)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

#define segment_eq(a, b)	((a).seg == (b).seg)

#define user_addr_max() (current_thread_info()->addr_limit.seg)
#define __addr_ok(addr)		\
	((unsigned long __force)(addr) < user_addr_max())

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 */
static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
{
	/*
	 * If we have used "sizeof()" for the size,
	 * we know it won't overflow the limit (but
	 * it might overflow the 'addr', so it's
	 * important to subtract the size from the
	 * limit, not add it to the address).
	 */
	if (__builtin_constant_p(size))
		return unlikely(addr > limit - size);

	/* Arbitrary sizes? Be careful about overflow */
	addr += size;
	if (unlikely(addr < size))
		return true;
	return unlikely(addr > limit);
}
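
/*
 * Illustrative note (not in the original source): the subtraction above
 * matters because "addr + size" can wrap.  With addr = 0xffffffffffff0000
 * and size = 0x20000, addr + size wraps to 0x10000, which a naive
 * "addr + size > limit" test would accept even though the range runs off
 * the top of the address space.  "limit - size" cannot underflow for a
 * sizeof()-derived size, so "addr > limit - size" is safe.
 */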

#define __range_not_ok(addr, size, limit)				\
({									\
	__chk_user_ptr(addr);						\
	__chk_range_not_ok((unsigned long __force)(addr), size, limit); \
})

/**
 * access_ok: - Checks if a user space pointer is valid
 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE. Note that
 *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
 *        to write to a block, it is always safe to read from it.
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */
#define access_ok(type, addr, size) \
	likely(!__range_not_ok(addr, size, user_addr_max()))
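
/*
 * Usage sketch (illustrative only, not part of the original header):
 * validate a user pointer once, then use the non-checking accessors
 * defined further down.  "hypothetical_store_flag" is made up.
 *
 *	static long hypothetical_store_flag(int __user *uflag, int val)
 *	{
 *		if (!access_ok(VERIFY_WRITE, uflag, sizeof(*uflag)))
 *			return -EFAULT;
 *		return __put_user(val, uflag);
 *	}
 *
 * access_ok() only checks the range; the access itself may still fault
 * and return -EFAULT.
 */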

/*
 * The exception table consists of triples of addresses relative to the
 * exception table entry itself. The first address is of an instruction
 * that is allowed to fault, the second is the target at which the program
 * should continue. The third is a handler function to deal with the fault
 * caused by the instruction in the first field.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path. This means when everything is well,
 * we don't even have to jump over them. Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry {
	int insn, fixup, handler;
};

#define ARCH_HAS_RELATIVE_EXTABLE
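
/*
 * Illustrative sketch (assumed to mirror the decoding helpers in
 * arch/x86/mm/extable.c): a relative field is turned back into an
 * absolute address by adding its value to the field's own location.
 *
 *	static inline unsigned long
 *	ex_insn_addr(const struct exception_table_entry *x)
 *	{
 *		return (unsigned long)&x->insn + x->insn;
 *	}
 */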

extern int fixup_exception(struct pt_regs *regs, int trapnr);
extern bool ex_has_fault_handler(unsigned long ip);
extern int early_fixup_exception(unsigned long *ip);

/*
 * These are the main single-value transfer routines. They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */

extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_bad(void);

#define __uaccess_begin() stac()
#define __uaccess_end()   clac()

/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.
 */
#define __inttype(x) \
__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
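
/*
 * For example (illustrative): on a 32-bit kernel __inttype(u64) is
 * unsigned long long (sizeof(u64) > sizeof(0UL) there), while
 * __inttype(u32) is unsigned long; on 64-bit both fit in unsigned long.
 */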

/**
 * get_user: - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
/*
 * Careful: we have to cast the result to the type of the pointer
 * for sign reasons.
 *
 * The use of _ASM_DX as the register specifier is a bit of a
 * simplification, as gcc only cares about it as the starting point
 * and not size: for a 64-bit value it will use %ecx:%edx on 32 bits
 * (%ecx being the next register in gcc's x86 register sequence), and
 * %rdx on 64 bits.
 *
 * Clang/LLVM cares about the size of the register, but still wants
 * the base register for something that ends up being a pair.
 */
#define get_user(x, ptr)						\
({									\
	int __ret_gu;							\
	register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX);		\
	register void *__sp asm(_ASM_SP);				\
	__chk_user_ptr(ptr);						\
	might_fault();							\
	asm volatile("call __get_user_%P4"				\
		     : "=a" (__ret_gu), "=r" (__val_gu), "+r" (__sp)	\
		     : "0" (ptr), "i" (sizeof(*(ptr))));		\
	(x) = (__force __typeof__(*(ptr))) __val_gu;			\
	__builtin_expect(__ret_gu, 0);					\
})
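
/*
 * Usage sketch (illustrative; the function and its arguments are made up):
 *
 *	static long hypothetical_read_count(const int __user *uptr)
 *	{
 *		int count;
 *
 *		if (get_user(count, uptr))
 *			return -EFAULT;
 *		return count;
 *	}
 */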

#define __put_user_x(size, x, ptr, __ret_pu)			\
	asm volatile("call __put_user_" #size : "=a" (__ret_pu)	\
		     : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")



#ifdef CONFIG_X86_32
#define __put_user_asm_u64(x, addr, err, errret)			\
	asm volatile("\n"						\
		     "1: movl %%eax,0(%2)\n"				\
		     "2: movl %%edx,4(%2)\n"				\
		     "3:"						\
		     ".section .fixup,\"ax\"\n"				\
		     "4: movl %3,%0\n"					\
		     "   jmp 3b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 4b)				\
		     _ASM_EXTABLE(2b, 4b)				\
		     : "=r" (err)					\
		     : "A" (x), "r" (addr), "i" (errret), "0" (err))

#define __put_user_asm_ex_u64(x, addr)					\
	asm volatile("\n"						\
		     "1: movl %%eax,0(%1)\n"				\
		     "2: movl %%edx,4(%1)\n"				\
		     "3:"						\
		     _ASM_EXTABLE_EX(1b, 2b)				\
		     _ASM_EXTABLE_EX(2b, 3b)				\
		     : : "A" (x), "r" (addr))

#define __put_user_x8(x, ptr, __ret_pu)				\
	asm volatile("call __put_user_8" : "=a" (__ret_pu)	\
		     : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
#else
#define __put_user_asm_u64(x, ptr, retval, errret) \
	__put_user_asm(x, ptr, retval, "q", "", "er", errret)
#define __put_user_asm_ex_u64(x, addr)	\
	__put_user_asm_ex(x, addr, "q", "", "er")
#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
#endif

extern void __put_user_bad(void);

/*
 * Strange magic calling convention: pointer in %ecx,
 * value in %eax(:%edx), return value in %eax. clobbers %rbx
 */
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);

/**
 * put_user: - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user(x, ptr)					\
({								\
	int __ret_pu;						\
	__typeof__(*(ptr)) __pu_val;				\
	__chk_user_ptr(ptr);					\
	might_fault();						\
	__pu_val = x;						\
	switch (sizeof(*(ptr))) {				\
	case 1:							\
		__put_user_x(1, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 2:							\
		__put_user_x(2, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 4:							\
		__put_user_x(4, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 8:							\
		__put_user_x8(__pu_val, ptr, __ret_pu);		\
		break;						\
	default:						\
		__put_user_x(X, __pu_val, ptr, __ret_pu);	\
		break;						\
	}							\
	__builtin_expect(__ret_pu, 0);				\
})
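
/*
 * Usage sketch (illustrative; "hypothetical_store_answer" is made up).
 * No separate access_ok() is needed - put_user() performs the limit
 * check itself and returns -EFAULT on failure.
 *
 *	static long hypothetical_store_answer(int __user *uptr)
 *	{
 *		return put_user(42, uptr);
 *	}
 */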

#define __put_user_size(x, ptr, size, retval, errret)			\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_asm(x, ptr, retval, "b", "b", "iq", errret);	\
		break;							\
	case 2:								\
		__put_user_asm(x, ptr, retval, "w", "w", "ir", errret);	\
		break;							\
	case 4:								\
		__put_user_asm(x, ptr, retval, "l", "k", "ir", errret);	\
		break;							\
	case 8:								\
		__put_user_asm_u64((__typeof__(*ptr))(x), ptr, retval,	\
				   errret);				\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

/*
 * This doesn't do __uaccess_begin/end - the exception handling
 * around it must do that.
 */
#define __put_user_size_ex(x, ptr, size)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_asm_ex(x, ptr, "b", "b", "iq");		\
		break;							\
	case 2:								\
		__put_user_asm_ex(x, ptr, "w", "w", "ir");		\
		break;							\
	case 4:								\
		__put_user_asm_ex(x, ptr, "l", "k", "ir");		\
		break;							\
	case 8:								\
		__put_user_asm_ex_u64((__typeof__(*ptr))(x), ptr);	\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

#ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, retval, errret)			\
({									\
	__typeof__(ptr) __ptr = (ptr);					\
	asm volatile(ASM_STAC "\n"					\
		     "1: movl %2,%%eax\n"				\
		     "2: movl %3,%%edx\n"				\
		     "3: " ASM_CLAC "\n"				\
		     ".section .fixup,\"ax\"\n"				\
		     "4: mov %4,%0\n"					\
		     "   xorl %%eax,%%eax\n"				\
		     "   xorl %%edx,%%edx\n"				\
		     "   jmp 3b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 4b)				\
		     _ASM_EXTABLE(2b, 4b)				\
		     : "=r" (retval), "=A"(x)				\
		     : "m" (__m(__ptr)), "m" __m(((u32 *)(__ptr)) + 1),	\
		       "i" (errret), "0" (retval));			\
})

#define __get_user_asm_ex_u64(x, ptr)			(x) = __get_user_bad()
#else
#define __get_user_asm_u64(x, ptr, retval, errret) \
	 __get_user_asm(x, ptr, retval, "q", "", "=r", errret)
#define __get_user_asm_ex_u64(x, ptr) \
	 __get_user_asm_ex(x, ptr, "q", "", "=r")
#endif

#define __get_user_size(x, ptr, size, retval, errret)			\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm(x, ptr, retval, "b", "b", "=q", errret);	\
		break;							\
	case 2:								\
		__get_user_asm(x, ptr, retval, "w", "w", "=r", errret);	\
		break;							\
	case 4:								\
		__get_user_asm(x, ptr, retval, "l", "k", "=r", errret);	\
		break;							\
	case 8:								\
		__get_user_asm_u64(x, ptr, retval, errret);		\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
	asm volatile("\n"						\
		     "1: mov"itype" %2,%"rtype"1\n"			\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3: mov %3,%0\n"					\
		     "   xor"itype" %"rtype"1,%"rtype"1\n"		\
		     "   jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : "=r" (err), ltype(x)				\
		     : "m" (__m(addr)), "i" (errret), "0" (err))

/*
 * This doesn't do __uaccess_begin/end - the exception handling
 * around it must do that.
 */
#define __get_user_size_ex(x, ptr, size)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm_ex(x, ptr, "b", "b", "=q");		\
		break;							\
	case 2:								\
		__get_user_asm_ex(x, ptr, "w", "w", "=r");		\
		break;							\
	case 4:								\
		__get_user_asm_ex(x, ptr, "l", "k", "=r");		\
		break;							\
	case 8:								\
		__get_user_asm_ex_u64(x, ptr);				\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm_ex(x, addr, itype, rtype, ltype)			\
	asm volatile("1: mov"itype" %1,%"rtype"0\n"			\
		     "2:\n"						\
		     _ASM_EXTABLE_EX(1b, 2b)				\
		     : ltype(x) : "m" (__m(addr)))

#define __put_user_nocheck(x, ptr, size)			\
({								\
	int __pu_err;						\
	__uaccess_begin();					\
	__put_user_size((x), (ptr), (size), __pu_err, -EFAULT);	\
	__uaccess_end();					\
	__builtin_expect(__pu_err, 0);				\
})

#define __get_user_nocheck(x, ptr, size)				\
({									\
	int __gu_err;							\
	__inttype(*(ptr)) __gu_val;					\
	__uaccess_begin();						\
	__get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT);	\
	__uaccess_end();						\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
	__builtin_expect(__gu_err, 0);					\
})

/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
	asm volatile("\n"						\
		     "1: mov"itype" %"rtype"1,%2\n"			\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3: mov %3,%0\n"					\
		     "   jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : "=r"(err)					\
		     : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))

#define __put_user_asm_ex(x, addr, itype, rtype, ltype)			\
	asm volatile("1: mov"itype" %"rtype"0,%1\n"			\
		     "2:\n"						\
		     _ASM_EXTABLE_EX(1b, 2b)				\
		     : : ltype(x), "m" (__m(addr)))

/*
 * uaccess_try and catch
 */
#define uaccess_try	do {						\
	current_thread_info()->uaccess_err = 0;				\
	__uaccess_begin();						\
	barrier();

#define uaccess_catch(err)						\
	__uaccess_end();						\
	(err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0);	\
} while (0)

/**
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */

#define __get_user(x, ptr)						\
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))

/**
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */

#define __put_user(x, ptr)						\
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user_unaligned __get_user
#define __put_user_unaligned __put_user
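
/*
 * Usage sketch (illustrative): with one access_ok() covering both
 * fields, the __-prefixed variants skip the per-access limit check.
 * "struct hypothetical_pair" is made up for the example.
 *
 *	struct hypothetical_pair { int a; int b; };
 *
 *	static long hypothetical_read_pair(struct hypothetical_pair __user *up,
 *					   int *a, int *b)
 *	{
 *		if (!access_ok(VERIFY_READ, up, sizeof(*up)))
 *			return -EFAULT;
 *		if (__get_user(*a, &up->a) || __get_user(*b, &up->b))
 *			return -EFAULT;
 *		return 0;
 *	}
 */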

/*
 * {get|put}_user_try and catch
 *
 * get_user_try {
 *	get_user_ex(...);
 * } get_user_catch(err)
 */
#define get_user_try		uaccess_try
#define get_user_catch(err)	uaccess_catch(err)

#define get_user_ex(x, ptr)	do {					\
	unsigned long __gue_val;					\
	__get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr))));	\
	(x) = (__force __typeof__(*(ptr)))__gue_val;			\
} while (0)

#define put_user_try		uaccess_try
#define put_user_catch(err)	uaccess_catch(err)

#define put_user_ex(x, ptr)						\
	__put_user_size_ex((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

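/*
 * Usage sketch (illustrative; modeled on the signal-frame code that is
 * the main user of these macros - the "frame" layout here is made up):
 *
 *	int err = 0;
 *	unsigned long ax, ip;
 *
 *	get_user_try {
 *		get_user_ex(ax, &frame->ax);
 *		get_user_ex(ip, &frame->ip);
 *	} get_user_catch(err);
 *
 * All accesses inside the try block share one STAC/CLAC pair, and a
 * fault in any of them is reported once through "err".
 */
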
extern unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
extern __must_check long
strncpy_from_user(char *dst, const char __user *src, long count);

extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);

unsigned long __must_check clear_user(void __user *mem, unsigned long len);
unsigned long __must_check __clear_user(void __user *mem, unsigned long len);

extern void __cmpxchg_wrong_size(void)
	__compiletime_error("Bad argument size for cmpxchg");

#define __user_atomic_cmpxchg_inatomic(uval, ptr, old, new, size)	\
({									\
	int __ret = 0;							\
	__typeof__(ptr) __uval = (uval);				\
	__typeof__(*(ptr)) __old = (old);				\
	__typeof__(*(ptr)) __new = (new);				\
	__uaccess_begin();						\
	switch (size) {							\
	case 1:								\
	{								\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov %3, %0\n"				\
			"\tjmp 2b\n"					\
			"\t.previous\n"					\
			_ASM_EXTABLE(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "q" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	case 2:								\
	{								\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov %3, %0\n"				\
			"\tjmp 2b\n"					\
			"\t.previous\n"					\
			_ASM_EXTABLE(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	case 4:								\
	{								\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov %3, %0\n"				\
			"\tjmp 2b\n"					\
			"\t.previous\n"					\
			_ASM_EXTABLE(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	case 8:								\
	{								\
		if (!IS_ENABLED(CONFIG_X86_64))				\
			__cmpxchg_wrong_size();				\
									\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov %3, %0\n"				\
			"\tjmp 2b\n"					\
			"\t.previous\n"					\
			_ASM_EXTABLE(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	default:							\
		__cmpxchg_wrong_size();					\
	}								\
	__uaccess_end();						\
	*__uval = __old;						\
	__ret;								\
})

#define user_atomic_cmpxchg_inatomic(uval, ptr, old, new)		\
({									\
	access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) ?		\
		__user_atomic_cmpxchg_inatomic((uval), (ptr),		\
				(old), (new), sizeof(*(ptr))) :		\
		-EFAULT;						\
})
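
/*
 * Usage sketch (illustrative; a futex-style compare-and-swap on a user
 * word - "hypothetical_try_claim" is made up):
 *
 *	static int hypothetical_try_claim(u32 __user *uaddr, u32 tid)
 *	{
 *		u32 prev;
 *		int ret;
 *
 *		ret = user_atomic_cmpxchg_inatomic(&prev, uaddr, 0, tid);
 *		if (ret)
 *			return ret;
 *		return prev == 0 ? 0 : -EBUSY;
 *	}
 *
 * ret is -EFAULT on a bad pointer or fault; on success prev receives the
 * value that was in *uaddr before the cmpxchg.
 */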

/*
 * movsl can be slow when source and dest are not both 8-byte aligned
 */
#ifdef CONFIG_X86_INTEL_USERCOPY
extern struct movsl_mask {
	int mask;
} ____cacheline_aligned_in_smp movsl_mask;
#endif

#define ARCH_HAS_NOCACHE_UACCESS 1

#ifdef CONFIG_X86_32
# include <asm/uaccess_32.h>
#else
# include <asm/uaccess_64.h>
#endif

unsigned long __must_check _copy_from_user(void *to, const void __user *from,
					   unsigned n);
unsigned long __must_check _copy_to_user(void __user *to, const void *from,
					 unsigned n);

#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
# define copy_user_diag __compiletime_error
#else
# define copy_user_diag __compiletime_warning
#endif

extern void copy_user_diag("copy_from_user() buffer size is too small")
copy_from_user_overflow(void);
extern void copy_user_diag("copy_to_user() buffer size is too small")
copy_to_user_overflow(void) __asm__("copy_from_user_overflow");

#undef copy_user_diag

#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS

extern void
__compiletime_warning("copy_from_user() buffer size is not provably correct")
__copy_from_user_overflow(void) __asm__("copy_from_user_overflow");
#define __copy_from_user_overflow(size, count) __copy_from_user_overflow()

extern void
__compiletime_warning("copy_to_user() buffer size is not provably correct")
__copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
#define __copy_to_user_overflow(size, count) __copy_to_user_overflow()

#else

static inline void
__copy_from_user_overflow(int size, unsigned long count)
{
	WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
}

#define __copy_to_user_overflow __copy_from_user_overflow

#endif

static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
	int sz = __compiletime_object_size(to);

	might_fault();

	/*
	 * While we would like to have the compiler do the checking for us
	 * even in the non-constant size case, any false positives there are
	 * a problem (especially when DEBUG_STRICT_USER_COPY_CHECKS, but even
	 * without - the [hopefully] dangerous looking nature of the warning
	 * would make people go look at the respective call sites over and
	 * over again just to find that there's no problem).
	 *
	 * And there are cases where it's just not realistic for the compiler
	 * to prove the count to be in range. For example when multiple call
	 * sites of a helper function - perhaps in different source files -
	 * all doing proper range checking, yet the helper function not doing
	 * so again.
	 *
	 * Therefore limit the compile time checking to the constant size
	 * case, and do only runtime checking for non-constant sizes.
	 */

	if (likely(sz < 0 || sz >= n))
		n = _copy_from_user(to, from, n);
	else if (__builtin_constant_p(n))
		copy_from_user_overflow();
	else
		__copy_from_user_overflow(sz, n);

	return n;
}

static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
	int sz = __compiletime_object_size(from);

	might_fault();

	/* See the comment in copy_from_user() above. */
	if (likely(sz < 0 || sz >= n))
		n = _copy_to_user(to, from, n);
	else if (__builtin_constant_p(n))
		copy_to_user_overflow();
	else
		__copy_to_user_overflow(sz, n);

	return n;
}
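
/*
 * Usage sketch (illustrative; "struct hypothetical_args" and the handler
 * are made up).  Note the return convention: both functions return the
 * number of bytes NOT copied, so any nonzero result means failure.
 *
 *	struct hypothetical_args { int a, b, result; };
 *
 *	static long hypothetical_ioctl_handler(void __user *uarg)
 *	{
 *		struct hypothetical_args args;
 *
 *		if (copy_from_user(&args, uarg, sizeof(args)))
 *			return -EFAULT;
 *		args.result = args.a + args.b;
 *		if (copy_to_user(uarg, &args, sizeof(args)))
 *			return -EFAULT;
 *		return 0;
 *	}
 */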

#undef __copy_from_user_overflow
#undef __copy_to_user_overflow

/*
 * We rely on the nested NMI work to allow atomic faults from the NMI path; the
 * nested NMI paths are careful to preserve CR2.
 *
 * Caller must use pagefault_enable/disable, or run in interrupt context,
 * and also do an access_ok() check.
 */
#define __copy_from_user_nmi __copy_from_user_inatomic

/*
 * The "unsafe" user accesses aren't really "unsafe", but the naming
 * is a big fat warning: you have to not only do the access_ok()
 * checking before using them, but you have to surround them with the
 * user_access_begin/end() pair.
 */
#define user_access_begin()	__uaccess_begin()
#define user_access_end()	__uaccess_end()

#define unsafe_put_user(x, ptr)						\
({									\
	int __pu_err;							\
	__put_user_size((x), (ptr), sizeof(*(ptr)), __pu_err, -EFAULT);	\
	__builtin_expect(__pu_err, 0);					\
})

#define unsafe_get_user(x, ptr)						\
({									\
	int __gu_err;							\
	unsigned long __gu_val;						\
	__get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err, -EFAULT);	\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
	__builtin_expect(__gu_err, 0);					\
})
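
/*
 * Usage sketch (illustrative; "hypothetical_store_two" is made up):
 * batching two stores under one user_access_begin/end() pair after a
 * single access_ok() check.
 *
 *	static int hypothetical_store_two(u32 __user *uaddr, u32 lo, u32 hi)
 *	{
 *		int err = 0;
 *
 *		if (!access_ok(VERIFY_WRITE, uaddr, 2 * sizeof(u32)))
 *			return -EFAULT;
 *		user_access_begin();
 *		err |= unsafe_put_user(lo, uaddr);
 *		err |= unsafe_put_user(hi, uaddr + 1);
 *		user_access_end();
 *		return err ? -EFAULT : 0;
 *	}
 */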

#endif /* _ASM_X86_UACCESS_H */