#ifndef _ASM_X86_UACCESS_H
#define _ASM_X86_UACCESS_H
/*
 * User space memory access functions
 */
#include <linux/errno.h>
#include <linux/compiler.h>
#include <linux/thread_info.h>
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>
#include <asm/smap.h>

#define VERIFY_READ	0
#define VERIFY_WRITE	1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(-1UL)
#define USER_DS		MAKE_MM_SEG(TASK_SIZE_MAX)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

#define segment_eq(a, b)	((a).seg == (b).seg)

#define user_addr_max() (current_thread_info()->addr_limit.seg)
#define __addr_ok(addr)		\
	((unsigned long __force)(addr) < user_addr_max())

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 */
static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
{
	/*
	 * If we have used "sizeof()" for the size,
	 * we know it won't overflow the limit (but
	 * it might overflow the 'addr', so it's
	 * important to subtract the size from the
	 * limit, not add it to the address).
	 */
	if (__builtin_constant_p(size))
		return unlikely(addr > limit - size);

	/* Arbitrary sizes? Be careful about overflow */
	addr += size;
	if (unlikely(addr < size))
		return true;
	return unlikely(addr > limit);
}

#define __range_not_ok(addr, size, limit)				\
({									\
	__chk_user_ptr(addr);						\
	__chk_range_not_ok((unsigned long __force)(addr), size, limit); \
})
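
/*
 * A worked example of the wraparound hazard the code above guards
 * against (illustrative values, not from this file): with addr = ~0UL
 * and size = 8, "addr + size" wraps to 7, which would wrongly pass a
 * naive "addr + size <= limit" test.  The constant-size path avoids
 * the wrap by checking "addr > limit - size"; the variable-size path
 * detects it because the wrapped sum is smaller than size itself.
 */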

/**
 * access_ok: - Checks if a user space pointer is valid
 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
 *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
 *        to write to a block, it is always safe to read from it.
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */
#define access_ok(type, addr, size) \
	likely(!__range_not_ok(addr, size, user_addr_max()))
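
/*
 * Example (an illustrative sketch; "ubuf" and "len" are hypothetical):
 * validate a user buffer once, e.g. before a series of __get_user() or
 * __put_user() calls that skip the per-access check.
 *
 *	if (!access_ok(VERIFY_WRITE, ubuf, len))
 *		return -EFAULT;
 */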

/*
 * The exception table consists of triples of addresses relative to the
 * exception table entry itself. The first address is of an instruction
 * that is allowed to fault, the second is the target at which the program
 * should continue. The third is a handler function to deal with the fault
 * caused by the instruction in the first field.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path. This means when everything is well,
 * we don't even have to jump over them. Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry {
	int insn, fixup, handler;
};

#define ARCH_HAS_RELATIVE_EXTABLE
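
/*
 * Because the entries are self-relative, an absolute address is
 * recovered by adding a field's offset to the address of the field
 * itself, roughly like this (a sketch of how the fixup code resolves
 * entries; see arch/x86/mm/extable.c for the real helpers):
 *
 *	static inline unsigned long
 *	ex_fixup_addr(const struct exception_table_entry *x)
 *	{
 *		return (unsigned long)&x->fixup + x->fixup;
 *	}
 */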

#define swap_ex_entry_fixup(a, b, tmp, delta)			\
	do {							\
		(a)->fixup = (b)->fixup + (delta);		\
		(b)->fixup = (tmp).fixup - (delta);		\
		(a)->handler = (b)->handler + (delta);		\
		(b)->handler = (tmp).handler - (delta);		\
	} while (0)

extern int fixup_exception(struct pt_regs *regs, int trapnr);
extern bool ex_has_fault_handler(unsigned long ip);
extern int early_fixup_exception(unsigned long *ip);

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */

extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_bad(void);

#define __uaccess_begin() stac()
#define __uaccess_end()   clac()

/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.
 */
#define __inttype(x) \
__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
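
/*
 * For example: on 32-bit kernels __inttype(u64) is unsigned long long
 * while __inttype(int) is unsigned long; on 64-bit kernels every type
 * up to 8 bytes resolves to unsigned long.
 */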

/**
 * get_user: - Get a simple variable from user space.
 * @x: Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
/*
 * Careful: we have to cast the result to the type of the pointer
 * for sign reasons.
 *
 * The use of _ASM_DX as the register specifier is a bit of a
 * simplification, as gcc only cares about it as the starting point
 * and not size: for a 64-bit value it will use %ecx:%edx on 32 bits
 * (%ecx being the next register in gcc's x86 register sequence), and
 * %rdx on 64 bits.
 *
 * Clang/LLVM cares about the size of the register, but still wants
 * the base register for something that ends up being a pair.
 */
#define get_user(x, ptr)						\
({									\
	int __ret_gu;							\
	register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX);		\
	register void *__sp asm(_ASM_SP);				\
	__chk_user_ptr(ptr);						\
	might_fault();							\
	asm volatile("call __get_user_%P4"				\
		     : "=a" (__ret_gu), "=r" (__val_gu), "+r" (__sp)	\
		     : "0" (ptr), "i" (sizeof(*(ptr))));		\
	(x) = (__force __typeof__(*(ptr))) __val_gu;			\
	__builtin_expect(__ret_gu, 0);					\
})
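
/*
 * Example (illustrative; "val" and "uptr" are hypothetical, with uptr
 * an int __user * supplied by the caller):
 *
 *	int val;
 *
 *	if (get_user(val, uptr))
 *		return -EFAULT;
 */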

#define __put_user_x(size, x, ptr, __ret_pu)			\
	asm volatile("call __put_user_" #size : "=a" (__ret_pu)	\
		     : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")

#ifdef CONFIG_X86_32
#define __put_user_asm_u64(x, addr, err, errret)			\
	asm volatile("\n"						\
		     "1:	movl %%eax,0(%2)\n"			\
		     "2:	movl %%edx,4(%2)\n"			\
		     "3:"						\
		     ".section .fixup,\"ax\"\n"				\
		     "4:	movl %3,%0\n"				\
		     "	jmp 3b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 4b)				\
		     _ASM_EXTABLE(2b, 4b)				\
		     : "=r" (err)					\
		     : "A" (x), "r" (addr), "i" (errret), "0" (err))

#define __put_user_asm_ex_u64(x, addr)					\
	asm volatile("\n"						\
		     "1:	movl %%eax,0(%1)\n"			\
		     "2:	movl %%edx,4(%1)\n"			\
		     "3:"						\
		     _ASM_EXTABLE_EX(1b, 2b)				\
		     _ASM_EXTABLE_EX(2b, 3b)				\
		     : : "A" (x), "r" (addr))

#define __put_user_x8(x, ptr, __ret_pu)				\
	asm volatile("call __put_user_8" : "=a" (__ret_pu)	\
		     : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
#else
#define __put_user_asm_u64(x, ptr, retval, errret) \
	__put_user_asm(x, ptr, retval, "q", "", "er", errret)
#define __put_user_asm_ex_u64(x, addr)	\
	__put_user_asm_ex(x, addr, "q", "", "er")
#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
#endif

extern void __put_user_bad(void);

/*
 * Strange magic calling convention: pointer in %ecx,
 * value in %eax(:%edx), return value in %eax. clobbers %rbx
 */
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);

/**
 * put_user: - Write a simple value into user space.
 * @x: Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user(x, ptr)					\
({								\
	int __ret_pu;						\
	__typeof__(*(ptr)) __pu_val;				\
	__chk_user_ptr(ptr);					\
	might_fault();						\
	__pu_val = x;						\
	switch (sizeof(*(ptr))) {				\
	case 1:							\
		__put_user_x(1, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 2:							\
		__put_user_x(2, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 4:							\
		__put_user_x(4, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 8:							\
		__put_user_x8(__pu_val, ptr, __ret_pu);		\
		break;						\
	default:						\
		__put_user_x(X, __pu_val, ptr, __ret_pu);	\
		break;						\
	}							\
	__builtin_expect(__ret_pu, 0);				\
})
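
/*
 * Example (illustrative; "val" and "uptr" are hypothetical):
 *
 *	if (put_user(val, uptr))
 *		return -EFAULT;
 */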

#define __put_user_size(x, ptr, size, retval, errret)			\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_asm(x, ptr, retval, "b", "b", "iq", errret);	\
		break;							\
	case 2:								\
		__put_user_asm(x, ptr, retval, "w", "w", "ir", errret);	\
		break;							\
	case 4:								\
		__put_user_asm(x, ptr, retval, "l", "k", "ir", errret);	\
		break;							\
	case 8:								\
		__put_user_asm_u64((__typeof__(*ptr))(x), ptr, retval,	\
				   errret);				\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

/*
 * This doesn't do __uaccess_begin/end - the exception handling
 * around it must do that.
 */
#define __put_user_size_ex(x, ptr, size)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_asm_ex(x, ptr, "b", "b", "iq");		\
		break;							\
	case 2:								\
		__put_user_asm_ex(x, ptr, "w", "w", "ir");		\
		break;							\
	case 4:								\
		__put_user_asm_ex(x, ptr, "l", "k", "ir");		\
		break;							\
	case 8:								\
		__put_user_asm_ex_u64((__typeof__(*ptr))(x), ptr);	\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

#ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, retval, errret)			\
({									\
	__typeof__(ptr) __ptr = (ptr);					\
	asm volatile(ASM_STAC "\n"					\
		     "1:	movl %2,%%eax\n"			\
		     "2:	movl %3,%%edx\n"			\
		     "3: " ASM_CLAC "\n"				\
		     ".section .fixup,\"ax\"\n"				\
		     "4:	mov %4,%0\n"				\
		     "	xorl %%eax,%%eax\n"				\
		     "	xorl %%edx,%%edx\n"				\
		     "	jmp 3b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 4b)				\
		     _ASM_EXTABLE(2b, 4b)				\
		     : "=r" (retval), "=A"(x)				\
		     : "m" (__m(__ptr)), "m" __m(((u32 *)(__ptr)) + 1),	\
		       "i" (errret), "0" (retval));			\
})

#define __get_user_asm_ex_u64(x, ptr)			(x) = __get_user_bad()
#else
#define __get_user_asm_u64(x, ptr, retval, errret) \
	__get_user_asm(x, ptr, retval, "q", "", "=r", errret)
#define __get_user_asm_ex_u64(x, ptr) \
	__get_user_asm_ex(x, ptr, "q", "", "=r")
#endif

#define __get_user_size(x, ptr, size, retval, errret)			\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm(x, ptr, retval, "b", "b", "=q", errret);	\
		break;							\
	case 2:								\
		__get_user_asm(x, ptr, retval, "w", "w", "=r", errret);	\
		break;							\
	case 4:								\
		__get_user_asm(x, ptr, retval, "l", "k", "=r", errret);	\
		break;							\
	case 8:								\
		__get_user_asm_u64(x, ptr, retval, errret);		\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
	asm volatile("\n"						\
		     "1:	mov"itype" %2,%"rtype"1\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:	mov %3,%0\n"				\
		     "	xor"itype" %"rtype"1,%"rtype"1\n"		\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : "=r" (err), ltype(x)				\
		     : "m" (__m(addr)), "i" (errret), "0" (err))

/*
 * This doesn't do __uaccess_begin/end - the exception handling
 * around it must do that.
 */
#define __get_user_size_ex(x, ptr, size)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm_ex(x, ptr, "b", "b", "=q");		\
		break;							\
	case 2:								\
		__get_user_asm_ex(x, ptr, "w", "w", "=r");		\
		break;							\
	case 4:								\
		__get_user_asm_ex(x, ptr, "l", "k", "=r");		\
		break;							\
	case 8:								\
		__get_user_asm_ex_u64(x, ptr);				\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm_ex(x, addr, itype, rtype, ltype)			\
	asm volatile("1:	mov"itype" %1,%"rtype"0\n"		\
		     "2:\n"						\
		     _ASM_EXTABLE_EX(1b, 2b)				\
		     : ltype(x) : "m" (__m(addr)))

#define __put_user_nocheck(x, ptr, size)			\
({								\
	int __pu_err;						\
	__uaccess_begin();					\
	__put_user_size((x), (ptr), (size), __pu_err, -EFAULT);	\
	__uaccess_end();					\
	__builtin_expect(__pu_err, 0);				\
})

#define __get_user_nocheck(x, ptr, size)				\
({									\
	int __gu_err;							\
	__inttype(*(ptr)) __gu_val;					\
	__uaccess_begin();						\
	__get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT);	\
	__uaccess_end();						\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
	__builtin_expect(__gu_err, 0);					\
})

/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
	asm volatile("\n"						\
		     "1:	mov"itype" %"rtype"1,%2\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:	mov %3,%0\n"				\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : "=r"(err)					\
		     : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))

#define __put_user_asm_ex(x, addr, itype, rtype, ltype)			\
	asm volatile("1:	mov"itype" %"rtype"0,%1\n"		\
		     "2:\n"						\
		     _ASM_EXTABLE_EX(1b, 2b)				\
		     : : ltype(x), "m" (__m(addr)))

/*
 * uaccess_try and catch
 */
#define uaccess_try	do {						\
	current_thread_info()->uaccess_err = 0;				\
	__uaccess_begin();						\
	barrier();

#define uaccess_catch(err)						\
	__uaccess_end();						\
	(err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0);	\
} while (0)
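
/*
 * Note that uaccess_try opens a "do {" block that only the matching
 * uaccess_catch() closes, so the two must always appear as a pair in
 * the same scope, e.g. (an illustrative sketch; "v" and "uptr" are
 * hypothetical):
 *
 *	int err = 0;
 *
 *	uaccess_try {
 *		get_user_ex(v, uptr);
 *	} uaccess_catch(err);
 */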

/**
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x: Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */

#define __get_user(x, ptr)						\
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))

/**
 * __put_user: - Write a simple value into user space, with less checking.
 * @x: Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */

#define __put_user(x, ptr)						\
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
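
/*
 * Example (an illustrative sketch; "uarr", "n" and "i" are
 * hypothetical): one access_ok() covering the whole buffer, then the
 * unchecked variants in the loop.
 *
 *	if (!access_ok(VERIFY_WRITE, uarr, n * sizeof(*uarr)))
 *		return -EFAULT;
 *	for (i = 0; i < n; i++)
 *		if (__put_user(i, uarr + i))
 *			return -EFAULT;
 */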

#define __get_user_unaligned __get_user
#define __put_user_unaligned __put_user

/*
 * {get|put}_user_try and catch
 *
 * get_user_try {
 *	get_user_ex(...);
 * } get_user_catch(err)
 */
#define get_user_try		uaccess_try
#define get_user_catch(err)	uaccess_catch(err)

#define get_user_ex(x, ptr)	do {					\
	unsigned long __gue_val;					\
	__get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr))));	\
	(x) = (__force __typeof__(*(ptr)))__gue_val;			\
} while (0)

#define put_user_try		uaccess_try
#define put_user_catch(err)	uaccess_catch(err)

#define put_user_ex(x, ptr)						\
	__put_user_size_ex((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
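
/*
 * Example (illustrative, loosely modeled on how the signal code
 * batches user stores; the "frame" layout here is made up):
 *
 *	int err = 0;
 *
 *	put_user_try {
 *		put_user_ex(regs->ip, &frame->ip);
 *		put_user_ex(regs->sp, &frame->sp);
 *	} put_user_catch(err);
 *
 *	if (err)
 *		return -EFAULT;
 */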

extern unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
extern __must_check long
strncpy_from_user(char *dst, const char __user *src, long count);

extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);

unsigned long __must_check clear_user(void __user *mem, unsigned long len);
unsigned long __must_check __clear_user(void __user *mem, unsigned long len);

extern void __cmpxchg_wrong_size(void)
	__compiletime_error("Bad argument size for cmpxchg");

#define __user_atomic_cmpxchg_inatomic(uval, ptr, old, new, size)	\
({									\
	int __ret = 0;							\
	__typeof__(ptr) __uval = (uval);				\
	__typeof__(*(ptr)) __old = (old);				\
	__typeof__(*(ptr)) __new = (new);				\
	__uaccess_begin();						\
	switch (size) {							\
	case 1:								\
	{								\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov %3, %0\n"				\
			"\tjmp 2b\n"					\
			"\t.previous\n"					\
			_ASM_EXTABLE(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "q" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	case 2:								\
	{								\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov %3, %0\n"				\
			"\tjmp 2b\n"					\
			"\t.previous\n"					\
			_ASM_EXTABLE(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	case 4:								\
	{								\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov %3, %0\n"				\
			"\tjmp 2b\n"					\
			"\t.previous\n"					\
			_ASM_EXTABLE(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	case 8:								\
	{								\
		if (!IS_ENABLED(CONFIG_X86_64))				\
			__cmpxchg_wrong_size();				\
									\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov %3, %0\n"				\
			"\tjmp 2b\n"					\
			"\t.previous\n"					\
			_ASM_EXTABLE(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	default:							\
		__cmpxchg_wrong_size();					\
	}								\
	__uaccess_end();						\
	*__uval = __old;						\
	__ret;								\
})

#define user_atomic_cmpxchg_inatomic(uval, ptr, old, new)		\
({									\
	access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) ?		\
		__user_atomic_cmpxchg_inatomic((uval), (ptr),		\
				(old), (new), sizeof(*(ptr))) :		\
		-EFAULT;						\
})
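
/*
 * Example (illustrative; "cur", "uaddr" and "expected" are
 * hypothetical): atomically bump a user-space counter, futex-style.
 * On success *uval holds the value actually found at the address, so
 * the caller can tell whether it lost a race.
 *
 *	u32 cur;
 *	int ret;
 *
 *	ret = user_atomic_cmpxchg_inatomic(&cur, uaddr,
 *					   expected, expected + 1);
 *	if (ret)
 *		return ret;
 *	if (cur != expected)
 *		return -EAGAIN;
 */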

/*
 * movsl can be slow when source and dest are not both 8-byte aligned
 */
#ifdef CONFIG_X86_INTEL_USERCOPY
extern struct movsl_mask {
	int mask;
} ____cacheline_aligned_in_smp movsl_mask;
#endif

#define ARCH_HAS_NOCACHE_UACCESS 1

#ifdef CONFIG_X86_32
# include <asm/uaccess_32.h>
#else
# include <asm/uaccess_64.h>
#endif

unsigned long __must_check _copy_from_user(void *to, const void __user *from,
					   unsigned n);
unsigned long __must_check _copy_to_user(void __user *to, const void *from,
					 unsigned n);

#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
# define copy_user_diag __compiletime_error
#else
# define copy_user_diag __compiletime_warning
#endif

extern void copy_user_diag("copy_from_user() buffer size is too small")
copy_from_user_overflow(void);
extern void copy_user_diag("copy_to_user() buffer size is too small")
copy_to_user_overflow(void) __asm__("copy_from_user_overflow");

#undef copy_user_diag

#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS

extern void
__compiletime_warning("copy_from_user() buffer size is not provably correct")
__copy_from_user_overflow(void) __asm__("copy_from_user_overflow");
#define __copy_from_user_overflow(size, count) __copy_from_user_overflow()

extern void
__compiletime_warning("copy_to_user() buffer size is not provably correct")
__copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
#define __copy_to_user_overflow(size, count) __copy_to_user_overflow()

#else

static inline void
__copy_from_user_overflow(int size, unsigned long count)
{
	WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
}

#define __copy_to_user_overflow __copy_from_user_overflow

#endif

static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
	int sz = __compiletime_object_size(to);

	might_fault();

	/*
	 * While we would like to have the compiler do the checking for us
	 * even in the non-constant size case, any false positives there are
	 * a problem (especially when DEBUG_STRICT_USER_COPY_CHECKS, but even
	 * without - the [hopefully] dangerous looking nature of the warning
	 * would make people go look at the respective call sites over and
	 * over again just to find that there's no problem).
	 *
	 * And there are cases where it's just not realistic for the compiler
	 * to prove the count to be in range. For example when multiple call
	 * sites of a helper function - perhaps in different source files -
	 * all doing proper range checking, yet the helper function not doing
	 * so again.
	 *
	 * Therefore limit the compile time checking to the constant size
	 * case, and do only runtime checking for non-constant sizes.
	 */

	if (likely(sz < 0 || sz >= n))
		n = _copy_from_user(to, from, n);
	else if (__builtin_constant_p(n))
		copy_from_user_overflow();
	else
		__copy_from_user_overflow(sz, n);

	return n;
}
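
/*
 * Example (illustrative; "my_args" and "uargs" are invented names):
 * copying a fixed-size structure in from user space, where the
 * constant sizeof() also enables the compile-time overflow check.
 *
 *	struct my_args args;
 *
 *	if (copy_from_user(&args, uargs, sizeof(args)))
 *		return -EFAULT;
 */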

static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
	int sz = __compiletime_object_size(from);

	might_fault();

	/* See the comment in copy_from_user() above. */
	if (likely(sz < 0 || sz >= n))
		n = _copy_to_user(to, from, n);
	else if (__builtin_constant_p(n))
		copy_to_user_overflow();
	else
		__copy_to_user_overflow(sz, n);

	return n;
}

#undef __copy_from_user_overflow
#undef __copy_to_user_overflow

/*
 * We rely on the nested NMI work to allow atomic faults from the NMI path; the
 * nested NMI paths are careful to preserve CR2.
 *
 * Caller must use pagefault_enable/disable, or run in interrupt context,
 * and also do an access_ok() check.
 */
#define __copy_from_user_nmi __copy_from_user_inatomic

/*
 * The "unsafe" user accesses aren't really "unsafe", but the naming
 * is a big fat warning: you have to not only do the access_ok()
 * checking before using them, but you have to surround them with the
 * user_access_begin/end() pair.
 */
#define user_access_begin()	__uaccess_begin()
#define user_access_end()	__uaccess_end()

#define unsafe_put_user(x, ptr)						\
({									\
	int __pu_err;							\
	__put_user_size((x), (ptr), sizeof(*(ptr)), __pu_err, -EFAULT);	\
	__builtin_expect(__pu_err, 0);					\
})

#define unsafe_get_user(x, ptr)						\
({									\
	int __gu_err;							\
	unsigned long __gu_val;						\
	__get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err, -EFAULT);	\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
	__builtin_expect(__gu_err, 0);					\
})
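
/*
 * Example of the required bracketing (an illustrative sketch; "val",
 * "uptr" and "err" are hypothetical):
 *
 *	if (!access_ok(VERIFY_WRITE, uptr, sizeof(*uptr)))
 *		return -EFAULT;
 *	user_access_begin();
 *	err = unsafe_put_user(val, uptr);
 *	user_access_end();
 *	return err ? -EFAULT : 0;
 */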

#endif /* _ASM_X86_UACCESS_H */