#ifndef _ASM_X86_UACCESS_H
#define _ASM_X86_UACCESS_H
/*
 * User space memory access functions
 */
#include <linux/errno.h>
#include <linux/compiler.h>
#include <linux/kasan-checks.h>
#include <linux/thread_info.h>
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>
#include <asm/smap.h>
#include <asm/extable.h>

#define VERIFY_READ	0
#define VERIFY_WRITE	1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(-1UL)
#define USER_DS		MAKE_MM_SEG(TASK_SIZE_MAX)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current->thread.addr_limit)
#define set_fs(x)	(current->thread.addr_limit = (x))

#define segment_eq(a, b)	((a).seg == (b).seg)

#define user_addr_max() (current->thread.addr_limit.seg)
#define __addr_ok(addr)		\
	((unsigned long __force)(addr) < user_addr_max())

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 */
static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
{
	/*
	 * If we have used "sizeof()" for the size,
	 * we know it won't overflow the limit (but
	 * it might overflow the 'addr', so it's
	 * important to subtract the size from the
	 * limit, not add it to the address).
	 */
	if (__builtin_constant_p(size))
		return unlikely(addr > limit - size);

	/* Arbitrary sizes? Be careful about overflow */
	addr += size;
	if (unlikely(addr < size))
		return true;
	return unlikely(addr > limit);
}
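
/*
 * Worked example (illustrative only, with made-up values): with a 64-bit
 * limit of 0x7ffffffff000, a hostile addr of 0xfffffffffffffff8 and
 * size 16, "addr + size" wraps to 0x8 and would pass a naive
 * "addr + size > limit" test.  The constant-size path instead checks
 * "addr > limit - size", which cannot wrap for a sizeof()-derived size,
 * and the arbitrary-size path detects the wrap via "addr + size < size".
 */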

#define __range_not_ok(addr, size, limit)				\
({									\
	__chk_user_ptr(addr);						\
	__chk_range_not_ok((unsigned long __force)(addr), size, limit); \
})

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
# define WARN_ON_IN_IRQ()	WARN_ON_ONCE(!in_task())
#else
# define WARN_ON_IN_IRQ()
#endif

/**
 * access_ok: - Checks if a user space pointer is valid
 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
 *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
 *        to write to a block, it is always safe to read from it.
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */
#define access_ok(type, addr, size)					\
({									\
	WARN_ON_IN_IRQ();						\
	likely(!__range_not_ok(addr, size, user_addr_max()));		\
})
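
/*
 * Example (illustrative sketch, not part of this header; 'ubuf' and
 * 'len' are hypothetical values handed in from userspace): validate a
 * user buffer once before using the unchecked __xxx helpers below:
 *
 *	if (!access_ok(VERIFY_WRITE, ubuf, len))
 *		return -EFAULT;
 */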

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */

extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_bad(void);

#define __uaccess_begin() stac()
#define __uaccess_end()   clac()

/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.
 */
#define __inttype(x) \
__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
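
/*
 * For instance (illustrative): on 64-bit, __inttype(int) and
 * __inttype(u64) are both unsigned long; on 32-bit, __inttype(u64)
 * becomes unsigned long long while __inttype(int) stays unsigned long.
 */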

/**
 * get_user: - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
/*
 * Careful: we have to cast the result to the type of the pointer
 * for sign reasons.
 *
 * The use of _ASM_DX as the register specifier is a bit of a
 * simplification, as gcc only cares about it as the starting point
 * and not size: for a 64-bit value it will use %ecx:%edx on 32 bits
 * (%ecx being the next register in gcc's x86 register sequence), and
 * %rdx on 64 bits.
 *
 * Clang/LLVM cares about the size of the register, but still wants
 * the base register for something that ends up being a pair.
 */
#define get_user(x, ptr)						\
({									\
	int __ret_gu;							\
	register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX);		\
	__chk_user_ptr(ptr);						\
	might_fault();							\
	asm volatile("call __get_user_%P4"				\
		     : "=a" (__ret_gu), "=r" (__val_gu),		\
		       ASM_CALL_CONSTRAINT				\
		     : "0" (ptr), "i" (sizeof(*(ptr))));		\
	(x) = (__force __typeof__(*(ptr))) __val_gu;			\
	__builtin_expect(__ret_gu, 0);					\
})

#define __put_user_x(size, x, ptr, __ret_pu)			\
	asm volatile("call __put_user_" #size : "=a" (__ret_pu)	\
		     : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")

#ifdef CONFIG_X86_32
#define __put_user_asm_u64(x, addr, err, errret)			\
	asm volatile("\n"						\
		     "1:	movl %%eax,0(%2)\n"			\
		     "2:	movl %%edx,4(%2)\n"			\
		     "3:"						\
		     ".section .fixup,\"ax\"\n"				\
		     "4:	movl %3,%0\n"				\
		     "	jmp 3b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 4b)				\
		     _ASM_EXTABLE(2b, 4b)				\
		     : "=r" (err)					\
		     : "A" (x), "r" (addr), "i" (errret), "0" (err))

#define __put_user_asm_ex_u64(x, addr)					\
	asm volatile("\n"						\
		     "1:	movl %%eax,0(%1)\n"			\
		     "2:	movl %%edx,4(%1)\n"			\
		     "3:"						\
		     _ASM_EXTABLE_EX(1b, 2b)				\
		     _ASM_EXTABLE_EX(2b, 3b)				\
		     : : "A" (x), "r" (addr))

#define __put_user_x8(x, ptr, __ret_pu)				\
	asm volatile("call __put_user_8" : "=a" (__ret_pu)	\
		     : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
#else
#define __put_user_asm_u64(x, ptr, retval, errret) \
	__put_user_asm(x, ptr, retval, "q", "", "er", errret)
#define __put_user_asm_ex_u64(x, addr)	\
	__put_user_asm_ex(x, addr, "q", "", "er")
#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
#endif

extern void __put_user_bad(void);

/*
 * Strange magic calling convention: pointer in %ecx,
 * value in %eax(:%edx), return value in %eax.  clobbers %rbx
 */
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);

/**
 * put_user: - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user(x, ptr)					\
({								\
	int __ret_pu;						\
	__typeof__(*(ptr)) __pu_val;				\
	__chk_user_ptr(ptr);					\
	might_fault();						\
	__pu_val = x;						\
	switch (sizeof(*(ptr))) {				\
	case 1:							\
		__put_user_x(1, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 2:							\
		__put_user_x(2, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 4:							\
		__put_user_x(4, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 8:							\
		__put_user_x8(__pu_val, ptr, __ret_pu);		\
		break;						\
	default:						\
		__put_user_x(X, __pu_val, ptr, __ret_pu);	\
		break;						\
	}							\
	__builtin_expect(__ret_pu, 0);				\
})
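
/*
 * Example (illustrative sketch, not part of this header; 'result' and
 * 'uaddr' are hypothetical): copying a computed value back out to a
 * user-supplied pointer:
 *
 *	if (put_user(result, (u32 __user *)uaddr))
 *		return -EFAULT;
 */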

#define __put_user_size(x, ptr, size, retval, errret)			\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_asm(x, ptr, retval, "b", "b", "iq", errret);	\
		break;							\
	case 2:								\
		__put_user_asm(x, ptr, retval, "w", "w", "ir", errret);	\
		break;							\
	case 4:								\
		__put_user_asm(x, ptr, retval, "l", "k", "ir", errret);	\
		break;							\
	case 8:								\
		__put_user_asm_u64((__typeof__(*ptr))(x), ptr, retval,	\
				   errret);				\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

/*
 * This doesn't do __uaccess_begin/end - the exception handling
 * around it must do that.
 */
#define __put_user_size_ex(x, ptr, size)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_asm_ex(x, ptr, "b", "b", "iq");		\
		break;							\
	case 2:								\
		__put_user_asm_ex(x, ptr, "w", "w", "ir");		\
		break;							\
	case 4:								\
		__put_user_asm_ex(x, ptr, "l", "k", "ir");		\
		break;							\
	case 8:								\
		__put_user_asm_ex_u64((__typeof__(*ptr))(x), ptr);	\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

#ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, retval, errret)			\
({									\
	__typeof__(ptr) __ptr = (ptr);					\
	asm volatile("\n"						\
		     "1:	movl %2,%%eax\n"			\
		     "2:	movl %3,%%edx\n"			\
		     "3:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "4:	mov %4,%0\n"				\
		     "	xorl %%eax,%%eax\n"				\
		     "	xorl %%edx,%%edx\n"				\
		     "	jmp 3b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 4b)				\
		     _ASM_EXTABLE(2b, 4b)				\
		     : "=r" (retval), "=&A"(x)				\
		     : "m" (__m(__ptr)), "m" __m(((u32 *)(__ptr)) + 1),	\
		       "i" (errret), "0" (retval));			\
})

#define __get_user_asm_ex_u64(x, ptr)	(x) = __get_user_bad()
#else
#define __get_user_asm_u64(x, ptr, retval, errret) \
	__get_user_asm(x, ptr, retval, "q", "", "=r", errret)
#define __get_user_asm_ex_u64(x, ptr) \
	__get_user_asm_ex(x, ptr, "q", "", "=r")
#endif

#define __get_user_size(x, ptr, size, retval, errret)			\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm(x, ptr, retval, "b", "b", "=q", errret);	\
		break;							\
	case 2:								\
		__get_user_asm(x, ptr, retval, "w", "w", "=r", errret);	\
		break;							\
	case 4:								\
		__get_user_asm(x, ptr, retval, "l", "k", "=r", errret);	\
		break;							\
	case 8:								\
		__get_user_asm_u64(x, ptr, retval, errret);		\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
	asm volatile("\n"						\
		     "1:	mov"itype" %2,%"rtype"1\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:	mov %3,%0\n"				\
		     "	xor"itype" %"rtype"1,%"rtype"1\n"		\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : "=r" (err), ltype(x)				\
		     : "m" (__m(addr)), "i" (errret), "0" (err))

/*
 * This doesn't do __uaccess_begin/end - the exception handling
 * around it must do that.
 */
#define __get_user_size_ex(x, ptr, size)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm_ex(x, ptr, "b", "b", "=q");		\
		break;							\
	case 2:								\
		__get_user_asm_ex(x, ptr, "w", "w", "=r");		\
		break;							\
	case 4:								\
		__get_user_asm_ex(x, ptr, "l", "k", "=r");		\
		break;							\
	case 8:								\
		__get_user_asm_ex_u64(x, ptr);				\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm_ex(x, addr, itype, rtype, ltype)			\
	asm volatile("1:	mov"itype" %1,%"rtype"0\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:xor"itype" %"rtype"0,%"rtype"0\n"		\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_EX(1b, 3b)				\
		     : ltype(x) : "m" (__m(addr)))

#define __put_user_nocheck(x, ptr, size)			\
({								\
	int __pu_err;						\
	__uaccess_begin();					\
	__put_user_size((x), (ptr), (size), __pu_err, -EFAULT);	\
	__uaccess_end();					\
	__builtin_expect(__pu_err, 0);				\
})

#define __get_user_nocheck(x, ptr, size)				\
({									\
	int __gu_err;							\
	__inttype(*(ptr)) __gu_val;					\
	__uaccess_begin();						\
	__get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT);	\
	__uaccess_end();						\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
	__builtin_expect(__gu_err, 0);					\
})

/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
	asm volatile("\n"						\
		     "1:	mov"itype" %"rtype"1,%2\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:	mov %3,%0\n"				\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : "=r"(err)					\
		     : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))

#define __put_user_asm_ex(x, addr, itype, rtype, ltype)			\
	asm volatile("1:	mov"itype" %"rtype"0,%1\n"		\
		     "2:\n"						\
		     _ASM_EXTABLE_EX(1b, 2b)				\
		     : : ltype(x), "m" (__m(addr)))

/*
 * uaccess_try and catch
 */
#define uaccess_try	do {						\
	current->thread.uaccess_err = 0;				\
	__uaccess_begin();						\
	barrier();

#define uaccess_catch(err)						\
	__uaccess_end();						\
	(err) |= (current->thread.uaccess_err ? -EFAULT : 0);		\
} while (0)

/**
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */

#define __get_user(x, ptr)						\
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))

/**
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */

#define __put_user(x, ptr)						\
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user_unaligned __get_user
#define __put_user_unaligned __put_user
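
/*
 * Example (illustrative sketch, not part of this header; 'uarr' is a
 * hypothetical user pointer): the unchecked variants pay for a single
 * access_ok() across several accesses to the same region:
 *
 *	u32 tmp;
 *
 *	if (!access_ok(VERIFY_WRITE, uarr, 2 * sizeof(u32)))
 *		return -EFAULT;
 *	if (__get_user(tmp, uarr) || __put_user(tmp + 1, uarr + 1))
 *		return -EFAULT;
 */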

/*
 * {get|put}_user_try and catch
 *
 * get_user_try {
 *	get_user_ex(...);
 * } get_user_catch(err)
 */
#define get_user_try		uaccess_try
#define get_user_catch(err)	uaccess_catch(err)

#define get_user_ex(x, ptr)	do {					\
	unsigned long __gue_val;					\
	__get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr))));	\
	(x) = (__force __typeof__(*(ptr)))__gue_val;			\
} while (0)

#define put_user_try		uaccess_try
#define put_user_catch(err)	uaccess_catch(err)

#define put_user_ex(x, ptr)						\
	__put_user_size_ex((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
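
/*
 * Example (illustrative sketch, not part of this header): the try/catch
 * form batches several fetches under one fault-handling region, in the
 * style of the signal frame restore code; 'sc' stands in for a user
 * pointer already validated with access_ok():
 *
 *	int err = 0;
 *
 *	get_user_try {
 *		get_user_ex(regs->ax, &sc->ax);
 *		get_user_ex(regs->bx, &sc->bx);
 *	} get_user_catch(err);
 *
 *	return err;
 */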

extern unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
extern __must_check long
strncpy_from_user(char *dst, const char __user *src, long count);

extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);

unsigned long __must_check clear_user(void __user *mem, unsigned long len);
unsigned long __must_check __clear_user(void __user *mem, unsigned long len);

extern void __cmpxchg_wrong_size(void)
	__compiletime_error("Bad argument size for cmpxchg");

#define __user_atomic_cmpxchg_inatomic(uval, ptr, old, new, size)	\
({									\
	int __ret = 0;							\
	__typeof__(ptr) __uval = (uval);				\
	__typeof__(*(ptr)) __old = (old);				\
	__typeof__(*(ptr)) __new = (new);				\
	__uaccess_begin();						\
	switch (size) {							\
	case 1:								\
	{								\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov %3, %0\n"				\
			"\tjmp 2b\n"					\
			"\t.previous\n"					\
			_ASM_EXTABLE(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "q" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	case 2:								\
	{								\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov %3, %0\n"				\
			"\tjmp 2b\n"					\
			"\t.previous\n"					\
			_ASM_EXTABLE(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	case 4:								\
	{								\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov %3, %0\n"				\
			"\tjmp 2b\n"					\
			"\t.previous\n"					\
			_ASM_EXTABLE(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	case 8:								\
	{								\
		if (!IS_ENABLED(CONFIG_X86_64))				\
			__cmpxchg_wrong_size();				\
									\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov %3, %0\n"				\
			"\tjmp 2b\n"					\
			"\t.previous\n"					\
			_ASM_EXTABLE(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	default:							\
		__cmpxchg_wrong_size();					\
	}								\
	__uaccess_end();						\
	*__uval = __old;						\
	__ret;								\
})

#define user_atomic_cmpxchg_inatomic(uval, ptr, old, new)		\
({									\
	access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) ?		\
		__user_atomic_cmpxchg_inatomic((uval), (ptr),		\
				(old), (new), sizeof(*(ptr))) :		\
		-EFAULT;						\
})
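
/*
 * Example (illustrative sketch, not part of this header; 'uaddr' and
 * 'old' are hypothetical): atomically advancing a futex-style counter
 * in user memory, with pagefaults handled or disabled by the caller:
 *
 *	u32 cur;
 *
 *	if (user_atomic_cmpxchg_inatomic(&cur, uaddr, old, old + 1))
 *		return -EFAULT;
 *	// cur now holds the prior value of *uaddr; the exchange
 *	// happened iff cur == old
 */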

/*
 * movsl can be slow when source and dest are not both 8-byte aligned
 */
#ifdef CONFIG_X86_INTEL_USERCOPY
extern struct movsl_mask {
	int mask;
} ____cacheline_aligned_in_smp movsl_mask;
#endif

#define ARCH_HAS_NOCACHE_UACCESS 1

#ifdef CONFIG_X86_32
# include <asm/uaccess_32.h>
#else
# include <asm/uaccess_64.h>
#endif

unsigned long __must_check _copy_from_user(void *to, const void __user *from,
					   unsigned n);
unsigned long __must_check _copy_to_user(void __user *to, const void *from,
					 unsigned n);

extern void __compiletime_error("usercopy buffer size is too small")
__bad_copy_user(void);

static inline void copy_user_overflow(int size, unsigned long count)
{
	WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
}

static __always_inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
	int sz = __compiletime_object_size(to);

	might_fault();

	kasan_check_write(to, n);

	if (likely(sz < 0 || sz >= n)) {
		check_object_size(to, n, false);
		n = _copy_from_user(to, from, n);
	} else if (!__builtin_constant_p(n))
		copy_user_overflow(sz, n);
	else
		__bad_copy_user();

	return n;
}

static __always_inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
	int sz = __compiletime_object_size(from);

	kasan_check_read(from, n);

	might_fault();

	if (likely(sz < 0 || sz >= n)) {
		check_object_size(from, n, true);
		n = _copy_to_user(to, from, n);
	} else if (!__builtin_constant_p(n))
		copy_user_overflow(sz, n);
	else
		__bad_copy_user();

	return n;
}
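
/*
 * Example (illustrative sketch, not part of this header; 'struct my_args'
 * and 'uargs' are hypothetical): copying a fixed-size argument structure
 * in from userspace; a nonzero return value is the number of bytes that
 * could not be copied:
 *
 *	struct my_args args;
 *
 *	if (copy_from_user(&args, uargs, sizeof(args)))
 *		return -EFAULT;
 */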

/*
 * We rely on the nested NMI work to allow atomic faults from the NMI path; the
 * nested NMI paths are careful to preserve CR2.
 *
 * Caller must use pagefault_enable/disable, or run in interrupt context,
 * and also do an access_ok() check.
 */
#define __copy_from_user_nmi __copy_from_user_inatomic

/*
 * The "unsafe" user accesses aren't really "unsafe", but the naming
 * is a big fat warning: you have to not only do the access_ok()
 * checking before using them, but you have to surround them with the
 * user_access_begin/end() pair.
 */
#define user_access_begin()	__uaccess_begin()
#define user_access_end()	__uaccess_end()

#define unsafe_put_user(x, ptr, err_label)					\
do {										\
	int __pu_err;								\
	__put_user_size((x), (ptr), sizeof(*(ptr)), __pu_err, -EFAULT);	\
	if (unlikely(__pu_err)) goto err_label;					\
} while (0)

#define unsafe_get_user(x, ptr, err_label)					\
do {										\
	int __gu_err;								\
	unsigned long __gu_val;							\
	__get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err, -EFAULT);	\
	(x) = (__force __typeof__(*(ptr)))__gu_val;				\
	if (unlikely(__gu_err)) goto err_label;					\
} while (0)
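
/*
 * Example (illustrative sketch, not part of this header; 'uptr', 'a' and
 * 'b' are hypothetical): the unsafe accessors amortize the STAC/CLAC
 * overhead over a run of accesses, after 'uptr' has passed access_ok():
 *
 *	user_access_begin();
 *	unsafe_get_user(a, &uptr[0], efault);
 *	unsafe_get_user(b, &uptr[1], efault);
 *	user_access_end();
 *	return 0;
 * efault:
 *	user_access_end();
 *	return -EFAULT;
 */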

#endif /* _ASM_X86_UACCESS_H */