#ifndef _ASM_X86_UACCESS_H
#define _ASM_X86_UACCESS_H
/*
 * User space memory access functions
 */
#include <linux/errno.h>
#include <linux/compiler.h>
#include <linux/kasan-checks.h>
#include <linux/thread_info.h>
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>
#include <asm/smap.h>
#include <asm/extable.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed;
 * if get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(-1UL)
#define USER_DS		MAKE_MM_SEG(TASK_SIZE_MAX)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current->thread.addr_limit)
#define set_fs(x)	(current->thread.addr_limit = (x))
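
/*
 * Illustrative sketch, not from this file: the classic (and heavily
 * discouraged) pattern for temporarily lifting the user-range check so a
 * kernel buffer can be passed to code that expects a __user pointer.
 * The wrapper name is hypothetical.
 *
 *	static ssize_t kernel_read_sketch(struct file *f, char *buf,
 *					  size_t len, loff_t *pos)
 *	{
 *		mm_segment_t old_fs = get_fs();
 *		ssize_t ret;
 *
 *		set_fs(KERNEL_DS);		// bypass the limit check
 *		ret = vfs_read(f, (char __user *)buf, len, pos);
 *		set_fs(old_fs);			// always restore the old limit
 *		return ret;
 *	}
 */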

#define segment_eq(a, b)	((a).seg == (b).seg)

#define user_addr_max() (current->thread.addr_limit.seg)
#define __addr_ok(addr)	\
	((unsigned long __force)(addr) < user_addr_max())

/*
 * Test whether a block of memory is a valid user space address.
 * Returns false if the range is valid, true otherwise.
 */
static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
{
	/*
	 * If we have used "sizeof()" for the size,
	 * we know it won't overflow the limit (but
	 * it might overflow the 'addr', so it's
	 * important to subtract the size from the
	 * limit, not add it to the address).
	 */
	if (__builtin_constant_p(size))
		return unlikely(addr > limit - size);

	/* Arbitrary sizes? Be careful about overflow */
	addr += size;
	if (unlikely(addr < size))
		return true;
	return unlikely(addr > limit);
}
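
/*
 * Worked example of the wraparound check above: on 64-bit, addr ==
 * 0xffffffffffffff00 with size == 0x200 makes "addr + size" wrap to
 * 0x100.  The wrapped sum is smaller than size, so the range is
 * rejected even though the raw sum would appear far below the limit.
 */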

#define __range_not_ok(addr, size, limit)				\
({									\
	__chk_user_ptr(addr);						\
	__chk_range_not_ok((unsigned long __force)(addr), size, limit); \
})

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
# define WARN_ON_IN_IRQ()	WARN_ON_ONCE(!in_task())
#else
# define WARN_ON_IN_IRQ()
#endif

/**
 * access_ok: - Checks if a user space pointer is valid
 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
 *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
 *        to write to a block, it is always safe to read from it.
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */
#define access_ok(type, addr, size)					\
({									\
	WARN_ON_IN_IRQ();						\
	likely(!__range_not_ok(addr, size, user_addr_max()));		\
})
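
/*
 * Minimal usage sketch (the handler and argument names are hypothetical,
 * not part of this header): validate the range once, then remember that
 * access_ok() only bounds-checks - the access itself may still fault.
 *
 *	long my_ioctl_sketch(void __user *argp)
 *	{
 *		u64 val;
 *
 *		if (!access_ok(VERIFY_READ, argp, sizeof(val)))
 *			return -EFAULT;
 *		if (__get_user(val, (u64 __user *)argp))
 *			return -EFAULT;	// faulted despite access_ok()
 *		return val ? 0 : -EINVAL;
 *	}
 */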

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */

extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_bad(void);

#define __uaccess_begin() stac()
#define __uaccess_end()   clac()
#define __uaccess_begin_nospec()	\
({					\
	stac();				\
	barrier_nospec();		\
})

/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.
 */
#define __inttype(x) \
__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))

/**
 * get_user: - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
/*
 * Careful: we have to cast the result to the type of the pointer
 * for sign reasons.
 *
 * The use of _ASM_DX as the register specifier is a bit of a
 * simplification, as gcc only cares about it as the starting point
 * and not size: for a 64-bit value it will use %ecx:%edx on 32 bits
 * (%ecx being the next register in gcc's x86 register sequence), and
 * %rdx on 64 bits.
 *
 * Clang/LLVM cares about the size of the register, but still wants
 * the base register for something that ends up being a pair.
 */
#define get_user(x, ptr)						\
({									\
	int __ret_gu;							\
	register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX);		\
	register void *__sp asm(_ASM_SP);				\
	__chk_user_ptr(ptr);						\
	might_fault();							\
	asm volatile("call __get_user_%P4"				\
		     : "=a" (__ret_gu), "=r" (__val_gu), "+r" (__sp)	\
		     : "0" (ptr), "i" (sizeof(*(ptr))));		\
	(x) = (__force __typeof__(*(ptr))) __val_gu;			\
	__builtin_expect(__ret_gu, 0);					\
})
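
/*
 * Minimal usage sketch (hypothetical names): the pointer type alone picks
 * the right __get_user_N call, and the result collapses to 0 or -EFAULT.
 *
 *	static int read_user_flag_sketch(int __user *uaddr, int *flag)
 *	{
 *		int val;
 *
 *		if (get_user(val, uaddr))	// val is zeroed on fault
 *			return -EFAULT;
 *		*flag = val;
 *		return 0;
 *	}
 */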

#define __put_user_x(size, x, ptr, __ret_pu)			\
	asm volatile("call __put_user_" #size : "=a" (__ret_pu)	\
		     : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")


#ifdef CONFIG_X86_32
#define __put_user_asm_u64(x, addr, err, errret)			\
	asm volatile("\n"						\
		     "1: movl %%eax,0(%2)\n"				\
		     "2: movl %%edx,4(%2)\n"				\
		     "3:"						\
		     ".section .fixup,\"ax\"\n"				\
		     "4: movl %3,%0\n"					\
		     "	jmp 3b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 4b)				\
		     _ASM_EXTABLE(2b, 4b)				\
		     : "=r" (err)					\
		     : "A" (x), "r" (addr), "i" (errret), "0" (err))

#define __put_user_asm_ex_u64(x, addr)					\
	asm volatile("\n"						\
		     "1: movl %%eax,0(%1)\n"				\
		     "2: movl %%edx,4(%1)\n"				\
		     "3:"						\
		     _ASM_EXTABLE_EX(1b, 2b)				\
		     _ASM_EXTABLE_EX(2b, 3b)				\
		     : : "A" (x), "r" (addr))

#define __put_user_x8(x, ptr, __ret_pu)				\
	asm volatile("call __put_user_8" : "=a" (__ret_pu)	\
		     : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
#else
#define __put_user_asm_u64(x, ptr, retval, errret) \
	__put_user_asm(x, ptr, retval, "q", "", "er", errret)
#define __put_user_asm_ex_u64(x, addr)	\
	__put_user_asm_ex(x, addr, "q", "", "er")
#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
#endif

extern void __put_user_bad(void);

/*
 * Strange magic calling convention: pointer in %ecx,
 * value in %eax(:%edx), return value in %eax. clobbers %rbx
 */
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);

/**
 * put_user: - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user(x, ptr)						\
({									\
	int __ret_pu;							\
	__typeof__(*(ptr)) __pu_val;					\
	__chk_user_ptr(ptr);						\
	might_fault();							\
	__pu_val = x;							\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__put_user_x(1, __pu_val, ptr, __ret_pu);		\
		break;							\
	case 2:								\
		__put_user_x(2, __pu_val, ptr, __ret_pu);		\
		break;							\
	case 4:								\
		__put_user_x(4, __pu_val, ptr, __ret_pu);		\
		break;							\
	case 8:								\
		__put_user_x8(__pu_val, ptr, __ret_pu);			\
		break;							\
	default:							\
		__put_user_x(X, __pu_val, ptr, __ret_pu);		\
		break;							\
	}								\
	__builtin_expect(__ret_pu, 0);					\
})
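
/*
 * Minimal usage sketch (hypothetical names): the switch above reduces to a
 * single "call __put_user_8" for a u64 store, per the calling convention
 * documented before the __put_user_N declarations.
 *
 *	static int publish_count_sketch(u64 __user *uaddr, u64 count)
 *	{
 *		return put_user(count, uaddr);	// 0 on success, -EFAULT on fault
 *	}
 */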

#define __put_user_size(x, ptr, size, retval, errret)			\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_asm(x, ptr, retval, "b", "b", "iq", errret);	\
		break;							\
	case 2:								\
		__put_user_asm(x, ptr, retval, "w", "w", "ir", errret);	\
		break;							\
	case 4:								\
		__put_user_asm(x, ptr, retval, "l", "k", "ir", errret);	\
		break;							\
	case 8:								\
		__put_user_asm_u64(x, ptr, retval, errret);		\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

/*
 * This doesn't do __uaccess_begin/end - the exception handling
 * around it must do that.
 */
#define __put_user_size_ex(x, ptr, size)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_asm_ex(x, ptr, "b", "b", "iq");		\
		break;							\
	case 2:								\
		__put_user_asm_ex(x, ptr, "w", "w", "ir");		\
		break;							\
	case 4:								\
		__put_user_asm_ex(x, ptr, "l", "k", "ir");		\
		break;							\
	case 8:								\
		__put_user_asm_ex_u64((__typeof__(*ptr))(x), ptr);	\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

#ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, retval, errret)			\
({									\
	__typeof__(ptr) __ptr = (ptr);					\
	asm volatile("\n"						\
		     "1: movl %2,%%eax\n"				\
		     "2: movl %3,%%edx\n"				\
		     "3:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "4: mov %4,%0\n"					\
		     "	xorl %%eax,%%eax\n"				\
		     "	xorl %%edx,%%edx\n"				\
		     "	jmp 3b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 4b)				\
		     _ASM_EXTABLE(2b, 4b)				\
		     : "=r" (retval), "=&A"(x)				\
		     : "m" (__m(__ptr)), "m" __m(((u32 *)(__ptr)) + 1),	\
		       "i" (errret), "0" (retval));			\
})

#define __get_user_asm_ex_u64(x, ptr)	(x) = __get_user_bad()
#else
#define __get_user_asm_u64(x, ptr, retval, errret) \
	__get_user_asm(x, ptr, retval, "q", "", "=r", errret)
#define __get_user_asm_ex_u64(x, ptr) \
	__get_user_asm_ex(x, ptr, "q", "", "=r")
#endif

#define __get_user_size(x, ptr, size, retval, errret)			\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm(x, ptr, retval, "b", "b", "=q", errret);	\
		break;							\
	case 2:								\
		__get_user_asm(x, ptr, retval, "w", "w", "=r", errret);	\
		break;							\
	case 4:								\
		__get_user_asm(x, ptr, retval, "l", "k", "=r", errret);	\
		break;							\
	case 8:								\
		__get_user_asm_u64(x, ptr, retval, errret);		\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
	asm volatile("\n"						\
		     "1: mov"itype" %2,%"rtype"1\n"			\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3: mov %3,%0\n"					\
		     "	xor"itype" %"rtype"1,%"rtype"1\n"		\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : "=r" (err), ltype(x)				\
		     : "m" (__m(addr)), "i" (errret), "0" (err))

/*
 * This doesn't do __uaccess_begin/end - the exception handling
 * around it must do that.
 */
#define __get_user_size_ex(x, ptr, size)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm_ex(x, ptr, "b", "b", "=q");		\
		break;							\
	case 2:								\
		__get_user_asm_ex(x, ptr, "w", "w", "=r");		\
		break;							\
	case 4:								\
		__get_user_asm_ex(x, ptr, "l", "k", "=r");		\
		break;							\
	case 8:								\
		__get_user_asm_ex_u64(x, ptr);				\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm_ex(x, addr, itype, rtype, ltype)		\
	asm volatile("1: mov"itype" %1,%"rtype"0\n"			\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:xor"itype" %"rtype"0,%"rtype"0\n"		\
		     "  jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_EX(1b, 3b)				\
		     : ltype(x) : "m" (__m(addr)))

#define __put_user_nocheck(x, ptr, size)				\
({									\
	int __pu_err;							\
	__typeof__(*(ptr)) __pu_val;					\
	__pu_val = x;							\
	__uaccess_begin();						\
	__put_user_size(__pu_val, (ptr), (size), __pu_err, -EFAULT);	\
	__uaccess_end();						\
	__builtin_expect(__pu_err, 0);					\
})

#define __get_user_nocheck(x, ptr, size)				\
({									\
	int __gu_err;							\
	__inttype(*(ptr)) __gu_val;					\
	__typeof__(ptr) __gu_ptr = (ptr);				\
	__typeof__(size) __gu_size = (size);				\
	__uaccess_begin_nospec();					\
	__get_user_size(__gu_val, __gu_ptr, __gu_size, __gu_err, -EFAULT); \
	__uaccess_end();						\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
	__builtin_expect(__gu_err, 0);					\
})

/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
	asm volatile("\n"						\
		     "1: mov"itype" %"rtype"1,%2\n"			\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3: mov %3,%0\n"					\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : "=r"(err)					\
		     : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))

#define __put_user_asm_ex(x, addr, itype, rtype, ltype)		\
	asm volatile("1: mov"itype" %"rtype"0,%1\n"			\
		     "2:\n"						\
		     _ASM_EXTABLE_EX(1b, 2b)				\
		     : : ltype(x), "m" (__m(addr)))

/*
 * uaccess_try and catch
 */
#define uaccess_try	do {						\
	current->thread.uaccess_err = 0;				\
	__uaccess_begin();						\
	barrier();

#define uaccess_try_nospec do {						\
	current->thread.uaccess_err = 0;				\
	__uaccess_begin_nospec();					\

#define uaccess_catch(err)						\
	__uaccess_end();						\
	(err) |= (current->thread.uaccess_err ? -EFAULT : 0);		\
} while (0)

/**
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */

#define __get_user(x, ptr)						\
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))

/**
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */

#define __put_user(x, ptr)						\
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user_unaligned __get_user
#define __put_user_unaligned __put_user
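
/*
 * Illustrative sketch (hypothetical names) of the pattern the "__xxx"
 * variants exist for: one access_ok() check amortized over repeated
 * accesses to the same user array.
 *
 *	static int sum_user_words_sketch(u32 __user *uarr, int n, u32 *sum)
 *	{
 *		u32 v, s = 0;
 *		int i;
 *
 *		if (!access_ok(VERIFY_READ, uarr, n * sizeof(u32)))
 *			return -EFAULT;
 *		for (i = 0; i < n; i++) {
 *			if (__get_user(v, uarr + i))
 *				return -EFAULT;
 *			s += v;
 *		}
 *		*sum = s;
 *		return 0;
 *	}
 */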

/*
 * {get|put}_user_try and catch
 *
 * get_user_try {
 *	get_user_ex(...);
 * } get_user_catch(err)
 */
#define get_user_try		uaccess_try_nospec
#define get_user_catch(err)	uaccess_catch(err)

#define get_user_ex(x, ptr)	do {					\
	unsigned long __gue_val;					\
	__get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr))));	\
	(x) = (__force __typeof__(*(ptr)))__gue_val;			\
} while (0)

#define put_user_try		uaccess_try
#define put_user_catch(err)	uaccess_catch(err)

#define put_user_ex(x, ptr)						\
	__put_user_size_ex((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
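
/*
 * Usage sketch in the style of the signal-frame code (names hypothetical):
 * the _ex accessors report faults via the exception table only, and the
 * accumulated error surfaces in the catch clause.  access_ok() must have
 * been done first.
 *
 *	static int restore_pair_sketch(u32 __user *uptr, u32 *a, u32 *b)
 *	{
 *		int err = 0;
 *
 *		if (!access_ok(VERIFY_READ, uptr, 2 * sizeof(u32)))
 *			return -EFAULT;
 *		get_user_try {
 *			get_user_ex(*a, &uptr[0]);
 *			get_user_ex(*b, &uptr[1]);
 *		} get_user_catch(err);
 *
 *		return err;
 *	}
 */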

extern unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
extern __must_check long
strncpy_from_user(char *dst, const char __user *src, long count);

extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);

unsigned long __must_check clear_user(void __user *mem, unsigned long len);
unsigned long __must_check __clear_user(void __user *mem, unsigned long len);

extern void __cmpxchg_wrong_size(void)
	__compiletime_error("Bad argument size for cmpxchg");

#define __user_atomic_cmpxchg_inatomic(uval, ptr, old, new, size)	\
({									\
	int __ret = 0;							\
	__typeof__(ptr) __uval = (uval);				\
	__typeof__(*(ptr)) __old = (old);				\
	__typeof__(*(ptr)) __new = (new);				\
	__uaccess_begin_nospec();					\
	switch (size) {							\
	case 1:								\
	{								\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov %3, %0\n"				\
			"\tjmp 2b\n"					\
			"\t.previous\n"					\
			_ASM_EXTABLE(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "q" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	case 2:								\
	{								\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov %3, %0\n"				\
			"\tjmp 2b\n"					\
			"\t.previous\n"					\
			_ASM_EXTABLE(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	case 4:								\
	{								\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov %3, %0\n"				\
			"\tjmp 2b\n"					\
			"\t.previous\n"					\
			_ASM_EXTABLE(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	case 8:								\
	{								\
		if (!IS_ENABLED(CONFIG_X86_64))				\
			__cmpxchg_wrong_size();				\
									\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov %3, %0\n"				\
			"\tjmp 2b\n"					\
			"\t.previous\n"					\
			_ASM_EXTABLE(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	default:							\
		__cmpxchg_wrong_size();					\
	}								\
	__uaccess_end();						\
	*__uval = __old;						\
	__ret;								\
})

#define user_atomic_cmpxchg_inatomic(uval, ptr, old, new)		\
({									\
	access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) ?		\
		__user_atomic_cmpxchg_inatomic((uval), (ptr),		\
				(old), (new), sizeof(*(ptr))) :		\
		-EFAULT;						\
})
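
/*
 * Usage sketch (hypothetical names): a futex-style read-modify-write that
 * retries while other threads race on the same user word.  The macro does
 * its own access_ok() check and returns -EFAULT on a bad pointer or fault.
 *
 *	static int bump_user_counter_sketch(u32 __user *uaddr)
 *	{
 *		u32 cur, old;
 *		int ret;
 *
 *		if (get_user(cur, uaddr))
 *			return -EFAULT;
 *		do {
 *			old = cur;	// cur is refreshed on a lost race
 *			ret = user_atomic_cmpxchg_inatomic(&cur, uaddr,
 *							   old, old + 1);
 *			if (ret)
 *				return ret;
 *		} while (cur != old);
 *		return 0;
 *	}
 */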

/*
 * movsl can be slow when source and dest are not both 8-byte aligned
 */
#ifdef CONFIG_X86_INTEL_USERCOPY
extern struct movsl_mask {
	int mask;
} ____cacheline_aligned_in_smp movsl_mask;
#endif

#define ARCH_HAS_NOCACHE_UACCESS 1

#ifdef CONFIG_X86_32
# include <asm/uaccess_32.h>
#else
# include <asm/uaccess_64.h>
#endif

unsigned long __must_check _copy_from_user(void *to, const void __user *from,
					   unsigned n);
unsigned long __must_check _copy_to_user(void __user *to, const void *from,
					 unsigned n);

extern void __compiletime_error("usercopy buffer size is too small")
__bad_copy_user(void);

static inline void copy_user_overflow(int size, unsigned long count)
{
	WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
}

static __always_inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
	int sz = __compiletime_object_size(to);

	might_fault();

	kasan_check_write(to, n);

	if (likely(sz < 0 || sz >= n)) {
		check_object_size(to, n, false);
		n = _copy_from_user(to, from, n);
	} else if (!__builtin_constant_p(n))
		copy_user_overflow(sz, n);
	else
		__bad_copy_user();

	return n;
}

static __always_inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
	int sz = __compiletime_object_size(from);

	kasan_check_read(from, n);

	might_fault();

	if (likely(sz < 0 || sz >= n)) {
		check_object_size(from, n, true);
		n = _copy_to_user(to, from, n);
	} else if (!__builtin_constant_p(n))
		copy_user_overflow(sz, n);
	else
		__bad_copy_user();

	return n;
}
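
/*
 * Usage sketch (structure and helper names hypothetical): the bulk copies
 * return the number of bytes left uncopied, which callers almost always
 * collapse to -EFAULT.
 *
 *	static long set_params_sketch(void __user *argp)
 *	{
 *		struct my_params p;
 *
 *		if (copy_from_user(&p, argp, sizeof(p)))
 *			return -EFAULT;
 *		if (!my_params_valid(&p))
 *			return -EINVAL;
 *		return my_apply_params(&p);
 *	}
 */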

/*
 * We rely on the nested NMI work to allow atomic faults from the NMI path; the
 * nested NMI paths are careful to preserve CR2.
 *
 * Caller must use pagefault_enable/disable, or run in interrupt context,
 * and also do an access_ok() check.
 */
#define __copy_from_user_nmi __copy_from_user_inatomic

/*
 * The "unsafe" user accesses aren't really "unsafe", but the naming
 * is a big fat warning: you have to not only do the access_ok()
 * checking before using them, but you have to surround them with the
 * user_access_begin/end() pair.
 */
#define user_access_begin()	__uaccess_begin()
#define user_access_end()	__uaccess_end()

#define unsafe_put_user(x, ptr, err_label)					\
do {										\
	int __pu_err;								\
	__put_user_size((x), (ptr), sizeof(*(ptr)), __pu_err, -EFAULT);	\
	if (unlikely(__pu_err)) goto err_label;					\
} while (0)

#define unsafe_get_user(x, ptr, err_label)					\
do {										\
	int __gu_err;								\
	unsigned long __gu_val;							\
	__get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err, -EFAULT);	\
	(x) = (__force __typeof__(*(ptr)))__gu_val;				\
	if (unlikely(__gu_err)) goto err_label;					\
} while (0)
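
/*
 * Usage sketch (hypothetical names): amortize one STAC/CLAC pair over a
 * run of accesses.  Note the fault path runs user_access_end() itself so
 * CLAC still executes on the error exit.
 *
 *	static int fill_user_words_sketch(u32 __user *uarr, int n, u32 val)
 *	{
 *		int i;
 *
 *		if (!access_ok(VERIFY_WRITE, uarr, n * sizeof(u32)))
 *			return -EFAULT;
 *		user_access_begin();
 *		for (i = 0; i < n; i++)
 *			unsafe_put_user(val, uarr + i, efault);
 *		user_access_end();
 *		return 0;
 *	efault:
 *		user_access_end();
 *		return -EFAULT;
 *	}
 */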

#endif /* _ASM_X86_UACCESS_H */