#ifndef _ASM_X86_UACCESS_64_H
#define _ASM_X86_UACCESS_64_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/lockdep.h>
#include <linux/kasan-checks.h>
#include <asm/alternative.h>
#include <asm/cpufeatures.h>
#include <asm/page.h>

/*
 * Copy To/From Userspace
 */

/* Handles exceptions in both to and from, but doesn't do access_ok */
__must_check unsigned long
copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_unrolled(void *to, const void *from, unsigned len);

static __always_inline __must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned len)
{
	unsigned ret;

	/*
	 * If CPU has ERMS feature, use copy_user_enhanced_fast_string.
	 * Otherwise, if CPU has rep_good feature, use copy_user_generic_string.
	 * Otherwise, use copy_user_generic_unrolled.
	 */
	alternative_call_2(copy_user_generic_unrolled,
			 copy_user_generic_string,
			 X86_FEATURE_REP_GOOD,
			 copy_user_enhanced_fast_string,
			 X86_FEATURE_ERMS,
			 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
				     "=d" (len)),
			 "1" (to), "2" (from), "3" (len)
			 : "memory", "rcx", "r8", "r9", "r10", "r11");
	return ret;
}
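
/*
 * alternative_call_2() patches the call target at boot: CPUs with ERMS
 * call copy_user_enhanced_fast_string() directly, REP_GOOD CPUs get
 * copy_user_generic_string(), and everything else the unrolled loop, so
 * the hot path pays for no runtime feature test.
 *
 * Illustrative sketch only (not part of this header; 'buf' and 'left'
 * are hypothetical), assuming the caller has validated 'src' with the
 * three-argument access_ok() of this kernel generation:
 *
 *	if (access_ok(VERIFY_READ, src, len))
 *		left = copy_user_generic(buf, (__force void *)src, len);
 *
 * where 'left' is the number of bytes that could not be copied.
 */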

__must_check unsigned long
copy_in_user(void __user *to, const void __user *from, unsigned len);

static __always_inline __must_check
int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
{
	int ret = 0;

	check_object_size(dst, size, false);
	if (!__builtin_constant_p(size))
		return copy_user_generic(dst, (__force void *)src, size);
	switch (size) {
	case 1:
		__uaccess_begin_nospec();
		__get_user_asm(*(u8 *)dst, (u8 __user *)src,
			       ret, "b", "b", "=q", 1);
		__uaccess_end();
		return ret;
	case 2:
		__uaccess_begin_nospec();
		__get_user_asm(*(u16 *)dst, (u16 __user *)src,
			       ret, "w", "w", "=r", 2);
		__uaccess_end();
		return ret;
	case 4:
		__uaccess_begin_nospec();
		__get_user_asm(*(u32 *)dst, (u32 __user *)src,
			       ret, "l", "k", "=r", 4);
		__uaccess_end();
		return ret;
	case 8:
		__uaccess_begin_nospec();
		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 8);
		__uaccess_end();
		return ret;
	case 10:
		__uaccess_begin_nospec();
		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 10);
		if (likely(!ret))
			__get_user_asm(*(u16 *)(8 + (char *)dst),
				       (u16 __user *)(8 + (char __user *)src),
				       ret, "w", "w", "=r", 2);
		__uaccess_end();
		return ret;
	case 16:
		__uaccess_begin_nospec();
		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 16);
		if (likely(!ret))
			__get_user_asm(*(u64 *)(8 + (char *)dst),
				       (u64 __user *)(8 + (char __user *)src),
				       ret, "q", "", "=r", 8);
		__uaccess_end();
		return ret;
	default:
		return copy_user_generic(dst, (__force void *)src, size);
	}
}
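
/*
 * For compile-time-constant sizes up to 16 bytes the switch above
 * compiles down to one or two mov instructions instead of a call into
 * the generic copy routine. __uaccess_begin_nospec() also emits a
 * speculation barrier ahead of the loads, so a mispredicted access_ok()
 * check cannot be exploited to read kernel memory speculatively
 * (Spectre v1 hardening).
 */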

static __always_inline __must_check
int __copy_from_user(void *dst, const void __user *src, unsigned size)
{
	might_fault();
	kasan_check_write(dst, size);
	return __copy_from_user_nocheck(dst, src, size);
}
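
/*
 * Minimal usage sketch (illustrative only; 'uptr' is a hypothetical
 * user pointer). The __-prefixed variants skip access_ok(), so the
 * caller must do that check itself:
 *
 *	u32 val;
 *
 *	if (!access_ok(VERIFY_READ, uptr, sizeof(val)) ||
 *	    __copy_from_user(&val, uptr, sizeof(val)))
 *		return -EFAULT;
 */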

static __always_inline __must_check
int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
{
	int ret = 0;

	check_object_size(src, size, true);
	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst, src, size);
	switch (size) {
	case 1:
		__uaccess_begin();
		__put_user_asm(*(u8 *)src, (u8 __user *)dst,
			       ret, "b", "b", "iq", 1);
		__uaccess_end();
		return ret;
	case 2:
		__uaccess_begin();
		__put_user_asm(*(u16 *)src, (u16 __user *)dst,
			       ret, "w", "w", "ir", 2);
		__uaccess_end();
		return ret;
	case 4:
		__uaccess_begin();
		__put_user_asm(*(u32 *)src, (u32 __user *)dst,
			       ret, "l", "k", "ir", 4);
		__uaccess_end();
		return ret;
	case 8:
		__uaccess_begin();
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "er", 8);
		__uaccess_end();
		return ret;
	case 10:
		__uaccess_begin();
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "er", 10);
		if (likely(!ret)) {
			asm("":::"memory");
			__put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
				       ret, "w", "w", "ir", 2);
		}
		__uaccess_end();
		return ret;
	case 16:
		__uaccess_begin();
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "er", 16);
		if (likely(!ret)) {
			asm("":::"memory");
			__put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
				       ret, "q", "", "er", 8);
		}
		__uaccess_end();
		return ret;
	default:
		return copy_user_generic((__force void *)dst, src, size);
	}
}
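
/*
 * The bare asm("":::"memory") between the two stores in the 10- and
 * 16-byte cases is a compiler barrier: it stops the compiler from
 * reordering the second __put_user_asm() across the fault check on the
 * first. The put side uses plain __uaccess_begin() since the Spectre v1
 * speculation barrier is only needed where user-supplied addresses are
 * read, not written.
 */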

static __always_inline __must_check
int __copy_to_user(void __user *dst, const void *src, unsigned size)
{
	might_fault();
	kasan_check_read(src, size);
	return __copy_to_user_nocheck(dst, src, size);
}
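
/*
 * might_fault() documents (and, with lockdep, checks) that these
 * variants may sleep on a page fault, so they must not be called from
 * atomic context; kasan_check_read()/kasan_check_write() tell KASAN
 * which kernel buffer the copy touches and with what size.
 */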

static __always_inline __must_check
int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
{
	int ret = 0;

	might_fault();
	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst,
					 (__force void *)src, size);
	switch (size) {
	case 1: {
		u8 tmp;
		__uaccess_begin();
		__get_user_asm(tmp, (u8 __user *)src,
			       ret, "b", "b", "=q", 1);
		if (likely(!ret))
			__put_user_asm(tmp, (u8 __user *)dst,
				       ret, "b", "b", "iq", 1);
		__uaccess_end();
		return ret;
	}
	case 2: {
		u16 tmp;
		__uaccess_begin();
		__get_user_asm(tmp, (u16 __user *)src,
			       ret, "w", "w", "=r", 2);
		if (likely(!ret))
			__put_user_asm(tmp, (u16 __user *)dst,
				       ret, "w", "w", "ir", 2);
		__uaccess_end();
		return ret;
	}

	case 4: {
		u32 tmp;
		__uaccess_begin();
		__get_user_asm(tmp, (u32 __user *)src,
			       ret, "l", "k", "=r", 4);
		if (likely(!ret))
			__put_user_asm(tmp, (u32 __user *)dst,
				       ret, "l", "k", "ir", 4);
		__uaccess_end();
		return ret;
	}
	case 8: {
		u64 tmp;
		__uaccess_begin();
		__get_user_asm(tmp, (u64 __user *)src,
			       ret, "q", "", "=r", 8);
		if (likely(!ret))
			__put_user_asm(tmp, (u64 __user *)dst,
				       ret, "q", "", "er", 8);
		__uaccess_end();
		return ret;
	}
	default:
		return copy_user_generic((__force void *)dst,
					 (__force void *)src, size);
	}
}
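
/*
 * A user-to-user copy cannot be a single instruction even for constant
 * sizes: each case bounces the data through a kernel register ('tmp')
 * so that a fault on either the load or the store side is caught by its
 * own exception-table entry.
 */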
Linus Torvalds1da177e2005-04-16 15:20:36 -0700246
Jan Beulich14722482009-11-13 11:56:24 +0000247static __must_check __always_inline int
248__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
249{
Andrey Ryabinin1771c6e2016-05-20 16:59:31 -0700250 kasan_check_write(dst, size);
Steven Rostedtdf90ca962014-01-03 16:45:00 -0500251 return __copy_from_user_nocheck(dst, src, size);
Jan Beulich14722482009-11-13 11:56:24 +0000252}
Andi Kleenb8858082006-09-30 01:47:55 +0200253
254static __must_check __always_inline int
255__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
256{
Andrey Ryabinin1771c6e2016-05-20 16:59:31 -0700257 kasan_check_read(src, size);
Steven Rostedtdf90ca962014-01-03 16:45:00 -0500258 return __copy_to_user_nocheck(dst, src, size);
Andi Kleenb8858082006-09-30 01:47:55 +0200259}
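
/*
 * The _inatomic variants omit might_fault(): they are intended for
 * callers that run with page faults disabled (e.g. under
 * pagefault_disable()) and can handle a short copy, so a fault makes
 * the copy return early rather than sleep.
 */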

extern long __copy_user_nocache(void *dst, const void __user *src,
				unsigned size, int zerorest);
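
/*
 * 'zerorest' indicates whether the uncopied tail of 'dst' should be
 * zeroed when a fault cuts the copy short, matching what callers of the
 * checked copy_from_user() interfaces expect for kernel destinations.
 */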

static inline int
__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
{
	might_fault();
	kasan_check_write(dst, size);
	return __copy_user_nocache(dst, src, size, 1);
}

static inline int
__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
				  unsigned size)
{
	kasan_check_write(dst, size);
	return __copy_user_nocache(dst, src, size, 0);
}
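
/*
 * The _nocache wrappers copy using non-temporal stores where possible,
 * bypassing the CPU caches; this helps for large one-shot copies whose
 * destination will not be read again soon (e.g. I/O buffers).
 */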
279
unsigned long
copy_user_handle_tail(char *to, char *from, unsigned len);

#endif /* _ASM_X86_UACCESS_64_H */