#ifndef _ASM_X86_UACCESS_64_H
#define _ASM_X86_UACCESS_64_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/lockdep.h>
#include <linux/kasan-checks.h>
#include <asm/alternative.h>
#include <asm/cpufeatures.h>
#include <asm/page.h>
14
Linus Torvalds1da177e2005-04-16 15:20:36 -070015/*
16 * Copy To/From Userspace
17 */
18
19/* Handles exceptions in both to and from, but doesn't do access_ok */
Andi Kleen95912002006-09-26 10:52:39 +020020__must_check unsigned long
Fenghua Yu954e4822012-05-24 18:19:45 -070021copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
22__must_check unsigned long
Jan Beulich1b1d9252009-12-18 16:12:56 +000023copy_user_generic_string(void *to, const void *from, unsigned len);
24__must_check unsigned long
25copy_user_generic_unrolled(void *to, const void *from, unsigned len);
26
27static __always_inline __must_check unsigned long
28copy_user_generic(void *to, const void *from, unsigned len)
29{
30 unsigned ret;
31
Fenghua Yu954e4822012-05-24 18:19:45 -070032 /*
33 * If CPU has ERMS feature, use copy_user_enhanced_fast_string.
34 * Otherwise, if CPU has rep_good feature, use copy_user_generic_string.
35 * Otherwise, use copy_user_generic_unrolled.
36 */
37 alternative_call_2(copy_user_generic_unrolled,
Jan Beulich1b1d9252009-12-18 16:12:56 +000038 copy_user_generic_string,
39 X86_FEATURE_REP_GOOD,
Fenghua Yu954e4822012-05-24 18:19:45 -070040 copy_user_enhanced_fast_string,
41 X86_FEATURE_ERMS,
Jan Beulich1b1d9252009-12-18 16:12:56 +000042 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
43 "=d" (len)),
44 "1" (to), "2" (from), "3" (len)
45 : "memory", "rcx", "r8", "r9", "r10", "r11");
46 return ret;
47}

__must_check unsigned long
copy_in_user(void __user *to, const void __user *from, unsigned len);
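
/*
 * copy_in_user() copies between two user-space buffers, so both the
 * source and the destination access may fault; it too returns the number
 * of bytes left uncopied.
 */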
Linus Torvalds1da177e2005-04-16 15:20:36 -070051
Frederic Weisbecker3c93ca02009-11-16 15:42:18 +010052static __always_inline __must_check
Andi Kleenff47ab4f2013-08-16 14:17:19 -070053int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
Joe Perchesb8963132008-03-23 01:03:49 -070054{
Andi Kleen383d0792006-09-26 10:52:40 +020055 int ret = 0;
Ingo Molnard1a76182008-10-28 16:54:49 +010056
Linus Torvalds1da177e2005-04-16 15:20:36 -070057 if (!__builtin_constant_p(size))
Joe Perchesb8963132008-03-23 01:03:49 -070058 return copy_user_generic(dst, (__force void *)src, size);
59 switch (size) {
Linus Torvalds11f1a4b2015-12-17 09:45:09 -080060 case 1:
61 __uaccess_begin();
62 __get_user_asm(*(u8 *)dst, (u8 __user *)src,
Joe Perchesb8963132008-03-23 01:03:49 -070063 ret, "b", "b", "=q", 1);
Linus Torvalds11f1a4b2015-12-17 09:45:09 -080064 __uaccess_end();
Linus Torvalds1da177e2005-04-16 15:20:36 -070065 return ret;
Linus Torvalds11f1a4b2015-12-17 09:45:09 -080066 case 2:
67 __uaccess_begin();
68 __get_user_asm(*(u16 *)dst, (u16 __user *)src,
Joe Perchesb8963132008-03-23 01:03:49 -070069 ret, "w", "w", "=r", 2);
Linus Torvalds11f1a4b2015-12-17 09:45:09 -080070 __uaccess_end();
Linus Torvalds1da177e2005-04-16 15:20:36 -070071 return ret;
Linus Torvalds11f1a4b2015-12-17 09:45:09 -080072 case 4:
73 __uaccess_begin();
74 __get_user_asm(*(u32 *)dst, (u32 __user *)src,
Joe Perchesb8963132008-03-23 01:03:49 -070075 ret, "l", "k", "=r", 4);
Linus Torvalds11f1a4b2015-12-17 09:45:09 -080076 __uaccess_end();
Linus Torvalds1da177e2005-04-16 15:20:36 -070077 return ret;
Linus Torvalds11f1a4b2015-12-17 09:45:09 -080078 case 8:
79 __uaccess_begin();
80 __get_user_asm(*(u64 *)dst, (u64 __user *)src,
Joe Perchesb8963132008-03-23 01:03:49 -070081 ret, "q", "", "=r", 8);
Linus Torvalds11f1a4b2015-12-17 09:45:09 -080082 __uaccess_end();
Joe Perchesb8963132008-03-23 01:03:49 -070083 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -070084 case 10:
Linus Torvalds11f1a4b2015-12-17 09:45:09 -080085 __uaccess_begin();
Joe Perchesb8963132008-03-23 01:03:49 -070086 __get_user_asm(*(u64 *)dst, (u64 __user *)src,
Hiroshi Shimamoto20a4a232008-11-13 18:06:04 -080087 ret, "q", "", "=r", 10);
Linus Torvalds11f1a4b2015-12-17 09:45:09 -080088 if (likely(!ret))
89 __get_user_asm(*(u16 *)(8 + (char *)dst),
90 (u16 __user *)(8 + (char __user *)src),
91 ret, "w", "w", "=r", 2);
92 __uaccess_end();
Joe Perchesb8963132008-03-23 01:03:49 -070093 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -070094 case 16:
Linus Torvalds11f1a4b2015-12-17 09:45:09 -080095 __uaccess_begin();
Joe Perchesb8963132008-03-23 01:03:49 -070096 __get_user_asm(*(u64 *)dst, (u64 __user *)src,
97 ret, "q", "", "=r", 16);
Linus Torvalds11f1a4b2015-12-17 09:45:09 -080098 if (likely(!ret))
99 __get_user_asm(*(u64 *)(8 + (char *)dst),
100 (u64 __user *)(8 + (char __user *)src),
101 ret, "q", "", "=r", 8);
102 __uaccess_end();
Joe Perchesb8963132008-03-23 01:03:49 -0700103 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700104 default:
Joe Perchesb8963132008-03-23 01:03:49 -0700105 return copy_user_generic(dst, (__force void *)src, size);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700106 }
Joe Perchesb8963132008-03-23 01:03:49 -0700107}
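
/*
 * The switch above is a constant-size fast path: when the compiler can
 * prove size is 1/2/4/8, the copy collapses to a single inline mov inside
 * __uaccess_begin()/__uaccess_end() instead of a function call, and the
 * odd 10- and 16-byte cases become an 8-byte chunk plus a 2- or 8-byte
 * tail.  For example (a hypothetical caller, not from this file):
 *
 *	u64 val;
 *	if (__copy_from_user_nocheck(&val, uptr, sizeof(val)))
 *		return -EFAULT;
 *
 * compiles down to one "q"-flavour __get_user_asm().
 */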

static __always_inline __must_check
int __copy_from_user(void *dst, const void __user *src, unsigned size)
{
	might_fault();
	kasan_check_write(dst, size);
	return __copy_from_user_nocheck(dst, src, size);
}
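
/*
 * Example caller (a sketch, not part of this header): __copy_from_user()
 * performs no access_ok() check of its own, so the caller must have
 * validated the user range first:
 *
 *	if (!access_ok(VERIFY_READ, ubuf, sizeof(karg)))
 *		return -EFAULT;
 *	if (__copy_from_user(&karg, ubuf, sizeof(karg)))
 *		return -EFAULT;
 */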

static __always_inline __must_check
int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
{
	int ret = 0;

	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst, src, size);
	switch (size) {
	case 1:
		__uaccess_begin();
		__put_user_asm(*(u8 *)src, (u8 __user *)dst,
			       ret, "b", "b", "iq", 1);
		__uaccess_end();
		return ret;
	case 2:
		__uaccess_begin();
		__put_user_asm(*(u16 *)src, (u16 __user *)dst,
			       ret, "w", "w", "ir", 2);
		__uaccess_end();
		return ret;
	case 4:
		__uaccess_begin();
		__put_user_asm(*(u32 *)src, (u32 __user *)dst,
			       ret, "l", "k", "ir", 4);
		__uaccess_end();
		return ret;
	case 8:
		__uaccess_begin();
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "er", 8);
		__uaccess_end();
		return ret;
	case 10:
		__uaccess_begin();
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "er", 10);
		if (likely(!ret)) {
			asm("":::"memory");
			__put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
				       ret, "w", "w", "ir", 2);
		}
		__uaccess_end();
		return ret;
	case 16:
		__uaccess_begin();
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "er", 16);
		if (likely(!ret)) {
			asm("":::"memory");
			__put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
				       ret, "q", "", "er", 8);
		}
		__uaccess_end();
		return ret;
	default:
		return copy_user_generic((__force void *)dst, src, size);
	}
}
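
/*
 * Note on the 10- and 16-byte cases above: the empty asm("":::"memory")
 * is a compiler barrier, keeping the second __put_user_asm() from being
 * reordered ahead of the first one's fault check.  "4[(u16 *)src]" is
 * plain C array indexing written backwards, i.e. ((u16 *)src)[4], the
 * u16 at byte offset 8.
 */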

static __always_inline __must_check
int __copy_to_user(void __user *dst, const void *src, unsigned size)
{
	might_fault();
	kasan_check_read(src, size);
	return __copy_to_user_nocheck(dst, src, size);
}
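
/*
 * Mirror-image example (again a sketch, not part of this header): the put
 * side likewise relies on the caller's own access_ok() check:
 *
 *	if (!access_ok(VERIFY_WRITE, ubuf, sizeof(kres)))
 *		return -EFAULT;
 *	if (__copy_to_user(ubuf, &kres, sizeof(kres)))
 *		return -EFAULT;
 */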

static __always_inline __must_check
int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
{
	int ret = 0;

	might_fault();
	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst,
					 (__force void *)src, size);
	switch (size) {
	case 1: {
		u8 tmp;
		__uaccess_begin();
		__get_user_asm(tmp, (u8 __user *)src,
			       ret, "b", "b", "=q", 1);
		if (likely(!ret))
			__put_user_asm(tmp, (u8 __user *)dst,
				       ret, "b", "b", "iq", 1);
		__uaccess_end();
		return ret;
	}
	case 2: {
		u16 tmp;
		__uaccess_begin();
		__get_user_asm(tmp, (u16 __user *)src,
			       ret, "w", "w", "=r", 2);
		if (likely(!ret))
			__put_user_asm(tmp, (u16 __user *)dst,
				       ret, "w", "w", "ir", 2);
		__uaccess_end();
		return ret;
	}

	case 4: {
		u32 tmp;
		__uaccess_begin();
		__get_user_asm(tmp, (u32 __user *)src,
			       ret, "l", "k", "=r", 4);
		if (likely(!ret))
			__put_user_asm(tmp, (u32 __user *)dst,
				       ret, "l", "k", "ir", 4);
		__uaccess_end();
		return ret;
	}
	case 8: {
		u64 tmp;
		__uaccess_begin();
		__get_user_asm(tmp, (u64 __user *)src,
			       ret, "q", "", "=r", 8);
		if (likely(!ret))
			__put_user_asm(tmp, (u64 __user *)dst,
				       ret, "q", "", "er", 8);
		__uaccess_end();
		return ret;
	}
	default:
		return copy_user_generic((__force void *)dst,
					 (__force void *)src, size);
	}
}
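
/*
 * __copy_in_user() bounces each constant-size value through a register
 * (tmp): one __get_user_asm() from the source and, only if that did not
 * fault, one __put_user_asm() to the destination, both inside a single
 * __uaccess_begin()/__uaccess_end() region.
 */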

static __must_check __always_inline int
__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
{
	kasan_check_write(dst, size);
	return __copy_from_user_nocheck(dst, src, size);
}

static __must_check __always_inline int
__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
{
	kasan_check_read(src, size);
	return __copy_to_user_nocheck(dst, src, size);
}
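
/*
 * The _inatomic variants deliberately omit might_fault(): they are meant
 * for callers that run with page faults disabled (e.g. under
 * pagefault_disable()) and handle a non-zero "bytes uncopied" return
 * themselves instead of sleeping on a fault.
 */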

extern long __copy_user_nocache(void *dst, const void __user *src,
				unsigned size, int zerorest);
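
/*
 * __copy_user_nocache() copies using non-temporal (cache-bypassing)
 * stores, which suits large one-shot copies that should not pollute the
 * cache.  Historically, a non-zero "zerorest" asked for the unwritten
 * tail of the destination to be zeroed after a fault.
 */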

static inline int
__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
{
	might_fault();
	kasan_check_write(dst, size);
	return __copy_user_nocache(dst, src, size, 1);
}

static inline int
__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
				  unsigned size)
{
	kasan_check_write(dst, size);
	return __copy_user_nocache(dst, src, size, 0);
}
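
/*
 * Of the two wrappers above, only the plain __copy_from_user_nocache()
 * may sleep (might_fault()); the _inatomic_nocache flavour also passes
 * zerorest == 0.
 */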

unsigned long
copy_user_handle_tail(char *to, char *from, unsigned len);
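
/*
 * copy_user_handle_tail() is the fixup helper the assembly copy routines
 * branch to on a fault: it retries the remaining range byte by byte and
 * returns how many bytes could not be copied.
 */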

#endif /* _ASM_X86_UACCESS_64_H */