blob: ce6fec7ce38d403bd986651c19999bdb266f9fd5 [file] [log] [blame]
H. Peter Anvin1965aae2008-10-22 22:26:29 -07001#ifndef _ASM_X86_UACCESS_64_H
2#define _ASM_X86_UACCESS_64_H
Linus Torvalds1da177e2005-04-16 15:20:36 -07003
4/*
5 * User space memory access functions
6 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07007#include <linux/compiler.h>
8#include <linux/errno.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -07009#include <linux/prefetch.h>
Nick Piggin16dbc6c2008-10-02 14:50:12 -070010#include <linux/lockdep.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070011#include <asm/page.h>
12
Linus Torvalds1da177e2005-04-16 15:20:36 -070013/*
14 * Copy To/From Userspace
15 */
16
17/* Handles exceptions in both to and from, but doesn't do access_ok */
Andi Kleen95912002006-09-26 10:52:39 +020018__must_check unsigned long
19copy_user_generic(void *to, const void *from, unsigned len);
Linus Torvalds1da177e2005-04-16 15:20:36 -070020
Andi Kleen95912002006-09-26 10:52:39 +020021__must_check unsigned long
22copy_to_user(void __user *to, const void *from, unsigned len);
23__must_check unsigned long
Arjan van de Ven9f0cf4a2009-09-26 14:33:01 +020024_copy_from_user(void *to, const void __user *from, unsigned len);
Andi Kleen95912002006-09-26 10:52:39 +020025__must_check unsigned long
26copy_in_user(void __user *to, const void __user *from, unsigned len);
Linus Torvalds1da177e2005-04-16 15:20:36 -070027
Arjan van de Ven9f0cf4a2009-09-26 14:33:01 +020028static inline unsigned long __must_check copy_from_user(void *to,
29 const void __user *from,
30 unsigned long n)
31{
32 int sz = __compiletime_object_size(to);
33 int ret = -EFAULT;
34
35 if (likely(sz == -1 || sz >= n))
36 ret = _copy_from_user(to, from, n);
37#ifdef CONFIG_DEBUG_VM
38 else
39 WARN(1, "Buffer overflow detected!\n");
40#endif
41 return ret;
42}
43
44
/*
 * __copy_from_user - copy from user space without an access_ok() check.
 *
 * For compile-time-constant sizes of 1/2/4/8/10/16 bytes the copy is
 * open-coded via __get_user_asm(); every other size falls back to
 * copy_user_generic().  Returns 0 on success, otherwise the number of
 * bytes left uncopied (the last __get_user_asm() argument is the error
 * value stored in @ret if that access faults).
 */
static __always_inline __must_check
int __copy_from_user(void *dst, const void __user *src, unsigned size)
{
	int ret = 0;

	might_fault();
	/* Non-constant sizes: use the generic (string-copy) path. */
	if (!__builtin_constant_p(size))
		return copy_user_generic(dst, (__force void *)src, size);
	/*
	 * The quoted macro arguments are the instruction size suffix,
	 * register modifier and output constraint for the asm template.
	 */
	switch (size) {
	case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
			      ret, "b", "b", "=q", 1);
		return ret;
	case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
			      ret, "w", "w", "=r", 2);
		return ret;
	case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
			      ret, "l", "k", "=r", 4);
		return ret;
	case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			      ret, "q", "", "=r", 8);
		return ret;
	case 10:
		/* 10 bytes: one u64 chunk followed by a u16 tail. */
		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 10);
		if (unlikely(ret))
			return ret;
		__get_user_asm(*(u16 *)(8 + (char *)dst),
			       (u16 __user *)(8 + (char __user *)src),
			       ret, "w", "w", "=r", 2);
		return ret;
	case 16:
		/* 16 bytes: two u64 chunks. */
		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 16);
		if (unlikely(ret))
			return ret;
		__get_user_asm(*(u64 *)(8 + (char *)dst),
			       (u64 __user *)(8 + (char __user *)src),
			       ret, "q", "", "=r", 8);
		return ret;
	default:
		return copy_user_generic(dst, (__force void *)src, size);
	}
}
Linus Torvalds1da177e2005-04-16 15:20:36 -070088
/*
 * __copy_to_user - copy to user space without an access_ok() check.
 *
 * Mirror image of __copy_from_user(): constant sizes of 1/2/4/8/10/16
 * bytes are open-coded with __put_user_asm(); anything else goes
 * through copy_user_generic().  Returns 0 on success, otherwise the
 * number of bytes left uncopied (the last __put_user_asm() argument is
 * the error value stored in @ret if that store faults).
 */
static __always_inline __must_check
int __copy_to_user(void __user *dst, const void *src, unsigned size)
{
	int ret = 0;

	might_fault();
	/* Non-constant sizes: use the generic (string-copy) path. */
	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst, src, size);
	switch (size) {
	case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
			      ret, "b", "b", "iq", 1);
		return ret;
	case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
			      ret, "w", "w", "ir", 2);
		return ret;
	case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
			      ret, "l", "k", "ir", 4);
		return ret;
	case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			      ret, "q", "", "er", 8);
		return ret;
	case 10:
		/* 10 bytes: one u64 chunk followed by a u16 tail. */
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "er", 10);
		if (unlikely(ret))
			return ret;
		/*
		 * Memory-clobber barrier: stop the compiler from caching
		 * or reordering accesses across the two-part copy.
		 */
		asm("":::"memory");
		/* 4[(u16 *)src] is ((u16 *)src)[4], i.e. bytes 8..9. */
		__put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
			       ret, "w", "w", "ir", 2);
		return ret;
	case 16:
		/* 16 bytes: two u64 chunks. */
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "er", 16);
		if (unlikely(ret))
			return ret;
		asm("":::"memory");
		/* 1[(u64 *)src] is ((u64 *)src)[1], i.e. bytes 8..15. */
		__put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
			       ret, "q", "", "er", 8);
		return ret;
	default:
		return copy_user_generic((__force void *)dst, src, size);
	}
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700132
/*
 * __copy_in_user - copy between two user-space buffers, without an
 * access_ok() check on either pointer.
 *
 * Constant sizes of 1/2/4/8 bytes bounce the value through a kernel
 * temporary: a __get_user_asm() load from @src, then (only if the load
 * succeeded) a __put_user_asm() store to @dst.  Other sizes fall back
 * to copy_user_generic().  Returns 0 on success, non-zero otherwise
 * (the last macro argument is the error value stored in @ret on a
 * faulting access).
 */
static __always_inline __must_check
int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
{
	int ret = 0;

	might_fault();
	/* Non-constant sizes: use the generic (string-copy) path. */
	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst,
					 (__force void *)src, size);
	switch (size) {
	case 1: {
		u8 tmp;
		__get_user_asm(tmp, (u8 __user *)src,
			       ret, "b", "b", "=q", 1);
		if (likely(!ret))
			__put_user_asm(tmp, (u8 __user *)dst,
				       ret, "b", "b", "iq", 1);
		return ret;
	}
	case 2: {
		u16 tmp;
		__get_user_asm(tmp, (u16 __user *)src,
			       ret, "w", "w", "=r", 2);
		if (likely(!ret))
			__put_user_asm(tmp, (u16 __user *)dst,
				       ret, "w", "w", "ir", 2);
		return ret;
	}

	case 4: {
		u32 tmp;
		__get_user_asm(tmp, (u32 __user *)src,
			       ret, "l", "k", "=r", 4);
		if (likely(!ret))
			__put_user_asm(tmp, (u32 __user *)dst,
				       ret, "l", "k", "ir", 4);
		return ret;
	}
	case 8: {
		u64 tmp;
		__get_user_asm(tmp, (u64 __user *)src,
			       ret, "q", "", "=r", 8);
		if (likely(!ret))
			__put_user_asm(tmp, (u64 __user *)dst,
				       ret, "q", "", "er", 8);
		return ret;
	}
	default:
		return copy_user_generic((__force void *)dst,
					 (__force void *)src, size);
	}
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700185
Joe Perchesb8963132008-03-23 01:03:49 -0700186__must_check long
Andi Kleen95912002006-09-26 10:52:39 +0200187strncpy_from_user(char *dst, const char __user *src, long count);
Joe Perchesb8963132008-03-23 01:03:49 -0700188__must_check long
Andi Kleen95912002006-09-26 10:52:39 +0200189__strncpy_from_user(char *dst, const char __user *src, long count);
190__must_check long strnlen_user(const char __user *str, long n);
191__must_check long __strnlen_user(const char __user *str, long n);
192__must_check long strlen_user(const char __user *str);
193__must_check unsigned long clear_user(void __user *mem, unsigned long len);
194__must_check unsigned long __clear_user(void __user *mem, unsigned long len);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700195
Joe Perchesb8963132008-03-23 01:03:49 -0700196__must_check long __copy_from_user_inatomic(void *dst, const void __user *src,
197 unsigned size);
Andi Kleenb8858082006-09-30 01:47:55 +0200198
/*
 * __copy_to_user_inatomic - copy to user space from atomic context.
 *
 * Goes straight to copy_user_generic() with no might_fault() / sleep
 * annotation, so it is usable where sleeping is not allowed.  No
 * access_ok() check is performed.  Returns 0 on success, non-zero on
 * failure (propagated from copy_user_generic()).
 */
static __must_check __always_inline int
__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
{
	return copy_user_generic((__force void *)dst, src, size);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700204
Joe Perchesb8963132008-03-23 01:03:49 -0700205extern long __copy_user_nocache(void *dst, const void __user *src,
206 unsigned size, int zerorest);
Andi Kleen0812a572007-02-13 13:26:19 +0100207
/*
 * __copy_from_user_nocache - copy from user space, minimizing cache
 * pollution of the destination (may sleep).
 *
 * Thin wrapper around __copy_user_nocache() with zerorest=1; the
 * parameter name suggests the uncopied tail of @dst is zeroed on a
 * faulting copy — confirm against the __copy_user_nocache()
 * implementation.  Returns its result (0 on success).
 */
static inline int
__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
{
	might_sleep();
	return __copy_user_nocache(dst, src, size, 1);
}
214
/*
 * __copy_from_user_inatomic_nocache - non-cache-polluting copy from
 * user space, callable from atomic context (no might_sleep()).
 *
 * Same as __copy_from_user_nocache() but passes zerorest=0, i.e. the
 * destination tail is left untouched on a faulting copy (per the
 * parameter name — confirm against __copy_user_nocache()).
 */
static inline int
__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
				  unsigned size)
{
	return __copy_user_nocache(dst, src, size, 0);
}
221
Vitaly Mayatskikh11295852008-07-02 15:48:21 +0200222unsigned long
223copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
224
H. Peter Anvin1965aae2008-10-22 22:26:29 -0700225#endif /* _ASM_X86_UACCESS_64_H */