#ifndef _ASM_X86_UACCESS_32_H
#define _ASM_X86_UACCESS_32_H

/*
 * User space memory access functions
 */
#include <linux/errno.h>
#include <linux/thread_info.h>
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>

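/*
 * Out-of-line copy primitives; the suffixes name the variants: _nozero
 * does not zero-pad the destination tail when a fault cuts the copy
 * short, and _nocache is meant to avoid polluting the CPU caches
 * (using non-temporal stores where the CPU supports them).
 */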
unsigned long __must_check __copy_to_user_ll
		(void __user *to, const void *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll
		(void *to, const void __user *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll_nozero
		(void *to, const void __user *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll_nocache
		(void *to, const void __user *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll_nocache_nozero
		(void *to, const void __user *from, unsigned long n);

/**
 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
 * @to: Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n: Number of bytes to copy.
 *
 * Context: User context only.
 *
 * Copy data from kernel space to user space. Caller must check
 * the specified block with access_ok() before calling this function.
 * The caller should also make sure the user space address is pinned,
 * so that we don't take a page fault and sleep.
 *
 * Here we special-case 1, 2 and 4-byte copy_*_user invocations. On a fault
 * we return the initial request size (1, 2 or 4), as copy_*_user should do.
 * If a store crosses a page boundary and gets a fault, the x86 will not write
 * anything, so this is accurate.
 */

static __always_inline unsigned long __must_check
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
	if (__builtin_constant_p(n)) {
		unsigned long ret;

		switch (n) {
		case 1:
			__put_user_size(*(u8 *)from, (u8 __user *)to,
					1, ret, 1);
			return ret;
		case 2:
			__put_user_size(*(u16 *)from, (u16 __user *)to,
					2, ret, 2);
			return ret;
		case 4:
			__put_user_size(*(u32 *)from, (u32 __user *)to,
					4, ret, 4);
			return ret;
		}
	}
	return __copy_to_user_ll(to, from, n);
}
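
/*
 * Usage sketch (illustrative only; 'uptr' and 'val' are hypothetical):
 * with 'uptr' already checked by access_ok() and its page pinned, a
 * constant-sized copy boils down to a single __put_user_size():
 *
 *	u32 val = 42;
 *
 *	if (__copy_to_user_inatomic(uptr, &val, sizeof(val)))
 *		return -EFAULT;
 *
 * sizeof(val) is the compile-time constant 4, so the switch above is
 * taken instead of the call out to __copy_to_user_ll().
 */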

/**
 * __copy_to_user: - Copy a block of data into user space, with less checking.
 * @to: Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n: Number of bytes to copy.
 *
 * Context: User context only. This function may sleep.
 *
 * Copy data from kernel space to user space. Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
static __always_inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	return __copy_to_user_inatomic(to, from, n);
}
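
/*
 * Usage sketch (illustrative only; 'arg' and 'kbuf' are hypothetical):
 * a typical write-back of a result structure to user space, after the
 * caller has validated 'arg' with access_ok():
 *
 *	if (__copy_to_user(arg, &kbuf, sizeof(kbuf)))
 *		return -EFAULT;
 *
 * Unlike __copy_to_user_inatomic(), this may fault and sleep, so it
 * must not be called from atomic context.
 */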

static __always_inline unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
	/* Avoid zeroing the tail if the copy fails.
	 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
	 * but as the zeroing behaviour is only significant when n is not
	 * constant, that shouldn't be a problem.
	 */
	if (__builtin_constant_p(n)) {
		unsigned long ret;

		switch (n) {
		case 1:
			__get_user_size(*(u8 *)to, from, 1, ret, 1);
			return ret;
		case 2:
			__get_user_size(*(u16 *)to, from, 2, ret, 2);
			return ret;
		case 4:
			__get_user_size(*(u32 *)to, from, 4, ret, 4);
			return ret;
		}
	}
	return __copy_from_user_ll_nozero(to, from, n);
}
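
/*
 * Usage sketch (illustrative only; 'dst', 'buf' and 'bytes' are
 * hypothetical): copy with pagefaults disabled and fall back to a
 * sleeping copy on failure. The uncopied tail is *NOT* zeroed here,
 * so the caller must cope with a short copy:
 *
 *	pagefault_disable();
 *	left = __copy_from_user_inatomic(dst, buf, bytes);
 *	pagefault_enable();
 *	if (left)
 *		(retry, or fall back to __copy_from_user())
 */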

/**
 * __copy_from_user: - Copy a block of data from user space, with less checking.
 * @to: Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n: Number of bytes to copy.
 *
 * Context: User context only. This function may sleep.
 *
 * Copy data from user space to kernel space. Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 *
 * An alternate version - __copy_from_user_inatomic() - may be called from
 * atomic context and will fail rather than sleep. In this case the
 * uncopied bytes will *NOT* be padded with zeros. See fs/filemap.h
 * for explanation of why this is needed.
 */
static __always_inline unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
	might_fault();
	if (__builtin_constant_p(n)) {
		unsigned long ret;

		switch (n) {
		case 1:
			__get_user_size(*(u8 *)to, from, 1, ret, 1);
			return ret;
		case 2:
			__get_user_size(*(u16 *)to, from, 2, ret, 2);
			return ret;
		case 4:
			__get_user_size(*(u32 *)to, from, 4, ret, 4);
			return ret;
		}
	}
	return __copy_from_user_ll(to, from, n);
}
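
/*
 * Usage sketch (illustrative only; 'struct req' and 'ubuf' are made
 * up): reading a fixed-size request block from user space:
 *
 *	struct req r;
 *
 *	if (!access_ok(VERIFY_READ, ubuf, sizeof(r)))
 *		return -EFAULT;
 *	if (__copy_from_user(&r, ubuf, sizeof(r)))
 *		return -EFAULT;
 *
 * On a partial fault the tail of 'r' is zero-padded, so the kernel
 * never sees uninitialized bytes in it.
 */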

static __always_inline unsigned long __copy_from_user_nocache(void *to,
				const void __user *from, unsigned long n)
{
	might_fault();
	if (__builtin_constant_p(n)) {
		unsigned long ret;

		switch (n) {
		case 1:
			__get_user_size(*(u8 *)to, from, 1, ret, 1);
			return ret;
		case 2:
			__get_user_size(*(u16 *)to, from, 2, ret, 2);
			return ret;
		case 4:
			__get_user_size(*(u32 *)to, from, 4, ret, 4);
			return ret;
		}
	}
	return __copy_from_user_ll_nocache(to, from, n);
}
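
/*
 * Usage sketch (illustrative only; 'dst', 'ubuf' and 'len' are
 * hypothetical): for bulk data whose destination will not be read
 * again soon, the _nocache variant avoids displacing useful cache
 * lines:
 *
 *	if (__copy_from_user_nocache(dst, ubuf, len))
 *		return -EFAULT;
 */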
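/*
 * Atomic-context variant of __copy_from_user_nocache(): no
 * might_fault() annotation, and like __copy_from_user_inatomic() it
 * does *NOT* zero the uncopied tail on a fault (hence the _nozero
 * low-level helper).
 */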
static __always_inline unsigned long
__copy_from_user_inatomic_nocache(void *to, const void __user *from,
				  unsigned long n)
{
	return __copy_from_user_ll_nocache_nozero(to, from, n);
}

#endif /* _ASM_X86_UACCESS_32_H */