#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

/*
 * User space memory access functions
 */

#ifdef __KERNEL__
#include <linux/errno.h>
#include <linux/compiler.h>
#include <linux/string.h>
#include <linux/thread_info.h>
#include <asm/asi.h>
#include <asm/spitfire.h>
#include <asm-generic/uaccess-unaligned.h>
#include <asm/extable_64.h>
#endif

#ifndef __ASSEMBLY__

#include <asm/processor.h>

/*
 * Sparc64 is segmented, though more like the M68K than the I386.
 * We use the secondary ASI to address user memory, which references a
 * completely different VM map, thus there is zero chance of the user
 * doing something queer and tricking us into poking kernel memory.
 *
 * What is left here is basically what is needed for the other parts of
 * the kernel that expect to be able to manipulate, erum, "segments".
 * Or perhaps more properly, permissions.
 *
 * "For historical reasons, these macros are grossly misnamed." -Linus
 */

#define KERNEL_DS	((mm_segment_t) { ASI_P })
#define USER_DS		((mm_segment_t) { ASI_AIUS })	/* har har har */

#define VERIFY_READ	0
#define VERIFY_WRITE	1

#define get_fs()	((mm_segment_t){(current_thread_info()->current_ds)})
#define get_ds()	(KERNEL_DS)

#define segment_eq(a, b)  ((a).seg == (b).seg)

#define set_fs(val)								\
do {										\
	current_thread_info()->current_ds = (val).seg;				\
	__asm__ __volatile__ ("wr %%g0, %0, %%asi" : : "r" ((val).seg));	\
} while(0)
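
/*
 * Illustrative sketch, not part of the original header: the usual way
 * kernel code uses these "segment" macros is to temporarily widen the
 * segment with set_fs() and restore it afterwards, e.g. when handing a
 * kernel buffer to a routine that expects a __user pointer.  The callee
 * name below is hypothetical.
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	err = routine_taking_user_ptr((void __user *)kernel_buf, len);
 *	set_fs(old_fs);
 *
 * On sparc64 set_fs() writes the chosen ASI into the %asi register, so
 * the alternate-space loads/stores in __get_user_asm()/__put_user_asm()
 * below are steered at either the kernel (ASI_P) or the user (ASI_AIUS)
 * address space.
 */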

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 */
static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
{
	if (__builtin_constant_p(size))
		return addr > limit - size;

	addr += size;
	if (addr < size)
		return true;

	return addr > limit;
}

#define __range_not_ok(addr, size, limit)				\
({									\
	__chk_user_ptr(addr);						\
	__chk_range_not_ok((unsigned long __force)(addr), size, limit); \
})
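
/*
 * Worked example (added for illustration, not in the original header):
 * with limit == 0x0000080000000000 (an arbitrary example limit), a call
 * like __chk_range_not_ok(0xfffffffffffffff0, 0x20, limit) computes
 * addr + size == 0x10, i.e. the sum wrapped around; since 0x10 < 0x20
 * the "addr < size" test catches the wrap and the range is rejected
 * even though the wrapped end address compares below limit.  Note that
 * access_ok() below always succeeds on sparc64, so __range_not_ok() is
 * what callers use when they still want a real range check.
 */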

static inline int __access_ok(const void __user * addr, unsigned long size)
{
	return 1;
}

static inline int access_ok(int type, const void __user * addr, unsigned long size)
{
	return 1;
}

void __ret_efault(void);
void __retl_efault(void);

/* Uh, these should become the main single-value transfer routines..
 * They automatically use the right size if we just have the right
 * pointer type..
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 */
#define put_user(x, ptr) ({						\
	unsigned long __pu_addr = (unsigned long)(ptr);			\
	__chk_user_ptr(ptr);						\
	__put_user_nocheck((__typeof__(*(ptr)))(x), __pu_addr, sizeof(*(ptr)));\
})

#define get_user(x, ptr) ({						\
	unsigned long __gu_addr = (unsigned long)(ptr);			\
	__chk_user_ptr(ptr);						\
	__get_user_nocheck((x), __gu_addr, sizeof(*(ptr)), __typeof__(*(ptr)));\
})

#define __put_user(x, ptr) put_user(x, ptr)
#define __get_user(x, ptr) get_user(x, ptr)
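
/*
 * Illustrative sketch, not part of the original header: a hypothetical
 * helper showing the calling convention of get_user()/put_user().  Both
 * macros evaluate to 0 on success or -EFAULT on a faulting user pointer,
 * and get_user() writes the fetched value through its first argument.
 * The function name is made up for this example.
 */
static inline int uaccess_example_increment(int __user *uptr)
{
	int val;

	if (get_user(val, uptr))	/* -EFAULT if uptr faults */
		return -EFAULT;
	return put_user(val + 1, uptr);	/* 0 on success, -EFAULT on fault */
}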

struct __large_struct { unsigned long buf[100]; };
#define __m(x) ((struct __large_struct *)(x))

#define __put_user_nocheck(data, addr, size) ({			\
	register int __pu_ret;						\
	switch (size) {							\
	case 1: __put_user_asm(data, b, addr, __pu_ret); break;	\
	case 2: __put_user_asm(data, h, addr, __pu_ret); break;	\
	case 4: __put_user_asm(data, w, addr, __pu_ret); break;	\
	case 8: __put_user_asm(data, x, addr, __pu_ret); break;	\
	default: __pu_ret = __put_user_bad(); break;			\
	}								\
	__pu_ret;							\
})

#define __put_user_asm(x, size, addr, ret)				\
__asm__ __volatile__(							\
	"/* Put user asm, inline. */\n"					\
"1:\t"	"st"#size "a %1, [%2] %%asi\n\t"				\
	"clr	%0\n"							\
"2:\n\n\t"								\
	".section .fixup,#alloc,#execinstr\n\t"				\
	".align	4\n"							\
"3:\n\t"								\
	"sethi	%%hi(2b), %0\n\t"					\
	"jmpl	%0 + %%lo(2b), %%g0\n\t"				\
	" mov	%3, %0\n\n\t"						\
	".previous\n\t"							\
	".section __ex_table,\"a\"\n\t"					\
	".align	4\n\t"							\
	".word	1b, 3b\n\t"						\
	".previous\n\n\t"						\
	: "=r" (ret) : "r" (x), "r" (__m(addr)),			\
	  "i" (-EFAULT))

int __put_user_bad(void);

#define __get_user_nocheck(data, addr, size, type) ({			\
	register int __gu_ret;						\
	register unsigned long __gu_val;				\
	switch (size) {							\
	case 1: __get_user_asm(__gu_val, ub, addr, __gu_ret); break;	\
	case 2: __get_user_asm(__gu_val, uh, addr, __gu_ret); break;	\
	case 4: __get_user_asm(__gu_val, uw, addr, __gu_ret); break;	\
	case 8: __get_user_asm(__gu_val, x, addr, __gu_ret); break;	\
	default:							\
		__gu_val = 0;						\
		__gu_ret = __get_user_bad();				\
		break;							\
	}								\
	data = (__force type) __gu_val;					\
	__gu_ret;							\
})

#define __get_user_asm(x, size, addr, ret)				\
__asm__ __volatile__(							\
	"/* Get user asm, inline. */\n"					\
"1:\t"	"ld"#size "a [%2] %%asi, %1\n\t"				\
	"clr	%0\n"							\
"2:\n\n\t"								\
	".section .fixup,#alloc,#execinstr\n\t"				\
	".align	4\n"							\
"3:\n\t"								\
	"sethi	%%hi(2b), %0\n\t"					\
	"clr	%1\n\t"							\
	"jmpl	%0 + %%lo(2b), %%g0\n\t"				\
	" mov	%3, %0\n\n\t"						\
	".previous\n\t"							\
	".section __ex_table,\"a\"\n\t"					\
	".align	4\n\t"							\
	".word	1b, 3b\n\n\t"						\
	".previous\n\t"							\
	: "=r" (ret), "=r" (x) : "r" (__m(addr)),			\
	  "i" (-EFAULT))

int __get_user_bad(void);

unsigned long __must_check ___copy_from_user(void *to,
					     const void __user *from,
					     unsigned long size);
unsigned long copy_from_user_fixup(void *to, const void __user *from,
				   unsigned long size);
static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long size)
{
	unsigned long ret;

	check_object_size(to, size, false);

	ret = ___copy_from_user(to, from, size);
	if (unlikely(ret))
		ret = copy_from_user_fixup(to, from, size);

	return ret;
}
#define __copy_from_user copy_from_user

unsigned long __must_check ___copy_to_user(void __user *to,
					   const void *from,
					   unsigned long size);
unsigned long copy_to_user_fixup(void __user *to, const void *from,
				 unsigned long size);
static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long size)
{
	unsigned long ret;

	check_object_size(from, size, true);

	ret = ___copy_to_user(to, from, size);
	if (unlikely(ret))
		ret = copy_to_user_fixup(to, from, size);
	return ret;
}
#define __copy_to_user copy_to_user
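
/*
 * Illustrative sketch, not part of the original header: typical
 * copy_from_user()/copy_to_user() usage.  Both return the number of
 * bytes that could NOT be copied, so a nonzero result means the user
 * range faulted part-way through.  The struct and function names are
 * made up for this example.
 */
struct uaccess_example_req {
	unsigned long value;
};

static inline long uaccess_example_bump(void __user *ubuf)
{
	struct uaccess_example_req req;

	if (copy_from_user(&req, ubuf, sizeof(req)))
		return -EFAULT;		/* some bytes were not readable */
	req.value++;
	if (copy_to_user(ubuf, &req, sizeof(req)))
		return -EFAULT;		/* some bytes were not writable */
	return 0;
}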

unsigned long __must_check ___copy_in_user(void __user *to,
					   const void __user *from,
					   unsigned long size);
unsigned long copy_in_user_fixup(void __user *to, void __user *from,
				 unsigned long size);
static inline unsigned long __must_check
copy_in_user(void __user *to, void __user *from, unsigned long size)
{
	unsigned long ret = ___copy_in_user(to, from, size);

	if (unlikely(ret))
		ret = copy_in_user_fixup(to, from, size);
	return ret;
}
#define __copy_in_user copy_in_user

unsigned long __must_check __clear_user(void __user *, unsigned long);

#define clear_user __clear_user

__must_check long strlen_user(const char __user *str);
__must_check long strnlen_user(const char __user *str, long n);

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

struct pt_regs;
unsigned long compute_effective_address(struct pt_regs *,
					unsigned int insn,
					unsigned int rd);

#endif /* __ASSEMBLY__ */

#endif /* _ASM_UACCESS_H */