#ifndef _ARCH_POWERPC_UACCESS_H
#define _ARCH_POWERPC_UACCESS_H

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

#include <linux/sched.h>
#include <linux/errno.h>
#include <asm/asm-compat.h>
#include <asm/processor.h>
#include <asm/page.h>

#define VERIFY_READ	0
#define VERIFY_WRITE	1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed;
 * with get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 *
 * The fs/ds values are now the highest legal address in the "segment".
 * This simplifies the checking in the routines below.
 */

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(~0UL)
#ifdef __powerpc64__
/* We use TASK_SIZE_USER64 as TASK_SIZE is not constant */
#define USER_DS		MAKE_MM_SEG(TASK_SIZE_USER64 - 1)
#else
#define USER_DS		MAKE_MM_SEG(TASK_SIZE - 1)
#endif

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current->thread.fs)
#define set_fs(val)	(current->thread.fs = (val))

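/*
 * Illustrative only (not part of this header): the classic pattern for
 * temporarily lifting the access limit so that the *_user accessors may
 * be applied to kernel addresses.  A hedged sketch of a caller:
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	... get_user()/put_user() on kernel pointers ...
 *	set_fs(old_fs);
 */
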
#define segment_eq(a, b)	((a).seg == (b).seg)

#define user_addr_max()	(get_fs().seg)

#ifdef __powerpc64__
/*
 * This check is sufficient because there is a large enough gap
 * between user addresses and kernel addresses.
 */
#define __access_ok(addr, size, segment)	\
	(((addr) <= (segment).seg) && ((size) <= (segment).seg))

#else

#define __access_ok(addr, size, segment)	\
	(((addr) <= (segment).seg) && \
	 (((size) == 0) || (((size) - 1) <= ((segment).seg - (addr)))))

#endif

#define access_ok(type, addr, size)		\
	(__chk_user_ptr(addr),			\
	 __access_ok((__force unsigned long)(addr), (size), get_fs()))

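/*
 * Illustrative only (not part of this header): validate a whole user
 * range once, then use the unchecked __-prefixed accessors on it.
 * "ubuf", "kbuf" and "len" below are hypothetical:
 *
 *	if (!access_ok(VERIFY_READ, ubuf, len))
 *		return -EFAULT;
 *	ret = __copy_from_user(kbuf, ubuf, len);
 */
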
/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or TLB entries.
 */

struct exception_table_entry {
	unsigned long insn;
	unsigned long fixup;
};

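/*
 * Conceptually (a hedged sketch, not the literal fault-handler code):
 * when a load or store at "insn" faults, the trap handler looks the
 * faulting address up in this table and, if an entry exists, resumes
 * execution at "fixup" instead of delivering a signal:
 *
 *	entry = search_exception_tables(regs->nip);
 *	if (entry)
 *		regs->nip = entry->fixup;
 */
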
/*
 * These are the main single-value transfer routines.  They
 * automatically use the right size if we just have the right pointer
 * type.
 *
 * This gets kind of ugly.  We want to return _two_ values in
 * "get_user()" and yet we don't want to do any pointers, because that
 * is too much of a performance impact.  Thus we have a few rather ugly
 * macros here, and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions do not verify the
 * address space; that must have been done previously with a separate
 * "access_ok()" call (this is used when we do multiple accesses to
 * the same area of user memory).
 *
 * As we use the same address space for kernel and user data on
 * PowerPC, we can just do these as direct assignments.  (Of course,
 * the exception handling means that it's no longer "just"...)
 *
 * The "user64" versions of the user access functions allow access of
 * 64-bit data.  The "get_user" functions do not properly handle 64-bit
 * data because the value gets cast down to a long.  The "put_user"
 * functions already handle 64-bit data properly, but we add "user64"
 * versions for completeness.
 */
#define get_user(x, ptr) \
	__get_user_check((x), (ptr), sizeof(*(ptr)))
#define put_user(x, ptr) \
	__put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user(x, ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
#define __put_user(x, ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

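/*
 * Illustrative only (not part of this header); "uptr" is a
 * hypothetical int __user pointer, e.g. in a syscall handler.  Both
 * macros return 0 on success and -EFAULT on a bad address:
 *
 *	int val;
 *
 *	if (get_user(val, uptr))
 *		return -EFAULT;
 *	val++;
 *	if (put_user(val, uptr))
 *		return -EFAULT;
 */
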
#ifndef __powerpc64__
#define __get_user64(x, ptr) \
	__get_user64_nocheck((x), (ptr), sizeof(*(ptr)))
#define __put_user64(x, ptr) __put_user(x, ptr)
#endif

#define __get_user_inatomic(x, ptr) \
	__get_user_nosleep((x), (ptr), sizeof(*(ptr)))
#define __put_user_inatomic(x, ptr) \
	__put_user_nosleep((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

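/*
 * Illustrative only (not part of this header): the _inatomic variants
 * skip the might_sleep() debugging check, so they are the ones to use
 * in contexts that must not sleep; page faults are expected to be
 * disabled around them (pagefault_disable()/pagefault_enable() are
 * assumed to come from <linux/uaccess.h>):
 *
 *	pagefault_disable();
 *	ret = __get_user_inatomic(val, uptr);
 *	pagefault_enable();
 */
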
#define __get_user_unaligned	__get_user
#define __put_user_unaligned	__put_user

extern long __put_user_bad(void);

/*
 * We don't tell gcc that we are accessing memory, but this is OK
 * because we do not write to any memory gcc knows about, so there
 * are no aliasing issues.
 */
#define __put_user_asm(x, addr, err, op)			\
	__asm__ __volatile__(					\
		"1:	" op " %1,0(%2)	# put_user\n"		\
		"2:\n"						\
		".section .fixup,\"ax\"\n"			\
		"3:	li %0,%3\n"				\
		"	b 2b\n"					\
		".previous\n"					\
		".section __ex_table,\"a\"\n"			\
		PPC_LONG_ALIGN "\n"				\
		PPC_LONG "1b,3b\n"				\
		".previous"					\
		: "=r" (err)					\
		: "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))

#ifdef __powerpc64__
#define __put_user_asm2(x, ptr, retval)				\
	__put_user_asm(x, ptr, retval, "std")
#else /* __powerpc64__ */
#define __put_user_asm2(x, addr, err)				\
	__asm__ __volatile__(					\
		"1:	stw %1,0(%2)\n"				\
		"2:	stw %1+1,4(%2)\n"			\
		"3:\n"						\
		".section .fixup,\"ax\"\n"			\
		"4:	li %0,%3\n"				\
		"	b 3b\n"					\
		".previous\n"					\
		".section __ex_table,\"a\"\n"			\
		PPC_LONG_ALIGN "\n"				\
		PPC_LONG "1b,4b\n"				\
		PPC_LONG "2b,4b\n"				\
		".previous"					\
		: "=r" (err)					\
		: "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))
#endif /* __powerpc64__ */

#define __put_user_size(x, ptr, size, retval)			\
do {								\
	retval = 0;						\
	switch (size) {						\
	case 1: __put_user_asm(x, ptr, retval, "stb"); break;	\
	case 2: __put_user_asm(x, ptr, retval, "sth"); break;	\
	case 4: __put_user_asm(x, ptr, retval, "stw"); break;	\
	case 8: __put_user_asm2(x, ptr, retval); break;		\
	default: __put_user_bad();				\
	}							\
} while (0)

#define __put_user_nocheck(x, ptr, size)			\
({								\
	long __pu_err;						\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
	if (!is_kernel_addr((unsigned long)__pu_addr))		\
		might_sleep();					\
	__chk_user_ptr(ptr);					\
	__put_user_size((x), __pu_addr, (size), __pu_err);	\
	__pu_err;						\
})

#define __put_user_check(x, ptr, size)				\
({								\
	long __pu_err = -EFAULT;				\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
	might_sleep();						\
	if (access_ok(VERIFY_WRITE, __pu_addr, size))		\
		__put_user_size((x), __pu_addr, (size), __pu_err); \
	__pu_err;						\
})

#define __put_user_nosleep(x, ptr, size)			\
({								\
	long __pu_err;						\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
	__chk_user_ptr(ptr);					\
	__put_user_size((x), __pu_addr, (size), __pu_err);	\
	__pu_err;						\
})

extern long __get_user_bad(void);

#define __get_user_asm(x, addr, err, op)		\
	__asm__ __volatile__(				\
		"1:	" op " %1,0(%2)	# get_user\n"	\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li %0,%3\n"			\
		"	li %1,0\n"			\
		"	b 2b\n"				\
		".previous\n"				\
		".section __ex_table,\"a\"\n"		\
		PPC_LONG_ALIGN "\n"			\
		PPC_LONG "1b,3b\n"			\
		".previous"				\
		: "=r" (err), "=r" (x)			\
		: "b" (addr), "i" (-EFAULT), "0" (err))

#ifdef __powerpc64__
#define __get_user_asm2(x, addr, err)			\
	__get_user_asm(x, addr, err, "ld")
#else /* __powerpc64__ */
#define __get_user_asm2(x, addr, err)			\
	__asm__ __volatile__(				\
		"1:	lwz %1,0(%2)\n"			\
		"2:	lwz %1+1,4(%2)\n"		\
		"3:\n"					\
		".section .fixup,\"ax\"\n"		\
		"4:	li %0,%3\n"			\
		"	li %1,0\n"			\
		"	li %1+1,0\n"			\
		"	b 3b\n"				\
		".previous\n"				\
		".section __ex_table,\"a\"\n"		\
		PPC_LONG_ALIGN "\n"			\
		PPC_LONG "1b,4b\n"			\
		PPC_LONG "2b,4b\n"			\
		".previous"				\
		: "=r" (err), "=&r" (x)			\
		: "b" (addr), "i" (-EFAULT), "0" (err))
#endif /* __powerpc64__ */

#define __get_user_size(x, ptr, size, retval)			\
do {								\
	retval = 0;						\
	__chk_user_ptr(ptr);					\
	if (size > sizeof(x))					\
		(x) = __get_user_bad();				\
	switch (size) {						\
	case 1: __get_user_asm(x, ptr, retval, "lbz"); break;	\
	case 2: __get_user_asm(x, ptr, retval, "lhz"); break;	\
	case 4: __get_user_asm(x, ptr, retval, "lwz"); break;	\
	case 8: __get_user_asm2(x, ptr, retval); break;		\
	default: (x) = __get_user_bad();			\
	}							\
} while (0)

#define __get_user_nocheck(x, ptr, size)			\
({								\
	long __gu_err;						\
	unsigned long __gu_val;					\
	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);	\
	__chk_user_ptr(ptr);					\
	if (!is_kernel_addr((unsigned long)__gu_addr))		\
		might_sleep();					\
	__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
	(x) = (__typeof__(*(ptr)))__gu_val;			\
	__gu_err;						\
})

#ifndef __powerpc64__
#define __get_user64_nocheck(x, ptr, size)			\
({								\
	long __gu_err;						\
	long long __gu_val;					\
	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);	\
	__chk_user_ptr(ptr);					\
	if (!is_kernel_addr((unsigned long)__gu_addr))		\
		might_sleep();					\
	__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
	(x) = (__typeof__(*(ptr)))__gu_val;			\
	__gu_err;						\
})
#endif /* __powerpc64__ */

#define __get_user_check(x, ptr, size)				\
({								\
	long __gu_err = -EFAULT;				\
	unsigned long __gu_val = 0;				\
	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);	\
	might_sleep();						\
	if (access_ok(VERIFY_READ, __gu_addr, (size)))		\
		__get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
	(x) = (__typeof__(*(ptr)))__gu_val;			\
	__gu_err;						\
})

#define __get_user_nosleep(x, ptr, size)			\
({								\
	long __gu_err;						\
	unsigned long __gu_val;					\
	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);	\
	__chk_user_ptr(ptr);					\
	__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
	(x) = (__typeof__(*(ptr)))__gu_val;			\
	__gu_err;						\
})

/* more complex routines */

extern unsigned long __copy_tofrom_user(void __user *to,
		const void __user *from, unsigned long size);

#ifndef __powerpc64__

static inline unsigned long copy_from_user(void *to,
		const void __user *from, unsigned long n)
{
	unsigned long over;

	if (access_ok(VERIFY_READ, from, n))
		return __copy_tofrom_user((__force void __user *)to, from, n);
	if ((unsigned long)from < TASK_SIZE) {
		over = (unsigned long)from + n - TASK_SIZE;
		return __copy_tofrom_user((__force void __user *)to, from,
				n - over) + over;
	}
	return n;
}

static inline unsigned long copy_to_user(void __user *to,
		const void *from, unsigned long n)
{
	unsigned long over;

	if (access_ok(VERIFY_WRITE, to, n))
		return __copy_tofrom_user(to, (__force void __user *)from, n);
	if ((unsigned long)to < TASK_SIZE) {
		over = (unsigned long)to + n - TASK_SIZE;
		return __copy_tofrom_user(to, (__force void __user *)from,
				n - over) + over;
	}
	return n;
}

#else /* __powerpc64__ */

#define __copy_in_user(to, from, size) \
	__copy_tofrom_user((to), (from), (size))

extern unsigned long copy_from_user(void *to, const void __user *from,
				    unsigned long n);
extern unsigned long copy_to_user(void __user *to, const void *from,
				  unsigned long n);
extern unsigned long copy_in_user(void __user *to, const void __user *from,
				  unsigned long n);

#endif /* __powerpc64__ */

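/*
 * Illustrative only (not part of this header): copy_to_user() and
 * copy_from_user() return the number of bytes that could NOT be
 * copied, so zero means complete success.  A hypothetical read()-style
 * path, with "ubuf", "kbuf" and "len" assumed:
 *
 *	if (copy_to_user(ubuf, kbuf, len))
 *		return -EFAULT;
 *	return len;
 */
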
static inline unsigned long __copy_from_user_inatomic(void *to,
		const void __user *from, unsigned long n)
{
	if (__builtin_constant_p(n) && (n <= 8)) {
		unsigned long ret = 1;

		switch (n) {
		case 1:
			__get_user_size(*(u8 *)to, from, 1, ret);
			break;
		case 2:
			__get_user_size(*(u16 *)to, from, 2, ret);
			break;
		case 4:
			__get_user_size(*(u32 *)to, from, 4, ret);
			break;
		case 8:
			__get_user_size(*(u64 *)to, from, 8, ret);
			break;
		}
		if (ret == 0)
			return 0;
	}
	return __copy_tofrom_user((__force void __user *)to, from, n);
}

static inline unsigned long __copy_to_user_inatomic(void __user *to,
		const void *from, unsigned long n)
{
	if (__builtin_constant_p(n) && (n <= 8)) {
		unsigned long ret = 1;

		switch (n) {
		case 1:
			__put_user_size(*(u8 *)from, (u8 __user *)to, 1, ret);
			break;
		case 2:
			__put_user_size(*(u16 *)from, (u16 __user *)to, 2, ret);
			break;
		case 4:
			__put_user_size(*(u32 *)from, (u32 __user *)to, 4, ret);
			break;
		case 8:
			__put_user_size(*(u64 *)from, (u64 __user *)to, 8, ret);
			break;
		}
		if (ret == 0)
			return 0;
	}
	return __copy_tofrom_user(to, (__force const void __user *)from, n);
}

static inline unsigned long __copy_from_user(void *to,
		const void __user *from, unsigned long size)
{
	might_sleep();
	return __copy_from_user_inatomic(to, from, size);
}

static inline unsigned long __copy_to_user(void __user *to,
		const void *from, unsigned long size)
{
	might_sleep();
	return __copy_to_user_inatomic(to, from, size);
}

extern unsigned long __clear_user(void __user *addr, unsigned long size);

static inline unsigned long clear_user(void __user *addr, unsigned long size)
{
	might_sleep();
	if (likely(access_ok(VERIFY_WRITE, addr, size)))
		return __clear_user(addr, size);
	if ((unsigned long)addr < TASK_SIZE) {
		unsigned long over = (unsigned long)addr + size - TASK_SIZE;
		return __clear_user(addr, size - over) + over;
	}
	return size;
}

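/*
 * Illustrative only (not part of this header): zero-fill the tail of a
 * partially filled user buffer; "ubuf", "filled" and "len" are
 * hypothetical.  Like the copy routines, clear_user() returns the
 * number of bytes left unzeroed:
 *
 *	if (clear_user(ubuf + filled, len - filled))
 *		return -EFAULT;
 */
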
extern long strncpy_from_user(char *dst, const char __user *src, long count);
extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);
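
/*
 * Illustrative only (not part of this header): strncpy_from_user()
 * returns the length of the copied string (excluding the trailing
 * NUL), "count" if it had to truncate, or -EFAULT on a bad address.
 * A hypothetical name-fetching path, with "uname" assumed:
 *
 *	char name[32];
 *	long n = strncpy_from_user(name, uname, sizeof(name));
 *
 *	if (n < 0)
 *		return -EFAULT;
 *	if (n == sizeof(name))
 *		return -ENAMETOOLONG;
 */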

#endif	/* __ASSEMBLY__ */
#endif	/* __KERNEL__ */

#endif	/* _ARCH_POWERPC_UACCESS_H */