#ifndef _ASM_IA64_UACCESS_H
#define _ASM_IA64_UACCESS_H

/*
 * This file defines various macros to transfer memory areas across
 * the user/kernel boundary. This needs to be done carefully because
 * this code is executed in kernel mode and uses user-specified
 * addresses. Thus, we need to be careful not to let the user
 * trick us into accessing kernel memory that would normally be
 * inaccessible. This code is also fairly performance sensitive,
 * so we want to spend as little time doing safety checks as
 * possible.
 *
 * To make matters a bit more interesting, these macros are sometimes
 * also called from within the kernel itself, in which case the address
 * validity check must be skipped. The get_fs() macro tells us what
 * to do: if get_fs()==USER_DS, checking is performed, if
 * get_fs()==KERNEL_DS, checking is bypassed.
 *
 * Note that even if the memory area specified by the user is in a
 * valid address range, it is still possible that we'll get a page
 * fault while accessing it. This is handled by filling out an
 * exception handler fixup entry for each instruction that has the
 * potential to fault. When such a fault occurs, the page fault
 * handler checks to see whether the faulting instruction has a fixup
 * associated and, if so, sets r8 to -EFAULT and clears r9 to 0 and
 * then resumes execution at the continuation point.
 *
 * Based on <asm-alpha/uaccess.h>.
 *
 * Copyright (C) 1998, 1999, 2001-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */

#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/page-flags.h>
#include <linux/mm.h>

#include <asm/intrinsics.h>
#include <asm/pgtable.h>
#include <asm/io.h>

/*
 * For historical reasons, the following macros are grossly misnamed:
 */
#define KERNEL_DS	((mm_segment_t) { ~0UL })		/* cf. access_ok() */
#define USER_DS		((mm_segment_t) { TASK_SIZE-1 })	/* cf. access_ok() */

#define VERIFY_READ	0
#define VERIFY_WRITE	1

#define get_ds()  (KERNEL_DS)
#define get_fs()  (current_thread_info()->addr_limit)
#define set_fs(x) (current_thread_info()->addr_limit = (x))

#define segment_eq(a, b)	((a).seg == (b).seg)
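
/*
 * A sketch (hypothetical caller, not part of this header) of the usual
 * save/override/restore pattern for the macros above: temporarily raising
 * the address limit to KERNEL_DS lets kernel-held buffers pass the
 * access_ok() check.
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);		// bypass the address-limit check
 *	... access kernel buffers via the user accessors ...
 *	set_fs(old_fs);			// always restore the old limit
 */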

/*
 * When accessing user memory, we need to make sure the entire area really is in
 * user-level space. In order to do this efficiently, we make sure that the page at
 * address TASK_SIZE is never valid. We also need to make sure that the address doesn't
 * point inside the virtually mapped linear page table.
 */
#define __access_ok(addr, size, segment)						\
({											\
	__chk_user_ptr(addr);								\
	(likely((unsigned long) (addr) <= (segment).seg)				\
	 && ((segment).seg == KERNEL_DS.seg						\
	     || likely(REGION_OFFSET((unsigned long) (addr)) < RGN_MAP_LIMIT)));	\
})
#define access_ok(type, addr, size)	__access_ok((addr), (size), get_fs())
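
/*
 * Example (hypothetical driver code): validate a whole user range once up
 * front, e.g. before using the unchecked __get_user()/__put_user() forms
 * defined below.
 *
 *	if (!access_ok(VERIFY_WRITE, ubuf, len))
 *		return -EFAULT;
 */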

/* this function will go away soon - use access_ok() instead */
static inline int __deprecated
verify_area (int type, const void __user *addr, unsigned long size)
{
	return access_ok(type, addr, size) ? 0 : -EFAULT;
}

/*
 * These are the main single-value transfer routines. They automatically
 * use the right size if we just have the right pointer type.
 *
 * Be careful not to
 * (a) re-use the arguments for side effects (sizeof/typeof is ok)
 * (b) require any knowledge of processes at this stage
 */
#define put_user(x, ptr)	__put_user_check((__typeof__(*(ptr))) (x), (ptr), sizeof(*(ptr)), get_fs())
#define get_user(x, ptr)	__get_user_check((x), (ptr), sizeof(*(ptr)), get_fs())
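
/*
 * Example usage (hypothetical ioctl handler): the access size and the
 * value type are derived from PTR, so no explicit length is passed.
 *
 *	int val;
 *
 *	if (get_user(val, (int __user *) arg))
 *		return -EFAULT;
 *	val++;
 *	if (put_user(val, (int __user *) arg))
 *		return -EFAULT;
 */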

/*
 * The "__xxx" versions do not do address space checking, useful when
 * doing multiple accesses to the same area (the programmer has to do the
 * checks by hand with "access_ok()")
 */
#define __put_user(x, ptr)	__put_user_nocheck((__typeof__(*(ptr))) (x), (ptr), sizeof(*(ptr)))
#define __get_user(x, ptr)	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
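
/*
 * Example (hypothetical; assumes "uarr" was validated with access_ok() as
 * sketched above): after one range check, repeated accesses can use the
 * unchecked forms and just accumulate the error returns.
 *
 *	err  = __get_user(a, &uarr[0]);
 *	err |= __get_user(b, &uarr[1]);
 *	if (err)
 *		return -EFAULT;
 */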

extern long __put_user_unaligned_unknown (void);

#define __put_user_unaligned(x, ptr)								\
({												\
	long __ret;										\
	switch (sizeof(*(ptr))) {								\
		case 1: __ret = __put_user((x), (ptr)); break;					\
		case 2: __ret = (__put_user((x), (u8 __user *)(ptr)))				\
			| (__put_user((x) >> 8, ((u8 __user *)(ptr) + 1))); break;		\
		case 4: __ret = (__put_user((x), (u16 __user *)(ptr)))				\
			| (__put_user((x) >> 16, ((u16 __user *)(ptr) + 1))); break;		\
		case 8: __ret = (__put_user((x), (u32 __user *)(ptr)))				\
			| (__put_user((x) >> 32, ((u32 __user *)(ptr) + 1))); break;		\
		default: __ret = __put_user_unaligned_unknown();				\
	}											\
	__ret;											\
})
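
/*
 * Worked example (hypothetical values): an 8-byte __put_user_unaligned()
 * is emitted as two 4-byte stores, low half first:
 *
 *	u64 v = 0x1122334455667788UL;
 *	err = __put_user_unaligned(v, u64_uptr);
 *	// expands to: __put_user(v, (u32 __user *) u64_uptr)
 *	//	     | __put_user(v >> 32, (u32 __user *) u64_uptr + 1)
 */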

extern long __get_user_unaligned_unknown (void);

#define __get_user_unaligned(x, ptr)								\
({												\
	long __ret;										\
	switch (sizeof(*(ptr))) {								\
		case 1: __ret = __get_user((x), (ptr)); break;					\
		case 2: __ret = (__get_user((x), (u8 __user *)(ptr)))				\
			| (__get_user((x) >> 8, ((u8 __user *)(ptr) + 1))); break;		\
		case 4: __ret = (__get_user((x), (u16 __user *)(ptr)))				\
			| (__get_user((x) >> 16, ((u16 __user *)(ptr) + 1))); break;		\
		case 8: __ret = (__get_user((x), (u32 __user *)(ptr)))				\
			| (__get_user((x) >> 32, ((u32 __user *)(ptr) + 1))); break;		\
		default: __ret = __get_user_unaligned_unknown();				\
	}											\
	__ret;											\
})

#ifdef ASM_SUPPORTED
  struct __large_struct { unsigned long buf[100]; };
# define __m(x) (*(struct __large_struct __user *)(x))

/* We need to declare the __ex_table section before we can use it in .xdata. */
asm (".section \"__ex_table\", \"a\"\n\t.previous");

# define __get_user_size(val, addr, n, err)							\
do {												\
	register long __gu_r8 asm ("r8") = 0;							\
	register long __gu_r9 asm ("r9");							\
	asm ("\n[1:]\tld"#n" %0=%2%P2\t// %0 and %1 get overwritten by exception handler\n"	\
	     "\t.xdata4 \"__ex_table\", 1b-., 1f-.+4\n"						\
	     "[1:]"										\
	     : "=r"(__gu_r9), "=r"(__gu_r8) : "m"(__m(addr)), "1"(__gu_r8));			\
	(err) = __gu_r8;									\
	(val) = __gu_r9;									\
} while (0)
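
/*
 * Schematically, for n == 4 the macro above emits:
 *
 *	1:	ld4 r9 = [addr]		// the potentially faulting load
 *		.xdata4 "__ex_table", 1b-., 1f-.+4
 *	1:				// continuation point
 *
 * The .xdata4 directive records the faulting instruction and the
 * continuation point as location-relative offsets in __ex_table; the "+4"
 * sets bit 2 of the continuation word so that the fixup also clears r9
 * (cf. struct exception_table_entry below).
 */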

/*
 * The "__put_user_size()" macro tells gcc that it reads from memory rather
 * than writing to it. This is safe because the store does not touch any
 * memory gcc knows about, so there are no aliasing issues.
 */
# define __put_user_size(val, addr, n, err)							\
do {												\
	register long __pu_r8 asm ("r8") = 0;							\
	asm volatile ("\n[1:]\tst"#n" %1=%r2%P1\t// %0 gets overwritten by exception handler\n"	\
		      "\t.xdata4 \"__ex_table\", 1b-., 1f-.\n"					\
		      "[1:]"									\
		      : "=r"(__pu_r8) : "m"(__m(addr)), "rO"(val), "0"(__pu_r8));		\
	(err) = __pu_r8;									\
} while (0)

#else /* !ASM_SUPPORTED */
# define RELOC_TYPE	2	/* ip-rel */
# define __get_user_size(val, addr, n, err)				\
do {									\
	__ld_user("__ex_table", (unsigned long) addr, n, RELOC_TYPE);	\
	(err) = ia64_getreg(_IA64_REG_R8);				\
	(val) = ia64_getreg(_IA64_REG_R9);				\
} while (0)
# define __put_user_size(val, addr, n, err)						\
do {											\
	__st_user("__ex_table", (unsigned long) addr, n, RELOC_TYPE,			\
		  (unsigned long) (val));						\
	(err) = ia64_getreg(_IA64_REG_R8);						\
} while (0)
#endif /* !ASM_SUPPORTED */

extern void __get_user_unknown (void);

/*
 * Evaluating arguments X, PTR, SIZE, and SEGMENT may involve subroutine calls, which
 * could clobber r8 and r9 (among others). Thus, be careful not to evaluate them while
 * using r8/r9.
 */
#define __do_get_user(check, x, ptr, size, segment)						\
({												\
	const __typeof__(*(ptr)) __user *__gu_ptr = (ptr);					\
	__typeof__ (size) __gu_size = (size);							\
	long __gu_err = -EFAULT, __gu_val = 0;							\
												\
	if (!check || __access_ok(__gu_ptr, __gu_size, segment))				\
		switch (__gu_size) {								\
			case 1: __get_user_size(__gu_val, __gu_ptr, 1, __gu_err); break;	\
			case 2: __get_user_size(__gu_val, __gu_ptr, 2, __gu_err); break;	\
			case 4: __get_user_size(__gu_val, __gu_ptr, 4, __gu_err); break;	\
			case 8: __get_user_size(__gu_val, __gu_ptr, 8, __gu_err); break;	\
			default: __get_user_unknown(); break;					\
		}										\
	(x) = (__typeof__(*(__gu_ptr))) __gu_val;						\
	__gu_err;										\
})

#define __get_user_nocheck(x, ptr, size)	__do_get_user(0, x, ptr, size, KERNEL_DS)
#define __get_user_check(x, ptr, size, segment)	__do_get_user(1, x, ptr, size, segment)

extern void __put_user_unknown (void);

/*
 * Evaluating arguments X, PTR, SIZE, and SEGMENT may involve subroutine calls, which
 * could clobber r8 (among others). Thus, be careful not to evaluate them while using r8.
 */
#define __do_put_user(check, x, ptr, size, segment)						\
({												\
	__typeof__ (x) __pu_x = (x);								\
	__typeof__ (*(ptr)) __user *__pu_ptr = (ptr);						\
	__typeof__ (size) __pu_size = (size);							\
	long __pu_err = -EFAULT;								\
												\
	if (!check || __access_ok(__pu_ptr, __pu_size, segment))				\
		switch (__pu_size) {								\
			case 1: __put_user_size(__pu_x, __pu_ptr, 1, __pu_err); break;		\
			case 2: __put_user_size(__pu_x, __pu_ptr, 2, __pu_err); break;		\
			case 4: __put_user_size(__pu_x, __pu_ptr, 4, __pu_err); break;		\
			case 8: __put_user_size(__pu_x, __pu_ptr, 8, __pu_err); break;		\
			default: __put_user_unknown(); break;					\
		}										\
	__pu_err;										\
})

#define __put_user_nocheck(x, ptr, size)	__do_put_user(0, x, ptr, size, KERNEL_DS)
#define __put_user_check(x, ptr, size, segment)	__do_put_user(1, x, ptr, size, segment)

/*
 * Complex access routines
 */
extern unsigned long __must_check __copy_user (void __user *to, const void __user *from,
					       unsigned long count);

static inline unsigned long
__copy_to_user (void __user *to, const void *from, unsigned long count)
{
	return __copy_user(to, (void __user *) from, count);
}

static inline unsigned long
__copy_from_user (void *to, const void __user *from, unsigned long count)
{
	return __copy_user((void __user *) to, from, count);
}

#define __copy_to_user_inatomic		__copy_to_user
#define __copy_from_user_inatomic	__copy_from_user
#define copy_to_user(to, from, n)							\
({											\
	void __user *__cu_to = (to);							\
	const void *__cu_from = (from);							\
	long __cu_len = (n);								\
											\
	if (__access_ok(__cu_to, __cu_len, get_fs()))					\
		__cu_len = __copy_user(__cu_to, (void __user *) __cu_from, __cu_len);	\
	__cu_len;									\
})

#define copy_from_user(to, from, n)							\
({											\
	void *__cu_to = (to);								\
	const void __user *__cu_from = (from);						\
	long __cu_len = (n);								\
											\
	__chk_user_ptr(__cu_from);							\
	if (__access_ok(__cu_from, __cu_len, get_fs()))					\
		__cu_len = __copy_user((void __user *) __cu_to, __cu_from, __cu_len);	\
	__cu_len;									\
})
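
/*
 * Example (hypothetical read() path): as with __copy_user(), the result is
 * the number of bytes that could NOT be copied, so zero means success.
 *
 *	if (copy_to_user(ubuf, kbuf, len))
 *		return -EFAULT;
 *	return len;
 */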

#define __copy_in_user(to, from, size)	__copy_user((to), (from), (size))

static inline unsigned long
copy_in_user (void __user *to, const void __user *from, unsigned long n)
{
	if (likely(access_ok(VERIFY_READ, from, n) && access_ok(VERIFY_WRITE, to, n)))
		n = __copy_user(to, from, n);
	return n;
}

extern unsigned long __do_clear_user (void __user *, unsigned long);

#define __clear_user(to, n)	__do_clear_user(to, n)

#define clear_user(to, n)					\
({								\
	unsigned long __cu_len = (n);				\
	if (__access_ok(to, __cu_len, get_fs()))		\
		__cu_len = __do_clear_user(to, __cu_len);	\
	__cu_len;						\
})
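
/*
 * Example (hypothetical): zero the uninitialized tail of a user buffer.
 * A non-zero return is the number of bytes that could not be cleared.
 *
 *	if (clear_user(ubuf + done, len - done))
 *		return -EFAULT;
 */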

/*
 * Returns: -EFAULT if exception before terminator, N if the entire buffer filled, else
 * strlen.
 */
extern long __must_check __strncpy_from_user (char *to, const char __user *from, long to_len);

#define strncpy_from_user(to, from, n)					\
({									\
	const char __user *__sfu_from = (from);				\
	long __sfu_ret = -EFAULT;					\
	if (__access_ok(__sfu_from, 0, get_fs()))			\
		__sfu_ret = __strncpy_from_user((to), __sfu_from, (n));	\
	__sfu_ret;							\
})
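
/*
 * Example (hypothetical): fetch a NUL-terminated name from user space.
 * Note that the __access_ok() check above is made with size 0, so only
 * the start address is validated; faults beyond it are caught through the
 * exception tables. A return equal to the buffer size means the string
 * was truncated (no terminator was seen).
 *
 *	char name[32];
 *	long n = strncpy_from_user(name, uname, sizeof(name));
 *
 *	if (n < 0)
 *		return n;		// -EFAULT
 */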

/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
extern unsigned long __strlen_user (const char __user *);

#define strlen_user(str)				\
({							\
	const char __user *__su_str = (str);		\
	unsigned long __su_ret = 0;			\
	if (__access_ok(__su_str, 0, get_fs()))		\
		__su_ret = __strlen_user(__su_str);	\
	__su_ret;					\
})

/*
 * Returns: 0 if exception before NUL or reaching the supplied limit
 * (N), a value greater than N if the limit would be exceeded, else
 * strlen.
 */
extern unsigned long __strnlen_user (const char __user *, long);

#define strnlen_user(str, len)					\
({								\
	const char __user *__su_str = (str);			\
	unsigned long __su_ret = 0;				\
	if (__access_ok(__su_str, 0, get_fs()))			\
		__su_ret = __strnlen_user(__su_str, len);	\
	__su_ret;						\
})
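
/*
 * Example (hypothetical): size a user string before copying it, per the
 * comment above treating 0 (fault) and > limit (too long) as errors.
 *
 *	unsigned long len = strnlen_user(ustr, MAXLEN);
 *
 *	if (len == 0 || len > MAXLEN)
 *		return -EINVAL;
 */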

/* Generic code can't deal with the location-relative format that we use for compactness. */
#define ARCH_HAS_SORT_EXTABLE
#define ARCH_HAS_SEARCH_EXTABLE

struct exception_table_entry {
	int addr;	/* location-relative address of insn this fixup is for */
	int cont;	/* location-relative continuation addr.; if bit 2 is set, r9 is set to 0 */
};
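
/*
 * Decoding sketch (assumed layout, mirroring the "+4" trick used by
 * __get_user_size() above): both fields are relative to their own
 * location, bits 0-1 of the recovered continuation carry the bundle slot
 * number, and bit 2 requests clearing of r9.
 *
 *	unsigned long fix = (unsigned long) &e->cont + e->cont;
 *	// resume at bundle (fix & ~0xful), slot (fix & 3); if (fix & 4), r9 = 0
 */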

extern void ia64_handle_exception (struct pt_regs *regs, const struct exception_table_entry *e);
extern const struct exception_table_entry *search_exception_tables (unsigned long addr);

static inline int
ia64_done_with_exception (struct pt_regs *regs)
{
	const struct exception_table_entry *e;
	e = search_exception_tables(regs->cr_iip + ia64_psr(regs)->ri);
	if (e) {
		ia64_handle_exception(regs, e);
		return 1;
	}
	return 0;
}
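
/*
 * Typical call site (hypothetical sketch of a fault-handler path): when a
 * kernel-mode fault cannot be resolved, try the fixup before treating it
 * as a kernel bug.
 *
 *	if (ia64_done_with_exception(regs))
 *		return;		// fixup applied: r8 = -EFAULT, execution resumes
 *	// ... otherwise fall through to die()/panic handling ...
 */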

#define ARCH_HAS_TRANSLATE_MEM_PTR	1
static __inline__ char *
xlate_dev_mem_ptr (unsigned long p)
{
	struct page *page;
	char *ptr;

	page = pfn_to_page(p >> PAGE_SHIFT);
	if (PageUncached(page))
		ptr = (char *) p + __IA64_UNCACHED_OFFSET;
	else
		ptr = __va(p);

	return ptr;
}
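
/*
 * Example (hypothetical /dev/mem-style read path): translate the physical
 * address first so that uncached pages are reached through the uncached
 * identity mapping rather than the cached one.
 *
 *	char *ptr = xlate_dev_mem_ptr(phys_addr);
 *
 *	if (copy_to_user(ubuf, ptr, count))
 *		return -EFAULT;
 */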

/*
 * Convert a virtual cached kernel memory pointer to an uncached pointer
 */
static __inline__ char *
xlate_dev_kmem_ptr (char *p)
{
	struct page *page;
	char *ptr;

	page = virt_to_page((unsigned long) p);
	if (PageUncached(page))
		ptr = (char *) __pa(p) + __IA64_UNCACHED_OFFSET;
	else
		ptr = p;

	return ptr;
}

#endif /* _ASM_IA64_UACCESS_H */