#ifndef __V850_UACCESS_H__
#define __V850_UACCESS_H__

/*
 * User space memory access functions
 */

#include <linux/errno.h>
#include <linux/string.h>

#include <asm/segment.h>
#include <asm/machdep.h>

#define VERIFY_READ	0
#define VERIFY_WRITE	1

static inline int access_ok (int type, const void *addr, unsigned long size)
{
	/* XXX I guess we should check against real ram bounds at least, and
	   possibly make sure ADDR is not within the kernel.
	   For now we just check to make sure it's not a small positive
	   or negative value, as that will at least catch some kinds of
	   error.  In particular, we make sure that ADDR's not within the
	   interrupt vector area, which we know starts at zero, or within the
	   peripheral-I/O area, which is located just _before_ zero.  */
	unsigned long val = (unsigned long)addr;
	return val >= (0x80 + NUM_CPU_IRQS*16) && val < 0xFFFFF000;
}
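
/*
 * Usage sketch (illustrative only; example_fetch_word() is a hypothetical
 * helper, not part of this header): callers are expected to validate a
 * user pointer with access_ok() before touching it.  On this MMU-less
 * port the pointer can then be dereferenced directly.
 *
 *	static int example_fetch_word (const int *uptr, int *out)
 *	{
 *		if (! access_ok (VERIFY_READ, uptr, sizeof *uptr))
 *			return -EFAULT;
 *		*out = *uptr;
 *		return 0;
 *	}
 */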

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry
{
	unsigned long insn, fixup;
};

/* Returns 0 if exception not found and fixup otherwise.  */
extern unsigned long search_exception_table (unsigned long);
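
/*
 * Sketch of how a trap handler might consult the table above (illustrative
 * only; example_fixup_fault() is hypothetical, and it assumes the port's
 * struct pt_regs exposes the faulting program counter as regs->pc):
 *
 *	static int example_fixup_fault (struct pt_regs *regs)
 *	{
 *		unsigned long fixup = search_exception_table (regs->pc);
 *		if (! fixup)
 *			return 0;
 *		regs->pc = fixup;
 *		return 1;
 *	}
 *
 * A zero return means no entry covered the faulting instruction and the
 * fault is fatal; otherwise execution resumes at the fixup address.
 */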


/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 */

extern int bad_user_access_length (void);

#define __get_user(var, ptr)						\
({									\
	int __gu_err = 0;						\
	typeof(*(ptr)) __gu_val = 0;					\
	switch (sizeof (*(ptr))) {					\
	case 1:								\
	case 2:								\
	case 4:								\
		__gu_val = *(ptr);					\
		break;							\
	case 8:								\
		memcpy(&__gu_val, ptr, sizeof(__gu_val));		\
		break;							\
	default:							\
		__gu_val = 0;						\
		__gu_err = __get_user_bad ();				\
		break;							\
	}								\
	(var) = __gu_val;						\
	__gu_err;							\
})
#define __get_user_bad()	(bad_user_access_length (), (-EFAULT))

#define __put_user(var, ptr)						\
({									\
	int __pu_err = 0;						\
	switch (sizeof (*(ptr))) {					\
	case 1:								\
	case 2:								\
	case 4:								\
		*(ptr) = (var);						\
		break;							\
	case 8: {							\
		/* 64-bit stores go through a temporary and memcpy.  */	\
		typeof(*(ptr)) __pu_val = (var);			\
		memcpy(ptr, &__pu_val, sizeof(__pu_val));		\
	}								\
		break;							\
	default:							\
		__pu_err = __put_user_bad ();				\
		break;							\
	}								\
	__pu_err;							\
})
#define __put_user_bad()	(bad_user_access_length (), (-EFAULT))

#define put_user(x, ptr)	__put_user(x, ptr)
#define get_user(x, ptr)	__get_user(x, ptr)
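
/*
 * Usage sketch (illustrative only; example_double_arg() is a hypothetical
 * helper, not part of this header): fetch a value from user space, operate
 * on it, and write the result back.  Both macros evaluate to 0 on success;
 * in this implementation they only fail (with -EFAULT) for unsupported
 * access sizes.
 *
 *	static int example_double_arg (int *uptr)
 *	{
 *		int val;
 *		if (get_user (val, uptr))
 *			return -EFAULT;
 *		return put_user (val * 2, uptr);
 *	}
 */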

#define __copy_from_user(to, from, n)	(memcpy (to, from, n), 0)
#define __copy_to_user(to, from, n)	(memcpy (to, from, n), 0)

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

#define copy_from_user(to, from, n)	__copy_from_user (to, from, n)
#define copy_to_user(to, from, n)	__copy_to_user (to, from, n)

#define copy_to_user_ret(to, from, n, retval) \
	({ if (copy_to_user (to, from, n)) return retval; })

#define copy_from_user_ret(to, from, n, retval) \
	({ if (copy_from_user (to, from, n)) return retval; })
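
/*
 * Usage sketch (illustrative only; example_read() and its arguments are
 * hypothetical): a read()-style handler copying a kernel buffer out to
 * user space.  On this port the copy routines are plain memcpy()s that
 * always report 0 bytes left uncopied, but portable callers still check
 * the return value (or use the *_ret forms above, which return `retval'
 * from the enclosing function on failure).
 *
 *	static long example_read (char *ubuf, const char *kbuf, unsigned long len)
 *	{
 *		if (copy_to_user (ubuf, kbuf, len))
 *			return -EFAULT;
 *		return len;
 *	}
 */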

/*
 * Copy a null-terminated string from userspace.
 */

static inline long
strncpy_from_user (char *dst, const char *src, long count)
{
	char *tmp;
	strncpy (dst, src, count);
	for (tmp = dst; *tmp && count > 0; tmp++, count--)
		;
	return tmp - dst;
}
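
/*
 * Usage sketch (illustrative only; example_set_name() and its buffer
 * arguments are hypothetical): copy a user-supplied string into a
 * fixed-size kernel buffer.  The return value is the number of characters
 * copied, excluding the terminating NUL, so the caller terminates the
 * buffer itself when the source string fills it completely.  (A port with
 * fault handling would also return -EFAULT here, hence the < 0 check.)
 *
 *	static int example_set_name (char *kname, const char *uname, long size)
 *	{
 *		long len = strncpy_from_user (kname, uname, size - 1);
 *		if (len < 0)
 *			return -EFAULT;
 *		kname[len] = '\0';
 *		return 0;
 *	}
 */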

/*
 * Return the size of a string (including the ending 0)
 *
 * Return 0 on exception, a value greater than N if too long
 */
static inline long strnlen_user (const char *src, long n)
{
	return strlen (src) + 1;
}

#define strlen_user(str)	strnlen_user (str, 32767)

/*
 * Zero Userspace
 */

static inline unsigned long
clear_user (void *to, unsigned long n)
{
	memset (to, 0, n);
	return 0;
}
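
/*
 * Usage sketch (illustrative only; example_read_padded() is hypothetical):
 * the classic use of clear_user() is zero-filling the tail of a user
 * buffer after a short copy, so the caller never exposes stale data.
 *
 *	static long example_read_padded (char *ubuf, const char *kbuf,
 *					 unsigned long want, unsigned long have)
 *	{
 *		unsigned long n = have < want ? have : want;
 *		if (copy_to_user (ubuf, kbuf, n))
 *			return -EFAULT;
 *		if (n < want && clear_user (ubuf + n, want - n))
 *			return -EFAULT;
 *		return n;
 *	}
 */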

#endif /* __V850_UACCESS_H__ */