/*
 *  arch/s390/lib/uaccess_pt.c
 *
 *  User access functions based on page table walks for enhanced
 *  system layout without hardware support.
 *
 *  Copyright IBM Corp. 2006
 *  Author(s): Gerald Schaefer (gerald.schaefer@de.ibm.com)
 */

#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/mm.h>
#include <asm/uaccess.h>
#include <asm/futex.h>
#include "uaccess.h"

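/*
 * Walk the page tables for a user address and return a pointer to the
 * pte, or, on failure, a small fake "pointer" (< 0x1000) encoding a
 * translation-exception code for __handle_fault(). The values appear
 * to match the s390 DAT program-interruption codes: 0x3a/0x3b for
 * region-translation, 0x10 for segment-translation; the callers below
 * add 0x11 (page-translation) and 0x04 (protection) the same way.
 */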
static inline pte_t *follow_table(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		return (pte_t *) 0x3a;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
		return (pte_t *) 0x3b;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
		return (pte_t *) 0x10;

	return pte_offset_map(pmd, addr);
}

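/*
 * Copy between kernel and user space by walking the page tables:
 * resolve the user page under mm->page_table_lock, then memcpy()
 * through the kernel mapping of the page frame, at most one page per
 * iteration. On a failed walk the lock is dropped, __handle_fault()
 * resolves the fault and the loop is retried. Returns the number of
 * bytes that could not be copied.
 */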
static __always_inline size_t __user_copy_pt(unsigned long uaddr, void *kptr,
					     size_t n, int write_user)
{
	struct mm_struct *mm = current->mm;
	unsigned long offset, pfn, done, size;
	pte_t *pte;
	void *from, *to;

	done = 0;
retry:
	spin_lock(&mm->page_table_lock);
	do {
		pte = follow_table(mm, uaddr);
		if ((unsigned long) pte < 0x1000)
			goto fault;
		if (!pte_present(*pte)) {
			pte = (pte_t *) 0x11;
			goto fault;
		} else if (write_user && !pte_write(*pte)) {
			pte = (pte_t *) 0x04;
			goto fault;
		}

		pfn = pte_pfn(*pte);
		offset = uaddr & (PAGE_SIZE - 1);
		size = min(n - done, PAGE_SIZE - offset);
		if (write_user) {
			to = (void *)((pfn << PAGE_SHIFT) + offset);
			from = kptr + done;
		} else {
			from = (void *)((pfn << PAGE_SHIFT) + offset);
			to = kptr + done;
		}
		memcpy(to, from, size);
		done += size;
		uaddr += size;
	} while (done < n);
	spin_unlock(&mm->page_table_lock);
	return n - done;
fault:
	spin_unlock(&mm->page_table_lock);
	if (__handle_fault(uaddr, (unsigned long) pte, write_user))
		return n - done;
	goto retry;
}

/*
 * Translate a user address by page table walk (software DAT) and
 * return the corresponding kernel address, or 0 if the fault could not
 * be resolved. Must be called with current->mm->page_table_lock held;
 * the lock is dropped and reacquired while a fault is handled.
 */
static __always_inline unsigned long __dat_user_addr(unsigned long uaddr)
{
	struct mm_struct *mm = current->mm;
	unsigned long pfn;
	pte_t *pte;
	int rc;

retry:
	pte = follow_table(mm, uaddr);
	if ((unsigned long) pte < 0x1000)
		goto fault;
	if (!pte_present(*pte)) {
		pte = (pte_t *) 0x11;
		goto fault;
	}

	pfn = pte_pfn(*pte);
	return (pfn << PAGE_SHIFT) + (uaddr & (PAGE_SIZE - 1));
fault:
	spin_unlock(&mm->page_table_lock);
	rc = __handle_fault(uaddr, (unsigned long) pte, 0);
	spin_lock(&mm->page_table_lock);
	if (!rc)
		goto retry;
	return 0;
}

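/*
 * copy_from_user() semantics: the return value is the number of bytes
 * that could not be copied, and any uncopied tail of the kernel buffer
 * is zero-filled so callers never see stale data.
 */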
size_t copy_from_user_pt(size_t n, const void __user *from, void *to)
{
	size_t rc;

	if (segment_eq(get_fs(), KERNEL_DS)) {
		memcpy(to, (void __kernel __force *) from, n);
		return 0;
	}
	rc = __user_copy_pt((unsigned long) from, to, n, 0);
	if (unlikely(rc))
		memset(to + n - rc, 0, rc);
	return rc;
}

size_t copy_to_user_pt(size_t n, void __user *to, const void *from)
{
	if (segment_eq(get_fs(), KERNEL_DS)) {
		memcpy((void __kernel __force *) to, from, n);
		return 0;
	}
	return __user_copy_pt((unsigned long) to, (void *) from, n, 1);
}

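/*
 * Clear user memory by copying from empty_zero_page, one page at a
 * time; this reuses the fault handling and retry logic of
 * __user_copy_pt() instead of duplicating it for a memset-like path.
 */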
static size_t clear_user_pt(size_t n, void __user *to)
{
	long done, size, ret;

	if (segment_eq(get_fs(), KERNEL_DS)) {
		memset((void __kernel __force *) to, 0, n);
		return 0;
	}
	done = 0;
	do {
		if (n - done > PAGE_SIZE)
			size = PAGE_SIZE;
		else
			size = n - done;
		ret = __user_copy_pt((unsigned long) to + done,
				     &empty_zero_page, size, 1);
		done += size;
		if (ret)
			return ret + n - done;
	} while (done < n);
	return 0;
}

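/*
 * strnlen_user() semantics: returns the string length including the
 * terminating '\0' (i.e. strlen() + 1), count + 1 if no terminator is
 * found within count bytes, and 0 on an unresolvable fault. The scan
 * runs page by page with strnlen() on the kernel mapping.
 */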
static size_t strnlen_user_pt(size_t count, const char __user *src)
{
	char *addr;
	unsigned long uaddr = (unsigned long) src;
	struct mm_struct *mm = current->mm;
	unsigned long offset, pfn, done, len;
	pte_t *pte;
	size_t len_str;

	if (segment_eq(get_fs(), KERNEL_DS))
		return strnlen((const char __kernel __force *) src, count) + 1;
	done = 0;
retry:
	spin_lock(&mm->page_table_lock);
	do {
		pte = follow_table(mm, uaddr);
		if ((unsigned long) pte < 0x1000)
			goto fault;
		if (!pte_present(*pte)) {
			pte = (pte_t *) 0x11;
			goto fault;
		}

		pfn = pte_pfn(*pte);
		offset = uaddr & (PAGE_SIZE-1);
		addr = (char *)(pfn << PAGE_SHIFT) + offset;
		len = min(count - done, PAGE_SIZE - offset);
		len_str = strnlen(addr, len);
		done += len_str;
		uaddr += len_str;
	} while ((len_str == len) && (done < count));
	spin_unlock(&mm->page_table_lock);
	return done + 1;
fault:
	spin_unlock(&mm->page_table_lock);
	if (__handle_fault(uaddr, (unsigned long) pte, 0))
		return 0;
	goto retry;
}

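/*
 * strncpy_from_user() semantics: returns the number of bytes copied
 * (excluding a copied terminating '\0'), or -EFAULT. The length is
 * determined first with strnlen_user_pt(), then the bytes are copied
 * in one go.
 */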
static size_t strncpy_from_user_pt(size_t count, const char __user *src,
				   char *dst)
{
	size_t n = strnlen_user_pt(count, src);

	if (!n)
		return -EFAULT;
	if (n > count)
		n = count;
	if (segment_eq(get_fs(), KERNEL_DS)) {
		memcpy(dst, (const char __kernel __force *) src, n);
		if (dst[n-1] == '\0')
			return n-1;
		else
			return n;
	}
	if (__user_copy_pt((unsigned long) src, dst, n, 0))
		return -EFAULT;
	if (dst[n-1] == '\0')
		return n-1;
	else
		return n;
}

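/*
 * Copy directly between two user buffers. Both the source and the
 * destination page must be resolved before each memcpy(); the chunk
 * size is limited by the larger of the two page offsets so that
 * neither side crosses a page boundary within one iteration.
 */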
static size_t copy_in_user_pt(size_t n, void __user *to,
			      const void __user *from)
{
	struct mm_struct *mm = current->mm;
	unsigned long offset_from, offset_to, offset_max, pfn_from, pfn_to,
		      uaddr, done, size, error_code;
	unsigned long uaddr_from = (unsigned long) from;
	unsigned long uaddr_to = (unsigned long) to;
	pte_t *pte_from, *pte_to;
	int write_user;

	if (segment_eq(get_fs(), KERNEL_DS)) {
		memcpy((void __force *) to, (void __force *) from, n);
		return 0;
	}
	done = 0;
retry:
	spin_lock(&mm->page_table_lock);
	do {
		write_user = 0;
		uaddr = uaddr_from;
		pte_from = follow_table(mm, uaddr_from);
		error_code = (unsigned long) pte_from;
		if (error_code < 0x1000)
			goto fault;
		if (!pte_present(*pte_from)) {
			error_code = 0x11;
			goto fault;
		}

		write_user = 1;
		uaddr = uaddr_to;
		pte_to = follow_table(mm, uaddr_to);
		error_code = (unsigned long) pte_to;
		if (error_code < 0x1000)
			goto fault;
		if (!pte_present(*pte_to)) {
			error_code = 0x11;
			goto fault;
		} else if (!pte_write(*pte_to)) {
			error_code = 0x04;
			goto fault;
		}

		pfn_from = pte_pfn(*pte_from);
		pfn_to = pte_pfn(*pte_to);
		offset_from = uaddr_from & (PAGE_SIZE-1);
		offset_to = uaddr_to & (PAGE_SIZE-1);
		offset_max = max(offset_from, offset_to);
		size = min(n - done, PAGE_SIZE - offset_max);

		memcpy((void *)(pfn_to << PAGE_SHIFT) + offset_to,
		       (void *)(pfn_from << PAGE_SHIFT) + offset_from, size);
		done += size;
		uaddr_from += size;
		uaddr_to += size;
	} while (done < n);
	spin_unlock(&mm->page_table_lock);
	return n - done;
fault:
	spin_unlock(&mm->page_table_lock);
	if (__handle_fault(uaddr, error_code, write_user))
		return n - done;
	goto retry;
}

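/*
 * Atomic futex operation via a compare-and-swap loop: load the old
 * value, apply the operation (insn), then CS retries until the word is
 * updated atomically. The EX_TABLE entries make a fault in any of the
 * user-space accesses return -EFAULT instead of oopsing. A rough C
 * equivalent of the loop (illustrative sketch only):
 *
 *	do {
 *		oldval = *uaddr;
 *		newval = op(oldval, oparg);
 *	} while (cmpxchg(uaddr, oldval, newval) != oldval);
 */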
#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg)	\
	asm volatile("0: l %1,0(%6)\n"					\
		     "1: " insn						\
		     "2: cs %1,%2,0(%6)\n"				\
		     "3: jl 1b\n"					\
		     "   lhi %0,0\n"					\
		     "4:\n"						\
		     EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b)	\
		     : "=d" (ret), "=&d" (oldval), "=&d" (newval),	\
		       "=m" (*uaddr)					\
		     : "0" (-EFAULT), "d" (oparg), "a" (uaddr),		\
		       "m" (*uaddr) : "cc" );

static int __futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old)
{
	int oldval = 0, newval, ret;

	switch (op) {
	case FUTEX_OP_SET:
		__futex_atomic_op("lr %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	case FUTEX_OP_ADD:
		__futex_atomic_op("lr %2,%1\nar %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	case FUTEX_OP_OR:
		__futex_atomic_op("lr %2,%1\nor %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	case FUTEX_OP_ANDN:
		__futex_atomic_op("lr %2,%1\nnr %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	case FUTEX_OP_XOR:
		__futex_atomic_op("lr %2,%1\nxr %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	default:
		ret = -ENOSYS;
	}
	if (ret == 0)
		*old = oldval;
	return ret;
}

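/*
 * The asm above needs a kernel-mapped address. For the page-table
 * variant the user address is translated with __dat_user_addr() under
 * the page table lock, and the page is pinned with get_page() so it
 * cannot go away while the atomic operation runs after the lock is
 * dropped.
 */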
int futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old)
{
	int ret;

	if (segment_eq(get_fs(), KERNEL_DS))
		return __futex_atomic_op_pt(op, uaddr, oparg, old);
	spin_lock(&current->mm->page_table_lock);
	uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr);
	if (!uaddr) {
		spin_unlock(&current->mm->page_table_lock);
		return -EFAULT;
	}
	get_page(virt_to_page(uaddr));
	spin_unlock(&current->mm->page_table_lock);
	ret = __futex_atomic_op_pt(op, uaddr, oparg, old);
	put_page(virt_to_page(uaddr));
	return ret;
}

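/*
 * Single compare-and-swap: CS stores newval only if the word still
 * contains oldval; either way the current value of the word ends up in
 * %1 and is returned, so the caller can tell whether the exchange
 * succeeded.
 */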
static int __futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval)
{
	int ret;

	asm volatile("0: cs %1,%4,0(%5)\n"
		     "1: lr %0,%1\n"
		     "2:\n"
		     EX_TABLE(0b,2b) EX_TABLE(1b,2b)
		     : "=d" (ret), "+d" (oldval), "=m" (*uaddr)
		     : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
		     : "cc", "memory" );
	return ret;
}

int futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval)
{
	int ret;

	if (segment_eq(get_fs(), KERNEL_DS))
		return __futex_atomic_cmpxchg_pt(uaddr, oldval, newval);
	spin_lock(&current->mm->page_table_lock);
	uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr);
	if (!uaddr) {
		spin_unlock(&current->mm->page_table_lock);
		return -EFAULT;
	}
	get_page(virt_to_page(uaddr));
	spin_unlock(&current->mm->page_table_lock);
	ret = __futex_atomic_cmpxchg_pt(uaddr, oldval, newval);
	put_page(virt_to_page(uaddr));
	return ret;
}

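/*
 * Ops vector for the page-table-walk implementation; presumably
 * selected at boot when the hardware-assisted uaccess methods are not
 * available (see the file header).
 */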
struct uaccess_ops uaccess_pt = {
	.copy_from_user = copy_from_user_pt,
	.copy_from_user_small = copy_from_user_pt,
	.copy_to_user = copy_to_user_pt,
	.copy_to_user_small = copy_to_user_pt,
	.copy_in_user = copy_in_user_pt,
	.clear_user = clear_user_pt,
	.strnlen_user = strnlen_user_pt,
	.strncpy_from_user = strncpy_from_user_pt,
	.futex_atomic_op = futex_atomic_op_pt,
	.futex_atomic_cmpxchg = futex_atomic_cmpxchg_pt,
};