/*
 * User access functions based on page table walks for enhanced
 * system layout without hardware support.
 *
 * Copyright IBM Corp. 2006, 2012
 * Author(s): Gerald Schaefer (gerald.schaefer@de.ibm.com)
 */

#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <asm/uaccess.h>
#include <asm/futex.h>
#include "uaccess.h"

#ifndef CONFIG_64BIT
#define AHI	"ahi"
#define SLR	"slr"
#else
#define AHI	"aghi"
#define SLR	"slgr"
#endif
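
/*
 * AHI/SLR pick the 31-bit or 64-bit flavor of the same instruction so
 * the inline assembly below can be written once for both builds. As an
 * illustrative example of the string pasting, a 64-bit build turns
 *
 *	" "AHI" %0,-1\n"	into	" aghi %0,-1\n"
 *
 * (add halfword immediate), while SLR selects slr/slgr (subtract
 * logical register).
 */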

static size_t strnlen_kernel(size_t count, const char __user *src)
{
	register unsigned long reg0 asm("0") = 0UL;
	unsigned long tmp1, tmp2;

	asm volatile(
		" la %2,0(%1)\n"
		" la %3,0(%0,%1)\n"
		" "SLR" %0,%0\n"
		"0: srst %3,%2\n"
		" jo 0b\n"
		" la %0,1(%3)\n"	/* strnlen_kernel result includes \0 */
		" "SLR" %0,%1\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "+a" (count), "+a" (src), "=a" (tmp1), "=a" (tmp2)
		: "d" (reg0) : "cc", "memory");
	return count;
}
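
/*
 * strnlen_kernel() scans with SRST (search string): register 0 holds
 * the termination character (zero), %2 the start and %3 the upper
 * bound, and the "jo" loop resumes whenever the CPU stops early. If
 * the scan faults, the EX_TABLE entry lands at label 1 while count is
 * still zero, so 0 doubles as the fault indicator. Illustrative
 * contract for a buffer holding "abc\0":
 *
 *	strnlen_kernel(8, src) returns 4 (result includes the '\0')
 */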

static size_t copy_in_kernel(size_t count, void __user *to,
			     const void __user *from)
{
	unsigned long tmp1;

	asm volatile(
		" "AHI" %0,-1\n"
		" jo 5f\n"
		" bras %3,3f\n"
		"0:"AHI" %0,257\n"
		"1: mvc 0(1,%1),0(%2)\n"
		" la %1,1(%1)\n"
		" la %2,1(%2)\n"
		" "AHI" %0,-1\n"
		" jnz 1b\n"
		" j 5f\n"
		"2: mvc 0(256,%1),0(%2)\n"
		" la %1,256(%1)\n"
		" la %2,256(%2)\n"
		"3:"AHI" %0,-256\n"
		" jnm 2b\n"
		"4: ex %0,1b-0b(%3)\n"
		"5:"SLR" %0,%0\n"
		"6:\n"
		EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
		: "+a" (count), "+a" (to), "+a" (from), "=a" (tmp1)
		: : "cc", "memory");
	return count;
}
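
/*
 * copy_in_kernel() copies 256-byte blocks with MVC (label 2), then
 * uses EX at label 4 to execute the one-byte MVC at label 1 with its
 * length field replaced by the low byte of the remaining count, so the
 * 1..256 byte tail needs a single instruction. The byte loop at
 * label 1 is only entered via the exception table, to locate the exact
 * faulting byte. Illustrative trace for count == 600 (not from the
 * source): two 256-byte MVCs, then EX with length field 87 copies the
 * final 88 bytes (MVC encodes length minus one).
 */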

/*
 * Returns kernel address for user virtual address. If the returned address is
 * >= -4095 (IS_ERR_VALUE(x) returns true), a fault has occurred and the
 * address contains the (negative) exception code.
 */
#ifdef CONFIG_64BIT

static unsigned long follow_table(struct mm_struct *mm,
				  unsigned long address, int write)
{
	unsigned long *table = (unsigned long *)__pa(mm->pgd);

	if (unlikely(address > mm->context.asce_limit - 1))
		return -0x38UL;
	switch (mm->context.asce_bits & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_REGION1:
		table = table + ((address >> 53) & 0x7ff);
		if (unlikely(*table & _REGION_ENTRY_INVALID))
			return -0x39UL;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* fallthrough */
	case _ASCE_TYPE_REGION2:
		table = table + ((address >> 42) & 0x7ff);
		if (unlikely(*table & _REGION_ENTRY_INVALID))
			return -0x3aUL;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* fallthrough */
	case _ASCE_TYPE_REGION3:
		table = table + ((address >> 31) & 0x7ff);
		if (unlikely(*table & _REGION_ENTRY_INVALID))
			return -0x3bUL;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* fallthrough */
	case _ASCE_TYPE_SEGMENT:
		table = table + ((address >> 20) & 0x7ff);
		if (unlikely(*table & _SEGMENT_ENTRY_INVALID))
			return -0x10UL;
		if (unlikely(*table & _SEGMENT_ENTRY_LARGE)) {
			if (write && (*table & _SEGMENT_ENTRY_PROTECT))
				return -0x04UL;
			return (*table & _SEGMENT_ENTRY_ORIGIN_LARGE) +
				(address & ~_SEGMENT_ENTRY_ORIGIN_LARGE);
		}
		table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
	}
	table = table + ((address >> 12) & 0xff);
	if (unlikely(*table & _PAGE_INVALID))
		return -0x11UL;
	if (write && (*table & _PAGE_PROTECT))
		return -0x04UL;
	return (*table & PAGE_MASK) + (address & ~PAGE_MASK);
}
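
/*
 * The shift/mask pairs above follow the z/Architecture translation
 * format: 11-bit indices for the region first/second/third and segment
 * tables (taken at bits 53, 42, 31 and 20) and an 8-bit page index at
 * bit 12; the switch falls through from the topmost level the address
 * space actually uses (asce_bits). Worked decomposition for the
 * hypothetical address 0x12345678:
 *
 *	(addr >> 31) & 0x7ff = 0x000	region third index
 *	(addr >> 20) & 0x7ff = 0x123	segment index
 *	(addr >> 12) & 0xff  = 0x45	page index
 *	addr & ~PAGE_MASK    = 0x678	byte offset
 */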

#else /* CONFIG_64BIT */

static unsigned long follow_table(struct mm_struct *mm,
				  unsigned long address, int write)
{
	unsigned long *table = (unsigned long *)__pa(mm->pgd);

	table = table + ((address >> 20) & 0x7ff);
	if (unlikely(*table & _SEGMENT_ENTRY_INVALID))
		return -0x10UL;
	table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
	table = table + ((address >> 12) & 0xff);
	if (unlikely(*table & _PAGE_INVALID))
		return -0x11UL;
	if (write && (*table & _PAGE_PROTECT))
		return -0x04UL;
	return (*table & PAGE_MASK) + (address & ~PAGE_MASK);
}

#endif /* CONFIG_64BIT */

static __always_inline size_t __user_copy_pt(unsigned long uaddr, void *kptr,
					     size_t n, int write_user)
{
	struct mm_struct *mm = current->mm;
	unsigned long offset, done, size, kaddr;
	void *from, *to;

	done = 0;
retry:
	spin_lock(&mm->page_table_lock);
	do {
		kaddr = follow_table(mm, uaddr, write_user);
		if (IS_ERR_VALUE(kaddr))
			goto fault;

		offset = uaddr & ~PAGE_MASK;
		size = min(n - done, PAGE_SIZE - offset);
		if (write_user) {
			to = (void *) kaddr;
			from = kptr + done;
		} else {
			from = (void *) kaddr;
			to = kptr + done;
		}
		memcpy(to, from, size);
		done += size;
		uaddr += size;
	} while (done < n);
	spin_unlock(&mm->page_table_lock);
	return n - done;
fault:
	spin_unlock(&mm->page_table_lock);
	if (__handle_fault(uaddr, -kaddr, write_user))
		return n - done;
	goto retry;
}
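
/*
 * __user_copy_pt() shows the pattern shared by most functions in this
 * file: walk the page table under mm->page_table_lock, copy page-sized
 * chunks through the kernel mapping, and on a translation failure drop
 * the lock, let __handle_fault() resolve the fault (the negated
 * follow_table() result is the program interruption code) and retry.
 * The return value counts the bytes NOT copied, matching the
 * copy_{from,to}_user() convention. Illustrative caller-side sketch
 * (names are hypothetical):
 *
 *	if (__user_copy_pt((unsigned long) usrc, kbuf, len, 0))
 *		return -EFAULT;
 */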

/*
 * Do DAT for user address by page table walk, return kernel address.
 * This function needs to be called with current->mm->page_table_lock held.
 */
static __always_inline unsigned long __dat_user_addr(unsigned long uaddr,
						     int write)
{
	struct mm_struct *mm = current->mm;
	unsigned long kaddr;
	int rc;

retry:
	kaddr = follow_table(mm, uaddr, write);
	if (IS_ERR_VALUE(kaddr))
		goto fault;

	return kaddr;
fault:
	spin_unlock(&mm->page_table_lock);
	rc = __handle_fault(uaddr, -kaddr, write);
	spin_lock(&mm->page_table_lock);
	if (!rc)
		goto retry;
	return 0;
}
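
/*
 * __dat_user_addr() performs the walk for callers that already hold
 * page_table_lock and want it still held on success; on a fault it
 * briefly drops the lock around __handle_fault() and retries.
 * Hypothetical call pattern (mirrors the futex wrappers below):
 *
 *	spin_lock(&current->mm->page_table_lock);
 *	kaddr = __dat_user_addr(uaddr, 1);
 *	if (!kaddr)
 *		return -EFAULT;	(unresolvable fault, lock still held)
 */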

size_t copy_from_user_pt(size_t n, const void __user *from, void *to)
{
	size_t rc;

	if (segment_eq(get_fs(), KERNEL_DS))
		return copy_in_kernel(n, (void __user *) to, from);
	rc = __user_copy_pt((unsigned long) from, to, n, 0);
	if (unlikely(rc))
		memset(to + n - rc, 0, rc);
	return rc;
}

size_t copy_to_user_pt(size_t n, void __user *to, const void *from)
{
	if (segment_eq(get_fs(), KERNEL_DS))
		return copy_in_kernel(n, to, (void __user *) from);
	return __user_copy_pt((unsigned long) to, (void *) from, n, 1);
}
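
/*
 * When the address space is KERNEL_DS (e.g. around a set_fs() section
 * in kernel context), "user" pointers are really kernel pointers, so
 * both wrappers above skip the page table walk and delegate to
 * copy_in_kernel(). Hypothetical usage sketch, not from this file:
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	rc = copy_from_user_pt(sizeof(val), uptr, &val);
 *	set_fs(old_fs);
 */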

static size_t clear_user_pt(size_t n, void __user *to)
{
	void *zpage = (void *) empty_zero_page;
	long done, size, ret;

	done = 0;
	do {
		if (n - done > PAGE_SIZE)
			size = PAGE_SIZE;
		else
			size = n - done;
		if (segment_eq(get_fs(), KERNEL_DS))
			ret = copy_in_kernel(size, to, (void __user *) zpage);
		else
			ret = __user_copy_pt((unsigned long) to, zpage, size, 1);
		done += size;
		to += size;
		if (ret)
			return ret + n - done;
	} while (done < n);
	return 0;
}
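
/*
 * clear_user_pt() has no dedicated zeroing loop; it reuses the copy
 * primitives with empty_zero_page as the source, handing at most one
 * page ("size" bytes) of zeros to the copy primitive per iteration.
 * On a partial failure the unwritten remainder, ret + n - done, is
 * returned in the usual uaccess style.
 */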

static size_t strnlen_user_pt(size_t count, const char __user *src)
{
	unsigned long uaddr = (unsigned long) src;
	struct mm_struct *mm = current->mm;
	unsigned long offset, done, len, kaddr;
	size_t len_str;

	if (unlikely(!count))
		return 0;
	if (segment_eq(get_fs(), KERNEL_DS))
		return strnlen_kernel(count, src);
	done = 0;
retry:
	spin_lock(&mm->page_table_lock);
	do {
		kaddr = follow_table(mm, uaddr, 0);
		if (IS_ERR_VALUE(kaddr))
			goto fault;

		offset = uaddr & ~PAGE_MASK;
		len = min(count - done, PAGE_SIZE - offset);
		len_str = strnlen((char *) kaddr, len);
		done += len_str;
		uaddr += len_str;
	} while ((len_str == len) && (done < count));
	spin_unlock(&mm->page_table_lock);
	return done + 1;
fault:
	spin_unlock(&mm->page_table_lock);
	if (__handle_fault(uaddr, -kaddr, 0))
		return 0;
	goto retry;
}
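
/*
 * Like strnlen_kernel(), strnlen_user_pt() counts the terminating
 * '\0', which leaves 0 free to signal an unresolvable fault.
 * Illustrative results for a user buffer holding "abc\0":
 *
 *	strnlen_user_pt(16, src) returns 4
 *	strnlen_user_pt(2, src)  returns 3 (no '\0' found: done + 1)
 */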

static size_t strncpy_from_user_pt(size_t count, const char __user *src,
				   char *dst)
{
	size_t done, len, offset, len_str;

	if (unlikely(!count))
		return 0;
	done = 0;
	do {
		offset = (size_t)src & ~PAGE_MASK;
		len = min(count - done, PAGE_SIZE - offset);
		if (segment_eq(get_fs(), KERNEL_DS)) {
			if (copy_in_kernel(len, (void __user *) dst, src))
				return -EFAULT;
		} else {
			if (__user_copy_pt((unsigned long) src, dst, len, 0))
				return -EFAULT;
		}
		len_str = strnlen(dst, len);
		done += len_str;
		src += len_str;
		dst += len_str;
	} while ((len_str == len) && (done < count));
	return done;
}
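
/*
 * strncpy_from_user_pt() copies whole page-bounded chunks first and
 * only then measures them with strnlen(), so it may read bytes beyond
 * the terminating '\0' within a chunk; the result is the string length
 * without the terminator, or -EFAULT. Hypothetical caller sketch:
 *
 *	char name[32];
 *	size_t n = strncpy_from_user_pt(sizeof(name), usrc, name);
 */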

static size_t copy_in_user_pt(size_t n, void __user *to,
			      const void __user *from)
{
	struct mm_struct *mm = current->mm;
	unsigned long offset_max, uaddr, done, size, error_code;
	unsigned long uaddr_from = (unsigned long) from;
	unsigned long uaddr_to = (unsigned long) to;
	unsigned long kaddr_to, kaddr_from;
	int write_user;

	if (segment_eq(get_fs(), KERNEL_DS))
		return copy_in_kernel(n, to, from);
	done = 0;
retry:
	spin_lock(&mm->page_table_lock);
	do {
		write_user = 0;
		uaddr = uaddr_from;
		kaddr_from = follow_table(mm, uaddr_from, 0);
		error_code = kaddr_from;
		if (IS_ERR_VALUE(error_code))
			goto fault;

		write_user = 1;
		uaddr = uaddr_to;
		kaddr_to = follow_table(mm, uaddr_to, 1);
		error_code = (unsigned long) kaddr_to;
		if (IS_ERR_VALUE(error_code))
			goto fault;

		offset_max = max(uaddr_from & ~PAGE_MASK,
				 uaddr_to & ~PAGE_MASK);
		size = min(n - done, PAGE_SIZE - offset_max);

		memcpy((void *) kaddr_to, (void *) kaddr_from, size);
		done += size;
		uaddr_from += size;
		uaddr_to += size;
	} while (done < n);
	spin_unlock(&mm->page_table_lock);
	return n - done;
fault:
	spin_unlock(&mm->page_table_lock);
	if (__handle_fault(uaddr, -error_code, write_user))
		return n - done;
	goto retry;
}
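
/*
 * copy_in_user_pt() must translate both user addresses inside the same
 * locked section. offset_max takes the larger of the two in-page
 * offsets so a single memcpy() never crosses a page boundary on either
 * side. Worked example (hypothetical offsets): from at 0xf00 and to at
 * 0x100 within their pages gives offset_max = 0xf00, so at most
 * PAGE_SIZE - 0xf00 = 0x100 bytes move per iteration.
 */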

#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg)	\
	asm volatile("0: l   %1,0(%6)\n"				\
		     "1: " insn						\
		     "2: cs  %1,%2,0(%6)\n"				\
		     "3: jl  1b\n"					\
		     "   lhi %0,0\n"					\
		     "4:\n"						\
		     EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b)	\
		     : "=d" (ret), "=&d" (oldval), "=&d" (newval),	\
		       "=m" (*uaddr)					\
		     : "0" (-EFAULT), "d" (oparg), "a" (uaddr),		\
		       "m" (*uaddr) : "cc" );

static int __futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old)
{
	int oldval = 0, newval, ret;

	switch (op) {
	case FUTEX_OP_SET:
		__futex_atomic_op("lr %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	case FUTEX_OP_ADD:
		__futex_atomic_op("lr %2,%1\nar %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	case FUTEX_OP_OR:
		__futex_atomic_op("lr %2,%1\nor %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	case FUTEX_OP_ANDN:
		__futex_atomic_op("lr %2,%1\nnr %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	case FUTEX_OP_XOR:
		__futex_atomic_op("lr %2,%1\nxr %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	default:
		ret = -ENOSYS;
	}
	if (ret == 0)
		*old = oldval;
	return ret;
}

int futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old)
{
	int ret;

	if (segment_eq(get_fs(), KERNEL_DS))
		return __futex_atomic_op_pt(op, uaddr, oparg, old);
	spin_lock(&current->mm->page_table_lock);
	uaddr = (u32 __force __user *)
		__dat_user_addr((__force unsigned long) uaddr, 1);
	if (!uaddr) {
		spin_unlock(&current->mm->page_table_lock);
		return -EFAULT;
	}
	get_page(virt_to_page(uaddr));
	spin_unlock(&current->mm->page_table_lock);
	ret = __futex_atomic_op_pt(op, uaddr, oparg, old);
	put_page(virt_to_page(uaddr));
	return ret;
}
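
/*
 * For a genuine user futex the word is translated once with
 * __dat_user_addr() and the backing page is pinned with get_page(), so
 * the kernel-address alias stays valid after page_table_lock is
 * dropped; the atomic operation then runs on that alias and the page
 * is released with put_page(). The same pinning scheme is used by the
 * cmpxchg wrapper below.
 */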

static int __futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr,
				     u32 oldval, u32 newval)
{
	int ret;

	asm volatile("0: cs   %1,%4,0(%5)\n"
		     "1: la   %0,0\n"
		     "2:\n"
		     EX_TABLE(0b,2b) EX_TABLE(1b,2b)
		     : "=d" (ret), "+d" (oldval), "=m" (*uaddr)
		     : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
		     : "cc", "memory" );
	*uval = oldval;
	return ret;
}
443
Michel Lespinasse8d7718a2011-03-10 18:50:58 -0800444int futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr,
445 u32 oldval, u32 newval)
Heiko Carstens3f12ebc2008-04-17 07:46:27 +0200446{
447 int ret;
448
449 if (segment_eq(get_fs(), KERNEL_DS))
Michel Lespinasse37a9d912011-03-10 18:48:51 -0800450 return __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval);
Heiko Carstens3f12ebc2008-04-17 07:46:27 +0200451 spin_lock(&current->mm->page_table_lock);
Martin Schwidefsky3c52e492011-10-30 15:17:15 +0100452 uaddr = (u32 __force __user *)
Gerald Schaefer4db84d42012-09-10 16:43:26 +0200453 __dat_user_addr((__force unsigned long) uaddr, 1);
Heiko Carstens3f12ebc2008-04-17 07:46:27 +0200454 if (!uaddr) {
455 spin_unlock(&current->mm->page_table_lock);
456 return -EFAULT;
457 }
458 get_page(virt_to_page(uaddr));
459 spin_unlock(&current->mm->page_table_lock);
Michel Lespinasse37a9d912011-03-10 18:48:51 -0800460 ret = __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval);
Gerald Schaeferc1821c22007-02-05 21:18:17 +0100461 put_page(virt_to_page(uaddr));
462 return ret;
463}

struct uaccess_ops uaccess_pt = {
	.copy_from_user = copy_from_user_pt,
	.copy_to_user = copy_to_user_pt,
	.copy_in_user = copy_in_user_pt,
	.clear_user = clear_user_pt,
	.strnlen_user = strnlen_user_pt,
	.strncpy_from_user = strncpy_from_user_pt,
	.futex_atomic_op = futex_atomic_op_pt,
	.futex_atomic_cmpxchg = futex_atomic_cmpxchg_pt,
};
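
/*
 * uaccess_pt is one implementation of the s390 uaccess method table,
 * used when the hardware offers no direct way to access user space
 * (e.g. no mvcos facility). Callers reach it through the generic
 * wrappers; a hedged sketch of the dispatch, assuming the uaccess ops
 * pointer used elsewhere in arch/s390:
 *
 *	ret = uaccess.copy_from_user(n, from, to);
 */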