/*
 * User access functions based on page table walks for enhanced
 * system layout without hardware support.
 *
 * Copyright IBM Corp. 2006, 2012
 * Author(s): Gerald Schaefer (gerald.schaefer@de.ibm.com)
 */

#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <asm/uaccess.h>
#include <asm/futex.h>
#include "uaccess.h"

/*
 * Returns kernel address for user virtual address. If the returned address is
 * >= -4095 (IS_ERR_VALUE(x) returns true), a fault has occurred and the
 * address contains the (negative) exception code.
 */
static __always_inline unsigned long follow_table(struct mm_struct *mm,
						  unsigned long addr, int write)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep;

	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		return -0x3aUL;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
		return -0x3bUL;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return -0x10UL;
	if (pmd_large(*pmd)) {
		if (write && (pmd_val(*pmd) & _SEGMENT_ENTRY_RO))
			return -0x04UL;
		return (pmd_val(*pmd) & HPAGE_MASK) + (addr & ~HPAGE_MASK);
	}
	if (unlikely(pmd_bad(*pmd)))
		return -0x10UL;

	ptep = pte_offset_map(pmd, addr);
	if (!pte_present(*ptep))
		return -0x11UL;
	if (write && (!pte_write(*ptep) || !pte_dirty(*ptep)))
		return -0x04UL;

	return (pte_val(*ptep) & PAGE_MASK) + (addr & ~PAGE_MASK);
}

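/*
 * Copy "n" bytes between the user address "uaddr" and the kernel buffer
 * "kptr", direction selected by "write_user". User pages are resolved
 * with follow_table() under the page table lock; on fault the lock is
 * dropped, the fault is handled and the copy is retried. Returns the
 * number of bytes that could NOT be copied.
 */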
static __always_inline size_t __user_copy_pt(unsigned long uaddr, void *kptr,
					     size_t n, int write_user)
{
	struct mm_struct *mm = current->mm;
	unsigned long offset, done, size, kaddr;
	void *from, *to;

	done = 0;
retry:
	spin_lock(&mm->page_table_lock);
	do {
		kaddr = follow_table(mm, uaddr, write_user);
		if (IS_ERR_VALUE(kaddr))
			goto fault;

		offset = uaddr & ~PAGE_MASK;
		size = min(n - done, PAGE_SIZE - offset);
		if (write_user) {
			to = (void *) kaddr;
			from = kptr + done;
		} else {
			from = (void *) kaddr;
			to = kptr + done;
		}
		memcpy(to, from, size);
		done += size;
		uaddr += size;
	} while (done < n);
	spin_unlock(&mm->page_table_lock);
	return n - done;
fault:
	spin_unlock(&mm->page_table_lock);
	if (__handle_fault(uaddr, -kaddr, write_user))
		return n - done;
	goto retry;
}

/*
 * Do DAT for user address by page table walk, return kernel address.
 * This function needs to be called with current->mm->page_table_lock held.
 */
static __always_inline unsigned long __dat_user_addr(unsigned long uaddr,
						     int write)
{
	struct mm_struct *mm = current->mm;
	unsigned long kaddr;
	int rc;

retry:
	kaddr = follow_table(mm, uaddr, write);
	if (IS_ERR_VALUE(kaddr))
		goto fault;

	return kaddr;
fault:
	spin_unlock(&mm->page_table_lock);
	rc = __handle_fault(uaddr, -kaddr, write);
	spin_lock(&mm->page_table_lock);
	if (!rc)
		goto retry;
	return 0;
}

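/*
 * Copy "n" bytes from user space to kernel space. The tail that could
 * not be copied is zero-filled so the kernel buffer never contains
 * stale data. Returns the number of bytes not copied (0 on success).
 */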
size_t copy_from_user_pt(size_t n, const void __user *from, void *to)
{
	size_t rc;

	if (segment_eq(get_fs(), KERNEL_DS)) {
		memcpy(to, (void __kernel __force *) from, n);
		return 0;
	}
	rc = __user_copy_pt((unsigned long) from, to, n, 0);
	if (unlikely(rc))
		memset(to + n - rc, 0, rc);
	return rc;
}

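/* Copy "n" bytes from kernel space to user space; returns bytes not copied. */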
size_t copy_to_user_pt(size_t n, void __user *to, const void *from)
{
	if (segment_eq(get_fs(), KERNEL_DS)) {
		memcpy((void __kernel __force *) to, from, n);
		return 0;
	}
	return __user_copy_pt((unsigned long) to, (void *) from, n, 1);
}

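/*
 * Clear "n" bytes of user memory by copying from empty_zero_page, one
 * page-sized chunk at a time. Returns the number of bytes not cleared.
 */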
static size_t clear_user_pt(size_t n, void __user *to)
{
	long done, size, ret;

	if (segment_eq(get_fs(), KERNEL_DS)) {
		memset((void __kernel __force *) to, 0, n);
		return 0;
	}
	done = 0;
	do {
		if (n - done > PAGE_SIZE)
			size = PAGE_SIZE;
		else
			size = n - done;
		ret = __user_copy_pt((unsigned long) to + done,
				     &empty_zero_page, size, 1);
		done += size;
		if (ret)
			return ret + n - done;
	} while (done < n);
	return 0;
}

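/*
 * Determine the length of a user space string, including the
 * terminating zero byte, scanning at most "count" bytes one page
 * at a time. Returns the length on success and 0 on fault.
 */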
static size_t strnlen_user_pt(size_t count, const char __user *src)
{
	unsigned long uaddr = (unsigned long) src;
	struct mm_struct *mm = current->mm;
	unsigned long offset, done, len, kaddr;
	size_t len_str;

	if (unlikely(!count))
		return 0;
	if (segment_eq(get_fs(), KERNEL_DS))
		return strnlen((const char __kernel __force *) src, count) + 1;
	done = 0;
retry:
	spin_lock(&mm->page_table_lock);
	do {
		kaddr = follow_table(mm, uaddr, 0);
		if (IS_ERR_VALUE(kaddr))
			goto fault;

		offset = uaddr & ~PAGE_MASK;
		len = min(count - done, PAGE_SIZE - offset);
		len_str = strnlen((char *) kaddr, len);
		done += len_str;
		uaddr += len_str;
	} while ((len_str == len) && (done < count));
	spin_unlock(&mm->page_table_lock);
	return done + 1;
fault:
	spin_unlock(&mm->page_table_lock);
	if (__handle_fault(uaddr, -kaddr, 0))
		return 0;
	goto retry;
}

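/*
 * Copy a NUL-terminated string from user space, at most "count" bytes,
 * one page chunk at a time. Returns the number of bytes copied (not
 * counting the terminator) or -EFAULT on fault.
 */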
static size_t strncpy_from_user_pt(size_t count, const char __user *src,
				   char *dst)
{
	size_t done, len, offset, len_str;

	if (unlikely(!count))
		return 0;
	if (segment_eq(get_fs(), KERNEL_DS)) {
		len = strnlen((const char __kernel __force *) src, count) + 1;
		if (len > count)
			len = count;
		memcpy(dst, (const char __kernel __force *) src, len);
		return (dst[len - 1] == '\0') ? len - 1 : len;
	}
	done = 0;
	do {
		offset = (size_t)src & ~PAGE_MASK;
		len = min(count - done, PAGE_SIZE - offset);
		if (__user_copy_pt((unsigned long) src, dst, len, 0))
			return -EFAULT;
		len_str = strnlen(dst, len);
		done += len_str;
		src += len_str;
		dst += len_str;
	} while ((len_str == len) && (done < count));
	return done;
}

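/*
 * Copy "n" bytes from one user address to another. Both addresses are
 * resolved with follow_table() for every chunk, so each chunk is
 * bounded by whichever of the two addresses is closer to its page
 * boundary. Returns the number of bytes not copied.
 */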
static size_t copy_in_user_pt(size_t n, void __user *to,
			      const void __user *from)
{
	struct mm_struct *mm = current->mm;
	unsigned long offset_max, uaddr, done, size, error_code;
	unsigned long uaddr_from = (unsigned long) from;
	unsigned long uaddr_to = (unsigned long) to;
	unsigned long kaddr_to, kaddr_from;
	int write_user;

	if (segment_eq(get_fs(), KERNEL_DS)) {
		memcpy((void __force *) to, (void __force *) from, n);
		return 0;
	}
	done = 0;
retry:
	spin_lock(&mm->page_table_lock);
	do {
		write_user = 0;
		uaddr = uaddr_from;
		kaddr_from = follow_table(mm, uaddr_from, 0);
		error_code = kaddr_from;
		if (IS_ERR_VALUE(error_code))
			goto fault;

		write_user = 1;
		uaddr = uaddr_to;
		kaddr_to = follow_table(mm, uaddr_to, 1);
		error_code = (unsigned long) kaddr_to;
		if (IS_ERR_VALUE(error_code))
			goto fault;

		offset_max = max(uaddr_from & ~PAGE_MASK,
				 uaddr_to & ~PAGE_MASK);
		size = min(n - done, PAGE_SIZE - offset_max);

		memcpy((void *) kaddr_to, (void *) kaddr_from, size);
		done += size;
		uaddr_from += size;
		uaddr_to += size;
	} while (done < n);
	spin_unlock(&mm->page_table_lock);
	return n - done;
fault:
	spin_unlock(&mm->page_table_lock);
	if (__handle_fault(uaddr, -error_code, write_user))
		return n - done;
	goto retry;
}

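/*
 * Atomic read-modify-write on a user space word: load the old value,
 * apply "insn" to compute the new value, and retry with COMPARE AND
 * SWAP until the update succeeds. Faults are caught via the exception
 * table and reported as -EFAULT in "ret".
 */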
#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg)	\
	asm volatile("0: l %1,0(%6)\n"					\
		     "1: " insn						\
		     "2: cs %1,%2,0(%6)\n"				\
		     "3: jl 1b\n"					\
		     "   lhi %0,0\n"					\
		     "4:\n"						\
		     EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b)	\
		     : "=d" (ret), "=&d" (oldval), "=&d" (newval),	\
		       "=m" (*uaddr)					\
		     : "0" (-EFAULT), "d" (oparg), "a" (uaddr),		\
		       "m" (*uaddr) : "cc" );

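/*
 * Dispatch a futex operation to the matching atomic instruction
 * sequence. On success the previous value of the futex word is
 * returned in *old.
 */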
static int __futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old)
{
	int oldval = 0, newval, ret;

	switch (op) {
	case FUTEX_OP_SET:
		__futex_atomic_op("lr %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	case FUTEX_OP_ADD:
		__futex_atomic_op("lr %2,%1\nar %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	case FUTEX_OP_OR:
		__futex_atomic_op("lr %2,%1\nor %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	case FUTEX_OP_ANDN:
		__futex_atomic_op("lr %2,%1\nnr %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	case FUTEX_OP_XOR:
		__futex_atomic_op("lr %2,%1\nxr %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	default:
		ret = -ENOSYS;
	}
	if (ret == 0)
		*old = oldval;
	return ret;
}

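/*
 * Futex operation with the user address translated by page table walk.
 * The page is pinned with get_page() before the page table lock is
 * dropped, so the kernel mapping stays valid while the operation runs.
 */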
int futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old)
{
	int ret;

	if (segment_eq(get_fs(), KERNEL_DS))
		return __futex_atomic_op_pt(op, uaddr, oparg, old);
	spin_lock(&current->mm->page_table_lock);
	uaddr = (u32 __force __user *)
		__dat_user_addr((__force unsigned long) uaddr, 1);
	if (!uaddr) {
		spin_unlock(&current->mm->page_table_lock);
		return -EFAULT;
	}
	get_page(virt_to_page(uaddr));
	spin_unlock(&current->mm->page_table_lock);
	ret = __futex_atomic_op_pt(op, uaddr, oparg, old);
	put_page(virt_to_page(uaddr));
	return ret;
}

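/*
 * Compare-and-exchange on a user space word with a single COMPARE AND
 * SWAP instruction; the value read from memory is returned in *uval,
 * a fault yields -EFAULT.
 */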
static int __futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr,
				     u32 oldval, u32 newval)
{
	int ret;

	asm volatile("0: cs %1,%4,0(%5)\n"
		     "1: la %0,0\n"
		     "2:\n"
		     EX_TABLE(0b,2b) EX_TABLE(1b,2b)
		     : "=d" (ret), "+d" (oldval), "=m" (*uaddr)
		     : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
		     : "cc", "memory" );
	*uval = oldval;
	return ret;
}

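/*
 * Compare-and-exchange with the user address translated by page table
 * walk, using the same translate-pin-operate pattern as
 * futex_atomic_op_pt() above.
 */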
int futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr,
			    u32 oldval, u32 newval)
{
	int ret;

	if (segment_eq(get_fs(), KERNEL_DS))
		return __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval);
	spin_lock(&current->mm->page_table_lock);
	uaddr = (u32 __force __user *)
		__dat_user_addr((__force unsigned long) uaddr, 1);
	if (!uaddr) {
		spin_unlock(&current->mm->page_table_lock);
		return -EFAULT;
	}
	get_page(virt_to_page(uaddr));
	spin_unlock(&current->mm->page_table_lock);
	ret = __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval);
	put_page(virt_to_page(uaddr));
	return ret;
}

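/*
 * The uaccess method table for the page table walk variant, used when
 * user copy cannot be done by hardware means (see the file header).
 */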
struct uaccess_ops uaccess_pt = {
	.copy_from_user		= copy_from_user_pt,
	.copy_from_user_small	= copy_from_user_pt,
	.copy_to_user		= copy_to_user_pt,
	.copy_to_user_small	= copy_to_user_pt,
	.copy_in_user		= copy_in_user_pt,
	.clear_user		= clear_user_pt,
	.strnlen_user		= strnlen_user_pt,
	.strncpy_from_user	= strncpy_from_user_pt,
	.futex_atomic_op	= futex_atomic_op_pt,
	.futex_atomic_cmpxchg	= futex_atomic_cmpxchg_pt,
};