/*
 * arch/s390/lib/uaccess_pt.c
 *
 * User access functions based on page table walks.
 *
 * Copyright IBM Corp. 2006
 * Author(s): Gerald Schaefer (gerald.schaefer@de.ibm.com)
 */

#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/mm.h>
#include <asm/uaccess.h>
#include <asm/futex.h>

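/*
 * __handle_fault() resolves a fault on a user address that the page
 * table walk in __user_copy_pt() could not translate: it looks up the
 * VMA, expands the stack if necessary, checks the access rights and
 * lets handle_mm_fault() establish the page. Returns 0 on success and
 * -EFAULT if the fault cannot be resolved.
 */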
static inline int __handle_fault(struct mm_struct *mm, unsigned long address,
				 int write_access)
{
	struct vm_area_struct *vma;
	int ret = -EFAULT;

	/* mmap_sem must not be taken in atomic context. */
	if (in_atomic())
		return ret;
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (unlikely(!vma))
		goto out;
	if (unlikely(vma->vm_start > address)) {
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out;
		if (expand_stack(vma, address))
			goto out;
	}

	if (!write_access) {
		/* page not present, check vm flags */
		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
			goto out;
	} else {
		if (!(vma->vm_flags & VM_WRITE))
			goto out;
	}

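	/*
	 * Let the generic VM fault handler establish the page and update
	 * the task's fault counters; SIGBUS and OOM results get special
	 * treatment below.
	 */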
survive:
	switch (handle_mm_fault(mm, vma, address, write_access)) {
	case VM_FAULT_MINOR:
		current->min_flt++;
		break;
	case VM_FAULT_MAJOR:
		current->maj_flt++;
		break;
	case VM_FAULT_SIGBUS:
		goto out_sigbus;
	case VM_FAULT_OOM:
		goto out_of_memory;
	default:
		BUG();
	}
	ret = 0;
out:
	up_read(&mm->mmap_sem);
	return ret;

out_of_memory:
	up_read(&mm->mmap_sem);
	/* Do not kill init; yield and retry in the hope that memory frees up. */
	if (is_init(current)) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	printk("VM: killing process %s\n", current->comm);
	return ret;

out_sigbus:
	up_read(&mm->mmap_sem);
	current->thread.prot_addr = address;
	current->thread.trap_no = 0x11;
	force_sig(SIGBUS, current);
	return ret;
}

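/*
 * __user_copy_pt() copies n bytes between kernel space and the user
 * address uaddr by walking the page tables by hand. The physical
 * address derived from the pte is used directly for memcpy(), which
 * relies on the kernel's identity mapping of physical memory on s390.
 * Returns the number of bytes that could not be copied.
 */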
static inline size_t __user_copy_pt(unsigned long uaddr, void *kptr,
				    size_t n, int write_user)
{
	struct mm_struct *mm = current->mm;
	unsigned long offset, pfn, done, size;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	void *from, *to;

	done = 0;
retry:
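	/*
	 * Hold page_table_lock while walking the page tables so the
	 * mapping cannot change underneath us during the copy.
	 */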
	spin_lock(&mm->page_table_lock);
	do {
		pgd = pgd_offset(mm, uaddr);
		if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
			goto fault;

		pmd = pmd_offset(pgd, uaddr);
		if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
			goto fault;

		pte = pte_offset_map(pmd, uaddr);
		if (!pte || !pte_present(*pte) ||
		    (write_user && !pte_write(*pte)))
			goto fault;

		pfn = pte_pfn(*pte);
		if (!pfn_valid(pfn))
			goto out;

		/* Copy at most up to the end of the current page. */
		offset = uaddr & (PAGE_SIZE - 1);
		size = min(n - done, PAGE_SIZE - offset);
		if (write_user) {
			to = (void *)((pfn << PAGE_SHIFT) + offset);
			from = kptr + done;
		} else {
			from = (void *)((pfn << PAGE_SHIFT) + offset);
			to = kptr + done;
		}
		memcpy(to, from, size);
		done += size;
		uaddr += size;
	} while (done < n);
out:
	spin_unlock(&mm->page_table_lock);
	return n - done;
fault:
	spin_unlock(&mm->page_table_lock);
	/* Try to resolve the fault, then restart the walk at uaddr. */
	if (__handle_fault(mm, uaddr, write_user))
		return n - done;
	goto retry;
}

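/*
 * copy_from_user_pt() - page table based variant of copy_from_user().
 * On KERNEL_DS the source is a kernel pointer and a plain memcpy()
 * suffices. As with copy_from_user(), any uncopied bytes at the end
 * of the destination buffer are zeroed.
 */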
size_t copy_from_user_pt(size_t n, const void __user *from, void *to)
{
	size_t rc;

	if (segment_eq(get_fs(), KERNEL_DS)) {
		memcpy(to, (void __kernel __force *) from, n);
		return 0;
	}
	rc = __user_copy_pt((unsigned long) from, to, n, 0);
	if (unlikely(rc))
		memset(to + n - rc, 0, rc);
	return rc;
}

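/*
 * copy_to_user_pt() - page table based variant of copy_to_user().
 * Returns the number of bytes that could not be copied.
 */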
size_t copy_to_user_pt(size_t n, void __user *to, const void *from)
{
	if (segment_eq(get_fs(), KERNEL_DS)) {
		memcpy((void __kernel __force *) to, from, n);
		return 0;
	}
	return __user_copy_pt((unsigned long) to, (void *) from, n, 1);
}
157}