/*
 *  mm/mprotect.c
 *
 *  (C) Copyright 1994 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 *
 *  Address space accounting code	<alan@redhat.com>
 *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/mempolicy.h>
#include <linux/personality.h>
#include <linux/syscalls.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

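/*
 * Lowest level of the walk: rewrite every present pte in [addr, end)
 * under the page-table lock so that it carries the new protection bits.
 */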
static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
		unsigned long addr, unsigned long end, pgprot_t newprot)
{
	pte_t *pte;
	spinlock_t *ptl;

	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	do {
		if (pte_present(*pte)) {
			pte_t ptent;

			/* Avoid an SMP race with hardware updated dirty/clean
			 * bits by wiping the pte and then setting the new pte
			 * into place.
			 */
			ptent = pte_modify(ptep_get_and_clear(mm, addr, pte), newprot);
			set_pte_at(mm, addr, pte, ptent);
			lazy_mmu_prot_update(ptent);
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(pte - 1, ptl);
}

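/*
 * Walk the pmd entries covering [addr, end), skipping entries that are
 * none or bad, and update the ptes beneath each populated entry.
 */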
static inline void change_pmd_range(struct mm_struct *mm, pud_t *pud,
		unsigned long addr, unsigned long end, pgprot_t newprot)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		change_pte_range(mm, pmd, addr, next, newprot);
	} while (pmd++, addr = next, addr != end);
}

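/*
 * Same pattern one level up: walk the pud entries and descend into
 * each populated pmd range.
 */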
static inline void change_pud_range(struct mm_struct *mm, pgd_t *pgd,
		unsigned long addr, unsigned long end, pgprot_t newprot)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		change_pmd_range(mm, pud, addr, next, newprot);
	} while (pud++, addr = next, addr != end);
}

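/*
 * Top of the page-table walk: flush the cache for the range, rewrite
 * every entry from the pgd down with the new protection, then flush
 * the TLB for the range.
 */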
static void change_protection(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end, pgprot_t newprot)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	unsigned long next;
	unsigned long start = addr;

	BUG_ON(addr >= end);
	pgd = pgd_offset(mm, addr);
	flush_cache_range(vma, addr, end);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		change_pud_range(mm, pgd, addr, next, newprot);
	} while (pgd++, addr = next, addr != end);
	flush_tlb_range(vma, start, end);
}

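/*
 * Apply newflags to the [start, end) portion of one vma: charge the
 * commit when a private mapping becomes writable, merge with a
 * neighbouring vma or split this one as needed, then rewrite vm_flags,
 * vm_page_prot, the page tables and the per-mm statistics.
 */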
static int
mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
	unsigned long start, unsigned long end, unsigned long newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long oldflags = vma->vm_flags;
	long nrpages = (end - start) >> PAGE_SHIFT;
	unsigned long charged = 0;
	pgprot_t newprot;
	pgoff_t pgoff;
	int error;

	if (newflags == oldflags) {
		*pprev = vma;
		return 0;
	}

	/*
	 * If we make a private mapping writable we increase our commit;
	 * but (without finer accounting) cannot reduce our commit if we
	 * make it unwritable again.
	 *
	 * FIXME? We haven't defined a VM_NORESERVE flag, so mprotecting
	 * a MAP_NORESERVE private mapping to writable will now reserve.
	 */
	if (newflags & VM_WRITE) {
		if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_SHARED|VM_HUGETLB))) {
			charged = nrpages;
			if (security_vm_enough_memory(charged))
				return -ENOMEM;
			newflags |= VM_ACCOUNT;
		}
	}

	newprot = protection_map[newflags & 0xf];

	/*
	 * First try to merge with previous and/or next vma.
	 */
	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*pprev = vma_merge(mm, *pprev, start, end, newflags,
			vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
	if (*pprev) {
		vma = *pprev;
		goto success;
	}

	*pprev = vma;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto fail;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto fail;
	}

success:
	/*
	 * vm_flags and vm_page_prot are protected by the mmap_sem
	 * held in write mode.
	 */
	vma->vm_flags = newflags;
	vma->vm_page_prot = newprot;
	change_protection(vma, start, end, newprot);
	vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
	vm_stat_account(mm, newflags, vma->vm_file, nrpages);
	return 0;

fail:
	vm_unacct_memory(charged);
	return error;
}

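/*
 * mprotect(2) entry point: validate the arguments, then walk the vmas
 * covering [start, start+len) with mmap_sem held for writing, checking
 * permissions and calling mprotect_fixup() on each vma in turn.
 */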
asmlinkage long
sys_mprotect(unsigned long start, size_t len, unsigned long prot)
{
	unsigned long vm_flags, nstart, end, tmp, reqprot;
	struct vm_area_struct *vma, *prev;
	int error = -EINVAL;
	const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
	prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
	if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
		return -EINVAL;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	if (!len)
		return 0;
	len = PAGE_ALIGN(len);
	end = start + len;
	if (end <= start)
		return -ENOMEM;
	if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM))
		return -EINVAL;

	reqprot = prot;
	/*
	 * Does the application expect PROT_READ to imply PROT_EXEC:
	 */
	if (unlikely((prot & PROT_READ) &&
			(current->personality & READ_IMPLIES_EXEC)))
		prot |= PROT_EXEC;

	vm_flags = calc_vm_prot_bits(prot);

	down_write(&current->mm->mmap_sem);

	vma = find_vma_prev(current->mm, start, &prev);
	error = -ENOMEM;
	if (!vma)
		goto out;
	if (unlikely(grows & PROT_GROWSDOWN)) {
		if (vma->vm_start >= end)
			goto out;
		start = vma->vm_start;
		error = -EINVAL;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out;
	}
	else {
		if (vma->vm_start > start)
			goto out;
		if (unlikely(grows & PROT_GROWSUP)) {
			end = vma->vm_end;
			error = -EINVAL;
			if (!(vma->vm_flags & VM_GROWSUP))
				goto out;
		}
	}
	if (start > vma->vm_start)
		prev = vma;

	for (nstart = start ; ; ) {
		unsigned long newflags;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		if (is_vm_hugetlb_page(vma)) {
			error = -EACCES;
			goto out;
		}

		newflags = vm_flags | (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));

		/* newflags >> 4 shifts VM_MAY% into the place of VM_%, so this
		 * rejects any requested permission not allowed by its VM_MAY% bit */
		if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
			error = -EACCES;
			goto out;
		}

		error = security_file_mprotect(vma, reqprot, prot);
		if (error)
			goto out;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			goto out;
		nstart = tmp;

		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			goto out;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			goto out;
		}
	}
out:
	up_write(&current->mm->mmap_sem);
	return error;
}