/*
 *  mm/mprotect.c
 *
 *  (C) Copyright 1994 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 *
 *  Address space accounting code	<alan@redhat.com>
 *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/mempolicy.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#ifndef pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	return newprot;
}
#endif

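/*
 * Walk the ptes covered by one pmd and rewrite each present entry with
 * the new protection, holding the page-table lock for the range.  Under
 * CONFIG_MIGRATION, write migration entries are downgraded to read-only
 * since checking the new protection against them is difficult.
 */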
static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable)
{
	pte_t *pte, oldpte;
	spinlock_t *ptl;

	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	arch_enter_lazy_mmu_mode();
	do {
		oldpte = *pte;
		if (pte_present(oldpte)) {
			pte_t ptent;

			/* Avoid an SMP race with hardware updated dirty/clean
			 * bits by wiping the pte and then setting the new pte
			 * into place.
			 */
			ptent = ptep_get_and_clear(mm, addr, pte);
			ptent = pte_modify(ptent, newprot);
			/*
			 * Avoid taking write faults for pages we know to be
			 * dirty.
			 */
			if (dirty_accountable && pte_dirty(ptent))
				ptent = pte_mkwrite(ptent);
			set_pte_at(mm, addr, pte, ptent);
#ifdef CONFIG_MIGRATION
		} else if (!pte_file(oldpte)) {
			swp_entry_t entry = pte_to_swp_entry(oldpte);

			if (is_write_migration_entry(entry)) {
				/*
				 * A protection check is difficult so
				 * just be safe and disable write
				 */
				make_migration_entry_read(&entry);
				set_pte_at(mm, addr, pte,
					swp_entry_to_pte(entry));
			}
#endif
		}

	} while (pte++, addr += PAGE_SIZE, addr != end);
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);
}

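/*
 * The pmd and pud walkers below simply descend the page-table hierarchy,
 * skipping empty or bad entries and handing each populated range down to
 * the next level, ending in change_pte_range() above.
 */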
static inline void change_pmd_range(struct mm_struct *mm, pud_t *pud,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		change_pte_range(mm, pmd, addr, next, newprot, dirty_accountable);
	} while (pmd++, addr = next, addr != end);
}

static inline void change_pud_range(struct mm_struct *mm, pgd_t *pgd,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		change_pmd_range(mm, pud, addr, next, newprot, dirty_accountable);
	} while (pud++, addr = next, addr != end);
}

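/*
 * Top-level page-table walk for one vma range: flush the cache for the
 * range, update every mapping level via the pgd/pud/pmd/pte walkers, then
 * flush the TLB so stale translations with the old protection disappear.
 */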
static void change_protection(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	unsigned long next;
	unsigned long start = addr;

	BUG_ON(addr >= end);
	pgd = pgd_offset(mm, addr);
	flush_cache_range(vma, addr, end);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		change_pud_range(mm, pgd, addr, next, newprot, dirty_accountable);
	} while (pgd++, addr = next, addr != end);
	flush_tlb_range(vma, start, end);
}

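/*
 * Apply newflags to the range [start, end) of one vma: account the extra
 * commit when a private mapping becomes writable, merge or split vmas so
 * the range is covered exactly, then rewrite vm_flags, vm_page_prot and
 * the page tables (via change_protection() or the hugetlb helper).
 */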
int
mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
	unsigned long start, unsigned long end, unsigned long newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long oldflags = vma->vm_flags;
	long nrpages = (end - start) >> PAGE_SHIFT;
	unsigned long charged = 0;
	pgoff_t pgoff;
	int error;
	int dirty_accountable = 0;

	if (newflags == oldflags) {
		*pprev = vma;
		return 0;
	}

	/*
	 * If we make a private mapping writable we increase our commit;
	 * but (without finer accounting) cannot reduce our commit if we
	 * make it unwritable again.
	 *
	 * FIXME? We haven't defined a VM_NORESERVE flag, so mprotecting
	 * a MAP_NORESERVE private mapping to writable will now reserve.
	 */
	if (newflags & VM_WRITE) {
		if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_SHARED))) {
			charged = nrpages;
			if (security_vm_enough_memory(charged))
				return -ENOMEM;
			newflags |= VM_ACCOUNT;
		}
	}

	/*
	 * First try to merge with previous and/or next vma.
	 */
	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*pprev = vma_merge(mm, *pprev, start, end, newflags,
			vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
	if (*pprev) {
		vma = *pprev;
		goto success;
	}

	*pprev = vma;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto fail;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto fail;
	}

success:
	/*
	 * vm_flags and vm_page_prot are protected by the mmap_sem
	 * held in write mode.
	 */
	vma->vm_flags = newflags;
	vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
					  vm_get_page_prot(newflags));

	if (vma_wants_writenotify(vma)) {
		vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
		dirty_accountable = 1;
	}

	if (is_vm_hugetlb_page(vma))
		hugetlb_change_protection(vma, start, end, vma->vm_page_prot);
	else
		change_protection(vma, start, end, vma->vm_page_prot, dirty_accountable);
	vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
	vm_stat_account(mm, newflags, vma->vm_file, nrpages);
	return 0;

fail:
	vm_unacct_memory(charged);
	return error;
}

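/*
 * The mprotect(2) entry point: validate the arguments, honour
 * READ_IMPLIES_EXEC, then walk the vmas covering [start, start+len) with
 * mmap_sem held for writing and apply mprotect_fixup() to each piece,
 * returning -ENOMEM if a hole is found in the range.
 */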
asmlinkage long
sys_mprotect(unsigned long start, size_t len, unsigned long prot)
{
	unsigned long vm_flags, nstart, end, tmp, reqprot;
	struct vm_area_struct *vma, *prev;
	int error = -EINVAL;
	const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
	prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
	if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
		return -EINVAL;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	if (!len)
		return 0;
	len = PAGE_ALIGN(len);
	end = start + len;
	if (end <= start)
		return -ENOMEM;
	if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM))
		return -EINVAL;

	reqprot = prot;
	/*
	 * Does the application expect PROT_READ to imply PROT_EXEC:
	 */
	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
		prot |= PROT_EXEC;

	vm_flags = calc_vm_prot_bits(prot);

	down_write(&current->mm->mmap_sem);

	vma = find_vma_prev(current->mm, start, &prev);
	error = -ENOMEM;
	if (!vma)
		goto out;
	if (unlikely(grows & PROT_GROWSDOWN)) {
		if (vma->vm_start >= end)
			goto out;
		start = vma->vm_start;
		error = -EINVAL;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out;
	}
	else {
		if (vma->vm_start > start)
			goto out;
		if (unlikely(grows & PROT_GROWSUP)) {
			end = vma->vm_end;
			error = -EINVAL;
			if (!(vma->vm_flags & VM_GROWSUP))
				goto out;
		}
	}
	if (start > vma->vm_start)
		prev = vma;

	for (nstart = start ; ; ) {
		unsigned long newflags;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		newflags = vm_flags | (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));

		/* newflags >> 4 shift VM_MAY% in place of VM_% */
		if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
			error = -EACCES;
			goto out;
		}

		error = security_file_mprotect(vma, reqprot, prot);
		if (error)
			goto out;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			goto out;
		nstart = tmp;

		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			goto out;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			goto out;
		}
	}
out:
	up_write(&current->mm->mmap_sem);
	return error;
}
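
/*
 * For reference only (not part of the kernel sources): a minimal,
 * hypothetical user-space caller illustrating the rules sys_mprotect
 * enforces above -- the start address must be page aligned and the length
 * is rounded up to whole pages.  The buffer name and the 4096-byte page
 * size are assumptions made for the example.
 *
 *	#include <sys/mman.h>
 *	#include <stdlib.h>
 *
 *	int main(void)
 *	{
 *		void *buf;
 *
 *		if (posix_memalign(&buf, 4096, 4096))
 *			return 1;
 *		if (mprotect(buf, 4096, PROT_READ) == -1)
 *			return 1;
 *		return 0;
 *	}
 */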