/*
 *  mm/mprotect.c
 *
 *  (C) Copyright 1994 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 *
 *  Address space accounting code	<alan@redhat.com>
 *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/mempolicy.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

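/*
 * Walk the ptes mapped by @pmd over [addr, end), rewriting each present
 * pte with @newprot.  With @dirty_accountable set, ptes that are already
 * dirty are left writable, so dirty-page accounting does not pay for a
 * spurious write fault.  Under CONFIG_MIGRATION, a non-present, non-file
 * pte may be a migration entry; write migration entries are downgraded
 * to read, since checking the old protection here is hard.
 */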
static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
                unsigned long addr, unsigned long end, pgprot_t newprot,
                int dirty_accountable)
{
        pte_t *pte, oldpte;
        spinlock_t *ptl;

        pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
        do {
                oldpte = *pte;
                if (pte_present(oldpte)) {
                        pte_t ptent;

                        /*
                         * Avoid an SMP race with hardware-updated dirty/clean
                         * bits by wiping the pte and then setting the new pte
                         * into place.
                         */
                        ptent = ptep_get_and_clear(mm, addr, pte);
                        ptent = pte_modify(ptent, newprot);
                        /*
                         * Avoid taking write faults for pages we know to be
                         * dirty.
                         */
                        if (dirty_accountable && pte_dirty(ptent))
                                ptent = pte_mkwrite(ptent);
                        set_pte_at(mm, addr, pte, ptent);
                        lazy_mmu_prot_update(ptent);
#ifdef CONFIG_MIGRATION
                } else if (!pte_file(oldpte)) {
                        swp_entry_t entry = pte_to_swp_entry(oldpte);

                        if (is_write_migration_entry(entry)) {
                                /*
                                 * A protection check is difficult, so
                                 * just be safe and disable write.
                                 */
                                make_migration_entry_read(&entry);
                                set_pte_at(mm, addr, pte,
                                                swp_entry_to_pte(entry));
                        }
#endif
                }
        } while (pte++, addr += PAGE_SIZE, addr != end);
        pte_unmap_unlock(pte - 1, ptl);
}

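/*
 * Walk the pmds below @pud over [addr, end), calling change_pte_range()
 * on each pmd that is actually populated.
 */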
static inline void change_pmd_range(struct mm_struct *mm, pud_t *pud,
                unsigned long addr, unsigned long end, pgprot_t newprot,
                int dirty_accountable)
{
        pmd_t *pmd;
        unsigned long next;

        pmd = pmd_offset(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
                if (pmd_none_or_clear_bad(pmd))
                        continue;
                change_pte_range(mm, pmd, addr, next, newprot, dirty_accountable);
        } while (pmd++, addr = next, addr != end);
}

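/*
 * Walk the puds below @pgd over [addr, end), calling change_pmd_range()
 * on each pud that is actually populated.
 */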
static inline void change_pud_range(struct mm_struct *mm, pgd_t *pgd,
                unsigned long addr, unsigned long end, pgprot_t newprot,
                int dirty_accountable)
{
        pud_t *pud;
        unsigned long next;

        pud = pud_offset(pgd, addr);
        do {
                next = pud_addr_end(addr, end);
                if (pud_none_or_clear_bad(pud))
                        continue;
                change_pmd_range(mm, pud, addr, next, newprot, dirty_accountable);
        } while (pud++, addr = next, addr != end);
}

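/*
 * Top of the page-table walk: rewrite every pte mapping [addr, end) in
 * @vma with @newprot.  Caches are flushed for the range before the walk
 * and the TLB range is flushed afterwards, so no translation carrying
 * the old protections can outlive this call.
 */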
static void change_protection(struct vm_area_struct *vma,
                unsigned long addr, unsigned long end, pgprot_t newprot,
                int dirty_accountable)
{
        struct mm_struct *mm = vma->vm_mm;
        pgd_t *pgd;
        unsigned long next;
        unsigned long start = addr;

        BUG_ON(addr >= end);
        pgd = pgd_offset(mm, addr);
        flush_cache_range(vma, addr, end);
        do {
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd))
                        continue;
                change_pud_range(mm, pgd, addr, next, newprot, dirty_accountable);
        } while (pgd++, addr = next, addr != end);
        flush_tlb_range(vma, start, end);
}

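/*
 * Apply @newflags to the range [start, end), which must lie within
 * @vma.  Charge the commit when a previously unwritable private mapping
 * is made writable, try to merge with the neighbouring vmas, split @vma
 * at the boundaries if merging fails, then update vm_flags/vm_page_prot
 * and rewrite the page tables (hugetlb vmas take their own path).  On
 * success *pprev is the vma now covering the range.
 */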
static int
mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
        unsigned long start, unsigned long end, unsigned long newflags)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long oldflags = vma->vm_flags;
        long nrpages = (end - start) >> PAGE_SHIFT;
        unsigned long charged = 0;
        pgoff_t pgoff;
        int error;
        int dirty_accountable = 0;

        if (newflags == oldflags) {
                *pprev = vma;
                return 0;
        }

        /*
         * If we make a private mapping writable we increase our commit;
         * but (without finer accounting) cannot reduce our commit if we
         * make it unwritable again.
         *
         * FIXME? We haven't defined a VM_NORESERVE flag, so mprotecting
         * a MAP_NORESERVE private mapping to writable will now reserve.
         */
        if (newflags & VM_WRITE) {
                if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_SHARED))) {
                        charged = nrpages;
                        if (security_vm_enough_memory(charged))
                                return -ENOMEM;
                        newflags |= VM_ACCOUNT;
                }
        }

        /*
         * First try to merge with previous and/or next vma.
         */
        pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
        *pprev = vma_merge(mm, *pprev, start, end, newflags,
                        vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
        if (*pprev) {
                vma = *pprev;
                goto success;
        }

        *pprev = vma;

        if (start != vma->vm_start) {
                error = split_vma(mm, vma, start, 1);
                if (error)
                        goto fail;
        }

        if (end != vma->vm_end) {
                error = split_vma(mm, vma, end, 0);
                if (error)
                        goto fail;
        }

success:
        /*
         * vm_flags and vm_page_prot are protected by the mmap_sem
         * held in write mode.
         */
        vma->vm_flags = newflags;
        vma->vm_page_prot = protection_map[newflags &
                (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
        if (vma_wants_writenotify(vma)) {
                vma->vm_page_prot = protection_map[newflags &
                        (VM_READ|VM_WRITE|VM_EXEC)];
                dirty_accountable = 1;
        }

        if (is_vm_hugetlb_page(vma))
                hugetlb_change_protection(vma, start, end, vma->vm_page_prot);
        else
                change_protection(vma, start, end, vma->vm_page_prot, dirty_accountable);
        vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
        vm_stat_account(mm, newflags, vma->vm_file, nrpages);
        return 0;

fail:
        vm_unacct_memory(charged);
        return error;
}

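/*
 * mprotect(2): change the protection of [start, start+len).  start must
 * be page-aligned; len is rounded up to a whole number of pages.
 * PROT_GROWSDOWN / PROT_GROWSUP extend the affected range to the start
 * of a grows-down vma (typically a stack) or to the end of a grows-up
 * vma.
 *
 * An illustrative userspace call sequence (error handling omitted):
 *
 *	char *buf = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	mprotect(buf, 4096, PROT_READ);
 *
 * after which a store through buf faults with SIGSEGV.
 */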
asmlinkage long
sys_mprotect(unsigned long start, size_t len, unsigned long prot)
{
        unsigned long vm_flags, nstart, end, tmp, reqprot;
        struct vm_area_struct *vma, *prev;
        int error = -EINVAL;
        const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);

        prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
        if (grows == (PROT_GROWSDOWN|PROT_GROWSUP))	/* can't be both */
                return -EINVAL;

        if (start & ~PAGE_MASK)
                return -EINVAL;
        if (!len)
                return 0;
        len = PAGE_ALIGN(len);
        end = start + len;
        if (end <= start)
                return -ENOMEM;
        if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM))
                return -EINVAL;

        reqprot = prot;
        /*
         * Does the application expect PROT_READ to imply PROT_EXEC?
         */
        if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
                prot |= PROT_EXEC;

        vm_flags = calc_vm_prot_bits(prot);

        down_write(&current->mm->mmap_sem);

        vma = find_vma_prev(current->mm, start, &prev);
        error = -ENOMEM;
        if (!vma)
                goto out;
        if (unlikely(grows & PROT_GROWSDOWN)) {
                if (vma->vm_start >= end)
                        goto out;
                start = vma->vm_start;
                error = -EINVAL;
                if (!(vma->vm_flags & VM_GROWSDOWN))
                        goto out;
        } else {
                if (vma->vm_start > start)
                        goto out;
                if (unlikely(grows & PROT_GROWSUP)) {
                        end = vma->vm_end;
                        error = -EINVAL;
                        if (!(vma->vm_flags & VM_GROWSUP))
                                goto out;
                }
        }
        if (start > vma->vm_start)
                prev = vma;

        for (nstart = start ; ; ) {
                unsigned long newflags;

                /* Here we know that vma->vm_start <= nstart < vma->vm_end. */

                newflags = vm_flags | (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));

                /* newflags >> 4 shifts the VM_MAY* bits down over VM_* */
                if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
                        error = -EACCES;
                        goto out;
                }

                error = security_file_mprotect(vma, reqprot, prot);
                if (error)
                        goto out;

                tmp = vma->vm_end;
                if (tmp > end)
                        tmp = end;
                error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
                if (error)
                        goto out;
                nstart = tmp;

                if (nstart < prev->vm_end)
                        nstart = prev->vm_end;
                if (nstart >= end)
                        goto out;

                vma = prev->vm_next;
                if (!vma || vma->vm_start != nstart) {
                        error = -ENOMEM;
                        goto out;
                }
        }
out:
        up_write(&current->mm->mmap_sem);
        return error;
}