/*
 *  mm/mprotect.c
 *
 *  (C) Copyright 1994 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 *
 *  Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/mempolicy.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/perf_event.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

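/*
 * Architectures that need to carry bits over from the old protection
 * (e.g. caching attributes) provide their own pgprot_modify(); the
 * fallback below simply returns the new protection unchanged.
 */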
#ifndef pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
        return newprot;
}
#endif

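/*
 * Apply newprot to every pte in the range covered by one pmd, under the
 * page table lock.  Present ptes are rewritten (and left writable if they
 * are already dirty and dirty accounting is in use); write migration
 * entries are downgraded to read migration entries.
 */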
static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
                unsigned long addr, unsigned long end, pgprot_t newprot,
                int dirty_accountable)
{
        pte_t *pte, oldpte;
        spinlock_t *ptl;

        pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
        arch_enter_lazy_mmu_mode();
        do {
                oldpte = *pte;
                if (pte_present(oldpte)) {
                        pte_t ptent;

                        ptent = ptep_modify_prot_start(mm, addr, pte);
                        ptent = pte_modify(ptent, newprot);

                        /*
                         * Avoid taking write faults for pages we know to be
                         * dirty.
                         */
                        if (dirty_accountable && pte_dirty(ptent))
                                ptent = pte_mkwrite(ptent);

                        ptep_modify_prot_commit(mm, addr, pte, ptent);
                } else if (PAGE_MIGRATION && !pte_file(oldpte)) {
                        swp_entry_t entry = pte_to_swp_entry(oldpte);

                        if (is_write_migration_entry(entry)) {
                                /*
                                 * A protection check is difficult so
                                 * just be safe and disable write
                                 */
                                make_migration_entry_read(&entry);
                                set_pte_at(mm, addr, pte,
                                        swp_entry_to_pte(entry));
                        }
                }
        } while (pte++, addr += PAGE_SIZE, addr != end);
        arch_leave_lazy_mmu_mode();
        pte_unmap_unlock(pte - 1, ptl);
}

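/*
 * Walk the pmd entries under one pud, splitting any transparent huge page
 * so the pte-level walk above can update individual entries.
 */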
static inline void change_pmd_range(struct vm_area_struct *vma, pud_t *pud,
                unsigned long addr, unsigned long end, pgprot_t newprot,
                int dirty_accountable)
{
        pmd_t *pmd;
        unsigned long next;

        pmd = pmd_offset(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
                split_huge_page_pmd(vma->vm_mm, pmd);
                if (pmd_none_or_clear_bad(pmd))
                        continue;
                change_pte_range(vma->vm_mm, pmd, addr, next, newprot,
                                 dirty_accountable);
        } while (pmd++, addr = next, addr != end);
}

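/* Walk the pud entries under one pgd and descend to the pmd level. */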
static inline void change_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
                unsigned long addr, unsigned long end, pgprot_t newprot,
                int dirty_accountable)
{
        pud_t *pud;
        unsigned long next;

        pud = pud_offset(pgd, addr);
        do {
                next = pud_addr_end(addr, end);
                if (pud_none_or_clear_bad(pud))
                        continue;
                change_pmd_range(vma, pud, addr, next, newprot,
                                 dirty_accountable);
        } while (pud++, addr = next, addr != end);
}

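/*
 * Rewrite the page table protections for [addr, end) within one vma,
 * flushing the cache before and the TLB after the walk.
 */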
static void change_protection(struct vm_area_struct *vma,
                unsigned long addr, unsigned long end, pgprot_t newprot,
                int dirty_accountable)
{
        struct mm_struct *mm = vma->vm_mm;
        pgd_t *pgd;
        unsigned long next;
        unsigned long start = addr;

        BUG_ON(addr >= end);
        pgd = pgd_offset(mm, addr);
        flush_cache_range(vma, addr, end);
        do {
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd))
                        continue;
                change_pud_range(vma, pgd, addr, next, newprot,
                                 dirty_accountable);
        } while (pgd++, addr = next, addr != end);
        flush_tlb_range(vma, start, end);
}

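/*
 * Apply newflags to the given range of one vma: charge the commit when a
 * private mapping becomes writable, merge or split vmas as needed, then
 * update vm_flags, vm_page_prot and the page tables.
 */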
int
mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
        unsigned long start, unsigned long end, unsigned long newflags)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long oldflags = vma->vm_flags;
        long nrpages = (end - start) >> PAGE_SHIFT;
        unsigned long charged = 0;
        pgoff_t pgoff;
        int error;
        int dirty_accountable = 0;

        if (newflags == oldflags) {
                *pprev = vma;
                return 0;
        }

        /*
         * If we make a private mapping writable we increase our commit;
         * but (without finer accounting) cannot reduce our commit if we
         * make it unwritable again.  hugetlb mappings were accounted for
         * even if read-only, so there is no need to account for them here.
         */
        if (newflags & VM_WRITE) {
                if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
                                                VM_SHARED|VM_NORESERVE))) {
                        charged = nrpages;
                        if (security_vm_enough_memory(charged))
                                return -ENOMEM;
                        newflags |= VM_ACCOUNT;
                }
        }

        /*
         * First try to merge with previous and/or next vma.
         */
        pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
        *pprev = vma_merge(mm, *pprev, start, end, newflags,
                        vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
        if (*pprev) {
                vma = *pprev;
                goto success;
        }

        *pprev = vma;

        if (start != vma->vm_start) {
                error = split_vma(mm, vma, start, 1);
                if (error)
                        goto fail;
        }

        if (end != vma->vm_end) {
                error = split_vma(mm, vma, end, 0);
                if (error)
                        goto fail;
        }

success:
        /*
         * vm_flags and vm_page_prot are protected by the mmap_sem
         * held in write mode.
         */
        vma->vm_flags = newflags;
        vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
                                          vm_get_page_prot(newflags));

        if (vma_wants_writenotify(vma)) {
                vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
                dirty_accountable = 1;
        }

        mmu_notifier_invalidate_range_start(mm, start, end);
        if (is_vm_hugetlb_page(vma))
                hugetlb_change_protection(vma, start, end, vma->vm_page_prot);
        else
                change_protection(vma, start, end, vma->vm_page_prot, dirty_accountable);
        mmu_notifier_invalidate_range_end(mm, start, end);
        vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
        vm_stat_account(mm, newflags, vma->vm_file, nrpages);
        perf_event_mmap(vma);
        return 0;

fail:
        vm_unacct_memory(charged);
        return error;
}

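/*
 * mprotect(2): start must be page aligned and len is rounded up to a page
 * boundary; each vma covering the range is fixed up in turn under a write
 * lock on mmap_sem.
 */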
SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
                unsigned long, prot)
{
        unsigned long vm_flags, nstart, end, tmp, reqprot;
        struct vm_area_struct *vma, *prev;
        int error = -EINVAL;
        const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
        prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
        if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
                return -EINVAL;

        if (start & ~PAGE_MASK)
                return -EINVAL;
        if (!len)
                return 0;
        len = PAGE_ALIGN(len);
        end = start + len;
        if (end <= start)
                return -ENOMEM;
        if (!arch_validate_prot(prot))
                return -EINVAL;

        reqprot = prot;
        /*
         * Does the application expect PROT_READ to imply PROT_EXEC:
         */
        if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
                prot |= PROT_EXEC;

        vm_flags = calc_vm_prot_bits(prot);

        down_write(&current->mm->mmap_sem);

        vma = find_vma_prev(current->mm, start, &prev);
        error = -ENOMEM;
        if (!vma)
                goto out;
        if (unlikely(grows & PROT_GROWSDOWN)) {
                if (vma->vm_start >= end)
                        goto out;
                start = vma->vm_start;
                error = -EINVAL;
                if (!(vma->vm_flags & VM_GROWSDOWN))
                        goto out;
        }
        else {
                if (vma->vm_start > start)
                        goto out;
                if (unlikely(grows & PROT_GROWSUP)) {
                        end = vma->vm_end;
                        error = -EINVAL;
                        if (!(vma->vm_flags & VM_GROWSUP))
                                goto out;
                }
        }
        if (start > vma->vm_start)
                prev = vma;

        for (nstart = start ; ; ) {
                unsigned long newflags;

                /* Here we know that vma->vm_start <= nstart < vma->vm_end. */

                newflags = vm_flags | (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));

                /* newflags >> 4 shifts VM_MAY% in place of VM_% */
                if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
                        error = -EACCES;
                        goto out;
                }

                error = security_file_mprotect(vma, reqprot, prot);
                if (error)
                        goto out;

                tmp = vma->vm_end;
                if (tmp > end)
                        tmp = end;
                error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
                if (error)
                        goto out;
                nstart = tmp;

                if (nstart < prev->vm_end)
                        nstart = prev->vm_end;
                if (nstart >= end)
                        goto out;

                vma = prev->vm_next;
                if (!vma || vma->vm_start != nstart) {
                        error = -ENOMEM;
                        goto out;
                }
        }
out:
        up_write(&current->mm->mmap_sem);
        return error;
}