/*
 *  mm/mprotect.c
 *
 *  (C) Copyright 1994 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 *
 *  Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/mempolicy.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/perf_event.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

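/*
 * Fallback for architectures that do not supply their own pgprot_modify():
 * take the new protection wholesale.  An architecture can override this to
 * carry bits over from the old protection (x86, for instance, preserves
 * its PAT caching bits here).
 */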
#ifndef pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	return newprot;
}
#endif

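/*
 * Bottom level of the page-table walk: with the PTE lock held, rewrite
 * each present pte with the new protection.  Writable migration entries
 * are downgraded to read-only ones, since a protection check against a
 * swap entry is not practical here.
 */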
static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable)
{
	pte_t *pte, oldpte;
	spinlock_t *ptl;

	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	arch_enter_lazy_mmu_mode();
	do {
		oldpte = *pte;
		if (pte_present(oldpte)) {
			pte_t ptent;

			ptent = ptep_modify_prot_start(mm, addr, pte);
			ptent = pte_modify(ptent, newprot);

			/*
			 * Avoid taking write faults for pages we know to be
			 * dirty.
			 */
			if (dirty_accountable && pte_dirty(ptent))
				ptent = pte_mkwrite(ptent);

			ptep_modify_prot_commit(mm, addr, pte, ptent);
		} else if (IS_ENABLED(CONFIG_MIGRATION) && !pte_file(oldpte)) {
			swp_entry_t entry = pte_to_swp_entry(oldpte);

			if (is_write_migration_entry(entry)) {
				/*
				 * A protection check is difficult so
				 * just be safe and disable write
				 */
				make_migration_entry_read(&entry);
				set_pte_at(mm, addr, pte,
					swp_entry_to_pte(entry));
			}
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);
}

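/*
 * The pmd level additionally has to deal with transparent huge pages: a
 * huge pmd that exactly covers [addr, next) can be changed in place;
 * anything smaller is split back to ordinary ptes first.
 */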
static inline void change_pmd_range(struct vm_area_struct *vma, pud_t *pud,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_trans_huge(*pmd)) {
			if (next - addr != HPAGE_PMD_SIZE)
				split_huge_page_pmd(vma->vm_mm, pmd);
			else if (change_huge_pmd(vma, pmd, addr, newprot))
				continue;
			/* fall through */
		}
		if (pmd_none_or_clear_bad(pmd))
			continue;
		change_pte_range(vma->vm_mm, pmd, addr, next, newprot,
				 dirty_accountable);
	} while (pmd++, addr = next, addr != end);
}

static inline void change_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		change_pmd_range(vma, pud, addr, next, newprot,
				 dirty_accountable);
	} while (pud++, addr = next, addr != end);
}

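/*
 * Top of the walk: flush the cache for the whole range, rewrite the page
 * tables level by level, then flush the TLB so the old protection cannot
 * keep being used through stale entries.
 */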
static void change_protection(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	unsigned long next;
	unsigned long start = addr;

	BUG_ON(addr >= end);
	pgd = pgd_offset(mm, addr);
	flush_cache_range(vma, addr, end);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		change_pud_range(vma, pgd, addr, next, newprot,
				 dirty_accountable);
	} while (pgd++, addr = next, addr != end);
	flush_tlb_range(vma, start, end);
}

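/*
 * Apply @newflags to the [start, end) slice of @vma: settle commit
 * accounting, merge with neighbouring vmas or split @vma so the slice has
 * a vma of its own, then rewrite the page tables.  Caller must hold
 * mmap_sem for writing.
 */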
int
mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
	unsigned long start, unsigned long end, unsigned long newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long oldflags = vma->vm_flags;
	long nrpages = (end - start) >> PAGE_SHIFT;
	unsigned long charged = 0;
	pgoff_t pgoff;
	int error;
	int dirty_accountable = 0;

	if (newflags == oldflags) {
		*pprev = vma;
		return 0;
	}

	/*
	 * If we make a private mapping writable we increase our commit;
	 * but (without finer accounting) cannot reduce our commit if we
	 * make it unwritable again.  hugetlb mappings were accounted for
	 * even if read-only, so there is no need to account for them here.
	 */
	if (newflags & VM_WRITE) {
		if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
						VM_SHARED|VM_NORESERVE))) {
			charged = nrpages;
			if (security_vm_enough_memory(charged))
				return -ENOMEM;
			newflags |= VM_ACCOUNT;
		}
	}

	/*
	 * First try to merge with previous and/or next vma.
	 */
	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*pprev = vma_merge(mm, *pprev, start, end, newflags,
			vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
	if (*pprev) {
		vma = *pprev;
		goto success;
	}

	*pprev = vma;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto fail;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto fail;
	}

success:
	/*
	 * vm_flags and vm_page_prot are protected by the mmap_sem
	 * held in write mode.
	 */
	vma->vm_flags = newflags;
	vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
					  vm_get_page_prot(newflags));

	if (vma_wants_writenotify(vma)) {
		vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
		dirty_accountable = 1;
	}

	mmu_notifier_invalidate_range_start(mm, start, end);
	if (is_vm_hugetlb_page(vma))
		hugetlb_change_protection(vma, start, end, vma->vm_page_prot);
	else
		change_protection(vma, start, end, vma->vm_page_prot, dirty_accountable);
	mmu_notifier_invalidate_range_end(mm, start, end);
	vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
	vm_stat_account(mm, newflags, vma->vm_file, nrpages);
	perf_event_mmap(vma);
	return 0;

fail:
	vm_unacct_memory(charged);
	return error;
}

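/*
 * The mprotect(2) entry point: validate and page-align the request,
 * translate the PROT_* bits into VM_* flags, then apply mprotect_fixup()
 * to each vma covering [start, start+len).  Any hole in the range fails
 * the call with -ENOMEM.
 */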
SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
		unsigned long, prot)
{
	unsigned long vm_flags, nstart, end, tmp, reqprot;
	struct vm_area_struct *vma, *prev;
	int error = -EINVAL;
	const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);

	prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
	if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
		return -EINVAL;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	if (!len)
		return 0;
	len = PAGE_ALIGN(len);
	end = start + len;
	if (end <= start)
		return -ENOMEM;
	if (!arch_validate_prot(prot))
		return -EINVAL;

	reqprot = prot;
	/*
	 * Does the application expect PROT_READ to imply PROT_EXEC?
	 */
	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
		prot |= PROT_EXEC;

	vm_flags = calc_vm_prot_bits(prot);

	down_write(&current->mm->mmap_sem);

	vma = find_vma(current->mm, start);
	error = -ENOMEM;
	if (!vma)
		goto out;
	prev = vma->vm_prev;
	if (unlikely(grows & PROT_GROWSDOWN)) {
		if (vma->vm_start >= end)
			goto out;
		start = vma->vm_start;
		error = -EINVAL;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out;
	} else {
		if (vma->vm_start > start)
			goto out;
		if (unlikely(grows & PROT_GROWSUP)) {
			end = vma->vm_end;
			error = -EINVAL;
			if (!(vma->vm_flags & VM_GROWSUP))
				goto out;
		}
	}
	if (start > vma->vm_start)
		prev = vma;

	for (nstart = start ; ; ) {
		unsigned long newflags;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		newflags = vm_flags | (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));

		/* newflags >> 4 shifts VM_MAY% in place of VM_% */
		if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
			error = -EACCES;
			goto out;
		}

		error = security_file_mprotect(vma, reqprot, prot);
		if (error)
			goto out;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			goto out;
		nstart = tmp;

		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			goto out;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			goto out;
		}
	}
out:
	up_write(&current->mm->mmap_sem);
	return error;
}
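
/*
 * A minimal userspace sketch of the behaviour implemented above (not part
 * of this file; it assumes 4 KiB pages for brevity, where a real program
 * would use sysconf(_SC_PAGESIZE)): map an anonymous page read-write,
 * fill it, then drop the write permission.  After the mprotect() call a
 * store to the page raises SIGSEGV, while loads keep working.
 *
 *	#include <string.h>
 *	#include <sys/mman.h>
 *
 *	int main(void)
 *	{
 *		size_t len = 4096;
 *		char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *		if (p == MAP_FAILED)
 *			return 1;
 *		strcpy(p, "hello");
 *		if (mprotect(p, len, PROT_READ))
 *			return 1;
 *		return p[0] != 'h';
 *	}
 */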