/*
 * mm/mprotect.c
 *
 * (C) Copyright 1994 Linus Torvalds
 * (C) Copyright 2002 Christoph Hellwig
 *
 * Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 * (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/mempolicy.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/perf_event.h>
#include <linux/ksm.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include "internal.h"

/*
 * For a prot_numa update we only hold mmap_sem for read so there is a
 * potential race with faulting where a pmd was temporarily none. This
 * function checks for a transhuge pmd under the appropriate lock. It
 * returns a pte if it was successfully locked or NULL if it raced with
 * a transhuge insertion.
 */
static pte_t *lock_pte_protection(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, int prot_numa, spinlock_t **ptl)
{
	pte_t *pte;
	spinlock_t *pmdl;

	/* !prot_numa is protected by mmap_sem held for write */
	if (!prot_numa)
		return pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl);

	pmdl = pmd_lock(vma->vm_mm, pmd);
	if (unlikely(pmd_trans_huge(*pmd) || pmd_none(*pmd))) {
		spin_unlock(pmdl);
		return NULL;
	}

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl);
	spin_unlock(pmdl);
	return pte;
}

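/*
 * Walk the ptes mapped by one pmd over [addr, end) and rewrite each
 * present entry with the new protection; write migration entries are
 * conservatively downgraded to read. Returns the number of entries
 * updated, which callers use to decide whether a TLB flush is needed.
 */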
static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable, int prot_numa)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *pte, oldpte;
	spinlock_t *ptl;
	unsigned long pages = 0;

	pte = lock_pte_protection(vma, pmd, addr, prot_numa, &ptl);
	if (!pte)
		return 0;

	arch_enter_lazy_mmu_mode();
	do {
		oldpte = *pte;
		if (pte_present(oldpte)) {
			pte_t ptent;
			bool preserve_write = prot_numa && pte_write(oldpte);

			/*
			 * Avoid trapping faults against the zero or KSM
			 * pages. See similar comment in change_huge_pmd.
			 */
			if (prot_numa) {
				struct page *page;

				page = vm_normal_page(vma, addr, oldpte);
				if (!page || PageKsm(page))
					continue;

				/* Avoid TLB flush if possible */
				if (pte_protnone(oldpte))
					continue;
			}

			ptent = ptep_modify_prot_start(mm, addr, pte);
			ptent = pte_modify(ptent, newprot);
			if (preserve_write)
				ptent = pte_mkwrite(ptent);

			/* Avoid taking write faults for known dirty pages */
			if (dirty_accountable && pte_dirty(ptent) &&
					(pte_soft_dirty(ptent) ||
					 !(vma->vm_flags & VM_SOFTDIRTY))) {
				ptent = pte_mkwrite(ptent);
			}
			ptep_modify_prot_commit(mm, addr, pte, ptent);
			pages++;
		} else if (IS_ENABLED(CONFIG_MIGRATION)) {
			swp_entry_t entry = pte_to_swp_entry(oldpte);

			if (is_write_migration_entry(entry)) {
				pte_t newpte;
				/*
				 * A protection check is difficult so
				 * just be safe and disable write
				 */
				make_migration_entry_read(&entry);
				newpte = swp_entry_to_pte(entry);
				if (pte_swp_soft_dirty(oldpte))
					newpte = pte_swp_mksoft_dirty(newpte);
				set_pte_at(mm, addr, pte, newpte);

				pages++;
			}
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);

	return pages;
}

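/*
 * Walk the pmds mapped by one pud. A transparent huge pmd covering the
 * whole range is changed in a single operation; otherwise it is split
 * and the resulting ptes handled by change_pte_range(). The mmu
 * notifier is invoked only once a populated pmd is actually found.
 */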
static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
		pud_t *pud, unsigned long addr, unsigned long end,
		pgprot_t newprot, int dirty_accountable, int prot_numa)
{
	pmd_t *pmd;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long next;
	unsigned long pages = 0;
	unsigned long nr_huge_updates = 0;
	unsigned long mni_start = 0;

	pmd = pmd_offset(pud, addr);
	do {
		unsigned long this_pages;

		next = pmd_addr_end(addr, end);
		if (!pmd_trans_huge(*pmd) && !pmd_devmap(*pmd)
				&& pmd_none_or_clear_bad(pmd))
			continue;

		/* invoke the mmu notifier if the pmd is populated */
		if (!mni_start) {
			mni_start = addr;
			mmu_notifier_invalidate_range_start(mm, mni_start, end);
		}

		if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
			if (next - addr != HPAGE_PMD_SIZE)
				split_huge_pmd(vma, pmd, addr);
			else {
				int nr_ptes = change_huge_pmd(vma, pmd, addr,
						newprot, prot_numa);

				if (nr_ptes) {
					if (nr_ptes == HPAGE_PMD_NR) {
						pages += HPAGE_PMD_NR;
						nr_huge_updates++;
					}

					/* huge pmd was handled */
					continue;
				}
			}
			/* fall through, the trans huge pmd just split */
		}
		this_pages = change_pte_range(vma, pmd, addr, next, newprot,
				dirty_accountable, prot_numa);
		pages += this_pages;
	} while (pmd++, addr = next, addr != end);

	if (mni_start)
		mmu_notifier_invalidate_range_end(mm, mni_start, end);

	if (nr_huge_updates)
		count_vm_numa_events(NUMA_HUGE_PTE_UPDATES, nr_huge_updates);
	return pages;
}

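/*
 * Walk the puds mapped by one pgd, delegating populated entries to
 * change_pmd_range() and accumulating the count of updated pages.
 */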
static inline unsigned long change_pud_range(struct vm_area_struct *vma,
		pgd_t *pgd, unsigned long addr, unsigned long end,
		pgprot_t newprot, int dirty_accountable, int prot_numa)
{
	pud_t *pud;
	unsigned long next;
	unsigned long pages = 0;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		pages += change_pmd_range(vma, pud, addr, next, newprot,
				dirty_accountable, prot_numa);
	} while (pud++, addr = next, addr != end);

	return pages;
}

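/*
 * Top-level page table walk for a non-hugetlb VMA. The TLB flush is
 * deferred to the end and skipped entirely when no entries changed;
 * set_tlb_flush_pending() marks the window in which other CPUs may
 * still hold stale TLB entries.
 */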
static unsigned long change_protection_range(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable, int prot_numa)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	unsigned long next;
	unsigned long start = addr;
	unsigned long pages = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset(mm, addr);
	flush_cache_range(vma, addr, end);
	set_tlb_flush_pending(mm);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		pages += change_pud_range(vma, pgd, addr, next, newprot,
				dirty_accountable, prot_numa);
	} while (pgd++, addr = next, addr != end);

	/* Only flush the TLB if we actually modified any entries: */
	if (pages)
		flush_tlb_range(vma, start, end);
	clear_tlb_flush_pending(mm);

	return pages;
}

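/*
 * Change the protection of every page in [start, end), dispatching to
 * the hugetlb variant when the VMA is backed by huge pages. Returns
 * the number of pages whose protection was changed.
 */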
unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end, pgprot_t newprot,
		       int dirty_accountable, int prot_numa)
{
	unsigned long pages;

	if (is_vm_hugetlb_page(vma))
		pages = hugetlb_change_protection(vma, start, end, newprot);
	else
		pages = change_protection_range(vma, start, end, newprot, dirty_accountable, prot_numa);

	return pages;
}

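/*
 * Apply newflags to [start, end) of one VMA: charge a newly writable
 * private mapping against the commit limit, merge or split VMAs so the
 * range is covered exactly, then rewrite the page tables via
 * change_protection().
 */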
int
mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
	unsigned long start, unsigned long end, unsigned long newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long oldflags = vma->vm_flags;
	long nrpages = (end - start) >> PAGE_SHIFT;
	unsigned long charged = 0;
	pgoff_t pgoff;
	int error;
	int dirty_accountable = 0;

	if (newflags == oldflags) {
		*pprev = vma;
		return 0;
	}

	/*
	 * If we make a private mapping writable we increase our commit;
	 * but (without finer accounting) cannot reduce our commit if we
	 * make it unwritable again. hugetlb mappings were accounted for
	 * even if read-only so there is no need to account for them here.
	 */
	if (newflags & VM_WRITE) {
		/* Check space limits when area turns into data. */
		if (!may_expand_vm(mm, newflags, nrpages) &&
				may_expand_vm(mm, oldflags, nrpages))
			return -ENOMEM;
		if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
						VM_SHARED|VM_NORESERVE))) {
			charged = nrpages;
			if (security_vm_enough_memory_mm(mm, charged))
				return -ENOMEM;
			newflags |= VM_ACCOUNT;
		}
	}

	/*
	 * First try to merge with previous and/or next vma.
	 */
	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*pprev = vma_merge(mm, *pprev, start, end, newflags,
			   vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
			   vma->vm_userfaultfd_ctx);
	if (*pprev) {
		vma = *pprev;
		goto success;
	}

	*pprev = vma;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto fail;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto fail;
	}

success:
	/*
	 * vm_flags and vm_page_prot are protected by the mmap_sem
	 * held in write mode.
	 */
	vma->vm_flags = newflags;
	dirty_accountable = vma_wants_writenotify(vma);
	vma_set_page_prot(vma);

	change_protection(vma, start, end, vma->vm_page_prot,
			  dirty_accountable, 0);

	/*
	 * Private VM_LOCKED VMA becoming writable: trigger COW to avoid major
	 * fault on access.
	 */
	if ((oldflags & (VM_WRITE | VM_SHARED | VM_LOCKED)) == VM_LOCKED &&
			(newflags & VM_WRITE)) {
		populate_vma_page_range(vma, start, end, NULL);
	}

	vm_stat_account(mm, oldflags, -nrpages);
	vm_stat_account(mm, newflags, nrpages);
	perf_event_mmap(vma);
	return 0;

fail:
	vm_unacct_memory(charged);
	return error;
}

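/*
 * The mprotect(2) entry point: validate the arguments, then apply
 * mprotect_fixup() to each VMA piece covering [start, start+len) in
 * turn, under a write-held mmap_sem.
 */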
SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
		unsigned long, prot)
{
	unsigned long vm_flags, nstart, end, tmp, reqprot;
	struct vm_area_struct *vma, *prev;
	int error = -EINVAL;
	const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
	prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
	if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
		return -EINVAL;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	if (!len)
		return 0;
	len = PAGE_ALIGN(len);
	end = start + len;
	if (end <= start)
		return -ENOMEM;
	if (!arch_validate_prot(prot))
		return -EINVAL;

	reqprot = prot;
	/*
	 * Does the application expect PROT_READ to imply PROT_EXEC?
	 */
	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
		prot |= PROT_EXEC;

	vm_flags = calc_vm_prot_bits(prot);

	down_write(&current->mm->mmap_sem);

	vma = find_vma(current->mm, start);
	error = -ENOMEM;
	if (!vma)
		goto out;
	prev = vma->vm_prev;
	if (unlikely(grows & PROT_GROWSDOWN)) {
		if (vma->vm_start >= end)
			goto out;
		start = vma->vm_start;
		error = -EINVAL;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out;
	} else {
		if (vma->vm_start > start)
			goto out;
		if (unlikely(grows & PROT_GROWSUP)) {
			end = vma->vm_end;
			error = -EINVAL;
			if (!(vma->vm_flags & VM_GROWSUP))
				goto out;
		}
	}
	if (start > vma->vm_start)
		prev = vma;

	for (nstart = start ; ; ) {
		unsigned long newflags;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		newflags = vm_flags;
		newflags |= (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));

		/* newflags >> 4 shifts VM_MAY% into place of VM_% */
		if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
			error = -EACCES;
			goto out;
		}

		error = security_file_mprotect(vma, reqprot, prot);
		if (error)
			goto out;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			goto out;
		nstart = tmp;

		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			goto out;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			goto out;
		}
	}
out:
	up_write(&current->mm->mmap_sem);
	return error;
}
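
/*
 * Illustrative userspace usage (a sketch, not part of this file): map
 * an anonymous page read-write, revoke write access so stores fault,
 * then restore it.
 *
 *	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	mprotect(p, 4096, PROT_READ);
 *	mprotect(p, 4096, PROT_READ | PROT_WRITE);
 */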