// SPDX-License-Identifier: GPL-2.0
/*
 *  mm/pgtable-generic.c
 *
 *  Generic pgtable methods declared in asm-generic/pgtable.h
 *
 *  Copyright (C) 2010  Linus Torvalds
 */

#include <linux/pagemap.h>
#include <asm/tlb.h>
#include <asm-generic/pgtable.h>

/*
 * If a p?d_bad entry is found while walking page tables, report
 * the error before resetting the entry to p?d_none.  These are
 * called from the p?d_none_or_clear_bad macros, though only in the
 * (rare) case that a bad entry is actually encountered.
 */

void pgd_clear_bad(pgd_t *pgd)
{
	pgd_ERROR(*pgd);
	pgd_clear(pgd);
}

void p4d_clear_bad(p4d_t *p4d)
{
	p4d_ERROR(*p4d);
	p4d_clear(p4d);
}

void pud_clear_bad(pud_t *pud)
{
	pud_ERROR(*pud);
	pud_clear(pud);
}

void pmd_clear_bad(pmd_t *pmd)
{
	pmd_ERROR(*pmd);
	pmd_clear(pmd);
}

#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Only sets the access flags (dirty, accessed), as well as write
 * permission.  Furthermore, we know it always gets set to a "more
 * permissive" setting, which allows most architectures to optimize
 * this.  We return whether the PTE actually changed, which in turn
 * instructs the caller to do things like update_mmu_cache.  This
 * used to be done in the caller, but sparc needs minor faults to
 * force that call on sun4c, so we changed this macro slightly.
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	int changed = !pte_same(*ptep, entry);
	if (changed) {
		set_pte_at(vma->vm_mm, address, ptep, entry);
		flush_tlb_fix_spurious_fault(vma, address);
	}
	return changed;
}
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
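/*
 * Generic fallback: clear the accessed ("young") bit and, only if it
 * was set, flush the TLB entry for the page so that the next access
 * will mark the PTE young again.  Returns the old state of the bit.
 */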
int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep)
{
	int young;
	young = ptep_test_and_clear_young(vma, address, ptep);
	if (young)
		flush_tlb_page(vma, address);
	return young;
}
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
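/*
 * Atomically get-and-clear the PTE and flush the TLB entry, but only
 * when the old PTE may still be cached in a TLB, i.e. when
 * pte_accessible() says so.  Returns the old PTE value.
 */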
pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
		       pte_t *ptep)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t pte;
	pte = ptep_get_and_clear(mm, address, ptep);
	if (pte_accessible(mm, pte))
		flush_tlb_page(vma, address);
	return pte;
}
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
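/*
 * PMD-level analogue of ptep_set_access_flags() above: install the
 * more permissive entry and flush the TLB range covering the huge
 * page.  Returns whether the PMD actually changed.
 */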
int pmdp_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp,
			  pmd_t entry, int dirty)
{
	int changed = !pmd_same(*pmdp, entry);
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	if (changed) {
		set_pmd_at(vma->vm_mm, address, pmdp, entry);
		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	}
	return changed;
}
#endif

#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
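/*
 * PMD-level analogue of ptep_clear_flush_young(): clear the accessed
 * bit and, if it was set, flush the TLB range covering the huge page.
 */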
int pmdp_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pmd_t *pmdp)
{
	int young;
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	young = pmdp_test_and_clear_young(vma, address, pmdp);
	if (young)
		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return young;
}
#endif

#ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
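/*
 * Get-and-clear a huge PMD and unconditionally flush its TLB range.
 * The entry must be present and huge (transparent huge or devmap),
 * which the VM_BUG_ON below asserts.
 */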
pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
			    pmd_t *pmdp)
{
	pmd_t pmd;
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON((pmd_present(*pmdp) && !pmd_trans_huge(*pmdp) &&
		   !pmd_devmap(*pmdp)) || !pmd_present(*pmdp));
	pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
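/*
 * PUD-level counterpart of pmdp_huge_clear_flush(), for architectures
 * that support transparent huge PUD mappings.
 */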
pud_t pudp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
			    pud_t *pudp)
{
	pud_t pud;

	VM_BUG_ON(address & ~HPAGE_PUD_MASK);
	VM_BUG_ON(!pud_trans_huge(*pudp) && !pud_devmap(*pudp));
	pud = pudp_huge_get_and_clear(vma->vm_mm, address, pudp);
	flush_pud_tlb_range(vma, address, address + HPAGE_PUD_SIZE);
	return pud;
}
#endif
#endif

#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
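/*
 * Stash a preallocated page-table page behind a huge pmd, typically
 * so it can be reused if the huge mapping is split later.  Deposited
 * tables are queued off pmd_huge_pte(mm, pmdp), and the caller must
 * hold the pmd lock.  Illustrative pairing with the withdraw side
 * below:
 *
 *	pgtable_trans_huge_deposit(mm, pmdp, pgtable);
 *	...
 *	pgtable = pgtable_trans_huge_withdraw(mm, pmdp);
 */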
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(&pgtable->lru);
	else
		list_add(&pgtable->lru, &pmd_huge_pte(mm, pmdp)->lru);
	pmd_huge_pte(mm, pmdp) = pgtable;
}
#endif

#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
/* No "address" argument, so this destroys page coloring on some architectures. */
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	pgtable_t pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	pmd_huge_pte(mm, pmdp) = list_first_entry_or_null(&pgtable->lru,
							  struct page, lru);
	if (pmd_huge_pte(mm, pmdp))
		list_del(&pgtable->lru);
	return pgtable;
}
#endif

#ifndef __HAVE_ARCH_PMDP_INVALIDATE
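/*
 * Mark the huge PMD not-present and flush its TLB range, presumably
 * so that hardware walkers cannot observe a half-updated entry while
 * the PMD is being modified.
 */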
void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		     pmd_t *pmdp)
{
	pmd_t entry = *pmdp;
	set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(entry));
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
}
#endif

#ifndef pmdp_collapse_flush
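/*
 * Used on the huge-page collapse path (khugepaged): get-and-clear a
 * PMD that still points to a page-table page, then shoot down the
 * PTE-level TLB entries for the range it mapped.
 */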
pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp)
{
	/*
	 * The pmd and hugepage pte formats are the same, so we can
	 * use the same function on both.
	 */
	pmd_t pmd;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(pmd_trans_huge(*pmdp));
	pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);

	/* collapse entails shooting down ptes, not the pmd */
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}
#endif
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */