/*
 * mm/pgtable-generic.c
 *
 * Generic pgtable methods declared in asm-generic/pgtable.h
 *
 * Copyright (C) 2010 Linus Torvalds
 */

#include <linux/pagemap.h>
#include <asm/tlb.h>
#include <asm-generic/pgtable.h>

/*
 * If a p?d_bad entry is found while walking page tables, report
 * the error before resetting the entry to p?d_none. Usually (though
 * very seldom) called from the p?d_none_or_clear_bad macros.
 */

void pgd_clear_bad(pgd_t *pgd)
{
	pgd_ERROR(*pgd);
	pgd_clear(pgd);
}

void pud_clear_bad(pud_t *pud)
{
	pud_ERROR(*pud);
	pud_clear(pud);
}

void pmd_clear_bad(pmd_t *pmd)
{
	pmd_ERROR(*pmd);
	pmd_clear(pmd);
}
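
/*
 * For reference, a sketch of how the p?d_none_or_clear_bad() helpers
 * in include/asm-generic/pgtable.h typically invoke the functions
 * above (illustrative only; see that header for the authoritative
 * version):
 *
 *	static inline int pmd_none_or_clear_bad(pmd_t *pmd)
 *	{
 *		if (pmd_none(*pmd))
 *			return 1;
 *		if (unlikely(pmd_bad(*pmd))) {
 *			pmd_clear_bad(pmd);
 *			return 1;
 *		}
 *		return 0;
 *	}
 */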

#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Only sets the access flags (dirty, accessed), as well as write
 * permission. Furthermore, we know it always gets set to a "more
 * permissive" setting, which allows most architectures to optimize
 * this. We return whether the PTE actually changed, which in turn
 * instructs the caller to do things like update_mmu_cache(). This
 * used to be done in the caller, but sparc needs minor faults to
 * force that call on sun4c, so we changed this macro slightly.
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	int changed = !pte_same(*ptep, entry);
	if (changed) {
		set_pte_at(vma->vm_mm, address, ptep, entry);
		flush_tlb_fix_spurious_fault(vma, address);
	}
	return changed;
}
#endif
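
/*
 * Hedged caller-side sketch (cf. the fault handlers in mm/memory.c):
 * the handler builds the new pte value and uses the return value to
 * decide whether an update_mmu_cache() call is needed. "write" here
 * stands for the fault's write flag:
 *
 *	entry = pte_mkyoung(entry);
 *	if (ptep_set_access_flags(vma, address, ptep, entry, write))
 *		update_mmu_cache(vma, address, ptep);
 */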

#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep)
{
	int young;
	young = ptep_test_and_clear_young(vma, address, ptep);
	if (young)
		flush_tlb_page(vma, address);
	return young;
}
#endif
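
/*
 * Note on the generic ptep_clear_flush() below: the TLB flush is
 * skipped when pte_accessible() says the old pte could never have
 * been cached in the TLB (e.g. it was pte_none or a swap entry), so
 * no stale hardware entry can exist.
 */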

#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
		       pte_t *ptep)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t pte;
	pte = ptep_get_and_clear(mm, address, ptep);
	if (pte_accessible(mm, pte))
		flush_tlb_page(vma, address);
	return pte;
}
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#ifndef __HAVE_ARCH_FLUSH_PMD_TLB_RANGE

/*
 * Architectures with special requirements for evicting THP backing
 * TLB entries can implement this. Otherwise it can also help optimize
 * the normal TLB flush in the THP regime: the stock flush_tlb_range()
 * typically has an optimization to nuke the entire TLB if the flush
 * span is greater than a threshold, which will likely be true for a
 * single huge page. Thus a single THP flush will invalidate the
 * entire TLB, which is not desirable.
 * e.g. see arch/arc: flush_pmd_tlb_range
 */
#define flush_pmd_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
#endif
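
/*
 * Purely illustrative, hedged sketch of what an architecture override
 * might look like (loosely modelled on the arc approach referenced
 * above; real implementations are arch-specific and may use dedicated
 * huge-TLB invalidate instructions):
 *
 *	void flush_pmd_tlb_range(struct vm_area_struct *vma,
 *				 unsigned long start, unsigned long end)
 *	{
 *		unsigned long addr;
 *
 *		// one TLB entry covers each huge page, so step per
 *		// HPAGE_PMD_SIZE instead of per base page
 *		for (addr = start; addr < end; addr += HPAGE_PMD_SIZE)
 *			flush_tlb_page(vma, addr);
 *	}
 */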

#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
int pmdp_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp,
			  pmd_t entry, int dirty)
{
	int changed = !pmd_same(*pmdp, entry);
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	if (changed) {
		set_pmd_at(vma->vm_mm, address, pmdp, entry);
		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	}
	return changed;
}
#endif

#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
int pmdp_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pmd_t *pmdp)
{
	int young;
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	young = pmdp_test_and_clear_young(vma, address, pmdp);
	if (young)
		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return young;
}
#endif

#ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
			    pmd_t *pmdp)
{
	pmd_t pmd;
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(!pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
	pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}
#endif

#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(&pgtable->lru);
	else
		list_add(&pgtable->lru, &pmd_huge_pte(mm, pmdp)->lru);
	pmd_huge_pte(mm, pmdp) = pgtable;
}
#endif

#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
/* no "address" argument, so destroys the page coloring of some archs */
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	pgtable_t pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	pmd_huge_pte(mm, pmdp) = list_first_entry_or_null(&pgtable->lru,
							  struct page, lru);
	if (pmd_huge_pte(mm, pmdp))
		list_del(&pgtable->lru);
	return pgtable;
}
#endif
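
/*
 * Hedged usage sketch of the deposit/withdraw pair (cf. the THP code
 * in mm/huge_memory.c): a preallocated pte page table is stashed when
 * a huge pmd is installed and taken back when the pmd is split or
 * zapped, both under the pmd lock:
 *
 *	pgtable_trans_huge_deposit(mm, pmd, pgtable);	// at THP install
 *	...
 *	pgtable = pgtable_trans_huge_withdraw(mm, pmd);	// at split/zap
 */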

#ifndef __HAVE_ARCH_PMDP_INVALIDATE
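/*
 * Generic fallback: mark the pmd not-present and shoot down the huge
 * TLB entry, so that hardware can neither use a stale translation nor
 * (presumably, on architectures that update them in hardware) set the
 * accessed/dirty bits behind the caller's back, e.g. while a huge
 * page is being split.
 */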
void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		     pmd_t *pmdp)
{
	pmd_t entry = *pmdp;
	set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(entry));
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
}
#endif

#ifndef pmdp_collapse_flush
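/*
 * Generic fallback used by khugepaged-style collapse (hedged: the
 * caller lives in the THP code, mm/huge_memory.c, at this point in
 * time): it clears the pmd that covers the pte page table being
 * collapsed and flushes the pte-level TLB entries for the range.
 */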
pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp)
{
	/*
	 * The pmd and hugepage pte formats are the same, so we can
	 * use the same function.
	 */
	pmd_t pmd;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(pmd_trans_huge(*pmdp));
	pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);

	/* collapse entails shooting down the ptes, not the pmd */
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}
#endif
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */