/*
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/sched.h>
#include <linux/mm_types.h>
#include <misc/cxl-base.h>

#include <asm/pgalloc.h>
#include <asm/tlb.h>

#include "mmu_decl.h"
#include <trace/events/thp.h>

int (*register_process_table)(unsigned long base, unsigned long page_size,
                              unsigned long tbl_size);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * This is called when relaxing access to a hugepage. It's also called
 * in the page fault path when we don't hit any of the major fault
 * cases, i.e. a minor update of _PAGE_ACCESSED, _PAGE_DIRTY, etc. The
 * generic code will have handled those for us; on some processors we
 * additionally deal with missing execute permission here.
 */
int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
                          pmd_t *pmdp, pmd_t entry, int dirty)
{
        int changed;
#ifdef CONFIG_DEBUG_VM
        WARN_ON(!pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
        assert_spin_locked(&vma->vm_mm->page_table_lock);
#endif
        changed = !pmd_same(*(pmdp), entry);
        if (changed) {
                __ptep_set_access_flags(vma->vm_mm, pmdp_ptep(pmdp),
                                        pmd_pte(entry), address);
                flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
        }
        return changed;
}
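
/*
 * A minimal, hypothetical caller sketch (not a copy of the generic
 * code): this is the shape in which the THP fault path is expected to
 * use the return value, only flushing and preloading when the entry
 * actually changed.
 */
static inline void example_relax_hugepage_access(struct vm_area_struct *vma,
                                                 unsigned long address,
                                                 pmd_t *pmdp, pmd_t entry,
                                                 int dirty)
{
        /* Round down to the boundary of the huge page the pmd maps. */
        unsigned long haddr = address & HPAGE_PMD_MASK;

        if (pmdp_set_access_flags(vma, haddr, pmdp, entry, dirty))
                update_mmu_cache_pmd(vma, address, pmdp);
}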

int pmdp_test_and_clear_young(struct vm_area_struct *vma,
                              unsigned long address, pmd_t *pmdp)
{
        return __pmdp_test_and_clear_young(vma->vm_mm, address, pmdp);
}

/*
 * Set a new huge pmd. We should not be called for updating an
 * existing pmd entry; that should go via pmd_hugepage_update().
 */
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
                pmd_t *pmdp, pmd_t pmd)
{
#ifdef CONFIG_DEBUG_VM
        WARN_ON(pte_present(pmd_pte(*pmdp)) && !pte_protnone(pmd_pte(*pmdp)));
        assert_spin_locked(&mm->page_table_lock);
        WARN_ON(!(pmd_trans_huge(pmd) || pmd_devmap(pmd)));
#endif
        trace_hugepage_set_pmd(addr, pmd_val(pmd));
        return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
}
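
/*
 * Hypothetical sketch contrasting the two interfaces named above:
 * set_pmd_at() installs a pmd where none is currently present, while
 * pmd_hugepage_update() clears/sets bits in an already-valid entry.
 */
static void example_install_then_harden(struct mm_struct *mm,
                                        unsigned long addr, pmd_t *pmdp,
                                        pmd_t pmd)
{
        /* First population of the slot: set_pmd_at() is the right call. */
        set_pmd_at(mm, addr, pmdp, pmd);

        /*
         * Later changes to the live entry go via pmd_hugepage_update(),
         * e.g. dropping write permission; never via set_pmd_at().
         */
        pmd_hugepage_update(mm, addr, pmdp, _PAGE_WRITE, 0);
}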

static void do_nothing(void *unused)
{
}

/*
 * Serialize against find_current_mm_pte(), which does a lockless
 * lookup in the page tables with local interrupts disabled. For huge
 * pages it casts pmd_t to pte_t. Since the format of pte_t differs
 * from that of pmd_t, we want to prevent a transition from a pmd
 * pointing to a page table to a pmd pointing to a huge page (and
 * back) while interrupts are disabled. We clear the pmd so that it
 * can be replaced with a page-table pointer in different code paths,
 * so make sure we wait for any parallel find_current_mm_pte() to
 * finish.
 */
void serialize_against_pte_lookup(struct mm_struct *mm)
{
        smp_mb();
        smp_call_function_many(mm_cpumask(mm), do_nothing, NULL, 1);
}
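
/*
 * Hypothetical sketch of the pattern described above: a writer that
 * changes what the pmd points to first empties it, then waits out any
 * lockless walker, and only then publishes the new entry.
 */
static void example_pmd_repoint(struct mm_struct *mm, unsigned long addr,
                                pmd_t *pmdp, pmd_t new_pmd)
{
        /* 1. Clear _PAGE_PRESENT so concurrent walkers see no entry. */
        pmd_hugepage_update(mm, addr, pmdp, _PAGE_PRESENT, 0);

        /*
         * 2. IPI every CPU in mm_cpumask(); find_current_mm_pte() runs
         * with IRQs off, so once the IPIs have been handled no walker
         * can still be interpreting the old entry.
         */
        serialize_against_pte_lookup(mm);

        /* 3. Now it is safe to publish the entry in its new format. */
        set_pmd_at(mm, addr, pmdp, new_pmd);
}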

/*
 * We use this to invalidate a pmdp entry before switching from a
 * hugepte to a regular pmd entry.
 */
void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
                     pmd_t *pmdp)
{
        pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, 0);
        flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
        /*
         * This ensures that generic code that relies on IRQ disabling
         * to prevent a parallel THP split works as expected.
         */
        serialize_against_pte_lookup(vma->vm_mm);
}

static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot)
{
        return __pmd(pmd_val(pmd) | pgprot_val(pgprot));
}

pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot)
{
        unsigned long pmdv;

        pmdv = (pfn << PAGE_SHIFT) & PTE_RPN_MASK;
        return pmd_set_protbits(__pmd(pmdv), pgprot);
}

pmd_t mk_pmd(struct page *page, pgprot_t pgprot)
{
        return pfn_pmd(page_to_pfn(page), pgprot);
}

pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
        unsigned long pmdv;

        pmdv = pmd_val(pmd);
        pmdv &= _HPAGE_CHG_MASK;
        return pmd_set_protbits(__pmd(pmdv), newprot);
}
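
/*
 * Illustrative sketch of the helpers above: build a huge-page pmd for
 * a pfn, then switch its protection. pmd_modify() keeps only the bits
 * in _HPAGE_CHG_MASK (the RPN plus flags such as dirty/accessed) and
 * layers the new protection on top via pmd_set_protbits().
 */
static pmd_t example_remap_readonly(unsigned long pfn)
{
        pmd_t pmd = pfn_pmd(pfn, PAGE_KERNEL);

        return pmd_modify(pmd, PAGE_READONLY);
}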

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a huge pmd entry in the Linux
 * page tables. We use it to preload an HPTE into the hash table
 * corresponding to the updated Linux huge pmd entry.
 */
void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
                          pmd_t *pmd)
{
        return;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/* For use by kexec */
void mmu_cleanup_all(void)
{
        if (radix_enabled())
                radix__mmu_cleanup_all();
        else if (mmu_hash_ops.hpte_clear_all)
                mmu_hash_ops.hpte_clear_all();
}

#ifdef CONFIG_MEMORY_HOTPLUG
int create_section_mapping(unsigned long start, unsigned long end)
{
        if (radix_enabled())
                return radix__create_section_mapping(start, end);

        return hash__create_section_mapping(start, end);
}

int remove_section_mapping(unsigned long start, unsigned long end)
{
        if (radix_enabled())
                return radix__remove_section_mapping(start, end);

        return hash__remove_section_mapping(start, end);
}
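
/*
 * Hypothetical caller sketch: the arch hotplug path (in real life,
 * arch_add_memory()) is expected to map the new physical range before
 * handing it to the core mm, and to tear the mapping down on removal.
 */
static int example_hotplug_add(unsigned long start, unsigned long size)
{
        int rc = create_section_mapping(start, start + size);

        if (rc)
                return rc;
        /* ...hand the pages to the core mm... */
        return 0;
}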
#endif /* CONFIG_MEMORY_HOTPLUG */