/*
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/sched.h>
#include <linux/mm_types.h>

#include <asm/pgalloc.h>
#include <asm/tlb.h>

#include "mmu_decl.h"
#include <trace/events/thp.h>

/*
 * Assigned during early MMU init; used to tell the hypervisor/hardware
 * where this kernel's process table lives.
 */
int (*register_process_table)(unsigned long base, unsigned long page_size,
			      unsigned long tbl_size);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * This is called when relaxing access to a hugepage. It is also called in
 * the page fault path when we don't hit any of the major fault cases,
 * i.e. a minor update of _PAGE_ACCESSED, _PAGE_DIRTY, etc. The generic
 * code will have handled those for us; on some processors we additionally
 * deal with missing execute permission here.
 */
int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp, pmd_t entry, int dirty)
{
	int changed;
#ifdef CONFIG_DEBUG_VM
	WARN_ON(!pmd_trans_huge(*pmdp));
	assert_spin_locked(&vma->vm_mm->page_table_lock);
#endif
	changed = !pmd_same(*(pmdp), entry);
	if (changed) {
		__ptep_set_access_flags(vma->vm_mm, pmdp_ptep(pmdp),
					pmd_pte(entry), address);
		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	}
	return changed;
}
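
/*
 * Illustrative caller sketch (an assumption about the generic THP fault
 * path in mm/huge_memory.c, not code copied from this tree): the generic
 * code holds the pmd lock, builds the relaxed entry, and only then calls
 * the hook above, roughly:
 *
 *	entry = pmd_mkyoung(orig_pmd);
 *	if (write)
 *		entry = pmd_mkdirty(entry);
 *	if (pmdp_set_access_flags(vma, haddr, pmdp, entry, write))
 *		update_mmu_cache_pmd(vma, haddr, pmdp);
 *
 * The return value tells the caller whether the entry actually changed,
 * and hence whether any further cache maintenance is worthwhile.
 */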

int pmdp_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long address, pmd_t *pmdp)
{
	return __pmdp_test_and_clear_young(vma->vm_mm, address, pmdp);
}

/*
 * Set a new huge pmd. We should not be called to update an existing pmd
 * entry; that should go via pmd_hugepage_update().
 */
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd)
{
#ifdef CONFIG_DEBUG_VM
	WARN_ON(pte_present(pmd_pte(*pmdp)) && !pte_protnone(pmd_pte(*pmdp)));
	assert_spin_locked(&mm->page_table_lock);
	WARN_ON(!pmd_trans_huge(pmd));
#endif
	trace_hugepage_set_pmd(addr, pmd_val(pmd));
	set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
}
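
/*
 * A minimal usage sketch of the split of responsibilities described in
 * the comment above (hedged: the callers live in generic and arch THP
 * code, and the pmd_hugepage_update() signature is assumed from the
 * book3s/64 headers of this era):
 *
 *	// installing a brand-new hugepage entry:
 *	set_pmd_at(mm, addr, pmdp, pmd);
 *
 *	// changing an entry that is already valid goes through an atomic
 *	// read-modify-write of the pte bits instead, e.g. write protect:
 *	old = pmd_hugepage_update(mm, addr, pmdp, _PAGE_WRITE, 0);
 *
 * pmd_hugepage_update() returns the old pmd value, so callers can decide
 * whether a TLB flush is required.
 */
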
/*
 * We use this to invalidate a pmdp entry before switching from a
 * hugepage mapping to a regular pmd entry.
 */
void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		     pmd_t *pmdp)
{
	pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, 0);
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	/*
	 * This ensures that generic code that relies on IRQ disabling
	 * to prevent a parallel THP split works as expected.
	 */
	kick_all_cpus_sync();
}
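
/*
 * Why the kick matters, as a sketch (an assumption about the lockless
 * walkers, e.g. the fast GUP path, not code from this file): such a
 * walker runs with interrupts disabled, roughly
 *
 *	local_irq_disable();
 *	pmd = READ_ONCE(*pmdp);
 *	if (pmd_trans_huge(pmd))
 *		... walk the hugepage ...
 *	local_irq_enable();
 *
 * kick_all_cpus_sync() waits for an IPI to be taken on every CPU, and an
 * IPI can only be taken once the walker re-enables interrupts. So by the
 * time it returns, no CPU can still be inside an IRQ-disabled walk that
 * started before the invalidation above.
 */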

static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot)
{
	return __pmd(pmd_val(pmd) | pgprot_val(pgprot));
}

pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot)
{
	unsigned long pmdv;

	pmdv = (pfn << PAGE_SHIFT) & PTE_RPN_MASK;
	return pmd_set_protbits(__pmd(pmdv), pgprot);
}

pmd_t mk_pmd(struct page *page, pgprot_t pgprot)
{
	return pfn_pmd(page_to_pfn(page), pgprot);
}

pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	unsigned long pmdv;

	pmdv = pmd_val(pmd);
	pmdv &= _HPAGE_CHG_MASK;
	return pmd_set_protbits(__pmd(pmdv), newprot);
}

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a HUGE PMD entry in the linux page
 * tables. We use it to preload an HPTE into the hash table corresponding
 * to the updated linux HUGE PMD entry.
 */
void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
			  pmd_t *pmd)
{
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/* For use by kexec */
void mmu_cleanup_all(void)
{
	if (radix_enabled())
		radix__mmu_cleanup_all();
	else if (mmu_hash_ops.hpte_clear_all)
		mmu_hash_ops.hpte_clear_all();
}
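
/*
 * Hedged usage note: the call below sketches how the kexec path is
 * expected to use this, late in shutdown with the secondaries already
 * quiesced (the real call site lives in the kexec machinery, not here):
 *
 *	mmu_cleanup_all();
 *
 * On radix this tears down the translation setup via
 * radix__mmu_cleanup_all(); on hash it clears every HPTE through
 * mmu_hash_ops.hpte_clear_all() when the platform provides that hook,
 * so the new kernel starts with a clean MMU.
 */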

#ifdef CONFIG_MEMORY_HOTPLUG
int create_section_mapping(unsigned long start, unsigned long end)
{
	if (radix_enabled())
		return radix__create_section_mapping(start, end);

	return hash__create_section_mapping(start, end);
}

int remove_section_mapping(unsigned long start, unsigned long end)
{
	if (radix_enabled())
		return radix__remove_section_mapping(start, end);

	return hash__remove_section_mapping(start, end);
}
#endif /* CONFIG_MEMORY_HOTPLUG */