/*
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/sched.h>
#include <linux/mm_types.h>
#include <misc/cxl-base.h>

#include <asm/pgalloc.h>
#include <asm/tlb.h>

#include "mmu_decl.h"
#include <trace/events/thp.h>

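/*
 * Hook used to register a process table with the hypervisor or hardware.
 * The pointer is expected to be filled in by the platform/MMU setup code
 * (hash or radix flavour) before it is called.
 */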
int (*register_process_table)(unsigned long base, unsigned long page_size,
			      unsigned long tbl_size);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * This is called when relaxing access to a hugepage. It's also called in
 * the page fault path when we don't hit any of the major fault cases,
 * i.e. a minor update of _PAGE_ACCESSED, _PAGE_DIRTY, etc. The generic
 * code will have handled those two for us; we additionally deal with
 * missing execute permission here on some processors.
 */
int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp, pmd_t entry, int dirty)
{
	int changed;
#ifdef CONFIG_DEBUG_VM
	WARN_ON(!pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
	assert_spin_locked(&vma->vm_mm->page_table_lock);
#endif
	changed = !pmd_same(*(pmdp), entry);
	if (changed) {
		__ptep_set_access_flags(vma->vm_mm, pmdp_ptep(pmdp),
					pmd_pte(entry), address);
		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	}
	return changed;
}

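/*
 * Test and clear the accessed (young) bit on a huge pmd. The actual update
 * is done by __pmdp_test_and_clear_young(); this wrapper only supplies the
 * mm taken from the vma.
 */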
int pmdp_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long address, pmd_t *pmdp)
{
	return __pmdp_test_and_clear_young(vma->vm_mm, address, pmdp);
}
/*
 * Set a new huge pmd. We should not be called for updating
 * an existing pmd entry. That should go via pmd_hugepage_update.
 */
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd)
{
#ifdef CONFIG_DEBUG_VM
	WARN_ON(pte_present(pmd_pte(*pmdp)) && !pte_protnone(pmd_pte(*pmdp)));
	assert_spin_locked(&mm->page_table_lock);
	WARN_ON(!(pmd_trans_huge(pmd) || pmd_devmap(pmd)));
#endif
	trace_hugepage_set_pmd(addr, pmd_val(pmd));
	return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
}

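/*
 * Empty IPI callback: the point of sending the IPI is simply that the
 * target CPU takes the interrupt, which it cannot do while it is walking
 * page tables with interrupts disabled.
 */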
static void do_nothing(void *unused)
{

}
/*
 * Serialize against find_current_mm_pte, which does a lock-less lookup in
 * the page tables with local interrupts disabled. For huge pages it casts
 * pmd_t to pte_t. Since the format of pte_t differs from that of pmd_t, we
 * want to prevent a transition from a pmd pointing to a page table to a pmd
 * pointing to a huge page (and back) while interrupts are disabled. We clear
 * the pmd to possibly replace it with a page table pointer in different code
 * paths, so make sure we wait for any parallel find_current_mm_pte to finish.
 */
void serialize_against_pte_lookup(struct mm_struct *mm)
{
	smp_mb();
	smp_call_function_many(mm_cpumask(mm), do_nothing, NULL, 1);
}
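
/*
 * Typical caller pattern (a sketch; pmdp_invalidate() below is one example):
 * first update the pmd (e.g. via pmd_hugepage_update()), then call
 * serialize_against_pte_lookup() so that any lock-less walker that may have
 * loaded the old value with interrupts disabled has finished before the pmd
 * is repointed at something with a different format.
 */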

/*
 * We use this to invalidate a pmdp entry before switching from a
 * hugepte to a regular pmd entry.
 */
pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		      pmd_t *pmdp)
{
	unsigned long old_pmd;

	old_pmd = pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, 0);
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	/*
	 * This ensures that generic code that relies on IRQ disabling
	 * to prevent a parallel THP split works as expected.
	 */
	serialize_against_pte_lookup(vma->vm_mm);
	return __pmd(old_pmd);
}

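/*
 * Constructors for huge pmds: pmd_set_protbits() ORs the protection bits
 * into a raw pmd value, and pfn_pmd()/mk_pmd() build a pmd from a pfn or
 * struct page plus a pgprot.
 */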
static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot)
{
	return __pmd(pmd_val(pmd) | pgprot_val(pgprot));
}

pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot)
{
	unsigned long pmdv;

	pmdv = (pfn << PAGE_SHIFT) & PTE_RPN_MASK;
	return pmd_set_protbits(__pmd(pmdv), pgprot);
}

pmd_t mk_pmd(struct page *page, pgprot_t pgprot)
{
	return pfn_pmd(page_to_pfn(page), pgprot);
}

pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	unsigned long pmdv;

	pmdv = pmd_val(pmd);
	pmdv &= _HPAGE_CHG_MASK;
	return pmd_set_protbits(__pmd(pmdv), newprot);
}

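/*
 * Usage sketch (not a new code path): the generic THP protection-change
 * code is expected to read the old pmd, compute pmd_modify(pmd, newprot)
 * and install the result, so _HPAGE_CHG_MASK keeps the PFN and the bits
 * that must survive a protection change, while pmd_set_protbits() supplies
 * the new protection.
 */
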
/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a HUGE PMD entry in the Linux page
 * tables. We use it to preload an HPTE into the hash table corresponding
 * to the updated Linux HUGE PMD entry.
 */
void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
			  pmd_t *pmd)
{
	return;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/* For use by kexec */
void mmu_cleanup_all(void)
{
	if (radix_enabled())
		radix__mmu_cleanup_all();
	else if (mmu_hash_ops.hpte_clear_all)
		mmu_hash_ops.hpte_clear_all();
}

#ifdef CONFIG_MEMORY_HOTPLUG
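/*
 * Memory hotplug: create or remove the kernel mapping for an added or
 * removed memory section, dispatching to the radix or hash backend. The
 * nid argument is presumably there so the backend can allocate any page
 * tables it needs on the right node.
 */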
int __meminit create_section_mapping(unsigned long start, unsigned long end, int nid)
{
	if (radix_enabled())
		return radix__create_section_mapping(start, end, nid);

	return hash__create_section_mapping(start, end, nid);
}

int __meminit remove_section_mapping(unsigned long start, unsigned long end)
{
	if (radix_enabled())
		return radix__remove_section_mapping(start, end);

	return hash__remove_section_mapping(start, end);
}
#endif /* CONFIG_MEMORY_HOTPLUG */