/*
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/memblock.h>
#include <misc/cxl-base.h>

#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/trace.h>
#include <asm/powernv.h>

#include "mmu_decl.h"
#include <trace/events/thp.h>

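/*
 * PMD fragment geometry: the number of PMD fragments carved out of a
 * page and the size shift of each fragment. These feed the PMD_FRAG_NR
 * and PMD_FRAG_SIZE values used by the fragment allocator below and are
 * exported for modular users; presumably they are filled in during early
 * MMU setup (radix vs. hash), before any page tables are allocated.
 */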
unsigned long __pmd_frag_nr;
EXPORT_SYMBOL(__pmd_frag_nr);
unsigned long __pmd_frag_size_shift;
EXPORT_SYMBOL(__pmd_frag_size_shift);

int (*register_process_table)(unsigned long base, unsigned long page_size,
			      unsigned long tbl_size);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * This is called when relaxing access to a hugepage. It's also called in
 * the page fault path when we don't hit any of the major fault cases,
 * i.e. a minor update of _PAGE_ACCESSED, _PAGE_DIRTY, etc. The generic
 * code will have handled those two for us; we additionally deal with
 * missing execute permission here on some processors.
 */
int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp, pmd_t entry, int dirty)
{
	int changed;
#ifdef CONFIG_DEBUG_VM
	WARN_ON(!pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
	assert_spin_locked(pmd_lockptr(vma->vm_mm, pmdp));
#endif
	changed = !pmd_same(*(pmdp), entry);
	if (changed) {
		__ptep_set_access_flags(vma->vm_mm, pmdp_ptep(pmdp),
					pmd_pte(entry), address);
		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	}
	return changed;
}

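/*
 * Test and clear the accessed (young) bit on a huge PMD, as used by the
 * generic page aging code; the real work happens in
 * __pmdp_test_and_clear_young() on the pte-format entry.
 */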
int pmdp_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long address, pmd_t *pmdp)
{
	return __pmdp_test_and_clear_young(vma->vm_mm, address, pmdp);
}
/*
 * Set a new huge pmd. We should not be called to update an existing
 * pmd entry; that should go via pmd_hugepage_update().
 */
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd)
{
#ifdef CONFIG_DEBUG_VM
	WARN_ON(pte_present(pmd_pte(*pmdp)) && !pte_protnone(pmd_pte(*pmdp)));
	assert_spin_locked(pmd_lockptr(mm, pmdp));
	WARN_ON(!(pmd_trans_huge(pmd) || pmd_devmap(pmd)));
#endif
	trace_hugepage_set_pmd(addr, pmd_val(pmd));
	return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
}

static void do_nothing(void *unused)
{

}
/*
 * Serialize against find_current_mm_pte(), which does a lock-less
 * lookup in the page tables with local interrupts disabled. For huge
 * pages it casts pmd_t to pte_t. Since the format of pte_t differs from
 * pmd_t, we want to prevent a transition from a pmd pointing to a page
 * table to a pmd pointing to a huge page (and back) while interrupts
 * are disabled. We clear the pmd before possibly replacing it with a
 * page table pointer in various code paths, so make sure we wait for
 * any parallel find_current_mm_pte() to finish.
 */
void serialize_against_pte_lookup(struct mm_struct *mm)
{
	smp_mb();
	smp_call_function_many(mm_cpumask(mm), do_nothing, NULL, 1);
}

/*
 * We use this to invalidate a pmdp entry before switching from a
 * hugepte to a regular pmd entry.
 */
pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		      pmd_t *pmdp)
{
	unsigned long old_pmd;

	old_pmd = pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, 0);
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	/*
	 * This ensures that generic code that relies on IRQ disabling
	 * to prevent a parallel THP split works as expected.
	 */
	serialize_against_pte_lookup(vma->vm_mm);
	return __pmd(old_pmd);
}

static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot)
{
	return __pmd(pmd_val(pmd) | pgprot_val(pgprot));
}

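/*
 * Build a huge-page pmd from a page frame number and protection bits:
 * place the pfn in the RPN field and OR in the protection bits.
 * mk_pmd() below is the struct page flavour of the same operation.
 */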
pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot)
{
	unsigned long pmdv;

	pmdv = (pfn << PAGE_SHIFT) & PTE_RPN_MASK;
	return pmd_set_protbits(__pmd(pmdv), pgprot);
}

pmd_t mk_pmd(struct page *page, pgprot_t pgprot)
{
	return pfn_pmd(page_to_pfn(page), pgprot);
}

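/*
 * Change the protection of a huge pmd: keep only the bits covered by
 * _HPAGE_CHG_MASK (the pfn plus the bits that must survive a protection
 * change) and then apply the new protection bits.
 */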
pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	unsigned long pmdv;

	pmdv = pmd_val(pmd);
	pmdv &= _HPAGE_CHG_MASK;
	return pmd_set_protbits(__pmd(pmdv), newprot);
}

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a HUGE PMD entry in the linux page
 * tables. On hash this could be used to preload an HPTE for the updated
 * huge PMD entry; currently it is a no-op.
 */
void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
			  pmd_t *pmd)
{
	return;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/* For use by kexec */
void mmu_cleanup_all(void)
{
	if (radix_enabled())
		radix__mmu_cleanup_all();
	else if (mmu_hash_ops.hpte_clear_all)
		mmu_hash_ops.hpte_clear_all();
}

#ifdef CONFIG_MEMORY_HOTPLUG
int __meminit create_section_mapping(unsigned long start, unsigned long end, int nid)
{
	if (radix_enabled())
		return radix__create_section_mapping(start, end, nid);

	return hash__create_section_mapping(start, end, nid);
}

int __meminit remove_section_mapping(unsigned long start, unsigned long end)
{
	if (radix_enabled())
		return radix__remove_section_mapping(start, end);

	return hash__remove_section_mapping(start, end);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

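/*
 * Allocate and install the partition table used by ISA 3.0 (POWER9)
 * hosts. The table is requested from memblock aligned to its own size,
 * zeroed, and then the PTCR (and the nest MMU's copy on PowerNV) is
 * pointed at it.
 */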
void __init mmu_partition_table_init(void)
{
	unsigned long patb_size = 1UL << PATB_SIZE_SHIFT;
	unsigned long ptcr;

	BUILD_BUG_ON_MSG((PATB_SIZE_SHIFT > 36), "Partition table size too large.");
	partition_tb = __va(memblock_alloc_base(patb_size, patb_size,
						MEMBLOCK_ALLOC_ANYWHERE));

	/* Initialize the Partition Table with no entries */
	memset((void *)partition_tb, 0, patb_size);

	/*
	 * Update the partition table control register: the physical base
	 * of the table plus its encoded size, PATB_SIZE_SHIFT - 12
	 * (a 64K table by default).
	 */
	ptcr = __pa(partition_tb) | (PATB_SIZE_SHIFT - 12);
	mtspr(SPRN_PTCR, ptcr);
	powernv_set_nmmu_ptcr(ptcr);
}

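/*
 * Install a partition table entry for the given LPID: dw0 and dw1 are
 * the two doublewords of the entry (stored big-endian). The old entry
 * is inspected so that the right style of flush (radix vs. hash) is
 * issued for whatever this partition ID was previously used for.
 */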
void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0,
				   unsigned long dw1)
{
	unsigned long old = be64_to_cpu(partition_tb[lpid].patb0);

	partition_tb[lpid].patb0 = cpu_to_be64(dw0);
	partition_tb[lpid].patb1 = cpu_to_be64(dw1);

	/*
	 * Global flush of TLBs and partition table caches for this lpid.
	 * The type of flush (hash or radix) depends on what the previous
	 * use of this partition ID was, not the new use.
	 */
	asm volatile("ptesync" : : : "memory");
	if (old & PATB_HR) {
		asm volatile(PPC_TLBIE_5(%0,%1,2,0,1) : :
			     "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
		asm volatile(PPC_TLBIE_5(%0,%1,2,1,1) : :
			     "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
		trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 1);
	} else {
		asm volatile(PPC_TLBIE_5(%0,%1,2,0,0) : :
			     "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
		trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 0);
	}
	/* do we need fixup here? */
	asm volatile("eieio; tlbsync; ptesync" : : : "memory");
}
EXPORT_SYMBOL_GPL(mmu_partition_table_set_entry);

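/*
 * PMD fragment allocator. A full page is carved into PMD_FRAG_NR
 * fragments of PMD_FRAG_SIZE bytes each; the mm caches a pointer to the
 * next unused fragment in mm->context.pmd_frag, and the backing page's
 * refcount tracks how many fragments are still in use.
 */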
static pmd_t *get_pmd_from_cache(struct mm_struct *mm)
{
	void *pmd_frag, *ret;

	spin_lock(&mm->page_table_lock);
	ret = mm->context.pmd_frag;
	if (ret) {
		pmd_frag = ret + PMD_FRAG_SIZE;
		/*
		 * If we have taken up all the fragments, mark the
		 * cached PMD page NULL.
		 */
		if (((unsigned long)pmd_frag & ~PAGE_MASK) == 0)
			pmd_frag = NULL;
		mm->context.pmd_frag = pmd_frag;
	}
	spin_unlock(&mm->page_table_lock);
	return (pmd_t *)ret;
}

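/*
 * Slow path: no cached fragment was available, so allocate a fresh page,
 * run the pmd page constructor, hand out the first fragment and (unless
 * another thread beat us to it) stash the remainder in the per-mm cache.
 */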
static pmd_t *__alloc_for_pmdcache(struct mm_struct *mm)
{
	void *ret = NULL;
	struct page *page;
	gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO;

	if (mm == &init_mm)
		gfp &= ~__GFP_ACCOUNT;
	page = alloc_page(gfp);
	if (!page)
		return NULL;
	if (!pgtable_pmd_page_ctor(page)) {
		__free_pages(page, 0);
		return NULL;
	}

	ret = page_address(page);
	/*
	 * If we support only one fragment, just return the
	 * allocated page.
	 */
	if (PMD_FRAG_NR == 1)
		return ret;

	spin_lock(&mm->page_table_lock);
	/*
	 * If we find mm->context.pmd_frag already set, someone else
	 * populated the cache while we were allocating; return our page
	 * with a single-fragment reference count.
	 */
	if (likely(!mm->context.pmd_frag)) {
		set_page_count(page, PMD_FRAG_NR);
		mm->context.pmd_frag = ret + PMD_FRAG_SIZE;
	}
	spin_unlock(&mm->page_table_lock);

	return (pmd_t *)ret;
}

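/*
 * Allocate one PMD-sized page table fragment: try the per-mm cache
 * first and fall back to allocating a new page. vmaddr is unused here.
 */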
pmd_t *pmd_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr)
{
	pmd_t *pmd;

	pmd = get_pmd_from_cache(mm);
	if (pmd)
		return pmd;

	return __alloc_for_pmdcache(mm);
}

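/*
 * Drop one fragment's reference on the backing page; when the last
 * fragment goes, tear down the pmd page state and free the page.
 */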
void pmd_fragment_free(unsigned long *pmd)
{
	struct page *page = virt_to_page(pmd);

	if (put_page_testzero(page)) {
		pgtable_pmd_page_dtor(page);
		free_unref_page(page);
	}
}

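/*
 * PTE fragment allocator: the same scheme as the PMD fragments above,
 * but with PTE_FRAG_NR fragments of PTE_FRAG_SIZE bytes per page and a
 * per-mm cache in mm->context.pte_frag. Kernel page tables skip the
 * pgtable_page constructor and memcg accounting.
 */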
static pte_t *get_pte_from_cache(struct mm_struct *mm)
{
	void *pte_frag, *ret;

	spin_lock(&mm->page_table_lock);
	ret = mm->context.pte_frag;
	if (ret) {
		pte_frag = ret + PTE_FRAG_SIZE;
		/*
		 * If we have taken up all the fragments, mark the
		 * cached PTE page NULL.
		 */
		if (((unsigned long)pte_frag & ~PAGE_MASK) == 0)
			pte_frag = NULL;
		mm->context.pte_frag = pte_frag;
	}
	spin_unlock(&mm->page_table_lock);
	return (pte_t *)ret;
}

static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel)
{
	void *ret = NULL;
	struct page *page;

	if (!kernel) {
		page = alloc_page(PGALLOC_GFP | __GFP_ACCOUNT);
		if (!page)
			return NULL;
		if (!pgtable_page_ctor(page)) {
			__free_page(page);
			return NULL;
		}
	} else {
		page = alloc_page(PGALLOC_GFP);
		if (!page)
			return NULL;
	}

	ret = page_address(page);
	/*
	 * If we support only one fragment, just return the
	 * allocated page.
	 */
	if (PTE_FRAG_NR == 1)
		return ret;
	spin_lock(&mm->page_table_lock);
	/*
	 * If we find mm->context.pte_frag already set, someone else
	 * populated the cache while we were allocating; return our page
	 * with a single-fragment reference count.
	 */
	if (likely(!mm->context.pte_frag)) {
		set_page_count(page, PTE_FRAG_NR);
		mm->context.pte_frag = ret + PTE_FRAG_SIZE;
	}
	spin_unlock(&mm->page_table_lock);

	return (pte_t *)ret;
}

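/*
 * Allocate one PTE-sized page table fragment: try the per-mm cache
 * first and fall back to allocating a new page. "kernel" selects the
 * unaccounted, no-constructor flavour of backing page.
 */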
pte_t *pte_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel)
{
	pte_t *pte;

	pte = get_pte_from_cache(mm);
	if (pte)
		return pte;

	return __alloc_for_ptecache(mm, kernel);
}

void pte_fragment_free(unsigned long *table, int kernel)
{
	struct page *page = virt_to_page(table);

	if (put_page_testzero(page)) {
		if (!kernel)
			pgtable_page_dtor(page);
		free_unref_page(page);
	}
}

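/*
 * Common free path for the page table levels freed via a TLB flush
 * batch. "index" says which level the table came from, so we can pick
 * the matching destructor.
 */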
static inline void pgtable_free(void *table, int index)
{
	switch (index) {
	case PTE_INDEX:
		pte_fragment_free(table, 0);
		break;
	case PMD_INDEX:
		pmd_fragment_free(table);
		break;
	case PUD_INDEX:
		kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), table);
		break;
		/* We don't free pgd table via RCU callback */
	default:
		BUG();
	}
}

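/*
 * On SMP a page table cannot be freed immediately, because a lock-less
 * walker on another CPU may still be traversing it. Instead we encode
 * the level index in the low bits of the (sufficiently aligned) table
 * pointer and defer the free to tlb_remove_table(); __tlb_remove_table()
 * later decodes the index and frees the table once it is safe. On UP we
 * can free right away.
 */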
#ifdef CONFIG_SMP
void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int index)
{
	unsigned long pgf = (unsigned long)table;

	BUG_ON(index > MAX_PGTABLE_INDEX_SIZE);
	pgf |= index;
	tlb_remove_table(tlb, (void *)pgf);
}

void __tlb_remove_table(void *_table)
{
	void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
	unsigned int index = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;

	return pgtable_free(table, index);
}
#else
void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int index)
{
	return pgtable_free(table, index);
}
#endif