/*
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/memblock.h>
#include <misc/cxl-base.h>

#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/trace.h>
#include <asm/powernv.h>

#include "mmu_decl.h"
#include <trace/events/thp.h>

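/*
 * Number of PMD fragments per page and the fragment size shift. These are
 * runtime variables rather than compile-time constants because the values
 * differ between the hash and radix MMUs; early MMU setup code is expected
 * to fill them in before the fragment allocators below are used.
 */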
unsigned long __pmd_frag_nr;
EXPORT_SYMBOL(__pmd_frag_nr);
unsigned long __pmd_frag_size_shift;
EXPORT_SYMBOL(__pmd_frag_size_shift);

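/*
 * Platform hook for registering the process table with the hardware or the
 * hypervisor (for example via an hcall on pseries). Platform setup code is
 * expected to install an implementation before the radix MMU is brought up.
 */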
int (*register_process_table)(unsigned long base, unsigned long page_size,
			      unsigned long tbl_size);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * This is called when relaxing access to a hugepage. It's also called in the
 * page fault path when we don't hit any of the major fault cases, i.e. a minor
 * update of _PAGE_ACCESSED, _PAGE_DIRTY, etc. The generic code will have
 * handled those two for us; we additionally deal with missing execute
 * permission here on some processors.
 */
int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp, pmd_t entry, int dirty)
{
	int changed;
#ifdef CONFIG_DEBUG_VM
	WARN_ON(!pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
	assert_spin_locked(pmd_lockptr(vma->vm_mm, pmdp));
#endif
	changed = !pmd_same(*(pmdp), entry);
	if (changed) {
		/*
		 * We can use MMU_PAGE_2M here, because only the radix
		 * path looks at the psize.
		 */
		__ptep_set_access_flags(vma, pmdp_ptep(pmdp),
					pmd_pte(entry), address, MMU_PAGE_2M);
	}
	return changed;
}

int pmdp_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long address, pmd_t *pmdp)
{
	return __pmdp_test_and_clear_young(vma->vm_mm, address, pmdp);
}
/*
 * Set a new huge pmd. We should not be called for updating
 * an existing pmd entry. That should go via pmd_hugepage_update.
 */
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd)
{
#ifdef CONFIG_DEBUG_VM
	WARN_ON(pte_present(pmd_pte(*pmdp)) && !pte_protnone(pmd_pte(*pmdp)));
	assert_spin_locked(pmd_lockptr(mm, pmdp));
	WARN_ON(!(pmd_trans_huge(pmd) || pmd_devmap(pmd)));
#endif
	trace_hugepage_set_pmd(addr, pmd_val(pmd));
	return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
}
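
/*
 * Note that set_pmd_at() simply funnels into set_pte_at(): on book3s64 a
 * huge pmd uses the same bit layout as a pte (this file casts between the
 * two throughout), so the pte helpers can be reused for the actual update.
 */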

static void do_nothing(void *unused)
{

}
/*
 * Serialize against find_current_mm_pte, which does a lockless
 * lookup in the page tables with local interrupts disabled. For huge pages
 * it casts pmd_t to pte_t. Since the format of pte_t is different from
 * pmd_t, we want to prevent a transition from a pmd pointing to a page table
 * to a pmd pointing to a huge page (and back) while interrupts are disabled.
 * We clear the pmd to possibly replace it with a page table pointer in
 * different code paths. So make sure we wait for the parallel
 * find_current_mm_pte to finish.
 */
void serialize_against_pte_lookup(struct mm_struct *mm)
{
	smp_mb();
	smp_call_function_many(mm_cpumask(mm), do_nothing, NULL, 1);
}
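
/*
 * Typical usage pattern (illustrative sketch, mirroring pmdp_invalidate()
 * below): clear or downgrade the pmd first, then wait for any lockless
 * walker that may still be looking at the old value before the entry is
 * reused with a different format:
 *
 *	old = pmd_hugepage_update(mm, addr, pmdp, _PAGE_PRESENT, 0);
 *	flush_pmd_tlb_range(vma, addr, addr + HPAGE_PMD_SIZE);
 *	serialize_against_pte_lookup(mm);
 *	// the pmd can now safely be repopulated, e.g. with a page table
 */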

/*
 * We use this to invalidate a pmdp entry before switching from a
 * hugepte to a regular pmd entry.
 */
pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		      pmd_t *pmdp)
{
	unsigned long old_pmd;

	old_pmd = pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, 0);
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	/*
	 * This ensures that generic code that relies on IRQ disabling
	 * to prevent a parallel THP split works as expected.
	 */
	serialize_against_pte_lookup(vma->vm_mm);
	return __pmd(old_pmd);
}

static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot)
{
	return __pmd(pmd_val(pmd) | pgprot_val(pgprot));
}

pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot)
{
	unsigned long pmdv;

	pmdv = (pfn << PAGE_SHIFT) & PTE_RPN_MASK;
	return pmd_set_protbits(__pmd(pmdv), pgprot);
}

pmd_t mk_pmd(struct page *page, pgprot_t pgprot)
{
	return pfn_pmd(page_to_pfn(page), pgprot);
}

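/*
 * pmd_modify() below rebuilds the protection bits from newprot, while
 * _HPAGE_CHG_MASK preserves the parts of the entry that must survive a
 * protection change (the PFN plus bookkeeping bits such as dirty/accessed).
 * The exact set of preserved bits is defined by _HPAGE_CHG_MASK in the
 * pgtable headers.
 */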
pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	unsigned long pmdv;

	pmdv = pmd_val(pmd);
	pmdv &= _HPAGE_CHG_MASK;
	return pmd_set_protbits(__pmd(pmdv), newprot);
}

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a HUGE PMD entry in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux HUGE PMD entry.
 */
void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
			  pmd_t *pmd)
{
	if (radix_enabled())
		prefetch((void *)addr);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/* For use by kexec */
void mmu_cleanup_all(void)
{
	if (radix_enabled())
		radix__mmu_cleanup_all();
	else if (mmu_hash_ops.hpte_clear_all)
		mmu_hash_ops.hpte_clear_all();
}

#ifdef CONFIG_MEMORY_HOTPLUG
int __meminit create_section_mapping(unsigned long start, unsigned long end, int nid)
{
	if (radix_enabled())
		return radix__create_section_mapping(start, end, nid);

	return hash__create_section_mapping(start, end, nid);
}

int __meminit remove_section_mapping(unsigned long start, unsigned long end)
{
	if (radix_enabled())
		return radix__remove_section_mapping(start, end);

	return hash__remove_section_mapping(start, end);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

void __init mmu_partition_table_init(void)
{
	unsigned long patb_size = 1UL << PATB_SIZE_SHIFT;
	unsigned long ptcr;

	BUILD_BUG_ON_MSG((PATB_SIZE_SHIFT > 36), "Partition table size too large.");
	partition_tb = __va(memblock_alloc_base(patb_size, patb_size,
						MEMBLOCK_ALLOC_ANYWHERE));

	/* Initialize the Partition Table with no entries */
	memset((void *)partition_tb, 0, patb_size);

	/*
	 * Update the partition table control register: the low bits encode
	 * the table size as log2(size) - 12, i.e. 64K with the default
	 * PATB_SIZE_SHIFT.
	 */
	ptcr = __pa(partition_tb) | (PATB_SIZE_SHIFT - 12);
	mtspr(SPRN_PTCR, ptcr);
	powernv_set_nmmu_ptcr(ptcr);
}
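
/*
 * Example: with PATB_SIZE_SHIFT == 16 (a 64K partition table, the value
 * currently used here), the PTCR written above is simply the physical base
 * address of partition_tb with 4 (= 16 - 12) in the size field, matching
 * the architected encoding of the size as log2(size) - 12.
 */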

void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0,
				   unsigned long dw1)
{
	unsigned long old = be64_to_cpu(partition_tb[lpid].patb0);

	partition_tb[lpid].patb0 = cpu_to_be64(dw0);
	partition_tb[lpid].patb1 = cpu_to_be64(dw1);

	/*
	 * Global flush of TLBs and partition table caches for this lpid.
	 * The type of flush (hash or radix) depends on what the previous
	 * use of this partition ID was, not the new use.
	 */
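	/*
	 * The PPC_TLBIE_5 operands below are (RB, RS, RIC, PRS, R): RIC=2
	 * requests an "invalidate all" for the LPID. If the old entry was
	 * radix (PATB_HR set) we flush both partition-scoped (PRS=0) and
	 * process-scoped (PRS=1) entries with R=1; for hash a single
	 * partition-scoped R=0 flush is used, as the trace_tlbie() calls
	 * also record.
	 */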
	asm volatile("ptesync" : : : "memory");
	if (old & PATB_HR) {
		asm volatile(PPC_TLBIE_5(%0,%1,2,0,1) : :
			     "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
		asm volatile(PPC_TLBIE_5(%0,%1,2,1,1) : :
			     "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
		trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 1);
	} else {
		asm volatile(PPC_TLBIE_5(%0,%1,2,0,0) : :
			     "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
		trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 0);
	}
	/* do we need fixup here? */
	asm volatile("eieio; tlbsync; ptesync" : : : "memory");
}
EXPORT_SYMBOL_GPL(mmu_partition_table_set_entry);

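/*
 * PMD (and, further below, PTE) page table pages are carved into fragments
 * so that several tables can share one page: a page holds PMD_FRAG_NR
 * fragments of PMD_FRAG_SIZE bytes each. mm->context.pmd_frag caches the
 * next unused fragment of the most recently allocated page, and the page's
 * refcount tracks how many fragments are still in use, so the page is only
 * freed once every fragment has been returned.
 */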
static pmd_t *get_pmd_from_cache(struct mm_struct *mm)
{
	void *pmd_frag, *ret;

	spin_lock(&mm->page_table_lock);
	ret = mm->context.pmd_frag;
	if (ret) {
		pmd_frag = ret + PMD_FRAG_SIZE;
		/*
		 * If we have taken up all the fragments, mark the PMD page NULL
		 */
		if (((unsigned long)pmd_frag & ~PAGE_MASK) == 0)
			pmd_frag = NULL;
		mm->context.pmd_frag = pmd_frag;
	}
	spin_unlock(&mm->page_table_lock);
	return (pmd_t *)ret;
}

static pmd_t *__alloc_for_pmdcache(struct mm_struct *mm)
{
	void *ret = NULL;
	struct page *page;
	gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO;

	if (mm == &init_mm)
		gfp &= ~__GFP_ACCOUNT;
	page = alloc_page(gfp);
	if (!page)
		return NULL;
	if (!pgtable_pmd_page_ctor(page)) {
		__free_pages(page, 0);
		return NULL;
	}

	ret = page_address(page);
	/*
	 * If we support only one fragment, just return the
	 * allocated page.
	 */
	if (PMD_FRAG_NR == 1)
		return ret;

	spin_lock(&mm->page_table_lock);
	/*
	 * If another thread already installed a fragment page,
	 * return our allocated page with a single-fragment
	 * count instead.
	 */
	if (likely(!mm->context.pmd_frag)) {
		set_page_count(page, PMD_FRAG_NR);
		mm->context.pmd_frag = ret + PMD_FRAG_SIZE;
	}
	spin_unlock(&mm->page_table_lock);

	return (pmd_t *)ret;
}

pmd_t *pmd_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr)
{
	pmd_t *pmd;

	pmd = get_pmd_from_cache(mm);
	if (pmd)
		return pmd;

	return __alloc_for_pmdcache(mm);
}

void pmd_fragment_free(unsigned long *pmd)
{
	struct page *page = virt_to_page(pmd);

	if (put_page_testzero(page)) {
		pgtable_pmd_page_dtor(page);
		free_unref_page(page);
	}
}

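/*
 * PTE fragments below follow the same scheme as the PMD fragments above,
 * just with PTE_FRAG_SIZE/PTE_FRAG_NR and mm->context.pte_frag as the
 * per-mm cursor.
 */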
static pte_t *get_pte_from_cache(struct mm_struct *mm)
{
	void *pte_frag, *ret;

	spin_lock(&mm->page_table_lock);
	ret = mm->context.pte_frag;
	if (ret) {
		pte_frag = ret + PTE_FRAG_SIZE;
		/*
		 * If we have taken up all the fragments, mark the PTE page NULL
		 */
		if (((unsigned long)pte_frag & ~PAGE_MASK) == 0)
			pte_frag = NULL;
		mm->context.pte_frag = pte_frag;
	}
	spin_unlock(&mm->page_table_lock);
	return (pte_t *)ret;
}

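/*
 * User page tables get pgtable_page_ctor() (split PTL setup) and GFP
 * accounting; kernel page tables need neither, hence the two branches in
 * the allocation below.
 */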
static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel)
{
	void *ret = NULL;
	struct page *page;

	if (!kernel) {
		page = alloc_page(PGALLOC_GFP | __GFP_ACCOUNT);
		if (!page)
			return NULL;
		if (!pgtable_page_ctor(page)) {
			__free_page(page);
			return NULL;
		}
	} else {
		page = alloc_page(PGALLOC_GFP);
		if (!page)
			return NULL;
	}

	ret = page_address(page);
	/*
	 * If we support only one fragment, just return the
	 * allocated page.
	 */
	if (PTE_FRAG_NR == 1)
		return ret;
	spin_lock(&mm->page_table_lock);
	/*
	 * If another thread already installed a fragment page,
	 * return our allocated page with a single-fragment
	 * count instead.
	 */
	if (likely(!mm->context.pte_frag)) {
		set_page_count(page, PTE_FRAG_NR);
		mm->context.pte_frag = ret + PTE_FRAG_SIZE;
	}
	spin_unlock(&mm->page_table_lock);

	return (pte_t *)ret;
}

pte_t *pte_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel)
{
	pte_t *pte;

	pte = get_pte_from_cache(mm);
	if (pte)
		return pte;

	return __alloc_for_ptecache(mm, kernel);
}

void pte_fragment_free(unsigned long *table, int kernel)
{
	struct page *page = virt_to_page(table);

	if (put_page_testzero(page)) {
		if (!kernel)
			pgtable_page_dtor(page);
		free_unref_page(page);
	}
}

static inline void pgtable_free(void *table, int index)
{
	switch (index) {
	case PTE_INDEX:
		pte_fragment_free(table, 0);
		break;
	case PMD_INDEX:
		pmd_fragment_free(table);
		break;
	case PUD_INDEX:
		kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), table);
		break;
#if defined(CONFIG_PPC_4K_PAGES) && defined(CONFIG_HUGETLB_PAGE)
	/* 16M hugepd directory at pud level */
	case HTLB_16M_INDEX:
		BUILD_BUG_ON(H_16M_CACHE_INDEX <= 0);
		kmem_cache_free(PGT_CACHE(H_16M_CACHE_INDEX), table);
		break;
	/* 16G hugepd directory at the pgd level */
	case HTLB_16G_INDEX:
		BUILD_BUG_ON(H_16G_CACHE_INDEX <= 0);
		kmem_cache_free(PGT_CACHE(H_16G_CACHE_INDEX), table);
		break;
#endif
	/* We don't free pgd table via RCU callback */
	default:
		BUG();
	}
}

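/*
 * Deferred freeing under CONFIG_SMP: page table pages are more than
 * MAX_PGTABLE_INDEX_SIZE aligned, so the table type index can be stashed
 * in the low bits of the pointer handed to tlb_remove_table() and
 * recovered in __tlb_remove_table() once it is safe to free the page,
 * i.e. after concurrent lockless walkers can no longer see it.
 */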
#ifdef CONFIG_SMP
void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int index)
{
	unsigned long pgf = (unsigned long)table;

	BUG_ON(index > MAX_PGTABLE_INDEX_SIZE);
	pgf |= index;
	tlb_remove_table(tlb, (void *)pgf);
}

void __tlb_remove_table(void *_table)
{
	void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
	unsigned int index = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;

	return pgtable_free(table, index);
}
#else
void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int index)
{
	return pgtable_free(table, index);
}
#endif