/*
 * This file contains ioremap and related functions for 64-bit machines.
 *
 * Derived from arch/ppc64/mm/init.c
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@samba.org)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 * Copyright (C) 1996 Paul Mackerras
 *
 * Derived from "arch/i386/mm/init.c"
 *   Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * Dave Engebretsen <engebret@us.ibm.com>
 *     Rework for PPC64 port.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

23
Paul Mackerras14cf11a2005-09-26 16:04:21 +100024#include <linux/signal.h>
25#include <linux/sched.h>
26#include <linux/kernel.h>
27#include <linux/errno.h>
28#include <linux/string.h>
Paul Gortmaker66b15db2011-05-27 10:46:24 -040029#include <linux/export.h>
Paul Mackerras14cf11a2005-09-26 16:04:21 +100030#include <linux/types.h>
31#include <linux/mman.h>
32#include <linux/mm.h>
33#include <linux/swap.h>
34#include <linux/stddef.h>
35#include <linux/vmalloc.h>
Yinghai Lu95f72d12010-07-12 14:36:09 +100036#include <linux/memblock.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090037#include <linux/slab.h>
Aneesh Kumar K.V06743522014-11-05 21:57:39 +053038#include <linux/hugetlb.h>
Paul Mackerras14cf11a2005-09-26 16:04:21 +100039
40#include <asm/pgalloc.h>
41#include <asm/page.h>
42#include <asm/prom.h>
Paul Mackerras14cf11a2005-09-26 16:04:21 +100043#include <asm/io.h>
44#include <asm/mmu_context.h>
45#include <asm/pgtable.h>
46#include <asm/mmu.h>
Paul Mackerras14cf11a2005-09-26 16:04:21 +100047#include <asm/smp.h>
48#include <asm/machdep.h>
49#include <asm/tlb.h>
Paul Mackerras14cf11a2005-09-26 16:04:21 +100050#include <asm/processor.h>
Paul Mackerras14cf11a2005-09-26 16:04:21 +100051#include <asm/cputable.h>
Paul Mackerras14cf11a2005-09-26 16:04:21 +100052#include <asm/sections.h>
Stephen Rothwell5e203d62006-09-25 13:36:31 +100053#include <asm/firmware.h>
Anton Blanchard68cf0d62014-09-17 22:15:35 +100054#include <asm/dma.h>
Alistair Popple1d0761d2016-12-14 13:36:51 +110055#include <asm/powernv.h>
David Gibson800fc3e2005-11-16 15:43:48 +110056
57#include "mmu_decl.h"
Paul Mackerras14cf11a2005-09-26 16:04:21 +100058
#ifdef CONFIG_PPC_STD_MMU_64
#if TASK_SIZE_USER64 > (1UL << (ESID_BITS + SID_SHIFT))
#error TASK_SIZE_USER64 exceeds user VSID range
#endif
#endif

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * Partition table and process table for ISA 3.0
 */
struct prtb_entry *process_tb;
struct patb_entry *partition_tb;
/*
 * page table size
 */
unsigned long __pte_index_size;
EXPORT_SYMBOL(__pte_index_size);
unsigned long __pmd_index_size;
EXPORT_SYMBOL(__pmd_index_size);
unsigned long __pud_index_size;
EXPORT_SYMBOL(__pud_index_size);
unsigned long __pgd_index_size;
EXPORT_SYMBOL(__pgd_index_size);
unsigned long __pmd_cache_index;
EXPORT_SYMBOL(__pmd_cache_index);
unsigned long __pte_table_size;
EXPORT_SYMBOL(__pte_table_size);
unsigned long __pmd_table_size;
EXPORT_SYMBOL(__pmd_table_size);
unsigned long __pud_table_size;
EXPORT_SYMBOL(__pud_table_size);
unsigned long __pgd_table_size;
EXPORT_SYMBOL(__pgd_table_size);
unsigned long __pmd_val_bits;
EXPORT_SYMBOL(__pmd_val_bits);
unsigned long __pud_val_bits;
EXPORT_SYMBOL(__pud_val_bits);
unsigned long __pgd_val_bits;
EXPORT_SYMBOL(__pgd_val_bits);
unsigned long __kernel_virt_start;
EXPORT_SYMBOL(__kernel_virt_start);
unsigned long __kernel_virt_size;
EXPORT_SYMBOL(__kernel_virt_size);
unsigned long __vmalloc_start;
EXPORT_SYMBOL(__vmalloc_start);
unsigned long __vmalloc_end;
EXPORT_SYMBOL(__vmalloc_end);
struct page *vmemmap;
EXPORT_SYMBOL(vmemmap);
unsigned long __pte_frag_nr;
EXPORT_SYMBOL(__pte_frag_nr);
unsigned long __pte_frag_size_shift;
EXPORT_SYMBOL(__pte_frag_size_shift);
unsigned long ioremap_bot;
#else /* !CONFIG_PPC_BOOK3S_64 */
unsigned long ioremap_bot = IOREMAP_BASE;
#endif

/**
 * __ioremap_at - Low level function to establish the page tables
 *                for an IO mapping
 */
void __iomem * __ioremap_at(phys_addr_t pa, void *ea, unsigned long size,
			    unsigned long flags)
{
	unsigned long i;

	/* Make sure we have the base flags */
	if ((flags & _PAGE_PRESENT) == 0)
		flags |= pgprot_val(PAGE_KERNEL);

	/* We don't support the 4K PFN hack with ioremap */
	if (flags & H_PAGE_4K_PFN)
		return NULL;

	WARN_ON(pa & ~PAGE_MASK);
	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
	WARN_ON(size & ~PAGE_MASK);

	for (i = 0; i < size; i += PAGE_SIZE)
		if (map_kernel_page((unsigned long)ea+i, pa+i, flags))
			return NULL;

	return (void __iomem *)ea;
}

/**
 * __iounmap_at - Low level function to tear down the page tables
 *                for an IO mapping. This is used for mappings that
 *                are manipulated manually, like partial unmapping of
 *                PCI IOs or ISA space.
 */
void __iounmap_at(void *ea, unsigned long size)
{
	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
	WARN_ON(size & ~PAGE_MASK);

	unmap_kernel_range((unsigned long)ea, size);
}

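/*
 * Example of the low-level pair above (an illustrative sketch only;
 * the physical address and size are hypothetical): bolt a fixed
 * virtual range to a PCI region and tear it down later:
 *
 *	void __iomem *p;
 *
 *	p = __ioremap_at(0xf0000000, (void *)ISA_IO_BASE, 0x10000,
 *			 pgprot_val(pgprot_noncached(__pgprot(0))));
 *	...
 *	__iounmap_at((void *)ISA_IO_BASE, 0x10000);
 */
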
void __iomem * __ioremap_caller(phys_addr_t addr, unsigned long size,
				unsigned long flags, void *caller)
{
	phys_addr_t paligned;
	void __iomem *ret;

	/*
	 * Choose an address to map it to. Once the slab allocator is up,
	 * we use the vmalloc system to pick one between ioremap_bot and
	 * IOREMAP_END. Before that, we map using addresses going up from
	 * ioremap_bot.
	 */
	paligned = addr & PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - paligned;

	if ((size == 0) || (paligned == 0))
		return NULL;

	if (slab_is_available()) {
		struct vm_struct *area;

		area = __get_vm_area_caller(size, VM_IOREMAP,
					    ioremap_bot, IOREMAP_END,
					    caller);
		if (area == NULL)
			return NULL;

		area->phys_addr = paligned;
		ret = __ioremap_at(paligned, area->addr, size, flags);
		if (!ret)
			vunmap(area->addr);
	} else {
		ret = __ioremap_at(paligned, (void *)ioremap_bot, size, flags);
		if (ret)
			ioremap_bot += size;
	}

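	/* Restore the sub-page offset that was masked off above */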
	if (ret)
		ret += addr & ~PAGE_MASK;
	return ret;
}

void __iomem * __ioremap(phys_addr_t addr, unsigned long size,
			 unsigned long flags)
{
	return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
}

void __iomem * ioremap(phys_addr_t addr, unsigned long size)
{
	unsigned long flags = pgprot_val(pgprot_noncached(__pgprot(0)));
	void *caller = __builtin_return_address(0);

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags, caller);
	return __ioremap_caller(addr, size, flags, caller);
}

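/*
 * Typical driver usage (an illustrative sketch only; the base address
 * and register offset are hypothetical):
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap(0xf8040000, 0x1000);
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(0x1, regs + 0x10);	// poke a device register
 *	iounmap(regs);
 */
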
void __iomem * ioremap_wc(phys_addr_t addr, unsigned long size)
{
	unsigned long flags = pgprot_val(pgprot_noncached_wc(__pgprot(0)));
	void *caller = __builtin_return_address(0);

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags, caller);
	return __ioremap_caller(addr, size, flags, caller);
}

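/*
 * Write-combining mappings are typically what framebuffer drivers
 * want, e.g. (illustrative sketch; fb_phys and fb_len are
 * hypothetical):
 *
 *	void __iomem *fb = ioremap_wc(fb_phys, fb_len);
 */
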
void __iomem * ioremap_prot(phys_addr_t addr, unsigned long size,
			    unsigned long flags)
{
	void *caller = __builtin_return_address(0);

	/* writeable implies dirty for kernel addresses */
	if (flags & _PAGE_WRITE)
		flags |= _PAGE_DIRTY;

	/* we don't want to let _PAGE_EXEC leak out */
	flags &= ~_PAGE_EXEC;
	/*
	 * Force kernel mapping.
	 */
#if defined(CONFIG_PPC_BOOK3S_64)
	flags |= _PAGE_PRIVILEGED;
#else
	flags &= ~_PAGE_USER;
#endif

#ifdef _PAGE_BAP_SR
	/* _PAGE_USER contains _PAGE_BAP_SR on BookE using the new PTE format
	 * which means that we just cleared supervisor access... oops ;-) This
	 * restores it
	 */
	flags |= _PAGE_BAP_SR;
#endif

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags, caller);
	return __ioremap_caller(addr, size, flags, caller);
}

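/*
 * Example (illustrative sketch only; dev_phys is a hypothetical
 * physical base): map a device range with kernel page protections,
 * letting the sanitisation above strip exec and user access:
 *
 *	void __iomem *p = ioremap_prot(dev_phys, 0x1000,
 *				       pgprot_val(PAGE_KERNEL));
 */
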
/*
 * Unmap an IO region and remove it from the vmalloc'd mappings.
 * Access to IO memory should be serialized by the driver.
 */
void __iounmap(volatile void __iomem *token)
{
	void *addr;

	if (!slab_is_available())
		return;

	addr = (void *) ((unsigned long __force)
			 PCI_FIX_ADDR(token) & PAGE_MASK);
	if ((unsigned long)addr < ioremap_bot) {
		printk(KERN_WARNING "Attempt to iounmap early bolted mapping"
		       " at 0x%p\n", addr);
		return;
	}
	vunmap(addr);
}

void iounmap(volatile void __iomem *token)
{
	if (ppc_md.iounmap)
		ppc_md.iounmap(token);
	else
		__iounmap(token);
}

EXPORT_SYMBOL(ioremap);
EXPORT_SYMBOL(ioremap_wc);
EXPORT_SYMBOL(ioremap_prot);
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(__ioremap_at);
EXPORT_SYMBOL(iounmap);
EXPORT_SYMBOL(__iounmap);
EXPORT_SYMBOL(__iounmap_at);

#ifndef __PAGETABLE_PUD_FOLDED
/* 4 level page table */
struct page *pgd_page(pgd_t pgd)
{
	if (pgd_huge(pgd))
		return pte_page(pgd_pte(pgd));
	return virt_to_page(pgd_page_vaddr(pgd));
}
#endif

struct page *pud_page(pud_t pud)
{
	if (pud_huge(pud))
		return pte_page(pud_pte(pud));
	return virt_to_page(pud_page_vaddr(pud));
}

/*
 * For a hugepage we have the pfn in the pmd itself and use the low
 * PTE_RPN_SHIFT bits for flags.
 * For a PTE page we have a PTE_FRAG_SIZE (4K) aligned virtual address.
 */
struct page *pmd_page(pmd_t pmd)
{
	if (pmd_trans_huge(pmd) || pmd_huge(pmd))
		return pte_page(pmd_pte(pmd));
	return virt_to_page(pmd_page_vaddr(pmd));
}

#ifdef CONFIG_PPC_64K_PAGES
static pte_t *get_from_cache(struct mm_struct *mm)
{
	void *pte_frag, *ret;

	spin_lock(&mm->page_table_lock);
	ret = mm->context.pte_frag;
	if (ret) {
		pte_frag = ret + PTE_FRAG_SIZE;
		/*
		 * If we have taken up all the fragments, mark the PTE page NULL
		 */
		if (((unsigned long)pte_frag & ~PAGE_MASK) == 0)
			pte_frag = NULL;
		mm->context.pte_frag = pte_frag;
	}
	spin_unlock(&mm->page_table_lock);
	return (pte_t *)ret;
}

static pte_t *__alloc_for_cache(struct mm_struct *mm, int kernel)
{
	void *ret = NULL;
	struct page *page;

	if (!kernel) {
		page = alloc_page(PGALLOC_GFP | __GFP_ACCOUNT);
		if (!page)
			return NULL;
		if (!pgtable_page_ctor(page)) {
			__free_page(page);
			return NULL;
		}
	} else {
		page = alloc_page(PGALLOC_GFP);
		if (!page)
			return NULL;
	}

	ret = page_address(page);
	spin_lock(&mm->page_table_lock);
	/*
	 * If we find pte_frag already set (we raced with another
	 * allocation), return the page with a single fragment
	 * reference; otherwise split it into PTE_FRAG_NR fragments
	 * and publish the remainder for later allocations.
	 */
	if (likely(!mm->context.pte_frag)) {
		set_page_count(page, PTE_FRAG_NR);
		mm->context.pte_frag = ret + PTE_FRAG_SIZE;
	}
	spin_unlock(&mm->page_table_lock);

	return (pte_t *)ret;
}

pte_t *pte_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel)
{
	pte_t *pte;

	pte = get_from_cache(mm);
	if (pte)
		return pte;

	return __alloc_for_cache(mm, kernel);
}
#endif /* CONFIG_PPC_64K_PAGES */

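/*
 * A sketch of the fragment arithmetic (assuming 64K pages with
 * PTE_FRAG_SIZE == 4K, so PTE_FRAG_NR == 64K / 4K == 16): one backing
 * page hands out 16 PTE pages. get_from_cache() advances the
 * mm->context.pte_frag cursor by PTE_FRAG_SIZE per allocation and
 * resets it to NULL once it wraps to a page boundary, i.e. when
 * (cursor & ~PAGE_MASK) == 0.
 */
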
void pte_fragment_free(unsigned long *table, int kernel)
{
	struct page *page = virt_to_page(table);
	if (put_page_testzero(page)) {
		if (!kernel)
			pgtable_page_dtor(page);
		free_hot_cold_page(page, 0);
	}
}

#ifdef CONFIG_SMP
void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
{
	unsigned long pgf = (unsigned long)table;

	BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
	pgf |= shift;
	tlb_remove_table(tlb, (void *)pgf);
}

void __tlb_remove_table(void *_table)
{
	void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
	unsigned shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;

	if (!shift)
		/* PTE page needs special handling */
		pte_fragment_free(table, 0);
	else {
		BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
		kmem_cache_free(PGT_CACHE(shift), table);
	}
}
#else
void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
{
	if (!shift) {
		/* PTE page needs special handling */
		pte_fragment_free(table, 0);
	} else {
		BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
		kmem_cache_free(PGT_CACHE(shift), table);
	}
}
#endif
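
/*
 * A worked example of the encoding used by pgtable_free_tlb() above
 * (illustrative): page table allocations are aligned well past
 * MAX_PGTABLE_INDEX_SIZE, so the low bits of the table pointer are
 * known to be zero and can carry the index size. For shift == 9:
 *
 *	pgf   = table | 9;			// stash the shift
 *	table = pgf & ~MAX_PGTABLE_INDEX_SIZE;	// recover the pointer
 *	shift = pgf & MAX_PGTABLE_INDEX_SIZE;	// recover the shift
 *
 * shift == 0 is reserved to mean "PTE fragment", which must be freed
 * via pte_fragment_free() rather than a kmem cache.
 */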

#ifdef CONFIG_PPC_BOOK3S_64
void __init mmu_partition_table_init(void)
{
	unsigned long patb_size = 1UL << PATB_SIZE_SHIFT;
	unsigned long ptcr;

	BUILD_BUG_ON_MSG((PATB_SIZE_SHIFT > 36), "Partition table size too large.");
	partition_tb = __va(memblock_alloc_base(patb_size, patb_size,
						MEMBLOCK_ALLOC_ANYWHERE));

	/* Initialize the Partition Table with no entries */
	memset((void *)partition_tb, 0, patb_size);

	/*
	 * Update the partition table control register with the table's
	 * base address and its encoded size.
	 */
	ptcr = __pa(partition_tb) | (PATB_SIZE_SHIFT - 12);
	mtspr(SPRN_PTCR, ptcr);
	powernv_set_nmmu_ptcr(ptcr);
}
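
/*
 * Worked example of the PTCR encoding above (assuming
 * PATB_SIZE_SHIFT == 16, i.e. a 64K partition table): the low bits
 * of the PTCR hold the table size as log2(size) - 12, so
 *
 *	ptcr = __pa(partition_tb) | (16 - 12) = base | 4
 *
 * and the same value is pushed to the nest MMU via
 * powernv_set_nmmu_ptcr().
 */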

void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0,
				   unsigned long dw1)
{
	unsigned long old = be64_to_cpu(partition_tb[lpid].patb0);

	partition_tb[lpid].patb0 = cpu_to_be64(dw0);
	partition_tb[lpid].patb1 = cpu_to_be64(dw1);

	/*
	 * Global flush of TLBs and partition table caches for this lpid.
	 * The type of flush (hash or radix) depends on what the previous
	 * use of this partition ID was, not the new use.
	 */
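	/*
	 * PPC_TLBIE_5 operands are (RB, RS, RIC, PRS, R): RIC=2 invalidates
	 * all cached entries including the partition/process table caches,
	 * PRS=0 selects partition-scoped entries, and R says whether the
	 * stale entries were radix (R=1) or hash (R=0).
	 */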
	asm volatile("ptesync" : : : "memory");
	if (old & PATB_HR)
		asm volatile(PPC_TLBIE_5(%0,%1,2,0,1) : :
			     "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
	else
		asm volatile(PPC_TLBIE_5(%0,%1,2,0,0) : :
			     "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
	asm volatile("eieio; tlbsync; ptesync" : : : "memory");
}
EXPORT_SYMBOL_GPL(mmu_partition_table_set_entry);
#endif /* CONFIG_PPC_BOOK3S_64 */