/*
 * This file contains ioremap and related functions for 64-bit machines.
 *
 * Derived from arch/ppc64/mm/init.c
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@samba.org)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 * Copyright (C) 1996 Paul Mackerras
 *
 * Derived from "arch/i386/mm/init.c"
 *   Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 * Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/trace.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/firmware.h>
#include <asm/dma.h>
#include <asm/powernv.h>

#include "mmu_decl.h"

#ifdef CONFIG_PPC_STD_MMU_64
#if TASK_SIZE_USER64 > (1UL << (ESID_BITS + SID_SHIFT))
#error TASK_SIZE_USER64 exceeds user VSID range
#endif
#endif

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * Partition table and process table for ISA 3.0
 */
struct prtb_entry *process_tb;
struct patb_entry *partition_tb;
/*
 * page table size
 */
unsigned long __pte_index_size;
EXPORT_SYMBOL(__pte_index_size);
unsigned long __pmd_index_size;
EXPORT_SYMBOL(__pmd_index_size);
unsigned long __pud_index_size;
EXPORT_SYMBOL(__pud_index_size);
unsigned long __pgd_index_size;
EXPORT_SYMBOL(__pgd_index_size);
unsigned long __pmd_cache_index;
EXPORT_SYMBOL(__pmd_cache_index);
unsigned long __pte_table_size;
EXPORT_SYMBOL(__pte_table_size);
unsigned long __pmd_table_size;
EXPORT_SYMBOL(__pmd_table_size);
unsigned long __pud_table_size;
EXPORT_SYMBOL(__pud_table_size);
unsigned long __pgd_table_size;
EXPORT_SYMBOL(__pgd_table_size);
unsigned long __pmd_val_bits;
EXPORT_SYMBOL(__pmd_val_bits);
unsigned long __pud_val_bits;
EXPORT_SYMBOL(__pud_val_bits);
unsigned long __pgd_val_bits;
EXPORT_SYMBOL(__pgd_val_bits);
unsigned long __kernel_virt_start;
EXPORT_SYMBOL(__kernel_virt_start);
unsigned long __kernel_virt_size;
EXPORT_SYMBOL(__kernel_virt_size);
unsigned long __vmalloc_start;
EXPORT_SYMBOL(__vmalloc_start);
unsigned long __vmalloc_end;
EXPORT_SYMBOL(__vmalloc_end);
unsigned long __kernel_io_start;
EXPORT_SYMBOL(__kernel_io_start);
struct page *vmemmap;
EXPORT_SYMBOL(vmemmap);
unsigned long __pte_frag_nr;
EXPORT_SYMBOL(__pte_frag_nr);
unsigned long __pte_frag_size_shift;
EXPORT_SYMBOL(__pte_frag_size_shift);
unsigned long ioremap_bot;
#else /* !CONFIG_PPC_BOOK3S_64 */
unsigned long ioremap_bot = IOREMAP_BASE;
#endif
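
/*
 * Note (editorial summary of the code below): ioremap_bot tracks the top
 * of the early (bolted) IO mappings created before the vmalloc allocator
 * is up; at runtime, __ioremap_caller() allocates vmap space between
 * ioremap_bot and IOREMAP_END.
 */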

/**
 * __ioremap_at - Low level function to establish the page tables
 *                for an IO mapping
 */
void __iomem * __ioremap_at(phys_addr_t pa, void *ea, unsigned long size,
			    unsigned long flags)
{
	unsigned long i;

	/* Make sure we have the base flags */
	if ((flags & _PAGE_PRESENT) == 0)
		flags |= pgprot_val(PAGE_KERNEL);

	/* We don't support the 4K PFN hack with ioremap */
	if (flags & H_PAGE_4K_PFN)
		return NULL;

	WARN_ON(pa & ~PAGE_MASK);
	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
	WARN_ON(size & ~PAGE_MASK);

	for (i = 0; i < size; i += PAGE_SIZE)
		if (map_kernel_page((unsigned long)ea + i, pa + i, flags))
			return NULL;

	return (void __iomem *)ea;
}

/**
 * __iounmap_at - Low level function to tear down the page tables
 *                for an IO mapping. This is used for mappings that
 *                are manipulated manually, like partial unmapping of
 *                PCI IOs or ISA space.
 */
void __iounmap_at(void *ea, unsigned long size)
{
	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
	WARN_ON(size & ~PAGE_MASK);

	unmap_kernel_range((unsigned long)ea, size);
}

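/*
 * __ioremap_caller - map a physical range into the IO region, recording
 * @caller so the mapping is attributed usefully in /proc/vmallocinfo.
 */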
void __iomem * __ioremap_caller(phys_addr_t addr, unsigned long size,
				unsigned long flags, void *caller)
{
	phys_addr_t paligned;
	void __iomem *ret;

	/*
	 * Choose an address to map it to. Once the vmalloc system is
	 * running, we use it. Before that, we map using addresses going
	 * up from ioremap_bot; the vmalloc code uses the addresses from
	 * ioremap_bot through IOREMAP_END.
	 */
	paligned = addr & PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - paligned;

	if ((size == 0) || (paligned == 0))
		return NULL;

	if (slab_is_available()) {
		struct vm_struct *area;

		area = __get_vm_area_caller(size, VM_IOREMAP,
					    ioremap_bot, IOREMAP_END,
					    caller);
		if (area == NULL)
			return NULL;

		area->phys_addr = paligned;
		ret = __ioremap_at(paligned, area->addr, size, flags);
		if (!ret)
			vunmap(area->addr);
	} else {
		ret = __ioremap_at(paligned, (void *)ioremap_bot, size, flags);
		if (ret)
			ioremap_bot += size;
	}

	if (ret)
		ret += addr & ~PAGE_MASK;
	return ret;
}

void __iomem * __ioremap(phys_addr_t addr, unsigned long size,
			 unsigned long flags)
{
	return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
}

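/*
 * The public variants, summarized (editorial comment):
 * ioremap()      - default uncached, guarded mapping for MMIO.
 * ioremap_wc()   - write-combining mapping (e.g. for frame buffers).
 * ioremap_prot() - mapping with caller-supplied protection flags,
 *                  sanitized below before use.
 */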
void __iomem * ioremap(phys_addr_t addr, unsigned long size)
{
	unsigned long flags = pgprot_val(pgprot_noncached(__pgprot(0)));
	void *caller = __builtin_return_address(0);

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags, caller);
	return __ioremap_caller(addr, size, flags, caller);
}

void __iomem * ioremap_wc(phys_addr_t addr, unsigned long size)
{
	unsigned long flags = pgprot_val(pgprot_noncached_wc(__pgprot(0)));
	void *caller = __builtin_return_address(0);

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags, caller);
	return __ioremap_caller(addr, size, flags, caller);
}

void __iomem * ioremap_prot(phys_addr_t addr, unsigned long size,
			    unsigned long flags)
{
	void *caller = __builtin_return_address(0);

	/* writeable implies dirty for kernel addresses */
	if (flags & _PAGE_WRITE)
		flags |= _PAGE_DIRTY;

	/* we don't want to let _PAGE_EXEC leak out */
	flags &= ~_PAGE_EXEC;
	/*
	 * Force kernel mapping.
	 */
#if defined(CONFIG_PPC_BOOK3S_64)
	flags |= _PAGE_PRIVILEGED;
#else
	flags &= ~_PAGE_USER;
#endif

#ifdef _PAGE_BAP_SR
	/* _PAGE_USER contains _PAGE_BAP_SR on BookE using the new PTE format
	 * which means that we just cleared supervisor access... oops ;-) This
	 * restores it
	 */
	flags |= _PAGE_BAP_SR;
#endif

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags, caller);
	return __ioremap_caller(addr, size, flags, caller);
}

/*
 * Unmap an IO region and remove it from the vmalloc'd mappings.
 * Access to IO memory should be serialized by the driver.
 */
void __iounmap(volatile void __iomem *token)
{
	void *addr;

	if (!slab_is_available())
		return;

	addr = (void *) ((unsigned long __force)
			 PCI_FIX_ADDR(token) & PAGE_MASK);
	if ((unsigned long)addr < ioremap_bot) {
		printk(KERN_WARNING "Attempt to iounmap early bolted mapping"
		       " at 0x%p\n", addr);
		return;
	}
	vunmap(addr);
}

void iounmap(volatile void __iomem *token)
{
	if (ppc_md.iounmap)
		ppc_md.iounmap(token);
	else
		__iounmap(token);
}
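
/*
 * Example usage (a hypothetical sketch, not code from this file; the
 * register offset REG_CTRL is an assumed name):
 *
 *	void __iomem *regs = ioremap(res->start, resource_size(res));
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + REG_CTRL);
 *	iounmap(regs);
 */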

EXPORT_SYMBOL(ioremap);
EXPORT_SYMBOL(ioremap_wc);
EXPORT_SYMBOL(ioremap_prot);
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(__ioremap_at);
EXPORT_SYMBOL(iounmap);
EXPORT_SYMBOL(__iounmap);
EXPORT_SYMBOL(__iounmap_at);

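/*
 * Helpers returning the struct page backing a page-table entry at each
 * level. Each must handle the huge-page (leaf) case, where the entry
 * holds a PTE rather than the address of a lower-level table.
 */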
#ifndef __PAGETABLE_PUD_FOLDED
/* 4 level page table */
struct page *pgd_page(pgd_t pgd)
{
	if (pgd_huge(pgd))
		return pte_page(pgd_pte(pgd));
	return virt_to_page(pgd_page_vaddr(pgd));
}
#endif

struct page *pud_page(pud_t pud)
{
	if (pud_huge(pud))
		return pte_page(pud_pte(pud));
	return virt_to_page(pud_page_vaddr(pud));
}

/*
 * For a hugepage we have the pfn in the pmd and use PTE_RPN_SHIFT bits
 * for flags. For a PTE page, we have a PTE_FRAG_SIZE (4K) aligned
 * virtual address.
 */
struct page *pmd_page(pmd_t pmd)
{
	if (pmd_trans_huge(pmd) || pmd_huge(pmd) || pmd_devmap(pmd))
		return pte_page(pmd_pte(pmd));
	return virt_to_page(pmd_page_vaddr(pmd));
}

#ifdef CONFIG_PPC_64K_PAGES
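/*
 * Editorial summary of the fragment scheme below: with 64K pages a full
 * page is too big for one PTE page, so pages are carved into
 * PTE_FRAG_SIZE fragments, PTE_FRAG_NR to a page (with 4K fragments in
 * a 64K page, that is 16). The page's refcount tracks how many
 * fragments are in use; mm->context.pte_frag caches the next free
 * fragment of the current page.
 */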
static pte_t *get_from_cache(struct mm_struct *mm)
{
	void *pte_frag, *ret;

	spin_lock(&mm->page_table_lock);
	ret = mm->context.pte_frag;
	if (ret) {
		pte_frag = ret + PTE_FRAG_SIZE;
		/*
		 * If we have taken up all the fragments mark PTE page NULL
		 */
		if (((unsigned long)pte_frag & ~PAGE_MASK) == 0)
			pte_frag = NULL;
		mm->context.pte_frag = pte_frag;
	}
	spin_unlock(&mm->page_table_lock);
	return (pte_t *)ret;
}

static pte_t *__alloc_for_cache(struct mm_struct *mm, int kernel)
{
	void *ret = NULL;
	struct page *page;

	if (!kernel) {
		page = alloc_page(PGALLOC_GFP | __GFP_ACCOUNT);
		if (!page)
			return NULL;
		if (!pgtable_page_ctor(page)) {
			__free_page(page);
			return NULL;
		}
	} else {
		page = alloc_page(PGALLOC_GFP);
		if (!page)
			return NULL;
	}

	ret = page_address(page);
	spin_lock(&mm->page_table_lock);
	/*
	 * If someone else seeded the cache while we allocated, return the
	 * new page with a single fragment count. Otherwise seed the cache
	 * from this page and stash the next fragment.
	 */
	if (likely(!mm->context.pte_frag)) {
		set_page_count(page, PTE_FRAG_NR);
		mm->context.pte_frag = ret + PTE_FRAG_SIZE;
	}
	spin_unlock(&mm->page_table_lock);

	return (pte_t *)ret;
}

pte_t *pte_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel)
{
	pte_t *pte;

	pte = get_from_cache(mm);
	if (pte)
		return pte;

	return __alloc_for_cache(mm, kernel);
}
#endif /* CONFIG_PPC_64K_PAGES */

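/*
 * Drop a reference on a PTE fragment's page; the backing page is only
 * freed once the last fragment is released.
 */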
void pte_fragment_free(unsigned long *table, int kernel)
{
	struct page *page = virt_to_page(table);

	if (put_page_testzero(page)) {
		if (!kernel)
			pgtable_page_dtor(page);
		free_hot_cold_page(page, 0);
	}
}

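/*
 * Page-table pages freed via the TLB gather carry their PGT_CACHE index
 * in the low bits of the pointer (0 denotes a PTE fragment), so the
 * free path can route each page to the right destructor after the TLB
 * flush.
 */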
#ifdef CONFIG_SMP
void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
{
	unsigned long pgf = (unsigned long)table;

	BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
	pgf |= shift;
	tlb_remove_table(tlb, (void *)pgf);
}

void __tlb_remove_table(void *_table)
{
	void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
	unsigned shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;

	if (!shift)
		/* PTE page needs special handling */
		pte_fragment_free(table, 0);
	else {
		BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
		kmem_cache_free(PGT_CACHE(shift), table);
	}
}
#else
void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
{
	if (!shift) {
		/* PTE page needs special handling */
		pte_fragment_free(table, 0);
	} else {
		BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
		kmem_cache_free(PGT_CACHE(shift), table);
	}
}
#endif

#ifdef CONFIG_PPC_BOOK3S_64
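/*
 * Allocate the ISA 3.0 partition table and point the hardware at it.
 * The table is 1 << PATB_SIZE_SHIFT bytes and must be naturally aligned.
 */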
void __init mmu_partition_table_init(void)
{
	unsigned long patb_size = 1UL << PATB_SIZE_SHIFT;
	unsigned long ptcr;

	BUILD_BUG_ON_MSG((PATB_SIZE_SHIFT > 36), "Partition table size too large.");
	partition_tb = __va(memblock_alloc_base(patb_size, patb_size,
						MEMBLOCK_ALLOC_ANYWHERE));

	/* Initialize the Partition Table with no entries */
	memset((void *)partition_tb, 0, patb_size);

	/*
	 * Update the partition table control register: the physical base
	 * of the table plus the encoded size (PATB_SIZE_SHIFT - 12).
	 */
	ptcr = __pa(partition_tb) | (PATB_SIZE_SHIFT - 12);
	mtspr(SPRN_PTCR, ptcr);
	powernv_set_nmmu_ptcr(ptcr);
}

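/*
 * Install both doublewords of the partition-table entry for @lpid and
 * flush any translations cached under that partition ID.
 */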
void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0,
				   unsigned long dw1)
{
	unsigned long old = be64_to_cpu(partition_tb[lpid].patb0);

	partition_tb[lpid].patb0 = cpu_to_be64(dw0);
	partition_tb[lpid].patb1 = cpu_to_be64(dw1);

	/*
	 * Global flush of TLBs and partition table caches for this lpid.
	 * The type of flush (hash or radix) depends on what the previous
	 * use of this partition ID was, not the new use.
	 */
	asm volatile("ptesync" : : : "memory");
	if (old & PATB_HR) {
		asm volatile(PPC_TLBIE_5(%0,%1,2,0,1) : :
			     "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
		trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 1);
	} else {
		asm volatile(PPC_TLBIE_5(%0,%1,2,0,0) : :
			     "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
		trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 0);
	}
	asm volatile("eieio; tlbsync; ptesync" : : : "memory");
}
EXPORT_SYMBOL_GPL(mmu_partition_table_set_entry);
#endif /* CONFIG_PPC_BOOK3S_64 */

#ifdef CONFIG_STRICT_KERNEL_RWX
void mark_rodata_ro(void)
{
	if (!mmu_has_feature(MMU_FTR_KERNEL_RO)) {
		pr_warn("Warning: Unable to mark rodata read only on this CPU.\n");
		return;
	}

	if (radix_enabled())
		radix__mark_rodata_ro();
	else
		hash__mark_rodata_ro();
}

void mark_initmem_nx(void)
{
	if (radix_enabled())
		radix__mark_initmem_nx();
	else
		hash__mark_initmem_nx();
}
#endif