/*
 * This file contains ioremap and related functions for 64-bit machines.
 *
 * Derived from arch/ppc64/mm/init.c
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@samba.org)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *   Copyright (C) 1996 Paul Mackerras
 *
 * Derived from "arch/i386/mm/init.c"
 *   Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * Dave Engebretsen <engebret@us.ibm.com>
 *   Rework for PPC64 port.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/firmware.h>
#include <asm/dma.h>

#include "mmu_decl.h"

#define CREATE_TRACE_POINTS
#include <trace/events/thp.h>

/* Some sanity checking */
#if TASK_SIZE_USER64 > PGTABLE_RANGE
#error TASK_SIZE_USER64 exceeds pagetable range
#endif

#ifdef CONFIG_PPC_STD_MMU_64
#if TASK_SIZE_USER64 > (1UL << (ESID_BITS + SID_SHIFT))
#error TASK_SIZE_USER64 exceeds user VSID range
#endif
#endif

unsigned long ioremap_bot = IOREMAP_BASE;

#ifdef CONFIG_PPC_MMU_NOHASH
static __ref void *early_alloc_pgtable(unsigned long size)
{
        void *pt;

        pt = __va(memblock_alloc_base(size, size, __pa(MAX_DMA_ADDRESS)));
        memset(pt, 0, size);

        return pt;
}
#endif /* CONFIG_PPC_MMU_NOHASH */

/*
 * map_kernel_page is currently only called by __ioremap. It adds an
 * entry to the ioremap page table and, on hash MMUs, an entry to the
 * HPT, possibly bolting it.
 */
int map_kernel_page(unsigned long ea, unsigned long pa, unsigned long flags)
{
        pgd_t *pgdp;
        pud_t *pudp;
        pmd_t *pmdp;
        pte_t *ptep;

        if (slab_is_available()) {
                pgdp = pgd_offset_k(ea);
                pudp = pud_alloc(&init_mm, pgdp, ea);
                if (!pudp)
                        return -ENOMEM;
                pmdp = pmd_alloc(&init_mm, pudp, ea);
                if (!pmdp)
                        return -ENOMEM;
                ptep = pte_alloc_kernel(pmdp, ea);
                if (!ptep)
                        return -ENOMEM;
                set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
                                                       __pgprot(flags)));
        } else {
#ifdef CONFIG_PPC_MMU_NOHASH
                pgdp = pgd_offset_k(ea);
#ifdef PUD_TABLE_SIZE
                if (pgd_none(*pgdp)) {
                        pudp = early_alloc_pgtable(PUD_TABLE_SIZE);
                        BUG_ON(pudp == NULL);
                        pgd_populate(&init_mm, pgdp, pudp);
                }
#endif /* PUD_TABLE_SIZE */
                pudp = pud_offset(pgdp, ea);
                if (pud_none(*pudp)) {
                        pmdp = early_alloc_pgtable(PMD_TABLE_SIZE);
                        BUG_ON(pmdp == NULL);
                        pud_populate(&init_mm, pudp, pmdp);
                }
                pmdp = pmd_offset(pudp, ea);
                if (!pmd_present(*pmdp)) {
                        ptep = early_alloc_pgtable(PAGE_SIZE);
                        BUG_ON(ptep == NULL);
                        pmd_populate_kernel(&init_mm, pmdp, ptep);
                }
                ptep = pte_offset_kernel(pmdp, ea);
                set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
                                                       __pgprot(flags)));
#else /* CONFIG_PPC_MMU_NOHASH */
                /*
                 * If the mm subsystem is not fully up, we cannot create a
                 * linux page table entry for this mapping.  Simply bolt an
                 * entry in the hardware page table.
                 */
                if (htab_bolt_mapping(ea, ea + PAGE_SIZE, pa, flags,
                                      mmu_io_psize, mmu_kernel_ssize)) {
                        printk(KERN_ERR "Failed to do bolted mapping IO "
                               "memory at %016lx !\n", pa);
                        return -ENOMEM;
                }
#endif /* !CONFIG_PPC_MMU_NOHASH */
        }

        smp_wmb();
        return 0;
}
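
/*
 * Illustrative sketch (not part of the code in this file): a caller
 * establishing a single uncached page mapping could invoke the helper
 * above roughly as follows; MY_IO_VADDR and MY_IO_PADDR are
 * hypothetical placeholders for a reserved virtual address and a
 * device physical address.
 *
 *      unsigned long flags = pgprot_val(PAGE_KERNEL) |
 *                            _PAGE_NO_CACHE | _PAGE_GUARDED;
 *
 *      if (map_kernel_page(MY_IO_VADDR, MY_IO_PADDR, flags))
 *              return -ENOMEM;         // page table allocation failed
 *
 * Before slab_is_available(), the hash-MMU branch above bolts the
 * translation straight into the HPT instead of building Linux page
 * tables.
 */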

/**
 * __ioremap_at - Low level function to establish the page tables
 *                for an IO mapping
 */
void __iomem *__ioremap_at(phys_addr_t pa, void *ea, unsigned long size,
                           unsigned long flags)
{
        unsigned long i;

        /* Make sure we have the base flags */
        if ((flags & _PAGE_PRESENT) == 0)
                flags |= pgprot_val(PAGE_KERNEL);

        /* Non-cacheable page cannot be coherent */
        if (flags & _PAGE_NO_CACHE)
                flags &= ~_PAGE_COHERENT;

        /* We don't support the 4K PFN hack with ioremap */
        if (flags & _PAGE_4K_PFN)
                return NULL;

        WARN_ON(pa & ~PAGE_MASK);
        WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
        WARN_ON(size & ~PAGE_MASK);

        for (i = 0; i < size; i += PAGE_SIZE)
                if (map_kernel_page((unsigned long)ea + i, pa + i, flags))
                        return NULL;

        return (void __iomem *)ea;
}

/**
 * __iounmap_at - Low level function to tear down the page tables
 *                for an IO mapping. This is used for mappings that
 *                are manipulated manually, like partial unmapping of
 *                PCI IOs or ISA space.
 */
void __iounmap_at(void *ea, unsigned long size)
{
        WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
        WARN_ON(size & ~PAGE_MASK);

        unmap_kernel_range((unsigned long)ea, size);
}
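
/*
 * Illustrative sketch: these two low level helpers let callers such as
 * the PCI code manage a mapping piecewise. Assuming a reserved virtual
 * range starting at "ea" (hypothetical), one could map 64K of physical
 * space and later tear down just its upper half:
 *
 *      __ioremap_at(phys, ea, 0x10000, _PAGE_NO_CACHE | _PAGE_GUARDED);
 *      ...
 *      __iounmap_at(ea + 0x8000, 0x8000);
 *
 * __ioremap_caller() below is the common path used by the ioremap()
 * family, which picks the virtual range automatically.
 */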

void __iomem *__ioremap_caller(phys_addr_t addr, unsigned long size,
                               unsigned long flags, void *caller)
{
        phys_addr_t paligned;
        void __iomem *ret;

        /*
         * Choose an address to map it to. Once the vmalloc system is
         * running, we use it. Before that, we map using addresses
         * going up from ioremap_bot. vmalloc will use the addresses
         * from ioremap_bot through IOREMAP_END.
         */
        paligned = addr & PAGE_MASK;
        size = PAGE_ALIGN(addr + size) - paligned;

        if ((size == 0) || (paligned == 0))
                return NULL;

        if (slab_is_available()) {
                struct vm_struct *area;

                area = __get_vm_area_caller(size, VM_IOREMAP,
                                            ioremap_bot, IOREMAP_END,
                                            caller);
                if (area == NULL)
                        return NULL;

                area->phys_addr = paligned;
                ret = __ioremap_at(paligned, area->addr, size, flags);
                if (!ret)
                        vunmap(area->addr);
        } else {
                ret = __ioremap_at(paligned, (void *)ioremap_bot, size, flags);
                if (ret)
                        ioremap_bot += size;
        }

        if (ret)
                ret += addr & ~PAGE_MASK;
        return ret;
}

void __iomem *__ioremap(phys_addr_t addr, unsigned long size,
                        unsigned long flags)
{
        return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
}

void __iomem *ioremap(phys_addr_t addr, unsigned long size)
{
        unsigned long flags = _PAGE_NO_CACHE | _PAGE_GUARDED;
        void *caller = __builtin_return_address(0);

        if (ppc_md.ioremap)
                return ppc_md.ioremap(addr, size, flags, caller);
        return __ioremap_caller(addr, size, flags, caller);
}

void __iomem *ioremap_wc(phys_addr_t addr, unsigned long size)
{
        unsigned long flags = _PAGE_NO_CACHE;
        void *caller = __builtin_return_address(0);

        if (ppc_md.ioremap)
                return ppc_md.ioremap(addr, size, flags, caller);
        return __ioremap_caller(addr, size, flags, caller);
}

void __iomem *ioremap_prot(phys_addr_t addr, unsigned long size,
                           unsigned long flags)
{
        void *caller = __builtin_return_address(0);

        /* writeable implies dirty for kernel addresses */
        if (flags & _PAGE_WRITE)
                flags |= _PAGE_DIRTY;

        /* we don't want to let _PAGE_EXEC leak out */
        flags &= ~_PAGE_EXEC;
        /*
         * Force kernel mapping.
         */
#if defined(CONFIG_PPC_BOOK3S_64)
        flags |= _PAGE_PRIVILEGED;
#else
        flags &= ~_PAGE_USER;
#endif

#ifdef _PAGE_BAP_SR
        /*
         * _PAGE_USER contains _PAGE_BAP_SR on BookE using the new PTE
         * format, which means that we just cleared supervisor access...
         * oops ;-) This restores it.
         */
        flags |= _PAGE_BAP_SR;
#endif

        if (ppc_md.ioremap)
                return ppc_md.ioremap(addr, size, flags, caller);
        return __ioremap_caller(addr, size, flags, caller);
}

/*
 * Unmap an IO region and remove it from the vmalloc'd mappings.
 * Access to IO memory should be serialized by the driver.
 */
void __iounmap(volatile void __iomem *token)
{
        void *addr;

        if (!slab_is_available())
                return;

        addr = (void *) ((unsigned long __force)
                         PCI_FIX_ADDR(token) & PAGE_MASK);
        if ((unsigned long)addr < ioremap_bot) {
                printk(KERN_WARNING "Attempt to iounmap early bolted mapping"
                       " at 0x%p\n", addr);
                return;
        }
        vunmap(addr);
}

void iounmap(volatile void __iomem *token)
{
        if (ppc_md.iounmap)
                ppc_md.iounmap(token);
        else
                __iounmap(token);
}
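
/*
 * Typical driver usage of the functions above, as a hedged sketch
 * ("res" is an assumed struct resource describing an MMIO region, and
 * the register offset 0x10 is hypothetical):
 *
 *      void __iomem *regs = ioremap(res->start, resource_size(res));
 *
 *      if (!regs)
 *              return -ENOMEM;
 *      writel(0x1, regs + 0x10);
 *      ...
 *      iounmap(regs);
 *
 * ioremap() yields a guarded, non-cacheable mapping; ioremap_wc()
 * omits _PAGE_GUARDED so stores may combine; ioremap_prot() lets the
 * caller supply the flags, subject to the sanitizing above.
 */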

EXPORT_SYMBOL(ioremap);
EXPORT_SYMBOL(ioremap_wc);
EXPORT_SYMBOL(ioremap_prot);
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(__ioremap_at);
EXPORT_SYMBOL(iounmap);
EXPORT_SYMBOL(__iounmap);
EXPORT_SYMBOL(__iounmap_at);

#ifndef __PAGETABLE_PUD_FOLDED
/* 4 level page table */
struct page *pgd_page(pgd_t pgd)
{
        if (pgd_huge(pgd))
                return pte_page(pgd_pte(pgd));
        return virt_to_page(pgd_page_vaddr(pgd));
}
#endif

struct page *pud_page(pud_t pud)
{
        if (pud_huge(pud))
                return pte_page(pud_pte(pud));
        return virt_to_page(pud_page_vaddr(pud));
}

/*
 * For a hugepage we have the pfn in the pmd and use the low
 * PTE_RPN_SHIFT bits for flags. For a PTE page, we have a
 * PTE_FRAG_SIZE (4K) aligned virtual address.
 */
struct page *pmd_page(pmd_t pmd)
{
        if (pmd_trans_huge(pmd) || pmd_huge(pmd))
                return pte_page(pmd_pte(pmd));
        return virt_to_page(pmd_page_vaddr(pmd));
}

#ifdef CONFIG_PPC_64K_PAGES
static pte_t *get_from_cache(struct mm_struct *mm)
{
        void *pte_frag, *ret;

        spin_lock(&mm->page_table_lock);
        ret = mm->context.pte_frag;
        if (ret) {
                pte_frag = ret + PTE_FRAG_SIZE;
                /*
                 * If we have taken up all the fragments, mark the
                 * PTE page NULL.
                 */
                if (((unsigned long)pte_frag & ~PAGE_MASK) == 0)
                        pte_frag = NULL;
                mm->context.pte_frag = pte_frag;
        }
        spin_unlock(&mm->page_table_lock);
        return (pte_t *)ret;
}

static pte_t *__alloc_for_cache(struct mm_struct *mm, int kernel)
{
        void *ret = NULL;
        struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
                                       __GFP_REPEAT | __GFP_ZERO);
        if (!page)
                return NULL;
        if (!kernel && !pgtable_page_ctor(page)) {
                __free_page(page);
                return NULL;
        }

        ret = page_address(page);
        spin_lock(&mm->page_table_lock);
        /*
         * If no fragment page is cached yet, stash the remainder of
         * this page and prime the page count to one reference per
         * fragment.
         */
        if (likely(!mm->context.pte_frag)) {
                set_page_count(page, PTE_FRAG_NR);
                mm->context.pte_frag = ret + PTE_FRAG_SIZE;
        }
        spin_unlock(&mm->page_table_lock);

        return (pte_t *)ret;
}

pte_t *page_table_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel)
{
        pte_t *pte;

        pte = get_from_cache(mm);
        if (pte)
                return pte;

        return __alloc_for_cache(mm, kernel);
}
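
/*
 * Fragment arithmetic, as an illustrative sketch: with 64K pages, one
 * page is carved into PTE_FRAG_NR fragments of PTE_FRAG_SIZE (4K)
 * each and handed out sequentially:
 *
 *      page: | frag 0 | frag 1 | ... | frag PTE_FRAG_NR-1 |
 *
 * mm->context.pte_frag points at the next free fragment; once the
 * pointer advances to a page boundary ((addr & ~PAGE_MASK) == 0) the
 * page is exhausted and the cache is marked empty. The struct page
 * refcount is primed to PTE_FRAG_NR so the page is only freed once
 * every fragment has been returned via page_table_free().
 */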

void page_table_free(struct mm_struct *mm, unsigned long *table, int kernel)
{
        struct page *page = virt_to_page(table);
        if (put_page_testzero(page)) {
                if (!kernel)
                        pgtable_page_dtor(page);
                free_hot_cold_page(page, 0);
        }
}

#ifdef CONFIG_SMP
static void page_table_free_rcu(void *table)
{
        struct page *page = virt_to_page(table);
        if (put_page_testzero(page)) {
                pgtable_page_dtor(page);
                free_hot_cold_page(page, 0);
        }
}

void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
{
        unsigned long pgf = (unsigned long)table;

        BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
        pgf |= shift;
        tlb_remove_table(tlb, (void *)pgf);
}

void __tlb_remove_table(void *_table)
{
        void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
        unsigned shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;

        if (!shift)
                /* PTE page needs special handling */
                page_table_free_rcu(table);
        else {
                BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
                kmem_cache_free(PGT_CACHE(shift), table);
        }
}
#else
void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
{
        if (!shift) {
                /* PTE page needs special handling */
                struct page *page = virt_to_page(table);
                if (put_page_testzero(page)) {
                        pgtable_page_dtor(page);
                        free_hot_cold_page(page, 0);
                }
        } else {
                BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
                kmem_cache_free(PGT_CACHE(shift), table);
        }
}
#endif
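
/*
 * In the SMP variant above, the page table shift is stashed in the
 * low bits of the table pointer before it is handed to
 * tlb_remove_table(); this works because table addresses have at
 * least the MAX_PGTABLE_INDEX_SIZE low bits clear. Illustrative round
 * trip, mirroring pgtable_free_tlb() and __tlb_remove_table():
 *
 *      pgf   = (unsigned long)table | shift;                  // encode
 *      table = (void *)(pgf & ~MAX_PGTABLE_INDEX_SIZE);       // decode
 *      shift = pgf & MAX_PGTABLE_INDEX_SIZE;
 *
 * shift == 0 is reserved for PTE fragment pages, which need the
 * pgtable_page_dtor() treatment rather than kmem_cache_free().
 */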
#endif /* CONFIG_PPC_64K_PAGES */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

/*
 * This is called when relaxing access to a hugepage. It's also called
 * in the page fault path when we don't hit any of the major fault
 * cases, i.e. a minor update of _PAGE_ACCESSED, _PAGE_DIRTY, etc. The
 * generic code will have handled those two for us; we additionally
 * deal with missing execute permission here on some processors.
 */
int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
                          pmd_t *pmdp, pmd_t entry, int dirty)
{
        int changed;
#ifdef CONFIG_DEBUG_VM
        WARN_ON(!pmd_trans_huge(*pmdp));
        assert_spin_locked(&vma->vm_mm->page_table_lock);
#endif
        changed = !pmd_same(*(pmdp), entry);
        if (changed) {
                __ptep_set_access_flags(pmdp_ptep(pmdp), pmd_pte(entry));
                /*
                 * Since we are not supporting SW TLB systems, we don't
                 * have anything similar to flush_tlb_page_nohash().
                 */
        }
        return changed;
}

unsigned long pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
                                  pmd_t *pmdp, unsigned long clr,
                                  unsigned long set)
{
        __be64 old_be, tmp;
        unsigned long old;

#ifdef CONFIG_DEBUG_VM
        WARN_ON(!pmd_trans_huge(*pmdp));
        assert_spin_locked(&mm->page_table_lock);
#endif

        __asm__ __volatile__(
        "1:     ldarx   %0,0,%3\n\
                and.    %1,%0,%6\n\
                bne-    1b \n\
                andc    %1,%0,%4 \n\
                or      %1,%1,%7\n\
                stdcx.  %1,0,%3 \n\
                bne-    1b"
        : "=&r" (old_be), "=&r" (tmp), "=m" (*pmdp)
        : "r" (pmdp), "r" (cpu_to_be64(clr)), "m" (*pmdp),
          "r" (cpu_to_be64(_PAGE_BUSY)), "r" (cpu_to_be64(set))
        : "cc" );

        old = be64_to_cpu(old_be);

        trace_hugepage_update(addr, old, clr, set);
        if (old & _PAGE_HASHPTE)
                hpte_do_hugepage_flush(mm, addr, pmdp, old);
        return old;
}
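
/*
 * The ldarx/stdcx. sequence above amounts to the following atomic
 * read-modify-write, sketched in C (ignoring the big-endian in-memory
 * PTE representation handled by the cpu_to_be64()/be64_to_cpu()
 * calls):
 *
 *      do {
 *              old = *pmdp;            // ldarx: load and reserve
 *      } while (old & _PAGE_BUSY);     // and./bne-: spin while busy
 *      *pmdp = (old & ~clr) | set;     // andc/or; stdcx. retries on loss
 *
 * i.e. the bits in 'clr' are cleared, the bits in 'set' are set, and
 * the previous value is returned so callers can flush stale HPTEs.
 */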

pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
                          pmd_t *pmdp)
{
        pmd_t pmd;

        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
        VM_BUG_ON(pmd_trans_huge(*pmdp));

        pmd = *pmdp;
        pmd_clear(pmdp);
        /*
         * Wait for all pending hash_page to finish. This is needed
         * in case of subpage collapse. When we collapse normal pages
         * to a hugepage, we first clear the pmd, then invalidate all
         * the PTE entries. The assumption here is that any low level
         * page fault will see a none pmd and take the slow path that
         * will wait on mmap_sem. But we could very well be in a
         * hash_page with a local ptep pointer value. Such a hash_page
         * can result in adding new HPTE entries for normal subpages.
         * That means we could be modifying the page content as we
         * copy them to a huge page. So wait for parallel hash_page
         * to finish before invalidating HPTE entries. We can do this
         * by sending an IPI to all the cpus and executing a dummy
         * function there.
         */
        kick_all_cpus_sync();
        /*
         * Now invalidate the hpte entries in the range
         * covered by pmd. This makes sure we take a
         * fault and will find the pmd as none, which will
         * result in a major fault which takes mmap_sem and
         * hence waits for collapse to complete. Without this
         * the __collapse_huge_page_copy can result in copying
         * the old content.
         */
        flush_tlb_pmd_range(vma->vm_mm, &pmd, address);
        return pmd;
}

int pmdp_test_and_clear_young(struct vm_area_struct *vma,
                              unsigned long address, pmd_t *pmdp)
{
        return __pmdp_test_and_clear_young(vma->vm_mm, address, pmdp);
}

/*
 * We currently remove entries from the hashtable regardless of whether
 * the entry was young or dirty. The generic routines only flush if the
 * entry was young or dirty, which is not good enough.
 *
 * We should be more intelligent about this, but for the moment we
 * override these functions and force a tlb flush unconditionally.
 */
int pmdp_clear_flush_young(struct vm_area_struct *vma,
                           unsigned long address, pmd_t *pmdp)
{
        return __pmdp_test_and_clear_young(vma->vm_mm, address, pmdp);
}

/*
 * We want to put the pgtable in the pmd and use the pgtable for
 * tracking the base page size hptes.
 */
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
                                pgtable_t pgtable)
{
        pgtable_t *pgtable_slot;

        assert_spin_locked(&mm->page_table_lock);
        /*
         * We store the pgtable in the second half of the PMD.
         */
        pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD;
        *pgtable_slot = pgtable;
        /*
         * Expose the deposited pgtable to other cpus before we set
         * the hugepage PTE at pmd level; the hash fault code looks at
         * the deposited pgtable to store hash index values.
         */
        smp_wmb();
}

pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
        pgtable_t pgtable;
        pgtable_t *pgtable_slot;

        assert_spin_locked(&mm->page_table_lock);
        pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD;
        pgtable = *pgtable_slot;
        /*
         * Once we withdraw, mark the entry NULL.
         */
        *pgtable_slot = NULL;
        /*
         * We store HPTE information in the deposited PTE fragment.
         * Zero out the content on withdraw.
         */
        memset(pgtable, 0, PTE_FRAG_SIZE);
        return pgtable;
}
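
/*
 * Layout note, as an illustrative sketch: the deposit slot written by
 * pgtable_trans_huge_deposit() lives PTRS_PER_PMD entries past the
 * pmd pointer, i.e. in the second half of the PMD page:
 *
 *      | pmd entries (PTRS_PER_PMD) | pgtable_t slots (PTRS_PER_PMD) |
 *        ^pmdp                        ^pmdp + PTRS_PER_PMD
 *
 * Deposit and withdraw are always paired under mm->page_table_lock,
 * e.g. a deposit when a hugepage PMD is established and a withdraw
 * when it is split or zapped.
 */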
657
Aneesh Kumar K.Vc777e2a2016-02-09 06:50:31 +0530658void pmdp_huge_split_prepare(struct vm_area_struct *vma,
659 unsigned long address, pmd_t *pmdp)
660{
661 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
662 VM_BUG_ON(REGION_ID(address) != USER_REGION_ID);
663
664 /*
665 * We can't mark the pmd none here, because that will cause a race
666 * against exit_mmap. We need to continue mark pmd TRANS HUGE, while
667 * we spilt, but at the same time we wan't rest of the ppc64 code
668 * not to insert hash pte on this, because we will be modifying
669 * the deposited pgtable in the caller of this function. Hence
670 * clear the _PAGE_USER so that we move the fault handling to
671 * higher level function and that will serialize against ptl.
672 * We need to flush existing hash pte entries here even though,
673 * the translation is still valid, because we will withdraw
674 * pgtable_t after this.
675 */
Aneesh Kumar K.Vac29c642016-04-29 23:25:34 +1000676 pmd_hugepage_update(vma->vm_mm, address, pmdp, 0, _PAGE_PRIVILEGED);
Aneesh Kumar K.Vc777e2a2016-02-09 06:50:31 +0530677}

/*
 * Set a new huge pmd. We should not be called to update an existing
 * pmd entry; that should go via pmd_hugepage_update.
 */
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
                pmd_t *pmdp, pmd_t pmd)
{
#ifdef CONFIG_DEBUG_VM
        WARN_ON(pte_present(pmd_pte(*pmdp)) && !pte_protnone(pmd_pte(*pmdp)));
        assert_spin_locked(&mm->page_table_lock);
        WARN_ON(!pmd_trans_huge(pmd));
#endif
        trace_hugepage_set_pmd(addr, pmd_val(pmd));
        return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
}

/*
 * We use this to invalidate a pmdp entry before switching from a
 * hugepte to a regular pmd entry.
 */
void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
                     pmd_t *pmdp)
{
        pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, 0);

        /*
         * This ensures that generic code that relies on IRQ disabling
         * to prevent a parallel THP split works as expected.
         */
        kick_all_cpus_sync();
}

/*
 * A linux hugepage PMD was changed and the corresponding hash table
 * entries need to be flushed.
 */
void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
                            pmd_t *pmdp, unsigned long old_pmd)
{
        int ssize;
        unsigned int psize;
        unsigned long vsid;
        unsigned long flags = 0;
        const struct cpumask *tmp;

        /* get the base page size, vsid and segment size */
#ifdef CONFIG_DEBUG_VM
        psize = get_slice_psize(mm, addr);
        BUG_ON(psize == MMU_PAGE_16M);
#endif
        if (old_pmd & _PAGE_COMBO)
                psize = MMU_PAGE_4K;
        else
                psize = MMU_PAGE_64K;

        if (!is_kernel_addr(addr)) {
                ssize = user_segment_size(addr);
                vsid = get_vsid(mm->context.id, addr, ssize);
                WARN_ON(vsid == 0);
        } else {
                vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
                ssize = mmu_kernel_ssize;
        }

        tmp = cpumask_of(smp_processor_id());
        if (cpumask_equal(mm_cpumask(mm), tmp))
                flags |= HPTE_LOCAL_UPDATE;

        return flush_hash_hugepage(vsid, addr, pmdp, psize, ssize, flags);
}

static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot)
{
        return __pmd(pmd_val(pmd) | pgprot_val(pgprot));
}

pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot)
{
        unsigned long pmdv;

        pmdv = (pfn << PAGE_SHIFT) & PTE_RPN_MASK;
        return pmd_set_protbits(__pmd(pmdv), pgprot);
}

pmd_t mk_pmd(struct page *page, pgprot_t pgprot)
{
        return pfn_pmd(page_to_pfn(page), pgprot);
}

pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
        unsigned long pmdv;

        pmdv = pmd_val(pmd);
        pmdv &= _HPAGE_CHG_MASK;
        return pmd_set_protbits(__pmd(pmdv), newprot);
}
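
/*
 * How these helpers compose, as a hedged sketch (the pgprot values
 * are illustrative; callers normally derive them from the vma):
 *
 *      pmd_t pmd = pfn_pmd(pfn, PAGE_KERNEL);  // pfn via PTE_RPN_MASK
 *      pmd = pmd_modify(pmd, PAGE_KERNEL_RO);  // new protection bits
 *
 * pmd_modify() keeps only the bits in _HPAGE_CHG_MASK (the pfn and
 * the change-tracking flags) before OR-ing in the new protection.
 */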

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a HUGE PMD entry in the linux
 * page tables. We use it to preload an HPTE into the hash table
 * corresponding to the updated linux HUGE PMD entry.
 */
void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
                          pmd_t *pmd)
{
        return;
}

pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
                              unsigned long addr, pmd_t *pmdp)
{
        pmd_t old_pmd;
        pgtable_t pgtable;
        unsigned long old;
        pgtable_t *pgtable_slot;

        old = pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0);
        old_pmd = __pmd(old);
        /*
         * We have pmd == none and we are holding page_table_lock.
         * So we can safely go and clear the pgtable hash
         * index info.
         */
        pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD;
        pgtable = *pgtable_slot;
        /*
         * Let's zero out the old valid and hash index details;
         * the hash fault code looks at them.
         */
        memset(pgtable, 0, PTE_FRAG_SIZE);
        /*
         * Serialize against find_linux_pte_or_hugepte, which does a
         * lock-less lookup in page tables with local interrupts
         * disabled. For huge pages it casts pmd_t to pte_t. Since the
         * format of pte_t is different from pmd_t, we want to prevent
         * transit from a pmd pointing to a page table to a pmd pointing
         * to a huge page (and back) while interrupts are disabled. We
         * clear the pmd to possibly replace it with a page table
         * pointer in different code paths, so make sure we wait for the
         * parallel find_linux_pte_or_hugepte to finish.
         */
        kick_all_cpus_sync();
        return old_pmd;
}

int has_transparent_hugepage(void)
{
        BUILD_BUG_ON_MSG((PMD_SHIFT - PAGE_SHIFT) >= MAX_ORDER,
                "hugepages can't be allocated by the buddy allocator");

        BUILD_BUG_ON_MSG((PMD_SHIFT - PAGE_SHIFT) < 2,
                "We need more than 2 pages to do deferred thp split");

        if (!mmu_has_feature(MMU_FTR_16M_PAGE))
                return 0;
        /*
         * We support THP only if PMD_SIZE is 16MB.
         */
        if (mmu_psize_defs[MMU_PAGE_16M].shift != PMD_SHIFT)
                return 0;
        /*
         * We need to make sure that we support 16MB hugepages in a
         * segment with base page size 64K or 4K. We only enable THP
         * with a PAGE_SIZE of 64K.
         *
         * If we have 64K HPTEs, we will be using that by default.
         */
        if (mmu_psize_defs[MMU_PAGE_64K].shift &&
            (mmu_psize_defs[MMU_PAGE_64K].penc[MMU_PAGE_16M] == -1))
                return 0;
        /*
         * Ok, we only have 4K HPTEs.
         */
        if (mmu_psize_defs[MMU_PAGE_4K].penc[MMU_PAGE_16M] == -1)
                return 0;

        return 1;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */