/*
 * This file contains ioremap and related functions for 64-bit machines.
 *
 * Derived from arch/ppc64/mm/init.c
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@samba.org)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 * Copyright (C) 1996 Paul Mackerras
 *
 * Derived from "arch/i386/mm/init.c"
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * Dave Engebretsen <engebret@us.ibm.com>
 *     Rework for PPC64 port.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/firmware.h>
#include <asm/dma.h>

#include "mmu_decl.h"

#define CREATE_TRACE_POINTS
#include <trace/events/thp.h>

/* Some sanity checking */
#if TASK_SIZE_USER64 > PGTABLE_RANGE
#error TASK_SIZE_USER64 exceeds pagetable range
#endif

#ifdef CONFIG_PPC_STD_MMU_64
#if TASK_SIZE_USER64 > (1UL << (ESID_BITS + SID_SHIFT))
#error TASK_SIZE_USER64 exceeds user VSID range
#endif
#endif

unsigned long ioremap_bot = IOREMAP_BASE;

#ifdef CONFIG_PPC_MMU_NOHASH
static __ref void *early_alloc_pgtable(unsigned long size)
{
	void *pt;

	pt = __va(memblock_alloc_base(size, size, __pa(MAX_DMA_ADDRESS)));
	memset(pt, 0, size);

	return pt;
}
#endif /* CONFIG_PPC_MMU_NOHASH */

/*
 * map_kernel_page is currently only called by __ioremap.
 * It adds an entry to the ioremap page table and an entry
 * to the HPT, possibly bolting it.
 */
int map_kernel_page(unsigned long ea, unsigned long pa, int flags)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	if (slab_is_available()) {
		pgdp = pgd_offset_k(ea);
		pudp = pud_alloc(&init_mm, pgdp, ea);
		if (!pudp)
			return -ENOMEM;
		pmdp = pmd_alloc(&init_mm, pudp, ea);
		if (!pmdp)
			return -ENOMEM;
		ptep = pte_alloc_kernel(pmdp, ea);
		if (!ptep)
			return -ENOMEM;
		set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
						       __pgprot(flags)));
	} else {
#ifdef CONFIG_PPC_MMU_NOHASH
		pgdp = pgd_offset_k(ea);
#ifdef PUD_TABLE_SIZE
		if (pgd_none(*pgdp)) {
			pudp = early_alloc_pgtable(PUD_TABLE_SIZE);
			BUG_ON(pudp == NULL);
			pgd_populate(&init_mm, pgdp, pudp);
		}
#endif /* PUD_TABLE_SIZE */
		pudp = pud_offset(pgdp, ea);
		if (pud_none(*pudp)) {
			pmdp = early_alloc_pgtable(PMD_TABLE_SIZE);
			BUG_ON(pmdp == NULL);
			pud_populate(&init_mm, pudp, pmdp);
		}
		pmdp = pmd_offset(pudp, ea);
		if (!pmd_present(*pmdp)) {
			ptep = early_alloc_pgtable(PAGE_SIZE);
			BUG_ON(ptep == NULL);
			pmd_populate_kernel(&init_mm, pmdp, ptep);
		}
		ptep = pte_offset_kernel(pmdp, ea);
		set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
						       __pgprot(flags)));
#else /* CONFIG_PPC_MMU_NOHASH */
		/*
		 * If the mm subsystem is not fully up, we cannot create a
		 * linux page table entry for this mapping.  Simply bolt an
		 * entry in the hardware page table.
		 */
		if (htab_bolt_mapping(ea, ea + PAGE_SIZE, pa, flags,
				      mmu_io_psize, mmu_kernel_ssize)) {
			printk(KERN_ERR "Failed to do bolted mapping of IO "
			       "memory at %016lx!\n", pa);
			return -ENOMEM;
		}
#endif /* !CONFIG_PPC_MMU_NOHASH */
	}

#ifdef CONFIG_PPC_BOOK3E_64
	/*
	 * With hardware tablewalk, a sync is needed to ensure that
	 * subsequent accesses see the PTE we just wrote.  Unlike userspace
	 * mappings, we can't tolerate spurious faults, so make sure
	 * the new PTE will be seen the first time.
	 */
	mb();
#else
	smp_wmb();
#endif
	return 0;
}


/**
 * __ioremap_at - Low level function to establish the page tables
 *                for an IO mapping
 */
void __iomem * __ioremap_at(phys_addr_t pa, void *ea, unsigned long size,
			    unsigned long flags)
{
	unsigned long i;

	/* Make sure we have the base flags */
	if ((flags & _PAGE_PRESENT) == 0)
		flags |= pgprot_val(PAGE_KERNEL);

	/* Non-cacheable page cannot be coherent */
	if (flags & _PAGE_NO_CACHE)
		flags &= ~_PAGE_COHERENT;

	/* We don't support the 4K PFN hack with ioremap */
	if (flags & _PAGE_4K_PFN)
		return NULL;

	WARN_ON(pa & ~PAGE_MASK);
	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
	WARN_ON(size & ~PAGE_MASK);

	for (i = 0; i < size; i += PAGE_SIZE)
		if (map_kernel_page((unsigned long)ea + i, pa + i, flags))
			return NULL;

	return (void __iomem *)ea;
}

/**
 * __iounmap_at - Low level function to tear down the page tables
 *                for an IO mapping. This is used for mappings that
 *                are manipulated manually, like partial unmapping of
 *                PCI IOs or ISA space.
 */
void __iounmap_at(void *ea, unsigned long size)
{
	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
	WARN_ON(size & ~PAGE_MASK);

	unmap_kernel_range((unsigned long)ea, size);
}

void __iomem * __ioremap_caller(phys_addr_t addr, unsigned long size,
				unsigned long flags, void *caller)
{
	phys_addr_t paligned;
	void __iomem *ret;

	/*
	 * Choose an address to map it to.  Once the vmalloc system is
	 * running, we use it.  Before that, we map using addresses going
	 * up from ioremap_bot.  vmalloc will use the addresses from
	 * ioremap_bot through IOREMAP_END.
	 */
	paligned = addr & PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - paligned;

	if ((size == 0) || (paligned == 0))
		return NULL;

	if (slab_is_available()) {
		struct vm_struct *area;

		area = __get_vm_area_caller(size, VM_IOREMAP,
					    ioremap_bot, IOREMAP_END,
					    caller);
		if (area == NULL)
			return NULL;

		area->phys_addr = paligned;
		ret = __ioremap_at(paligned, area->addr, size, flags);
		if (!ret)
			vunmap(area->addr);
	} else {
		ret = __ioremap_at(paligned, (void *)ioremap_bot, size, flags);
		if (ret)
			ioremap_bot += size;
	}

	if (ret)
		ret += addr & ~PAGE_MASK;
	return ret;
}

void __iomem * __ioremap(phys_addr_t addr, unsigned long size,
			 unsigned long flags)
{
	return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
}

void __iomem * ioremap(phys_addr_t addr, unsigned long size)
{
	unsigned long flags = _PAGE_NO_CACHE | _PAGE_GUARDED;
	void *caller = __builtin_return_address(0);

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags, caller);
	return __ioremap_caller(addr, size, flags, caller);
}
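
/*
 * Illustrative (hypothetical) driver usage of the ioremap family; the
 * resource 'res' and REG_OFFSET below are assumptions, not part of this
 * file:
 *
 *	void __iomem *regs = ioremap(res->start, resource_size(res));
 *	if (regs) {
 *		u32 val = in_be32(regs + REG_OFFSET);
 *		iounmap(regs);
 *	}
 */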

void __iomem * ioremap_wc(phys_addr_t addr, unsigned long size)
{
	unsigned long flags = _PAGE_NO_CACHE;
	void *caller = __builtin_return_address(0);

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags, caller);
	return __ioremap_caller(addr, size, flags, caller);
}

void __iomem * ioremap_prot(phys_addr_t addr, unsigned long size,
			    unsigned long flags)
{
	void *caller = __builtin_return_address(0);

	/* writeable implies dirty for kernel addresses */
	if (flags & _PAGE_RW)
		flags |= _PAGE_DIRTY;

	/* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */
	flags &= ~(_PAGE_USER | _PAGE_EXEC);

#ifdef _PAGE_BAP_SR
	/* _PAGE_USER contains _PAGE_BAP_SR on BookE using the new PTE format
	 * which means that we just cleared supervisor access... oops ;-) This
	 * restores it
	 */
	flags |= _PAGE_BAP_SR;
#endif

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags, caller);
	return __ioremap_caller(addr, size, flags, caller);
}


/*
 * Unmap an IO region and remove it from the vmalloc'd list.
 * Access to IO memory should be serialized by the driver.
 */
void __iounmap(volatile void __iomem *token)
{
	void *addr;

	if (!slab_is_available())
		return;

	addr = (void *) ((unsigned long __force)
			 PCI_FIX_ADDR(token) & PAGE_MASK);
	if ((unsigned long)addr < ioremap_bot) {
		printk(KERN_WARNING "Attempt to iounmap early bolted mapping"
		       " at 0x%p\n", addr);
		return;
	}
	vunmap(addr);
}

void iounmap(volatile void __iomem *token)
{
	if (ppc_md.iounmap)
		ppc_md.iounmap(token);
	else
		__iounmap(token);
}

EXPORT_SYMBOL(ioremap);
EXPORT_SYMBOL(ioremap_wc);
EXPORT_SYMBOL(ioremap_prot);
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(__ioremap_at);
EXPORT_SYMBOL(iounmap);
EXPORT_SYMBOL(__iounmap);
EXPORT_SYMBOL(__iounmap_at);

#ifndef __PAGETABLE_PUD_FOLDED
/* 4 level page table */
struct page *pgd_page(pgd_t pgd)
{
	if (pgd_huge(pgd))
		return pte_page(pgd_pte(pgd));
	return virt_to_page(pgd_page_vaddr(pgd));
}
#endif

struct page *pud_page(pud_t pud)
{
	if (pud_huge(pud))
		return pte_page(pud_pte(pud));
	return virt_to_page(pud_page_vaddr(pud));
}

/*
 * For a hugepage we have the pfn in the pmd and use PTE_RPN_SHIFT bits
 * for flags.  For a PTE page we have a PTE_FRAG_SIZE (4K) aligned
 * virtual address.
 */
struct page *pmd_page(pmd_t pmd)
{
	if (pmd_trans_huge(pmd) || pmd_huge(pmd))
		return pfn_to_page(pmd_pfn(pmd));
	return virt_to_page(pmd_page_vaddr(pmd));
}

#ifdef CONFIG_PPC_64K_PAGES
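/*
 * PTE pages are carved into fragments: one page holds PTE_FRAG_NR
 * fragments of PTE_FRAG_SIZE bytes each, and the page's _count doubles
 * as the fragment reference count.  get_from_cache() hands out the next
 * free fragment, __alloc_for_cache() starts a new page, and the page is
 * only freed once every fragment has been released.
 */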
static pte_t *get_from_cache(struct mm_struct *mm)
{
	void *pte_frag, *ret;

	spin_lock(&mm->page_table_lock);
	ret = mm->context.pte_frag;
	if (ret) {
		pte_frag = ret + PTE_FRAG_SIZE;
		/*
		 * If we have taken up all the fragments, mark the PTE page NULL
		 */
		if (((unsigned long)pte_frag & ~PAGE_MASK) == 0)
			pte_frag = NULL;
		mm->context.pte_frag = pte_frag;
	}
	spin_unlock(&mm->page_table_lock);
	return (pte_t *)ret;
}

static pte_t *__alloc_for_cache(struct mm_struct *mm, int kernel)
{
	void *ret = NULL;
	struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
				       __GFP_REPEAT | __GFP_ZERO);
	if (!page)
		return NULL;
	if (!kernel && !pgtable_page_ctor(page)) {
		__free_page(page);
		return NULL;
	}

	ret = page_address(page);
	spin_lock(&mm->page_table_lock);
	/*
	 * If we find pte_frag already set, we return the allocated
	 * page with a single fragment count.
	 */
	if (likely(!mm->context.pte_frag)) {
		atomic_set(&page->_count, PTE_FRAG_NR);
		mm->context.pte_frag = ret + PTE_FRAG_SIZE;
	}
	spin_unlock(&mm->page_table_lock);

	return (pte_t *)ret;
}

pte_t *page_table_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel)
{
	pte_t *pte;

	pte = get_from_cache(mm);
	if (pte)
		return pte;

	return __alloc_for_cache(mm, kernel);
}

void page_table_free(struct mm_struct *mm, unsigned long *table, int kernel)
{
	struct page *page = virt_to_page(table);
	if (put_page_testzero(page)) {
		if (!kernel)
			pgtable_page_dtor(page);
		free_hot_cold_page(page, 0);
	}
}

#ifdef CONFIG_SMP
static void page_table_free_rcu(void *table)
{
	struct page *page = virt_to_page(table);
	if (put_page_testzero(page)) {
		pgtable_page_dtor(page);
		free_hot_cold_page(page, 0);
	}
}

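/*
 * Defer freeing of a page-table page via tlb_remove_table(): the index
 * size is encoded in the low bits of the table pointer (0 denotes a PTE
 * fragment page) and decoded again in __tlb_remove_table().
 */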
void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
{
	unsigned long pgf = (unsigned long)table;

	BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
	pgf |= shift;
	tlb_remove_table(tlb, (void *)pgf);
}

void __tlb_remove_table(void *_table)
{
	void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
	unsigned shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;

	if (!shift)
		/* PTE page needs special handling */
		page_table_free_rcu(table);
	else {
		BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
		kmem_cache_free(PGT_CACHE(shift), table);
	}
}
#else
void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
{
	if (!shift) {
		/* PTE page needs special handling */
		struct page *page = virt_to_page(table);
		if (put_page_testzero(page)) {
			pgtable_page_dtor(page);
			free_hot_cold_page(page, 0);
		}
	} else {
		BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
		kmem_cache_free(PGT_CACHE(shift), table);
	}
}
#endif
#endif /* CONFIG_PPC_64K_PAGES */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

/*
 * This is called when relaxing access to a hugepage.  It's also called
 * in the page fault path when we don't hit any of the major fault cases,
 * i.e., a minor update of _PAGE_ACCESSED, _PAGE_DIRTY, etc.  The generic
 * code will have handled those two for us; we additionally deal with
 * missing execute permission here on some processors.
 */
int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp, pmd_t entry, int dirty)
{
	int changed;
#ifdef CONFIG_DEBUG_VM
	WARN_ON(!pmd_trans_huge(*pmdp));
	assert_spin_locked(&vma->vm_mm->page_table_lock);
#endif
	changed = !pmd_same(*(pmdp), entry);
	if (changed) {
		__ptep_set_access_flags(pmdp_ptep(pmdp), pmd_pte(entry));
		/*
		 * Since we are not supporting SW TLB systems, we don't
		 * have anything similar to flush_tlb_page_nohash()
		 */
	}
	return changed;
}

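/*
 * Atomically clear the bits in 'clr' and set the bits in 'set' in the
 * huge pmd, flush any hash PTEs the old value had installed, and return
 * the old pmd value.
 */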
unsigned long pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
				  pmd_t *pmdp, unsigned long clr,
				  unsigned long set)
{
	unsigned long old, tmp;

#ifdef CONFIG_DEBUG_VM
	WARN_ON(!pmd_trans_huge(*pmdp));
	assert_spin_locked(&mm->page_table_lock);
#endif

#ifdef PTE_ATOMIC_UPDATES
	__asm__ __volatile__(
	"1:	ldarx	%0,0,%3\n\
		andi.	%1,%0,%6\n\
		bne-	1b \n\
		andc	%1,%0,%4 \n\
		or	%1,%1,%7\n\
		stdcx.	%1,0,%3 \n\
		bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*pmdp)
	: "r" (pmdp), "r" (clr), "m" (*pmdp), "i" (_PAGE_BUSY), "r" (set)
	: "cc" );
#else
	old = pmd_val(*pmdp);
	*pmdp = __pmd((old & ~clr) | set);
#endif
	trace_hugepage_update(addr, old, clr, set);
	if (old & _PAGE_HASHPTE)
		hpte_do_hugepage_flush(mm, addr, pmdp, old);
	return old;
}

pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp)
{
	pmd_t pmd;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(pmd_trans_huge(*pmdp));

	pmd = *pmdp;
	pmd_clear(pmdp);
	/*
	 * Wait for all pending hash_page to finish.  This is needed
	 * in case of subpage collapse.  When we collapse normal pages
	 * to a hugepage, we first clear the pmd, then invalidate all
	 * the PTE entries.  The assumption here is that any low level
	 * page fault will see a none pmd and take the slow path that
	 * will wait on mmap_sem.  But we could very well be in a
	 * hash_page with a local ptep pointer value.  Such a hash page
	 * can result in adding new HPTE entries for normal subpages.
	 * That means we could be modifying the page content as we
	 * copy them to a huge page.  So wait for parallel hash_page
	 * to finish before invalidating HPTE entries.  We can do this
	 * by sending an IPI to all the cpus and executing a dummy
	 * function there.
	 */
	kick_all_cpus_sync();
	/*
	 * Now invalidate the hpte entries in the range
	 * covered by pmd.  This makes sure we take a
	 * fault and will find the pmd as none, which will
	 * result in a major fault which takes mmap_sem and
	 * hence wait for collapse to complete.  Without this
	 * the __collapse_huge_page_copy can result in copying
	 * the old content.
	 */
	flush_tlb_pmd_range(vma->vm_mm, &pmd, address);
	return pmd;
}

int pmdp_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long address, pmd_t *pmdp)
{
	return __pmdp_test_and_clear_young(vma->vm_mm, address, pmdp);
}

/*
 * We currently remove entries from the hashtable regardless of whether
 * the entry was young or dirty.  The generic routines only flush if the
 * entry was young or dirty, which is not good enough.
 *
 * We should be more intelligent about this but for the moment we override
 * these functions and force a tlb flush unconditionally.
 */
int pmdp_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pmd_t *pmdp)
{
	return __pmdp_test_and_clear_young(vma->vm_mm, address, pmdp);
}

/*
 * We mark the pmd splitting and invalidate all the hpte
 * entries for this hugepage.
 */
void pmdp_splitting_flush(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp)
{
	unsigned long old, tmp;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

#ifdef CONFIG_DEBUG_VM
	WARN_ON(!pmd_trans_huge(*pmdp));
	assert_spin_locked(&vma->vm_mm->page_table_lock);
#endif

#ifdef PTE_ATOMIC_UPDATES

	__asm__ __volatile__(
	"1:	ldarx	%0,0,%3\n\
		andi.	%1,%0,%6\n\
		bne-	1b \n\
		ori	%1,%0,%4 \n\
		stdcx.	%1,0,%3 \n\
		bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*pmdp)
	: "r" (pmdp), "i" (_PAGE_SPLITTING), "m" (*pmdp), "i" (_PAGE_BUSY)
	: "cc" );
#else
	old = pmd_val(*pmdp);
	*pmdp = __pmd(old | _PAGE_SPLITTING);
#endif
	/*
	 * If we didn't have the splitting flag set, go and flush the
	 * HPTE entries.
	 */
	trace_hugepage_splitting(address, old);
	if (!(old & _PAGE_SPLITTING)) {
		/* We need to flush the hpte */
		if (old & _PAGE_HASHPTE)
			hpte_do_hugepage_flush(vma->vm_mm, address, pmdp, old);
	}
	/*
	 * This ensures that generic code that relies on IRQ disabling
	 * to prevent a parallel THP split works as expected.
	 */
	kick_all_cpus_sync();
}

/*
 * We want to put the pgtable in the pmd and use the pgtable for tracking
 * the base page size hptes.
 */
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	pgtable_t *pgtable_slot;
	assert_spin_locked(&mm->page_table_lock);
	/*
	 * we store the pgtable in the second half of PMD
	 */
	pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD;
	*pgtable_slot = pgtable;
	/*
	 * Expose the deposited pgtable to other cpus before we set
	 * the hugepage PTE at pmd level: the hash fault code looks at
	 * the deposited pgtable to store hash index values.
	 */
	smp_wmb();
}

pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	pgtable_t pgtable;
	pgtable_t *pgtable_slot;

	assert_spin_locked(&mm->page_table_lock);
	pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD;
	pgtable = *pgtable_slot;
	/*
	 * Once we withdraw, mark the entry NULL.
	 */
	*pgtable_slot = NULL;
	/*
	 * We store HPTE information in the deposited PTE fragment.
	 * Zero out the content on withdraw.
	 */
	memset(pgtable, 0, PTE_FRAG_SIZE);
	return pgtable;
}

/*
 * Set a new huge pmd.  We should not be called for updating
 * an existing pmd entry.  That should go via pmd_hugepage_update.
 */
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd)
{
#ifdef CONFIG_DEBUG_VM
	WARN_ON((pmd_val(*pmdp) & (_PAGE_PRESENT | _PAGE_USER)) ==
		(_PAGE_PRESENT | _PAGE_USER));
	assert_spin_locked(&mm->page_table_lock);
	WARN_ON(!pmd_trans_huge(pmd));
#endif
	trace_hugepage_set_pmd(addr, pmd_val(pmd));
	return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
}

void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		     pmd_t *pmdp)
{
	pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, 0);
}

/*
 * A linux hugepage PMD was changed and the corresponding hash table entries
 * need to be flushed.
 */
void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
			    pmd_t *pmdp, unsigned long old_pmd)
{
	int ssize;
	unsigned int psize;
	unsigned long vsid;
	unsigned long flags = 0;
	const struct cpumask *tmp;

	/* get the base page size, vsid and segment size */
#ifdef CONFIG_DEBUG_VM
	psize = get_slice_psize(mm, addr);
	BUG_ON(psize == MMU_PAGE_16M);
#endif
	if (old_pmd & _PAGE_COMBO)
		psize = MMU_PAGE_4K;
	else
		psize = MMU_PAGE_64K;

	if (!is_kernel_addr(addr)) {
		ssize = user_segment_size(addr);
		vsid = get_vsid(mm->context.id, addr, ssize);
		WARN_ON(vsid == 0);
	} else {
		vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
		ssize = mmu_kernel_ssize;
	}

	tmp = cpumask_of(smp_processor_id());
	if (cpumask_equal(mm_cpumask(mm), tmp))
		flags |= HPTE_LOCAL_UPDATE;

	return flush_hash_hugepage(vsid, addr, pmdp, psize, ssize, flags);
}

static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot)
{
	pmd_val(pmd) |= pgprot_val(pgprot);
	return pmd;
}

pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot)
{
	pmd_t pmd;
	/*
	 * For a valid pte, we would have _PAGE_PRESENT always
	 * set.  We use this to check for a THP page at the pmd
	 * level.  A leaf pte for a huge page has its bottom two
	 * bits != 00.
	 */
	pmd_val(pmd) = pfn << PTE_RPN_SHIFT;
	pmd_val(pmd) |= _PAGE_THP_HUGE;
	pmd = pmd_set_protbits(pmd, pgprot);
	return pmd;
}

pmd_t mk_pmd(struct page *page, pgprot_t pgprot)
{
	return pfn_pmd(page_to_pfn(page), pgprot);
}

pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmd_val(pmd) &= _HPAGE_CHG_MASK;
	pmd = pmd_set_protbits(pmd, newprot);
	return pmd;
}

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a HUGE PMD entry in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux HUGE PMD entry.
 */
void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
			  pmd_t *pmd)
{
	return;
}

pmd_t pmdp_get_and_clear(struct mm_struct *mm,
			 unsigned long addr, pmd_t *pmdp)
{
	pmd_t old_pmd;
	pgtable_t pgtable;
	unsigned long old;
	pgtable_t *pgtable_slot;

	old = pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0);
	old_pmd = __pmd(old);
	/*
	 * We have pmd == none and we are holding page_table_lock.
	 * So we can safely go and clear the pgtable hash
	 * index info.
	 */
	pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD;
	pgtable = *pgtable_slot;
	/*
	 * Let's zero out the old valid and hash index details;
	 * the hash fault code looks at them.
	 */
	memset(pgtable, 0, PTE_FRAG_SIZE);
	/*
	 * Serialize against find_linux_pte_or_hugepte, which does a lock-less
	 * lookup in page tables with local interrupts disabled.  For huge pages
	 * it casts pmd_t to pte_t.  Since the format of pte_t is different from
	 * pmd_t, we want to prevent transit from a pmd pointing to a page table
	 * to a pmd pointing to a huge page (and back) while interrupts are
	 * disabled.  We clear the pmd to possibly replace it with a page table
	 * pointer in different code paths, so make sure we wait for the
	 * parallel find_linux_pte_or_hugepte to finish.
	 */
	kick_all_cpus_sync();
	return old_pmd;
}

int has_transparent_hugepage(void)
{
	if (!mmu_has_feature(MMU_FTR_16M_PAGE))
		return 0;
	/*
	 * We support THP only if PMD_SIZE is 16MB.
	 */
	if (mmu_psize_defs[MMU_PAGE_16M].shift != PMD_SHIFT)
		return 0;
	/*
	 * We need to make sure that we support a 16MB hugepage in a segment
	 * with base page size 64K or 4K.  We only enable THP with a PAGE_SIZE
	 * of 64K.
	 *
	 * If we have a 64K HPTE, we will be using that by default.
	 */
	if (mmu_psize_defs[MMU_PAGE_64K].shift &&
	    (mmu_psize_defs[MMU_PAGE_64K].penc[MMU_PAGE_16M] == -1))
		return 0;
	/*
	 * Ok, we only have 4K HPTE.
	 */
	if (mmu_psize_defs[MMU_PAGE_4K].penc[MMU_PAGE_16M] == -1)
		return 0;

	return 1;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */