/*
 * This file contains ioremap and related functions for 64-bit machines.
 *
 * Derived from arch/ppc64/mm/init.c
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@samba.org)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/firmware.h>
#include <asm/dma.h>

#include "mmu_decl.h"

#define CREATE_TRACE_POINTS
#include <trace/events/thp.h>

/* Some sanity checking */
#if TASK_SIZE_USER64 > PGTABLE_RANGE
#error TASK_SIZE_USER64 exceeds pagetable range
#endif

#ifdef CONFIG_PPC_STD_MMU_64
#if TASK_SIZE_USER64 > (1UL << (ESID_BITS + SID_SHIFT))
#error TASK_SIZE_USER64 exceeds user VSID range
#endif
#endif

unsigned long ioremap_bot = IOREMAP_BASE;

#ifdef CONFIG_PPC_MMU_NOHASH
static __ref void *early_alloc_pgtable(unsigned long size)
{
	void *pt;

	pt = __va(memblock_alloc_base(size, size, __pa(MAX_DMA_ADDRESS)));
	memset(pt, 0, size);

	return pt;
}
#endif /* CONFIG_PPC_MMU_NOHASH */

/*
 * map_kernel_page() is currently only called by __ioremap(); it adds an
 * entry to the ioremap page table and adds an entry to the HPT, possibly
 * bolting it.
 */
int map_kernel_page(unsigned long ea, unsigned long pa, unsigned long flags)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	if (slab_is_available()) {
		pgdp = pgd_offset_k(ea);
		pudp = pud_alloc(&init_mm, pgdp, ea);
		if (!pudp)
			return -ENOMEM;
		pmdp = pmd_alloc(&init_mm, pudp, ea);
		if (!pmdp)
			return -ENOMEM;
		ptep = pte_alloc_kernel(pmdp, ea);
		if (!ptep)
			return -ENOMEM;
		set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
						       __pgprot(flags)));
	} else {
#ifdef CONFIG_PPC_MMU_NOHASH
		pgdp = pgd_offset_k(ea);
#ifdef PUD_TABLE_SIZE
		if (pgd_none(*pgdp)) {
			pudp = early_alloc_pgtable(PUD_TABLE_SIZE);
			BUG_ON(pudp == NULL);
			pgd_populate(&init_mm, pgdp, pudp);
		}
#endif /* PUD_TABLE_SIZE */
		pudp = pud_offset(pgdp, ea);
		if (pud_none(*pudp)) {
			pmdp = early_alloc_pgtable(PMD_TABLE_SIZE);
			BUG_ON(pmdp == NULL);
			pud_populate(&init_mm, pudp, pmdp);
		}
		pmdp = pmd_offset(pudp, ea);
		if (!pmd_present(*pmdp)) {
			ptep = early_alloc_pgtable(PAGE_SIZE);
			BUG_ON(ptep == NULL);
			pmd_populate_kernel(&init_mm, pmdp, ptep);
		}
		ptep = pte_offset_kernel(pmdp, ea);
		set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
						       __pgprot(flags)));
#else /* CONFIG_PPC_MMU_NOHASH */
		/*
		 * If the mm subsystem is not fully up, we cannot create a
		 * linux page table entry for this mapping.  Simply bolt an
		 * entry in the hardware page table.
		 */
		if (htab_bolt_mapping(ea, ea + PAGE_SIZE, pa, flags,
				      mmu_io_psize, mmu_kernel_ssize)) {
			printk(KERN_ERR "Failed to do bolted mapping of IO "
			       "memory at %016lx!\n", pa);
			return -ENOMEM;
		}
#endif /* !CONFIG_PPC_MMU_NOHASH */
	}

	smp_wmb();
	return 0;
}
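
/*
 * Illustrative sketch (virt_addr and phys_addr below are placeholder names,
 * and the flag construction is the one ioremap() uses further down):
 * __ioremap_at() drives map_kernel_page() one page at a time, so bolting a
 * single non-cacheable page for a device register block looks roughly like:
 *
 *	unsigned long prot = pgprot_val(pgprot_noncached(__pgprot(0)));
 *
 *	if (map_kernel_page(virt_addr, phys_addr, prot))
 *		return NULL;	(page table alloc or HPT insert failed)
 *
 * where both addresses must be page aligned.
 */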

/**
 * __ioremap_at - Low level function to establish the page tables
 *                for an IO mapping
 */
void __iomem * __ioremap_at(phys_addr_t pa, void *ea, unsigned long size,
			    unsigned long flags)
{
	unsigned long i;

	/* Make sure we have the base flags */
	if ((flags & _PAGE_PRESENT) == 0)
		flags |= pgprot_val(PAGE_KERNEL);

	/* Non-cacheable page cannot be coherent */
	if (flags & _PAGE_NO_CACHE)
		flags &= ~_PAGE_COHERENT;

	/* We don't support the 4K PFN hack with ioremap */
	if (flags & _PAGE_4K_PFN)
		return NULL;

	WARN_ON(pa & ~PAGE_MASK);
	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
	WARN_ON(size & ~PAGE_MASK);

	for (i = 0; i < size; i += PAGE_SIZE)
		if (map_kernel_page((unsigned long)ea+i, pa+i, flags))
			return NULL;

	return (void __iomem *)ea;
}

/**
 * __iounmap_at - Low level function to tear down the page tables
 *                for an IO mapping. This is used for mappings that
 *                are manipulated manually, like partial unmapping of
 *                PCI IOs or ISA space.
 */
void __iounmap_at(void *ea, unsigned long size)
{
	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
	WARN_ON(size & ~PAGE_MASK);

	unmap_kernel_range((unsigned long)ea, size);
}
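
/*
 * Illustrative sketch: the __ioremap_at()/__iounmap_at() pair is meant for
 * callers that manage the virtual range themselves (e.g. partial mapping
 * and unmapping of PCI IO or ISA space, as noted above). Assuming a
 * reserved, page-aligned virtual window "ea" of length "size":
 *
 *	if (!__ioremap_at(pa, ea, size,
 *			  pgprot_val(pgprot_noncached(__pgprot(0)))))
 *		return -ENOMEM;
 *	...
 *	__iounmap_at(ea, size);		(tears down just that sub-range)
 */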

void __iomem * __ioremap_caller(phys_addr_t addr, unsigned long size,
				unsigned long flags, void *caller)
{
	phys_addr_t paligned;
	void __iomem *ret;

	/*
	 * Choose an address to map it to. Once the vmalloc system is
	 * running, we use it. Before that, we map using addresses going
	 * up from ioremap_bot. The vmalloc area will use addresses from
	 * ioremap_bot through IOREMAP_END.
	 */
	paligned = addr & PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - paligned;

	if ((size == 0) || (paligned == 0))
		return NULL;

	if (slab_is_available()) {
		struct vm_struct *area;

		area = __get_vm_area_caller(size, VM_IOREMAP,
					    ioremap_bot, IOREMAP_END,
					    caller);
		if (area == NULL)
			return NULL;

		area->phys_addr = paligned;
		ret = __ioremap_at(paligned, area->addr, size, flags);
		if (!ret)
			vunmap(area->addr);
	} else {
		ret = __ioremap_at(paligned, (void *)ioremap_bot, size, flags);
		if (ret)
			ioremap_bot += size;
	}

	if (ret)
		ret += addr & ~PAGE_MASK;
	return ret;
}

void __iomem * __ioremap(phys_addr_t addr, unsigned long size,
			 unsigned long flags)
{
	return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
}

void __iomem * ioremap(phys_addr_t addr, unsigned long size)
{
	unsigned long flags = pgprot_val(pgprot_noncached(__pgprot(0)));
	void *caller = __builtin_return_address(0);

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags, caller);
	return __ioremap_caller(addr, size, flags, caller);
}

void __iomem * ioremap_wc(phys_addr_t addr, unsigned long size)
{
	unsigned long flags = pgprot_val(pgprot_noncached_wc(__pgprot(0)));
	void *caller = __builtin_return_address(0);

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags, caller);
	return __ioremap_caller(addr, size, flags, caller);
}

void __iomem * ioremap_prot(phys_addr_t addr, unsigned long size,
			    unsigned long flags)
{
	void *caller = __builtin_return_address(0);

	/* writeable implies dirty for kernel addresses */
	if (flags & _PAGE_WRITE)
		flags |= _PAGE_DIRTY;

	/* we don't want to let _PAGE_EXEC leak out */
	flags &= ~_PAGE_EXEC;
	/*
	 * Force kernel mapping.
	 */
#if defined(CONFIG_PPC_BOOK3S_64)
	flags |= _PAGE_PRIVILEGED;
#else
	flags &= ~_PAGE_USER;
#endif

#ifdef _PAGE_BAP_SR
	/* _PAGE_USER contains _PAGE_BAP_SR on BookE using the new PTE format
	 * which means that we just cleared supervisor access... oops ;-) This
	 * restores it
	 */
	flags |= _PAGE_BAP_SR;
#endif

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags, caller);
	return __ioremap_caller(addr, size, flags, caller);
}

/*
 * Unmap an IO region and remove it from the vmalloc'd mappings.
 * Access to IO memory should be serialized by the driver.
 */
void __iounmap(volatile void __iomem *token)
{
	void *addr;

	if (!slab_is_available())
		return;

	addr = (void *) ((unsigned long __force)
			 PCI_FIX_ADDR(token) & PAGE_MASK);
	if ((unsigned long)addr < ioremap_bot) {
		printk(KERN_WARNING "Attempt to iounmap early bolted mapping"
		       " at 0x%p\n", addr);
		return;
	}
	vunmap(addr);
}

void iounmap(volatile void __iomem *token)
{
	if (ppc_md.iounmap)
		ppc_md.iounmap(token);
	else
		__iounmap(token);
}
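
/*
 * Illustrative sketch of the usual driver-level pairing: map a device's
 * MMIO range with ioremap() (non-cacheable, guarded), access it through
 * the __iomem accessors from <asm/io.h>, then drop the mapping with
 * iounmap(). The resource bounds and register offset are placeholders:
 *
 *	void __iomem *regs = ioremap(res_start, res_size);
 *
 *	if (!regs)
 *		return -ENOMEM;
 *	val = in_be32(regs + REG_OFFSET);
 *	...
 *	iounmap(regs);
 */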

EXPORT_SYMBOL(ioremap);
EXPORT_SYMBOL(ioremap_wc);
EXPORT_SYMBOL(ioremap_prot);
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(__ioremap_at);
EXPORT_SYMBOL(iounmap);
EXPORT_SYMBOL(__iounmap);
EXPORT_SYMBOL(__iounmap_at);

#ifndef __PAGETABLE_PUD_FOLDED
/* 4 level page table */
struct page *pgd_page(pgd_t pgd)
{
	if (pgd_huge(pgd))
		return pte_page(pgd_pte(pgd));
	return virt_to_page(pgd_page_vaddr(pgd));
}
#endif

struct page *pud_page(pud_t pud)
{
	if (pud_huge(pud))
		return pte_page(pud_pte(pud));
	return virt_to_page(pud_page_vaddr(pud));
}

/*
 * For a hugepage we have the pfn in the pmd and use PTE_RPN_SHIFT bits
 * for flags. For a PTE page, we have a PTE_FRAG_SIZE (4K) aligned
 * virtual address instead.
 */
struct page *pmd_page(pmd_t pmd)
{
	if (pmd_trans_huge(pmd) || pmd_huge(pmd))
		return pte_page(pmd_pte(pmd));
	return virt_to_page(pmd_page_vaddr(pmd));
}

#ifdef CONFIG_PPC_64K_PAGES
static pte_t *get_from_cache(struct mm_struct *mm)
{
	void *pte_frag, *ret;

	spin_lock(&mm->page_table_lock);
	ret = mm->context.pte_frag;
	if (ret) {
		pte_frag = ret + PTE_FRAG_SIZE;
		/*
		 * If we have taken up all the fragments, mark the PTE page NULL
		 */
		if (((unsigned long)pte_frag & ~PAGE_MASK) == 0)
			pte_frag = NULL;
		mm->context.pte_frag = pte_frag;
	}
	spin_unlock(&mm->page_table_lock);
	return (pte_t *)ret;
}

static pte_t *__alloc_for_cache(struct mm_struct *mm, int kernel)
{
	void *ret = NULL;
	struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
				       __GFP_REPEAT | __GFP_ZERO);
	if (!page)
		return NULL;
	if (!kernel && !pgtable_page_ctor(page)) {
		__free_page(page);
		return NULL;
	}

	ret = page_address(page);
	spin_lock(&mm->page_table_lock);
	/*
	 * If we find pte_frag already set (someone raced with us and
	 * refilled the cache), return the newly allocated page with a
	 * single fragment count.
	 */
	if (likely(!mm->context.pte_frag)) {
		set_page_count(page, PTE_FRAG_NR);
		mm->context.pte_frag = ret + PTE_FRAG_SIZE;
	}
	spin_unlock(&mm->page_table_lock);

	return (pte_t *)ret;
}

pte_t *page_table_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel)
{
	pte_t *pte;

	pte = get_from_cache(mm);
	if (pte)
		return pte;

	return __alloc_for_cache(mm, kernel);
}
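
/*
 * Sizing sketch: each page handed out by __alloc_for_cache() above is carved
 * into PTE_FRAG_NR fragments of PTE_FRAG_SIZE bytes; with 64K pages and the
 * 4K fragment size mentioned earlier that is 64K / 4K = 16 fragments per
 * page. page_table_free() below only releases the backing page once
 * put_page_testzero() sees the last fragment reference go away.
 */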

void page_table_free(struct mm_struct *mm, unsigned long *table, int kernel)
{
	struct page *page = virt_to_page(table);
	if (put_page_testzero(page)) {
		if (!kernel)
			pgtable_page_dtor(page);
		free_hot_cold_page(page, 0);
	}
}

#ifdef CONFIG_SMP
static void page_table_free_rcu(void *table)
{
	struct page *page = virt_to_page(table);
	if (put_page_testzero(page)) {
		pgtable_page_dtor(page);
		free_hot_cold_page(page, 0);
	}
}

void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
{
	unsigned long pgf = (unsigned long)table;

	BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
	pgf |= shift;
	tlb_remove_table(tlb, (void *)pgf);
}

void __tlb_remove_table(void *_table)
{
	void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
	unsigned shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;

	if (!shift)
		/* PTE page needs special handling */
		page_table_free_rcu(table);
	else {
		BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
		kmem_cache_free(PGT_CACHE(shift), table);
	}
}
#else
void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
{
	if (!shift) {
		/* PTE page needs special handling */
		struct page *page = virt_to_page(table);
		if (put_page_testzero(page)) {
			pgtable_page_dtor(page);
			free_hot_cold_page(page, 0);
		}
	} else {
		BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
		kmem_cache_free(PGT_CACHE(shift), table);
	}
}
#endif
#endif /* CONFIG_PPC_64K_PAGES */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

/*
 * This is called when relaxing access to a hugepage. It's also called in
 * the page fault path when we don't hit any of the major fault cases,
 * i.e., a minor update of _PAGE_ACCESSED, _PAGE_DIRTY, etc. The generic
 * code will have handled those two for us; we additionally deal with
 * missing execute permission here on some processors.
 */
int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp, pmd_t entry, int dirty)
{
	int changed;
#ifdef CONFIG_DEBUG_VM
	WARN_ON(!pmd_trans_huge(*pmdp));
	assert_spin_locked(&vma->vm_mm->page_table_lock);
#endif
	changed = !pmd_same(*(pmdp), entry);
	if (changed) {
		__ptep_set_access_flags(pmdp_ptep(pmdp), pmd_pte(entry));
		/*
		 * Since we are not supporting SW TLB systems, we don't
		 * have anything similar to flush_tlb_page_nohash()
		 */
	}
	return changed;
}

unsigned long pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
				  pmd_t *pmdp, unsigned long clr,
				  unsigned long set)
{
	__be64 old_be, tmp;
	unsigned long old;

#ifdef CONFIG_DEBUG_VM
	WARN_ON(!pmd_trans_huge(*pmdp));
	assert_spin_locked(&mm->page_table_lock);
#endif

	__asm__ __volatile__(
	"1:	ldarx	%0,0,%3\n\
		and.	%1,%0,%6\n\
		bne-	1b \n\
		andc	%1,%0,%4 \n\
		or	%1,%1,%7\n\
		stdcx.	%1,0,%3 \n\
		bne-	1b"
	: "=&r" (old_be), "=&r" (tmp), "=m" (*pmdp)
	: "r" (pmdp), "r" (cpu_to_be64(clr)), "m" (*pmdp),
	  "r" (cpu_to_be64(_PAGE_BUSY)), "r" (cpu_to_be64(set))
	: "cc" );

	old = be64_to_cpu(old_be);

	trace_hugepage_update(addr, old, clr, set);
	if (old & _PAGE_HASHPTE)
		hpte_do_hugepage_flush(mm, addr, pmdp, old);
	return old;
}
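
/*
 * Illustrative sketch of how the callers below drive the clr/set masks:
 *
 *	pmd_hugepage_update(mm, addr, pmdp, _PAGE_PRESENT, 0);
 *		(pmdp_invalidate(): drop the present bit, keep the rest)
 *
 *	pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0);
 *		(pmdp_huge_get_and_clear(): wipe the pmd completely)
 *
 *	pmd_hugepage_update(mm, addr, pmdp, 0, _PAGE_PRIVILEGED);
 *		(pmdp_huge_split_prepare(): make the entry kernel-only)
 */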

pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp)
{
	pmd_t pmd;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(pmd_trans_huge(*pmdp));

	pmd = *pmdp;
	pmd_clear(pmdp);
	/*
	 * Wait for all pending hash_page to finish. This is needed
	 * in case of subpage collapse. When we collapse normal pages
	 * to hugepage, we first clear the pmd, then invalidate all
	 * the PTE entries. The assumption here is that any low level
	 * page fault will see a none pmd and take the slow path that
	 * will wait on mmap_sem. But we could very well be in a
	 * hash_page with local ptep pointer value. Such a hash page
	 * can result in adding new HPTE entries for normal subpages.
	 * That means we could be modifying the page content as we
	 * copy them to a huge page. So wait for parallel hash_page
	 * to finish before invalidating HPTE entries. We can do this
	 * by sending an IPI to all the cpus and executing a dummy
	 * function there.
	 */
	kick_all_cpus_sync();
	/*
	 * Now invalidate the hpte entries in the range
	 * covered by pmd. This makes sure we take a
	 * fault and will find the pmd as none, which will
	 * result in a major fault which takes mmap_sem and
	 * hence waits for the collapse to complete. Without this
	 * the __collapse_huge_page_copy can result in copying
	 * the old content.
	 */
	flush_tlb_pmd_range(vma->vm_mm, &pmd, address);
	return pmd;
}

int pmdp_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long address, pmd_t *pmdp)
{
	return __pmdp_test_and_clear_young(vma->vm_mm, address, pmdp);
}

/*
 * We currently remove entries from the hashtable regardless of whether
 * the entry was young or dirty. The generic routines only flush if the
 * entry was young or dirty which is not good enough.
 *
 * We should be more intelligent about this but for the moment we override
 * these functions and force a tlb flush unconditionally
 */
int pmdp_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pmd_t *pmdp)
{
	return __pmdp_test_and_clear_young(vma->vm_mm, address, pmdp);
}

/*
 * We want to put the pgtable in pmd and use pgtable for tracking
 * the base page size hptes
 */
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	pgtable_t *pgtable_slot;
	assert_spin_locked(&mm->page_table_lock);
	/*
	 * we store the pgtable in the second half of PMD
	 */
	pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD;
	*pgtable_slot = pgtable;
	/*
	 * Expose the deposited pgtable to other cpus before we set the
	 * hugepage PTE at the pmd level: the hash fault code looks at
	 * the deposited pgtable to store hash index values.
	 */
	smp_wmb();
}

pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	pgtable_t pgtable;
	pgtable_t *pgtable_slot;

	assert_spin_locked(&mm->page_table_lock);
	pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD;
	pgtable = *pgtable_slot;
	/*
	 * Once we withdraw, mark the entry NULL.
	 */
	*pgtable_slot = NULL;
	/*
	 * We store HPTE information in the deposited PTE fragment.
	 * zero out the content on withdraw.
	 */
	memset(pgtable, 0, PTE_FRAG_SIZE);
	return pgtable;
}

void pmdp_huge_split_prepare(struct vm_area_struct *vma,
			     unsigned long address, pmd_t *pmdp)
{
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(REGION_ID(address) != USER_REGION_ID);

	/*
	 * We can't mark the pmd none here, because that will cause a race
	 * against exit_mmap. We need to keep the pmd marked TRANS HUGE
	 * while we split, but at the same time we want the rest of the
	 * ppc64 code not to insert a hash pte on it, because we will be
	 * modifying the deposited pgtable in the caller of this function.
	 * Hence clear _PAGE_USER so that we move the fault handling to a
	 * higher level function, which will serialize against the ptl.
	 * We need to flush the existing hash pte entries here even though
	 * the translation is still valid, because we will withdraw the
	 * pgtable_t after this.
	 */
	pmd_hugepage_update(vma->vm_mm, address, pmdp, 0, _PAGE_PRIVILEGED);
}

/*
 * set a new huge pmd. We should not be called for updating
 * an existing pmd entry. That should go via pmd_hugepage_update.
 */
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd)
{
#ifdef CONFIG_DEBUG_VM
	WARN_ON(pte_present(pmd_pte(*pmdp)) && !pte_protnone(pmd_pte(*pmdp)));
	assert_spin_locked(&mm->page_table_lock);
	WARN_ON(!pmd_trans_huge(pmd));
#endif
	trace_hugepage_set_pmd(addr, pmd_val(pmd));
	return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
}

/*
 * We use this to invalidate a pmdp entry before switching from a
 * hugepte to regular pmd entry.
 */
void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		     pmd_t *pmdp)
{
	pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, 0);

	/*
	 * This ensures that generic code that relies on IRQ disabling
	 * to prevent a parallel THP split works as expected.
	 */
	kick_all_cpus_sync();
}

/*
 * A linux hugepage PMD was changed and the corresponding hash table entries
 * need to be flushed.
 */
void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
			    pmd_t *pmdp, unsigned long old_pmd)
{
	int ssize;
	unsigned int psize;
	unsigned long vsid;
	unsigned long flags = 0;
	const struct cpumask *tmp;

	/* get the base page size, vsid and segment size */
#ifdef CONFIG_DEBUG_VM
	psize = get_slice_psize(mm, addr);
	BUG_ON(psize == MMU_PAGE_16M);
#endif
	if (old_pmd & _PAGE_COMBO)
		psize = MMU_PAGE_4K;
	else
		psize = MMU_PAGE_64K;

	if (!is_kernel_addr(addr)) {
		ssize = user_segment_size(addr);
		vsid = get_vsid(mm->context.id, addr, ssize);
		WARN_ON(vsid == 0);
	} else {
		vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
		ssize = mmu_kernel_ssize;
	}

	tmp = cpumask_of(smp_processor_id());
	if (cpumask_equal(mm_cpumask(mm), tmp))
		flags |= HPTE_LOCAL_UPDATE;

	return flush_hash_hugepage(vsid, addr, pmdp, psize, ssize, flags);
}

static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot)
{
	return __pmd(pmd_val(pmd) | pgprot_val(pgprot));
}

pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot)
{
	unsigned long pmdv;

	pmdv = (pfn << PAGE_SHIFT) & PTE_RPN_MASK;
	return pmd_set_protbits(__pmd(pmdv), pgprot);
}

pmd_t mk_pmd(struct page *page, pgprot_t pgprot)
{
	return pfn_pmd(page_to_pfn(page), pgprot);
}

pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	unsigned long pmdv;

	pmdv = pmd_val(pmd);
	pmdv &= _HPAGE_CHG_MASK;
	return pmd_set_protbits(__pmd(pmdv), newprot);
}

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a HUGE PMD entry in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux HUGE PMD entry.
 */
void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
			  pmd_t *pmd)
{
	return;
}

pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
			      unsigned long addr, pmd_t *pmdp)
{
	pmd_t old_pmd;
	pgtable_t pgtable;
	unsigned long old;
	pgtable_t *pgtable_slot;

	old = pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0);
	old_pmd = __pmd(old);
	/*
	 * We have pmd == none and we are holding page_table_lock.
	 * So we can safely go and clear the pgtable hash
	 * index info.
	 */
	pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD;
	pgtable = *pgtable_slot;
	/*
	 * Zero out the old valid bit and hash index details so that a
	 * later hash fault does not pick them up.
	 */
	memset(pgtable, 0, PTE_FRAG_SIZE);
	/*
	 * Serialize against find_linux_pte_or_hugepte which does lock-less
	 * lookup in page tables with local interrupts disabled. For huge pages
	 * it casts pmd_t to pte_t. Since format of pte_t is different from
	 * pmd_t we want to prevent transit from pmd pointing to page table
	 * to pmd pointing to huge page (and back) while interrupts are disabled.
	 * We clear pmd to possibly replace it with page table pointer in
	 * different code paths. So make sure we wait for the parallel
	 * find_linux_pte_or_hugepte to finish.
	 */
	kick_all_cpus_sync();
	return old_pmd;
}

int has_transparent_hugepage(void)
{
	BUILD_BUG_ON_MSG((PMD_SHIFT - PAGE_SHIFT) >= MAX_ORDER,
		"hugepages can't be allocated by the buddy allocator");

	BUILD_BUG_ON_MSG((PMD_SHIFT - PAGE_SHIFT) < 2,
			 "We need more than 2 pages to do deferred thp split");

	if (!mmu_has_feature(MMU_FTR_16M_PAGE))
		return 0;
	/*
	 * We support THP only if PMD_SIZE is 16MB.
	 */
	if (mmu_psize_defs[MMU_PAGE_16M].shift != PMD_SHIFT)
		return 0;
	/*
	 * We need to make sure that we support a 16MB hugepage in a segment
	 * with base page size 64K or 4K. We only enable THP with a PAGE_SIZE
	 * of 64K.
	 */
	/*
	 * If we have 64K HPTE, we will be using that by default
	 */
	if (mmu_psize_defs[MMU_PAGE_64K].shift &&
	    (mmu_psize_defs[MMU_PAGE_64K].penc[MMU_PAGE_16M] == -1))
		return 0;
	/*
	 * Ok we only have 4K HPTE
	 */
	if (mmu_psize_defs[MMU_PAGE_4K].penc[MMU_PAGE_16M] == -1)
		return 0;

	return 1;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */