/*
 * This file contains ioremap and related functions for 64-bit machines.
 *
 * Derived from arch/ppc64/mm/init.c
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@samba.org)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/firmware.h>
#include <asm/dma.h>

#include "mmu_decl.h"

#define CREATE_TRACE_POINTS
#include <trace/events/thp.h>
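
/*
 * With the hash MMU, all of a process's user addresses must fall within
 * the range that a single context's VSIDs can cover, i.e. below
 * 1UL << (ESID_BITS + SID_SHIFT).  The build-time check below catches
 * configurations that would exceed that range.
 */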
#ifdef CONFIG_PPC_STD_MMU_64
#if TASK_SIZE_USER64 > (1UL << (ESID_BITS + SID_SHIFT))
#error TASK_SIZE_USER64 exceeds user VSID range
#endif
#endif

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * partition table and process table for ISA 3.0
 */
struct prtb_entry *process_tb;
struct patb_entry *partition_tb;
/*
 * page table size
 */
unsigned long __pte_index_size;
EXPORT_SYMBOL(__pte_index_size);
unsigned long __pmd_index_size;
EXPORT_SYMBOL(__pmd_index_size);
unsigned long __pud_index_size;
EXPORT_SYMBOL(__pud_index_size);
unsigned long __pgd_index_size;
EXPORT_SYMBOL(__pgd_index_size);
unsigned long __pmd_cache_index;
EXPORT_SYMBOL(__pmd_cache_index);
unsigned long __pte_table_size;
EXPORT_SYMBOL(__pte_table_size);
unsigned long __pmd_table_size;
EXPORT_SYMBOL(__pmd_table_size);
unsigned long __pud_table_size;
EXPORT_SYMBOL(__pud_table_size);
unsigned long __pgd_table_size;
EXPORT_SYMBOL(__pgd_table_size);

#endif
unsigned long ioremap_bot = IOREMAP_BASE;

/**
 * __ioremap_at - Low level function to establish the page tables
 *                for an IO mapping
 */
void __iomem * __ioremap_at(phys_addr_t pa, void *ea, unsigned long size,
			    unsigned long flags)
{
	unsigned long i;

	/* Make sure we have the base flags */
	if ((flags & _PAGE_PRESENT) == 0)
		flags |= pgprot_val(PAGE_KERNEL);

	/* We don't support the 4K PFN hack with ioremap */
	if (flags & H_PAGE_4K_PFN)
		return NULL;

	WARN_ON(pa & ~PAGE_MASK);
	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
	WARN_ON(size & ~PAGE_MASK);

	for (i = 0; i < size; i += PAGE_SIZE)
		if (map_kernel_page((unsigned long)ea+i, pa+i, flags))
			return NULL;

	return (void __iomem *)ea;
}

/**
 * __iounmap_at - Low level function to tear down the page tables
 *                for an IO mapping. This is used for mappings that
 *                are manipulated manually, like partial unmapping of
 *                PCI IOs or ISA space.
 */
void __iounmap_at(void *ea, unsigned long size)
{
	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
	WARN_ON(size & ~PAGE_MASK);

	unmap_kernel_range((unsigned long)ea, size);
}
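
/*
 * Illustrative use of the low-level pair above (not lifted from any
 * in-tree caller): establish a mapping at a caller-chosen kernel virtual
 * address and later tear part of it down.  pa, ea and the sizes are
 * made-up example values.
 *
 *	void __iomem *va = __ioremap_at(pa, ea, 0x10000,
 *			pgprot_val(pgprot_noncached(__pgprot(0))));
 *	...
 *	__iounmap_at(ea, 0x4000);	- partial teardown
 */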

void __iomem * __ioremap_caller(phys_addr_t addr, unsigned long size,
				unsigned long flags, void *caller)
{
	phys_addr_t paligned;
	void __iomem *ret;

	/*
	 * Choose an address to map it to.
	 * Once the vmalloc system is running, we use it.
	 * Before that, we map using addresses going
	 * up from ioremap_bot.  vmalloc will use
	 * the addresses from ioremap_bot through
	 * IOREMAP_END.
	 */
	paligned = addr & PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - paligned;

	if ((size == 0) || (paligned == 0))
		return NULL;

	if (slab_is_available()) {
		struct vm_struct *area;

		area = __get_vm_area_caller(size, VM_IOREMAP,
					    ioremap_bot, IOREMAP_END,
					    caller);
		if (area == NULL)
			return NULL;

		area->phys_addr = paligned;
		ret = __ioremap_at(paligned, area->addr, size, flags);
		if (!ret)
			vunmap(area->addr);
	} else {
		ret = __ioremap_at(paligned, (void *)ioremap_bot, size, flags);
		if (ret)
			ioremap_bot += size;
	}

	if (ret)
		ret += addr & ~PAGE_MASK;
	return ret;
}

void __iomem * __ioremap(phys_addr_t addr, unsigned long size,
			 unsigned long flags)
{
	return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
}

void __iomem * ioremap(phys_addr_t addr, unsigned long size)
{
	unsigned long flags = pgprot_val(pgprot_noncached(__pgprot(0)));
	void *caller = __builtin_return_address(0);

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags, caller);
	return __ioremap_caller(addr, size, flags, caller);
}
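
/*
 * Typical (illustrative) driver-side usage of the interface above; the
 * physical address, size and register offset are made-up example values:
 *
 *	void __iomem *regs = ioremap(0xf1000000, 0x1000);
 *	if (regs) {
 *		u32 val = readl(regs + 0x10);
 *		...
 *		iounmap(regs);
 *	}
 */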

void __iomem * ioremap_wc(phys_addr_t addr, unsigned long size)
{
	unsigned long flags = pgprot_val(pgprot_noncached_wc(__pgprot(0)));
	void *caller = __builtin_return_address(0);

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags, caller);
	return __ioremap_caller(addr, size, flags, caller);
}

void __iomem * ioremap_prot(phys_addr_t addr, unsigned long size,
			    unsigned long flags)
{
	void *caller = __builtin_return_address(0);

	/* writeable implies dirty for kernel addresses */
	if (flags & _PAGE_WRITE)
		flags |= _PAGE_DIRTY;

	/* we don't want to let _PAGE_EXEC leak out */
	flags &= ~_PAGE_EXEC;
	/*
	 * Force kernel mapping.
	 */
#if defined(CONFIG_PPC_BOOK3S_64)
	flags |= _PAGE_PRIVILEGED;
#else
	flags &= ~_PAGE_USER;
#endif

#ifdef _PAGE_BAP_SR
	/* _PAGE_USER contains _PAGE_BAP_SR on BookE using the new PTE format
	 * which means that we just cleared supervisor access... oops ;-) This
	 * restores it
	 */
	flags |= _PAGE_BAP_SR;
#endif

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags, caller);
	return __ioremap_caller(addr, size, flags, caller);
}

/*
 * Unmap an IO region and remove it from the vmalloc'd list.
 * Access to IO memory should be serialized by the driver.
 */
void __iounmap(volatile void __iomem *token)
{
	void *addr;

	if (!slab_is_available())
		return;

	addr = (void *) ((unsigned long __force)
			 PCI_FIX_ADDR(token) & PAGE_MASK);
	if ((unsigned long)addr < ioremap_bot) {
		printk(KERN_WARNING "Attempt to iounmap early bolted mapping"
		       " at 0x%p\n", addr);
		return;
	}
	vunmap(addr);
}

void iounmap(volatile void __iomem *token)
{
	if (ppc_md.iounmap)
		ppc_md.iounmap(token);
	else
		__iounmap(token);
}

EXPORT_SYMBOL(ioremap);
EXPORT_SYMBOL(ioremap_wc);
EXPORT_SYMBOL(ioremap_prot);
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(__ioremap_at);
EXPORT_SYMBOL(iounmap);
EXPORT_SYMBOL(__iounmap);
EXPORT_SYMBOL(__iounmap_at);

#ifndef __PAGETABLE_PUD_FOLDED
/* 4 level page table */
struct page *pgd_page(pgd_t pgd)
{
	if (pgd_huge(pgd))
		return pte_page(pgd_pte(pgd));
	return virt_to_page(pgd_page_vaddr(pgd));
}
#endif

struct page *pud_page(pud_t pud)
{
	if (pud_huge(pud))
		return pte_page(pud_pte(pud));
	return virt_to_page(pud_page_vaddr(pud));
}

/*
 * For a hugepage we have the pfn in the pmd and use the low PTE_RPN_SHIFT
 * bits for flags.
 * For a PTE page, we have a PTE_FRAG_SIZE (4K) aligned virtual address.
 */
struct page *pmd_page(pmd_t pmd)
{
	if (pmd_trans_huge(pmd) || pmd_huge(pmd))
		return pte_page(pmd_pte(pmd));
	return virt_to_page(pmd_page_vaddr(pmd));
}

#ifdef CONFIG_PPC_64K_PAGES
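/*
 * With 64K pages a whole page is much larger than a single page-table
 * level needs, so PTE pages are handed out as PTE_FRAG_SIZE fragments.
 * A freshly allocated page is split into PTE_FRAG_NR fragments; the
 * page's reference count tracks the fragments still in use, and
 * mm->context.pte_frag points at the next free fragment (NULL once the
 * current page has been fully consumed).
 */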
static pte_t *get_from_cache(struct mm_struct *mm)
{
	void *pte_frag, *ret;

	spin_lock(&mm->page_table_lock);
	ret = mm->context.pte_frag;
	if (ret) {
		pte_frag = ret + PTE_FRAG_SIZE;
		/*
		 * If we have taken up all the fragments mark PTE page NULL
		 */
		if (((unsigned long)pte_frag & ~PAGE_MASK) == 0)
			pte_frag = NULL;
		mm->context.pte_frag = pte_frag;
	}
	spin_unlock(&mm->page_table_lock);
	return (pte_t *)ret;
}

static pte_t *__alloc_for_cache(struct mm_struct *mm, int kernel)
{
	void *ret = NULL;
	struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
				       __GFP_REPEAT | __GFP_ZERO);
	if (!page)
		return NULL;
	if (!kernel && !pgtable_page_ctor(page)) {
		__free_page(page);
		return NULL;
	}

	ret = page_address(page);
	spin_lock(&mm->page_table_lock);
	/*
	 * If another thread has already populated mm->context.pte_frag,
	 * return the freshly allocated page with a single fragment
	 * count and don't add it to the cache.
	 */
	if (likely(!mm->context.pte_frag)) {
		set_page_count(page, PTE_FRAG_NR);
		mm->context.pte_frag = ret + PTE_FRAG_SIZE;
	}
	spin_unlock(&mm->page_table_lock);

	return (pte_t *)ret;
}

pte_t *pte_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel)
{
	pte_t *pte;

	pte = get_from_cache(mm);
	if (pte)
		return pte;

	return __alloc_for_cache(mm, kernel);
}

void pte_fragment_free(unsigned long *table, int kernel)
{
	struct page *page = virt_to_page(table);
	if (put_page_testzero(page)) {
		if (!kernel)
			pgtable_page_dtor(page);
		free_hot_cold_page(page, 0);
	}
}

#ifdef CONFIG_SMP
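/*
 * pgtable_free_tlb() defers freeing of a page-table page until the TLB
 * has been flushed.  The table's index size ("shift") is stashed in the
 * otherwise-zero low bits of the table pointer, and __tlb_remove_table()
 * recovers it to pick the right destructor: PTE fragments for shift 0,
 * the matching kmem cache otherwise.
 */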
void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
{
	unsigned long pgf = (unsigned long)table;

	BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
	pgf |= shift;
	tlb_remove_table(tlb, (void *)pgf);
}

void __tlb_remove_table(void *_table)
{
	void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
	unsigned shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;

	if (!shift)
		/* PTE page needs special handling */
		pte_fragment_free(table, 0);
	else {
		BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
		kmem_cache_free(PGT_CACHE(shift), table);
	}
}
#else
void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
{
	if (!shift) {
		/* PTE page needs special handling */
		pte_fragment_free(table, 0);
	} else {
		BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
		kmem_cache_free(PGT_CACHE(shift), table);
	}
}
#endif
#endif /* CONFIG_PPC_64K_PAGES */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

/*
 * This is called when relaxing access to a hugepage. It's also called in
 * the page fault path when we don't hit any of the major fault cases,
 * i.e. a minor update of _PAGE_ACCESSED, _PAGE_DIRTY, etc... The generic
 * code will have handled those two for us; we additionally deal with
 * missing execute permission here on some processors.
 */
int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp, pmd_t entry, int dirty)
{
	int changed;
#ifdef CONFIG_DEBUG_VM
	WARN_ON(!pmd_trans_huge(*pmdp));
	assert_spin_locked(&vma->vm_mm->page_table_lock);
#endif
	changed = !pmd_same(*(pmdp), entry);
	if (changed) {
		__ptep_set_access_flags(pmdp_ptep(pmdp), pmd_pte(entry));
		/*
		 * Since we are not supporting SW TLB systems, we don't
		 * have anything similar to flush_tlb_page_nohash()
		 */
	}
	return changed;
}
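
/*
 * Atomically clear the bits in 'clr' and set the bits in 'set' on a huge
 * pmd, retrying while a parallel hash insertion holds H_PAGE_BUSY.  The
 * previous pmd value is returned in CPU byte order so that the code
 * below can check H_PAGE_HASHPTE and flush stale hash entries.
 */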
unsigned long pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
				  pmd_t *pmdp, unsigned long clr,
				  unsigned long set)
{
	__be64 old_be, tmp;
	unsigned long old;

#ifdef CONFIG_DEBUG_VM
	WARN_ON(!pmd_trans_huge(*pmdp));
	assert_spin_locked(&mm->page_table_lock);
#endif

	__asm__ __volatile__(
	"1:	ldarx	%0,0,%3\n\
		and.	%1,%0,%6\n\
		bne-	1b \n\
		andc	%1,%0,%4 \n\
		or	%1,%1,%7\n\
		stdcx.	%1,0,%3 \n\
		bne-	1b"
	: "=&r" (old_be), "=&r" (tmp), "=m" (*pmdp)
	: "r" (pmdp), "r" (cpu_to_be64(clr)), "m" (*pmdp),
	  "r" (cpu_to_be64(H_PAGE_BUSY)), "r" (cpu_to_be64(set))
	: "cc" );

	old = be64_to_cpu(old_be);

	trace_hugepage_update(addr, old, clr, set);
	if (old & H_PAGE_HASHPTE)
		hpte_do_hugepage_flush(mm, addr, pmdp, old);
	return old;
}

pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp)
{
	pmd_t pmd;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(pmd_trans_huge(*pmdp));

	pmd = *pmdp;
	pmd_clear(pmdp);
	/*
	 * Wait for all pending hash_page to finish. This is needed
	 * in case of subpage collapse. When we collapse normal pages
	 * to hugepage, we first clear the pmd, then invalidate all
	 * the PTE entries. The assumption here is that any low level
	 * page fault will see a none pmd and take the slow path that
	 * will wait on mmap_sem. But we could very well be in a
	 * hash_page with local ptep pointer value. Such a hash page
	 * can result in adding new HPTE entries for normal subpages.
	 * That means we could be modifying the page content as we
	 * copy them to a huge page. So wait for parallel hash_page
	 * to finish before invalidating HPTE entries. We can do this
	 * by sending an IPI to all the cpus and executing a dummy
	 * function there.
	 */
	kick_all_cpus_sync();
	/*
	 * Now invalidate the hpte entries in the range
	 * covered by pmd. This makes sure we take a
	 * fault and will find the pmd as none, which will
	 * result in a major fault which takes mmap_sem and
	 * hence waits for collapse to complete. Without this
	 * the __collapse_huge_page_copy can result in copying
	 * the old content.
	 */
	flush_tlb_pmd_range(vma->vm_mm, &pmd, address);
	return pmd;
}

/*
 * We currently remove entries from the hashtable regardless of whether
 * the entry was young or dirty.
 *
 * We should be more intelligent about this but for the moment we override
 * these functions and force a tlb flush unconditionally.
 */
int pmdp_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long address, pmd_t *pmdp)
{
	return __pmdp_test_and_clear_young(vma->vm_mm, address, pmdp);
}

/*
 * We want to put the pgtable in pmd and use pgtable for tracking
 * the base page size hptes.
 */
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	pgtable_t *pgtable_slot;
	assert_spin_locked(&mm->page_table_lock);
	/*
	 * we store the pgtable in the second half of PMD
	 */
	pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD;
	*pgtable_slot = pgtable;
	/*
	 * Expose the deposited pgtable to other cpus before we set the
	 * hugepage PTE at the pmd level; the hash fault code looks at
	 * the deposited pgtable to store hash index values.
	 */
	smp_wmb();
}

pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	pgtable_t pgtable;
	pgtable_t *pgtable_slot;

	assert_spin_locked(&mm->page_table_lock);
	pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD;
	pgtable = *pgtable_slot;
	/*
	 * Once we withdraw, mark the entry NULL.
	 */
	*pgtable_slot = NULL;
	/*
	 * We store HPTE information in the deposited PTE fragment.
	 * Zero out the content on withdraw.
	 */
	memset(pgtable, 0, PTE_FRAG_SIZE);
	return pgtable;
}

void pmdp_huge_split_prepare(struct vm_area_struct *vma,
			     unsigned long address, pmd_t *pmdp)
{
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(REGION_ID(address) != USER_REGION_ID);

	/*
	 * We can't mark the pmd none here, because that will cause a race
	 * against exit_mmap. We need to continue marking the pmd TRANS HUGE
	 * while we split, but at the same time we want the rest of the ppc64
	 * code not to insert a hash pte on this, because we will be modifying
	 * the deposited pgtable in the caller of this function. Hence mark
	 * the pmd kernel-only (_PAGE_PRIVILEGED) so that we move the fault
	 * handling to a higher level function, which will serialize against
	 * the ptl. We need to flush existing hash pte entries here even
	 * though the translation is still valid, because we will withdraw
	 * the pgtable_t after this.
	 */
	pmd_hugepage_update(vma->vm_mm, address, pmdp, 0, _PAGE_PRIVILEGED);
}

/*
 * Set a new huge pmd. We should not be called for updating
 * an existing pmd entry. That should go via pmd_hugepage_update.
 */
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd)
{
#ifdef CONFIG_DEBUG_VM
	WARN_ON(pte_present(pmd_pte(*pmdp)) && !pte_protnone(pmd_pte(*pmdp)));
	assert_spin_locked(&mm->page_table_lock);
	WARN_ON(!pmd_trans_huge(pmd));
#endif
	trace_hugepage_set_pmd(addr, pmd_val(pmd));
	return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
}

/*
 * We use this to invalidate a pmdp entry before switching from a
 * hugepte to a regular pmd entry.
 */
void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		     pmd_t *pmdp)
{
	pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, 0);

	/*
	 * This ensures that generic code that relies on IRQ disabling
	 * to prevent a parallel THP split works as expected.
	 */
	kick_all_cpus_sync();
}

/*
 * A linux hugepage PMD was changed and the corresponding hash table entries
 * need to be flushed.
 */
void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
			    pmd_t *pmdp, unsigned long old_pmd)
{
	int ssize;
	unsigned int psize;
	unsigned long vsid;
	unsigned long flags = 0;
	const struct cpumask *tmp;

	/* get the base page size, vsid and segment size */
#ifdef CONFIG_DEBUG_VM
	psize = get_slice_psize(mm, addr);
	BUG_ON(psize == MMU_PAGE_16M);
#endif
	if (old_pmd & H_PAGE_COMBO)
		psize = MMU_PAGE_4K;
	else
		psize = MMU_PAGE_64K;

	if (!is_kernel_addr(addr)) {
		ssize = user_segment_size(addr);
		vsid = get_vsid(mm->context.id, addr, ssize);
		WARN_ON(vsid == 0);
	} else {
		vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
		ssize = mmu_kernel_ssize;
	}

	tmp = cpumask_of(smp_processor_id());
	if (cpumask_equal(mm_cpumask(mm), tmp))
		flags |= HPTE_LOCAL_UPDATE;

	return flush_hash_hugepage(vsid, addr, pmdp, psize, ssize, flags);
}

static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot)
{
	return __pmd(pmd_val(pmd) | pgprot_val(pgprot));
}

pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot)
{
	unsigned long pmdv;

	pmdv = (pfn << PAGE_SHIFT) & PTE_RPN_MASK;
	return pmd_set_protbits(__pmd(pmdv), pgprot);
}

pmd_t mk_pmd(struct page *page, pgprot_t pgprot)
{
	return pfn_pmd(page_to_pfn(page), pgprot);
}

pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	unsigned long pmdv;

	pmdv = pmd_val(pmd);
	pmdv &= _HPAGE_CHG_MASK;
	return pmd_set_protbits(__pmd(pmdv), newprot);
}

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a HUGE PMD entry in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux HUGE PMD entry.
 */
void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
			  pmd_t *pmd)
{
	return;
}

pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
			      unsigned long addr, pmd_t *pmdp)
{
	pmd_t old_pmd;
	pgtable_t pgtable;
	unsigned long old;
	pgtable_t *pgtable_slot;

	old = pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0);
	old_pmd = __pmd(old);
	/*
	 * We have pmd == none and we are holding page_table_lock.
	 * So we can safely go and clear the pgtable hash
	 * index info.
	 */
	pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD;
	pgtable = *pgtable_slot;
	/*
	 * Zero out the old valid and hash index details so that the
	 * hash fault code doesn't look at stale values.
	 */
	memset(pgtable, 0, PTE_FRAG_SIZE);
	/*
	 * Serialize against find_linux_pte_or_hugepte which does lock-less
	 * lookup in page tables with local interrupts disabled. For huge pages
	 * it casts pmd_t to pte_t. Since format of pte_t is different from
	 * pmd_t we want to prevent transit from pmd pointing to page table
	 * to pmd pointing to huge page (and back) while interrupts are disabled.
	 * We clear pmd to possibly replace it with page table pointer in
	 * different code paths. So make sure we wait for the parallel
	 * find_linux_pte_or_hugepte to finish.
	 */
	kick_all_cpus_sync();
	return old_pmd;
}

int has_transparent_hugepage(void)
{
	if (!mmu_has_feature(MMU_FTR_16M_PAGE))
		return 0;
	/*
	 * We support THP only if PMD_SIZE is 16MB.
	 */
	if (mmu_psize_defs[MMU_PAGE_16M].shift != PMD_SHIFT)
		return 0;
	/*
	 * We need to make sure that we support 16MB hugepage in a segment
	 * with base page size 64K or 4K. We only enable THP with a PAGE_SIZE
	 * of 64K.
	 */
	/*
	 * If we have 64K HPTE, we will be using that by default
	 */
	if (mmu_psize_defs[MMU_PAGE_64K].shift &&
	    (mmu_psize_defs[MMU_PAGE_64K].penc[MMU_PAGE_16M] == -1))
		return 0;
	/*
	 * Ok we only have 4K HPTE
	 */
	if (mmu_psize_defs[MMU_PAGE_4K].penc[MMU_PAGE_16M] == -1)
		return 0;

	return 1;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */