/*
 *  PowerPC version
 *  Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *  Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *  Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#undef DEBUG

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/nodemask.h>
#include <linux/module.h>
#include <linux/poison.h>
#include <linux/memblock.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/uaccess.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/eeh.h>
#include <asm/processor.h>
#include <asm/mmzone.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/iommu.h>
#include <asm/vdso.h>

#include "mmu_decl.h"

#ifdef CONFIG_PPC_STD_MMU_64
#if PGTABLE_RANGE > USER_VSID_RANGE
#warning Limited user VSID range means pagetable space is wasted
#endif

#if (TASK_SIZE_USER64 < PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE)
#warning TASK_SIZE is smaller than it needs to be.
#endif
#endif /* CONFIG_PPC_STD_MMU_64 */

phys_addr_t memstart_addr = ~0;
EXPORT_SYMBOL_GPL(memstart_addr);
phys_addr_t kernstart_addr;
EXPORT_SYMBOL_GPL(kernstart_addr);

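/*
 * Constructors for the pagetable kmem_caches created below.  The slab
 * allocator runs a cache's constructor once per object when a new slab
 * page is allocated, so tables handed out by these caches start out
 * zero-filled (and, per the usual kmem_cache constructor convention,
 * are expected to be returned in the same cleared state).
 */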
static void pgd_ctor(void *addr)
{
	memset(addr, 0, PGD_TABLE_SIZE);
}

static void pud_ctor(void *addr)
{
	memset(addr, 0, PUD_TABLE_SIZE);
}

static void pmd_ctor(void *addr)
{
	memset(addr, 0, PMD_TABLE_SIZE);
}

struct kmem_cache *pgtable_cache[MAX_PGTABLE_INDEX_SIZE];
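/*
 * Note the indexing convention: the cache for index size 'shift' lives
 * at pgtable_cache[shift - 1] (see the assignment in pgtable_cache_add()
 * below), which the PGT_CACHE() accessor used throughout this file is
 * expected to mirror.
 */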

/*
 * Create a kmem_cache() for pagetables.  This is not used for PTE
 * pages - they're linked to struct page, come from the normal free
 * pages pool and have a different entry size (see real_pte_t) to
 * everything else.  Caches created by this function are used for all
 * the higher level pagetables, and for hugepage pagetables.
 */
void pgtable_cache_add(unsigned shift, void (*ctor)(void *))
{
	char *name;
	unsigned long table_size = sizeof(void *) << shift;
	unsigned long align = table_size;

	/* When batching pgtable pointers for RCU freeing, we store
	 * the index size in the low bits.  Table alignment must be
	 * big enough to fit it.
	 *
	 * Likewise, hugepage pagetable pointers contain a (different)
	 * shift value in the low bits.  All tables must be aligned so
	 * as to leave enough 0 bits in the address to contain it. */
	unsigned long minalign = max(MAX_PGTABLE_INDEX_SIZE + 1,
				     HUGEPD_SHIFT_MASK + 1);
	struct kmem_cache *new;

	/* It would be nice if this was a BUILD_BUG_ON(), but at the
	 * moment, gcc doesn't seem to recognize is_power_of_2 as a
	 * constant expression, so, so much for that. */
	BUG_ON(!is_power_of_2(minalign));
	BUG_ON((shift < 1) || (shift > MAX_PGTABLE_INDEX_SIZE));

	if (PGT_CACHE(shift))
		return; /* Already have a cache of this size */

	align = max_t(unsigned long, align, minalign);
	name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift);
	new = kmem_cache_create(name, table_size, align, 0, ctor);
	kfree(name);
	pgtable_cache[shift - 1] = new;
	pr_debug("Allocated pgtable cache for order %d\n", shift);
}
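/*
 * A worked example of the sizing above (illustrative only; the actual
 * macro values depend on the configuration): for shift = 9, a table
 * holds 2^9 pointers, so table_size = sizeof(void *) << 9 = 4096 bytes
 * on 64-bit.  If, say, MAX_PGTABLE_INDEX_SIZE were 15, minalign would
 * be at least 16, guaranteeing the low four bits of every table
 * address are zero and free to carry the index size for RCU batching.
 */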

void pgtable_cache_init(void)
{
	pgtable_cache_add(PGD_INDEX_SIZE, pgd_ctor);
	pgtable_cache_add(PMD_CACHE_INDEX, pmd_ctor);
	/*
	 * In all current configurations, when the PUD index exists it is
	 * the same size as either the PGD or the PMD index, except with
	 * THP enabled on book3s 64.
	 */
	if (PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE))
		pgtable_cache_add(PUD_INDEX_SIZE, pud_ctor);

	if (!PGT_CACHE(PGD_INDEX_SIZE) || !PGT_CACHE(PMD_CACHE_INDEX))
		panic("Couldn't allocate pgtable caches");
	if (PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE))
		panic("Couldn't allocate pud pgtable caches");
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Given an address within the vmemmap, determine the pfn of the page that
 * represents the start of the section it is within.  Note that we have to
 * do this by hand as the proffered address may not be correctly aligned.
 * Subtraction of non-aligned pointers produces undefined results.
 */
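/*
 * For illustration (the real numbers are configuration-dependent): with
 * sizeof(struct page) == 64 and PAGES_PER_SECTION == 1 << 16, each
 * section's slice of the vmemmap spans 64 << 16 == 4 MB, and masking
 * the pfn-sized offset with PAGE_SECTION_MASK rounds an arbitrary
 * vmemmap address down to the first pfn of its section.
 */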
static unsigned long __meminit vmemmap_section_start(unsigned long page)
{
	unsigned long offset = page - ((unsigned long)(vmemmap));

	/* Return the pfn of the start of the section. */
	return (offset / sizeof(struct page)) & PAGE_SECTION_MASK;
}

/*
 * Check if this vmemmap page is already initialised.  If any section
 * which overlaps this vmemmap page is initialised then this page is
 * initialised already.
 */
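/*
 * (A backing page of page_size bytes covers page_size /
 * sizeof(struct page) page structs and so can straddle several
 * sparsemem sections; hence the loop below steps through it one
 * section's worth of page structs at a time.)
 */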
static int __meminit vmemmap_populated(unsigned long start, int page_size)
{
	unsigned long end = start + page_size;
	start = (unsigned long)(pfn_to_page(vmemmap_section_start(start)));

	for (; start < end; start += (PAGES_PER_SECTION * sizeof(struct page)))
		if (pfn_valid(page_to_pfn((struct page *)start)))
			return 1;

	return 0;
}

/* On hash-based CPUs, the vmemmap is bolted in the hash table.
 *
 * On Book3E CPUs, the vmemmap is currently mapped in the top half of
 * the vmalloc space using normal page tables, though the size of
 * pages encoded in the PTEs can be different.
 */

#ifdef CONFIG_PPC_BOOK3E
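/*
 * Book3E software PTEs carry their page-size encoding in bits 8-11 of
 * the flags, as set up below.  As a sketch of what that means (the enc
 * values themselves come from mmu_psize_defs and vary by core): if the
 * vmemmap page size were 16M with enc == 0x7, the flags would gain
 * 0x7 << 8 == 0x700 on top of the base kernel RW permission bits.
 */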
static int __meminit vmemmap_create_mapping(unsigned long start,
					    unsigned long page_size,
					    unsigned long phys)
{
	/* Create a PTE encoding without page size */
	unsigned long i, flags = _PAGE_PRESENT | _PAGE_ACCESSED |
		_PAGE_KERNEL_RW;

	/* PTEs only contain page size encodings up to 32M */
	BUG_ON(mmu_psize_defs[mmu_vmemmap_psize].enc > 0xf);

	/* Encode the size in the PTE */
	flags |= mmu_psize_defs[mmu_vmemmap_psize].enc << 8;

	/* For each PTE for that area, map things.  Note that we don't
	 * increment phys because all PTEs are of the large size and
	 * thus must have the low bits clear
	 */
	for (i = 0; i < page_size; i += PAGE_SIZE)
		BUG_ON(map_kernel_page(start + i, phys, flags));

	return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
static void vmemmap_remove_mapping(unsigned long start,
				   unsigned long page_size)
{
}
#endif
#else /* CONFIG_PPC_BOOK3E */
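/*
 * On hash, the mapping is bolted into the hash page table.  If the
 * bolt fails partway through, the error path below tears down whatever
 * HPTEs did make it in; -ENOENT from the teardown just means a slot
 * was never populated, so it is tolerated.
 */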
static int __meminit vmemmap_create_mapping(unsigned long start,
					    unsigned long page_size,
					    unsigned long phys)
{
	int rc = htab_bolt_mapping(start, start + page_size, phys,
				   pgprot_val(PAGE_KERNEL),
				   mmu_vmemmap_psize, mmu_kernel_ssize);
	if (rc < 0) {
		int rc2 = htab_remove_mapping(start, start + page_size,
					      mmu_vmemmap_psize,
					      mmu_kernel_ssize);
		BUG_ON(rc2 && (rc2 != -ENOENT));
	}
	return rc;
}

#ifdef CONFIG_MEMORY_HOTPLUG
static void vmemmap_remove_mapping(unsigned long start,
				   unsigned long page_size)
{
	int rc = htab_remove_mapping(start, start + page_size,
				     mmu_vmemmap_psize,
				     mmu_kernel_ssize);
	BUG_ON((rc < 0) && (rc != -ENOENT));
	WARN_ON(rc == -ENOENT);
}
#endif

#endif /* CONFIG_PPC_BOOK3E */

struct vmemmap_backing *vmemmap_list;
static struct vmemmap_backing *next;
static int num_left;
static int num_freed;
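/*
 * Allocator state for vmemmap_backing entries: 'next' points either at
 * the head of a chain of freed entries ('num_freed' of them) or at the
 * unused tail of the most recently allocated backing page ('num_left'
 * entries remaining).  With 4K pages and a 24-byte vmemmap_backing
 * (three words: list, phys, virt_addr), one page yields around 170
 * entries - the sizes here are illustrative, not guaranteed.
 */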

static __meminit struct vmemmap_backing *vmemmap_list_alloc(int node)
{
	struct vmemmap_backing *vmem_back;
	/* get from freed entries first */
	if (num_freed) {
		num_freed--;
		vmem_back = next;
		next = next->list;

		return vmem_back;
	}

	/* allocate a page when required and hand out chunks */
	if (!num_left) {
		next = vmemmap_alloc_block(PAGE_SIZE, node);
		if (unlikely(!next)) {
			WARN_ON(1);
			return NULL;
		}
		num_left = PAGE_SIZE / sizeof(struct vmemmap_backing);
	}

	num_left--;

	return next++;
}

static __meminit void vmemmap_list_populate(unsigned long phys,
					    unsigned long start,
					    int node)
{
	struct vmemmap_backing *vmem_back;

	vmem_back = vmemmap_list_alloc(node);
	if (unlikely(!vmem_back)) {
		WARN_ON(1);
		return;
	}

	vmem_back->phys = phys;
	vmem_back->virt_addr = start;
	vmem_back->list = vmemmap_list;

	vmemmap_list = vmem_back;
}

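/*
 * Populate the vmemmap for [start, end): for each backing-page-sized
 * chunk not already covered by an initialised section, allocate
 * physical memory, record it on vmemmap_list (newest entries at the
 * head), and map it.  E.g. with a hypothetical 16M vmemmap page size,
 * a 64M range would take at most four allocate/record/map iterations.
 */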
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;

	/* Align to the page size of the linear mapping. */
	start = _ALIGN_DOWN(start, page_size);

	pr_debug("vmemmap_populate %lx..%lx, node %d\n", start, end, node);

	for (; start < end; start += page_size) {
		void *p;
		int rc;

		if (vmemmap_populated(start, page_size))
			continue;

		p = vmemmap_alloc_block(page_size, node);
		if (!p)
			return -ENOMEM;

		vmemmap_list_populate(__pa(p), start, node);

		pr_debug(" * %016lx..%016lx allocated at %p\n",
			 start, start + page_size, p);

		rc = vmemmap_create_mapping(start, page_size, __pa(p));
		if (rc < 0) {
			pr_warning("vmemmap_populate: Unable to create vmemmap mapping: %d\n",
				   rc);
			return -EFAULT;
		}
	}

	return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
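/*
 * Find the vmemmap_list entry for a virtual address and unlink it.
 * The entry itself is recycled onto the allocator's freelist (via
 * 'next'/'num_freed' above) rather than returned to the page
 * allocator, and its recorded physical address is handed back so the
 * caller can free the backing memory.
 */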
static unsigned long vmemmap_list_free(unsigned long start)
{
	struct vmemmap_backing *vmem_back, *vmem_back_prev;

	vmem_back_prev = vmem_back = vmemmap_list;

	/* look for it, recording the prev pointer as we go */
	for (; vmem_back; vmem_back = vmem_back->list) {
		if (vmem_back->virt_addr == start)
			break;
		vmem_back_prev = vmem_back;
	}

	if (unlikely(!vmem_back)) {
		WARN_ON(1);
		return 0;
	}

	/* remove it from vmemmap_list */
	if (vmem_back == vmemmap_list) /* remove head */
		vmemmap_list = vmem_back->list;
	else
		vmem_back_prev->list = vmem_back->list;

	/* make next point to this freed entry */
	vmem_back->list = next;
	next = vmem_back;
	num_freed++;

	return vmem_back->phys;
}

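/*
 * Tear down the vmemmap for [start, end).  Backing pages may have come
 * from two places: memblock/bootmem at early boot (PageReserved, freed
 * page by page via free_reserved_page()) or the buddy allocator at
 * hotplug time (freed with free_pages()).  Only fully unused backing
 * pages are torn down; if another live section still overlaps a
 * backing page, it is skipped.
 */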
void __ref vmemmap_free(unsigned long start, unsigned long end)
{
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;

	start = _ALIGN_DOWN(start, page_size);

	pr_debug("vmemmap_free %lx...%lx\n", start, end);

	for (; start < end; start += page_size) {
		unsigned long addr;

		/*
		 * The section has already been marked as invalid, so
		 * vmemmap_populated() returning true means other sections
		 * still have page structs in this backing page; skip it.
		 */
		if (vmemmap_populated(start, page_size))
			continue;

		addr = vmemmap_list_free(start);
		if (addr) {
			struct page *page = pfn_to_page(addr >> PAGE_SHIFT);

			if (PageReserved(page)) {
				/* allocated from bootmem */
				if (page_size < PAGE_SIZE) {
					/*
					 * this shouldn't happen, but if it is
					 * the case, leave the memory there
					 */
					WARN_ON_ONCE(1);
				} else {
					unsigned int nr_pages =
						1 << get_order(page_size);
					while (nr_pages--)
						free_reserved_page(page++);
				}
			} else
				free_pages((unsigned long)(__va(addr)),
					   get_order(page_size));

			vmemmap_remove_mapping(start, page_size);
		}
	}
}
#endif
void register_page_bootmem_memmap(unsigned long section_nr,
				  struct page *start_page, unsigned long size)
{
}

/*
 * We do not have access to the sparsemem vmemmap, so we fall back to
 * walking the list of sparsemem blocks which we already maintain for
 * the sake of crashdump.  In the long run, we might want to maintain
 * a tree if performance of that linear walk becomes a problem.
 *
 * realmode_pfn_to_page functions can fail due to:
 * 1) As real sparsemem blocks do not lie in RAM contiguously (they
 * are in virtual address space which is not available in real mode),
 * the requested page struct can be split between blocks so
 * get_page/put_page may fail.
 * 2) When huge pages are used, the get_page/put_page API will fail
 * in real mode as the linked addresses in the page struct are virtual
 * too.
 */
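/*
 * A sketch of the intended use (the exact call sites are outside this
 * file): code running in real mode - e.g. hypercall handlers that
 * cannot dereference vmalloc'd vmemmap addresses - can translate a pfn
 * into its struct page via the physical addresses recorded on
 * vmemmap_list, and must be prepared for a NULL return when the page
 * struct straddles two backing blocks.
 */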
struct page *realmode_pfn_to_page(unsigned long pfn)
{
	struct vmemmap_backing *vmem_back;
	struct page *page;
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
	unsigned long pg_va = (unsigned long) pfn_to_page(pfn);

	for (vmem_back = vmemmap_list; vmem_back; vmem_back = vmem_back->list) {
		if (pg_va < vmem_back->virt_addr)
			continue;

		/* entries may have been freed from the list, so check them all */
		if ((pg_va + sizeof(struct page)) <=
		    (vmem_back->virt_addr + page_size)) {
			page = (struct page *) (vmem_back->phys + pg_va -
				vmem_back->virt_addr);
			return page;
		}
	}

	/* the page struct is probably split between real pages */
	return NULL;
}
EXPORT_SYMBOL_GPL(realmode_pfn_to_page);

#elif defined(CONFIG_FLATMEM)

struct page *realmode_pfn_to_page(unsigned long pfn)
{
	struct page *page = pfn_to_page(pfn);
	return page;
}
EXPORT_SYMBOL_GPL(realmode_pfn_to_page);

#endif /* CONFIG_SPARSEMEM_VMEMMAP/CONFIG_FLATMEM */