/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 * Copyright (C) 1996 Paul Mackerras
 *
 * Derived from "arch/i386/mm/init.c"
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * Dave Engebretsen <engebret@us.ibm.com>
 *	Rework for PPC64 port.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#undef DEBUG

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/nodemask.h>
#include <linux/module.h>
#include <linux/poison.h>
#include <linux/memblock.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <linux/uaccess.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/eeh.h>
#include <asm/processor.h>
#include <asm/mmzone.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/iommu.h>
#include <asm/vdso.h>

#include "mmu_decl.h"

#ifdef CONFIG_PPC_STD_MMU_64
#if H_PGTABLE_RANGE > USER_VSID_RANGE
#warning Limited user VSID range means pagetable space is wasted
#endif

#if (TASK_SIZE_USER64 < H_PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE)
#warning TASK_SIZE is smaller than it needs to be.
#endif
#endif /* CONFIG_PPC_STD_MMU_64 */

phys_addr_t memstart_addr = ~0;
EXPORT_SYMBOL_GPL(memstart_addr);
phys_addr_t kernstart_addr;
EXPORT_SYMBOL_GPL(kernstart_addr);

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Given an address within the vmemmap, determine the pfn of the page that
 * represents the start of the section it is within.  Note that we have to
 * do this by hand as the proffered address may not be correctly aligned.
 * Subtraction of non-aligned pointers produces undefined results.
 */
static unsigned long __meminit vmemmap_section_start(unsigned long page)
{
	unsigned long offset = page - ((unsigned long)(vmemmap));

	/* Return the pfn of the start of the section. */
	return (offset / sizeof(struct page)) & PAGE_SECTION_MASK;
}
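
/*
 * Worked example (illustrative values only): with PAGES_PER_SECTION ==
 * 0x10000, an address sizeof(struct page) * 0x12345 bytes into the
 * vmemmap corresponds to pfn 0x12345, and the section containing it
 * starts at pfn 0x10000.
 */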

/*
 * Check if this vmemmap page is already initialised.  If any section
 * which overlaps this vmemmap page is initialised then this page is
 * initialised already.
 */
static int __meminit vmemmap_populated(unsigned long start, int page_size)
{
	unsigned long end = start + page_size;
	start = (unsigned long)(pfn_to_page(vmemmap_section_start(start)));

	for (; start < end; start += (PAGES_PER_SECTION * sizeof(struct page)))
		if (pfn_valid(page_to_pfn((struct page *)start)))
			return 1;

	return 0;
}

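/*
 * Bookkeeping for the vmemmap backing store.  vmemmap_list records which
 * physical block backs each virtual chunk of the vmemmap; "next" and
 * "num_left" track the page currently being carved into
 * vmemmap_backing-sized chunks; freed entries are threaded through
 * "next" and counted by "num_freed" (see vmemmap_list_free() below).
 */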
struct vmemmap_backing *vmemmap_list;
static struct vmemmap_backing *next;
static int num_left;
static int num_freed;

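/*
 * Allocate one vmemmap_backing entry, reusing a previously freed entry
 * when one is available and otherwise carving entries out of a
 * page-sized block allocated on the given node.
 */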
static __meminit struct vmemmap_backing * vmemmap_list_alloc(int node)
{
	struct vmemmap_backing *vmem_back;
	/* get from freed entries first */
	if (num_freed) {
		num_freed--;
		vmem_back = next;
		next = next->list;

		return vmem_back;
	}

	/* allocate a page when required and hand out chunks */
	if (!num_left) {
		next = vmemmap_alloc_block(PAGE_SIZE, node);
		if (unlikely(!next)) {
			WARN_ON(1);
			return NULL;
		}
		num_left = PAGE_SIZE / sizeof(struct vmemmap_backing);
	}

	num_left--;

	return next++;
}

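/*
 * Record that the vmemmap chunk starting at "start" is backed by the
 * physical block at "phys", pushing a new entry onto the head of
 * vmemmap_list.
 */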
static __meminit void vmemmap_list_populate(unsigned long phys,
					    unsigned long start,
					    int node)
{
	struct vmemmap_backing *vmem_back;

	vmem_back = vmemmap_list_alloc(node);
	if (unlikely(!vmem_back)) {
		WARN_ON(1);
		return;
	}

	vmem_back->phys = phys;
	vmem_back->virt_addr = start;
	vmem_back->list = vmemmap_list;

	vmemmap_list = vmem_back;
}

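/*
 * Populate the vmemmap for [start, end): for each chunk of the linear
 * mapping page size that is not already backed by an initialised
 * section, allocate a block, record it in vmemmap_list and create the
 * mapping for it.
 */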
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;

	/* Align to the page size of the linear mapping. */
	start = _ALIGN_DOWN(start, page_size);

	pr_debug("vmemmap_populate %lx..%lx, node %d\n", start, end, node);

	for (; start < end; start += page_size) {
		void *p;
		int rc;

		if (vmemmap_populated(start, page_size))
			continue;

		p = vmemmap_alloc_block(page_size, node);
		if (!p)
			return -ENOMEM;

		vmemmap_list_populate(__pa(p), start, node);

		pr_debug("      * %016lx..%016lx allocated at %p\n",
			 start, start + page_size, p);

		rc = vmemmap_create_mapping(start, page_size, __pa(p));
		if (rc < 0) {
			pr_warning(
				"vmemmap_populate: Unable to create vmemmap mapping: %d\n",
				rc);
			return -EFAULT;
		}
	}

	return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
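/*
 * Unlink the vmemmap_list entry describing "start", put it on the free
 * list and return the physical address it recorded, or 0 if no entry
 * is found.
 */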
static unsigned long vmemmap_list_free(unsigned long start)
{
	struct vmemmap_backing *vmem_back, *vmem_back_prev;

	vmem_back_prev = vmem_back = vmemmap_list;

	/* look for it with prev pointer recorded */
	for (; vmem_back; vmem_back = vmem_back->list) {
		if (vmem_back->virt_addr == start)
			break;
		vmem_back_prev = vmem_back;
	}

	if (unlikely(!vmem_back)) {
		WARN_ON(1);
		return 0;
	}

	/* remove it from vmemmap_list */
	if (vmem_back == vmemmap_list) /* remove head */
		vmemmap_list = vmem_back->list;
	else
		vmem_back_prev->list = vmem_back->list;

	/* make "next" point to this freed entry */
	vmem_back->list = next;
	next = vmem_back;
	num_freed++;

	return vmem_back->phys;
}

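/*
 * Tear down the vmemmap for [start, end): for each chunk that no
 * remaining section still uses, free the backing block (reserved pages
 * came from bootmem) and remove its mapping.
 */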
void __ref vmemmap_free(unsigned long start, unsigned long end)
{
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;

	start = _ALIGN_DOWN(start, page_size);

	pr_debug("vmemmap_free %lx...%lx\n", start, end);

	for (; start < end; start += page_size) {
		unsigned long addr;

		/*
		 * The section has already been marked as invalid, so if
		 * vmemmap_populated() returns true some other section
		 * still maps pages in this chunk; skip it.
		 */
		if (vmemmap_populated(start, page_size))
			continue;

		addr = vmemmap_list_free(start);
		if (addr) {
			struct page *page = pfn_to_page(addr >> PAGE_SHIFT);

			if (PageReserved(page)) {
				/* allocated from bootmem */
				if (page_size < PAGE_SIZE) {
					/*
					 * this shouldn't happen, but if it is
					 * the case, leave the memory there
					 */
					WARN_ON_ONCE(1);
				} else {
					unsigned int nr_pages =
						1 << get_order(page_size);
					while (nr_pages--)
						free_reserved_page(page++);
				}
			} else
				free_pages((unsigned long)(__va(addr)),
					   get_order(page_size));

			vmemmap_remove_mapping(start, page_size);
		}
	}
}
#endif

void register_page_bootmem_memmap(unsigned long section_nr,
				  struct page *start_page, unsigned long size)
{
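	/*
	 * Intentionally empty: powerpc tracks the vmemmap backing via
	 * vmemmap_list instead, so there is nothing to record here.
	 */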
}

/*
 * We do not have access to the sparsemem vmemmap, so we fall back to
 * walking the list of sparsemem blocks which we already maintain for
 * the sake of crashdump.  In the long run, we might want to maintain
 * a tree if performance of that linear walk becomes a problem.
 *
 * realmode_pfn_to_page functions can fail due to:
 * 1) As real sparsemem blocks do not lie in RAM contiguously (they
 * live in virtual address space, which is not available in real mode),
 * the requested page struct can be split between blocks so get_page/put_page
 * may fail.
 * 2) When huge pages are used, the get_page/put_page API will fail
 * in real mode as the linked addresses in the page struct are virtual
 * too.
 */
struct page *realmode_pfn_to_page(unsigned long pfn)
{
	struct vmemmap_backing *vmem_back;
	struct page *page;
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
	unsigned long pg_va = (unsigned long) pfn_to_page(pfn);

	for (vmem_back = vmemmap_list; vmem_back; vmem_back = vmem_back->list) {
		if (pg_va < vmem_back->virt_addr)
			continue;

		/* Since vmemmap_list entries can be freed, we must check them all */
		if ((pg_va + sizeof(struct page)) <=
		    (vmem_back->virt_addr + page_size)) {
			page = (struct page *) (vmem_back->phys + pg_va -
						vmem_back->virt_addr);
			return page;
		}
	}

	/* The page struct is probably split across real pages */
	return NULL;
}
EXPORT_SYMBOL_GPL(realmode_pfn_to_page);

#elif defined(CONFIG_FLATMEM)

struct page *realmode_pfn_to_page(unsigned long pfn)
{
	struct page *page = pfn_to_page(pfn);
	return page;
}
EXPORT_SYMBOL_GPL(realmode_pfn_to_page);

#endif /* CONFIG_SPARSEMEM_VMEMMAP/CONFIG_FLATMEM */

#ifdef CONFIG_PPC_STD_MMU_64
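/*
 * Booting with "disable_radix" on the kernel command line forces the
 * hash MMU even on radix-capable hardware.
 */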
static bool disable_radix;
static int __init parse_disable_radix(char *p)
{
	disable_radix = true;
	return 0;
}
early_param("disable_radix", parse_disable_radix);

/*
 * If we're running under a hypervisor, we need to check the contents of
 * /chosen/ibm,architecture-vec-5 to see if the hypervisor is willing to do
 * radix.  If not, we clear the radix feature bit so we fall back to hash.
 */
static void early_check_vec5(void)
{
	unsigned long root, chosen;
	int size;
	const u8 *vec5;
	u8 mmu_supported;

	root = of_get_flat_dt_root();
	chosen = of_get_flat_dt_subnode_by_name(root, "chosen");
	if (chosen == -FDT_ERR_NOTFOUND) {
		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
		return;
	}
	vec5 = of_get_flat_dt_prop(chosen, "ibm,architecture-vec-5", &size);
	if (!vec5) {
		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
		return;
	}
	if (size <= OV5_INDX(OV5_MMU_SUPPORT)) {
		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
		return;
	}

	/* Check for supported configuration */
	mmu_supported = vec5[OV5_INDX(OV5_MMU_SUPPORT)] &
			OV5_FEAT(OV5_MMU_SUPPORT);
	if (mmu_supported == OV5_FEAT(OV5_MMU_RADIX)) {
		/* Hypervisor only supports radix - check enabled && GTSE */
		if (!early_radix_enabled()) {
			pr_warn("WARNING: Ignoring cmdline option disable_radix\n");
		}
		if (!(vec5[OV5_INDX(OV5_RADIX_GTSE)] &
		      OV5_FEAT(OV5_RADIX_GTSE))) {
			pr_warn("WARNING: Hypervisor doesn't support RADIX with GTSE\n");
		}
		/* Do radix anyway - the hypervisor said we had to */
		cur_cpu_spec->mmu_features |= MMU_FTR_TYPE_RADIX;
	} else if (mmu_supported == OV5_FEAT(OV5_MMU_HASH)) {
		/* Hypervisor only supports hash - disable radix */
		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
	}
}

void __init mmu_early_init_devtree(void)
{
	/*
	 * Disable radix mode based on the kernel command line.
	 * We don't yet have the machinery to do radix as a guest.
	 */
	if (disable_radix || !(mfmsr() & MSR_HV))
		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;

	/*
	 * Check /chosen/ibm,architecture-vec-5 if running as a guest.
	 * When running bare-metal, we can use radix if we like
	 * even though the ibm,architecture-vec-5 property created by
	 * skiboot doesn't have the necessary bits set.
	 */
	if (!(mfmsr() & MSR_HV))
		early_check_vec5();

	if (early_radix_enabled())
		radix__early_init_devtree();
	else
		hash__early_init_devtree();
}
#endif /* CONFIG_PPC_STD_MMU_64 */