/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#undef DEBUG

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/nodemask.h>
#include <linux/module.h>
#include <linux/poison.h>
#include <linux/memblock.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <linux/uaccess.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/eeh.h>
#include <asm/processor.h>
#include <asm/mmzone.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/iommu.h>
#include <asm/vdso.h>

#include "mmu_decl.h"

#ifdef CONFIG_PPC_STD_MMU_64
#if H_PGTABLE_RANGE > USER_VSID_RANGE
#warning Limited user VSID range means pagetable space is wasted
#endif

#if (TASK_SIZE_USER64 < H_PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE)
#warning TASK_SIZE is smaller than it needs to be.
#endif
#endif /* CONFIG_PPC_STD_MMU_64 */

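/*
 * Set up during early boot: the physical address of the start of system
 * memory, and the physical address at which the kernel was loaded (the
 * latter matters for relocatable kernels).
 */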
phys_addr_t memstart_addr = ~0;
EXPORT_SYMBOL_GPL(memstart_addr);
phys_addr_t kernstart_addr;
EXPORT_SYMBOL_GPL(kernstart_addr);

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Given an address within the vmemmap, determine the pfn of the page that
 * represents the start of the section it is within.  Note that we have to
 * do this by hand as the proffered address may not be correctly aligned.
 * Subtraction of non-aligned pointers produces undefined results.
 */
static unsigned long __meminit vmemmap_section_start(unsigned long page)
{
	unsigned long offset = page - ((unsigned long)(vmemmap));

	/* Return the pfn of the start of the section. */
	return (offset / sizeof(struct page)) & PAGE_SECTION_MASK;
}

/*
 * Check if this vmemmap page is already initialised.  If any section
 * which overlaps this vmemmap page is initialised then this page is
 * initialised already.
 */
static int __meminit vmemmap_populated(unsigned long start, int page_size)
{
	unsigned long end = start + page_size;
	start = (unsigned long)(pfn_to_page(vmemmap_section_start(start)));

	for (; start < end; start += (PAGES_PER_SECTION * sizeof(struct page)))
		if (pfn_valid(page_to_pfn((struct page *)start)))
			return 1;

	return 0;
}

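/*
 * List of populated vmemmap pages, recording the physical address that
 * backs each virtual range; used by realmode_pfn_to_page() and when
 * freeing backing pages on memory hot-unplug.  "next", "num_left" and
 * "num_freed" are the state of the entry allocator below.
 */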
struct vmemmap_backing *vmemmap_list;
static struct vmemmap_backing *next;
static int num_left;
static int num_freed;

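/*
 * Hand out a vmemmap_backing entry: reuse a freed one if available,
 * otherwise carve the next entry out of the current page-sized chunk,
 * allocating a fresh chunk when it runs out.
 */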
static __meminit struct vmemmap_backing * vmemmap_list_alloc(int node)
{
	struct vmemmap_backing *vmem_back;
	/* get from freed entries first */
	if (num_freed) {
		num_freed--;
		vmem_back = next;
		next = next->list;

		return vmem_back;
	}

	/* allocate a page when required and hand out chunks */
	if (!num_left) {
		next = vmemmap_alloc_block(PAGE_SIZE, node);
		if (unlikely(!next)) {
			WARN_ON(1);
			return NULL;
		}
		num_left = PAGE_SIZE / sizeof(struct vmemmap_backing);
	}

	num_left--;

	return next++;
}

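/*
 * Record that the vmemmap range at "start" is backed by the physical
 * page at "phys", by pushing a new entry onto the head of vmemmap_list.
 */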
static __meminit void vmemmap_list_populate(unsigned long phys,
					    unsigned long start,
					    int node)
{
	struct vmemmap_backing *vmem_back;

	vmem_back = vmemmap_list_alloc(node);
	if (unlikely(!vmem_back)) {
		WARN_ON(1);
		return;
	}

	vmem_back->phys = phys;
	vmem_back->virt_addr = start;
	vmem_back->list = vmemmap_list;

	vmemmap_list = vmem_back;
}

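/*
 * Populate the vmemmap for [start, end): for each linear-map-sized page
 * that is not already backed, allocate node-local memory, record it in
 * vmemmap_list and create the kernel mapping.
 */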
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;

	/* Align to the page size of the linear mapping. */
	start = _ALIGN_DOWN(start, page_size);

	pr_debug("vmemmap_populate %lx..%lx, node %d\n", start, end, node);

	for (; start < end; start += page_size) {
		void *p;
		int rc;

		if (vmemmap_populated(start, page_size))
			continue;

		p = vmemmap_alloc_block(page_size, node);
		if (!p)
			return -ENOMEM;

		vmemmap_list_populate(__pa(p), start, node);

		pr_debug("      * %016lx..%016lx allocated at %p\n",
			 start, start + page_size, p);

		rc = vmemmap_create_mapping(start, page_size, __pa(p));
		if (rc < 0) {
			pr_warning(
				"vmemmap_populate: Unable to create vmemmap mapping: %d\n",
				rc);
			return -EFAULT;
		}
	}

	return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
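/*
 * Unlink the vmemmap_list entry for "start" and recycle it onto the
 * entry allocator's free list; returns the physical address it had
 * recorded, or 0 if no entry is found.
 */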
static unsigned long vmemmap_list_free(unsigned long start)
{
	struct vmemmap_backing *vmem_back, *vmem_back_prev;

	vmem_back_prev = vmem_back = vmemmap_list;

	/* look for it, keeping track of the previous entry */
	for (; vmem_back; vmem_back = vmem_back->list) {
		if (vmem_back->virt_addr == start)
			break;
		vmem_back_prev = vmem_back;
	}

	if (unlikely(!vmem_back)) {
		WARN_ON(1);
		return 0;
	}

	/* remove it from vmemmap_list */
	if (vmem_back == vmemmap_list) /* remove head */
		vmemmap_list = vmem_back->list;
	else
		vmem_back_prev->list = vmem_back->list;

	/* make "next" point to this freed entry */
	vmem_back->list = next;
	next = vmem_back;
	num_freed++;

	return vmem_back->phys;
}

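/*
 * Tear down the vmemmap backing for [start, end) on memory hot-unplug:
 * for each linear-map-sized page no longer needed by any section,
 * return the backing memory to the allocator it came from and remove
 * the kernel mapping.
 */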
void __ref vmemmap_free(unsigned long start, unsigned long end)
{
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;

	start = _ALIGN_DOWN(start, page_size);

	pr_debug("vmemmap_free %lx...%lx\n", start, end);

	for (; start < end; start += page_size) {
		unsigned long addr;

		/*
		 * The section has already been marked as invalid, so
		 * vmemmap_populated() returning true means some other
		 * sections still live in this page; skip it.
		 */
		if (vmemmap_populated(start, page_size))
			continue;

		addr = vmemmap_list_free(start);
		if (addr) {
			struct page *page = pfn_to_page(addr >> PAGE_SHIFT);

			if (PageReserved(page)) {
				/* allocated from bootmem */
				if (page_size < PAGE_SIZE) {
					/*
					 * This shouldn't happen, but if it
					 * does, leave the memory there.
					 */
					WARN_ON_ONCE(1);
				} else {
					unsigned int nr_pages =
						1 << get_order(page_size);
					while (nr_pages--)
						free_reserved_page(page++);
				}
			} else
				free_pages((unsigned long)(__va(addr)),
					   get_order(page_size));

			vmemmap_remove_mapping(start, page_size);
		}
	}
}
#endif
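/*
 * No-op on powerpc; the hook is expected by the generic memory-hotplug
 * code.
 */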
void register_page_bootmem_memmap(unsigned long section_nr,
				  struct page *start_page, unsigned long size)
{
}

/*
 * We do not have access to the sparsemem vmemmap, so we fall back to
 * walking the list of sparsemem blocks which we already maintain for
 * the sake of crashdump. In the long run, we might want to maintain
 * a tree if performance of that linear walk becomes a problem.
 *
 * realmode_pfn_to_page functions can fail due to:
 * 1) As real sparsemem blocks do not lie in RAM contiguously (they
 * are in virtual address space which is not available in real mode),
 * the requested page struct can be split between blocks so get_page/put_page
 * may fail.
 * 2) When huge pages are used, the get_page/put_page API will fail
 * in real mode as the linked addresses in the page struct are virtual
 * too.
 */
struct page *realmode_pfn_to_page(unsigned long pfn)
{
	struct vmemmap_backing *vmem_back;
	struct page *page;
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
	unsigned long pg_va = (unsigned long) pfn_to_page(pfn);

	for (vmem_back = vmemmap_list; vmem_back; vmem_back = vmem_back->list) {
		if (pg_va < vmem_back->virt_addr)
			continue;

		/* Entries may have been freed from vmemmap_list, so check them all */
		if ((pg_va + sizeof(struct page)) <=
		    (vmem_back->virt_addr + page_size)) {
			page = (struct page *) (vmem_back->phys + pg_va -
						vmem_back->virt_addr);
			return page;
		}
	}

	/* The page struct is probably split across real pages */
	return NULL;
}
EXPORT_SYMBOL_GPL(realmode_pfn_to_page);

#elif defined(CONFIG_FLATMEM)

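/*
 * With FLATMEM the page structs live in a single contiguous mem_map
 * array, so plain pfn_to_page() arithmetic suffices even in real mode.
 */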
struct page *realmode_pfn_to_page(unsigned long pfn)
{
	struct page *page = pfn_to_page(pfn);
	return page;
}
EXPORT_SYMBOL_GPL(realmode_pfn_to_page);

#endif /* CONFIG_SPARSEMEM_VMEMMAP/CONFIG_FLATMEM */

#ifdef CONFIG_PPC_STD_MMU_64
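/* "disable_radix" on the kernel command line forces use of the hash MMU. */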
static bool disable_radix;
static int __init parse_disable_radix(char *p)
{
	disable_radix = true;
	return 0;
}
early_param("disable_radix", parse_disable_radix);

/*
 * If we're running under a hypervisor, we currently can't do radix
 * since we don't have the code to do the H_REGISTER_PROC_TBL hcall.
 * We detect that we're running under a hypervisor by looking for the
 * /chosen/ibm,architecture-vec-5 property.
 */
static void early_check_vec5(void)
{
	unsigned long root, chosen;
	int size;
	const u8 *vec5;

	root = of_get_flat_dt_root();
	chosen = of_get_flat_dt_subnode_by_name(root, "chosen");
	if (chosen == -FDT_ERR_NOTFOUND)
		return;
	vec5 = of_get_flat_dt_prop(chosen, "ibm,architecture-vec-5", &size);
	if (!vec5)
		return;
	cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
}

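/*
 * Decide between the hash and radix MMU at boot, based on CPU features,
 * the firmware-provided device tree and the kernel command line.
 */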
void __init mmu_early_init_devtree(void)
{
	/* Disable radix mode based on kernel command line. */
	if (disable_radix)
		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;

	/*
	 * Check /chosen/ibm,architecture-vec-5 if running as a guest.
	 * When running bare-metal, we can use radix if we like
	 * even though the ibm,architecture-vec-5 property created by
	 * skiboot doesn't have the necessary bits set.
	 */
	if (early_radix_enabled() && !(mfmsr() & MSR_HV))
		early_check_vec5();

	if (early_radix_enabled())
		radix__early_init_devtree();
	else
		hash__early_init_devtree();
}
#endif /* CONFIG_PPC_STD_MMU_64 */