// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/arch/parisc/mm/init.c
 *
 *  Copyright (C) 1995	Linus Torvalds
 *  Copyright 1999 SuSE GmbH
 *    changed by Philipp Rumpf
 *  Copyright 1999 Philipp Rumpf (prumpf@tux.org)
 *  Copyright 2004 Randolph Chung (tausq@debian.org)
 *  Copyright 2006-2007 Helge Deller (deller@gmx.de)
 *
 */


#include <linux/module.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/gfp.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>		/* for hppa_dma_ops and pcxl_dma_ops */
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/unistd.h>
#include <linux/nodemask.h>	/* for node_online_map */
#include <linux/pagemap.h>	/* for release_pages */
#include <linux/compat.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/pdc_chassis.h>
#include <asm/mmzone.h>
#include <asm/sections.h>
#include <asm/msgbuf.h>

extern int  data_start;
extern void parisc_kernel_start(void);	/* Kernel entry point in head.S */

#if CONFIG_PGTABLE_LEVELS == 3
/* NOTE: This layout exactly conforms to the hybrid L2/L3 page table layout
 * with the first pmd adjacent to the pgd and below it. gcc doesn't actually
 * guarantee that global objects will be laid out in memory in the same order
 * as the order of declaration, so put these in different sections and use
 * the linker script to order them. */
pmd_t pmd0[PTRS_PER_PMD] __attribute__ ((__section__ (".data..vm0.pmd"), aligned(PAGE_SIZE)));
#endif

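/* swapper_pg_dir is the kernel's master page directory; pg0 provides the
 * initial page tables with which the assembly bootup code maps the first
 * KERNEL_INITIAL_SIZE of the kernel (see the get_memblock() comment below). */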
pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__ ((__section__ (".data..vm0.pgd"), aligned(PAGE_SIZE)));
pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __attribute__ ((__section__ (".data..vm0.pte"), aligned(PAGE_SIZE)));

#ifdef CONFIG_DISCONTIGMEM
struct node_map_data node_data[MAX_NUMNODES] __read_mostly;
signed char pfnnid_map[PFNNID_MAP_MAX] __read_mostly;
#endif

static struct resource data_resource = {
	.name	= "Kernel data",
	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};

static struct resource code_resource = {
	.name	= "Kernel code",
	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};

static struct resource pdcdata_resource = {
	.name	= "PDC data (Page Zero)",
	.start	= 0,
	.end	= 0x9ff,
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource sysram_resources[MAX_PHYSMEM_RANGES] __read_mostly;

/* The following array is initialized from the firmware-specific
 * information retrieved in kernel/inventory.c.
 */

physmem_range_t pmem_ranges[MAX_PHYSMEM_RANGES] __read_mostly;
int npmem_ranges __read_mostly;

/*
 * get_memblock() allocates pages via memblock.
 * We can't use memblock_find_in_range(0, KERNEL_INITIAL_SIZE) here since it
 * doesn't allocate from bottom to top, which is needed because we only created
 * the initial mapping up to KERNEL_INITIAL_SIZE in the assembly bootup code.
 */
static void * __init get_memblock(unsigned long size)
{
	static phys_addr_t search_addr __initdata;
	phys_addr_t phys;

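	/* Bump-style linear scan: start just past the kernel image and
	 * advance in size-aligned steps until we find a block that is
	 * backed by RAM and not already reserved. */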
	if (!search_addr)
		search_addr = PAGE_ALIGN(__pa((unsigned long) &_end));
	search_addr = ALIGN(search_addr, size);
	while (!memblock_is_region_memory(search_addr, size) ||
		memblock_is_region_reserved(search_addr, size)) {
		search_addr += size;
	}
	phys = search_addr;

	if (phys)
		memblock_reserve(phys, size);
	else
		panic("get_memblock() failed.\n");

	memset(__va(phys), 0, size);

	return __va(phys);
}

#ifdef CONFIG_64BIT
#define MAX_MEM         (~0UL)
#else /* !CONFIG_64BIT */
#define MAX_MEM         (3584U*1024U*1024U)
#endif /* !CONFIG_64BIT */

static unsigned long mem_limit __read_mostly = MAX_MEM;

static void __init mem_limit_func(void)
{
	char *cp, *end;
	unsigned long limit;

	/* We need this before __setup() functions are called */

	limit = MAX_MEM;
	for (cp = boot_command_line; *cp; ) {
		if (memcmp(cp, "mem=", 4) == 0) {
			cp += 4;
			limit = memparse(cp, &end);
			if (end != cp)
				break;
			cp = end;
		} else {
			while (*cp != ' ' && *cp)
				++cp;
			while (*cp == ' ')
				++cp;
		}
	}

	if (limit < mem_limit)
		mem_limit = limit;
}

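/* Physical memory ranges separated by more than MAX_GAP pages (1 GB) are
 * discarded on !CONFIG_DISCONTIGMEM kernels, see setup_bootmem() below. */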
#define MAX_GAP (0x40000000UL >> PAGE_SHIFT)

static void __init setup_bootmem(void)
{
	unsigned long mem_max;
#ifndef CONFIG_DISCONTIGMEM
	physmem_range_t pmem_holes[MAX_PHYSMEM_RANGES - 1];
	int npmem_holes;
#endif
	int i, sysram_resource_count;

	disable_sr_hashing(); /* Turn off space register hashing */

	/*
	 * Sort the ranges. Since the number of ranges is typically
	 * small, and performance is not an issue here, just do
	 * a simple insertion sort.
	 */

	for (i = 1; i < npmem_ranges; i++) {
		int j;

		for (j = i; j > 0; j--) {
			unsigned long tmp;

			if (pmem_ranges[j-1].start_pfn <
			    pmem_ranges[j].start_pfn) {

				break;
			}
			tmp = pmem_ranges[j-1].start_pfn;
			pmem_ranges[j-1].start_pfn = pmem_ranges[j].start_pfn;
			pmem_ranges[j].start_pfn = tmp;
			tmp = pmem_ranges[j-1].pages;
			pmem_ranges[j-1].pages = pmem_ranges[j].pages;
			pmem_ranges[j].pages = tmp;
		}
	}

#ifndef CONFIG_DISCONTIGMEM
	/*
	 * Throw out ranges that are too far apart (controlled by
	 * MAX_GAP).
	 */

	for (i = 1; i < npmem_ranges; i++) {
		if (pmem_ranges[i].start_pfn -
			(pmem_ranges[i-1].start_pfn +
			 pmem_ranges[i-1].pages) > MAX_GAP) {
			npmem_ranges = i;
			printk("Large gap in memory detected (%ld pages). "
			       "Consider turning on CONFIG_DISCONTIGMEM\n",
			       pmem_ranges[i].start_pfn -
			       (pmem_ranges[i-1].start_pfn +
				pmem_ranges[i-1].pages));
			break;
		}
	}
#endif

	/* Print the memory ranges */
	pr_info("Memory Ranges:\n");

	for (i = 0; i < npmem_ranges; i++) {
		struct resource *res = &sysram_resources[i];
		unsigned long start;
		unsigned long size;

		size = (pmem_ranges[i].pages << PAGE_SHIFT);
		start = (pmem_ranges[i].start_pfn << PAGE_SHIFT);
		pr_info("%2d) Start 0x%016lx End 0x%016lx Size %6ld MB\n",
			i, start, start + (size - 1), size >> 20);

		/* request memory resource */
		res->name = "System RAM";
		res->start = start;
		res->end = start + size - 1;
		res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
		request_resource(&iomem_resource, res);
	}

	sysram_resource_count = npmem_ranges;

	/*
	 * For 32 bit kernels we limit the amount of memory we can
	 * support, in order to preserve enough kernel address space
	 * for other purposes. For 64 bit kernels we don't normally
	 * limit the memory, but this mechanism can be used to
	 * artificially limit the amount of memory (and it is written
	 * to work with multiple memory ranges).
	 */

	mem_limit_func();       /* check for "mem=" argument */

	mem_max = 0;
	for (i = 0; i < npmem_ranges; i++) {
		unsigned long rsize;

		rsize = pmem_ranges[i].pages << PAGE_SHIFT;
		if ((mem_max + rsize) > mem_limit) {
			printk(KERN_WARNING "Memory truncated to %ld MB\n", mem_limit >> 20);
			if (mem_max == mem_limit)
				npmem_ranges = i;
			else {
				pmem_ranges[i].pages =   (mem_limit >> PAGE_SHIFT)
						       - (mem_max >> PAGE_SHIFT);
				npmem_ranges = i + 1;
				mem_max = mem_limit;
			}
			break;
		}
		mem_max += rsize;
	}

	printk(KERN_INFO "Total Memory: %ld MB\n",mem_max >> 20);

#ifndef CONFIG_DISCONTIGMEM
	/* Merge the ranges, keeping track of the holes */

	{
		unsigned long end_pfn;
		unsigned long hole_pages;

		npmem_holes = 0;
		end_pfn = pmem_ranges[0].start_pfn + pmem_ranges[0].pages;
		for (i = 1; i < npmem_ranges; i++) {

			hole_pages = pmem_ranges[i].start_pfn - end_pfn;
			if (hole_pages) {
				pmem_holes[npmem_holes].start_pfn = end_pfn;
				pmem_holes[npmem_holes++].pages = hole_pages;
				end_pfn += hole_pages;
			}
			end_pfn += pmem_ranges[i].pages;
		}

		pmem_ranges[0].pages = end_pfn - pmem_ranges[0].start_pfn;
		npmem_ranges = 1;
	}
#endif

#ifdef CONFIG_DISCONTIGMEM
	for (i = 0; i < MAX_PHYSMEM_RANGES; i++) {
		memset(NODE_DATA(i), 0, sizeof(pg_data_t));
	}
	memset(pfnnid_map, 0xff, sizeof(pfnnid_map));

	for (i = 0; i < npmem_ranges; i++) {
		node_set_state(i, N_NORMAL_MEMORY);
		node_set_online(i);
	}
#endif

	/*
	 * Initialize and free the full range of memory in each range.
	 */

	max_pfn = 0;
	for (i = 0; i < npmem_ranges; i++) {
		unsigned long start_pfn;
		unsigned long npages;
		unsigned long start;
		unsigned long size;

		start_pfn = pmem_ranges[i].start_pfn;
		npages = pmem_ranges[i].pages;

		start = start_pfn << PAGE_SHIFT;
		size = npages << PAGE_SHIFT;

		/* add system RAM memblock */
		memblock_add(start, size);

		if ((start_pfn + npages) > max_pfn)
			max_pfn = start_pfn + npages;
	}

	/* IOMMU is always used to access "high mem" on those boxes
	 * that can support enough mem that a PCI device couldn't
	 * directly DMA to any physical addresses.
	 * ISA DMA support will need to revisit this.
	 */
	max_low_pfn = max_pfn;

	/* reserve PAGE0 pdc memory, kernel text/data/bss & bootmap */

#define PDC_CONSOLE_IO_IODC_SIZE 32768

	memblock_reserve(0UL, (unsigned long)(PAGE0->mem_free +
				PDC_CONSOLE_IO_IODC_SIZE));
	memblock_reserve(__pa(KERNEL_BINARY_TEXT_START),
			(unsigned long)(_end - KERNEL_BINARY_TEXT_START));

#ifndef CONFIG_DISCONTIGMEM

	/* reserve the holes */

	for (i = 0; i < npmem_holes; i++) {
		memblock_reserve((pmem_holes[i].start_pfn << PAGE_SHIFT),
				(pmem_holes[i].pages << PAGE_SHIFT));
	}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start) {
		printk(KERN_INFO "initrd: %08lx-%08lx\n", initrd_start, initrd_end);
		if (__pa(initrd_start) < mem_max) {
			unsigned long initrd_reserve;

			if (__pa(initrd_end) > mem_max) {
				initrd_reserve = mem_max - __pa(initrd_start);
			} else {
				initrd_reserve = initrd_end - initrd_start;
			}
			initrd_below_start_ok = 1;
			printk(KERN_INFO "initrd: reserving %08lx-%08lx (mem_max %08lx)\n", __pa(initrd_start), __pa(initrd_start) + initrd_reserve, mem_max);

			memblock_reserve(__pa(initrd_start), initrd_reserve);
		}
	}
#endif

	data_resource.start =  virt_to_phys(&data_start);
	data_resource.end = virt_to_phys(_end) - 1;
	code_resource.start = virt_to_phys(_text);
	code_resource.end = virt_to_phys(&data_start)-1;

	/* We don't know which region the kernel will be in, so try
	 * all of them.
	 */
	for (i = 0; i < sysram_resource_count; i++) {
		struct resource *res = &sysram_resources[i];
		request_resource(res, &code_resource);
		request_resource(res, &data_resource);
	}
	request_resource(&sysram_resources[0], &pdcdata_resource);

	/* Initialize Page Deallocation Table (PDT) and check for bad memory. */
	pdc_pdt_init();
}

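/* Kernel text check that also accepts the page holding the kernel entry
 * point in head.S, which lies outside the range core_kernel_text() checks. */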
static int __init parisc_text_address(unsigned long vaddr)
{
	static unsigned long head_ptr __initdata;

	if (!head_ptr)
		head_ptr = PAGE_MASK & (unsigned long)
			dereference_function_descriptor(&parisc_kernel_start);

	return core_kernel_text(vaddr) || vaddr == head_ptr;
}

static void __init map_pages(unsigned long start_vaddr,
			     unsigned long start_paddr, unsigned long size,
			     pgprot_t pgprot, int force)
{
	pgd_t *pg_dir;
	pmd_t *pmd;
	pte_t *pg_table;
	unsigned long end_paddr;
	unsigned long start_pmd;
	unsigned long start_pte;
	unsigned long tmp1;
	unsigned long tmp2;
	unsigned long address;
	unsigned long vaddr;
	unsigned long ro_start;
	unsigned long ro_end;
	unsigned long kernel_end;

	ro_start = __pa((unsigned long)_text);
	ro_end   = __pa((unsigned long)&data_start);
	kernel_end  = __pa((unsigned long)&_end);

	end_paddr = start_paddr + size;

	pg_dir = pgd_offset_k(start_vaddr);

#if PTRS_PER_PMD == 1
	start_pmd = 0;
#else
	start_pmd = ((start_vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
#endif
	start_pte = ((start_vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));

	address = start_paddr;
	vaddr = start_vaddr;
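
	/* Walk the pgd/pmd/pte hierarchy for the whole range, allocating
	 * intermediate tables from memblock on demand.  Note that pmd and
	 * pg_table hold physical addresses until converted with __va(). */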
	while (address < end_paddr) {
#if PTRS_PER_PMD == 1
		pmd = (pmd_t *)__pa(pg_dir);
#else
		pmd = (pmd_t *)pgd_address(*pg_dir);

		/*
		 * pmd is physical at this point
		 */

		if (!pmd) {
			pmd = (pmd_t *) get_memblock(PAGE_SIZE << PMD_ORDER);
			pmd = (pmd_t *) __pa(pmd);
		}

		pgd_populate(NULL, pg_dir, __va(pmd));
#endif
		pg_dir++;

		/* now change pmd to kernel virtual addresses */

		pmd = (pmd_t *)__va(pmd) + start_pmd;
		for (tmp1 = start_pmd; tmp1 < PTRS_PER_PMD; tmp1++, pmd++) {

			/*
			 * pg_table is physical at this point
			 */

			pg_table = (pte_t *)pmd_address(*pmd);
			if (!pg_table) {
				pg_table = (pte_t *) get_memblock(PAGE_SIZE);
				pg_table = (pte_t *) __pa(pg_table);
			}

			pmd_populate_kernel(NULL, pmd, __va(pg_table));

			/* now change pg_table to kernel virtual addresses */

			pg_table = (pte_t *) __va(pg_table) + start_pte;
			for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) {
				pte_t pte;

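				/* Pick protections: forced mappings use
				 * pgprot as passed in; kernel text becomes
				 * executable, and pages inside the kernel
				 * image are mapped huge where possible. */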
				if (force)
					pte = __mk_pte(address, pgprot);
				else if (parisc_text_address(vaddr)) {
					pte = __mk_pte(address, PAGE_KERNEL_EXEC);
					if (address >= ro_start && address < kernel_end)
						pte = pte_mkhuge(pte);
				}
				else
#if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
				if (address >= ro_start && address < ro_end) {
					pte = __mk_pte(address, PAGE_KERNEL_EXEC);
					pte = pte_mkhuge(pte);
				} else
#endif
				{
					pte = __mk_pte(address, pgprot);
					if (address >= ro_start && address < kernel_end)
						pte = pte_mkhuge(pte);
				}

				if (address >= end_paddr) {
					if (force)
						break;
					else
						pte_val(pte) = 0;
				}

				set_pte(pg_table, pte);

				address += PAGE_SIZE;
				vaddr += PAGE_SIZE;
			}
			start_pte = 0;

			if (address >= end_paddr)
				break;
		}
		start_pmd = 0;
	}
}

void __ref free_initmem(void)
{
	unsigned long init_begin = (unsigned long)__init_begin;
	unsigned long init_end = (unsigned long)__init_end;

	/* The init text pages are marked R-X.  We have to
	 * flush the icache and mark them RW-
	 *
	 * This is tricky, because map_pages is in the init section.
	 * Do a dummy remap of the data section first (the data
	 * section is already PAGE_KERNEL) to pull in the TLB entries
	 * for map_kernel */
	map_pages(init_begin, __pa(init_begin), init_end - init_begin,
		  PAGE_KERNEL_RWX, 1);
	/* now remap at PAGE_KERNEL since the TLB is pre-primed to execute
	 * map_pages */
	map_pages(init_begin, __pa(init_begin), init_end - init_begin,
		  PAGE_KERNEL, 1);

	/* force the kernel to see the new TLB entries */
	__flush_tlb_range(0, init_begin, init_end);

	/* finally dump all the instructions which were cached, since the
	 * pages are no-longer executable */
	flush_icache_range(init_begin, init_end);

	free_initmem_default(POISON_FREE_INITMEM);

	/* set up a new led state on systems shipped with a LED State panel */
	pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE);
}


#ifdef CONFIG_STRICT_KERNEL_RWX
void mark_rodata_ro(void)
{
	/* rodata memory was already mapped with KERNEL_RO access rights by
	   pagetable_init() and map_pages(). No need to do additional stuff here */
	printk (KERN_INFO "Write protecting the kernel read-only data: %luk\n",
		(unsigned long)(__end_rodata - __start_rodata) >> 10);
}
#endif


/*
 * Just an arbitrary offset to serve as a "hole" between mapping areas
 * (between top of physical memory and a potential pcxl dma mapping
 * area, and below the vmalloc mapping area).
 *
 * The current 32K value just means that there will be a 32K "hole"
 * between mapping areas. That means that any out-of-bounds memory
 * accesses will hopefully be caught. The vmalloc() routines leave
 * a hole of 4kB between each vmalloced area for the same reason.
 */

 /* Leave room for gateway page expansion */
#if KERNEL_MAP_START < GATEWAY_PAGE_SIZE
#error KERNEL_MAP_START is in gateway reserved region
#endif
#define MAP_START (KERNEL_MAP_START)

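/* SET_MAP_OFFSET() rounds an address up to the next VM_MAP_OFFSET boundary,
 * leaving a hole of up to 32K below the result, as described above. */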
#define VM_MAP_OFFSET  (32*1024)
#define SET_MAP_OFFSET(x) ((void *)(((unsigned long)(x) + VM_MAP_OFFSET) \
				     & ~(VM_MAP_OFFSET-1)))

void *parisc_vmalloc_start __read_mostly;
EXPORT_SYMBOL(parisc_vmalloc_start);

#ifdef CONFIG_PA11
unsigned long pcxl_dma_start __read_mostly;
#endif

void __init mem_init(void)
{
	/* Do sanity checks on IPC (compat) structures */
	BUILD_BUG_ON(sizeof(struct ipc64_perm) != 48);
#ifndef CONFIG_64BIT
	BUILD_BUG_ON(sizeof(struct semid64_ds) != 80);
	BUILD_BUG_ON(sizeof(struct msqid64_ds) != 104);
	BUILD_BUG_ON(sizeof(struct shmid64_ds) != 104);
#endif
#ifdef CONFIG_COMPAT
	BUILD_BUG_ON(sizeof(struct compat_ipc64_perm) != sizeof(struct ipc64_perm));
	BUILD_BUG_ON(sizeof(struct compat_semid64_ds) != 80);
	BUILD_BUG_ON(sizeof(struct compat_msqid64_ds) != 104);
	BUILD_BUG_ON(sizeof(struct compat_shmid64_ds) != 104);
#endif

	/* Do sanity checks on page table constants */
	BUILD_BUG_ON(PTE_ENTRY_SIZE != sizeof(pte_t));
	BUILD_BUG_ON(PMD_ENTRY_SIZE != sizeof(pmd_t));
	BUILD_BUG_ON(PGD_ENTRY_SIZE != sizeof(pgd_t));
	BUILD_BUG_ON(PAGE_SHIFT + BITS_PER_PTE + BITS_PER_PMD + BITS_PER_PGD
			> BITS_PER_LONG);

	high_memory = __va((max_pfn << PAGE_SHIFT));
	set_max_mapnr(page_to_pfn(virt_to_page(high_memory - 1)) + 1);
	free_all_bootmem();

#ifdef CONFIG_PA11
	if (hppa_dma_ops == &pcxl_dma_ops) {
		pcxl_dma_start = (unsigned long)SET_MAP_OFFSET(MAP_START);
		parisc_vmalloc_start = SET_MAP_OFFSET(pcxl_dma_start
						+ PCXL_DMA_MAP_SIZE);
	} else {
		pcxl_dma_start = 0;
		parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START);
	}
#else
	parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START);
#endif

	mem_init_print_info(NULL);

#if 0
	/*
	 * Do not expose the virtual kernel memory layout to userspace.
	 * But keep code for debugging purposes.
	 */
	printk("virtual kernel memory layout:\n"
	       "    vmalloc : 0x%px - 0x%px   (%4ld MB)\n"
	       "    memory  : 0x%px - 0x%px   (%4ld MB)\n"
	       "      .init : 0x%px - 0x%px   (%4ld kB)\n"
	       "      .data : 0x%px - 0x%px   (%4ld kB)\n"
	       "      .text : 0x%px - 0x%px   (%4ld kB)\n",

	       (void*)VMALLOC_START, (void*)VMALLOC_END,
	       (VMALLOC_END - VMALLOC_START) >> 20,

	       __va(0), high_memory,
	       ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,

	       __init_begin, __init_end,
	       ((unsigned long)__init_end - (unsigned long)__init_begin) >> 10,

	       _etext, _edata,
	       ((unsigned long)_edata - (unsigned long)_etext) >> 10,

	       _text, _etext,
	       ((unsigned long)_etext - (unsigned long)_text) >> 10);
#endif
}

unsigned long *empty_zero_page __read_mostly;
EXPORT_SYMBOL(empty_zero_page);

/*
 * pagetable_init() sets up the page tables
 *
 * Note that gateway_init() places the Linux gateway page at page 0.
 * Since gateway pages cannot be dereferenced this has the desirable
 * side effect of trapping those pesky NULL-reference errors in the
 * kernel.
 */
static void __init pagetable_init(void)
{
	int range;

	/* Map each physical memory range to its kernel vaddr */

	for (range = 0; range < npmem_ranges; range++) {
		unsigned long start_paddr;
		unsigned long end_paddr;
		unsigned long size;

		start_paddr = pmem_ranges[range].start_pfn << PAGE_SHIFT;
		size = pmem_ranges[range].pages << PAGE_SHIFT;
		end_paddr = start_paddr + size;

		map_pages((unsigned long)__va(start_paddr), start_paddr,
			  size, PAGE_KERNEL, 0);
	}

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_end && initrd_end > mem_limit) {
		printk(KERN_INFO "initrd: mapping %08lx-%08lx\n", initrd_start, initrd_end);
		map_pages(initrd_start, __pa(initrd_start),
			  initrd_end - initrd_start, PAGE_KERNEL, 0);
	}
#endif

	empty_zero_page = get_memblock(PAGE_SIZE);
}

static void __init gateway_init(void)
{
	unsigned long linux_gateway_page_addr;
	/* FIXME: This is 'const' in order to trick the compiler
	   into not treating it as DP-relative data. */
	extern void * const linux_gateway_page;

	linux_gateway_page_addr = LINUX_GATEWAY_ADDR & PAGE_MASK;

	/*
	 * Setup Linux Gateway page.
	 *
	 * The Linux gateway page will reside in kernel space (on virtual
	 * page 0), so it doesn't need to be aliased into user space.
	 */

	map_pages(linux_gateway_page_addr, __pa(&linux_gateway_page),
		  PAGE_SIZE, PAGE_GATEWAY, 1);
}

void __init paging_init(void)
{
	int i;

	setup_bootmem();
	pagetable_init();
	gateway_init();
	flush_cache_all_local(); /* start with known state */
	flush_tlb_all_local(NULL);

	for (i = 0; i < npmem_ranges; i++) {
		unsigned long zones_size[MAX_NR_ZONES] = { 0, };

		zones_size[ZONE_NORMAL] = pmem_ranges[i].pages;

#ifdef CONFIG_DISCONTIGMEM
		/* Need to initialize the pfnnid_map before we can initialize
		   the zone */
		{
			int j;
			for (j = (pmem_ranges[i].start_pfn >> PFNNID_SHIFT);
			     j <= ((pmem_ranges[i].start_pfn + pmem_ranges[i].pages) >> PFNNID_SHIFT);
			     j++) {
				pfnnid_map[j] = i;
			}
		}
#endif

		free_area_init_node(i, zones_size,
				pmem_ranges[i].start_pfn, NULL);
	}
}

#ifdef CONFIG_PA20

/*
 * Currently, all PA20 chips have 18 bit protection IDs, which is the
 * limiting factor (space ids are 32 bits).
 */

#define NR_SPACE_IDS 262144

#else

/*
 * Currently we have a one-to-one relationship between space IDs and
 * protection IDs. Older parisc chips (PCXS, PCXT, PCXL, PCXL2) only
 * support 15 bit protection IDs, so that is the limiting factor.
 * PCXT' has 18 bit protection IDs, but only 16 bit spaceids, so it's
 * probably not worth the effort for a special case here.
 */

#define NR_SPACE_IDS 32768

#endif  /* !CONFIG_PA20 */

#define RECYCLE_THRESHOLD (NR_SPACE_IDS / 2)
#define SID_ARRAY_SIZE (NR_SPACE_IDS / (8 * sizeof(long)))

static unsigned long space_id[SID_ARRAY_SIZE] = { 1 }; /* disallow space 0 */
static unsigned long dirty_space_id[SID_ARRAY_SIZE];
static unsigned long space_id_index;
static unsigned long free_space_ids = NR_SPACE_IDS - 1;
static unsigned long dirty_space_ids = 0;

static DEFINE_SPINLOCK(sid_lock);

unsigned long alloc_sid(void)
{
	unsigned long index;

	spin_lock(&sid_lock);

	if (free_space_ids == 0) {
		if (dirty_space_ids != 0) {
			spin_unlock(&sid_lock);
			flush_tlb_all(); /* flush_tlb_all() calls recycle_sids() */
			spin_lock(&sid_lock);
		}
		BUG_ON(free_space_ids == 0);
	}

	free_space_ids--;

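	/* Find the next free id at or after space_id_index, mark its bit
	 * allocated, and remember where to resume the next search. */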
	index = find_next_zero_bit(space_id, NR_SPACE_IDS, space_id_index);
	space_id[index >> SHIFT_PER_LONG] |= (1L << (index & (BITS_PER_LONG - 1)));
	space_id_index = index;

	spin_unlock(&sid_lock);

	return index << SPACEID_SHIFT;
}

void free_sid(unsigned long spaceid)
{
	unsigned long index = spaceid >> SPACEID_SHIFT;
	unsigned long *dirty_space_offset;

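	/* Freed ids are only marked dirty here; they stay allocated in
	 * space_id[] until flush_tlb_all() recycles them. */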
	dirty_space_offset = dirty_space_id + (index >> SHIFT_PER_LONG);
	index &= (BITS_PER_LONG - 1);

	spin_lock(&sid_lock);

	BUG_ON(*dirty_space_offset & (1L << index)); /* attempt to free space id twice */

	*dirty_space_offset |= (1L << index);
	dirty_space_ids++;

	spin_unlock(&sid_lock);
}


#ifdef CONFIG_SMP
static void get_dirty_sids(unsigned long *ndirtyptr,unsigned long *dirty_array)
{
	int i;

	/* NOTE: sid_lock must be held upon entry */

	*ndirtyptr = dirty_space_ids;
	if (dirty_space_ids != 0) {
		for (i = 0; i < SID_ARRAY_SIZE; i++) {
			dirty_array[i] = dirty_space_id[i];
			dirty_space_id[i] = 0;
		}
		dirty_space_ids = 0;
	}

	return;
}

static void recycle_sids(unsigned long ndirty,unsigned long *dirty_array)
{
	int i;

	/* NOTE: sid_lock must be held upon entry */

	if (ndirty != 0) {
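		/* Every dirty bit is still set in space_id[], so XOR-ing
		 * clears it and makes the id available again. */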
		for (i = 0; i < SID_ARRAY_SIZE; i++) {
			space_id[i] ^= dirty_array[i];
		}

		free_space_ids += ndirty;
		space_id_index = 0;
	}
}

#else /* CONFIG_SMP */

static void recycle_sids(void)
{
	int i;

	/* NOTE: sid_lock must be held upon entry */

	if (dirty_space_ids != 0) {
		for (i = 0; i < SID_ARRAY_SIZE; i++) {
			space_id[i] ^= dirty_space_id[i];
			dirty_space_id[i] = 0;
		}

		free_space_ids += dirty_space_ids;
		dirty_space_ids = 0;
		space_id_index = 0;
	}
}
#endif

/*
 * flush_tlb_all() calls recycle_sids(), since whenever the entire tlb is
 * purged, we can safely reuse the space ids that were released but
 * not flushed from the tlb.
 */

#ifdef CONFIG_SMP

static unsigned long recycle_ndirty;
static unsigned long recycle_dirty_array[SID_ARRAY_SIZE];
static unsigned int recycle_inuse;

void flush_tlb_all(void)
{
	int do_recycle;

	__inc_irq_stat(irq_tlb_count);
	do_recycle = 0;
	spin_lock(&sid_lock);
	if (dirty_space_ids > RECYCLE_THRESHOLD) {
		BUG_ON(recycle_inuse);  /* FIXME: Use a semaphore/wait queue here */
		get_dirty_sids(&recycle_ndirty,recycle_dirty_array);
		recycle_inuse++;
		do_recycle++;
	}
	spin_unlock(&sid_lock);
	on_each_cpu(flush_tlb_all_local, NULL, 1);
	if (do_recycle) {
		spin_lock(&sid_lock);
		recycle_sids(recycle_ndirty,recycle_dirty_array);
		recycle_inuse = 0;
		spin_unlock(&sid_lock);
	}
}
#else
void flush_tlb_all(void)
{
	__inc_irq_stat(irq_tlb_count);
	spin_lock(&sid_lock);
	flush_tlb_all_local(NULL);
	recycle_sids();
	spin_unlock(&sid_lock);
}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif