/*
 *  linux/arch/parisc/mm/init.c
 *
 *  Copyright (C) 1995	Linus Torvalds
 *  Copyright 1999 SuSE GmbH
 *    changed by Philipp Rumpf
 *  Copyright 1999 Philipp Rumpf (prumpf@tux.org)
 *  Copyright 2004 Randolph Chung (tausq@debian.org)
 *  Copyright 2006-2007 Helge Deller (deller@gmx.de)
 *
 */


#include <linux/module.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>		/* for hppa_dma_ops and pcxl_dma_ops */
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/unistd.h>
#include <linux/nodemask.h>	/* for node_online_map */
#include <linux/pagemap.h>	/* for release_pages and page_cache_release */
#include <linux/compat.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/pdc_chassis.h>
#include <asm/mmzone.h>
#include <asm/sections.h>
#include <asm/msgbuf.h>

extern int  data_start;
extern void parisc_kernel_start(void);	/* Kernel entry point in head.S */

#if CONFIG_PGTABLE_LEVELS == 3
/* NOTE: This layout exactly conforms to the hybrid L2/L3 page table layout
 * with the first pmd adjacent to the pgd and below it. gcc doesn't actually
 * guarantee that global objects will be laid out in memory in the same order
 * as the order of declaration, so put these in different sections and use
 * the linker script to order them. */
pmd_t pmd0[PTRS_PER_PMD] __attribute__ ((__section__ (".data..vm0.pmd"), aligned(PAGE_SIZE)));
#endif

pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__ ((__section__ (".data..vm0.pgd"), aligned(PAGE_SIZE)));
pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __attribute__ ((__section__ (".data..vm0.pte"), aligned(PAGE_SIZE)));

#ifdef CONFIG_DISCONTIGMEM
struct node_map_data node_data[MAX_NUMNODES] __read_mostly;
signed char pfnnid_map[PFNNID_MAP_MAX] __read_mostly;
#endif

static struct resource data_resource = {
	.name	= "Kernel data",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource code_resource = {
	.name	= "Kernel code",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource pdcdata_resource = {
	.name	= "PDC data (Page Zero)",
	.start	= 0,
	.end	= 0x9ff,
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource sysram_resources[MAX_PHYSMEM_RANGES] __read_mostly;

/* The following array is initialized from the firmware specific
 * information retrieved in kernel/inventory.c.
 */

physmem_range_t pmem_ranges[MAX_PHYSMEM_RANGES] __read_mostly;
int npmem_ranges __read_mostly;

#ifdef CONFIG_64BIT
#define MAX_MEM         (~0UL)
#else /* !CONFIG_64BIT */
#define MAX_MEM         (3584U*1024U*1024U)
#endif /* !CONFIG_64BIT */

static unsigned long mem_limit __read_mostly = MAX_MEM;

static void __init mem_limit_func(void)
{
	char *cp, *end;
	unsigned long limit;

	/* We need this before __setup() functions are called */

	limit = MAX_MEM;
	for (cp = boot_command_line; *cp; ) {
		if (memcmp(cp, "mem=", 4) == 0) {
			cp += 4;
			limit = memparse(cp, &end);
			if (end != cp)
				break;
			cp = end;
		} else {
			while (*cp != ' ' && *cp)
				++cp;
			while (*cp == ' ')
				++cp;
		}
	}

	if (limit < mem_limit)
		mem_limit = limit;
}

#define MAX_GAP (0x40000000UL >> PAGE_SHIFT)

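/* Sort and sanitize the firmware-provided physical memory ranges, register
 * them as "System RAM" resources, apply any "mem=" limit, and set up the
 * bootmem allocator: PAGE0/PDC memory, the kernel image, the bootmap itself,
 * any memory holes and an optional initrd are all reserved here. */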
static void __init setup_bootmem(void)
{
	unsigned long bootmap_size;
	unsigned long mem_max;
	unsigned long bootmap_pages;
	unsigned long bootmap_start_pfn;
	unsigned long bootmap_pfn;
#ifndef CONFIG_DISCONTIGMEM
	physmem_range_t pmem_holes[MAX_PHYSMEM_RANGES - 1];
	int npmem_holes;
#endif
	int i, sysram_resource_count;

	disable_sr_hashing(); /* Turn off space register hashing */

	/*
	 * Sort the ranges. Since the number of ranges is typically
	 * small, and performance is not an issue here, just do
	 * a simple insertion sort.
	 */

	for (i = 1; i < npmem_ranges; i++) {
		int j;

		for (j = i; j > 0; j--) {
			unsigned long tmp;

			if (pmem_ranges[j-1].start_pfn <
			    pmem_ranges[j].start_pfn) {

				break;
			}
			tmp = pmem_ranges[j-1].start_pfn;
			pmem_ranges[j-1].start_pfn = pmem_ranges[j].start_pfn;
			pmem_ranges[j].start_pfn = tmp;
			tmp = pmem_ranges[j-1].pages;
			pmem_ranges[j-1].pages = pmem_ranges[j].pages;
			pmem_ranges[j].pages = tmp;
		}
	}

#ifndef CONFIG_DISCONTIGMEM
	/*
	 * Throw out ranges that are too far apart (controlled by
	 * MAX_GAP).
	 */

	for (i = 1; i < npmem_ranges; i++) {
		if (pmem_ranges[i].start_pfn -
			(pmem_ranges[i-1].start_pfn +
			 pmem_ranges[i-1].pages) > MAX_GAP) {
			npmem_ranges = i;
			printk("Large gap in memory detected (%ld pages). "
			       "Consider turning on CONFIG_DISCONTIGMEM\n",
			       pmem_ranges[i].start_pfn -
				(pmem_ranges[i-1].start_pfn +
				 pmem_ranges[i-1].pages));
			break;
		}
	}
#endif

	if (npmem_ranges > 1) {

		/* Print the memory ranges */

		printk(KERN_INFO "Memory Ranges:\n");

		for (i = 0; i < npmem_ranges; i++) {
			unsigned long start;
			unsigned long size;

			size = (pmem_ranges[i].pages << PAGE_SHIFT);
			start = (pmem_ranges[i].start_pfn << PAGE_SHIFT);
			printk(KERN_INFO "%2d) Start 0x%016lx End 0x%016lx Size %6ld MB\n",
				i, start, start + (size - 1), size >> 20);
		}
	}

	sysram_resource_count = npmem_ranges;
	for (i = 0; i < sysram_resource_count; i++) {
		struct resource *res = &sysram_resources[i];
		res->name = "System RAM";
		res->start = pmem_ranges[i].start_pfn << PAGE_SHIFT;
		res->end = res->start + (pmem_ranges[i].pages << PAGE_SHIFT) - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
		request_resource(&iomem_resource, res);
	}

	/*
	 * For 32 bit kernels we limit the amount of memory we can
	 * support, in order to preserve enough kernel address space
	 * for other purposes. For 64 bit kernels we don't normally
	 * limit the memory, but this mechanism can be used to
	 * artificially limit the amount of memory (and it is written
	 * to work with multiple memory ranges).
	 */

	mem_limit_func();       /* check for "mem=" argument */

	mem_max = 0;
	for (i = 0; i < npmem_ranges; i++) {
		unsigned long rsize;

		rsize = pmem_ranges[i].pages << PAGE_SHIFT;
		if ((mem_max + rsize) > mem_limit) {
			printk(KERN_WARNING "Memory truncated to %ld MB\n", mem_limit >> 20);
			if (mem_max == mem_limit)
				npmem_ranges = i;
			else {
				pmem_ranges[i].pages = (mem_limit >> PAGE_SHIFT)
						       - (mem_max >> PAGE_SHIFT);
				npmem_ranges = i + 1;
				mem_max = mem_limit;
			}
			break;
		}
		mem_max += rsize;
	}

	printk(KERN_INFO "Total Memory: %ld MB\n", mem_max >> 20);

#ifndef CONFIG_DISCONTIGMEM
	/* Merge the ranges, keeping track of the holes */

	{
		unsigned long end_pfn;
		unsigned long hole_pages;

		npmem_holes = 0;
		end_pfn = pmem_ranges[0].start_pfn + pmem_ranges[0].pages;
		for (i = 1; i < npmem_ranges; i++) {

			hole_pages = pmem_ranges[i].start_pfn - end_pfn;
			if (hole_pages) {
				pmem_holes[npmem_holes].start_pfn = end_pfn;
				pmem_holes[npmem_holes++].pages = hole_pages;
				end_pfn += hole_pages;
			}
			end_pfn += pmem_ranges[i].pages;
		}

		pmem_ranges[0].pages = end_pfn - pmem_ranges[0].start_pfn;
		npmem_ranges = 1;
	}
#endif

	bootmap_pages = 0;
	for (i = 0; i < npmem_ranges; i++)
		bootmap_pages += bootmem_bootmap_pages(pmem_ranges[i].pages);

	bootmap_start_pfn = PAGE_ALIGN(__pa((unsigned long) &_end)) >> PAGE_SHIFT;

#ifdef CONFIG_DISCONTIGMEM
	for (i = 0; i < MAX_PHYSMEM_RANGES; i++) {
		memset(NODE_DATA(i), 0, sizeof(pg_data_t));
		NODE_DATA(i)->bdata = &bootmem_node_data[i];
	}
	memset(pfnnid_map, 0xff, sizeof(pfnnid_map));

	for (i = 0; i < npmem_ranges; i++) {
		node_set_state(i, N_NORMAL_MEMORY);
		node_set_online(i);
	}
#endif

	/*
	 * Initialize and free the full range of memory in each range.
	 * Note that the only writing these routines do is to the bootmap,
	 * and we've made sure to locate the bootmap properly so that they
	 * won't be writing over anything important.
	 */

	bootmap_pfn = bootmap_start_pfn;
	max_pfn = 0;
	for (i = 0; i < npmem_ranges; i++) {
		unsigned long start_pfn;
		unsigned long npages;

		start_pfn = pmem_ranges[i].start_pfn;
		npages = pmem_ranges[i].pages;

		bootmap_size = init_bootmem_node(NODE_DATA(i),
						bootmap_pfn,
						start_pfn,
						(start_pfn + npages) );
		free_bootmem_node(NODE_DATA(i),
				(start_pfn << PAGE_SHIFT),
				(npages << PAGE_SHIFT) );
		bootmap_pfn += (bootmap_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
		if ((start_pfn + npages) > max_pfn)
			max_pfn = start_pfn + npages;
	}

	/* IOMMU is always used to access "high mem" on those boxes
	 * that can support enough mem that a PCI device couldn't
	 * directly DMA to any physical addresses.
	 * ISA DMA support will need to revisit this.
	 */
	max_low_pfn = max_pfn;

	/* bootmap sizing messed up? */
	BUG_ON((bootmap_pfn - bootmap_start_pfn) != bootmap_pages);

	/* reserve PAGE0 pdc memory, kernel text/data/bss & bootmap */

#define PDC_CONSOLE_IO_IODC_SIZE 32768

	reserve_bootmem_node(NODE_DATA(0), 0UL,
			(unsigned long)(PAGE0->mem_free +
				PDC_CONSOLE_IO_IODC_SIZE), BOOTMEM_DEFAULT);
	reserve_bootmem_node(NODE_DATA(0), __pa(KERNEL_BINARY_TEXT_START),
			(unsigned long)(_end - KERNEL_BINARY_TEXT_START),
			BOOTMEM_DEFAULT);
	reserve_bootmem_node(NODE_DATA(0), (bootmap_start_pfn << PAGE_SHIFT),
			((bootmap_pfn - bootmap_start_pfn) << PAGE_SHIFT),
			BOOTMEM_DEFAULT);

#ifndef CONFIG_DISCONTIGMEM

	/* reserve the holes */

	for (i = 0; i < npmem_holes; i++) {
		reserve_bootmem_node(NODE_DATA(0),
				(pmem_holes[i].start_pfn << PAGE_SHIFT),
				(pmem_holes[i].pages << PAGE_SHIFT),
				BOOTMEM_DEFAULT);
	}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start) {
		printk(KERN_INFO "initrd: %08lx-%08lx\n", initrd_start, initrd_end);
		if (__pa(initrd_start) < mem_max) {
			unsigned long initrd_reserve;

			if (__pa(initrd_end) > mem_max) {
				initrd_reserve = mem_max - __pa(initrd_start);
			} else {
				initrd_reserve = initrd_end - initrd_start;
			}
			initrd_below_start_ok = 1;
			printk(KERN_INFO "initrd: reserving %08lx-%08lx (mem_max %08lx)\n", __pa(initrd_start), __pa(initrd_start) + initrd_reserve, mem_max);

			reserve_bootmem_node(NODE_DATA(0), __pa(initrd_start),
					initrd_reserve, BOOTMEM_DEFAULT);
		}
	}
#endif

	data_resource.start = virt_to_phys(&data_start);
	data_resource.end = virt_to_phys(_end) - 1;
	code_resource.start = virt_to_phys(_text);
	code_resource.end = virt_to_phys(&data_start) - 1;

	/* We don't know which region the kernel will be in, so try
	 * all of them.
	 */
	for (i = 0; i < sysram_resource_count; i++) {
		struct resource *res = &sysram_resources[i];
		request_resource(res, &code_resource);
		request_resource(res, &data_resource);
	}
	request_resource(&sysram_resources[0], &pdcdata_resource);
}

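/* Return true if vaddr lies in kernel text.  This also covers the head.S
 * entry page (parisc_kernel_start), which core_kernel_text() alone would
 * miss. */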
static int __init parisc_text_address(unsigned long vaddr)
{
	static unsigned long head_ptr __initdata;

	if (!head_ptr)
		head_ptr = PAGE_MASK & (unsigned long)
			dereference_function_descriptor(&parisc_kernel_start);

	return core_kernel_text(vaddr) || vaddr == head_ptr;
}

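/* Create kernel page table entries mapping the physical range
 * [start_paddr, start_paddr + size) at start_vaddr with protection pgprot.
 * Unless "force" is set, kernel text is mapped PAGE_KERNEL_EXEC and (with
 * 4kB pages) the read-only region gets PAGE_KERNEL_RO, with the fault
 * vector and the gateway page excepted.  Intermediate page tables are
 * allocated from bootmem as needed. */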
static void __init map_pages(unsigned long start_vaddr,
			     unsigned long start_paddr, unsigned long size,
			     pgprot_t pgprot, int force)
{
	pgd_t *pg_dir;
	pmd_t *pmd;
	pte_t *pg_table;
	unsigned long end_paddr;
	unsigned long start_pmd;
	unsigned long start_pte;
	unsigned long tmp1;
	unsigned long tmp2;
	unsigned long address;
	unsigned long vaddr;
	unsigned long ro_start;
	unsigned long ro_end;
	unsigned long fv_addr;
	unsigned long gw_addr;
	extern const unsigned long fault_vector_20;
	extern void * const linux_gateway_page;

	ro_start = __pa((unsigned long)_text);
	ro_end   = __pa((unsigned long)&data_start);
	fv_addr  = __pa((unsigned long)&fault_vector_20) & PAGE_MASK;
	gw_addr  = __pa((unsigned long)&linux_gateway_page) & PAGE_MASK;

	end_paddr = start_paddr + size;

	pg_dir = pgd_offset_k(start_vaddr);

#if PTRS_PER_PMD == 1
	start_pmd = 0;
#else
	start_pmd = ((start_vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
#endif
	start_pte = ((start_vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));

	address = start_paddr;
	vaddr = start_vaddr;
	while (address < end_paddr) {
#if PTRS_PER_PMD == 1
		pmd = (pmd_t *)__pa(pg_dir);
#else
		pmd = (pmd_t *)pgd_address(*pg_dir);

		/*
		 * pmd is physical at this point
		 */

		if (!pmd) {
			pmd = (pmd_t *) alloc_bootmem_low_pages_node(NODE_DATA(0), PAGE_SIZE << PMD_ORDER);
			pmd = (pmd_t *) __pa(pmd);
		}

		pgd_populate(NULL, pg_dir, __va(pmd));
#endif
		pg_dir++;

		/* now change pmd to kernel virtual addresses */

		pmd = (pmd_t *)__va(pmd) + start_pmd;
		for (tmp1 = start_pmd; tmp1 < PTRS_PER_PMD; tmp1++, pmd++) {

			/*
			 * pg_table is physical at this point
			 */

			pg_table = (pte_t *)pmd_address(*pmd);
			if (!pg_table) {
				pg_table = (pte_t *)
					alloc_bootmem_low_pages_node(NODE_DATA(0), PAGE_SIZE);
				pg_table = (pte_t *) __pa(pg_table);
			}

			pmd_populate_kernel(NULL, pmd, __va(pg_table));

			/* now change pg_table to kernel virtual addresses */

			pg_table = (pte_t *) __va(pg_table) + start_pte;
			for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) {
				pte_t pte;

				/*
				 * Map the fault vector writable so we can
				 * write the HPMC checksum.
				 */
				if (force)
					pte = __mk_pte(address, pgprot);
				else if (parisc_text_address(vaddr) &&
					 address != fv_addr)
					pte = __mk_pte(address, PAGE_KERNEL_EXEC);
				else
#if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
				if (address >= ro_start && address < ro_end
							&& address != fv_addr
							&& address != gw_addr)
					pte = __mk_pte(address, PAGE_KERNEL_RO);
				else
#endif
					pte = __mk_pte(address, pgprot);

				if (address >= end_paddr) {
					if (force)
						break;
					else
						pte_val(pte) = 0;
				}

				set_pte(pg_table, pte);

				address += PAGE_SIZE;
				vaddr += PAGE_SIZE;
			}
			start_pte = 0;

			if (address >= end_paddr)
				break;
		}
		start_pmd = 0;
	}
}

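/* Release the __init sections: remap them as normal read-write kernel
 * memory, fill them with break instructions to trap stray execution,
 * flush the caches and hand the pages back to the page allocator. */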
void free_initmem(void)
{
	unsigned long init_begin = (unsigned long)__init_begin;
	unsigned long init_end = (unsigned long)__init_end;

	/* The init text pages are marked R-X.  We have to
	 * flush the icache and mark them RW-
	 *
	 * This is tricky, because map_pages is in the init section.
	 * Do a dummy remap of the data section first (the data
	 * section is already PAGE_KERNEL) to pull in the TLB entries
	 * for map_kernel */
	map_pages(init_begin, __pa(init_begin), init_end - init_begin,
		  PAGE_KERNEL_RWX, 1);
	/* now remap at PAGE_KERNEL since the TLB is pre-primed to execute
	 * map_pages */
	map_pages(init_begin, __pa(init_begin), init_end - init_begin,
		  PAGE_KERNEL, 1);

	/* force the kernel to see the new TLB entries */
	__flush_tlb_range(0, init_begin, init_end);
	/* Attempt to catch anyone trying to execute code here
	 * by filling the page with BRK insns.
	 */
	memset((void *)init_begin, 0x00, init_end - init_begin);
	/* finally dump all the instructions which were cached, since the
	 * pages are no-longer executable */
	flush_icache_range(init_begin, init_end);

	free_initmem_default(-1);

	/* set up a new led state on systems shipped with an LED State panel */
	pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE);
}


#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void)
{
	/* rodata memory was already mapped with KERNEL_RO access rights by
	   pagetable_init() and map_pages(). No need to do additional stuff here */
	printk (KERN_INFO "Write protecting the kernel read-only data: %luk\n",
		(unsigned long)(__end_rodata - __start_rodata) >> 10);
}
#endif


/*
 * Just an arbitrary offset to serve as a "hole" between mapping areas
 * (between top of physical memory and a potential pcxl dma mapping
 * area, and below the vmalloc mapping area).
 *
 * The current 32K value just means that there will be a 32K "hole"
 * between mapping areas. That means that any out-of-bounds memory
 * accesses will hopefully be caught. The vmalloc() routines leave
 * a hole of 4kB between each vmalloced area for the same reason.
 */

 /* Leave room for gateway page expansion */
#if KERNEL_MAP_START < GATEWAY_PAGE_SIZE
#error KERNEL_MAP_START is in gateway reserved region
#endif
#define MAP_START (KERNEL_MAP_START)

#define VM_MAP_OFFSET  (32*1024)
#define SET_MAP_OFFSET(x) ((void *)(((unsigned long)(x) + VM_MAP_OFFSET) \
				     & ~(VM_MAP_OFFSET-1)))

void *parisc_vmalloc_start __read_mostly;
EXPORT_SYMBOL(parisc_vmalloc_start);

#ifdef CONFIG_PA11
unsigned long pcxl_dma_start __read_mostly;
#endif

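/* Perform build-time sanity checks, hand the bootmem pages over to the page
 * allocator, place the vmalloc (and, on PA11 with PCXL, the DMA) mapping
 * start, and with CONFIG_DEBUG_KERNEL print the virtual memory layout. */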
void __init mem_init(void)
{
	/* Do sanity checks on IPC (compat) structures */
	BUILD_BUG_ON(sizeof(struct ipc64_perm) != 48);
#ifndef CONFIG_64BIT
	BUILD_BUG_ON(sizeof(struct semid64_ds) != 80);
	BUILD_BUG_ON(sizeof(struct msqid64_ds) != 104);
	BUILD_BUG_ON(sizeof(struct shmid64_ds) != 104);
#endif
#ifdef CONFIG_COMPAT
	BUILD_BUG_ON(sizeof(struct compat_ipc64_perm) != sizeof(struct ipc64_perm));
	BUILD_BUG_ON(sizeof(struct compat_semid64_ds) != 80);
	BUILD_BUG_ON(sizeof(struct compat_msqid64_ds) != 104);
	BUILD_BUG_ON(sizeof(struct compat_shmid64_ds) != 104);
#endif

	/* Do sanity checks on page table constants */
	BUILD_BUG_ON(PTE_ENTRY_SIZE != sizeof(pte_t));
	BUILD_BUG_ON(PMD_ENTRY_SIZE != sizeof(pmd_t));
	BUILD_BUG_ON(PGD_ENTRY_SIZE != sizeof(pgd_t));
	BUILD_BUG_ON(PAGE_SHIFT + BITS_PER_PTE + BITS_PER_PMD + BITS_PER_PGD
			> BITS_PER_LONG);

	high_memory = __va((max_pfn << PAGE_SHIFT));
	set_max_mapnr(page_to_pfn(virt_to_page(high_memory - 1)) + 1);
	free_all_bootmem();

#ifdef CONFIG_PA11
	if (hppa_dma_ops == &pcxl_dma_ops) {
		pcxl_dma_start = (unsigned long)SET_MAP_OFFSET(MAP_START);
		parisc_vmalloc_start = SET_MAP_OFFSET(pcxl_dma_start
						+ PCXL_DMA_MAP_SIZE);
	} else {
		pcxl_dma_start = 0;
		parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START);
	}
#else
	parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START);
#endif

	mem_init_print_info(NULL);
#ifdef CONFIG_DEBUG_KERNEL /* double-sanity-check paranoia */
	printk("virtual kernel memory layout:\n"
	       "    vmalloc : 0x%p - 0x%p   (%4ld MB)\n"
	       "    memory  : 0x%p - 0x%p   (%4ld MB)\n"
	       "      .init : 0x%p - 0x%p   (%4ld kB)\n"
	       "      .data : 0x%p - 0x%p   (%4ld kB)\n"
	       "      .text : 0x%p - 0x%p   (%4ld kB)\n",

	       (void*)VMALLOC_START, (void*)VMALLOC_END,
	       (VMALLOC_END - VMALLOC_START) >> 20,

	       __va(0), high_memory,
	       ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,

	       __init_begin, __init_end,
	       ((unsigned long)__init_end - (unsigned long)__init_begin) >> 10,

	       _etext, _edata,
	       ((unsigned long)_edata - (unsigned long)_etext) >> 10,

	       _text, _etext,
	       ((unsigned long)_etext - (unsigned long)_text) >> 10);
#endif
}

unsigned long *empty_zero_page __read_mostly;
EXPORT_SYMBOL(empty_zero_page);

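/* Print a summary of RAM usage: free areas, totals of present and reserved
 * pages over all zones and, under CONFIG_DISCONTIGMEM, the zonelist of
 * every node. */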
void show_mem(unsigned int filter)
{
	int total = 0, reserved = 0;
	pg_data_t *pgdat;

	printk(KERN_INFO "Mem-info:\n");
	show_free_areas(filter);

	for_each_online_pgdat(pgdat) {
		unsigned long flags;
		int zoneid;

		pgdat_resize_lock(pgdat, &flags);
		for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
			struct zone *zone = &pgdat->node_zones[zoneid];
			if (!populated_zone(zone))
				continue;

			total += zone->present_pages;
			reserved = zone->present_pages - zone->managed_pages;
		}
		pgdat_resize_unlock(pgdat, &flags);
	}

	printk(KERN_INFO "%d pages of RAM\n", total);
	printk(KERN_INFO "%d reserved pages\n", reserved);

#ifdef CONFIG_DISCONTIGMEM
	{
		struct zonelist *zl;
		int i, j;

		for (i = 0; i < npmem_ranges; i++) {
			zl = node_zonelist(i, 0);
			for (j = 0; j < MAX_NR_ZONES; j++) {
				struct zoneref *z;
				struct zone *zone;

				printk("Zone list for zone %d on node %d: ", j, i);
				for_each_zone_zonelist(zone, z, zl, j)
					printk("[%d/%s] ", zone_to_nid(zone),
					       zone->name);
				printk("\n");
			}
		}
	}
#endif
}

/*
 * pagetable_init() sets up the page tables
 *
 * Note that gateway_init() places the Linux gateway page at page 0.
 * Since gateway pages cannot be dereferenced this has the desirable
 * side effect of trapping those pesky NULL-reference errors in the
 * kernel.
 */
static void __init pagetable_init(void)
{
	int range;

	/* Map each physical memory range to its kernel vaddr */

	for (range = 0; range < npmem_ranges; range++) {
		unsigned long start_paddr;
		unsigned long end_paddr;
		unsigned long size;

		start_paddr = pmem_ranges[range].start_pfn << PAGE_SHIFT;
		end_paddr = start_paddr + (pmem_ranges[range].pages << PAGE_SHIFT);
		size = pmem_ranges[range].pages << PAGE_SHIFT;

		map_pages((unsigned long)__va(start_paddr), start_paddr,
			  size, PAGE_KERNEL, 0);
	}

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_end && initrd_end > mem_limit) {
		printk(KERN_INFO "initrd: mapping %08lx-%08lx\n", initrd_start, initrd_end);
		map_pages(initrd_start, __pa(initrd_start),
			  initrd_end - initrd_start, PAGE_KERNEL, 0);
	}
#endif

	empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
}

static void __init gateway_init(void)
{
	unsigned long linux_gateway_page_addr;
	/* FIXME: This is 'const' in order to trick the compiler
	   into not treating it as DP-relative data. */
	extern void * const linux_gateway_page;

	linux_gateway_page_addr = LINUX_GATEWAY_ADDR & PAGE_MASK;

	/*
	 * Setup Linux Gateway page.
	 *
	 * The Linux gateway page will reside in kernel space (on virtual
	 * page 0), so it doesn't need to be aliased into user space.
	 */

	map_pages(linux_gateway_page_addr, __pa(&linux_gateway_page),
		  PAGE_SIZE, PAGE_GATEWAY, 1);
}

void __init paging_init(void)
{
	int i;

	setup_bootmem();
	pagetable_init();
	gateway_init();
	flush_cache_all_local(); /* start with known state */
	flush_tlb_all_local(NULL);

	for (i = 0; i < npmem_ranges; i++) {
		unsigned long zones_size[MAX_NR_ZONES] = { 0, };

		zones_size[ZONE_NORMAL] = pmem_ranges[i].pages;

#ifdef CONFIG_DISCONTIGMEM
		/* Need to initialize the pfnnid_map before we can initialize
		   the zone */
		{
			int j;
			for (j = (pmem_ranges[i].start_pfn >> PFNNID_SHIFT);
			     j <= ((pmem_ranges[i].start_pfn + pmem_ranges[i].pages) >> PFNNID_SHIFT);
			     j++) {
				pfnnid_map[j] = i;
			}
		}
#endif

		free_area_init_node(i, zones_size,
				    pmem_ranges[i].start_pfn, NULL);
	}
}

#ifdef CONFIG_PA20

/*
 * Currently, all PA20 chips have 18 bit protection IDs, which is the
 * limiting factor (space ids are 32 bits).
 */

#define NR_SPACE_IDS 262144

#else

/*
 * Currently we have a one-to-one relationship between space IDs and
 * protection IDs. Older parisc chips (PCXS, PCXT, PCXL, PCXL2) only
 * support 15 bit protection IDs, so that is the limiting factor.
 * PCXT' has 18 bit protection IDs, but only 16 bit spaceids, so it's
 * probably not worth the effort for a special case here.
 */

#define NR_SPACE_IDS 32768

#endif  /* !CONFIG_PA20 */

#define RECYCLE_THRESHOLD (NR_SPACE_IDS / 2)
#define SID_ARRAY_SIZE (NR_SPACE_IDS / (8 * sizeof(long)))

static unsigned long space_id[SID_ARRAY_SIZE] = { 1 }; /* disallow space 0 */
static unsigned long dirty_space_id[SID_ARRAY_SIZE];
static unsigned long space_id_index;
static unsigned long free_space_ids = NR_SPACE_IDS - 1;
static unsigned long dirty_space_ids = 0;

static DEFINE_SPINLOCK(sid_lock);

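/* Allocate a free space id.  If none are available, force a TLB flush
 * (which recycles the dirty space ids) and retry.  The returned value is
 * the index shifted into position for loading into a space register. */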
unsigned long alloc_sid(void)
{
	unsigned long index;

	spin_lock(&sid_lock);

	if (free_space_ids == 0) {
		if (dirty_space_ids != 0) {
			spin_unlock(&sid_lock);
			flush_tlb_all(); /* flush_tlb_all() calls recycle_sids() */
			spin_lock(&sid_lock);
		}
		BUG_ON(free_space_ids == 0);
	}

	free_space_ids--;

	index = find_next_zero_bit(space_id, NR_SPACE_IDS, space_id_index);
	space_id[index >> SHIFT_PER_LONG] |= (1L << (index & (BITS_PER_LONG - 1)));
	space_id_index = index;

	spin_unlock(&sid_lock);

	return index << SPACEID_SHIFT;
}

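/* Mark a space id as dirty.  It is only returned to the free pool once the
 * TLB has been flushed; see recycle_sids(). */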
void free_sid(unsigned long spaceid)
{
	unsigned long index = spaceid >> SPACEID_SHIFT;
	unsigned long *dirty_space_offset;

	dirty_space_offset = dirty_space_id + (index >> SHIFT_PER_LONG);
	index &= (BITS_PER_LONG - 1);

	spin_lock(&sid_lock);

	BUG_ON(*dirty_space_offset & (1L << index)); /* attempt to free space id twice */

	*dirty_space_offset |= (1L << index);
	dirty_space_ids++;

	spin_unlock(&sid_lock);
}


881#ifdef CONFIG_SMP
882static void get_dirty_sids(unsigned long *ndirtyptr,unsigned long *dirty_array)
883{
884 int i;
885
886 /* NOTE: sid_lock must be held upon entry */
887
888 *ndirtyptr = dirty_space_ids;
889 if (dirty_space_ids != 0) {
890 for (i = 0; i < SID_ARRAY_SIZE; i++) {
891 dirty_array[i] = dirty_space_id[i];
892 dirty_space_id[i] = 0;
893 }
894 dirty_space_ids = 0;
895 }
896
897 return;
898}
899
900static void recycle_sids(unsigned long ndirty,unsigned long *dirty_array)
901{
902 int i;
903
904 /* NOTE: sid_lock must be held upon entry */
905
906 if (ndirty != 0) {
907 for (i = 0; i < SID_ARRAY_SIZE; i++) {
908 space_id[i] ^= dirty_array[i];
909 }
910
911 free_space_ids += ndirty;
912 space_id_index = 0;
913 }
914}
915
916#else /* CONFIG_SMP */
917
918static void recycle_sids(void)
919{
920 int i;
921
922 /* NOTE: sid_lock must be held upon entry */
923
924 if (dirty_space_ids != 0) {
925 for (i = 0; i < SID_ARRAY_SIZE; i++) {
926 space_id[i] ^= dirty_space_id[i];
927 dirty_space_id[i] = 0;
928 }
929
930 free_space_ids += dirty_space_ids;
931 dirty_space_ids = 0;
932 space_id_index = 0;
933 }
934}
935#endif
936
/*
 * flush_tlb_all() calls recycle_sids(), since whenever the entire tlb is
 * purged, we can safely reuse the space ids that were released but
 * not flushed from the tlb.
 */

#ifdef CONFIG_SMP

static unsigned long recycle_ndirty;
static unsigned long recycle_dirty_array[SID_ARRAY_SIZE];
static unsigned int recycle_inuse;

void flush_tlb_all(void)
{
	int do_recycle;

	__inc_irq_stat(irq_tlb_count);
	do_recycle = 0;
	spin_lock(&sid_lock);
	if (dirty_space_ids > RECYCLE_THRESHOLD) {
		BUG_ON(recycle_inuse);  /* FIXME: Use a semaphore/wait queue here */
		get_dirty_sids(&recycle_ndirty, recycle_dirty_array);
		recycle_inuse++;
		do_recycle++;
	}
	spin_unlock(&sid_lock);
	on_each_cpu(flush_tlb_all_local, NULL, 1);
	if (do_recycle) {
		spin_lock(&sid_lock);
		recycle_sids(recycle_ndirty, recycle_dirty_array);
		recycle_inuse = 0;
		spin_unlock(&sid_lock);
	}
}
#else
void flush_tlb_all(void)
{
	__inc_irq_stat(irq_tlb_count);
	spin_lock(&sid_lock);
	flush_tlb_all_local(NULL);
	recycle_sids();
	spin_unlock(&sid_lock);
}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif