/*
 *  linux/arch/parisc/mm/init.c
 *
 *  Copyright (C) 1995 Linus Torvalds
 *  Copyright 1999 SuSE GmbH
 *    changed by Philipp Rumpf
 *  Copyright 1999 Philipp Rumpf (prumpf@tux.org)
 *  Copyright 2004 Randolph Chung (tausq@debian.org)
 *  Copyright 2006-2007 Helge Deller (deller@gmx.de)
 *
 */


#include <linux/module.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>		/* for hppa_dma_ops and pcxl_dma_ops */
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/unistd.h>
#include <linux/nodemask.h>	/* for node_online_map */
#include <linux/pagemap.h>	/* for release_pages and page_cache_release */

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/pdc_chassis.h>
#include <asm/mmzone.h>
#include <asm/sections.h>

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

extern int data_start;

#ifdef CONFIG_DISCONTIGMEM
struct node_map_data node_data[MAX_NUMNODES] __read_mostly;
bootmem_data_t bmem_data[MAX_NUMNODES] __read_mostly;
unsigned char pfnnid_map[PFNNID_MAP_MAX] __read_mostly;
#endif

static struct resource data_resource = {
	.name	= "Kernel data",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource code_resource = {
	.name	= "Kernel code",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource pdcdata_resource = {
	.name	= "PDC data (Page Zero)",
	.start	= 0,
	.end	= 0x9ff,
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource sysram_resources[MAX_PHYSMEM_RANGES] __read_mostly;

/* The following array is initialized from the firmware specific
 * information retrieved in kernel/inventory.c.
 */

physmem_range_t pmem_ranges[MAX_PHYSMEM_RANGES] __read_mostly;
int npmem_ranges __read_mostly;

#ifdef CONFIG_64BIT
#define MAX_MEM         (~0UL)
#else /* !CONFIG_64BIT */
#define MAX_MEM         (3584U*1024U*1024U)
#endif /* !CONFIG_64BIT */

static unsigned long mem_limit __read_mostly = MAX_MEM;

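/*
 * Parse any "mem=" argument from the boot command line and lower
 * mem_limit accordingly.  This is called from setup_bootmem(), i.e.
 * before the regular __setup() handlers run.
 */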
static void __init mem_limit_func(void)
{
	char *cp, *end;
	unsigned long limit;

	/* We need this before __setup() functions are called */

	limit = MAX_MEM;
	for (cp = boot_command_line; *cp; ) {
		if (memcmp(cp, "mem=", 4) == 0) {
			cp += 4;
			limit = memparse(cp, &end);
			if (end != cp)
				break;
			cp = end;
		} else {
			while (*cp != ' ' && *cp)
				++cp;
			while (*cp == ' ')
				++cp;
		}
	}

	if (limit < mem_limit)
		mem_limit = limit;
}

#define MAX_GAP (0x40000000UL >> PAGE_SHIFT)

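/*
 * Sort and sanity-check the firmware-provided physical memory ranges,
 * apply any "mem=" limit, set up the boot-time (bootmem) allocator for
 * each range/node, and reserve the regions that must never be handed
 * out: PAGE0/PDC memory, the kernel image, the bootmap itself, memory
 * holes and (optionally) the initrd.
 */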
static void __init setup_bootmem(void)
{
	unsigned long bootmap_size;
	unsigned long mem_max;
	unsigned long bootmap_pages;
	unsigned long bootmap_start_pfn;
	unsigned long bootmap_pfn;
#ifndef CONFIG_DISCONTIGMEM
	physmem_range_t pmem_holes[MAX_PHYSMEM_RANGES - 1];
	int npmem_holes;
#endif
	int i, sysram_resource_count;

	disable_sr_hashing(); /* Turn off space register hashing */

	/*
	 * Sort the ranges. Since the number of ranges is typically
	 * small, and performance is not an issue here, just do
	 * a simple insertion sort.
	 */

	for (i = 1; i < npmem_ranges; i++) {
		int j;

		for (j = i; j > 0; j--) {
			unsigned long tmp;

			if (pmem_ranges[j-1].start_pfn <
			    pmem_ranges[j].start_pfn) {

				break;
			}
			tmp = pmem_ranges[j-1].start_pfn;
			pmem_ranges[j-1].start_pfn = pmem_ranges[j].start_pfn;
			pmem_ranges[j].start_pfn = tmp;
			tmp = pmem_ranges[j-1].pages;
			pmem_ranges[j-1].pages = pmem_ranges[j].pages;
			pmem_ranges[j].pages = tmp;
		}
	}

#ifndef CONFIG_DISCONTIGMEM
	/*
	 * Throw out ranges that are too far apart (controlled by
	 * MAX_GAP).
	 */

	for (i = 1; i < npmem_ranges; i++) {
		if (pmem_ranges[i].start_pfn -
			(pmem_ranges[i-1].start_pfn +
			 pmem_ranges[i-1].pages) > MAX_GAP) {
			npmem_ranges = i;
			printk("Large gap in memory detected (%ld pages). "
			       "Consider turning on CONFIG_DISCONTIGMEM\n",
			       pmem_ranges[i].start_pfn -
				(pmem_ranges[i-1].start_pfn +
				 pmem_ranges[i-1].pages));
			break;
		}
	}
#endif

	if (npmem_ranges > 1) {

		/* Print the memory ranges */

		printk(KERN_INFO "Memory Ranges:\n");

		for (i = 0; i < npmem_ranges; i++) {
			unsigned long start;
			unsigned long size;

			size = (pmem_ranges[i].pages << PAGE_SHIFT);
			start = (pmem_ranges[i].start_pfn << PAGE_SHIFT);
			printk(KERN_INFO "%2d) Start 0x%016lx End 0x%016lx Size %6ld MB\n",
				i, start, start + (size - 1), size >> 20);
		}
	}

	sysram_resource_count = npmem_ranges;
	for (i = 0; i < sysram_resource_count; i++) {
		struct resource *res = &sysram_resources[i];
		res->name = "System RAM";
		res->start = pmem_ranges[i].start_pfn << PAGE_SHIFT;
		res->end = res->start + (pmem_ranges[i].pages << PAGE_SHIFT)-1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
		request_resource(&iomem_resource, res);
	}

	/*
	 * For 32 bit kernels we limit the amount of memory we can
	 * support, in order to preserve enough kernel address space
	 * for other purposes. For 64 bit kernels we don't normally
	 * limit the memory, but this mechanism can be used to
	 * artificially limit the amount of memory (and it is written
	 * to work with multiple memory ranges).
	 */

	mem_limit_func();	/* check for "mem=" argument */

	mem_max = 0;
	num_physpages = 0;
	for (i = 0; i < npmem_ranges; i++) {
		unsigned long rsize;

		rsize = pmem_ranges[i].pages << PAGE_SHIFT;
		if ((mem_max + rsize) > mem_limit) {
			printk(KERN_WARNING "Memory truncated to %ld MB\n", mem_limit >> 20);
			if (mem_max == mem_limit)
				npmem_ranges = i;
			else {
				pmem_ranges[i].pages = (mem_limit >> PAGE_SHIFT)
						       - (mem_max >> PAGE_SHIFT);
				npmem_ranges = i + 1;
				mem_max = mem_limit;
			}
			num_physpages += pmem_ranges[i].pages;
			break;
		}
		num_physpages += pmem_ranges[i].pages;
		mem_max += rsize;
	}

	printk(KERN_INFO "Total Memory: %ld MB\n", mem_max >> 20);

#ifndef CONFIG_DISCONTIGMEM
	/* Merge the ranges, keeping track of the holes */

	{
		unsigned long end_pfn;
		unsigned long hole_pages;

		npmem_holes = 0;
		end_pfn = pmem_ranges[0].start_pfn + pmem_ranges[0].pages;
		for (i = 1; i < npmem_ranges; i++) {

			hole_pages = pmem_ranges[i].start_pfn - end_pfn;
			if (hole_pages) {
				pmem_holes[npmem_holes].start_pfn = end_pfn;
				pmem_holes[npmem_holes++].pages = hole_pages;
				end_pfn += hole_pages;
			}
			end_pfn += pmem_ranges[i].pages;
		}

		pmem_ranges[0].pages = end_pfn - pmem_ranges[0].start_pfn;
		npmem_ranges = 1;
	}
#endif

	bootmap_pages = 0;
	for (i = 0; i < npmem_ranges; i++)
		bootmap_pages += bootmem_bootmap_pages(pmem_ranges[i].pages);

	bootmap_start_pfn = PAGE_ALIGN(__pa((unsigned long) &_end)) >> PAGE_SHIFT;

#ifdef CONFIG_DISCONTIGMEM
	for (i = 0; i < MAX_PHYSMEM_RANGES; i++) {
		memset(NODE_DATA(i), 0, sizeof(pg_data_t));
		NODE_DATA(i)->bdata = &bmem_data[i];
	}
	memset(pfnnid_map, 0xff, sizeof(pfnnid_map));

	for (i = 0; i < npmem_ranges; i++)
		node_set_online(i);
#endif

	/*
	 * Initialize and free the full range of memory in each range.
	 * Note that the only writing these routines do are to the bootmap,
	 * and we've made sure to locate the bootmap properly so that they
	 * won't be writing over anything important.
	 */

	bootmap_pfn = bootmap_start_pfn;
	max_pfn = 0;
	for (i = 0; i < npmem_ranges; i++) {
		unsigned long start_pfn;
		unsigned long npages;

		start_pfn = pmem_ranges[i].start_pfn;
		npages = pmem_ranges[i].pages;

		bootmap_size = init_bootmem_node(NODE_DATA(i),
						bootmap_pfn,
						start_pfn,
						(start_pfn + npages) );
		free_bootmem_node(NODE_DATA(i),
				  (start_pfn << PAGE_SHIFT),
				  (npages << PAGE_SHIFT) );
		bootmap_pfn += (bootmap_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
		if ((start_pfn + npages) > max_pfn)
			max_pfn = start_pfn + npages;
	}

	/* IOMMU is always used to access "high mem" on those boxes
	 * that can support enough mem that a PCI device couldn't
	 * directly DMA to any physical addresses.
	 * ISA DMA support will need to revisit this.
	 */
	max_low_pfn = max_pfn;

	if ((bootmap_pfn - bootmap_start_pfn) != bootmap_pages) {
		printk(KERN_WARNING "WARNING! bootmap sizing is messed up!\n");
		BUG();
	}

	/* reserve PAGE0 pdc memory, kernel text/data/bss & bootmap */

#define PDC_CONSOLE_IO_IODC_SIZE 32768

	reserve_bootmem_node(NODE_DATA(0), 0UL,
			(unsigned long)(PAGE0->mem_free +
				PDC_CONSOLE_IO_IODC_SIZE), BOOTMEM_DEFAULT);
	reserve_bootmem_node(NODE_DATA(0), __pa((unsigned long)_text),
			(unsigned long)(_end - _text), BOOTMEM_DEFAULT);
	reserve_bootmem_node(NODE_DATA(0), (bootmap_start_pfn << PAGE_SHIFT),
			((bootmap_pfn - bootmap_start_pfn) << PAGE_SHIFT),
			BOOTMEM_DEFAULT);

#ifndef CONFIG_DISCONTIGMEM

	/* reserve the holes */

	for (i = 0; i < npmem_holes; i++) {
		reserve_bootmem_node(NODE_DATA(0),
				(pmem_holes[i].start_pfn << PAGE_SHIFT),
				(pmem_holes[i].pages << PAGE_SHIFT),
				BOOTMEM_DEFAULT);
	}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start) {
		printk(KERN_INFO "initrd: %08lx-%08lx\n", initrd_start, initrd_end);
		if (__pa(initrd_start) < mem_max) {
			unsigned long initrd_reserve;

			if (__pa(initrd_end) > mem_max) {
				initrd_reserve = mem_max - __pa(initrd_start);
			} else {
				initrd_reserve = initrd_end - initrd_start;
			}
			initrd_below_start_ok = 1;
			printk(KERN_INFO "initrd: reserving %08lx-%08lx (mem_max %08lx)\n", __pa(initrd_start), __pa(initrd_start) + initrd_reserve, mem_max);

			reserve_bootmem_node(NODE_DATA(0), __pa(initrd_start),
					initrd_reserve, BOOTMEM_DEFAULT);
		}
	}
#endif

	data_resource.start = virt_to_phys(&data_start);
	data_resource.end = virt_to_phys(_end) - 1;
	code_resource.start = virt_to_phys(_text);
	code_resource.end = virt_to_phys(&data_start)-1;

	/* We don't know which region the kernel will be in, so try
	 * all of them.
	 */
	for (i = 0; i < sysram_resource_count; i++) {
		struct resource *res = &sysram_resources[i];
		request_resource(res, &code_resource);
		request_resource(res, &data_resource);
	}
	request_resource(&sysram_resources[0], &pdcdata_resource);
}

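/*
 * Free the memory occupied by the __init sections once boot is done.
 * With CONFIG_DEBUG_KERNEL the pages are first cleared to zero (BRK
 * insns on parisc), so stray jumps into freed init code trap instead
 * of executing leftover instructions.
 */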
void free_initmem(void)
{
	unsigned long addr, init_begin, init_end;

	printk(KERN_INFO "Freeing unused kernel memory: ");

#ifdef CONFIG_DEBUG_KERNEL
	/* Attempt to catch anyone trying to execute code here
	 * by filling the page with BRK insns.
	 *
	 * If we disable interrupts for all CPUs, then IPI stops working.
	 * Kinda breaks the global cache flushing.
	 */
	local_irq_disable();

	memset(__init_begin, 0x00,
		(unsigned long)__init_end - (unsigned long)__init_begin);

	flush_data_cache();
	asm volatile("sync" : : );
	flush_icache_range((unsigned long)__init_begin, (unsigned long)__init_end);
	asm volatile("sync" : : );

	local_irq_enable();
#endif

	/* align __init_begin and __init_end to page size,
	   ignoring linker script where we might have tried to save RAM */
	init_begin = PAGE_ALIGN((unsigned long)(__init_begin));
	init_end   = PAGE_ALIGN((unsigned long)(__init_end));
	for (addr = init_begin; addr < init_end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		free_page(addr);
		num_physpages++;
		totalram_pages++;
	}

	/* set up a new led state on systems shipped LED State panel */
	pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE);

	printk("%luk freed\n", (init_end - init_begin) >> 10);
}


#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void)
{
	/* rodata memory was already mapped with KERNEL_RO access rights by
	   pagetable_init() and map_pages(). No need to do additional stuff here */
	printk (KERN_INFO "Write protecting the kernel read-only data: %luk\n",
		(unsigned long)(__end_rodata - __start_rodata) >> 10);
}
#endif


/*
 * Just an arbitrary offset to serve as a "hole" between mapping areas
 * (between top of physical memory and a potential pcxl dma mapping
 * area, and below the vmalloc mapping area).
 *
 * The current 32K value just means that there will be a 32K "hole"
 * between mapping areas. That means that any out-of-bounds memory
 * accesses will hopefully be caught. The vmalloc() routines leaves
 * a hole of 4kB between each vmalloced area for the same reason.
 */

 /* Leave room for gateway page expansion */
#if KERNEL_MAP_START < GATEWAY_PAGE_SIZE
#error KERNEL_MAP_START is in gateway reserved region
#endif
#define MAP_START (KERNEL_MAP_START)

#define VM_MAP_OFFSET  (32*1024)
#define SET_MAP_OFFSET(x) ((void *)(((unsigned long)(x) + VM_MAP_OFFSET) \
				     & ~(VM_MAP_OFFSET-1)))

void *vmalloc_start __read_mostly;
EXPORT_SYMBOL(vmalloc_start);

#ifdef CONFIG_PA11
unsigned long pcxl_dma_start __read_mostly;
#endif

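/*
 * mem_init() hands the bootmem pages over to the buddy allocator,
 * counts reserved pages, picks the vmalloc (and, on PA11, the PCXL DMA)
 * start addresses and prints the usual memory summary.
 */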
void __init mem_init(void)
{
	int codesize, reservedpages, datasize, initsize;

	high_memory = __va((max_pfn << PAGE_SHIFT));

#ifndef CONFIG_DISCONTIGMEM
	max_mapnr = page_to_pfn(virt_to_page(high_memory - 1)) + 1;
	totalram_pages += free_all_bootmem();
#else
	{
		int i;

		for (i = 0; i < npmem_ranges; i++)
			totalram_pages += free_all_bootmem_node(NODE_DATA(i));
	}
#endif

	codesize = (unsigned long)_etext - (unsigned long)_text;
	datasize = (unsigned long)_edata - (unsigned long)_etext;
	initsize = (unsigned long)__init_end - (unsigned long)__init_begin;

	reservedpages = 0;
{
	unsigned long pfn;
#ifdef CONFIG_DISCONTIGMEM
	int i;

	for (i = 0; i < npmem_ranges; i++) {
		for (pfn = node_start_pfn(i); pfn < node_end_pfn(i); pfn++) {
			if (PageReserved(pfn_to_page(pfn)))
				reservedpages++;
		}
	}
#else /* !CONFIG_DISCONTIGMEM */
	for (pfn = 0; pfn < max_pfn; pfn++) {
		/*
		 * Only count reserved RAM pages
		 */
		if (PageReserved(pfn_to_page(pfn)))
			reservedpages++;
	}
#endif
}

#ifdef CONFIG_PA11
	if (hppa_dma_ops == &pcxl_dma_ops) {
		pcxl_dma_start = (unsigned long)SET_MAP_OFFSET(MAP_START);
		vmalloc_start = SET_MAP_OFFSET(pcxl_dma_start + PCXL_DMA_MAP_SIZE);
	} else {
		pcxl_dma_start = 0;
		vmalloc_start = SET_MAP_OFFSET(MAP_START);
	}
#else
	vmalloc_start = SET_MAP_OFFSET(MAP_START);
#endif

	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init)\n",
		(unsigned long)nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		initsize >> 10
	);

#ifdef CONFIG_DEBUG_KERNEL /* double-sanity-check paranoia */
	printk("virtual kernel memory layout:\n"
	       "    vmalloc : 0x%p - 0x%p   (%4ld MB)\n"
	       "    memory  : 0x%p - 0x%p   (%4ld MB)\n"
	       "      .init : 0x%p - 0x%p   (%4ld kB)\n"
	       "      .data : 0x%p - 0x%p   (%4ld kB)\n"
	       "      .text : 0x%p - 0x%p   (%4ld kB)\n",

	       (void*)VMALLOC_START, (void*)VMALLOC_END,
	       (VMALLOC_END - VMALLOC_START) >> 20,

	       __va(0), high_memory,
	       ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,

	       __init_begin, __init_end,
	       ((unsigned long)__init_end - (unsigned long)__init_begin) >> 10,

	       _etext, _edata,
	       ((unsigned long)_edata - (unsigned long)_etext) >> 10,

	       _text, _etext,
	       ((unsigned long)_etext - (unsigned long)_text) >> 10);
#endif
}

unsigned long *empty_zero_page __read_mostly;
EXPORT_SYMBOL(empty_zero_page);

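/* Dump a summary of page usage (free/reserved/shared/swap-cached) and,
 * with CONFIG_DISCONTIGMEM, the zonelists of every online node. */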
void show_mem(void)
{
	int i,free = 0,total = 0,reserved = 0;
	int shared = 0, cached = 0;

	printk(KERN_INFO "Mem-info:\n");
	show_free_areas();
#ifndef CONFIG_DISCONTIGMEM
	i = max_mapnr;
	while (i-- > 0) {
		total++;
		if (PageReserved(mem_map+i))
			reserved++;
		else if (PageSwapCache(mem_map+i))
			cached++;
		else if (!page_count(&mem_map[i]))
			free++;
		else
			shared += page_count(&mem_map[i]) - 1;
	}
#else
	for (i = 0; i < npmem_ranges; i++) {
		int j;

		for (j = node_start_pfn(i); j < node_end_pfn(i); j++) {
			struct page *p;
			unsigned long flags;

			pgdat_resize_lock(NODE_DATA(i), &flags);
			p = nid_page_nr(i, j) - node_start_pfn(i);

			total++;
			if (PageReserved(p))
				reserved++;
			else if (PageSwapCache(p))
				cached++;
			else if (!page_count(p))
				free++;
			else
				shared += page_count(p) - 1;
			pgdat_resize_unlock(NODE_DATA(i), &flags);
		}
	}
#endif
	printk(KERN_INFO "%d pages of RAM\n", total);
	printk(KERN_INFO "%d reserved pages\n", reserved);
	printk(KERN_INFO "%d pages shared\n", shared);
	printk(KERN_INFO "%d pages swap cached\n", cached);


#ifdef CONFIG_DISCONTIGMEM
	{
		struct zonelist *zl;
		int i, j;

		for (i = 0; i < npmem_ranges; i++) {
			zl = node_zonelist(i, 0);
			for (j = 0; j < MAX_NR_ZONES; j++) {
				struct zoneref *z;
				struct zone *zone;

				printk("Zone list for zone %d on node %d: ", j, i);
				for_each_zone_zonelist(zone, z, zl, j)
					printk("[%d/%s] ", zone_to_nid(zone),
						zone->name);
				printk("\n");
			}
		}
	}
#endif
}


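/*
 * Create kernel page table entries mapping the physical range
 * [start_paddr, start_paddr + size) at the virtual address start_vaddr
 * with the given protection.  On 4kB page size configurations, kernel
 * text/rodata is mapped read-only, except for the fault vector and the
 * gateway page, which stay writable.
 */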
static void __init map_pages(unsigned long start_vaddr, unsigned long start_paddr, unsigned long size, pgprot_t pgprot)
{
	pgd_t *pg_dir;
	pmd_t *pmd;
	pte_t *pg_table;
	unsigned long end_paddr;
	unsigned long start_pmd;
	unsigned long start_pte;
	unsigned long tmp1;
	unsigned long tmp2;
	unsigned long address;
	unsigned long ro_start;
	unsigned long ro_end;
	unsigned long fv_addr;
	unsigned long gw_addr;
	extern const unsigned long fault_vector_20;
	extern void * const linux_gateway_page;

	ro_start = __pa((unsigned long)_text);
	ro_end   = __pa((unsigned long)&data_start);
	fv_addr  = __pa((unsigned long)&fault_vector_20) & PAGE_MASK;
	gw_addr  = __pa((unsigned long)&linux_gateway_page) & PAGE_MASK;

	end_paddr = start_paddr + size;

	pg_dir = pgd_offset_k(start_vaddr);

#if PTRS_PER_PMD == 1
	start_pmd = 0;
#else
	start_pmd = ((start_vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
#endif
	start_pte = ((start_vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));

	address = start_paddr;
	while (address < end_paddr) {
#if PTRS_PER_PMD == 1
		pmd = (pmd_t *)__pa(pg_dir);
#else
		pmd = (pmd_t *)pgd_address(*pg_dir);

		/*
		 * pmd is physical at this point
		 */

		if (!pmd) {
			pmd = (pmd_t *) alloc_bootmem_low_pages_node(NODE_DATA(0),PAGE_SIZE << PMD_ORDER);
			pmd = (pmd_t *) __pa(pmd);
		}

		pgd_populate(NULL, pg_dir, __va(pmd));
#endif
		pg_dir++;

		/* now change pmd to kernel virtual addresses */

		pmd = (pmd_t *)__va(pmd) + start_pmd;
		for (tmp1 = start_pmd; tmp1 < PTRS_PER_PMD; tmp1++,pmd++) {

			/*
			 * pg_table is physical at this point
			 */

			pg_table = (pte_t *)pmd_address(*pmd);
			if (!pg_table) {
				pg_table = (pte_t *)
					alloc_bootmem_low_pages_node(NODE_DATA(0),PAGE_SIZE);
				pg_table = (pte_t *) __pa(pg_table);
			}

			pmd_populate_kernel(NULL, pmd, __va(pg_table));

			/* now change pg_table to kernel virtual addresses */

			pg_table = (pte_t *) __va(pg_table) + start_pte;
			for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++,pg_table++) {
				pte_t pte;

				/*
				 * Map the fault vector writable so we can
				 * write the HPMC checksum.
				 */
#if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
				if (address >= ro_start && address < ro_end
							&& address != fv_addr
							&& address != gw_addr)
					pte = __mk_pte(address, PAGE_KERNEL_RO);
				else
#endif
					pte = __mk_pte(address, pgprot);

				if (address >= end_paddr)
					pte_val(pte) = 0;

				set_pte(pg_table, pte);

				address += PAGE_SIZE;
			}
			start_pte = 0;

			if (address >= end_paddr)
				break;
		}
		start_pmd = 0;
	}
}

/*
 * pagetable_init() sets up the page tables
 *
 * Note that gateway_init() places the Linux gateway page at page 0.
 * Since gateway pages cannot be dereferenced this has the desirable
 * side effect of trapping those pesky NULL-reference errors in the
 * kernel.
 */
static void __init pagetable_init(void)
{
	int range;

	/* Map each physical memory range to its kernel vaddr */

	for (range = 0; range < npmem_ranges; range++) {
		unsigned long start_paddr;
		unsigned long end_paddr;
		unsigned long size;

		start_paddr = pmem_ranges[range].start_pfn << PAGE_SHIFT;
		end_paddr = start_paddr + (pmem_ranges[range].pages << PAGE_SHIFT);
		size = pmem_ranges[range].pages << PAGE_SHIFT;

		map_pages((unsigned long)__va(start_paddr), start_paddr,
			size, PAGE_KERNEL);
	}

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_end && initrd_end > mem_limit) {
		printk(KERN_INFO "initrd: mapping %08lx-%08lx\n", initrd_start, initrd_end);
		map_pages(initrd_start, __pa(initrd_start),
			initrd_end - initrd_start, PAGE_KERNEL);
	}
#endif

	empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
	memset(empty_zero_page, 0, PAGE_SIZE);
}

static void __init gateway_init(void)
{
	unsigned long linux_gateway_page_addr;
	/* FIXME: This is 'const' in order to trick the compiler
	   into not treating it as DP-relative data. */
	extern void * const linux_gateway_page;

	linux_gateway_page_addr = LINUX_GATEWAY_ADDR & PAGE_MASK;

	/*
	 * Setup Linux Gateway page.
	 *
	 * The Linux gateway page will reside in kernel space (on virtual
	 * page 0), so it doesn't need to be aliased into user space.
	 */

	map_pages(linux_gateway_page_addr, __pa(&linux_gateway_page),
		PAGE_SIZE, PAGE_GATEWAY);
}

#ifdef CONFIG_HPUX
void
map_hpux_gateway_page(struct task_struct *tsk, struct mm_struct *mm)
{
	pgd_t *pg_dir;
	pmd_t *pmd;
	pte_t *pg_table;
	unsigned long start_pmd;
	unsigned long start_pte;
	unsigned long address;
	unsigned long hpux_gw_page_addr;
	/* FIXME: This is 'const' in order to trick the compiler
	   into not treating it as DP-relative data. */
	extern void * const hpux_gateway_page;

	hpux_gw_page_addr = HPUX_GATEWAY_ADDR & PAGE_MASK;

	/*
	 * Setup HP-UX Gateway page.
	 *
	 * The HP-UX gateway page resides in the user address space,
	 * so it needs to be aliased into each process.
	 */

	pg_dir = pgd_offset(mm,hpux_gw_page_addr);

#if PTRS_PER_PMD == 1
	start_pmd = 0;
#else
	start_pmd = ((hpux_gw_page_addr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
#endif
	start_pte = ((hpux_gw_page_addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));

	address = __pa(&hpux_gateway_page);
#if PTRS_PER_PMD == 1
	pmd = (pmd_t *)__pa(pg_dir);
#else
	pmd = (pmd_t *) pgd_address(*pg_dir);

	/*
	 * pmd is physical at this point
	 */

	if (!pmd) {
		pmd = (pmd_t *) get_zeroed_page(GFP_KERNEL);
		pmd = (pmd_t *) __pa(pmd);
	}

	__pgd_val_set(*pg_dir, PxD_FLAG_PRESENT | PxD_FLAG_VALID | (unsigned long) pmd);
#endif
	/* now change pmd to kernel virtual addresses */

	pmd = (pmd_t *)__va(pmd) + start_pmd;

	/*
	 * pg_table is physical at this point
	 */

	pg_table = (pte_t *) pmd_address(*pmd);
	if (!pg_table)
		pg_table = (pte_t *) __pa(get_zeroed_page(GFP_KERNEL));

	__pmd_val_set(*pmd, PxD_FLAG_PRESENT | PxD_FLAG_VALID | (unsigned long) pg_table);

	/* now change pg_table to kernel virtual addresses */

	pg_table = (pte_t *) __va(pg_table) + start_pte;
	set_pte(pg_table, __mk_pte(address, PAGE_GATEWAY));
}
EXPORT_SYMBOL(map_hpux_gateway_page);
#endif

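/*
 * paging_init() is the arch entry point for memory setup: it runs
 * setup_bootmem(), builds the kernel page tables and the gateway page,
 * flushes caches and TLB to a known state, and initializes the zone
 * data for every memory range/node.
 */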
void __init paging_init(void)
{
	int i;

	setup_bootmem();
	pagetable_init();
	gateway_init();
	flush_cache_all_local(); /* start with known state */
	flush_tlb_all_local(NULL);

	for (i = 0; i < npmem_ranges; i++) {
		unsigned long zones_size[MAX_NR_ZONES] = { 0, };

		zones_size[ZONE_NORMAL] = pmem_ranges[i].pages;

#ifdef CONFIG_DISCONTIGMEM
		/* Need to initialize the pfnnid_map before we can initialize
		   the zone */
		{
			int j;
			for (j = (pmem_ranges[i].start_pfn >> PFNNID_SHIFT);
			     j <= ((pmem_ranges[i].start_pfn + pmem_ranges[i].pages) >> PFNNID_SHIFT);
			     j++) {
				pfnnid_map[j] = i;
			}
		}
#endif

		free_area_init_node(i, NODE_DATA(i), zones_size,
				pmem_ranges[i].start_pfn, NULL);
	}
}

#ifdef CONFIG_PA20

/*
 * Currently, all PA20 chips have 18 bit protection IDs, which is the
 * limiting factor (space ids are 32 bits).
 */

#define NR_SPACE_IDS 262144

#else

/*
 * Currently we have a one-to-one relationship between space IDs and
 * protection IDs. Older parisc chips (PCXS, PCXT, PCXL, PCXL2) only
 * support 15 bit protection IDs, so that is the limiting factor.
 * PCXT' has 18 bit protection IDs, but only 16 bit spaceids, so it's
 * probably not worth the effort for a special case here.
 */

#define NR_SPACE_IDS 32768

#endif  /* !CONFIG_PA20 */

#define RECYCLE_THRESHOLD (NR_SPACE_IDS / 2)
#define SID_ARRAY_SIZE (NR_SPACE_IDS / (8 * sizeof(long)))

static unsigned long space_id[SID_ARRAY_SIZE] = { 1 }; /* disallow space 0 */
static unsigned long dirty_space_id[SID_ARRAY_SIZE];
static unsigned long space_id_index;
static unsigned long free_space_ids = NR_SPACE_IDS - 1;
static unsigned long dirty_space_ids = 0;

static DEFINE_SPINLOCK(sid_lock);

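/*
 * Allocate a space ID.  If none are free, force a flush_tlb_all() so
 * that the "dirty" IDs (released but not yet flushed from the TLB) are
 * recycled and become available again.
 */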
unsigned long alloc_sid(void)
{
	unsigned long index;

	spin_lock(&sid_lock);

	if (free_space_ids == 0) {
		if (dirty_space_ids != 0) {
			spin_unlock(&sid_lock);
			flush_tlb_all(); /* flush_tlb_all() calls recycle_sids() */
			spin_lock(&sid_lock);
		}
		BUG_ON(free_space_ids == 0);
	}

	free_space_ids--;

	index = find_next_zero_bit(space_id, NR_SPACE_IDS, space_id_index);
	space_id[index >> SHIFT_PER_LONG] |= (1L << (index & (BITS_PER_LONG - 1)));
	space_id_index = index;

	spin_unlock(&sid_lock);

	return index << SPACEID_SHIFT;
}

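/*
 * Release a space ID.  The ID is only marked dirty here; it becomes
 * allocatable again once recycle_sids() runs after a full TLB flush.
 */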
void free_sid(unsigned long spaceid)
{
	unsigned long index = spaceid >> SPACEID_SHIFT;
	unsigned long *dirty_space_offset;

	dirty_space_offset = dirty_space_id + (index >> SHIFT_PER_LONG);
	index &= (BITS_PER_LONG - 1);

	spin_lock(&sid_lock);

	BUG_ON(*dirty_space_offset & (1L << index)); /* attempt to free space id twice */

	*dirty_space_offset |= (1L << index);
	dirty_space_ids++;

	spin_unlock(&sid_lock);
}


#ifdef CONFIG_SMP
static void get_dirty_sids(unsigned long *ndirtyptr,unsigned long *dirty_array)
{
	int i;

	/* NOTE: sid_lock must be held upon entry */

	*ndirtyptr = dirty_space_ids;
	if (dirty_space_ids != 0) {
		for (i = 0; i < SID_ARRAY_SIZE; i++) {
			dirty_array[i] = dirty_space_id[i];
			dirty_space_id[i] = 0;
		}
		dirty_space_ids = 0;
	}

	return;
}

static void recycle_sids(unsigned long ndirty,unsigned long *dirty_array)
{
	int i;

	/* NOTE: sid_lock must be held upon entry */

	if (ndirty != 0) {
		for (i = 0; i < SID_ARRAY_SIZE; i++) {
			space_id[i] ^= dirty_array[i];
		}

		free_space_ids += ndirty;
		space_id_index = 0;
	}
}

#else /* CONFIG_SMP */

static void recycle_sids(void)
{
	int i;

	/* NOTE: sid_lock must be held upon entry */

	if (dirty_space_ids != 0) {
		for (i = 0; i < SID_ARRAY_SIZE; i++) {
			space_id[i] ^= dirty_space_id[i];
			dirty_space_id[i] = 0;
		}

		free_space_ids += dirty_space_ids;
		dirty_space_ids = 0;
		space_id_index = 0;
	}
}
#endif

/*
 * flush_tlb_all() calls recycle_sids(), since whenever the entire tlb is
 * purged, we can safely reuse the space ids that were released but
 * not flushed from the tlb.
 */

#ifdef CONFIG_SMP

static unsigned long recycle_ndirty;
static unsigned long recycle_dirty_array[SID_ARRAY_SIZE];
static unsigned int recycle_inuse;

void flush_tlb_all(void)
{
	int do_recycle;

	do_recycle = 0;
	spin_lock(&sid_lock);
	if (dirty_space_ids > RECYCLE_THRESHOLD) {
		BUG_ON(recycle_inuse);  /* FIXME: Use a semaphore/wait queue here */
		get_dirty_sids(&recycle_ndirty,recycle_dirty_array);
		recycle_inuse++;
		do_recycle++;
	}
	spin_unlock(&sid_lock);
	on_each_cpu(flush_tlb_all_local, NULL, 1, 1);
	if (do_recycle) {
		spin_lock(&sid_lock);
		recycle_sids(recycle_ndirty,recycle_dirty_array);
		recycle_inuse = 0;
		spin_unlock(&sid_lock);
	}
}
#else
void flush_tlb_all(void)
{
	spin_lock(&sid_lock);
	flush_tlb_all_local(NULL);
	recycle_sids();
	spin_unlock(&sid_lock);
}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start >= end)
		return;
	printk(KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		init_page_count(virt_to_page(start));
		free_page(start);
		num_physpages++;
		totalram_pages++;
	}
}
#endif