/*
 * Initialize MMU support.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/kernel.h>
#include <linux/init.h>

#include <linux/bootmem.h>
#include <linux/efi.h>
#include <linux/elf.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/bitops.h>
#include <linux/kexec.h>

#include <asm/a.out.h>
#include <asm/dma.h>
#include <asm/ia32.h>
#include <asm/io.h>
#include <asm/machvec.h>
#include <asm/numa.h>
#include <asm/patch.h>
#include <asm/pgalloc.h>
#include <asm/sal.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/tlb.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/mca.h>

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

extern void ia64_tlb_init (void);

unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;

#ifdef CONFIG_VIRTUAL_MEM_MAP
unsigned long vmalloc_end = VMALLOC_END_INIT;
EXPORT_SYMBOL(vmalloc_end);
struct page *vmem_map;
EXPORT_SYMBOL(vmem_map);
#endif

struct page *zero_page_memmap_ptr;	/* map entry for zero page */
EXPORT_SYMBOL(zero_page_memmap_ptr);

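/*
 * Lazily sync the i-cache with the d-cache before a page can be mapped
 * into an executable vm-area: flush the page's i-cache lines unless
 * PG_arch_1 records that the page is already coherent.
 */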
void
__ia64_sync_icache_dcache (pte_t pte)
{
	unsigned long addr;
	struct page *page;

	page = pte_page(pte);
	addr = (unsigned long) page_address(page);

	if (test_bit(PG_arch_1, &page->flags))
		return;				/* i-cache is already coherent with d-cache */

	flush_icache_range(addr, addr + (PAGE_SIZE << compound_order(page)));
	set_bit(PG_arch_1, &page->flags);	/* mark page as clean */
}

/*
 * Since DMA is i-cache coherent, any (complete) pages that were written via
 * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
 * flush them when they get mapped into an executable vm-area.
 */
void
dma_mark_clean(void *addr, size_t size)
{
	unsigned long pg_addr, end;

	pg_addr = PAGE_ALIGN((unsigned long) addr);
	end = (unsigned long) addr + size;
	while (pg_addr + PAGE_SIZE <= end) {
		struct page *page = virt_to_page(pg_addr);
		set_bit(PG_arch_1, &page->flags);
		pg_addr += PAGE_SIZE;
	}
}

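/*
 * Place the bottom of the register backing store at the hard RLIMIT_STACK
 * maximum (capped at MAX_USER_STACK_SIZE) below the start of the memory
 * stack, page-aligned.
 */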
inline void
ia64_set_rbs_bot (void)
{
	unsigned long stack_size = current->signal->rlim[RLIMIT_STACK].rlim_max & -16;

	if (stack_size > MAX_USER_STACK_SIZE)
		stack_size = MAX_USER_STACK_SIZE;
	current->thread.rbs_bot = PAGE_ALIGN(current->mm->start_stack - stack_size);
}

/*
 * This performs some platform-dependent address space initialization.
 * On IA-64, we want to setup the VM area for the register backing
 * store (which grows upwards) and install the gateway page which is
 * used for signal trampolines, etc.
 */
void
ia64_init_addr_space (void)
{
	struct vm_area_struct *vma;

	ia64_set_rbs_bot();

	/*
	 * If we're out of memory and kmem_cache_alloc() returns NULL, we simply ignore
	 * the problem.  When the process attempts to write to the register backing store
	 * for the first time, it will get a SEGFAULT in this case.
	 */
	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
	if (vma) {
		vma->vm_mm = current->mm;
		vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
		vma->vm_end = vma->vm_start + PAGE_SIZE;
		vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
		down_write(&current->mm->mmap_sem);
		if (insert_vm_struct(current->mm, vma)) {
			up_write(&current->mm->mmap_sem);
			kmem_cache_free(vm_area_cachep, vma);
			return;
		}
		up_write(&current->mm->mmap_sem);
	}

	/* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
	if (!(current->personality & MMAP_PAGE_ZERO)) {
		vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
		if (vma) {
			vma->vm_mm = current->mm;
			vma->vm_end = PAGE_SIZE;
			vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
			vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO | VM_RESERVED;
			down_write(&current->mm->mmap_sem);
			if (insert_vm_struct(current->mm, vma)) {
				up_write(&current->mm->mmap_sem);
				kmem_cache_free(vm_area_cachep, vma);
				return;
			}
			up_write(&current->mm->mmap_sem);
		}
	}
}

void
free_initmem (void)
{
	unsigned long addr, eaddr;

	addr = (unsigned long) ia64_imva(__init_begin);
	eaddr = (unsigned long) ia64_imva(__init_end);
	while (addr < eaddr) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		free_page(addr);
		++totalram_pages;
		addr += PAGE_SIZE;
	}
	printk(KERN_INFO "Freeing unused kernel memory: %ldkB freed\n",
	       (__init_end - __init_begin) >> 10);
}

void __init
free_initrd_mem (unsigned long start, unsigned long end)
{
	struct page *page;
	/*
	 * EFI uses 4KB pages while the kernel can use 4KB or bigger.
	 * Thus EFI and the kernel may have different page sizes. It is
	 * therefore possible to have the initrd share the same page as
	 * the end of the kernel (given current setup).
	 *
	 * To avoid freeing/using the wrong page (kernel sized) we:
	 *	- align up the beginning of initrd
	 *	- align down the end of initrd
	 *
	 *  |             |
	 *  |=============| a000
	 *  |             |
	 *  |             |
	 *  |             | 9000
	 *  |/////////////|
	 *  |/////////////|
	 *  |=============| 8000
	 *  |///INITRD////|
	 *  |/////////////|
	 *  |/////////////| 7000
	 *  |             |
	 *  |KKKKKKKKKKKKK|
	 *  |=============| 6000
	 *  |KKKKKKKKKKKKK|
	 *  |KKKKKKKKKKKKK|
	 *  K=kernel using 8KB pages
	 *
	 * In this example, we must free page 8000 ONLY. So we must align up
	 * initrd_start and keep initrd_end as is.
	 */
	start = PAGE_ALIGN(start);
	end = end & PAGE_MASK;

	if (start < end)
		printk(KERN_INFO "Freeing initrd memory: %ldkB freed\n", (end - start) >> 10);

	for (; start < end; start += PAGE_SIZE) {
		if (!virt_addr_valid(start))
			continue;
		page = virt_to_page(start);
		ClearPageReserved(page);
		init_page_count(page);
		free_page(start);
		++totalram_pages;
	}
}

/*
 * This installs a clean page in the kernel's page table.
 */
static struct page * __init
put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (!PageReserved(page))
		printk(KERN_ERR "put_kernel_page: page at 0x%p not in reserved memory\n",
		       page_address(page));

	pgd = pgd_offset_k(address);	/* note: this is NOT pgd_offset()! */

	{
		pud = pud_alloc(&init_mm, pgd, address);
		if (!pud)
			goto out;
		pmd = pmd_alloc(&init_mm, pud, address);
		if (!pmd)
			goto out;
		pte = pte_alloc_kernel(pmd, address);
		if (!pte)
			goto out;
		if (!pte_none(*pte))
			goto out;
		set_pte(pte, mk_pte(page, pgprot));
	}
  out:
	/* no need for flush_tlb */
	return page;
}

static void __init
setup_gate (void)
{
	struct page *page;

	/*
	 * Map the gate page twice: once read-only to export the ELF
	 * headers etc. and once execute-only to enable
	 * privilege-promotion via "epc":
	 */
	page = virt_to_page(ia64_imva(__start_gate_section));
	put_kernel_page(page, GATE_ADDR, PAGE_READONLY);
#ifdef HAVE_BUGGY_SEGREL
	page = virt_to_page(ia64_imva(__start_gate_section + PAGE_SIZE));
	put_kernel_page(page, GATE_ADDR + PAGE_SIZE, PAGE_GATE);
#else
	put_kernel_page(page, GATE_ADDR + PERCPU_PAGE_SIZE, PAGE_GATE);
	/* Fill in the holes (if any) with read-only zero pages: */
	{
		unsigned long addr;

		for (addr = GATE_ADDR + PAGE_SIZE;
		     addr < GATE_ADDR + PERCPU_PAGE_SIZE;
		     addr += PAGE_SIZE)
		{
			put_kernel_page(ZERO_PAGE(0), addr,
					PAGE_READONLY);
			put_kernel_page(ZERO_PAGE(0), addr + PERCPU_PAGE_SIZE,
					PAGE_READONLY);
		}
	}
#endif
	ia64_patch_gate();
}

void __devinit
ia64_mmu_init (void *my_cpu_data)
{
	unsigned long pta, impl_va_bits;
	extern void __devinit tlb_init (void);

#ifdef CONFIG_DISABLE_VHPT
#	define VHPT_ENABLE_BIT	0
#else
#	define VHPT_ENABLE_BIT	1
#endif

	/*
	 * Check if the virtually mapped linear page table (VMLPT) overlaps with a mapped
	 * address space.  The IA-64 architecture guarantees that at least 50 bits of
	 * virtual address space are implemented but if we pick a large enough page size
	 * (e.g., 64KB), the mapped address space is big enough that it will overlap with
	 * VMLPT.  I assume that once we run on machines big enough to warrant 64KB pages,
	 * IMPL_VA_MSB will be significantly bigger, so this is unlikely to become a
	 * problem in practice.  Alternatively, we could truncate the top of the mapped
	 * address space to not permit mappings that would overlap with the VMLPT.
	 * --davidm 00/12/06
	 */
#	define pte_bits			3
#	define mapped_space_bits	(3*(PAGE_SHIFT - pte_bits) + PAGE_SHIFT)
	/*
	 * The virtual page table has to cover the entire implemented address space within
	 * a region even though not all of this space may be mappable.  The reason for
	 * this is that the Access bit and Dirty bit fault handlers perform
	 * non-speculative accesses to the virtual page table, so the address range of the
	 * virtual page table itself needs to be covered by the virtual page table.
	 */
#	define vmlpt_bits		(impl_va_bits - PAGE_SHIFT + pte_bits)
#	define POW2(n)			(1ULL << (n))

	impl_va_bits = ffz(~(local_cpu_data->unimpl_va_mask | (7UL << 61)));

	if (impl_va_bits < 51 || impl_va_bits > 61)
		panic("CPU has bogus IMPL_VA_MSB value of %lu!\n", impl_va_bits - 1);
	/*
	 * mapped_space_bits - PAGE_SHIFT is the total number of ptes we need,
	 * which must fit into "vmlpt_bits - pte_bits" slots.  Second half of
	 * the test makes sure that our mapped space doesn't overlap the
	 * unimplemented hole in the middle of the region.
	 */
	if ((mapped_space_bits - PAGE_SHIFT > vmlpt_bits - pte_bits) ||
	    (mapped_space_bits > impl_va_bits - 1))
		panic("Cannot build a big enough virtual-linear page table"
		      " to cover mapped address space.\n"
		      " Try using a smaller page size.\n");

	/* place the VMLPT at the end of each page-table mapped region: */
	pta = POW2(61) - POW2(vmlpt_bits);

	/*
	 * Set the (virtually mapped linear) page table address.  Bit
	 * 8 selects between the short and long format, bits 2-7 the
	 * size of the table, and bit 0 whether the VHPT walker is
	 * enabled.
	 */
	ia64_set_pta(pta | (0 << 8) | (vmlpt_bits << 2) | VHPT_ENABLE_BIT);
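	/*
	 * Illustrative example (not part of the original source): with the
	 * default 16KB pages (PAGE_SHIFT = 14) and impl_va_bits = 51,
	 * vmlpt_bits = 51 - 14 + 3 = 40, so pta = 2^61 - 2^40 and the PTA
	 * register value is pta | (40 << 2) | VHPT_ENABLE_BIT: a short-format
	 * 2^40-byte table at the top of each region.
	 */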

	ia64_tlb_init();

#ifdef	CONFIG_HUGETLB_PAGE
	ia64_set_rr(HPAGE_REGION_BASE, HPAGE_SHIFT << 2);
	ia64_srlz_d();
#endif
}

#ifdef CONFIG_VIRTUAL_MEM_MAP
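/*
 * Starting at struct page index i within the given node's vmem_map, walk
 * the kernel page tables to find the next vmem_map page that is actually
 * mapped, and return its index relative to node_start_pfn.  This lets
 * callers skip holes in a sparsely populated virtual mem_map.
 */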
int vmemmap_find_next_valid_pfn(int node, int i)
{
	unsigned long end_address, hole_next_pfn;
	unsigned long stop_address;
	pg_data_t *pgdat = NODE_DATA(node);

	end_address = (unsigned long) &vmem_map[pgdat->node_start_pfn + i];
	end_address = PAGE_ALIGN(end_address);

	stop_address = (unsigned long) &vmem_map[
		pgdat->node_start_pfn + pgdat->node_spanned_pages];

	do {
		pgd_t *pgd;
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		pgd = pgd_offset_k(end_address);
		if (pgd_none(*pgd)) {
			end_address += PGDIR_SIZE;
			continue;
		}

		pud = pud_offset(pgd, end_address);
		if (pud_none(*pud)) {
			end_address += PUD_SIZE;
			continue;
		}

		pmd = pmd_offset(pud, end_address);
		if (pmd_none(*pmd)) {
			end_address += PMD_SIZE;
			continue;
		}

		pte = pte_offset_kernel(pmd, end_address);
retry_pte:
		if (pte_none(*pte)) {
			end_address += PAGE_SIZE;
			pte++;
			if ((end_address < stop_address) &&
			    (end_address != ALIGN(end_address, 1UL << PMD_SHIFT)))
				goto retry_pte;
			continue;
		}
		/* Found next valid vmem_map page */
		break;
	} while (end_address < stop_address);

	end_address = min(end_address, stop_address);
	end_address = end_address - (unsigned long) vmem_map + sizeof(struct page) - 1;
	hole_next_pfn = end_address / sizeof(struct page);
	return hole_next_pfn - pgdat->node_start_pfn;
}

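/*
 * Allocate and install the kernel page-table entries needed to back the
 * slice of the virtual mem_map that covers the physical range
 * [start, end), using bootmem pages from the node owning that range.
 */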
int __init
create_mem_map_page_table (u64 start, u64 end, void *arg)
{
	unsigned long address, start_page, end_page;
	struct page *map_start, *map_end;
	int node;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
	map_end   = vmem_map + (__pa(end) >> PAGE_SHIFT);

	start_page = (unsigned long) map_start & PAGE_MASK;
	end_page = PAGE_ALIGN((unsigned long) map_end);
	node = paddr_to_nid(__pa(start));

	for (address = start_page; address < end_page; address += PAGE_SIZE) {
		pgd = pgd_offset_k(address);
		if (pgd_none(*pgd))
			pgd_populate(&init_mm, pgd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
		pud = pud_offset(pgd, address);

		if (pud_none(*pud))
			pud_populate(&init_mm, pud, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
		pmd = pmd_offset(pud, address);

		if (pmd_none(*pmd))
			pmd_populate_kernel(&init_mm, pmd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
		pte = pte_offset_kernel(pmd, address);

		if (pte_none(*pte))
			set_pte(pte, pfn_pte(__pa(alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE)) >> PAGE_SHIFT,
					     PAGE_KERNEL));
	}
	return 0;
}

struct memmap_init_callback_data {
	struct page *start;
	struct page *end;
	int nid;
	unsigned long zone;
};

static int __meminit
virtual_memmap_init (u64 start, u64 end, void *arg)
{
	struct memmap_init_callback_data *args;
	struct page *map_start, *map_end;

	args = (struct memmap_init_callback_data *) arg;
	map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
	map_end   = vmem_map + (__pa(end) >> PAGE_SHIFT);

	if (map_start < args->start)
		map_start = args->start;
	if (map_end > args->end)
		map_end = args->end;

	/*
	 * We have to initialize "out of bounds" struct page elements that fit completely
	 * on the same pages that were allocated for the "in bounds" elements because they
	 * may be referenced later (and found to be "reserved").
	 */
	map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1)) / sizeof(struct page);
	map_end += ((PAGE_ALIGN((unsigned long) map_end) - (unsigned long) map_end)
		    / sizeof(struct page));

	if (map_start < map_end)
		memmap_init_zone((unsigned long)(map_end - map_start),
				 args->nid, args->zone, page_to_pfn(map_start),
				 MEMMAP_EARLY);
	return 0;
}

void __meminit
memmap_init (unsigned long size, int nid, unsigned long zone,
	     unsigned long start_pfn)
{
	if (!vmem_map)
		memmap_init_zone(size, nid, zone, start_pfn, MEMMAP_EARLY);
	else {
		struct page *start;
		struct memmap_init_callback_data args;

		start = pfn_to_page(start_pfn);
		args.start = start;
		args.end = start + size;
		args.nid = nid;
		args.zone = zone;

		efi_memmap_walk(virtual_memmap_init, &args);
	}
}

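/*
 * With a virtual mem_map, a pfn is valid only if its struct page lies on
 * a mapped vmem_map page.  Probe the entry with __get_user(), which
 * faults gracefully rather than oopsing, and when the entry straddles a
 * page boundary probe its last byte as well.
 */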
int
ia64_pfn_valid (unsigned long pfn)
{
	char byte;
	struct page *pg = pfn_to_page(pfn);

	return     (__get_user(byte, (char __user *) pg) == 0)
		&& ((((u64)pg & PAGE_MASK) == (((u64)(pg + 1) - 1) & PAGE_MASK))
			|| (__get_user(byte, (char __user *) (pg + 1) - 1) == 0));
}
EXPORT_SYMBOL(ia64_pfn_valid);

int __init
find_largest_hole (u64 start, u64 end, void *arg)
{
	u64 *max_gap = arg;

	static u64 last_end = PAGE_OFFSET;

	/* NOTE: this algorithm assumes efi memmap table is ordered */

	if (*max_gap < (start - last_end))
		*max_gap = start - last_end;
	last_end = end;
	return 0;
}

#endif /* CONFIG_VIRTUAL_MEM_MAP */

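/*
 * Record a usable physical range in the early node map via
 * add_active_range(), clipping out any overlap with the crash-kernel
 * reservation when CONFIG_KEXEC is enabled.
 */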
int __init
register_active_ranges(u64 start, u64 end, void *arg)
{
	int nid = paddr_to_nid(__pa(start));

	if (nid < 0)
		nid = 0;
#ifdef CONFIG_KEXEC
	if (start > crashk_res.start && start < crashk_res.end)
		start = crashk_res.end;
	if (end > crashk_res.start && end < crashk_res.end)
		end = crashk_res.start;
#endif

	if (start < end)
		add_active_range(nid, __pa(start) >> PAGE_SHIFT,
			__pa(end) >> PAGE_SHIFT);
	return 0;
}

static int __init
count_reserved_pages (u64 start, u64 end, void *arg)
{
	unsigned long num_reserved = 0;
	unsigned long *count = arg;

	for (; start < end; start += PAGE_SIZE)
		if (PageReserved(virt_to_page(start)))
			++num_reserved;
	*count += num_reserved;
	return 0;
}

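/*
 * Track the lowest and highest page frame numbers seen across all EFI
 * memory descriptors, rounded to page (FLATMEM) or granule boundaries,
 * to establish min_low_pfn and max_low_pfn.
 */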
int
find_max_min_low_pfn (unsigned long start, unsigned long end, void *arg)
{
	unsigned long pfn_start, pfn_end;
#ifdef CONFIG_FLATMEM
	pfn_start = (PAGE_ALIGN(__pa(start))) >> PAGE_SHIFT;
	pfn_end = (PAGE_ALIGN(__pa(end - 1))) >> PAGE_SHIFT;
#else
	pfn_start = GRANULEROUNDDOWN(__pa(start)) >> PAGE_SHIFT;
	pfn_end = GRANULEROUNDUP(__pa(end - 1)) >> PAGE_SHIFT;
#endif
	min_low_pfn = min(min_low_pfn, pfn_start);
	max_low_pfn = max(max_low_pfn, pfn_end);
	return 0;
}

/*
 * Boot command-line option "nolwsys" can be used to disable the use of any light-weight
 * system call handler.  When this option is in effect, all fsyscalls will end up bubbling
 * down into the kernel and calling the normal (heavy-weight) syscall handler.  This is
 * useful for performance testing, but conceivably could also come in handy for debugging
 * purposes.
 */

static int nolwsys __initdata;

static int __init
nolwsys_setup (char *s)
{
	nolwsys = 1;
	return 1;
}

__setup("nolwsys", nolwsys_setup);

void __init
mem_init (void)
{
	long reserved_pages, codesize, datasize, initsize;
	pg_data_t *pgdat;
	int i;
	static struct kcore_list kcore_mem, kcore_vmem, kcore_kernel;

	BUG_ON(PTRS_PER_PGD * sizeof(pgd_t) != PAGE_SIZE);
	BUG_ON(PTRS_PER_PMD * sizeof(pmd_t) != PAGE_SIZE);
	BUG_ON(PTRS_PER_PTE * sizeof(pte_t) != PAGE_SIZE);

#ifdef CONFIG_PCI
	/*
	 * This needs to be called _after_ the command line has been parsed but _before_
	 * any drivers that may need the PCI DMA interface are initialized or bootmem has
	 * been freed.
	 */
	platform_dma_init();
#endif

#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
	max_mapnr = max_low_pfn;
#endif

	high_memory = __va(max_low_pfn * PAGE_SIZE);

	kclist_add(&kcore_mem, __va(0), max_low_pfn * PAGE_SIZE);
	kclist_add(&kcore_vmem, (void *)VMALLOC_START, VMALLOC_END-VMALLOC_START);
	kclist_add(&kcore_kernel, _stext, _end - _stext);

	for_each_online_pgdat(pgdat)
		if (pgdat->bdata->node_bootmem_map)
			totalram_pages += free_all_bootmem_node(pgdat);

	reserved_pages = 0;
	efi_memmap_walk(count_reserved_pages, &reserved_pages);

	codesize = (unsigned long) _etext - (unsigned long) _stext;
	datasize = (unsigned long) _edata - (unsigned long) _etext;
	initsize = (unsigned long) __init_end - (unsigned long) __init_begin;

	printk(KERN_INFO "Memory: %luk/%luk available (%luk code, %luk reserved, "
	       "%luk data, %luk init)\n", (unsigned long) nr_free_pages() << (PAGE_SHIFT - 10),
	       num_physpages << (PAGE_SHIFT - 10), codesize >> 10,
	       reserved_pages << (PAGE_SHIFT - 10), datasize >> 10, initsize >> 10);

	/*
	 * For fsyscall entry points with no light-weight handler, use the ordinary
	 * (heavy-weight) handler, but mark it by setting bit 0, so the fsyscall entry
	 * code can tell them apart.
	 */
	for (i = 0; i < NR_syscalls; ++i) {
		extern unsigned long fsyscall_table[NR_syscalls];
		extern unsigned long sys_call_table[NR_syscalls];

		if (!fsyscall_table[i] || nolwsys)
			fsyscall_table[i] = sys_call_table[i] | 1;
	}
	setup_gate();

#ifdef CONFIG_IA32_SUPPORT
	ia32_mem_init();
#endif
}

#ifdef CONFIG_MEMORY_HOTPLUG
void online_page(struct page *page)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
	totalram_pages++;
	num_physpages++;
}

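/*
 * Hot-added memory always goes into ZONE_NORMAL of the owning node;
 * ia64 has no highmem, so no other zone choice is made here.
 */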
int arch_add_memory(int nid, u64 start, u64 size)
{
	pg_data_t *pgdat;
	struct zone *zone;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	pgdat = NODE_DATA(nid);

	zone = pgdat->node_zones + ZONE_NORMAL;
	ret = __add_pages(zone, start_pfn, nr_pages);

	if (ret)
		printk("%s: Problem encountered in __add_pages() as ret=%d\n",
		       __func__, ret);

	return ret;
}
#ifdef CONFIG_MEMORY_HOTREMOVE
int remove_memory(u64 start, u64 size)
{
	unsigned long start_pfn, end_pfn;
	unsigned long timeout = 120 * HZ;
	int ret;
	start_pfn = start >> PAGE_SHIFT;
	end_pfn = start_pfn + (size >> PAGE_SHIFT);
	ret = offline_pages(start_pfn, end_pfn, timeout);
	if (ret)
		goto out;
	/* we can free mem_map at this point */
out:
	return ret;
}
EXPORT_SYMBOL_GPL(remove_memory);
#endif /* CONFIG_MEMORY_HOTREMOVE */
#endif