/*
 * Initialize MMU support.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/kernel.h>
#include <linux/init.h>

#include <linux/bootmem.h>
#include <linux/efi.h>
#include <linux/elf.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/bitops.h>
#include <linux/kexec.h>

#include <asm/a.out.h>
#include <asm/dma.h>
#include <asm/ia32.h>
#include <asm/io.h>
#include <asm/machvec.h>
#include <asm/numa.h>
#include <asm/patch.h>
#include <asm/pgalloc.h>
#include <asm/sal.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/tlb.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/mca.h>

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

DEFINE_PER_CPU(unsigned long *, __pgtable_quicklist);
DEFINE_PER_CPU(long, __pgtable_quicklist_size);

extern void ia64_tlb_init (void);

unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;

#ifdef CONFIG_VIRTUAL_MEM_MAP
unsigned long vmalloc_end = VMALLOC_END_INIT;
EXPORT_SYMBOL(vmalloc_end);
struct page *vmem_map;
EXPORT_SYMBOL(vmem_map);
#endif

struct page *zero_page_memmap_ptr;      /* map entry for zero page */
EXPORT_SYMBOL(zero_page_memmap_ptr);

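/*
 * Per-CPU page-table quicklist sizing policy: keep at least MIN_PGT_PAGES
 * pages cached per CPU, cap the cache at a fraction (1/16) of the node's
 * free memory, and free at most MAX_PGT_FREES_PER_PASS pages per trimming
 * pass so a single pass cannot hold preemption off for too long.
 */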
#define MIN_PGT_PAGES                   25UL
#define MAX_PGT_FREES_PER_PASS          16L
#define PGT_FRACTION_OF_NODE_MEM        16

static inline long
max_pgt_pages(void)
{
        u64 node_free_pages, max_pgt_pages;

#ifndef CONFIG_NUMA
        node_free_pages = nr_free_pages();
#else
        node_free_pages = node_page_state(numa_node_id(), NR_FREE_PAGES);
#endif
        max_pgt_pages = node_free_pages / PGT_FRACTION_OF_NODE_MEM;
        max_pgt_pages = max(max_pgt_pages, MIN_PGT_PAGES);
        return max_pgt_pages;
}

static inline long
min_pages_to_free(void)
{
        long pages_to_free;

        pages_to_free = pgtable_quicklist_size - max_pgt_pages();
        pages_to_free = min(pages_to_free, MAX_PGT_FREES_PER_PASS);
        return pages_to_free;
}

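/*
 * Trim the per-CPU page-table quicklist back under its limit.  Preemption
 * is re-enabled between passes so a burst of frees does not keep the CPU
 * unpreemptible; each pass re-computes the excess because we may have been
 * rescheduled onto a different CPU in the meantime.
 */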
void
check_pgt_cache(void)
{
        long pages_to_free;

        if (unlikely(pgtable_quicklist_size <= MIN_PGT_PAGES))
                return;

        preempt_disable();
        while (unlikely((pages_to_free = min_pages_to_free()) > 0)) {
                while (pages_to_free--) {
                        free_page((unsigned long)pgtable_quicklist_alloc());
                }
                preempt_enable();
                preempt_disable();
        }
        preempt_enable();
}

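/*
 * Lazily make a newly executable page's instruction cache coherent with
 * the data cache: flush the i-cache range the first time the page is
 * mapped executable, then record that in PG_arch_1 so later mappings of
 * the same page can skip the flush.
 */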
void
lazy_mmu_prot_update (pte_t pte)
{
        unsigned long addr;
        struct page *page;
        unsigned long order;

        if (!pte_exec(pte))
                return;                         /* not an executable page... */

        page = pte_page(pte);
        addr = (unsigned long) page_address(page);

        if (test_bit(PG_arch_1, &page->flags))
                return;                         /* i-cache is already coherent with d-cache */

        if (PageCompound(page)) {
                order = compound_order(page);
                flush_icache_range(addr, addr + (1UL << order << PAGE_SHIFT));
        }
        else
                flush_icache_range(addr, addr + PAGE_SIZE);
        set_bit(PG_arch_1, &page->flags);       /* mark page as clean */
}

/*
 * Since DMA is i-cache coherent, any (complete) pages that were written via
 * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
 * flush them when they get mapped into an executable vm-area.
 */
void
dma_mark_clean(void *addr, size_t size)
{
        unsigned long pg_addr, end;

        pg_addr = PAGE_ALIGN((unsigned long) addr);
        end = (unsigned long) addr + size;
        while (pg_addr + PAGE_SIZE <= end) {
                struct page *page = virt_to_page(pg_addr);
                set_bit(PG_arch_1, &page->flags);
                pg_addr += PAGE_SIZE;
        }
}

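/*
 * Place the bottom of the register backing store at the maximum stack
 * size (capped at MAX_USER_STACK_SIZE) below start_stack: the RBS grows
 * upward from there while the memory stack grows downward, so the two
 * share the address range allowed by the stack rlimit.
 */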
inline void
ia64_set_rbs_bot (void)
{
        unsigned long stack_size = current->signal->rlim[RLIMIT_STACK].rlim_max & -16;

        if (stack_size > MAX_USER_STACK_SIZE)
                stack_size = MAX_USER_STACK_SIZE;
        current->thread.rbs_bot = PAGE_ALIGN(current->mm->start_stack - stack_size);
}

/*
 * This performs some platform-dependent address space initialization.
 * On IA-64, we want to set up the VM area for the register backing
 * store (which grows upwards) and install the gateway page which is
 * used for signal trampolines, etc.
 */
void
ia64_init_addr_space (void)
{
        struct vm_area_struct *vma;

        ia64_set_rbs_bot();

        /*
         * If we're out of memory and kmem_cache_alloc() returns NULL, we simply ignore
         * the problem.  When the process attempts to write to the register backing store
         * for the first time, it will get a SEGFAULT in this case.
         */
        vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
        if (vma) {
                vma->vm_mm = current->mm;
                vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
                vma->vm_end = vma->vm_start + PAGE_SIZE;
                vma->vm_page_prot = protection_map[VM_DATA_DEFAULT_FLAGS & 0x7];
                vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
                down_write(&current->mm->mmap_sem);
                if (insert_vm_struct(current->mm, vma)) {
                        up_write(&current->mm->mmap_sem);
                        kmem_cache_free(vm_area_cachep, vma);
                        return;
                }
                up_write(&current->mm->mmap_sem);
        }

        /* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
        if (!(current->personality & MMAP_PAGE_ZERO)) {
                vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
                if (vma) {
                        vma->vm_mm = current->mm;
                        vma->vm_end = PAGE_SIZE;
                        vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
                        vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO | VM_RESERVED;
                        down_write(&current->mm->mmap_sem);
                        if (insert_vm_struct(current->mm, vma)) {
                                up_write(&current->mm->mmap_sem);
                                kmem_cache_free(vm_area_cachep, vma);
                                return;
                        }
                        up_write(&current->mm->mmap_sem);
                }
        }
}

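/*
 * Release the memory occupied by __init code and data back to the page
 * allocator once boot is complete.
 */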
void
free_initmem (void)
{
        unsigned long addr, eaddr;

        addr = (unsigned long) ia64_imva(__init_begin);
        eaddr = (unsigned long) ia64_imva(__init_end);
        while (addr < eaddr) {
                ClearPageReserved(virt_to_page(addr));
                init_page_count(virt_to_page(addr));
                free_page(addr);
                ++totalram_pages;
                addr += PAGE_SIZE;
        }
        printk(KERN_INFO "Freeing unused kernel memory: %ldkB freed\n",
               (__init_end - __init_begin) >> 10);
}

void __init
free_initrd_mem (unsigned long start, unsigned long end)
{
        struct page *page;
        /*
         * EFI uses 4KB pages while the kernel can use 4KB or bigger.
         * Thus EFI and the kernel may have different page sizes. It is
         * therefore possible to have the initrd share the same page as
         * the end of the kernel (given current setup).
         *
         * To avoid freeing/using the wrong page (kernel sized) we:
         *      - align up the beginning of initrd
         *      - align down the end of initrd
         *
         *  |             |
         *  |=============| a000
         *  |             |
         *  |             |
         *  |             | 9000
         *  |/////////////|
         *  |/////////////|
         *  |=============| 8000
         *  |///INITRD////|
         *  |/////////////|
         *  |/////////////| 7000
         *  |             |
         *  |KKKKKKKKKKKKK|
         *  |=============| 6000
         *  |KKKKKKKKKKKKK|
         *  |KKKKKKKKKKKKK|
         *  K=kernel using 8KB pages
         *
         * In this example, we must free page 8000 ONLY. So we must align up
         * initrd_start and keep initrd_end as is.
         */
        start = PAGE_ALIGN(start);
        end = end & PAGE_MASK;

        if (start < end)
                printk(KERN_INFO "Freeing initrd memory: %ldkB freed\n", (end - start) >> 10);

        for (; start < end; start += PAGE_SIZE) {
                if (!virt_addr_valid(start))
                        continue;
                page = virt_to_page(start);
                ClearPageReserved(page);
                init_page_count(page);
                free_page(start);
                ++totalram_pages;
        }
}

/*
 * This installs a clean page in the kernel's page table.
 */
static struct page * __init
put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        if (!PageReserved(page))
                printk(KERN_ERR "put_kernel_page: page at 0x%p not in reserved memory\n",
                       page_address(page));

        pgd = pgd_offset_k(address);            /* note: this is NOT pgd_offset()! */

        {
                pud = pud_alloc(&init_mm, pgd, address);
                if (!pud)
                        goto out;
                pmd = pmd_alloc(&init_mm, pud, address);
                if (!pmd)
                        goto out;
                pte = pte_alloc_kernel(pmd, address);
                if (!pte)
                        goto out;
                if (!pte_none(*pte))
                        goto out;
                set_pte(pte, mk_pte(page, pgprot));
        }
  out:
        /* no need for flush_tlb */
        return page;
}

static void __init
setup_gate (void)
{
        struct page *page;

        /*
         * Map the gate page twice: once read-only to export the ELF
         * headers etc. and once as an execute-only page to enable
         * privilege-promotion via "epc":
         */
        page = virt_to_page(ia64_imva(__start_gate_section));
        put_kernel_page(page, GATE_ADDR, PAGE_READONLY);
#ifdef HAVE_BUGGY_SEGREL
        page = virt_to_page(ia64_imva(__start_gate_section + PAGE_SIZE));
        put_kernel_page(page, GATE_ADDR + PAGE_SIZE, PAGE_GATE);
#else
        put_kernel_page(page, GATE_ADDR + PERCPU_PAGE_SIZE, PAGE_GATE);
        /* Fill in the holes (if any) with read-only zero pages: */
        {
                unsigned long addr;

                for (addr = GATE_ADDR + PAGE_SIZE;
                     addr < GATE_ADDR + PERCPU_PAGE_SIZE;
                     addr += PAGE_SIZE)
                {
                        put_kernel_page(ZERO_PAGE(0), addr,
                                        PAGE_READONLY);
                        put_kernel_page(ZERO_PAGE(0), addr + PERCPU_PAGE_SIZE,
                                        PAGE_READONLY);
                }
        }
#endif
        ia64_patch_gate();
}

void __devinit
ia64_mmu_init (void *my_cpu_data)
{
        unsigned long psr, pta, impl_va_bits;
        extern void __devinit tlb_init (void);

#ifdef CONFIG_DISABLE_VHPT
#       define VHPT_ENABLE_BIT  0
#else
#       define VHPT_ENABLE_BIT  1
#endif

        /* Pin mapping for percpu area into TLB */
        psr = ia64_clear_ic();
        ia64_itr(0x2, IA64_TR_PERCPU_DATA, PERCPU_ADDR,
                 pte_val(pfn_pte(__pa(my_cpu_data) >> PAGE_SHIFT, PAGE_KERNEL)),
                 PERCPU_PAGE_SHIFT);

        ia64_set_psr(psr);
        ia64_srlz_i();

        /*
         * Check if the virtually mapped linear page table (VMLPT) overlaps with a mapped
         * address space.  The IA-64 architecture guarantees that at least 50 bits of
         * virtual address space are implemented, but if we pick a large enough page size
         * (e.g., 64KB), the mapped address space is big enough that it will overlap with
         * VMLPT.  I assume that once we run on machines big enough to warrant 64KB pages,
         * IMPL_VA_MSB will be significantly bigger, so this is unlikely to become a
         * problem in practice.  Alternatively, we could truncate the top of the mapped
         * address space to not permit mappings that would overlap with the VMLPT.
         * --davidm 00/12/06
         */
#       define pte_bits                 3
#       define mapped_space_bits        (3*(PAGE_SHIFT - pte_bits) + PAGE_SHIFT)
        /*
         * The virtual page table has to cover the entire implemented address space within
         * a region even though not all of this space may be mappable.  The reason for
         * this is that the Access bit and Dirty bit fault handlers perform
         * non-speculative accesses to the virtual page table, so the address range of the
         * virtual page table itself needs to be covered by the virtual page table.
         */
#       define vmlpt_bits               (impl_va_bits - PAGE_SHIFT + pte_bits)
#       define POW2(n)                  (1ULL << (n))

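        /*
         * Worked example (a sketch, assuming the common configuration of a
         * three-level page table, 16KB pages, and IMPL_VA_MSB = 50):
         * PAGE_SHIFT = 14, so mapped_space_bits = 3*(14 - 3) + 14 = 47 and
         * impl_va_bits = 51, giving vmlpt_bits = 51 - 14 + 3 = 40.  The
         * overlap checks below then pass (47 - 14 <= 40 - 3 and 47 <= 50),
         * and the VMLPT occupies the top 2^40 bytes of each region.
         */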
        impl_va_bits = ffz(~(local_cpu_data->unimpl_va_mask | (7UL << 61)));

        if (impl_va_bits < 51 || impl_va_bits > 61)
                panic("CPU has bogus IMPL_VA_MSB value of %lu!\n", impl_va_bits - 1);
        /*
         * mapped_space_bits - PAGE_SHIFT is the total number of ptes we need,
         * which must fit into "vmlpt_bits - pte_bits" slots.  Second half of
         * the test makes sure that our mapped space doesn't overlap the
         * unimplemented hole in the middle of the region.
         */
        if ((mapped_space_bits - PAGE_SHIFT > vmlpt_bits - pte_bits) ||
            (mapped_space_bits > impl_va_bits - 1))
                panic("Cannot build a big enough virtual-linear page table"
                      " to cover mapped address space.\n"
                      " Try using a smaller page size.\n");

        /* place the VMLPT at the end of each page-table mapped region: */
        pta = POW2(61) - POW2(vmlpt_bits);

        /*
         * Set the (virtually mapped linear) page table address.  Bit
         * 8 selects between the short and long format, bits 2-7 the
         * size of the table, and bit 0 whether the VHPT walker is
         * enabled.
         */
        ia64_set_pta(pta | (0 << 8) | (vmlpt_bits << 2) | VHPT_ENABLE_BIT);

        ia64_tlb_init();

#ifdef  CONFIG_HUGETLB_PAGE
        ia64_set_rr(HPAGE_REGION_BASE, HPAGE_SHIFT << 2);
        ia64_srlz_d();
#endif
}

#ifdef CONFIG_VIRTUAL_MEM_MAP
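/*
 * Find the next pfn (relative to the node's first pfn) whose vmem_map
 * entry is actually backed by memory, by walking the kernel page tables
 * that map the virtual memmap and skipping unmapped PGD/PUD/PMD/PTE
 * ranges in one step each.
 */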
int vmemmap_find_next_valid_pfn(int node, int i)
{
        unsigned long end_address, hole_next_pfn;
        unsigned long stop_address;
        pg_data_t *pgdat = NODE_DATA(node);

        end_address = (unsigned long) &vmem_map[pgdat->node_start_pfn + i];
        end_address = PAGE_ALIGN(end_address);

        stop_address = (unsigned long) &vmem_map[
                pgdat->node_start_pfn + pgdat->node_spanned_pages];

        do {
                pgd_t *pgd;
                pud_t *pud;
                pmd_t *pmd;
                pte_t *pte;

                pgd = pgd_offset_k(end_address);
                if (pgd_none(*pgd)) {
                        end_address += PGDIR_SIZE;
                        continue;
                }

                pud = pud_offset(pgd, end_address);
                if (pud_none(*pud)) {
                        end_address += PUD_SIZE;
                        continue;
                }

                pmd = pmd_offset(pud, end_address);
                if (pmd_none(*pmd)) {
                        end_address += PMD_SIZE;
                        continue;
                }

                pte = pte_offset_kernel(pmd, end_address);
retry_pte:
                if (pte_none(*pte)) {
                        end_address += PAGE_SIZE;
                        pte++;
                        if ((end_address < stop_address) &&
                            (end_address != ALIGN(end_address, 1UL << PMD_SHIFT)))
                                goto retry_pte;
                        continue;
                }
                /* Found next valid vmem_map page */
                break;
        } while (end_address < stop_address);

        end_address = min(end_address, stop_address);
        end_address = end_address - (unsigned long) vmem_map + sizeof(struct page) - 1;
        hole_next_pfn = end_address / sizeof(struct page);
        return hole_next_pfn - pgdat->node_start_pfn;
}

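/*
 * Allocate and populate the kernel page tables that back the vmem_map
 * entries for the physical range [start, end), pulling the backing
 * pages from bootmem on the node that owns the range.
 */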
int __init
create_mem_map_page_table (u64 start, u64 end, void *arg)
{
        unsigned long address, start_page, end_page;
        struct page *map_start, *map_end;
        int node;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
        map_end = vmem_map + (__pa(end) >> PAGE_SHIFT);

        start_page = (unsigned long) map_start & PAGE_MASK;
        end_page = PAGE_ALIGN((unsigned long) map_end);
        node = paddr_to_nid(__pa(start));

        for (address = start_page; address < end_page; address += PAGE_SIZE) {
                pgd = pgd_offset_k(address);
                if (pgd_none(*pgd))
                        pgd_populate(&init_mm, pgd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
                pud = pud_offset(pgd, address);

                if (pud_none(*pud))
                        pud_populate(&init_mm, pud, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
                pmd = pmd_offset(pud, address);

                if (pmd_none(*pmd))
                        pmd_populate_kernel(&init_mm, pmd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
                pte = pte_offset_kernel(pmd, address);

                if (pte_none(*pte))
                        set_pte(pte, pfn_pte(__pa(alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE)) >> PAGE_SHIFT,
                                             PAGE_KERNEL));
        }
        return 0;
}

struct memmap_init_callback_data {
        struct page *start;
        struct page *end;
        int nid;
        unsigned long zone;
};

static int
virtual_memmap_init (u64 start, u64 end, void *arg)
{
        struct memmap_init_callback_data *args;
        struct page *map_start, *map_end;

        args = (struct memmap_init_callback_data *) arg;
        map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
        map_end = vmem_map + (__pa(end) >> PAGE_SHIFT);

        if (map_start < args->start)
                map_start = args->start;
        if (map_end > args->end)
                map_end = args->end;

        /*
         * We have to initialize "out of bounds" struct page elements that fit completely
         * on the same pages that were allocated for the "in bounds" elements because they
         * may be referenced later (and found to be "reserved").
         */
        map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1)) / sizeof(struct page);
        map_end += ((PAGE_ALIGN((unsigned long) map_end) - (unsigned long) map_end)
                    / sizeof(struct page));

        if (map_start < map_end)
                memmap_init_zone((unsigned long)(map_end - map_start),
                                 args->nid, args->zone, page_to_pfn(map_start),
                                 MEMMAP_EARLY);
        return 0;
}

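/*
 * With a virtual mem_map, only the chunks that are actually backed by
 * memory can be initialized, so walk the EFI memory map and initialize
 * each present chunk via virtual_memmap_init(); otherwise fall back to
 * the generic memmap_init_zone() over the whole range.
 */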
void
memmap_init (unsigned long size, int nid, unsigned long zone,
             unsigned long start_pfn)
{
        if (!vmem_map)
                memmap_init_zone(size, nid, zone, start_pfn, MEMMAP_EARLY);
        else {
                struct page *start;
                struct memmap_init_callback_data args;

                start = pfn_to_page(start_pfn);
                args.start = start;
                args.end = start + size;
                args.nid = nid;
                args.zone = zone;

                efi_memmap_walk(virtual_memmap_init, &args);
        }
}

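/*
 * A pfn is valid only if its struct page is actually mapped in the
 * virtual mem_map.  Probe the first byte of the entry with __get_user(),
 * and, when the entry straddles a page boundary, the last byte as well,
 * so a fault on either unmapped half rejects the pfn.
 */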
int
ia64_pfn_valid (unsigned long pfn)
{
        char byte;
        struct page *pg = pfn_to_page(pfn);

        return (__get_user(byte, (char __user *) pg) == 0)
                && ((((u64)pg & PAGE_MASK) == (((u64)(pg + 1) - 1) & PAGE_MASK))
                    || (__get_user(byte, (char __user *) (pg + 1) - 1) == 0));
}
EXPORT_SYMBOL(ia64_pfn_valid);

int __init
find_largest_hole (u64 start, u64 end, void *arg)
{
        u64 *max_gap = arg;

        static u64 last_end = PAGE_OFFSET;

        /* NOTE: this algorithm assumes efi memmap table is ordered */

        if (*max_gap < (start - last_end))
                *max_gap = start - last_end;
        last_end = end;
        return 0;
}

#endif /* CONFIG_VIRTUAL_MEM_MAP */

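/*
 * Report each usable memory range to the early allocator.  Under
 * CONFIG_KEXEC, the range reserved for the crash kernel is carved out
 * so it is never handed to the page allocator.
 */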
int __init
register_active_ranges(u64 start, u64 end, void *arg)
{
        int nid = paddr_to_nid(__pa(start));

        if (nid < 0)
                nid = 0;
#ifdef CONFIG_KEXEC
        if (start > crashk_res.start && start < crashk_res.end)
                start = crashk_res.end;
        if (end > crashk_res.start && end < crashk_res.end)
                end = crashk_res.start;
#endif

        if (start < end)
                add_active_range(nid, __pa(start) >> PAGE_SHIFT,
                        __pa(end) >> PAGE_SHIFT);
        return 0;
}

static int __init
count_reserved_pages (u64 start, u64 end, void *arg)
{
        unsigned long num_reserved = 0;
        unsigned long *count = arg;

        for (; start < end; start += PAGE_SIZE)
                if (PageReserved(virt_to_page(start)))
                        ++num_reserved;
        *count += num_reserved;
        return 0;
}

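/*
 * EFI memmap walk callback: widen the global [min_low_pfn, max_low_pfn]
 * bounds to cover this range, rounding to page boundaries under
 * CONFIG_FLATMEM and to granule boundaries otherwise.
 */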
int
find_max_min_low_pfn (unsigned long start, unsigned long end, void *arg)
{
        unsigned long pfn_start, pfn_end;
#ifdef CONFIG_FLATMEM
        pfn_start = (PAGE_ALIGN(__pa(start))) >> PAGE_SHIFT;
        pfn_end = (PAGE_ALIGN(__pa(end - 1))) >> PAGE_SHIFT;
#else
        pfn_start = GRANULEROUNDDOWN(__pa(start)) >> PAGE_SHIFT;
        pfn_end = GRANULEROUNDUP(__pa(end - 1)) >> PAGE_SHIFT;
#endif
        min_low_pfn = min(min_low_pfn, pfn_start);
        max_low_pfn = max(max_low_pfn, pfn_end);
        return 0;
}

/*
 * Boot command-line option "nolwsys" can be used to disable the use of any light-weight
 * system call handler.  When this option is in effect, all fsyscalls will end up bubbling
 * down into the kernel and calling the normal (heavy-weight) syscall handler.  This is
 * useful for performance testing, but conceivably could also come in handy for debugging
 * purposes.
 */

static int nolwsys __initdata;

static int __init
nolwsys_setup (char *s)
{
        nolwsys = 1;
        return 1;
}

__setup("nolwsys", nolwsys_setup);

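/*
 * Final memory initialization: hand the remaining bootmem pages of every
 * node to the page allocator, report the memory statistics, redirect
 * fsyscalls that lack a light-weight handler to the heavy-weight path,
 * and map the gate page.
 */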
void __init
mem_init (void)
{
        long reserved_pages, codesize, datasize, initsize;
        pg_data_t *pgdat;
        int i;
        static struct kcore_list kcore_mem, kcore_vmem, kcore_kernel;

        BUG_ON(PTRS_PER_PGD * sizeof(pgd_t) != PAGE_SIZE);
        BUG_ON(PTRS_PER_PMD * sizeof(pmd_t) != PAGE_SIZE);
        BUG_ON(PTRS_PER_PTE * sizeof(pte_t) != PAGE_SIZE);

#ifdef CONFIG_PCI
        /*
         * This needs to be called _after_ the command line has been parsed but _before_
         * any drivers that may need the PCI DMA interface are initialized or bootmem has
         * been freed.
         */
        platform_dma_init();
#endif

#ifdef CONFIG_FLATMEM
        if (!mem_map)
                BUG();
        max_mapnr = max_low_pfn;
#endif

        high_memory = __va(max_low_pfn * PAGE_SIZE);

        kclist_add(&kcore_mem, __va(0), max_low_pfn * PAGE_SIZE);
        kclist_add(&kcore_vmem, (void *)VMALLOC_START, VMALLOC_END-VMALLOC_START);
        kclist_add(&kcore_kernel, _stext, _end - _stext);

        for_each_online_pgdat(pgdat)
                if (pgdat->bdata->node_bootmem_map)
                        totalram_pages += free_all_bootmem_node(pgdat);

        reserved_pages = 0;
        efi_memmap_walk(count_reserved_pages, &reserved_pages);

        codesize = (unsigned long) _etext - (unsigned long) _stext;
        datasize = (unsigned long) _edata - (unsigned long) _etext;
        initsize = (unsigned long) __init_end - (unsigned long) __init_begin;

        printk(KERN_INFO "Memory: %luk/%luk available (%luk code, %luk reserved, "
               "%luk data, %luk init)\n", (unsigned long) nr_free_pages() << (PAGE_SHIFT - 10),
               num_physpages << (PAGE_SHIFT - 10), codesize >> 10,
               reserved_pages << (PAGE_SHIFT - 10), datasize >> 10, initsize >> 10);

        /*
         * For fsyscall entry points with no light-weight handler, use the ordinary
         * (heavy-weight) handler, but mark it by setting bit 0, so the fsyscall entry
         * code can tell them apart.
         */
        for (i = 0; i < NR_syscalls; ++i) {
                extern unsigned long fsyscall_table[NR_syscalls];
                extern unsigned long sys_call_table[NR_syscalls];

                if (!fsyscall_table[i] || nolwsys)
                        fsyscall_table[i] = sys_call_table[i] | 1;
        }
        setup_gate();

#ifdef CONFIG_IA32_SUPPORT
        ia32_mem_init();
#endif
}

#ifdef CONFIG_MEMORY_HOTPLUG
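/*
 * Memory hotplug callbacks.  online_page() hands one newly onlined page
 * to the page allocator; arch_add_memory() adds a hot-added range to
 * ZONE_NORMAL of the owning node; memory removal is not supported.
 */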
void online_page(struct page *page)
{
        ClearPageReserved(page);
        init_page_count(page);
        __free_page(page);
        totalram_pages++;
        num_physpages++;
}

int arch_add_memory(int nid, u64 start, u64 size)
{
        pg_data_t *pgdat;
        struct zone *zone;
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;
        int ret;

        pgdat = NODE_DATA(nid);

        zone = pgdat->node_zones + ZONE_NORMAL;
        ret = __add_pages(zone, start_pfn, nr_pages);

        if (ret)
                printk("%s: Problem encountered in __add_pages() as ret=%d\n",
                       __FUNCTION__, ret);

        return ret;
}

int remove_memory(u64 start, u64 size)
{
        return -EINVAL;
}
EXPORT_SYMBOL_GPL(remove_memory);
#endif