/*
 * Initialize MMU support.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>

#include <linux/bootmem.h>
#include <linux/efi.h>
#include <linux/elf.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/bitops.h>

#include <asm/a.out.h>
#include <asm/dma.h>
#include <asm/ia32.h>
#include <asm/io.h>
#include <asm/machvec.h>
#include <asm/numa.h>
#include <asm/patch.h>
#include <asm/pgalloc.h>
#include <asm/sal.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/tlb.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/mca.h>

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

DEFINE_PER_CPU(unsigned long *, __pgtable_quicklist);
DEFINE_PER_CPU(long, __pgtable_quicklist_size);

extern void ia64_tlb_init (void);

unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;

#ifdef CONFIG_VIRTUAL_MEM_MAP
unsigned long vmalloc_end = VMALLOC_END_INIT;
EXPORT_SYMBOL(vmalloc_end);
struct page *vmem_map;
EXPORT_SYMBOL(vmem_map);
#endif

struct page *zero_page_memmap_ptr;	/* map entry for zero page */
EXPORT_SYMBOL(zero_page_memmap_ptr);

#define MIN_PGT_PAGES			25UL
#define MAX_PGT_FREES_PER_PASS		16L
#define PGT_FRACTION_OF_NODE_MEM	16

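/*
 * Cap the per-CPU page-table quicklist at a fraction
 * (1/PGT_FRACTION_OF_NODE_MEM) of this node's free memory, but never
 * below MIN_PGT_PAGES.
 */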
static inline long
max_pgt_pages(void)
{
	u64 node_free_pages, max_pgt_pages;

#ifndef	CONFIG_NUMA
	node_free_pages = nr_free_pages();
#else
	node_free_pages = nr_free_pages_pgdat(NODE_DATA(numa_node_id()));
#endif
	max_pgt_pages = node_free_pages / PGT_FRACTION_OF_NODE_MEM;
	max_pgt_pages = max(max_pgt_pages, MIN_PGT_PAGES);
	return max_pgt_pages;
}

static inline long
min_pages_to_free(void)
{
	long pages_to_free;

	pages_to_free = pgtable_quicklist_size - max_pgt_pages();
	pages_to_free = min(pages_to_free, MAX_PGT_FREES_PER_PASS);
	return pages_to_free;
}

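/*
 * Trim this CPU's page-table quicklist back down to max_pgt_pages(),
 * releasing at most MAX_PGT_FREES_PER_PASS pages between preemption
 * points.
 */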
void
check_pgt_cache(void)
{
	long pages_to_free;

	if (unlikely(pgtable_quicklist_size <= MIN_PGT_PAGES))
		return;

	preempt_disable();
	while (unlikely((pages_to_free = min_pages_to_free()) > 0)) {
		while (pages_to_free--) {
			free_page((unsigned long)pgtable_quicklist_alloc());
		}
		preempt_enable();
		preempt_disable();
	}
	preempt_enable();
}

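/*
 * On IA-64 the i-cache is not coherent with the d-cache, so a freshly
 * written executable page must be flushed explicitly.  PG_arch_1 marks
 * pages whose i-cache contents are already coherent, so the flush can
 * be skipped the next time around.
 */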
void
lazy_mmu_prot_update (pte_t pte)
{
	unsigned long addr;
	struct page *page;

	if (!pte_exec(pte))
		return;				/* not an executable page... */

	page = pte_page(pte);
	addr = (unsigned long) page_address(page);

	if (test_bit(PG_arch_1, &page->flags))
		return;				/* i-cache is already coherent with d-cache */

	flush_icache_range(addr, addr + PAGE_SIZE);
	set_bit(PG_arch_1, &page->flags);	/* mark page as clean */
}

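/*
 * Compute the bottom of the register backing store: the stack's hard
 * rlimit, 16-byte aligned and capped at MAX_USER_STACK_SIZE, below
 * STACK_TOP.  The RBS grows upward from this address.
 */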
inline void
ia64_set_rbs_bot (void)
{
	unsigned long stack_size = current->signal->rlim[RLIMIT_STACK].rlim_max & -16;

	if (stack_size > MAX_USER_STACK_SIZE)
		stack_size = MAX_USER_STACK_SIZE;
	current->thread.rbs_bot = STACK_TOP - stack_size;
}

/*
 * This performs some platform-dependent address space initialization.
 * On IA-64, we want to set up the VM area for the register backing
 * store (which grows upwards) and install the gateway page which is
 * used for signal trampolines, etc.
 */
void
ia64_init_addr_space (void)
{
	struct vm_area_struct *vma;

	ia64_set_rbs_bot();

	/*
	 * If we're out of memory and kmem_cache_alloc() returns NULL, we simply ignore
	 * the problem.  When the process attempts to write to the register backing store
	 * for the first time, it will get a SEGFAULT in this case.
	 */
	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (vma) {
		memset(vma, 0, sizeof(*vma));
		vma->vm_mm = current->mm;
		vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
		vma->vm_end = vma->vm_start + PAGE_SIZE;
		vma->vm_page_prot = protection_map[VM_DATA_DEFAULT_FLAGS & 0x7];
		vma->vm_flags = VM_DATA_DEFAULT_FLAGS | VM_GROWSUP;
		down_write(&current->mm->mmap_sem);
		if (insert_vm_struct(current->mm, vma)) {
			up_write(&current->mm->mmap_sem);
			kmem_cache_free(vm_area_cachep, vma);
			return;
		}
		up_write(&current->mm->mmap_sem);
	}

	/* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
	if (!(current->personality & MMAP_PAGE_ZERO)) {
		vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
		if (vma) {
			memset(vma, 0, sizeof(*vma));
			vma->vm_mm = current->mm;
			vma->vm_end = PAGE_SIZE;
			vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
			vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO | VM_RESERVED;
			down_write(&current->mm->mmap_sem);
			if (insert_vm_struct(current->mm, vma)) {
				up_write(&current->mm->mmap_sem);
				kmem_cache_free(vm_area_cachep, vma);
				return;
			}
			up_write(&current->mm->mmap_sem);
		}
	}
}

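/*
 * Release the memory occupied by __init code and data back to the page
 * allocator.
 */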
void
free_initmem (void)
{
	unsigned long addr, eaddr;

	addr = (unsigned long) ia64_imva(__init_begin);
	eaddr = (unsigned long) ia64_imva(__init_end);
	while (addr < eaddr) {
		ClearPageReserved(virt_to_page(addr));
		set_page_count(virt_to_page(addr), 1);
		free_page(addr);
		++totalram_pages;
		addr += PAGE_SIZE;
	}
	printk(KERN_INFO "Freeing unused kernel memory: %ldkB freed\n",
	       (__init_end - __init_begin) >> 10);
}

void
free_initrd_mem (unsigned long start, unsigned long end)
{
	struct page *page;
	/*
	 * EFI uses 4KB pages while the kernel can use 4KB or bigger.
	 * Thus EFI and the kernel may have different page sizes. It is
	 * therefore possible to have the initrd share the same page as
	 * the end of the kernel (given current setup).
	 *
	 * To avoid freeing/using the wrong page (kernel sized) we:
	 *	- align up the beginning of initrd
	 *	- align down the end of initrd
	 *
	 *  |             |
	 *  |=============| a000
	 *  |             |
	 *  |             |
	 *  |             | 9000
	 *  |/////////////|
	 *  |/////////////|
	 *  |=============| 8000
	 *  |///INITRD////|
	 *  |/////////////|
	 *  |/////////////| 7000
	 *  |             |
	 *  |KKKKKKKKKKKKK|
	 *  |=============| 6000
	 *  |KKKKKKKKKKKKK|
	 *  |KKKKKKKKKKKKK|
	 *  K=kernel using 8KB pages
	 *
	 * In this example, we must free page 8000 ONLY. So we must align up
	 * initrd_start and keep initrd_end as is.
	 */
	start = PAGE_ALIGN(start);
	end = end & PAGE_MASK;

	if (start < end)
		printk(KERN_INFO "Freeing initrd memory: %ldkB freed\n", (end - start) >> 10);

	for (; start < end; start += PAGE_SIZE) {
		if (!virt_addr_valid(start))
			continue;
		page = virt_to_page(start);
		ClearPageReserved(page);
		set_page_count(page, 1);
		free_page(start);
		++totalram_pages;
	}
}

/*
 * This installs a clean page in the kernel's page table.
 */
struct page *
put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (!PageReserved(page))
		printk(KERN_ERR "put_kernel_page: page at 0x%p not in reserved memory\n",
		       page_address(page));

	pgd = pgd_offset_k(address);	/* note: this is NOT pgd_offset()! */

	spin_lock(&init_mm.page_table_lock);
	{
		pud = pud_alloc(&init_mm, pgd, address);
		if (!pud)
			goto out;

		pmd = pmd_alloc(&init_mm, pud, address);
		if (!pmd)
			goto out;
		pte = pte_alloc_map(&init_mm, pmd, address);
		if (!pte)
			goto out;
		if (!pte_none(*pte)) {
			pte_unmap(pte);
			goto out;
		}
		set_pte(pte, mk_pte(page, pgprot));
		pte_unmap(pte);
	}
  out:	spin_unlock(&init_mm.page_table_lock);
	/* no need for flush_tlb */
	return page;
}

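/*
 * Install the gate page, which holds the signal trampoline and the
 * "epc"-based system-call entry points, then apply the run-time
 * patches to its code via ia64_patch_gate().
 */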
static void
setup_gate (void)
{
	struct page *page;

	/*
	 * Map the gate page twice: once read-only to export the ELF
	 * headers etc. and once execute-only to enable
	 * privilege-promotion via "epc":
	 */
	page = virt_to_page(ia64_imva(__start_gate_section));
	put_kernel_page(page, GATE_ADDR, PAGE_READONLY);
#ifdef HAVE_BUGGY_SEGREL
	page = virt_to_page(ia64_imva(__start_gate_section + PAGE_SIZE));
	put_kernel_page(page, GATE_ADDR + PAGE_SIZE, PAGE_GATE);
#else
	put_kernel_page(page, GATE_ADDR + PERCPU_PAGE_SIZE, PAGE_GATE);
	/* Fill in the holes (if any) with read-only zero pages: */
	{
		unsigned long addr;

		for (addr = GATE_ADDR + PAGE_SIZE;
		     addr < GATE_ADDR + PERCPU_PAGE_SIZE;
		     addr += PAGE_SIZE)
		{
			put_kernel_page(ZERO_PAGE(0), addr,
					PAGE_READONLY);
			put_kernel_page(ZERO_PAGE(0), addr + PERCPU_PAGE_SIZE,
					PAGE_READONLY);
		}
	}
#endif
	ia64_patch_gate();
}

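/*
 * Per-CPU MMU setup: pin the per-CPU data area into the TLB, program
 * the PTA register for the virtually mapped linear page table, and
 * initialize the TLB.
 */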
void __devinit
ia64_mmu_init (void *my_cpu_data)
{
	unsigned long psr, pta, impl_va_bits;
	extern void __devinit tlb_init (void);

#ifdef CONFIG_DISABLE_VHPT
#	define VHPT_ENABLE_BIT	0
#else
#	define VHPT_ENABLE_BIT	1
#endif

	/* Pin mapping for percpu area into TLB */
	psr = ia64_clear_ic();
	ia64_itr(0x2, IA64_TR_PERCPU_DATA, PERCPU_ADDR,
		 pte_val(pfn_pte(__pa(my_cpu_data) >> PAGE_SHIFT, PAGE_KERNEL)),
		 PERCPU_PAGE_SHIFT);

	ia64_set_psr(psr);
	ia64_srlz_i();

	/*
	 * Check if the virtually mapped linear page table (VMLPT) overlaps with a mapped
	 * address space.  The IA-64 architecture guarantees that at least 50 bits of
	 * virtual address space are implemented but if we pick a large enough page size
	 * (e.g., 64KB), the mapped address space is big enough that it will overlap with
	 * VMLPT.  I assume that once we run on machines big enough to warrant 64KB pages,
	 * IMPL_VA_MSB will be significantly bigger, so this is unlikely to become a
	 * problem in practice.  Alternatively, we could truncate the top of the mapped
	 * address space to not permit mappings that would overlap with the VMLPT.
	 * --davidm 00/12/06
	 */
#	define pte_bits			3
#	define mapped_space_bits	(3*(PAGE_SHIFT - pte_bits) + PAGE_SHIFT)
	/*
	 * The virtual page table has to cover the entire implemented address space within
	 * a region even though not all of this space may be mappable.  The reason for
	 * this is that the Access bit and Dirty bit fault handlers perform
	 * non-speculative accesses to the virtual page table, so the address range of the
	 * virtual page table itself needs to be covered by the virtual page table.
	 */
#	define vmlpt_bits		(impl_va_bits - PAGE_SHIFT + pte_bits)
#	define POW2(n)			(1ULL << (n))

	impl_va_bits = ffz(~(local_cpu_data->unimpl_va_mask | (7UL << 61)));

	if (impl_va_bits < 51 || impl_va_bits > 61)
		panic("CPU has bogus IMPL_VA_MSB value of %lu!\n", impl_va_bits - 1);

	/* place the VMLPT at the end of each page-table mapped region: */
	pta = POW2(61) - POW2(vmlpt_bits);

	if (POW2(mapped_space_bits) >= pta)
		panic("mm/init: overlap between virtually mapped linear page table and "
		      "mapped kernel space!");
	/*
	 * Set the (virtually mapped linear) page table address.  Bit
	 * 8 selects between the short and long format, bits 2-7 the
	 * size of the table, and bit 0 whether the VHPT walker is
	 * enabled.
	 */
	ia64_set_pta(pta | (0 << 8) | (vmlpt_bits << 2) | VHPT_ENABLE_BIT);

	ia64_tlb_init();

#ifdef CONFIG_HUGETLB_PAGE
	ia64_set_rr(HPAGE_REGION_BASE, HPAGE_SHIFT << 2);
	ia64_srlz_d();
#endif
}

#ifdef CONFIG_VIRTUAL_MEM_MAP

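/*
 * Build kernel page tables for the slice of vmem_map that covers the
 * physical range [start, end), allocating the backing pages from this
 * node's bootmem.
 */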
int
create_mem_map_page_table (u64 start, u64 end, void *arg)
{
	unsigned long address, start_page, end_page;
	struct page *map_start, *map_end;
	int node;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
	map_end   = vmem_map + (__pa(end) >> PAGE_SHIFT);

	start_page = (unsigned long) map_start & PAGE_MASK;
	end_page = PAGE_ALIGN((unsigned long) map_end);
	node = paddr_to_nid(__pa(start));

	for (address = start_page; address < end_page; address += PAGE_SIZE) {
		pgd = pgd_offset_k(address);
		if (pgd_none(*pgd))
			pgd_populate(&init_mm, pgd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
		pud = pud_offset(pgd, address);

		if (pud_none(*pud))
			pud_populate(&init_mm, pud, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
		pmd = pmd_offset(pud, address);

		if (pmd_none(*pmd))
			pmd_populate_kernel(&init_mm, pmd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
		pte = pte_offset_kernel(pmd, address);

		if (pte_none(*pte))
			set_pte(pte, pfn_pte(__pa(alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE)) >> PAGE_SHIFT,
					     PAGE_KERNEL));
	}
	return 0;
}

struct memmap_init_callback_data {
	struct page *start;
	struct page *end;
	int nid;
	unsigned long zone;
};

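/*
 * Initialize the vmem_map entries covering the physical range
 * [start, end), clipped to the zone described by the callback data.
 */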
static int
virtual_memmap_init (u64 start, u64 end, void *arg)
{
	struct memmap_init_callback_data *args;
	struct page *map_start, *map_end;

	args = (struct memmap_init_callback_data *) arg;
	map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
	map_end   = vmem_map + (__pa(end) >> PAGE_SHIFT);

	if (map_start < args->start)
		map_start = args->start;
	if (map_end > args->end)
		map_end = args->end;

	/*
	 * We have to initialize "out of bounds" struct page elements that fit completely
	 * on the same pages that were allocated for the "in bounds" elements because they
	 * may be referenced later (and found to be "reserved").
	 */
	map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1)) / sizeof(struct page);
	map_end += ((PAGE_ALIGN((unsigned long) map_end) - (unsigned long) map_end)
		    / sizeof(struct page));

	if (map_start < map_end)
		memmap_init_zone((unsigned long)(map_end - map_start),
				 args->nid, args->zone, page_to_pfn(map_start));
	return 0;
}

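/*
 * With a virtual mem_map, only the struct pages that are backed by
 * actual memory get initialized, by walking the EFI memory map;
 * otherwise this falls through to the generic memmap_init_zone().
 */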
void
memmap_init (unsigned long size, int nid, unsigned long zone,
	     unsigned long start_pfn)
{
	if (!vmem_map)
		memmap_init_zone(size, nid, zone, start_pfn);
	else {
		struct page *start;
		struct memmap_init_callback_data args;

		start = pfn_to_page(start_pfn);
		args.start = start;
		args.end = start + size;
		args.nid = nid;
		args.zone = zone;

		efi_memmap_walk(virtual_memmap_init, &args);
	}
}

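/*
 * A pfn is valid iff its struct page in the virtual mem_map is backed
 * by real memory.  Probe it with __get_user(), which fails gracefully
 * on unmapped holes; the last byte is checked too in case the entry
 * straddles a page boundary.
 */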
int
ia64_pfn_valid (unsigned long pfn)
{
	char byte;
	struct page *pg = pfn_to_page(pfn);

	return     (__get_user(byte, (char __user *) pg) == 0)
		&& ((((u64)pg & PAGE_MASK) == (((u64)(pg + 1) - 1) & PAGE_MASK))
			|| (__get_user(byte, (char __user *) (pg + 1) - 1) == 0));
}
EXPORT_SYMBOL(ia64_pfn_valid);

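/*
 * efi_memmap_walk() callback: record in *arg the largest gap found
 * between consecutive memory descriptors.
 */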
int
find_largest_hole (u64 start, u64 end, void *arg)
{
	u64 *max_gap = arg;

	static u64 last_end = PAGE_OFFSET;

	/* NOTE: this algorithm assumes efi memmap table is ordered */

	if (*max_gap < (start - last_end))
		*max_gap = start - last_end;
	last_end = end;
	return 0;
}
#endif /* CONFIG_VIRTUAL_MEM_MAP */

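/*
 * efi_memmap_walk() callback: count the reserved pages in [start, end)
 * and add them to the total at *arg.
 */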
static int
count_reserved_pages (u64 start, u64 end, void *arg)
{
	unsigned long num_reserved = 0;
	unsigned long *count = arg;

	for (; start < end; start += PAGE_SIZE)
		if (PageReserved(virt_to_page(start)))
			++num_reserved;
	*count += num_reserved;
	return 0;
}

/*
 * Boot command-line option "nolwsys" can be used to disable the use of any light-weight
 * system call handler.  When this option is in effect, all fsyscalls will end up bubbling
 * down into the kernel and calling the normal (heavy-weight) syscall handler.  This is
 * useful for performance testing, but conceivably could also come in handy for debugging
 * purposes.
 */

static int nolwsys;

static int __init
nolwsys_setup (char *s)
{
	nolwsys = 1;
	return 1;
}

__setup("nolwsys", nolwsys_setup);

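/*
 * Final memory initialization: release bootmem to the page allocator,
 * print the memory statistics, set up the fsyscall table, and install
 * the gate page.
 */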
void
mem_init (void)
{
	long reserved_pages, codesize, datasize, initsize;
	pg_data_t *pgdat;
	int i;
	static struct kcore_list kcore_mem, kcore_vmem, kcore_kernel;

	BUG_ON(PTRS_PER_PGD * sizeof(pgd_t) != PAGE_SIZE);
	BUG_ON(PTRS_PER_PMD * sizeof(pmd_t) != PAGE_SIZE);
	BUG_ON(PTRS_PER_PTE * sizeof(pte_t) != PAGE_SIZE);

#ifdef CONFIG_PCI
	/*
	 * This needs to be called _after_ the command line has been parsed but _before_
	 * any drivers that may need the PCI DMA interface are initialized or bootmem has
	 * been freed.
	 */
	platform_dma_init();
#endif

#ifndef CONFIG_DISCONTIGMEM
	if (!mem_map)
		BUG();
	max_mapnr = max_low_pfn;
#endif

	high_memory = __va(max_low_pfn * PAGE_SIZE);

	kclist_add(&kcore_mem, __va(0), max_low_pfn * PAGE_SIZE);
	kclist_add(&kcore_vmem, (void *)VMALLOC_START, VMALLOC_END-VMALLOC_START);
	kclist_add(&kcore_kernel, _stext, _end - _stext);

	for_each_pgdat(pgdat)
		totalram_pages += free_all_bootmem_node(pgdat);

	reserved_pages = 0;
	efi_memmap_walk(count_reserved_pages, &reserved_pages);

	codesize =  (unsigned long) _etext - (unsigned long) _stext;
	datasize =  (unsigned long) _edata - (unsigned long) _etext;
	initsize =  (unsigned long) __init_end - (unsigned long) __init_begin;

	printk(KERN_INFO "Memory: %luk/%luk available (%luk code, %luk reserved, "
	       "%luk data, %luk init)\n", (unsigned long) nr_free_pages() << (PAGE_SHIFT - 10),
	       num_physpages << (PAGE_SHIFT - 10), codesize >> 10,
	       reserved_pages << (PAGE_SHIFT - 10), datasize >> 10, initsize >> 10);

	/*
	 * For fsyscall entry points with no light-weight handler, use the ordinary
	 * (heavy-weight) handler, but mark it by setting bit 0, so the fsyscall entry
	 * code can tell them apart.
	 */
	for (i = 0; i < NR_syscalls; ++i) {
		extern unsigned long fsyscall_table[NR_syscalls];
		extern unsigned long sys_call_table[NR_syscalls];

		if (!fsyscall_table[i] || nolwsys)
			fsyscall_table[i] = sys_call_table[i] | 1;
	}
	setup_gate();

#ifdef CONFIG_IA32_SUPPORT
	ia32_mem_init();
#endif
}