/*
 * Initialize MMU support.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/kernel.h>
#include <linux/init.h>

#include <linux/bootmem.h>
#include <linux/efi.h>
#include <linux/elf.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/bitops.h>
#include <linux/kexec.h>

#include <asm/dma.h>
#include <asm/io.h>
#include <asm/machvec.h>
#include <asm/numa.h>
#include <asm/patch.h>
#include <asm/pgalloc.h>
#include <asm/sal.h>
#include <asm/sections.h>
#include <asm/tlb.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/mca.h>
#include <asm/paravirt.h>

extern void ia64_tlb_init (void);

unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;

#ifdef CONFIG_VIRTUAL_MEM_MAP
unsigned long VMALLOC_END = VMALLOC_END_INIT;
EXPORT_SYMBOL(VMALLOC_END);
struct page *vmem_map;
EXPORT_SYMBOL(vmem_map);
#endif

struct page *zero_page_memmap_ptr;	/* map entry for zero page */
EXPORT_SYMBOL(zero_page_memmap_ptr);

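/*
 * Lazily bring the i-cache into coherence with the d-cache for a page
 * that is (typically) about to be mapped executable.  On ia64, PG_arch_1
 * serves as an "i-cache clean" flag: once the range has been flushed we
 * set the bit so later calls for the same page can return early.
 */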
void
__ia64_sync_icache_dcache (pte_t pte)
{
	unsigned long addr;
	struct page *page;

	page = pte_page(pte);
	addr = (unsigned long) page_address(page);

	if (test_bit(PG_arch_1, &page->flags))
		return;				/* i-cache is already coherent with d-cache */

	flush_icache_range(addr, addr + (PAGE_SIZE << compound_order(page)));
	set_bit(PG_arch_1, &page->flags);	/* mark page as clean */
}

/*
 * Since DMA is i-cache coherent, any (complete) pages that were written via
 * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
 * flush them when they get mapped into an executable vm-area.
 */
void
dma_mark_clean(void *addr, size_t size)
{
	unsigned long pg_addr, end;

	pg_addr = PAGE_ALIGN((unsigned long) addr);
	end = (unsigned long) addr + size;
	while (pg_addr + PAGE_SIZE <= end) {
		struct page *page = virt_to_page(pg_addr);
		set_bit(PG_arch_1, &page->flags);
		pg_addr += PAGE_SIZE;
	}
}

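/*
 * Choose the bottom of the register backing store (RBS) for the current
 * process.  The ia64 RSE spills stacked registers to a separate,
 * upward-growing backing store; we place its bottom one maximal stack
 * size (capped at MAX_USER_STACK_SIZE) below the initial stack pointer.
 */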
inline void
ia64_set_rbs_bot (void)
{
	unsigned long stack_size = rlimit_max(RLIMIT_STACK) & -16;

	if (stack_size > MAX_USER_STACK_SIZE)
		stack_size = MAX_USER_STACK_SIZE;
	current->thread.rbs_bot = PAGE_ALIGN(current->mm->start_stack - stack_size);
}

/*
 * This performs some platform-dependent address space initialization.
 * On IA-64, we want to setup the VM area for the register backing
 * store (which grows upwards) and install the gateway page which is
 * used for signal trampolines, etc.
 */
void
ia64_init_addr_space (void)
{
	struct vm_area_struct *vma;

	ia64_set_rbs_bot();

	/*
	 * If we're out of memory and kmem_cache_alloc() returns NULL, we simply ignore
	 * the problem.  When the process attempts to write to the register backing store
	 * for the first time, it will get a SEGFAULT in this case.
	 */
	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
	if (vma) {
		INIT_LIST_HEAD(&vma->anon_vma_chain);
		vma->vm_mm = current->mm;
		vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
		vma->vm_end = vma->vm_start + PAGE_SIZE;
		vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
		down_write(&current->mm->mmap_sem);
		if (insert_vm_struct(current->mm, vma)) {
			up_write(&current->mm->mmap_sem);
			kmem_cache_free(vm_area_cachep, vma);
			return;
		}
		up_write(&current->mm->mmap_sem);
	}

	/* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
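	/*
	 * (A page with the NaT memory attribute returns a NaT token on
	 *  speculative loads instead of raising a fault, so a speculated
	 *  NULL dereference can be recovered cheaply through the chk.s
	 *  mechanism rather than via a trip through the fault handler.)
	 */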
	if (!(current->personality & MMAP_PAGE_ZERO)) {
		vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
		if (vma) {
			INIT_LIST_HEAD(&vma->anon_vma_chain);
			vma->vm_mm = current->mm;
			vma->vm_end = PAGE_SIZE;
			vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
			vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO |
					VM_DONTEXPAND | VM_DONTDUMP;
			down_write(&current->mm->mmap_sem);
			if (insert_vm_struct(current->mm, vma)) {
				up_write(&current->mm->mmap_sem);
				kmem_cache_free(vm_area_cachep, vma);
				return;
			}
			up_write(&current->mm->mmap_sem);
		}
	}
}

void
free_initmem (void)
{
	free_reserved_area(ia64_imva(__init_begin), ia64_imva(__init_end),
			   -1, "unused kernel");
}

void __init
free_initrd_mem (unsigned long start, unsigned long end)
{
	/*
	 * EFI uses 4KB pages while the kernel can use 4KB or bigger.
	 * Thus EFI and the kernel may have different page sizes. It is
	 * therefore possible to have the initrd share the same page as
	 * the end of the kernel (given current setup).
	 *
	 * To avoid freeing/using the wrong page (kernel sized) we:
	 *	- align up the beginning of initrd
	 *	- align down the end of initrd
	 *
	 *  |             |
	 *  |=============| a000
	 *  |             |
	 *  |             |
	 *  |             | 9000
	 *  |/////////////|
	 *  |/////////////|
	 *  |=============| 8000
	 *  |///INITRD////|
	 *  |/////////////|
	 *  |/////////////| 7000
	 *  |             |
	 *  |KKKKKKKKKKKKK|
	 *  |=============| 6000
	 *  |KKKKKKKKKKKKK|
	 *  |KKKKKKKKKKKKK|
	 *  K=kernel using 8KB pages
	 *
	 * In this example, we must free page 8000 ONLY. So we must align up
	 * initrd_start and keep initrd_end as is.
	 */
	start = PAGE_ALIGN(start);
	end = end & PAGE_MASK;

	if (start < end)
		printk(KERN_INFO "Freeing initrd memory: %ldkB freed\n", (end - start) >> 10);

	for (; start < end; start += PAGE_SIZE) {
		if (!virt_addr_valid(start))
			continue;
		free_reserved_page(virt_to_page(start));
	}
}

/*
 * This installs a clean page in the kernel's page table.
 */
static struct page * __init
put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (!PageReserved(page))
		printk(KERN_ERR "put_kernel_page: page at 0x%p not in reserved memory\n",
		       page_address(page));

	pgd = pgd_offset_k(address);		/* note: this is NOT pgd_offset()! */

	{
		pud = pud_alloc(&init_mm, pgd, address);
		if (!pud)
			goto out;
		pmd = pmd_alloc(&init_mm, pud, address);
		if (!pmd)
			goto out;
		pte = pte_alloc_kernel(pmd, address);
		if (!pte)
			goto out;
		if (!pte_none(*pte))
			goto out;
		set_pte(pte, mk_pte(page, pgprot));
	}
  out:
	/* no need for flush_tlb */
	return page;
}

static void __init
setup_gate (void)
{
	void *gate_section;
	struct page *page;

	/*
	 * Map the gate page twice: once read-only to export the ELF
	 * headers etc. and once as an execute-only page to enable
	 * privilege-promotion via "epc":
	 */
	gate_section = paravirt_get_gate_section();
	page = virt_to_page(ia64_imva(gate_section));
	put_kernel_page(page, GATE_ADDR, PAGE_READONLY);
#ifdef HAVE_BUGGY_SEGREL
	page = virt_to_page(ia64_imva(gate_section + PAGE_SIZE));
	put_kernel_page(page, GATE_ADDR + PAGE_SIZE, PAGE_GATE);
#else
	put_kernel_page(page, GATE_ADDR + PERCPU_PAGE_SIZE, PAGE_GATE);
	/* Fill in the holes (if any) with read-only zero pages: */
	{
		unsigned long addr;

		for (addr = GATE_ADDR + PAGE_SIZE;
		     addr < GATE_ADDR + PERCPU_PAGE_SIZE;
		     addr += PAGE_SIZE)
		{
			put_kernel_page(ZERO_PAGE(0), addr,
					PAGE_READONLY);
			put_kernel_page(ZERO_PAGE(0), addr + PERCPU_PAGE_SIZE,
					PAGE_READONLY);
		}
	}
#endif
	ia64_patch_gate();
}

void ia64_mmu_init(void *my_cpu_data)
{
	unsigned long pta, impl_va_bits;
	extern void tlb_init(void);

#ifdef CONFIG_DISABLE_VHPT
#	define VHPT_ENABLE_BIT	0
#else
#	define VHPT_ENABLE_BIT	1
#endif

	/*
	 * Check if the virtually mapped linear page table (VMLPT) overlaps with a mapped
	 * address space.  The IA-64 architecture guarantees that at least 50 bits of
	 * virtual address space are implemented but if we pick a large enough page size
	 * (e.g., 64KB), the mapped address space is big enough that it will overlap with
	 * VMLPT.  I assume that once we run on machines big enough to warrant 64KB pages,
	 * IMPL_VA_MSB will be significantly bigger, so this is unlikely to become a
	 * problem in practice.  Alternatively, we could truncate the top of the mapped
	 * address space to not permit mappings that would overlap with the VMLPT.
	 * --davidm 00/12/06
	 */
#	define pte_bits			3
#	define mapped_space_bits	(3*(PAGE_SHIFT - pte_bits) + PAGE_SHIFT)
	/*
	 * The virtual page table has to cover the entire implemented address space within
	 * a region even though not all of this space may be mappable.  The reason for
	 * this is that the Access bit and Dirty bit fault handlers perform
	 * non-speculative accesses to the virtual page table, so the address range of the
	 * virtual page table itself needs to be covered by the virtual page table.
	 */
#	define vmlpt_bits		(impl_va_bits - PAGE_SHIFT + pte_bits)
#	define POW2(n)			(1ULL << (n))

	impl_va_bits = ffz(~(local_cpu_data->unimpl_va_mask | (7UL << 61)));

	if (impl_va_bits < 51 || impl_va_bits > 61)
		panic("CPU has bogus IMPL_VA_MSB value of %lu!\n", impl_va_bits - 1);
	/*
	 * mapped_space_bits - PAGE_SHIFT is the total number of ptes we need,
	 * which must fit into "vmlpt_bits - pte_bits" slots.  Second half of
	 * the test makes sure that our mapped space doesn't overlap the
	 * unimplemented hole in the middle of the region.
	 */
	if ((mapped_space_bits - PAGE_SHIFT > vmlpt_bits - pte_bits) ||
	    (mapped_space_bits > impl_va_bits - 1))
		panic("Cannot build a big enough virtual-linear page table"
		      " to cover mapped address space.\n"
		      " Try using a smaller page size.\n");


	/* place the VMLPT at the end of each page-table mapped region: */
	pta = POW2(61) - POW2(vmlpt_bits);
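
	/*
	 * Worked example (assuming the default 16KB pages, so PAGE_SHIFT = 14,
	 * and the architectural minimum impl_va_bits = 51): vmlpt_bits =
	 * 51 - 14 + 3 = 40, so the VMLPT occupies the top 2^40 bytes of each
	 * 2^61-byte region and pta = 2^61 - 2^40.
	 */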

	/*
	 * Set the (virtually mapped linear) page table address.  Bit
	 * 8 selects between the short and long format, bits 2-7 the
	 * size of the table, and bit 0 whether the VHPT walker is
	 * enabled.
	 */
	ia64_set_pta(pta | (0 << 8) | (vmlpt_bits << 2) | VHPT_ENABLE_BIT);

	ia64_tlb_init();

#ifdef	CONFIG_HUGETLB_PAGE
	ia64_set_rr(HPAGE_REGION_BASE, HPAGE_SHIFT << 2);
	ia64_srlz_d();
#endif
}

#ifdef CONFIG_VIRTUAL_MEM_MAP
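/*
 * Walk the kernel page table backing vmem_map, starting at the entry for
 * pfn node_start_pfn + i, and skip over any unmapped (hole) portions.
 * Returns the offset, relative to node_start_pfn, of the next pfn whose
 * struct page is actually backed by memory.
 */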
int vmemmap_find_next_valid_pfn(int node, int i)
{
	unsigned long end_address, hole_next_pfn;
	unsigned long stop_address;
	pg_data_t *pgdat = NODE_DATA(node);

	end_address = (unsigned long) &vmem_map[pgdat->node_start_pfn + i];
	end_address = PAGE_ALIGN(end_address);
	stop_address = (unsigned long) &vmem_map[pgdat_end_pfn(pgdat)];

	do {
		pgd_t *pgd;
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		pgd = pgd_offset_k(end_address);
		if (pgd_none(*pgd)) {
			end_address += PGDIR_SIZE;
			continue;
		}

		pud = pud_offset(pgd, end_address);
		if (pud_none(*pud)) {
			end_address += PUD_SIZE;
			continue;
		}

		pmd = pmd_offset(pud, end_address);
		if (pmd_none(*pmd)) {
			end_address += PMD_SIZE;
			continue;
		}

		pte = pte_offset_kernel(pmd, end_address);
retry_pte:
		if (pte_none(*pte)) {
			end_address += PAGE_SIZE;
			pte++;
			if ((end_address < stop_address) &&
			    (end_address != ALIGN(end_address, 1UL << PMD_SHIFT)))
				goto retry_pte;
			continue;
		}
		/* Found next valid vmem_map page */
		break;
	} while (end_address < stop_address);

	end_address = min(end_address, stop_address);
	end_address = end_address - (unsigned long) vmem_map + sizeof(struct page) - 1;
	hole_next_pfn = end_address / sizeof(struct page);
	return hole_next_pfn - pgdat->node_start_pfn;
}

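/*
 * Intended as an efi_memmap_walk() callback: allocate (from bootmem, on
 * the node owning the memory) the page-table pages and backing pages
 * needed to map the portion of the virtual mem_map covering [start, end).
 */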
int __init create_mem_map_page_table(u64 start, u64 end, void *arg)
{
	unsigned long address, start_page, end_page;
	struct page *map_start, *map_end;
	int node;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
	map_end   = vmem_map + (__pa(end) >> PAGE_SHIFT);

	start_page = (unsigned long) map_start & PAGE_MASK;
	end_page = PAGE_ALIGN((unsigned long) map_end);
	node = paddr_to_nid(__pa(start));

	for (address = start_page; address < end_page; address += PAGE_SIZE) {
		pgd = pgd_offset_k(address);
		if (pgd_none(*pgd))
			pgd_populate(&init_mm, pgd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
		pud = pud_offset(pgd, address);

		if (pud_none(*pud))
			pud_populate(&init_mm, pud, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
		pmd = pmd_offset(pud, address);

		if (pmd_none(*pmd))
			pmd_populate_kernel(&init_mm, pmd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
		pte = pte_offset_kernel(pmd, address);

		if (pte_none(*pte))
			set_pte(pte, pfn_pte(__pa(alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE)) >> PAGE_SHIFT,
					     PAGE_KERNEL));
	}
	return 0;
}

struct memmap_init_callback_data {
	struct page *start;
	struct page *end;
	int nid;
	unsigned long zone;
};

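/*
 * efi_memmap_walk() callback (see memmap_init() below): initialize the
 * struct page entries of the virtual mem_map corresponding to the
 * physical range [start, end), clamped to the window described by *arg.
 */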
static int __meminit
virtual_memmap_init(u64 start, u64 end, void *arg)
{
	struct memmap_init_callback_data *args;
	struct page *map_start, *map_end;

	args = (struct memmap_init_callback_data *) arg;
	map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
	map_end   = vmem_map + (__pa(end) >> PAGE_SHIFT);

	if (map_start < args->start)
		map_start = args->start;
	if (map_end > args->end)
		map_end = args->end;

	/*
	 * We have to initialize "out of bounds" struct page elements that fit completely
	 * on the same pages that were allocated for the "in bounds" elements because they
	 * may be referenced later (and found to be "reserved").
	 */
	map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1)) / sizeof(struct page);
	map_end += ((PAGE_ALIGN((unsigned long) map_end) - (unsigned long) map_end)
		    / sizeof(struct page));

	if (map_start < map_end)
		memmap_init_zone((unsigned long)(map_end - map_start),
				 args->nid, args->zone, page_to_pfn(map_start),
				 MEMMAP_EARLY);
	return 0;
}

void __meminit
memmap_init (unsigned long size, int nid, unsigned long zone,
	     unsigned long start_pfn)
{
	if (!vmem_map)
		memmap_init_zone(size, nid, zone, start_pfn, MEMMAP_EARLY);
	else {
		struct page *start;
		struct memmap_init_callback_data args;

		start = pfn_to_page(start_pfn);
		args.start = start;
		args.end = start + size;
		args.nid = nid;
		args.zone = zone;

		efi_memmap_walk(virtual_memmap_init, &args);
	}
}

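/*
 * With a virtually mapped mem_map, a pfn is only valid if its struct page
 * is actually backed by a mapped page.  Probe the first byte via
 * __get_user(), which fails gracefully on unmapped addresses, and probe
 * the last byte too when the struct page straddles a page boundary.
 */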
int
ia64_pfn_valid (unsigned long pfn)
{
	char byte;
	struct page *pg = pfn_to_page(pfn);

	return     (__get_user(byte, (char __user *) pg) == 0)
		&& ((((u64)pg & PAGE_MASK) == (((u64)(pg + 1) - 1) & PAGE_MASK))
			|| (__get_user(byte, (char __user *) (pg + 1) - 1) == 0));
}
EXPORT_SYMBOL(ia64_pfn_valid);

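/*
 * Memory-map walk callback: track the largest gap between consecutive
 * memory descriptors, which callers can use to judge whether a virtually
 * mapped mem_map is worthwhile.
 */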
int __init find_largest_hole(u64 start, u64 end, void *arg)
{
	u64 *max_gap = arg;

	static u64 last_end = PAGE_OFFSET;

	/* NOTE: this algorithm assumes efi memmap table is ordered */

	if (*max_gap < (start - last_end))
		*max_gap = start - last_end;
	last_end = end;
	return 0;
}

#endif /* CONFIG_VIRTUAL_MEM_MAP */

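/*
 * Boot-time memory-map walk callback (note the (start, len, nid)
 * signature): register [start, start + len) with memblock, trimming out
 * the crash kernel's reserved region when kexec is configured.
 */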
int __init register_active_ranges(u64 start, u64 len, int nid)
{
	u64 end = start + len;

#ifdef CONFIG_KEXEC
	if (start > crashk_res.start && start < crashk_res.end)
		start = crashk_res.end;
	if (end > crashk_res.start && end < crashk_res.end)
		end = crashk_res.start;
#endif

	if (start < end)
		memblock_add_node(__pa(start), end - start, nid);
	return 0;
}

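/*
 * Memory-map walk callback: widen the global min_low_pfn/max_low_pfn
 * range to cover [start, end), rounding to page or granule boundaries
 * depending on the memory model.
 */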
int
find_max_min_low_pfn (u64 start, u64 end, void *arg)
{
	unsigned long pfn_start, pfn_end;
#ifdef CONFIG_FLATMEM
	pfn_start = (PAGE_ALIGN(__pa(start))) >> PAGE_SHIFT;
	pfn_end = (PAGE_ALIGN(__pa(end - 1))) >> PAGE_SHIFT;
#else
	pfn_start = GRANULEROUNDDOWN(__pa(start)) >> PAGE_SHIFT;
	pfn_end = GRANULEROUNDUP(__pa(end - 1)) >> PAGE_SHIFT;
#endif
	min_low_pfn = min(min_low_pfn, pfn_start);
	max_low_pfn = max(max_low_pfn, pfn_end);
	return 0;
}

/*
 * Boot command-line option "nolwsys" can be used to disable the use of any light-weight
 * system call handler.  When this option is in effect, all fsyscalls will end up bubbling
 * down into the kernel and calling the normal (heavy-weight) syscall handler.  This is
 * useful for performance testing, but conceivably could also come in handy for debugging
 * purposes.
 */

static int nolwsys __initdata;

static int __init
nolwsys_setup (char *s)
{
	nolwsys = 1;
	return 1;
}

__setup("nolwsys", nolwsys_setup);

void __init
mem_init (void)
{
	int i;

	BUG_ON(PTRS_PER_PGD * sizeof(pgd_t) != PAGE_SIZE);
	BUG_ON(PTRS_PER_PMD * sizeof(pmd_t) != PAGE_SIZE);
	BUG_ON(PTRS_PER_PTE * sizeof(pte_t) != PAGE_SIZE);

#ifdef CONFIG_PCI
	/*
	 * This needs to be called _after_ the command line has been parsed but _before_
	 * any drivers that may need the PCI DMA interface are initialized or bootmem has
	 * been freed.
	 */
	platform_dma_init();
#endif

#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
#endif

	set_max_mapnr(max_low_pfn);
	high_memory = __va(max_low_pfn * PAGE_SIZE);
	free_all_bootmem();
	mem_init_print_info(NULL);

	/*
	 * For fsyscall entry points with no light-weight handler, use the ordinary
	 * (heavy-weight) handler, but mark it by setting bit 0, so the fsyscall entry
	 * code can tell them apart.
	 */
	for (i = 0; i < NR_syscalls; ++i) {
		extern unsigned long sys_call_table[NR_syscalls];
		unsigned long *fsyscall_table = paravirt_get_fsyscall_table();

		if (!fsyscall_table[i] || nolwsys)
			fsyscall_table[i] = sys_call_table[i] | 1;
	}
	setup_gate();
}

#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size)
{
	pg_data_t *pgdat;
	struct zone *zone;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	pgdat = NODE_DATA(nid);

	zone = pgdat->node_zones + ZONE_NORMAL;
	ret = __add_pages(nid, zone, start_pfn, nr_pages);

	if (ret)
		printk("%s: Problem encountered in __add_pages() as ret=%d\n",
		       __func__, ret);

	return ret;
}

#ifdef CONFIG_MEMORY_HOTREMOVE
int arch_remove_memory(u64 start, u64 size)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	struct zone *zone;
	int ret;

	zone = page_zone(pfn_to_page(start_pfn));
	ret = __remove_pages(zone, start_pfn, nr_pages);
	if (ret)
		pr_warn("%s: Problem encountered in __remove_pages() as"
			" ret=%d\n", __func__, ret);

	return ret;
}
#endif
#endif

/*
 * Even when CONFIG_IA32_SUPPORT is not enabled it is
 * useful to have the Linux/x86 domain registered to
 * avoid an attempted module load when emulators call
 * personality(PER_LINUX32).  This saves several milliseconds
 * on each such call.
 */
static struct exec_domain ia32_exec_domain;

static int __init
per_linux32_init(void)
{
	ia32_exec_domain.name = "Linux/x86";
	ia32_exec_domain.handler = NULL;
	ia32_exec_domain.pers_low = PER_LINUX32;
	ia32_exec_domain.pers_high = PER_LINUX32;
	ia32_exec_domain.signal_map = default_exec_domain.signal_map;
	ia32_exec_domain.signal_invmap = default_exec_domain.signal_invmap;
	register_exec_domain(&ia32_exec_domain);

	return 0;
}

__initcall(per_linux32_init);

/**
 * show_mem - give short summary of memory stats
 *
 * Shows a simple page count of reserved and used pages in the system.
 * For discontig machines, it does this on a per-pgdat basis.
 */
void show_mem(unsigned int filter)
{
	int total_reserved = 0;
	unsigned long total_present = 0;
	pg_data_t *pgdat;

	printk(KERN_INFO "Mem-info:\n");
	show_free_areas(filter);
	printk(KERN_INFO "Node memory in pages:\n");
	for_each_online_pgdat(pgdat) {
		unsigned long present;
		unsigned long flags;
		int reserved = 0;
		int nid = pgdat->node_id;
		int zoneid;

		if (skip_free_areas_node(filter, nid))
			continue;
		pgdat_resize_lock(pgdat, &flags);

		for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
			struct zone *zone = &pgdat->node_zones[zoneid];
			if (!populated_zone(zone))
				continue;

			reserved += zone->present_pages - zone->managed_pages;
		}
		present = pgdat->node_present_pages;

		pgdat_resize_unlock(pgdat, &flags);
		total_present += present;
		total_reserved += reserved;
		printk(KERN_INFO "Node %4d:  RAM: %11ld, rsvd: %8d, ",
		       nid, present, reserved);
	}
	printk(KERN_INFO "%ld pages of RAM\n", total_present);
	printk(KERN_INFO "%d reserved pages\n", total_reserved);
	printk(KERN_INFO "Total of %ld pages in page table cache\n",
	       quicklist_total_size());
	printk(KERN_INFO "%ld free buffer pages\n", nr_free_buffer_pages());
}