/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/pfn.h>

#include <asm/asm-offsets.h>
#include <asm/bootinfo.h>
#include <asm/cachectl.h>
#include <asm/cpu.h>
#include <asm/dma.h>
#include <asm/kmap_types.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

/* Atomicity and interruptibility */
#ifdef CONFIG_MIPS_MT_SMTC

#include <asm/mipsmtregs.h>

#define ENTER_CRITICAL(flags) \
	{ \
	unsigned int mvpflags; \
	local_irq_save(flags);\
	mvpflags = dvpe()
#define EXIT_CRITICAL(flags) \
	evpe(mvpflags); \
	local_irq_restore(flags); \
	}
#else

#define ENTER_CRITICAL(flags) local_irq_save(flags)
#define EXIT_CRITICAL(flags) local_irq_restore(flags)

#endif /* CONFIG_MIPS_MT_SMTC */
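
/*
 * The SMTC variants above open a compound block in ENTER_CRITICAL and close
 * it in EXIT_CRITICAL, so the two must always appear as a matched pair in
 * the same scope.  A rough usage sketch (illustrative only, not code from
 * this file):
 *
 *	unsigned long flags;
 *
 *	ENTER_CRITICAL(flags);
 *	... poke CP0 TLB registers ...
 *	EXIT_CRITICAL(flags);
 *
 * Besides disabling local interrupts, dvpe()/evpe() quiesce the other
 * virtual processing elements so TLB writes are not interleaved.
 */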

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/*
 * We have up to 8 empty zeroed pages so we can map one of the right colour
 * when needed.  This is necessary only on R4000 / R4400 SC and MC versions
 * where we have to avoid VCED / VECI exceptions for good performance at
 * any price.  Since page is never written to after the initialization we
 * don't have to care about aliases on other CPUs.
 */
unsigned long empty_zero_page, zero_page_mask;

/*
 * Not static inline because used by IP27 special magic initialization code
 */
unsigned long setup_zero_pages(void)
{
	unsigned int order;
	unsigned long size;
	struct page *page;

	if (cpu_has_vce)
		order = 3;
	else
		order = 0;

	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!empty_zero_page)
		panic("Oh boy, that early out of memory?");

	page = virt_to_page((void *)empty_zero_page);
	split_page(page, order);
	while (page < virt_to_page((void *)(empty_zero_page + (PAGE_SIZE << order)))) {
		SetPageReserved(page);
		page++;
	}

	size = PAGE_SIZE << order;
	zero_page_mask = (size - 1) & PAGE_MASK;

	return 1UL << order;
}
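
/*
 * zero_page_mask spans the extra zero pages allocated above.  Elsewhere in
 * the MIPS headers, ZERO_PAGE() then picks the copy whose cache colour
 * matches the faulting user address, roughly along these lines (sketch, not
 * the authoritative definition):
 *
 *	virt_to_page((void *)(empty_zero_page +
 *			      ((unsigned long)(vaddr) & zero_page_mask)))
 */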

/*
 * These are almost like kmap_atomic / kunmap_atomic except they take an
 * additional address argument as the hint.
 */

#define kmap_get_fixmap_pte(vaddr)					\
	pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr))

#ifdef CONFIG_MIPS_MT_SMTC
static pte_t *kmap_coherent_pte;
static void __init kmap_coherent_init(void)
{
	unsigned long vaddr;

	/* cache the first coherent kmap pte */
	vaddr = __fix_to_virt(FIX_CMAP_BEGIN);
	kmap_coherent_pte = kmap_get_fixmap_pte(vaddr);
}
#else
static inline void kmap_coherent_init(void) {}
#endif

void *kmap_coherent(struct page *page, unsigned long addr)
{
	enum fixed_addresses idx;
	unsigned long vaddr, flags, entrylo;
	unsigned long old_ctx;
	pte_t pte;
	int tlbidx;

	BUG_ON(Page_dcache_dirty(page));

	inc_preempt_count();
	idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
#ifdef CONFIG_MIPS_MT_SMTC
	idx += FIX_N_COLOURS * smp_processor_id();
#endif
	vaddr = __fix_to_virt(FIX_CMAP_END - idx);
	pte = mk_pte(page, PAGE_KERNEL);
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
	entrylo = pte.pte_high;
#else
	entrylo = pte_val(pte) >> 6;
#endif

	ENTER_CRITICAL(flags);
	old_ctx = read_c0_entryhi();
	write_c0_entryhi(vaddr & (PAGE_MASK << 1));
	write_c0_entrylo0(entrylo);
	write_c0_entrylo1(entrylo);
#ifdef CONFIG_MIPS_MT_SMTC
	set_pte(kmap_coherent_pte - (FIX_CMAP_END - idx), pte);
	/* preload TLB instead of local_flush_tlb_one() */
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	tlbidx = read_c0_index();
	mtc0_tlbw_hazard();
	if (tlbidx < 0)
		tlb_write_random();
	else
		tlb_write_indexed();
#else
	tlbidx = read_c0_wired();
	write_c0_wired(tlbidx + 1);
	write_c0_index(tlbidx);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
#endif
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	EXIT_CRITICAL(flags);

	return (void *)vaddr;
}

#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))
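
/*
 * UNIQUE_ENTRYHI() generates, for each TLB index, a distinct EntryHi value
 * in unmapped CKSEG0 space.  kunmap_coherent() below reprograms the retired
 * wired entry with such a value (and zero EntryLo registers) so it can
 * never match a real translation.
 */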

void kunmap_coherent(void)
{
#ifndef CONFIG_MIPS_MT_SMTC
	unsigned int wired;
	unsigned long flags, old_ctx;

	ENTER_CRITICAL(flags);
	old_ctx = read_c0_entryhi();
	wired = read_c0_wired() - 1;
	write_c0_wired(wired);
	write_c0_index(wired);
	write_c0_entryhi(UNIQUE_ENTRYHI(wired));
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	EXIT_CRITICAL(flags);
#endif
	dec_preempt_count();
	preempt_check_resched();
}
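
/*
 * kmap_coherent() and kunmap_coherent() must be strictly paired: the map
 * side raises the preempt count and (in the non-SMTC case) adds a wired TLB
 * entry, and the unmap side retires that entry and drops the count again.
 */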

void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;

	vto = kmap_atomic(to, KM_USER1);
	if (cpu_has_dc_aliases &&
	    page_mapped(from) && !Page_dcache_dirty(from)) {
		vfrom = kmap_coherent(from, vaddr);
		copy_page(vto, vfrom);
		kunmap_coherent();
	} else {
		vfrom = kmap_atomic(from, KM_USER0);
		copy_page(vto, vfrom);
		kunmap_atomic(vfrom, KM_USER0);
	}
	if ((!cpu_has_ic_fills_f_dc) ||
	    pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
		flush_data_cache_page((unsigned long)vto);
	kunmap_atomic(vto, KM_USER1);
	/* Make sure this page is cleared on other CPUs too before using it */
	smp_wmb();
}
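
/*
 * The kmap_coherent() path above reads the source page through a kernel
 * mapping of the same cache colour as the user mapping, so on aliasing
 * D-caches the copy sees the data userspace last wrote rather than a stale
 * line.  When that is not safe (page unmapped or already marked dirty in
 * the D-cache) a plain kmap_atomic() mapping is used instead.
 */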

EXPORT_SYMBOL(copy_user_highpage);

void copy_to_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len)
{
	if (cpu_has_dc_aliases &&
	    page_mapped(page) && !Page_dcache_dirty(page)) {
		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(vto, src, len);
		kunmap_coherent();
	} else {
		memcpy(dst, src, len);
		if (cpu_has_dc_aliases)
			SetPageDcacheDirty(page);
	}
	if ((vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc)
		flush_cache_page(vma, vaddr, page_to_pfn(page));
}
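
/*
 * The VM_EXEC flush above matters for things like ptrace inserting
 * breakpoints: once instructions have been written into the page, the
 * caches are flushed so that a CPU whose I-cache does not fill from the
 * D-cache still fetches the new code.
 */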

EXPORT_SYMBOL(copy_to_user_page);

void copy_from_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len)
{
	if (cpu_has_dc_aliases &&
	    page_mapped(page) && !Page_dcache_dirty(page)) {
		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(dst, vfrom, len);
		kunmap_coherent();
	} else {
		memcpy(dst, src, len);
		if (cpu_has_dc_aliases)
			SetPageDcacheDirty(page);
	}
}

EXPORT_SYMBOL(copy_from_user_page);


#ifdef CONFIG_HIGHMEM
unsigned long highstart_pfn, highend_pfn;

pte_t *kmap_pte;
pgprot_t kmap_prot;

static void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/* cache the first kmap pte */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

	kmap_prot = PAGE_KERNEL;
}
#endif /* CONFIG_HIGHMEM */

void __init fixrange_init(unsigned long start, unsigned long end,
	pgd_t *pgd_base)
{
#if defined(CONFIG_HIGHMEM) || defined(CONFIG_MIPS_MT_SMTC)
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int i, j, k;
	unsigned long vaddr;

	vaddr = start;
	i = __pgd_offset(vaddr);
	j = __pud_offset(vaddr);
	k = __pmd_offset(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
		pud = (pud_t *)pgd;
		for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
			pmd = (pmd_t *)pud;
			for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
				if (pmd_none(*pmd)) {
					pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
					set_pmd(pmd, __pmd((unsigned long)pte));
					if (pte != pte_offset_kernel(pmd, 0))
						BUG();
				}
				vaddr += PMD_SIZE;
			}
			k = 0;
		}
		j = 0;
	}
#endif
}
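
/*
 * fixrange_init() only populates the upper page table levels for the fixmap
 * window: for each PMD-sized step of [start, end) that has no page table
 * yet it allocates one page of PTEs from bootmem and hooks it in.  A rough
 * call sketch (the exact callers live in the pagetable setup code):
 *
 *	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
 *	fixrange_init(vaddr, 0, pgd_base);
 */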

#ifndef CONFIG_NEED_MULTIPLE_NODES
static int __init page_is_ram(unsigned long pagenr)
{
	int i;

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long addr, end;

		if (boot_mem_map.map[i].type != BOOT_MEM_RAM)
			/* not usable memory */
			continue;

		addr = PFN_UP(boot_mem_map.map[i].addr);
		end = PFN_DOWN(boot_mem_map.map[i].addr +
			       boot_mem_map.map[i].size);

		if (pagenr >= addr && pagenr < end)
			return 1;
	}

	return 0;
}

void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long lastpfn;

	pagetable_init();

#ifdef CONFIG_HIGHMEM
	kmap_init();
#endif
	kmap_coherent_init();

#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
#endif
#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	lastpfn = max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
	lastpfn = highend_pfn;

	if (cpu_has_dc_aliases && max_low_pfn != highend_pfn) {
		printk(KERN_WARNING "This processor doesn't support highmem."
		       " %ldk highmem ignored\n",
		       (highend_pfn - max_low_pfn) << (PAGE_SHIFT - 10));
		max_zone_pfns[ZONE_HIGHMEM] = max_low_pfn;
		lastpfn = max_low_pfn;
	}
#endif

	free_area_init_nodes(max_zone_pfns);
}

static struct kcore_list kcore_mem, kcore_vmalloc;
#ifdef CONFIG_64BIT
static struct kcore_list kcore_kseg0;
#endif

void __init mem_init(void)
{
	unsigned long codesize, reservedpages, datasize, initsize;
	unsigned long tmp, ram;

#ifdef CONFIG_HIGHMEM
#ifdef CONFIG_DISCONTIGMEM
#error "CONFIG_HIGHMEM and CONFIG_DISCONTIGMEM don't work together yet"
#endif
	max_mapnr = highend_pfn;
#else
	max_mapnr = max_low_pfn;
#endif
	high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);

	totalram_pages += free_all_bootmem();
	totalram_pages -= setup_zero_pages();	/* Setup zeroed pages.  */

	reservedpages = ram = 0;
	for (tmp = 0; tmp < max_low_pfn; tmp++)
		if (page_is_ram(tmp)) {
			ram++;
			if (PageReserved(pfn_to_page(tmp)))
				reservedpages++;
		}
	num_physpages = ram;

#ifdef CONFIG_HIGHMEM
	for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
		struct page *page = pfn_to_page(tmp);

		if (!page_is_ram(tmp)) {
			SetPageReserved(page);
			continue;
		}
		ClearPageReserved(page);
		init_page_count(page);
		__free_page(page);
		totalhigh_pages++;
	}
	totalram_pages += totalhigh_pages;
	num_physpages += totalhigh_pages;
#endif

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

#ifdef CONFIG_64BIT
	if ((unsigned long) &_text > (unsigned long) CKSEG0)
		/* The -4 is a hack so that user tools don't have to handle
		   the overflow.  */
		kclist_add(&kcore_kseg0, (void *) CKSEG0, 0x80000000 - 4);
#endif
	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END - VMALLOC_START);

	printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, "
	       "%ldk reserved, %ldk data, %ldk init, %ldk highmem)\n",
	       (unsigned long) nr_free_pages() << (PAGE_SHIFT - 10),
	       ram << (PAGE_SHIFT - 10),
	       codesize >> 10,
	       reservedpages << (PAGE_SHIFT - 10),
	       datasize >> 10,
	       initsize >> 10,
	       (unsigned long) (totalhigh_pages << (PAGE_SHIFT - 10)));
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */

void free_init_pages(const char *what, unsigned long begin, unsigned long end)
{
	unsigned long pfn;

	for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) {
		struct page *page = pfn_to_page(pfn);
		void *addr = phys_to_virt(PFN_PHYS(pfn));

		ClearPageReserved(page);
		init_page_count(page);
		memset(addr, POISON_FREE_INITMEM, PAGE_SIZE);
		__free_page(page);
		totalram_pages++;
	}
	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_init_pages("initrd memory",
			virt_to_phys((void *)start),
			virt_to_phys((void *)end));
}
#endif

void __init_refok free_initmem(void)
{
	prom_free_prom_memory();
	free_init_pages("unused kernel memory",
			__pa_symbol(&__init_begin),
			__pa_symbol(&__init_end));
}
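
/*
 * free_init_pages() works on physical address ranges, so the callers above
 * convert first: virt_to_phys() for the initrd, which lives in the linear
 * mapping, and __pa_symbol() for the __init sections, which are addressed
 * through kernel symbols.  Each page in the range is poisoned, handed back
 * to the page allocator and counted into totalram_pages.
 */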

unsigned long pgd_current[NR_CPUS];
/*
 * On 64-bit we've got three-level pagetables with a slightly
 * different layout ...
 */
#define __page_aligned(order) __attribute__((__aligned__(PAGE_SIZE<<order)))

/*
 * gcc 3.3 and older have trouble determining that PTRS_PER_PGD and PGD_ORDER
 * are constants, so we use the _PTRS_PER_PGD and _PGD_ORDER variants from
 * asm-offsets.h until those compilers are officially retired.
 */
pgd_t swapper_pg_dir[_PTRS_PER_PGD] __page_aligned(_PGD_ORDER);
#ifdef CONFIG_64BIT
#ifdef MODULE_START
pgd_t module_pg_dir[PTRS_PER_PGD] __page_aligned(PGD_ORDER);
#endif
pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned(PMD_ORDER);
#endif
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned(PTE_ORDER);