/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/pfn.h>
#include <linux/hardirq.h>
#include <linux/gfp.h>

#include <asm/asm-offsets.h>
#include <asm/bootinfo.h>
#include <asm/cachectl.h>
#include <asm/cpu.h>
#include <asm/dma.h>
#include <asm/kmap_types.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

/* Atomicity and interruptibility */
#ifdef CONFIG_MIPS_MT_SMTC

#include <asm/mipsmtregs.h>

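/*
 * With SMTC the TLB is shared by all thread contexts on the core, so
 * masking local interrupts alone does not keep other TCs from racing
 * with a TLB update.  ENTER_CRITICAL() therefore also executes dvpe()
 * to suspend the other virtual processing elements for the duration of
 * the critical section; EXIT_CRITICAL() resumes them with evpe().
 */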
#define ENTER_CRITICAL(flags) \
	{ \
	unsigned int mvpflags; \
	local_irq_save(flags);\
	mvpflags = dvpe()
#define EXIT_CRITICAL(flags) \
	evpe(mvpflags); \
	local_irq_restore(flags); \
	}
#else

#define ENTER_CRITICAL(flags) local_irq_save(flags)
#define EXIT_CRITICAL(flags) local_irq_restore(flags)

#endif /* CONFIG_MIPS_MT_SMTC */

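/* Per-CPU state for the generic TLB shoot-down batching in <asm/tlb.h>. */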
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/*
 * We have up to 8 empty zeroed pages so we can map one of the right colour
 * when needed.  This is necessary only on R4000 / R4400 SC and MC versions
 * where we have to avoid VCED / VECI exceptions for good performance at
 * any price.  Since the page is never written to after initialization we
 * don't have to care about aliases on other CPUs.
 */
unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL_GPL(empty_zero_page);

/*
 * Not static inline because used by IP27 special magic initialization code
 */
unsigned long setup_zero_pages(void)
{
	unsigned int order;
	unsigned long size;
	struct page *page;

	if (cpu_has_vce)
		order = 3;
	else
		order = 0;

	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!empty_zero_page)
		panic("Oh boy, that early out of memory?");

	page = virt_to_page((void *)empty_zero_page);
	split_page(page, order);
	while (page < virt_to_page((void *)(empty_zero_page + (PAGE_SIZE << order)))) {
		SetPageReserved(page);
		page++;
	}

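	/*
	 * zero_page_mask is the offset mask, in bytes, of the block of zero
	 * pages just allocated; ZERO_PAGE(vaddr) uses it to pick the zero
	 * page whose colour matches the faulting virtual address.
	 */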
	size = PAGE_SIZE << order;
	zero_page_mask = (size - 1) & PAGE_MASK;

	return 1UL << order;
}

#ifdef CONFIG_MIPS_MT_SMTC
static pte_t *kmap_coherent_pte;
static void __init kmap_coherent_init(void)
{
	unsigned long vaddr;

	/* cache the first coherent kmap pte */
	vaddr = __fix_to_virt(FIX_CMAP_BEGIN);
	kmap_coherent_pte = kmap_get_fixmap_pte(vaddr);
}
#else
static inline void kmap_coherent_init(void) {}
#endif

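/*
 * Map a page at a kernel virtual address whose cache colour matches the
 * user virtual address it is mapped at, so that accesses through this
 * mapping hit the same data cache lines as the user's.  The mapping is
 * installed directly as a temporary TLB entry (wired in the non-SMTC
 * case) covering a per-colour fixmap slot.
 */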
void *kmap_coherent(struct page *page, unsigned long addr)
{
	enum fixed_addresses idx;
	unsigned long vaddr, flags, entrylo;
	unsigned long old_ctx;
	pte_t pte;
	int tlbidx;

	BUG_ON(Page_dcache_dirty(page));

	inc_preempt_count();
	idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
#ifdef CONFIG_MIPS_MT_SMTC
	idx += FIX_N_COLOURS * smp_processor_id() +
		(in_interrupt() ? (FIX_N_COLOURS * NR_CPUS) : 0);
#else
	idx += in_interrupt() ? FIX_N_COLOURS : 0;
#endif
	vaddr = __fix_to_virt(FIX_CMAP_END - idx);
	pte = mk_pte(page, PAGE_KERNEL);
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
	entrylo = pte.pte_high;
#else
	entrylo = pte_to_entrylo(pte_val(pte));
#endif

	ENTER_CRITICAL(flags);
	old_ctx = read_c0_entryhi();
	write_c0_entryhi(vaddr & (PAGE_MASK << 1));
	write_c0_entrylo0(entrylo);
	write_c0_entrylo1(entrylo);
#ifdef CONFIG_MIPS_MT_SMTC
	set_pte(kmap_coherent_pte - (FIX_CMAP_END - idx), pte);
	/* preload TLB instead of local_flush_tlb_one() */
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	tlbidx = read_c0_index();
	mtc0_tlbw_hazard();
	if (tlbidx < 0)
		tlb_write_random();
	else
		tlb_write_indexed();
#else
	tlbidx = read_c0_wired();
	write_c0_wired(tlbidx + 1);
	write_c0_index(tlbidx);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
#endif
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	EXIT_CRITICAL(flags);

	return (void*) vaddr;
}

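/*
 * A per-index EntryHi value in CKSEG0 (unmapped space), guaranteed never
 * to match a real translation, used to park invalidated TLB entries.
 */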
#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))

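/*
 * Undo kmap_coherent(): in the non-SMTC case, drop the wired TLB entry
 * that kmap_coherent() installed and overwrite it with a non-matching
 * EntryHi so it can never be hit again.
 */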
void kunmap_coherent(void)
{
#ifndef CONFIG_MIPS_MT_SMTC
	unsigned int wired;
	unsigned long flags, old_ctx;

	ENTER_CRITICAL(flags);
	old_ctx = read_c0_entryhi();
	wired = read_c0_wired() - 1;
	write_c0_wired(wired);
	write_c0_index(wired);
	write_c0_entryhi(UNIQUE_ENTRYHI(wired));
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	EXIT_CRITICAL(flags);
#endif
	dec_preempt_count();
	preempt_check_resched();
}

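/*
 * Copy a user highmem page.  When the D-cache has aliases and the source
 * page is live in a user mapping, read it through a kmap_coherent()
 * mapping of matching colour so we see the data the user last wrote.
 */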
void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;

	vto = kmap_atomic(to, KM_USER1);
	if (cpu_has_dc_aliases &&
	    page_mapped(from) && !Page_dcache_dirty(from)) {
		vfrom = kmap_coherent(from, vaddr);
		copy_page(vto, vfrom);
		kunmap_coherent();
	} else {
		vfrom = kmap_atomic(from, KM_USER0);
		copy_page(vto, vfrom);
		kunmap_atomic(vfrom, KM_USER0);
	}
	if ((!cpu_has_ic_fills_f_dc) ||
	    pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
		flush_data_cache_page((unsigned long)vto);
	kunmap_atomic(vto, KM_USER1);
	/* Make sure this page is cleared on other CPUs too before using it */
	smp_wmb();
}

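/*
 * copy_to_user_page() / copy_from_user_page() are used when the kernel
 * writes to or reads from a page on behalf of a user mapping (e.g. via
 * access_process_vm()).  With an aliasing D-cache the copy must go
 * through a mapping of the user's cache colour, or the page has to be
 * flagged dcache-dirty so a later cache flush knows it is needed.
 */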
void copy_to_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len)
{
	if (cpu_has_dc_aliases &&
	    page_mapped(page) && !Page_dcache_dirty(page)) {
		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(vto, src, len);
		kunmap_coherent();
	} else {
		memcpy(dst, src, len);
		if (cpu_has_dc_aliases)
			SetPageDcacheDirty(page);
	}
	if ((vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc)
		flush_cache_page(vma, vaddr, page_to_pfn(page));
}

void copy_from_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len)
{
	if (cpu_has_dc_aliases &&
	    page_mapped(page) && !Page_dcache_dirty(page)) {
		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(dst, vfrom, len);
		kunmap_coherent();
	} else {
		memcpy(dst, src, len);
		if (cpu_has_dc_aliases)
			SetPageDcacheDirty(page);
	}
}

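/*
 * Pre-allocate page tables (from bootmem) covering the fixed-mapping
 * virtual range [start, end) so that fixmap and kmap entries can be
 * installed later without further allocations.
 */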
void __init fixrange_init(unsigned long start, unsigned long end,
	pgd_t *pgd_base)
{
#if defined(CONFIG_HIGHMEM) || defined(CONFIG_MIPS_MT_SMTC)
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int i, j, k;
	unsigned long vaddr;

	vaddr = start;
	i = __pgd_offset(vaddr);
	j = __pud_offset(vaddr);
	k = __pmd_offset(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
		pud = (pud_t *)pgd;
		for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
			pmd = (pmd_t *)pud;
			for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
				if (pmd_none(*pmd)) {
					pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
					set_pmd(pmd, __pmd((unsigned long)pte));
					BUG_ON(pte != pte_offset_kernel(pmd, 0));
				}
				vaddr += PMD_SIZE;
			}
			k = 0;
		}
		j = 0;
	}
#endif
}

#ifndef CONFIG_NEED_MULTIPLE_NODES
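/*
 * Return 1 if the given pfn lies in a usable RAM region of the
 * firmware-provided boot_mem_map, 0 otherwise.
 */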
int page_is_ram(unsigned long pagenr)
{
	int i;

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long addr, end;

		if (boot_mem_map.map[i].type != BOOT_MEM_RAM)
			/* not usable memory */
			continue;

		addr = PFN_UP(boot_mem_map.map[i].addr);
		end = PFN_DOWN(boot_mem_map.map[i].addr +
			       boot_mem_map.map[i].size);

		if (pagenr >= addr && pagenr < end)
			return 1;
	}

	return 0;
}

void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long lastpfn;

	pagetable_init();

#ifdef CONFIG_HIGHMEM
	kmap_init();
#endif
	kmap_coherent_init();

#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
#endif
#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	lastpfn = max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
	lastpfn = highend_pfn;

	if (cpu_has_dc_aliases && max_low_pfn != highend_pfn) {
		printk(KERN_WARNING "This processor doesn't support highmem."
		       " %ldk highmem ignored\n",
		       (highend_pfn - max_low_pfn) << (PAGE_SHIFT - 10));
		max_zone_pfns[ZONE_HIGHMEM] = max_low_pfn;
		lastpfn = max_low_pfn;
	}
#endif

	free_area_init_nodes(max_zone_pfns);
}

#ifdef CONFIG_64BIT
static struct kcore_list kcore_kseg0;
#endif

void __init mem_init(void)
{
	unsigned long codesize, reservedpages, datasize, initsize;
	unsigned long tmp, ram;

#ifdef CONFIG_HIGHMEM
#ifdef CONFIG_DISCONTIGMEM
#error "CONFIG_HIGHMEM and CONFIG_DISCONTIGMEM don't work together yet"
#endif
	max_mapnr = highend_pfn;
#else
	max_mapnr = max_low_pfn;
#endif
	high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);

	totalram_pages += free_all_bootmem();
	totalram_pages -= setup_zero_pages();	/* Set up zeroed pages.  */

	reservedpages = ram = 0;
	for (tmp = 0; tmp < max_low_pfn; tmp++)
		if (page_is_ram(tmp)) {
			ram++;
			if (PageReserved(pfn_to_page(tmp)))
				reservedpages++;
		}
	num_physpages = ram;

#ifdef CONFIG_HIGHMEM
	for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
		struct page *page = pfn_to_page(tmp);

		if (!page_is_ram(tmp)) {
			SetPageReserved(page);
			continue;
		}
		ClearPageReserved(page);
		init_page_count(page);
		__free_page(page);
		totalhigh_pages++;
	}
	totalram_pages += totalhigh_pages;
	num_physpages += totalhigh_pages;
#endif

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

#ifdef CONFIG_64BIT
	if ((unsigned long) &_text > (unsigned long) CKSEG0)
		/* The -4 is a hack so that user tools don't have to handle
		   the overflow.  */
		kclist_add(&kcore_kseg0, (void *) CKSEG0,
				0x80000000 - 4, KCORE_TEXT);
#endif

	printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, "
	       "%ldk reserved, %ldk data, %ldk init, %ldk highmem)\n",
	       nr_free_pages() << (PAGE_SHIFT-10),
	       ram << (PAGE_SHIFT-10),
	       codesize >> 10,
	       reservedpages << (PAGE_SHIFT-10),
	       datasize >> 10,
	       initsize >> 10,
	       totalhigh_pages << (PAGE_SHIFT-10));
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */

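/*
 * Hand the pages in the physical range [begin, end) back to the page
 * allocator, poisoning them first so stale uses of init memory are
 * easier to catch.
 */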
void free_init_pages(const char *what, unsigned long begin, unsigned long end)
{
	unsigned long pfn;

	for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) {
		struct page *page = pfn_to_page(pfn);
		void *addr = phys_to_virt(PFN_PHYS(pfn));

		ClearPageReserved(page);
		init_page_count(page);
		memset(addr, POISON_FREE_INITMEM, PAGE_SIZE);
		__free_page(page);
		totalram_pages++;
	}
	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_init_pages("initrd memory",
			virt_to_phys((void *)start),
			virt_to_phys((void *)end));
}
#endif

void __init_refok free_initmem(void)
{
	prom_free_prom_memory();
	free_init_pages("unused kernel memory",
			__pa_symbol(&__init_begin),
			__pa_symbol(&__init_end));
}

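/*
 * Unless the PGD pointer is kept in the CP0 Context register
 * (CONFIG_MIPS_PGD_C0_CONTEXT), the TLB refill handler locates the
 * current page directory through this per-CPU array.
 */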
#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
unsigned long pgd_current[NR_CPUS];
#endif
/*
 * On 64-bit we've got three-level pagetables with a slightly
 * different layout ...
 */
#define __page_aligned(order) __attribute__((__aligned__(PAGE_SIZE<<order)))

/*
 * gcc 3.3 and older have trouble determining that PTRS_PER_PGD and PGD_ORDER
 * are constants.  So we use the variants from asm-offsets.h until those gcc
 * versions are officially retired.
 */
pgd_t swapper_pg_dir[_PTRS_PER_PGD] __page_aligned(_PGD_ORDER);
#ifndef __PAGETABLE_PMD_FOLDED
pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned(PMD_ORDER);
#endif
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned(PTE_ORDER);