/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/pfn.h>
#include <linux/hardirq.h>
#include <linux/gfp.h>
#include <linux/kcore.h>

#include <asm/asm-offsets.h>
#include <asm/bootinfo.h>
#include <asm/cachectl.h>
#include <asm/cpu.h>
#include <asm/dma.h>
#include <asm/kmap_types.h>
#include <asm/maar.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

/*
 * We have up to 8 empty zeroed pages so we can map one of the right colour
 * when needed.  This is necessary only on R4000 / R4400 SC and MC versions
 * where we have to avoid VCED / VECI exceptions for good performance at
 * any price.  Since the page is never written to after initialization we
 * don't have to care about aliases on other CPUs.
 */
unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL_GPL(empty_zero_page);
EXPORT_SYMBOL(zero_page_mask);

/*
 * Not static inline because used by IP27 special magic initialization code
 */
void setup_zero_pages(void)
{
	unsigned int order, i;
	struct page *page;

	if (cpu_has_vce)
		order = 3;
	else
		order = 0;

	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!empty_zero_page)
		panic("Oh boy, that early out of memory?");

	page = virt_to_page((void *)empty_zero_page);
	split_page(page, order);
	for (i = 0; i < (1 << order); i++, page++)
		mark_page_reserved(page);

	zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}
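
/*
 * Illustrative sketch (an assumption, not part of the original file):
 * consumers pick the correctly coloured zero page by masking the user
 * virtual address with zero_page_mask, along the lines of the ZERO_PAGE()
 * macro in asm/pgtable.h:
 *
 *	struct page *zp = virt_to_page((void *)(empty_zero_page +
 *				((unsigned long)vaddr & zero_page_mask)));
 *
 * With order 0 (no VCE), zero_page_mask is 0 and the lookup always
 * returns the single zero page.
 */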
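
/*
 * Install a temporary wired TLB entry mapping @page at a fixmap virtual
 * address with the same cache colour as @addr, so the kernel can touch
 * the page without creating dcache aliases against the user mapping.
 * Disables preemption and pagefaults until the matching kunmap_coherent().
 */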
static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
{
	enum fixed_addresses idx;
	unsigned long vaddr, flags, entrylo;
	unsigned long old_ctx;
	pte_t pte;
	int tlbidx;

	BUG_ON(Page_dcache_dirty(page));

	preempt_disable();
	pagefault_disable();
	idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
	idx += in_interrupt() ? FIX_N_COLOURS : 0;
	vaddr = __fix_to_virt(FIX_CMAP_END - idx);
	pte = mk_pte(page, prot);
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
	entrylo = pte_to_entrylo(pte.pte_high);
#else
	entrylo = pte_to_entrylo(pte_val(pte));
#endif

	local_irq_save(flags);
	old_ctx = read_c0_entryhi();
	write_c0_entryhi(vaddr & (PAGE_MASK << 1));
	write_c0_entrylo0(entrylo);
	write_c0_entrylo1(entrylo);
#ifdef CONFIG_XPA
	entrylo = (pte.pte_low & _PFNX_MASK);
	writex_c0_entrylo0(entrylo);
	writex_c0_entrylo1(entrylo);
#endif
	tlbidx = read_c0_wired();
	write_c0_wired(tlbidx + 1);
	write_c0_index(tlbidx);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	local_irq_restore(flags);

	return (void*) vaddr;
}

void *kmap_coherent(struct page *page, unsigned long addr)
{
	return __kmap_pgprot(page, addr, PAGE_KERNEL);
}

void *kmap_noncoherent(struct page *page, unsigned long addr)
{
	return __kmap_pgprot(page, addr, PAGE_KERNEL_NC);
}

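/*
 * Tear down the wired TLB entry installed by the most recent
 * kmap_coherent()/kmap_noncoherent() and re-enable pagefaults and
 * preemption.
 */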
void kunmap_coherent(void)
{
	unsigned int wired;
	unsigned long flags, old_ctx;

	local_irq_save(flags);
	old_ctx = read_c0_entryhi();
	wired = read_c0_wired() - 1;
	write_c0_wired(wired);
	write_c0_index(wired);
	write_c0_entryhi(UNIQUE_ENTRYHI(wired));
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	local_irq_restore(flags);
	pagefault_enable();
	preempt_enable();
}

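/*
 * Copy a user highmem page.  On dcache-aliasing CPUs a mapped, clean
 * source page is read through a correctly coloured coherent mapping;
 * otherwise a plain kmap_atomic() suffices.  The destination is flushed
 * whenever its kernel alias could conflict with @vaddr or the icache
 * does not fill from the dcache.
 */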
void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;

	vto = kmap_atomic(to);
	if (cpu_has_dc_aliases &&
	    page_mapped(from) && !Page_dcache_dirty(from)) {
		vfrom = kmap_coherent(from, vaddr);
		copy_page(vto, vfrom);
		kunmap_coherent();
	} else {
		vfrom = kmap_atomic(from);
		copy_page(vto, vfrom);
		kunmap_atomic(vfrom);
	}
	if ((!cpu_has_ic_fills_f_dc) ||
	    pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
		flush_data_cache_page((unsigned long)vto);
	kunmap_atomic(vto);
	/* Make sure this page is cleared on other CPU's too before using it */
	smp_wmb();
}

void copy_to_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len)
{
	if (cpu_has_dc_aliases &&
	    page_mapped(page) && !Page_dcache_dirty(page)) {
		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(vto, src, len);
		kunmap_coherent();
	} else {
		memcpy(dst, src, len);
		if (cpu_has_dc_aliases)
			SetPageDcacheDirty(page);
	}
	if ((vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc)
		flush_cache_page(vma, vaddr, page_to_pfn(page));
}

void copy_from_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len)
{
	if (cpu_has_dc_aliases &&
	    page_mapped(page) && !Page_dcache_dirty(page)) {
		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(dst, vfrom, len);
		kunmap_coherent();
	} else {
		memcpy(dst, src, len);
		if (cpu_has_dc_aliases)
			SetPageDcacheDirty(page);
	}
}
EXPORT_SYMBOL_GPL(copy_from_user_page);

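/*
 * Pre-allocate bootmem pte pages for the fixed-address range
 * [start, end) under pgd_base.  Only does anything on CONFIG_HIGHMEM
 * kernels, where the fixmap and kmap areas need their page tables in
 * place before first use.
 */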
void __init fixrange_init(unsigned long start, unsigned long end,
	pgd_t *pgd_base)
{
#ifdef CONFIG_HIGHMEM
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int i, j, k;
	unsigned long vaddr;

	vaddr = start;
	i = __pgd_offset(vaddr);
	j = __pud_offset(vaddr);
	k = __pmd_offset(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
		pud = (pud_t *)pgd;
		for ( ; (j < PTRS_PER_PUD) && (vaddr < end); pud++, j++) {
			pmd = (pmd_t *)pud;
			for (; (k < PTRS_PER_PMD) && (vaddr < end); pmd++, k++) {
				if (pmd_none(*pmd)) {
					pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
					set_pmd(pmd, __pmd((unsigned long)pte));
					BUG_ON(pte != pte_offset_kernel(pmd, 0));
				}
				vaddr += PMD_SIZE;
			}
			k = 0;
		}
		j = 0;
	}
#endif
}
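
/*
 * Sketch of the expected caller (an assumption based on
 * arch/mips/mm/pgtable-32.c, not part of this file): pagetable_init()
 * covers the fixmap and, with highmem, the pkmap window roughly as:
 *
 *	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
 *	fixrange_init(vaddr, vaddr + FIXADDR_SIZE, pgd_base);
 *
 *	vaddr = PKMAP_BASE;
 *	fixrange_init(vaddr & PMD_MASK,
 *		      vaddr + PAGE_SIZE * LAST_PKMAP, pgd_base);
 */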

unsigned __weak platform_maar_init(unsigned num_pairs)
{
	struct maar_config cfg[BOOT_MEM_MAP_MAX];
	unsigned i, num_configured, num_cfg = 0;
	phys_addr_t skip;

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		switch (boot_mem_map.map[i].type) {
		case BOOT_MEM_RAM:
		case BOOT_MEM_INIT_RAM:
			break;
		default:
			continue;
		}

		skip = 0x10000 - (boot_mem_map.map[i].addr & 0xffff);

		cfg[num_cfg].lower = boot_mem_map.map[i].addr;
		cfg[num_cfg].lower += skip;

		cfg[num_cfg].upper = cfg[num_cfg].lower;
		cfg[num_cfg].upper += boot_mem_map.map[i].size - 1;
		cfg[num_cfg].upper -= skip;

		cfg[num_cfg].attrs = MIPS_MAAR_S;
		num_cfg++;
	}

	num_configured = maar_config(cfg, num_cfg, num_pairs);
	if (num_configured < num_cfg)
		pr_warn("Not enough MAAR pairs (%u) for all bootmem regions (%u)\n",
			num_pairs, num_cfg);

	return num_configured;
}
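
/*
 * Being __weak, platform_maar_init() above may be overridden by platform
 * code that knows its speculation-safe regions.  A minimal hypothetical
 * override (the single 256MB region is invented for illustration):
 *
 *	unsigned platform_maar_init(unsigned num_pairs)
 *	{
 *		struct maar_config cfg = {
 *			.lower	= 0x00000000,
 *			.upper	= 0x0fffffff,
 *			.attrs	= MIPS_MAAR_S,
 *		};
 *
 *		return maar_config(&cfg, 1, num_pairs);
 *	}
 */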
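
/*
 * Probe the number of MAARs, let platform_maar_init() configure as many
 * lower/upper pairs as it needs, invalidate the remaining MAARs and log
 * the resulting configuration.
 */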
static void maar_init(void)
{
	unsigned num_maars, used, i;
	phys_addr_t lower, upper, attr;

	if (!cpu_has_maar)
		return;

	/* Detect the number of MAARs */
	write_c0_maari(~0);
	back_to_back_c0_hazard();
	num_maars = read_c0_maari() + 1;

	/* MAARs should be in pairs */
	WARN_ON(num_maars % 2);

	/* Configure the required MAARs */
	used = platform_maar_init(num_maars / 2);

	/* Disable any further MAARs */
	for (i = (used * 2); i < num_maars; i++) {
		write_c0_maari(i);
		back_to_back_c0_hazard();
		write_c0_maar(0);
		back_to_back_c0_hazard();
	}

	pr_info("MAAR configuration:\n");
	for (i = 0; i < num_maars; i += 2) {
		write_c0_maari(i);
		back_to_back_c0_hazard();
		upper = read_c0_maar();

		write_c0_maari(i + 1);
		back_to_back_c0_hazard();
		lower = read_c0_maar();

		attr = lower & upper;
		lower = (lower & MIPS_MAAR_ADDR) << 4;
		upper = ((upper & MIPS_MAAR_ADDR) << 4) | 0xffff;

		pr_info("  [%d]: ", i / 2);
		if (!(attr & MIPS_MAAR_V)) {
			pr_cont("disabled\n");
			continue;
		}

		pr_cont("%pa-%pa", &lower, &upper);

		if (attr & MIPS_MAAR_S)
			pr_cont(" speculate");

		pr_cont("\n");
	}
}

#ifndef CONFIG_NEED_MULTIPLE_NODES
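/*
 * Test whether pfn @pagenr lies within a usable (RAM or INIT_RAM)
 * region of the boot memory map.
 */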
int page_is_ram(unsigned long pagenr)
{
	int i;

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long addr, end;

		switch (boot_mem_map.map[i].type) {
		case BOOT_MEM_RAM:
		case BOOT_MEM_INIT_RAM:
			break;
		default:
			/* not usable memory */
			continue;
		}

		addr = PFN_UP(boot_mem_map.map[i].addr);
		end = PFN_DOWN(boot_mem_map.map[i].addr +
			       boot_mem_map.map[i].size);

		if (pagenr >= addr && pagenr < end)
			return 1;
	}

	return 0;
}

void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long lastpfn __maybe_unused;

	pagetable_init();

#ifdef CONFIG_HIGHMEM
	kmap_init();
#endif
#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
#endif
#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	lastpfn = max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
	lastpfn = highend_pfn;

	if (cpu_has_dc_aliases && max_low_pfn != highend_pfn) {
		printk(KERN_WARNING "This processor doesn't support highmem."
		       " %ldk highmem ignored\n",
		       (highend_pfn - max_low_pfn) << (PAGE_SHIFT - 10));
		max_zone_pfns[ZONE_HIGHMEM] = max_low_pfn;
		lastpfn = max_low_pfn;
	}
#endif

	free_area_init_nodes(max_zone_pfns);
}

#ifdef CONFIG_64BIT
static struct kcore_list kcore_kseg0;
#endif

static inline void mem_init_free_highmem(void)
{
#ifdef CONFIG_HIGHMEM
	unsigned long tmp;

	for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
		struct page *page = pfn_to_page(tmp);

		if (!page_is_ram(tmp))
			SetPageReserved(page);
		else
			free_highmem_page(page);
	}
#endif
}

void __init mem_init(void)
{
#ifdef CONFIG_HIGHMEM
#ifdef CONFIG_DISCONTIGMEM
#error "CONFIG_HIGHMEM and CONFIG_DISCONTIGMEM don't work together yet"
#endif
	max_mapnr = highend_pfn ? highend_pfn : max_low_pfn;
#else
	max_mapnr = max_low_pfn;
#endif
	high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);

	maar_init();
	free_all_bootmem();
	setup_zero_pages();	/* Setup zeroed pages.  */
	mem_init_free_highmem();
	mem_init_print_info(NULL);

#ifdef CONFIG_64BIT
	if ((unsigned long) &_text > (unsigned long) CKSEG0)
		/* The -4 is a hack so that user tools don't have to handle
		   the overflow.  */
		kclist_add(&kcore_kseg0, (void *) CKSEG0,
			   0x80000000 - 4, KCORE_TEXT);
#endif
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */

void free_init_pages(const char *what, unsigned long begin, unsigned long end)
{
	unsigned long pfn;

	for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) {
		struct page *page = pfn_to_page(pfn);
		void *addr = phys_to_virt(PFN_PHYS(pfn));

		memset(addr, POISON_FREE_INITMEM, PAGE_SIZE);
		free_reserved_page(page);
	}
	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
			   "initrd");
}
#endif

void (*free_init_pages_eva)(void *begin, void *end) = NULL;

void __init_refok free_initmem(void)
{
	prom_free_prom_memory();
	/*
	 * Let the platform define a specific function to free the
	 * init section since EVA may have used any possible mapping
	 * between virtual and physical addresses.
	 */
	if (free_init_pages_eva)
		free_init_pages_eva((void *)&__init_begin, (void *)&__init_end);
	else
		free_initmem_default(POISON_FREE_INITMEM);
}

#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
unsigned long pgd_current[NR_CPUS];
#endif

/*
 * gcc 3.3 and older have trouble determining that PTRS_PER_PGD and PGD_ORDER
 * are constants.  So we use the variants from asm-offsets.h until that gcc
 * will officially be retired.
 *
 * Aligning swapper_pg_dir to 64K allows its address to be loaded
 * with a single LUI instruction in the TLB handlers.  If we used
 * __aligned(64K), its size would get rounded up to the alignment
 * size, and waste space.  So we place it in its own section and align
 * it in the linker script.
 */
pgd_t swapper_pg_dir[_PTRS_PER_PGD] __section(.bss..swapper_pg_dir);
#ifndef __PAGETABLE_PMD_FOLDED
pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
#endif
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;