/* MN10300 Memory management initialisation
 *
 * Copyright (C) 2007 Matsushita Electric Industrial Co., Ltd.
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Modified by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/gfp.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/tlb.h>
#include <asm/sections.h>

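/* per-CPU MMU gather state used by the generic TLB shootdown code pulled in
 * through <asm/tlb.h> */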
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

unsigned long highstart_pfn, highend_pfn;

#ifdef CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
static struct vm_struct user_iomap_vm;
#endif

/*
 * set up paging
 */
void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES] = {0,};
	pte_t *ppte;
	int loop;

	/* main kernel space -> RAM mapping is handled as 1:1 transparent by
	 * the MMU */
	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));
	memset(kernel_vmalloc_ptes, 0, sizeof(kernel_vmalloc_ptes));

	/* load the VMALLOC area PTE table addresses into the kernel PGD */
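	/* each PGD slot covers PAGE_SIZE * PTRS_PER_PTE bytes of virtual
	 * space, and each PTE table is exactly one page of pte_t entries, so
	 * advancing ppte by PAGE_SIZE / sizeof(pte_t) steps to the table for
	 * the next slot */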
	ppte = kernel_vmalloc_ptes;
	for (loop = VMALLOC_START / (PAGE_SIZE * PTRS_PER_PTE);
	     loop < VMALLOC_END / (PAGE_SIZE * PTRS_PER_PTE);
	     loop++
	     ) {
		set_pgd(swapper_pg_dir + loop, __pgd(__pa(ppte) | _PAGE_TABLE));
		ppte += PAGE_SIZE / sizeof(pte_t);
	}

	/* declare the sizes of the RAM zones (only use the normal zone) */
	zones_size[ZONE_NORMAL] =
		contig_page_data.bdata->node_low_pfn -
		contig_page_data.bdata->node_min_pfn;

	/* pass the memory from the bootmem allocator to the main allocator */
	free_area_init(zones_size);

#ifdef CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
	/* The Atomic Operation Unit registers need to be mapped to userspace
	 * for all processes.  The following uses vm_area_register_early() to
	 * reserve the first page of the vmalloc area and sets the pte for that
	 * page.
	 *
	 * glibc hardcodes this virtual mapping, so we're pretty much stuck with
	 * it from now on.
	 */
	user_iomap_vm.flags = VM_USERMAP;
	user_iomap_vm.size = 1 << PAGE_SHIFT;
	vm_area_register_early(&user_iomap_vm, PAGE_SIZE);
	ppte = kernel_vmalloc_ptes;
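	/* kernel_vmalloc_ptes[0] is the PTE for VMALLOC_START - the page just
	 * reserved above - so point it at the Atomic Operation Unit register
	 * page with PAGE_USERIO protections */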
	set_pte(ppte, pfn_pte(USER_ATOMIC_OPS_PAGE_ADDR >> PAGE_SHIFT,
			      PAGE_USERIO));
#endif

	local_flush_tlb_all();
}

/*
 * transfer all the memory from the bootmem allocator to the runtime allocator
 */
void __init mem_init(void)
{
	int codesize, reservedpages, datasize, initsize;
	int tmp;

	BUG_ON(!mem_map);

#define START_PFN	(contig_page_data.bdata->node_min_pfn)
#define MAX_LOW_PFN	(contig_page_data.bdata->node_low_pfn)

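	/* derive the page accounting and the top of directly-mapped low
	 * memory from the bootmem node data */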
	max_mapnr = num_physpages = MAX_LOW_PFN - START_PFN;
	high_memory = (void *) __va(MAX_LOW_PFN * PAGE_SIZE);

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);

	/* this will put all low memory onto the freelists */
	totalram_pages += free_all_bootmem();

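	/* count the pages that remain marked reserved after the bootmem
	 * handover so the summary below reflects what is really unavailable */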
	reservedpages = 0;
	for (tmp = 0; tmp < num_physpages; tmp++)
		if (PageReserved(&mem_map[tmp]))
			reservedpages++;

	codesize = (unsigned long) &_etext - (unsigned long) &_stext;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	printk(KERN_INFO
	       "Memory: %luk/%luk available"
	       " (%dk kernel code, %dk reserved, %dk data, %dk init,"
	       " %ldk highmem)\n",
	       nr_free_pages() << (PAGE_SHIFT - 10),
	       max_mapnr << (PAGE_SHIFT - 10),
	       codesize >> 10,
	       reservedpages << (PAGE_SHIFT - 10),
	       datasize >> 10,
	       initsize >> 10,
	       totalhigh_pages << (PAGE_SHIFT - 10));
}

/*
 * return a range of initialisation-only pages to the allocator, poisoning
 * their contents first
 */
void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
	unsigned long addr;

	for (addr = begin; addr < end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
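		/* fill with a recognisable poison pattern so any stale use of
		 * this memory shows up as garbage rather than working by
		 * accident */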
		memset((void *) addr, 0xcc, PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}

/*
 * recycle memory containing stuff only required for initialisation
 */
void free_initmem(void)
{
	free_init_pages("unused kernel memory",
			(unsigned long) &__init_begin,
			(unsigned long) &__init_end);
}

/*
 * dispose of the memory on which the initial ramdisk resided
 */
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_init_pages("initrd memory", start, end);
}
#endif