/*
 * linux/arch/sh/mm/init.c
 *
 * Copyright (C) 1999  Niibe Yutaka
 * Copyright (C) 2002 - 2007  Paul Mundt
 *
 * Based on linux/arch/i386/mm/init.c:
 *  Copyright (C) 1995  Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pagemap.h>
#include <linux/percpu.h>
#include <linux/io.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/cache.h>

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
pgd_t swapper_pg_dir[PTRS_PER_PGD];

#ifdef CONFIG_SUPERH32
/*
 * Handle trivial transitions between cached and uncached
 * segments, making use of the 1:1 mapping relationship in
 * 512MB lowmem.
 *
 * This is the offset of the uncached section from its cached alias.
 * The default value is only valid in 29-bit mode; in 32-bit mode it
 * is overridden in pmb_init().
 */
unsigned long cached_to_uncached = P2SEG - P1SEG;
#endif
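
/*
 * Illustrative sketch (not part of the original file): because P1 and P2
 * alias the same physical lowmem 1:1, the uncached view of a cached
 * address is reachable by adding the constant offset above. The helper
 * name below is hypothetical and shown only for illustration.
 */
#if 0
static void __iomem *example_uncached_alias(void *cached_vaddr)
{
	return (void __iomem *)((unsigned long)cached_vaddr + cached_to_uncached);
}
#endif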

#ifdef CONFIG_MMU
static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		pgd_ERROR(*pgd);
		return;
	}

	pud = pud_alloc(NULL, pgd, addr);
	if (unlikely(!pud)) {
		pud_ERROR(*pud);
		return;
	}

	pmd = pmd_alloc(NULL, pud, addr);
	if (unlikely(!pmd)) {
		pmd_ERROR(*pmd);
		return;
	}

	pte = pte_offset_kernel(pmd, addr);
	if (!pte_none(*pte)) {
		pte_ERROR(*pte);
		return;
	}

	set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
	local_flush_tlb_one(get_asid(), addr);
}

/*
 * As a performance optimization, other platforms preserve the fixmap
 * mapping across a context switch. We don't presently do this, but it
 * could be done in a similar fashion to the wired TLB interface that
 * sh64 uses (by way of the memory mapped UTLB configuration).
 * Unfortunately this forces us to give up a TLB entry for each mapping
 * we want to preserve. While this may be viable for a small number of
 * fixmaps, it's not particularly useful for everything and needs to be
 * carefully evaluated (i.e., we may want this for the vsyscall page).
 *
 * XXX: Perhaps add a _PAGE_WIRED flag or something similar that we can
 * pass in at __set_fixmap() time to determine the appropriate behavior
 * to follow.
 *
 *					-- PFM.
 */
void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	set_pte_phys(address, phys, prot);
}
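
/*
 * Illustrative sketch (not part of the original file): a fixmap slot
 * provides a compile-time-fixed virtual address for a physical page.
 * FIX_EXAMPLE is a hypothetical enum fixed_addresses entry, shown only
 * for illustration; real users pick a slot such as FIX_UNCACHED below.
 */
#if 0
static void example_fixmap_use(unsigned long phys)
{
	__set_fixmap(FIX_EXAMPLE, phys & PAGE_MASK, PAGE_KERNEL_NOCACHE);
	readl((void __iomem *)fix_to_virt(FIX_EXAMPLE));
}
#endif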

void __init page_table_range_init(unsigned long start, unsigned long end,
				  pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int i, j, k;
	unsigned long vaddr;

	vaddr = start;
	i = __pgd_offset(vaddr);
	j = __pud_offset(vaddr);
	k = __pmd_offset(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
		pud = (pud_t *)pgd;
		for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
			pmd = (pmd_t *)pud;
			for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
				if (pmd_none(*pmd)) {
					pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
					pmd_populate_kernel(&init_mm, pmd, pte);
					BUG_ON(pte != pte_offset_kernel(pmd, 0));
				}
				vaddr += PMD_SIZE;
			}
			k = 0;
		}
		j = 0;
	}
}
#endif	/* CONFIG_MMU */
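
/*
 * Illustrative sketch (not part of the original file): paging_init() below
 * uses page_table_range_init() to pre-populate the page tables backing the
 * fixmap region, so later __set_fixmap() calls never need to allocate.
 * A minimal, hypothetical invocation for a single PMD-aligned range:
 */
#if 0
static void __init example_prepopulate(void)
{
	unsigned long start = VMALLOC_END & PMD_MASK;	/* hypothetical range */
	unsigned long end = start + PMD_SIZE;

	page_table_range_init(start, end, swapper_pg_dir);
}
#endif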

/*
 * paging_init() sets up the page tables
 */
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long vaddr, end;
	int nid;

	/* We don't need to map the kernel through the TLB, as
	 * it is permanently mapped using P1. So clear the
	 * entire pgd. */
	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

	/* Set an initial value for the MMU.TTB so we don't have to
	 * check for a null value. */
	set_TTB(swapper_pg_dir);

	/*
	 * Populate the relevant portions of swapper_pg_dir so that
	 * we can use the fixmap entries without calling kmalloc.
	 * PTEs will be filled in by __set_fixmap().
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
	page_table_range_init(vaddr, end, swapper_pg_dir);

	kmap_coherent_init();

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		unsigned long low, start_pfn;

		start_pfn = pgdat->bdata->node_min_pfn;
		low = pgdat->bdata->node_low_pfn;

		if (max_zone_pfns[ZONE_NORMAL] < low)
			max_zone_pfns[ZONE_NORMAL] = low;

		printk("Node %u: start_pfn = 0x%lx, low = 0x%lx\n",
		       nid, start_pfn, low);
	}

	free_area_init_nodes(max_zone_pfns);

	/* Set up the uncached fixmap */
	set_fixmap_nocache(FIX_UNCACHED, __pa(&__uncached_start));
}

static struct kcore_list kcore_mem, kcore_vmalloc;

void __init mem_init(void)
{
	int codesize, datasize, initsize;
	int nid;

	num_physpages = 0;
	high_memory = NULL;

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		unsigned long node_pages = 0;
		void *node_high_memory;

		num_physpages += pgdat->node_present_pages;

		if (pgdat->node_spanned_pages)
			node_pages = free_all_bootmem_node(pgdat);

		totalram_pages += node_pages;

		node_high_memory = (void *)__va((pgdat->node_start_pfn +
						 pgdat->node_spanned_pages) <<
						 PAGE_SHIFT);
		if (node_high_memory > high_memory)
			high_memory = node_high_memory;
	}

	/* Set this up early, so we can take care of the zero page */
	cpu_cache_init();

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);
	__flush_wback_region(empty_zero_page, PAGE_SIZE);

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END - VMALLOC_START);

	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
	       "%dk data, %dk init)\n",
	       nr_free_pages() << (PAGE_SHIFT-10),
	       num_physpages << (PAGE_SHIFT-10),
	       codesize >> 10,
	       datasize >> 10,
	       initsize >> 10);

	/* Initialize the vDSO */
	vsyscall_init();
}

void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)(&__init_begin);
	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		free_page(addr);
		totalram_pages++;
	}
	printk("Freeing unused kernel memory: %ldk freed\n",
	       ((unsigned long)&__init_end -
		(unsigned long)&__init_begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	unsigned long p;
	for (p = start; p < end; p += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(p));
		init_page_count(virt_to_page(p));
		free_page(p);
		totalram_pages++;
	}
	printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
}
#endif

#if THREAD_SHIFT < PAGE_SHIFT
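/*
 * When THREAD_SIZE is smaller than a page, allocating thread stacks
 * straight from the page allocator would waste the tail of each page,
 * so back them with a dedicated slab cache packed at THREAD_SIZE
 * granularity instead.
 */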
static struct kmem_cache *thread_info_cache;

struct thread_info *alloc_thread_info(struct task_struct *tsk)
{
	struct thread_info *ti;

	ti = kmem_cache_alloc(thread_info_cache, GFP_KERNEL);
	if (unlikely(ti == NULL))
		return NULL;
#ifdef CONFIG_DEBUG_STACK_USAGE
	memset(ti, 0, THREAD_SIZE);
#endif
	return ti;
}

void free_thread_info(struct thread_info *ti)
{
	kmem_cache_free(thread_info_cache, ti);
}

void thread_info_cache_init(void)
{
	thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
					      THREAD_SIZE, 0, NULL);
	BUG_ON(thread_info_cache == NULL);
}
#endif /* THREAD_SHIFT < PAGE_SHIFT */

#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size)
{
	pg_data_t *pgdat;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	pgdat = NODE_DATA(nid);

	/* We only have ZONE_NORMAL, so this is easy.. */
	ret = __add_pages(nid, pgdat->node_zones + ZONE_NORMAL,
			  start_pfn, nr_pages);
	if (unlikely(ret))
		printk("%s: Failed, __add_pages() == %d\n", __func__, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 addr)
{
	/* Node 0 for now.. */
	return 0;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif
#endif /* CONFIG_MEMORY_HOTPLUG */