/*
 * linux/arch/sh/mm/init.c
 *
 * Copyright (C) 1999 Niibe Yutaka
 * Copyright (C) 2002 - 2007 Paul Mundt
 *
 * Based on linux/arch/i386/mm/init.c:
 *  Copyright (C) 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pagemap.h>
#include <linux/percpu.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/cache.h>

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
pgd_t swapper_pg_dir[PTRS_PER_PGD];

#ifdef CONFIG_SUPERH32
/*
 * This is the offset of the uncached section from its cached alias.
 *
 * Legacy platforms handle trivial transitions between cached and
 * uncached segments by making use of the 1:1 mapping relationship in
 * 512MB lowmem, others via a special uncached mapping.
 *
 * Default value only valid in 29-bit mode, in 32-bit mode this will be
 * updated by the early PMB initialization code.
 */
unsigned long cached_to_uncached = P2SEG - P1SEG;
unsigned long uncached_size = 0x20000000;
#endif

#ifdef CONFIG_MMU
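/*
 * Walk the kernel page tables for @addr and return a pointer to the
 * corresponding PTE, or NULL if an upper-level entry is missing.
 */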
static pte_t *__get_pte_phys(unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        pgd = pgd_offset_k(addr);
        if (pgd_none(*pgd)) {
                pgd_ERROR(*pgd);
                return NULL;
        }

        pud = pud_alloc(NULL, pgd, addr);
        if (unlikely(!pud)) {
                pud_ERROR(*pud);
                return NULL;
        }

        pmd = pmd_alloc(NULL, pud, addr);
        if (unlikely(!pmd)) {
                pmd_ERROR(*pmd);
                return NULL;
        }

        pte = pte_offset_kernel(pmd, addr);
        return pte;
}

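/*
 * Establish a kernel mapping of @addr to @phys with protection @prot,
 * flush the local TLB entry, and wire it down when _PAGE_WIRED is set.
 */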
static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
{
        pte_t *pte;

        pte = __get_pte_phys(addr);
        if (!pte_none(*pte)) {
                pte_ERROR(*pte);
                return;
        }

        set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
        local_flush_tlb_one(get_asid(), addr);

        if (pgprot_val(prot) & _PAGE_WIRED)
                tlb_wire_entry(NULL, addr, *pte);
}

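/*
 * Tear down a mapping installed by set_pte_phys(), unwiring the TLB
 * entry first if _PAGE_WIRED was used.
 */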
static void clear_pte_phys(unsigned long addr, pgprot_t prot)
{
        pte_t *pte;

        pte = __get_pte_phys(addr);

        if (pgprot_val(prot) & _PAGE_WIRED)
                tlb_unwire_entry();

        set_pte(pte, pfn_pte(0, __pgprot(0)));
        local_flush_tlb_one(get_asid(), addr);
}

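/*
 * Map a fixmap slot to the given physical address; the virtual address
 * is derived from the fixmap index.
 */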
void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
        unsigned long address = __fix_to_virt(idx);

        if (idx >= __end_of_fixed_addresses) {
                BUG();
                return;
        }

        set_pte_phys(address, phys, prot);
}

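/*
 * Undo a fixmap mapping established with __set_fixmap().
 */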
void __clear_fixmap(enum fixed_addresses idx, pgprot_t prot)
{
        unsigned long address = __fix_to_virt(idx);

        if (idx >= __end_of_fixed_addresses) {
                BUG();
                return;
        }

        clear_pte_phys(address, prot);
}

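/*
 * Pre-allocate and populate page table pages for the kernel virtual
 * range [start, end) so that entries (e.g. the fixmaps) can later be
 * set without further allocation.
 */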
void __init page_table_range_init(unsigned long start, unsigned long end,
                                  pgd_t *pgd_base)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        int i, j, k;
        unsigned long vaddr;

        vaddr = start;
        i = __pgd_offset(vaddr);
        j = __pud_offset(vaddr);
        k = __pmd_offset(vaddr);
        pgd = pgd_base + i;

        for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
                pud = (pud_t *)pgd;
                for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
#ifdef __PAGETABLE_PMD_FOLDED
                        pmd = (pmd_t *)pud;
#else
                        pmd = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);
                        pud_populate(&init_mm, pud, pmd);
                        pmd += k;
#endif
                        for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
                                if (pmd_none(*pmd)) {
                                        pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
                                        pmd_populate_kernel(&init_mm, pmd, pte);
                                        BUG_ON(pte != pte_offset_kernel(pmd, 0));
                                }
                                vaddr += PMD_SIZE;
                        }
                        k = 0;
                }
                j = 0;
        }
}
#endif  /* CONFIG_MMU */

/*
 * paging_init() sets up the page tables
 */
void __init paging_init(void)
{
        unsigned long max_zone_pfns[MAX_NR_ZONES];
        unsigned long vaddr, end;
        int nid;

        /* We don't need to map the kernel through the TLB, as
         * it is permanently mapped using P1. So clear the
         * entire pgd. */
        memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

        /* Set an initial value for the MMU.TTB so we don't have to
         * check for a null value. */
        set_TTB(swapper_pg_dir);

        /*
         * Populate the relevant portions of swapper_pg_dir so that
         * we can use the fixmap entries without calling kmalloc.
         * pte's will be filled in by __set_fixmap().
         */
        vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
        end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
        page_table_range_init(vaddr, end, swapper_pg_dir);

        kmap_coherent_init();

        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

        for_each_online_node(nid) {
                pg_data_t *pgdat = NODE_DATA(nid);
                unsigned long low, start_pfn;

                start_pfn = pgdat->bdata->node_min_pfn;
                low = pgdat->bdata->node_low_pfn;

                if (max_zone_pfns[ZONE_NORMAL] < low)
                        max_zone_pfns[ZONE_NORMAL] = low;

                printk("Node %u: start_pfn = 0x%lx, low = 0x%lx\n",
                       nid, start_pfn, low);
        }

        free_area_init_nodes(max_zone_pfns);
}

/*
 * Early initialization for any I/O MMUs we might have.
 */
static void __init iommu_init(void)
{
        no_iommu_init();
}

unsigned int mem_init_done = 0;

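/*
 * Release bootmem to the page allocator for each online node, set up
 * the zero page and the vDSO, and print the resulting memory layout.
 */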
void __init mem_init(void)
{
        int codesize, datasize, initsize;
        int nid;

        iommu_init();

        num_physpages = 0;
        high_memory = NULL;

        for_each_online_node(nid) {
                pg_data_t *pgdat = NODE_DATA(nid);
                unsigned long node_pages = 0;
                void *node_high_memory;

                num_physpages += pgdat->node_present_pages;

                if (pgdat->node_spanned_pages)
                        node_pages = free_all_bootmem_node(pgdat);

                totalram_pages += node_pages;

                node_high_memory = (void *)__va((pgdat->node_start_pfn +
                                                 pgdat->node_spanned_pages) <<
                                                 PAGE_SHIFT);
                if (node_high_memory > high_memory)
                        high_memory = node_high_memory;
        }

        /* Set this up early, so we can take care of the zero page */
        cpu_cache_init();

        /* clear the zero-page */
        memset(empty_zero_page, 0, PAGE_SIZE);
        __flush_wback_region(empty_zero_page, PAGE_SIZE);

        /* Initialize the vDSO */
        vsyscall_init();

        codesize = (unsigned long) &_etext - (unsigned long) &_text;
        datasize = (unsigned long) &_edata - (unsigned long) &_etext;
        initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

        printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
               "%dk data, %dk init)\n",
                nr_free_pages() << (PAGE_SHIFT-10),
                num_physpages << (PAGE_SHIFT-10),
                codesize >> 10,
                datasize >> 10,
                initsize >> 10);

        printk(KERN_INFO "virtual kernel memory layout:\n"
                "    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
                "    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
                "    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
                "    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB) (cached)\n"
                "            : 0x%08lx - 0x%08lx   (%4ld MB) (uncached)\n"
                "      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
                "      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
                "      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
                FIXADDR_START, FIXADDR_TOP,
                (FIXADDR_TOP - FIXADDR_START) >> 10,

#ifdef CONFIG_HIGHMEM
                PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
                (LAST_PKMAP*PAGE_SIZE) >> 10,
#endif

                (unsigned long)VMALLOC_START, VMALLOC_END,
                (VMALLOC_END - VMALLOC_START) >> 20,

                (unsigned long)memory_start, (unsigned long)high_memory,
                ((unsigned long)high_memory - (unsigned long)memory_start) >> 20,

                (unsigned long)memory_start + cached_to_uncached,
                (unsigned long)memory_start + cached_to_uncached + uncached_size,
                uncached_size >> 20,

                (unsigned long)&__init_begin, (unsigned long)&__init_end,
                ((unsigned long)&__init_end -
                 (unsigned long)&__init_begin) >> 10,

                (unsigned long)&_etext, (unsigned long)&_edata,
                ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,

                (unsigned long)&_text, (unsigned long)&_etext,
                ((unsigned long)&_etext - (unsigned long)&_text) >> 10);

        mem_init_done = 1;
}

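/*
 * Return the pages covering the .init sections to the page allocator
 * once boot has finished with them.
 */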
void free_initmem(void)
{
        unsigned long addr;

        addr = (unsigned long)(&__init_begin);
        for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(addr));
                init_page_count(virt_to_page(addr));
                free_page(addr);
                totalram_pages++;
        }
        printk("Freeing unused kernel memory: %ldk freed\n",
               ((unsigned long)&__init_end -
                (unsigned long)&__init_begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
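/*
 * Free the pages that held the initial ramdisk image.
 */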
void free_initrd_mem(unsigned long start, unsigned long end)
{
        unsigned long p;
        for (p = start; p < end; p += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(p));
                init_page_count(virt_to_page(p));
                free_page(p);
                totalram_pages++;
        }
        printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
}
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
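/*
 * Memory hotplug entry point: add the physical range starting at
 * @start, @size bytes long, to ZONE_NORMAL of node @nid.
 */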
int arch_add_memory(int nid, u64 start, u64 size)
{
        pg_data_t *pgdat;
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;
        int ret;

        pgdat = NODE_DATA(nid);

        /* We only have ZONE_NORMAL, so this is easy.. */
        ret = __add_pages(nid, pgdat->node_zones + ZONE_NORMAL,
                          start_pfn, nr_pages);
        if (unlikely(ret))
                printk("%s: Failed, __add_pages() == %d\n", __func__, ret);

        return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 addr)
{
        /* Node 0 for now.. */
        return 0;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif

#endif /* CONFIG_MEMORY_HOTPLUG */