/*
 * linux/arch/sh/mm/init.c
 *
 * Copyright (C) 1999  Niibe Yutaka
 * Copyright (C) 2002 - 2007  Paul Mundt
 *
 * Based on linux/arch/i386/mm/init.c:
 *  Copyright (C) 1995  Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pagemap.h>
#include <linux/percpu.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/cache.h>

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
pgd_t swapper_pg_dir[PTRS_PER_PGD];

#ifdef CONFIG_SUPERH32
/*
 * Handle trivial transitions between cached and uncached
 * segments, making use of the 1:1 mapping relationship in
 * 512MB lowmem.
 *
 * This is the offset of the uncached section from its cached alias.
 * The default value is only valid in 29-bit mode; in 32-bit mode it
 * is overridden in pmb_init().
 */
unsigned long cached_to_uncached = P2SEG - P1SEG;
#endif

#ifdef CONFIG_MMU
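/*
 * Walk the kernel page tables for 'addr' and return a pointer to its
 * pte slot, allocating pud/pmd levels on the way down as needed.
 * Returns NULL if the pgd entry is empty or an allocation fails.
 */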
static pte_t *__get_pte_phys(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		pgd_ERROR(*pgd);
		return NULL;
	}

	pud = pud_alloc(NULL, pgd, addr);
	if (unlikely(!pud)) {
		pud_ERROR(*pud);
		return NULL;
	}

	pmd = pmd_alloc(NULL, pud, addr);
	if (unlikely(!pmd)) {
		pmd_ERROR(*pmd);
		return NULL;
	}

	pte = pte_offset_kernel(pmd, addr);
	return pte;
}

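/*
 * Establish a kernel mapping of physical address 'phys' at virtual
 * address 'addr' with protection 'prot', and flush the local TLB
 * entry.  Wired mappings (_PAGE_WIRED) are also loaded into a wired
 * TLB slot so they survive TLB replacement.
 */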
static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
{
	pte_t *pte;

	pte = __get_pte_phys(addr);
	if (!pte_none(*pte)) {
		pte_ERROR(*pte);
		return;
	}

	set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
	local_flush_tlb_one(get_asid(), addr);

	if (pgprot_val(prot) & _PAGE_WIRED)
		tlb_wire_entry(NULL, addr, *pte);
}

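/*
 * Tear down a mapping previously installed by set_pte_phys(): drop
 * any wired TLB entry, clear the pte and flush the local TLB.
 */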
static void clear_pte_phys(unsigned long addr, pgprot_t prot)
{
	pte_t *pte;

	pte = __get_pte_phys(addr);

	if (pgprot_val(prot) & _PAGE_WIRED)
		tlb_unwire_entry();

	set_pte(pte, pfn_pte(0, __pgprot(0)));
	local_flush_tlb_one(get_asid(), addr);
}

/*
 * As a performance optimization, other platforms preserve the fixmap mapping
 * across a context switch; we don't presently do this, but it could be done
 * in a similar fashion to the wired TLB interface that sh64 uses (by way
 * of the memory mapped UTLB configuration) -- this unfortunately forces us to
 * give up a TLB entry for each mapping we want to preserve. While this may be
 * viable for a small number of fixmaps, it's not particularly useful for
 * everything and needs to be carefully evaluated. (ie, we may want this for
 * the vsyscall page).
 *
 * XXX: Perhaps add a _PAGE_WIRED flag or something similar that we can pass
 * in at __set_fixmap() time to determine the appropriate behavior to follow.
 *
 *					 -- PFM.
 */
void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	set_pte_phys(address, phys, prot);
}

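/*
 * Undo a fixmap mapping previously established with __set_fixmap().
 */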
void __clear_fixmap(enum fixed_addresses idx, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	clear_pte_phys(address, prot);
}

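/*
 * Pre-allocate and populate page tables covering the kernel virtual
 * range [start, end) off pgd_base, so that mappings in this range
 * (e.g. the fixmaps) can later be set without allocating memory.
 */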
void __init page_table_range_init(unsigned long start, unsigned long end,
					 pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int i, j, k;
	unsigned long vaddr;

	vaddr = start;
	i = __pgd_offset(vaddr);
	j = __pud_offset(vaddr);
	k = __pmd_offset(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
		pud = (pud_t *)pgd;
		for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
#ifdef __PAGETABLE_PMD_FOLDED
			pmd = (pmd_t *)pud;
#else
			pmd = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);
			pud_populate(&init_mm, pud, pmd);
			pmd += k;
#endif
			for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
				if (pmd_none(*pmd)) {
					pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
					pmd_populate_kernel(&init_mm, pmd, pte);
					BUG_ON(pte != pte_offset_kernel(pmd, 0));
				}
				vaddr += PMD_SIZE;
			}
			k = 0;
		}
		j = 0;
	}
}
#endif	/* CONFIG_MMU */

/*
 * paging_init() sets up the page tables
 */
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long vaddr, end;
	int nid;

	/* We don't need to map the kernel through the TLB, as
	 * it is permanently mapped using P1. So clear the
	 * entire pgd. */
	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

	/* Set an initial value for the MMU.TTB so we don't have to
	 * check for a null value. */
	set_TTB(swapper_pg_dir);

	/*
	 * Populate the relevant portions of swapper_pg_dir so that
	 * we can use the fixmap entries without calling kmalloc.
	 * pte's will be filled in by __set_fixmap().
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
	page_table_range_init(vaddr, end, swapper_pg_dir);

	kmap_coherent_init();

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		unsigned long low, start_pfn;

		start_pfn = pgdat->bdata->node_min_pfn;
		low = pgdat->bdata->node_low_pfn;

		if (max_zone_pfns[ZONE_NORMAL] < low)
			max_zone_pfns[ZONE_NORMAL] = low;

		printk("Node %u: start_pfn = 0x%lx, low = 0x%lx\n",
		       nid, start_pfn, low);
	}

	free_area_init_nodes(max_zone_pfns);

	/* Set up the uncached fixmap */
	set_fixmap_nocache(FIX_UNCACHED, __pa(&__uncached_start));
}

/*
 * Early initialization for any I/O MMUs we might have.
 */
static void __init iommu_init(void)
{
	no_iommu_init();
}

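/* Set to 1 at the end of mem_init(), once the bootmem pages have been
 * released to the page allocator. */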
unsigned int mem_init_done = 0;

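/*
 * Release the bootmem-managed pages on each node to the page allocator,
 * initialize the CPU caches and the zero page, and report the final
 * memory layout.
 */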
void __init mem_init(void)
{
	int codesize, datasize, initsize;
	int nid;

	iommu_init();

	num_physpages = 0;
	high_memory = NULL;

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		unsigned long node_pages = 0;
		void *node_high_memory;

		num_physpages += pgdat->node_present_pages;

		if (pgdat->node_spanned_pages)
			node_pages = free_all_bootmem_node(pgdat);

		totalram_pages += node_pages;

		node_high_memory = (void *)__va((pgdat->node_start_pfn +
						 pgdat->node_spanned_pages) <<
						 PAGE_SHIFT);
		if (node_high_memory > high_memory)
			high_memory = node_high_memory;
	}

	/* Set this up early, so we can take care of the zero page */
	cpu_cache_init();

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);
	__flush_wback_region(empty_zero_page, PAGE_SIZE);

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
	       "%dk data, %dk init)\n",
	       nr_free_pages() << (PAGE_SHIFT-10),
	       num_physpages << (PAGE_SHIFT-10),
	       codesize >> 10,
	       datasize >> 10,
	       initsize >> 10);

	/* Initialize the vDSO */
	vsyscall_init();

	mem_init_done = 1;
}

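/*
 * Free the pages occupied by the __init sections now that boot-time
 * initialization has finished.
 */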
void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)(&__init_begin);
	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		free_page(addr);
		totalram_pages++;
	}
	printk("Freeing unused kernel memory: %ldk freed\n",
	       ((unsigned long)&__init_end -
		(unsigned long)&__init_begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
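/*
 * Free the pages that held the initial ramdisk once it is no longer
 * needed.
 */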
void free_initrd_mem(unsigned long start, unsigned long end)
{
	unsigned long p;

	for (p = start; p < end; p += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(p));
		init_page_count(virt_to_page(p));
		free_page(p);
		totalram_pages++;
	}
	printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
}
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
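/*
 * Memory hotplug entry point: add a block of memory to ZONE_NORMAL on
 * the given node.
 */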
int arch_add_memory(int nid, u64 start, u64 size)
{
	pg_data_t *pgdat;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	pgdat = NODE_DATA(nid);

	/* We only have ZONE_NORMAL, so this is easy.. */
	ret = __add_pages(nid, pgdat->node_zones + ZONE_NORMAL,
			  start_pfn, nr_pages);
	if (unlikely(ret))
		printk("%s: Failed, __add_pages() == %d\n", __func__, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 addr)
{
	/* Node 0 for now.. */
	return 0;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif

#endif /* CONFIG_MEMORY_HOTPLUG */

#ifdef CONFIG_PMB
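/*
 * Report whether the CPU is running in legacy 29-bit physical address
 * mode, i.e. the PMB 32-bit address extension is not enabled.
 */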
int __in_29bit_mode(void)
{
	return !(ctrl_inl(PMB_PASCR) & PASCR_SE);
}
#endif /* CONFIG_PMB */