/*
 * linux/arch/sh/mm/init.c
 *
 * Copyright (C) 1999  Niibe Yutaka
 * Copyright (C) 2002 - 2011  Paul Mundt
 *
 * Based on linux/arch/i386/mm/init.c:
 *  Copyright (C) 1995  Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pagemap.h>
#include <linux/percpu.h>
#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/dma-mapping.h>
#include <asm/mmu_context.h>
#include <asm/mmzone.h>
#include <asm/kexec.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/cache.h>
#include <asm/sizes.h>

pgd_t swapper_pg_dir[PTRS_PER_PGD];

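/*
 * Default memory initializer: register the platform's memory window
 * with memblock. Platforms with more involved layouts override this
 * through their machine vector (sh_mv.mv_mem_init).
 */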
void __init generic_mem_init(void)
{
	memblock_add(__MEMORY_START, __MEMORY_SIZE);
}

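/*
 * Weak default so that platforms with no special memory setup
 * requirements don't have to supply their own plat_mem_setup().
 */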
void __init __weak plat_mem_setup(void)
{
	/* Nothing to see here, move along. */
}

#ifdef CONFIG_MMU
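/*
 * Walk (and populate, where necessary) the kernel page tables for a
 * virtual address, returning a pointer to the PTE, or NULL if the
 * top-level PGD entry is missing or a mid-level allocation fails.
 */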
static pte_t *__get_pte_phys(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		pgd_ERROR(*pgd);
		return NULL;
	}

	pud = pud_alloc(NULL, pgd, addr);
	if (unlikely(!pud)) {
		pud_ERROR(*pud);
		return NULL;
	}

	pmd = pmd_alloc(NULL, pud, addr);
	if (unlikely(!pmd)) {
		pmd_ERROR(*pmd);
		return NULL;
	}

	return pte_offset_kernel(pmd, addr);
}

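/*
 * Install a kernel mapping of @addr to @phys with protection @prot and
 * flush the local TLB entry. Mappings with _PAGE_WIRED set are
 * additionally pinned into the TLB.
 */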
static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
{
	pte_t *pte;

	pte = __get_pte_phys(addr);
	if (!pte_none(*pte)) {
		pte_ERROR(*pte);
		return;
	}

	set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
	local_flush_tlb_one(get_asid(), addr);

	if (pgprot_val(prot) & _PAGE_WIRED)
		tlb_wire_entry(NULL, addr, *pte);
}

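/*
 * Tear down a mapping previously established by set_pte_phys(),
 * unwiring the TLB entry first if one was wired in.
 */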
static void clear_pte_phys(unsigned long addr, pgprot_t prot)
{
	pte_t *pte;

	pte = __get_pte_phys(addr);

	if (pgprot_val(prot) & _PAGE_WIRED)
		tlb_unwire_entry();

	set_pte(pte, pfn_pte(0, __pgprot(0)));
	local_flush_tlb_one(get_asid(), addr);
}

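/*
 * Map a fixmap slot to the given physical address, after bounds
 * checking the index against __end_of_fixed_addresses.
 */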
void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	set_pte_phys(address, phys, prot);
}

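/*
 * Undo a __set_fixmap() mapping for the given fixmap index.
 */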
void __clear_fixmap(enum fixed_addresses idx, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	clear_pte_phys(address, prot);
}

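/*
 * Allocate and hook in a PMD page for @pud if one isn't already
 * present, returning the PMD covering the start of the range.
 */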
static pmd_t * __init one_md_table_init(pud_t *pud)
{
	if (pud_none(*pud)) {
		pmd_t *pmd;

		pmd = alloc_bootmem_pages(PAGE_SIZE);
		pud_populate(&init_mm, pud, pmd);
		BUG_ON(pmd != pmd_offset(pud, 0));
	}

	return pmd_offset(pud, 0);
}

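/*
 * As above, but for the PTE level: allocate a page table for @pmd
 * if needed and return the first PTE.
 */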
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
	if (pmd_none(*pmd)) {
		pte_t *pte;

		pte = alloc_bootmem_pages(PAGE_SIZE);
		pmd_populate_kernel(&init_mm, pmd, pte);
		BUG_ON(pte != pte_offset_kernel(pmd, 0));
	}

	return pte_offset_kernel(pmd, 0);
}

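/*
 * No kmap consistency checking is needed on sh, so this is a no-op;
 * the hook exists to keep page_table_range_init() structured like
 * the x86 version, which does perform checks here.
 */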
static pte_t * __init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
					    unsigned long vaddr, pte_t *lastpte)
{
	return pte;
}

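/*
 * Pre-populate the intermediate page table levels (PGD/PUD/PMD and
 * the backing PTE pages) for the kernel virtual range [start, end),
 * typically the fixmap area.
 */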
void __init page_table_range_init(unsigned long start, unsigned long end,
				  pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;
	int i, j, k;
	unsigned long vaddr;

	vaddr = start;
	i = __pgd_offset(vaddr);
	j = __pud_offset(vaddr);
	k = __pmd_offset(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
		pud = (pud_t *)pgd;
		for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
			pmd = one_md_table_init(pud);
#ifndef __PAGETABLE_PMD_FOLDED
			pmd += k;
#endif
			for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
				pte = page_table_kmap_check(one_page_table_init(pmd),
							    pmd, vaddr, pte);
				vaddr += PMD_SIZE;
			}
			k = 0;
		}
		j = 0;
	}
}
#endif	/* CONFIG_MMU */

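/*
 * Set up the pglist_data for a node. On NUMA configurations the
 * structure itself is carved out of memblock, preferring memory
 * within the node's own PFN range before falling back to anywhere
 * in DRAM.
 */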
void __init allocate_pgdat(unsigned int nid)
{
	unsigned long start_pfn, end_pfn;
#ifdef CONFIG_NEED_MULTIPLE_NODES
	unsigned long phys;
#endif

	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

#ifdef CONFIG_NEED_MULTIPLE_NODES
	phys = __memblock_alloc_base(sizeof(struct pglist_data),
				     SMP_CACHE_BYTES, end_pfn << PAGE_SHIFT);
	/* Retry with all of system memory */
	if (!phys)
		phys = __memblock_alloc_base(sizeof(struct pglist_data),
					     SMP_CACHE_BYTES,
					     memblock_end_of_DRAM());
	if (!phys)
		panic("Can't allocate pgdat for node %d\n", nid);

	NODE_DATA(nid) = __va(phys);
	memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));

	NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
#endif

	NODE_DATA(nid)->node_start_pfn = start_pfn;
	NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
}

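/*
 * Initialize the bootmem allocator for one node: allocate the
 * bootmap, release the node's active regions to bootmem, and
 * re-reserve anything memblock already considers in use.
 */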
static void __init bootmem_init_one_node(unsigned int nid)
{
	unsigned long total_pages, paddr;
	unsigned long end_pfn;
	struct pglist_data *p;

	p = NODE_DATA(nid);

	/* Nothing to do.. */
	if (!p->node_spanned_pages)
		return;

	end_pfn = p->node_start_pfn + p->node_spanned_pages;

	total_pages = bootmem_bootmap_pages(p->node_spanned_pages);

	paddr = memblock_alloc(total_pages << PAGE_SHIFT, PAGE_SIZE);
	if (!paddr)
		panic("Can't allocate bootmap for nid[%d]\n", nid);

	init_bootmem_node(p, paddr >> PAGE_SHIFT, p->node_start_pfn, end_pfn);

	free_bootmem_with_active_regions(nid, end_pfn);

	/*
	 * XXX Handle initial reservations for the system memory node
	 * only for the moment, we'll refactor this later for handling
	 * reservations in other nodes.
	 */
	if (nid == 0) {
		struct memblock_region *reg;

		/* Reserve the sections we're already using. */
		for_each_memblock(reserved, reg) {
			reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
		}
	}

	sparse_memory_present_with_active_regions(nid);
}

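/*
 * Bring up bootmem across all online nodes, seeded from the
 * memblock memory regions.
 */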
static void __init do_init_bootmem(void)
{
	struct memblock_region *reg;
	int i;

	/* Add active regions with valid PFNs. */
	for_each_memblock(memory, reg) {
		unsigned long start_pfn, end_pfn;
		start_pfn = memblock_region_memory_base_pfn(reg);
		end_pfn = memblock_region_memory_end_pfn(reg);
		__add_active_range(0, start_pfn, end_pfn);
	}

	/* All of system RAM sits in node 0 for the non-NUMA case */
	allocate_pgdat(0);
	node_set_online(0);

	plat_mem_setup();

	for_each_online_node(i)
		bootmem_init_one_node(i);

	sparse_init();
}

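/*
 * Reserve the memory occupied by the kernel image (and anything
 * below CONFIG_ZERO_PAGE_OFFSET) before the allocators are live,
 * along with the initrd and crash kernel if configured.
 */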
static void __init early_reserve_mem(void)
{
	unsigned long start_pfn;

	/*
	 * Partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	start_pfn = PFN_UP(__pa(_end));

	/*
	 * Reserve the kernel text and reserve the bootmem bitmap. We do
	 * this in two steps (the first step was init_bootmem()), because
	 * this catches the (definitely buggy) case of us accidentally
	 * initializing the bootmem allocator with an invalid RAM area.
	 */
	memblock_reserve(__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET,
			 (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) -
			 (__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET));

	/*
	 * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
	 */
	if (CONFIG_ZERO_PAGE_OFFSET != 0)
		memblock_reserve(__MEMORY_START, CONFIG_ZERO_PAGE_OFFSET);

	/*
	 * Handle additional early reservations
	 */
	check_for_initrd();
	reserve_crashkernel();
}

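/*
 * Main early memory setup: populate memblock, apply any memory
 * limit, size the zones, and initialize the kernel page tables
 * and fixmap mappings.
 */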
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long vaddr, end;
	int nid;

	memblock_init();
	sh_mv.mv_mem_init();

	early_reserve_mem();

	/*
	 * Once the early reservations are out of the way, give the
	 * platforms a chance to kick out some memory.
	 */
	if (sh_mv.mv_mem_reserve)
		sh_mv.mv_mem_reserve();

	memblock_enforce_memory_limit(memory_limit);
	memblock_analyze();

	memblock_dump_all();

	/*
	 * Determine low and high memory ranges:
	 */
	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	min_low_pfn = __MEMORY_START >> PAGE_SHIFT;

	nodes_clear(node_online_map);

	memory_start = (unsigned long)__va(__MEMORY_START);
	memory_end = memory_start + (memory_limit ?: memblock_phys_mem_size());

	uncached_init();
	pmb_init();
	do_init_bootmem();
	ioremap_fixed_init();

	/*
	 * We don't need to map the kernel through the TLB, as it is
	 * permanently mapped using P1. So clear the entire pgd.
	 */
	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

	/*
	 * Set an initial value for the MMU.TTB so we don't have to
	 * check for a null value.
	 */
	set_TTB(swapper_pg_dir);

	/*
	 * Populate the relevant portions of swapper_pg_dir so that
	 * we can use the fixmap entries without calling kmalloc.
	 * PTEs will be filled in by __set_fixmap().
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
	page_table_range_init(vaddr, end, swapper_pg_dir);

	kmap_coherent_init();

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		unsigned long low, start_pfn;

		start_pfn = pgdat->bdata->node_min_pfn;
		low = pgdat->bdata->node_low_pfn;

		if (max_zone_pfns[ZONE_NORMAL] < low)
			max_zone_pfns[ZONE_NORMAL] = low;

		printk("Node %u: start_pfn = 0x%lx, low = 0x%lx\n",
		       nid, start_pfn, low);
	}

	free_area_init_nodes(max_zone_pfns);
}

/*
 * Early initialization for any I/O MMUs we might have.
 */
static void __init iommu_init(void)
{
	no_iommu_init();
}

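/* Non-zero once mem_init() has completed. */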
unsigned int mem_init_done = 0;

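/*
 * Hand the bootmem pages over to the buddy allocator, initialize the
 * caches and the zero page, and dump the virtual memory layout.
 */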
void __init mem_init(void)
{
	int codesize, datasize, initsize;
	int nid;

	iommu_init();

	num_physpages = 0;
	high_memory = NULL;

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		unsigned long node_pages = 0;
		void *node_high_memory;

		num_physpages += pgdat->node_present_pages;

		if (pgdat->node_spanned_pages)
			node_pages = free_all_bootmem_node(pgdat);

		totalram_pages += node_pages;

		node_high_memory = (void *)__va((pgdat->node_start_pfn +
						 pgdat->node_spanned_pages) <<
						 PAGE_SHIFT);
		if (node_high_memory > high_memory)
			high_memory = node_high_memory;
	}

	/* Set this up early, so we can take care of the zero page */
	cpu_cache_init();

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);
	__flush_wback_region(empty_zero_page, PAGE_SIZE);

	vsyscall_init();

	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
	datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;

	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
	       "%dk data, %dk init)\n",
		nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		datasize >> 10,
		initsize >> 10);

	printk(KERN_INFO "virtual kernel memory layout:\n"
		"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
		"    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB) (cached)\n"
#ifdef CONFIG_UNCACHED_MAPPING
		"            : 0x%08lx - 0x%08lx   (%4ld MB) (uncached)\n"
#endif
		"      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
		FIXADDR_START, FIXADDR_TOP,
		(FIXADDR_TOP - FIXADDR_START) >> 10,

#ifdef CONFIG_HIGHMEM
		PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
		(LAST_PKMAP*PAGE_SIZE) >> 10,
#endif

		(unsigned long)VMALLOC_START, VMALLOC_END,
		(VMALLOC_END - VMALLOC_START) >> 20,

		(unsigned long)memory_start, (unsigned long)high_memory,
		((unsigned long)high_memory - (unsigned long)memory_start) >> 20,

#ifdef CONFIG_UNCACHED_MAPPING
		uncached_start, uncached_end, uncached_size >> 20,
#endif

		(unsigned long)&__init_begin, (unsigned long)&__init_end,
		((unsigned long)&__init_end -
		 (unsigned long)&__init_begin) >> 10,

		(unsigned long)&_etext, (unsigned long)&_edata,
		((unsigned long)&_edata - (unsigned long)&_etext) >> 10,

		(unsigned long)&_text, (unsigned long)&_etext,
		((unsigned long)&_etext - (unsigned long)&_text) >> 10);

	mem_init_done = 1;
}

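/*
 * Release the .init text/data back to the page allocator once boot
 * has finished with it.
 */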
void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)(&__init_begin);
	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		free_page(addr);
		totalram_pages++;
	}
	printk("Freeing unused kernel memory: %ldk freed\n",
	       ((unsigned long)&__init_end -
		(unsigned long)&__init_begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
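/* Likewise for the initrd image once it has been unpacked. */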
void free_initrd_mem(unsigned long start, unsigned long end)
{
	unsigned long p;

	for (p = start; p < end; p += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(p));
		init_page_count(virt_to_page(p));
		free_page(p);
		totalram_pages++;
	}
	printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
}
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
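/*
 * Memory hotplug entry point: everything here is backed by
 * ZONE_NORMAL, so hot-added ranges are simply handed to __add_pages().
 */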
int arch_add_memory(int nid, u64 start, u64 size)
{
	pg_data_t *pgdat;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	pgdat = NODE_DATA(nid);

	/* We only have ZONE_NORMAL, so this is easy.. */
	ret = __add_pages(nid, pgdat->node_zones + ZONE_NORMAL,
			  start_pfn, nr_pages);
	if (unlikely(ret))
		printk("%s: Failed, __add_pages() == %d\n", __func__, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 addr)
{
	/* Node 0 for now.. */
	return 0;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif

#endif /* CONFIG_MEMORY_HOTPLUG */