#include <linux/gfp.h>
#include <linux/initrd.h>
#include <linux/ioport.h>
#include <linux/swap.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>	/* for max_low_pfn */

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/init.h>
#include <asm/page.h>
#include <asm/page_types.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/proto.h>
#include <asm/dma.h>		/* for MAX_DMA_PFN */

#include "mm_internal.h"

unsigned long __initdata pgt_buf_start;
unsigned long __meminitdata pgt_buf_end;
unsigned long __meminitdata pgt_buf_top;

static unsigned long min_pfn_mapped;

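/*
 * Allocate one zeroed page for early page tables.  Once the bootmem
 * allocator is up (after_bootmem, 64-bit only here) this uses the
 * normal page allocator; before that it hands out pages from the
 * brk-reserved pgt_buf, and when that buffer is exhausted it falls
 * back to memblock within the already-mapped
 * [min_pfn_mapped, max_pfn_mapped) range.
 */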
__ref void *alloc_low_page(void)
{
	unsigned long pfn;
	void *adr;

#ifdef CONFIG_X86_64
	if (after_bootmem) {
		adr = (void *)get_zeroed_page(GFP_ATOMIC | __GFP_NOTRACK);

		return adr;
	}
#endif

	if ((pgt_buf_end + 1) >= pgt_buf_top) {
		unsigned long ret;
		if (min_pfn_mapped >= max_pfn_mapped)
			panic("alloc_low_page: ran out of memory");
		ret = memblock_find_in_range(min_pfn_mapped << PAGE_SHIFT,
					max_pfn_mapped << PAGE_SHIFT,
					PAGE_SIZE, PAGE_SIZE);
		if (!ret)
			panic("alloc_low_page: can not alloc memory");
		memblock_reserve(ret, PAGE_SIZE);
		pfn = ret >> PAGE_SHIFT;
	} else
		pfn = pgt_buf_end++;

	adr = __va(pfn * PAGE_SIZE);
	clear_page(adr);
	return adr;
}

/* need 4 4k pages for the initial PMD_SIZE mapping, plus 4k for 0-ISA_END_ADDRESS */
#define INIT_PGT_BUF_SIZE	(5 * PAGE_SIZE)
RESERVE_BRK(early_pgt_alloc, INIT_PGT_BUF_SIZE);
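/*
 * Take the initial page-table buffer out of the brk area reserved
 * above and publish its bounds (in pfn units) via
 * pgt_buf_start/pgt_buf_end/pgt_buf_top for alloc_low_page().
 */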
void __init early_alloc_pgt_buf(void)
{
	unsigned long tables = INIT_PGT_BUF_SIZE;
	phys_addr_t base;

	base = __pa(extend_brk(tables, PAGE_SIZE));

	pgt_buf_start = base >> PAGE_SHIFT;
	pgt_buf_end = pgt_buf_start;
	pgt_buf_top = pgt_buf_start + (tables >> PAGE_SHIFT);
}

int after_bootmem;

int direct_gbpages
#ifdef CONFIG_DIRECT_GBPAGES
				= 1
#endif
;
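
/*
 * A physical address range together with the page sizes (2M/1G bits
 * in page_size_mask) it may be mapped with.
 */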
struct map_range {
	unsigned long start;
	unsigned long end;
	unsigned page_size_mask;
};

static int page_size_mask;

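/*
 * Work out which large page sizes the direct mapping may use, and
 * enable the matching CPU features (PSE/PGE) in CR4.
 */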
static void __init probe_page_size_mask(void)
{
#if !defined(CONFIG_DEBUG_PAGEALLOC) && !defined(CONFIG_KMEMCHECK)
	/*
	 * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
	 * This will simplify cpa(), which otherwise needs to support splitting
	 * large pages into small ones in interrupt context, etc.
	 */
	if (direct_gbpages)
		page_size_mask |= 1 << PG_LEVEL_1G;
	if (cpu_has_pse)
		page_size_mask |= 1 << PG_LEVEL_2M;
#endif

	/* Enable PSE if available */
	if (cpu_has_pse)
		set_in_cr4(X86_CR4_PSE);

	/* Enable PGE if available */
	if (cpu_has_pge) {
		set_in_cr4(X86_CR4_PGE);
		__supported_pte_mask |= _PAGE_GLOBAL;
	}
}

#ifdef CONFIG_X86_32
#define NR_RANGE_MR 3
#else /* CONFIG_X86_64 */
#define NR_RANGE_MR 5
#endif

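/*
 * Record one [start_pfn, end_pfn) range with its page_size_mask in
 * mr[]; panics if more than NR_RANGE_MR ranges are needed.
 */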
static int __meminit save_mr(struct map_range *mr, int nr_range,
			     unsigned long start_pfn, unsigned long end_pfn,
			     unsigned long page_size_mask)
{
	if (start_pfn < end_pfn) {
		if (nr_range >= NR_RANGE_MR)
			panic("run out of range for init_memory_mapping\n");
		mr[nr_range].start = start_pfn<<PAGE_SHIFT;
		mr[nr_range].end   = end_pfn<<PAGE_SHIFT;
		mr[nr_range].page_size_mask = page_size_mask;
		nr_range++;
	}

	return nr_range;
}

/*
 * Adjust the page_size_mask of a small range to use a big page size
 * instead of a small one if the neighbouring memory is RAM too.
 */
static void __init_refok adjust_range_page_size_mask(struct map_range *mr,
						     int nr_range)
{
	int i;

	for (i = 0; i < nr_range; i++) {
		if ((page_size_mask & (1<<PG_LEVEL_2M)) &&
		    !(mr[i].page_size_mask & (1<<PG_LEVEL_2M))) {
			unsigned long start = round_down(mr[i].start, PMD_SIZE);
			unsigned long end = round_up(mr[i].end, PMD_SIZE);

#ifdef CONFIG_X86_32
			if ((end >> PAGE_SHIFT) > max_low_pfn)
				continue;
#endif

			if (memblock_is_region_memory(start, end - start))
				mr[i].page_size_mask |= 1<<PG_LEVEL_2M;
		}
		if ((page_size_mask & (1<<PG_LEVEL_1G)) &&
		    !(mr[i].page_size_mask & (1<<PG_LEVEL_1G))) {
			unsigned long start = round_down(mr[i].start, PUD_SIZE);
			unsigned long end = round_up(mr[i].end, PUD_SIZE);

			if (memblock_is_region_memory(start, end - start))
				mr[i].page_size_mask |= 1<<PG_LEVEL_1G;
		}
	}
}

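/*
 * Split [start, end) into up to NR_RANGE_MR ranges by 2M/1G
 * alignment: unaligned head and tail pieces are mapped with 4k pages
 * while the aligned middle gets the largest page size allowed by
 * page_size_mask.
 */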
static int __meminit split_mem_range(struct map_range *mr, int nr_range,
				     unsigned long start,
				     unsigned long end)
{
	unsigned long start_pfn, end_pfn;
	unsigned long pos;
	int i;

	/* head if not big page aligned? */
	start_pfn = start >> PAGE_SHIFT;
	pos = start_pfn << PAGE_SHIFT;
#ifdef CONFIG_X86_32
	/*
	 * Don't use a large page for the first 2/4MB of memory
	 * because there are often fixed size MTRRs in there
	 * and overlapping MTRRs into large pages can cause
	 * slowdowns.
	 */
	if (pos == 0)
		end_pfn = 1<<(PMD_SHIFT - PAGE_SHIFT);
	else
		end_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
				 << (PMD_SHIFT - PAGE_SHIFT);
#else /* CONFIG_X86_64 */
	end_pfn = ((pos + (PMD_SIZE - 1)) >> PMD_SHIFT)
			<< (PMD_SHIFT - PAGE_SHIFT);
#endif
	if (end_pfn > (end >> PAGE_SHIFT))
		end_pfn = end >> PAGE_SHIFT;
	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
		pos = end_pfn << PAGE_SHIFT;
	}

	/* big page (2M) range */
	start_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
			 << (PMD_SHIFT - PAGE_SHIFT);
#ifdef CONFIG_X86_32
	end_pfn = (end>>PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
#else /* CONFIG_X86_64 */
	end_pfn = ((pos + (PUD_SIZE - 1))>>PUD_SHIFT)
			 << (PUD_SHIFT - PAGE_SHIFT);
	if (end_pfn > ((end>>PMD_SHIFT)<<(PMD_SHIFT - PAGE_SHIFT)))
		end_pfn = ((end>>PMD_SHIFT)<<(PMD_SHIFT - PAGE_SHIFT));
#endif

	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
				page_size_mask & (1<<PG_LEVEL_2M));
		pos = end_pfn << PAGE_SHIFT;
	}

#ifdef CONFIG_X86_64
	/* big page (1G) range */
	start_pfn = ((pos + (PUD_SIZE - 1))>>PUD_SHIFT)
			 << (PUD_SHIFT - PAGE_SHIFT);
	end_pfn = (end >> PUD_SHIFT) << (PUD_SHIFT - PAGE_SHIFT);
	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
				page_size_mask &
				 ((1<<PG_LEVEL_2M)|(1<<PG_LEVEL_1G)));
		pos = end_pfn << PAGE_SHIFT;
	}

	/* tail that is not big page (1G) aligned */
	start_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
			 << (PMD_SHIFT - PAGE_SHIFT);
	end_pfn = (end >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
				page_size_mask & (1<<PG_LEVEL_2M));
		pos = end_pfn << PAGE_SHIFT;
	}
#endif

	/* tail that is not big page (2M) aligned */
	start_pfn = pos>>PAGE_SHIFT;
	end_pfn = end>>PAGE_SHIFT;
	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);

	/* try to merge contiguous ranges with the same page size */
	for (i = 0; nr_range > 1 && i < nr_range - 1; i++) {
		unsigned long old_start;
		if (mr[i].end != mr[i+1].start ||
		    mr[i].page_size_mask != mr[i+1].page_size_mask)
			continue;
		/* move it */
		old_start = mr[i].start;
		memmove(&mr[i], &mr[i+1],
			(nr_range - 1 - i) * sizeof(struct map_range));
		mr[i--].start = old_start;
		nr_range--;
	}

	if (!after_bootmem)
		adjust_range_page_size_mask(mr, nr_range);

	for (i = 0; i < nr_range; i++)
		printk(KERN_DEBUG " [mem %#010lx-%#010lx] page %s\n",
				mr[i].start, mr[i].end - 1,
			(mr[i].page_size_mask & (1<<PG_LEVEL_1G))?"1G":(
			 (mr[i].page_size_mask & (1<<PG_LEVEL_2M))?"2M":"4k"));

	return nr_range;
}

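/*
 * Bookkeeping of the pfn ranges the direct mapping already covers,
 * kept merged and sorted in pfn_mapped[].
 */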
static struct range pfn_mapped[E820_X_MAX];
static int nr_pfn_mapped;

static void add_pfn_range_mapped(unsigned long start_pfn, unsigned long end_pfn)
{
	nr_pfn_mapped = add_range_with_merge(pfn_mapped, E820_X_MAX,
					     nr_pfn_mapped, start_pfn, end_pfn);
	nr_pfn_mapped = clean_sort_range(pfn_mapped, E820_X_MAX);

	max_pfn_mapped = max(max_pfn_mapped, end_pfn);

	if (start_pfn < (1UL<<(32-PAGE_SHIFT)))
		max_low_pfn_mapped = max(max_low_pfn_mapped,
					 min(end_pfn, 1UL<<(32-PAGE_SHIFT)));
}

bool pfn_range_is_mapped(unsigned long start_pfn, unsigned long end_pfn)
{
	int i;

	for (i = 0; i < nr_pfn_mapped; i++)
		if ((start_pfn >= pfn_mapped[i].start) &&
		    (end_pfn <= pfn_mapped[i].end))
			return true;

	return false;
}

/*
 * Set up the direct mapping of the physical memory at PAGE_OFFSET.
 * This runs before bootmem is initialized and gets pages directly from
 * the physical memory. To access them they are temporarily mapped.
 */
unsigned long __init_refok init_memory_mapping(unsigned long start,
					       unsigned long end)
{
	struct map_range mr[NR_RANGE_MR];
	unsigned long ret = 0;
	int nr_range, i;

	pr_info("init_memory_mapping: [mem %#010lx-%#010lx]\n",
	       start, end - 1);

	memset(mr, 0, sizeof(mr));
	nr_range = split_mem_range(mr, 0, start, end);

	for (i = 0; i < nr_range; i++)
		ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
						   mr[i].page_size_mask);

#ifdef CONFIG_X86_32
	early_ioremap_page_table_range_init();

	load_cr3(swapper_pg_dir);
#endif

	__flush_tlb_all();

	add_pfn_range_mapped(start >> PAGE_SHIFT, ret >> PAGE_SHIFT);

	return ret >> PAGE_SHIFT;
}

/*
 * The range may have holes in the middle or at the ends; only the RAM
 * parts will be mapped.
 */
static unsigned long __init init_range_memory_mapping(
					   unsigned long range_start,
					   unsigned long range_end)
{
	unsigned long start_pfn, end_pfn;
	unsigned long mapped_ram_size = 0;
	int i;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) {
		u64 start = (u64)start_pfn << PAGE_SHIFT;
		u64 end = (u64)end_pfn << PAGE_SHIFT;

		if (end <= range_start)
			continue;

		if (start < range_start)
			start = range_start;

		if (start >= range_end)
			continue;

		if (end > range_end)
			end = range_end;

		init_memory_mapping(start, end);

		mapped_ram_size += end - start;
	}

	return mapped_ram_size;
}

/* (PUD_SHIFT-PMD_SHIFT)/2 */
#define STEP_SIZE_SHIFT 5
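
/*
 * Map RAM above ISA_END_ADDRESS top-down in chunks: start with a
 * single PMD_SIZE chunk just below the highest usable address (so the
 * small brk-based pgt_buf is enough), then grow the chunk size by
 * STEP_SIZE_SHIFT each round, since every mapped chunk provides more
 * room for further page tables via alloc_low_page().
 */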
void __init init_mem_mapping(void)
{
	unsigned long end, real_end, start, last_start;
	unsigned long step_size;
	unsigned long addr;
	unsigned long mapped_ram_size = 0;
	unsigned long new_mapped_ram_size;

	probe_page_size_mask();

#ifdef CONFIG_X86_64
	end = max_pfn << PAGE_SHIFT;
#else
	end = max_low_pfn << PAGE_SHIFT;
#endif

	/* the ISA range is always mapped regardless of memory holes */
	init_memory_mapping(0, ISA_END_ADDRESS);

	/* Xen has a big reserved range near the end of RAM; skip it at first */
	addr = memblock_find_in_range(ISA_END_ADDRESS, end, PMD_SIZE,
			PAGE_SIZE);
	real_end = addr + PMD_SIZE;

	/* step_size needs to be small so the pgt_buf from brk can cover it */
	step_size = PMD_SIZE;
	max_pfn_mapped = 0; /* will get the exact value next */
	min_pfn_mapped = real_end >> PAGE_SHIFT;
	last_start = start = real_end;
	while (last_start > ISA_END_ADDRESS) {
		if (last_start > step_size) {
			start = round_down(last_start - 1, step_size);
			if (start < ISA_END_ADDRESS)
				start = ISA_END_ADDRESS;
		} else
			start = ISA_END_ADDRESS;
		new_mapped_ram_size = init_range_memory_mapping(start,
							last_start);
		last_start = start;
		min_pfn_mapped = last_start >> PAGE_SHIFT;
		/* only increase step_size after a big range gets mapped */
		if (new_mapped_ram_size > mapped_ram_size)
			step_size <<= STEP_SIZE_SHIFT;
		mapped_ram_size += new_mapped_ram_size;
	}

	if (real_end < end)
		init_range_memory_mapping(real_end, end);

#ifdef CONFIG_X86_64
	if (max_pfn > max_low_pfn) {
		/* can we preserve max_low_pfn? */
		max_low_pfn = max_pfn;
	}
#endif
	early_memtest(0, max_pfn_mapped << PAGE_SHIFT);
}

/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * On x86, access has to be given to the first megabyte of RAM because that
 * area contains BIOS code and data regions used by X and dosemu and similar
 * apps. Access has to be given to non-kernel-RAM areas as well; these
 * contain the PCI mmio resources as well as potential bios/acpi data regions.
 */
int devmem_is_allowed(unsigned long pagenr)
{
	if (pagenr < 256)
		return 1;
	if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
		return 0;
	if (!page_is_ram(pagenr))
		return 1;
	return 0;
}

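/*
 * Free the [begin, end) range that held init code or data, poisoning
 * each page first.  With CONFIG_DEBUG_PAGEALLOC the pages are instead
 * marked not-present so any late access faults.
 */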
void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
	unsigned long addr;
	unsigned long begin_aligned, end_aligned;

	/* Make sure boundaries are page aligned */
	begin_aligned = PAGE_ALIGN(begin);
	end_aligned   = end & PAGE_MASK;

	if (WARN_ON(begin_aligned != begin || end_aligned != end)) {
		begin = begin_aligned;
		end   = end_aligned;
	}

	if (begin >= end)
		return;

	addr = begin;

	/*
	 * If debugging page accesses then do not free this memory but
	 * mark them not present - any buggy init-section access will
	 * create a kernel page fault:
	 */
#ifdef CONFIG_DEBUG_PAGEALLOC
	printk(KERN_INFO "debug: unmapping init [mem %#010lx-%#010lx]\n",
		begin, end - 1);
	set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
#else
	/*
	 * We just marked the kernel text read only above; now that
	 * we are going to free part of it, we need to make that
	 * part writeable and non-executable first.
	 */
	set_memory_nx(begin, (end - begin) >> PAGE_SHIFT);
	set_memory_rw(begin, (end - begin) >> PAGE_SHIFT);

	printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);

	for (; addr < end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
#endif
}

void free_initmem(void)
{
	free_init_pages("unused kernel memory",
			(unsigned long)(&__init_begin),
			(unsigned long)(&__init_end));
}

#ifdef CONFIG_BLK_DEV_INITRD
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
	/*
	 * end might not be page aligned, and we cannot align it here:
	 * the decompressor could be confused by an aligned initrd_end.
	 * The trailing partial page was already reserved in
	 *   - i386_start_kernel()
	 *   - x86_64_start_kernel()
	 *   - relocate_initrd()
	 * so PAGE_ALIGN() is safe here and frees that partial page too.
	 */
	free_init_pages("initrd memory", start, PAGE_ALIGN(end));
}
#endif

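/*
 * Tell the core VM the highest pfn of each configured zone
 * (DMA/DMA32/NORMAL/HIGHMEM) so free_area_init_nodes() can size them.
 */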
void __init zone_sizes_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA]		= MAX_DMA_PFN;
#endif
#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32]	= MAX_DMA32_PFN;
#endif
	max_zone_pfns[ZONE_NORMAL]	= max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM]	= max_pfn;
#endif

	free_area_init_nodes(max_zone_pfns);
}