#include <linux/gfp.h>
#include <linux/initrd.h>
#include <linux/ioport.h>
#include <linux/swap.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>	/* for max_low_pfn */

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/init.h>
#include <asm/page.h>
#include <asm/page_types.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/proto.h>
#include <asm/dma.h>		/* for MAX_DMA_PFN */
#include <asm/microcode.h>

/*
 * We need to define the tracepoints somewhere, and tlb.c
 * is only compiled when SMP=y.
 */
#define CREATE_TRACE_POINTS
#include <trace/events/tlb.h>

#include "mm_internal.h"

/*
 * Tables translating between page_cache_type_t and pte encoding.
 *
 * The minimal supported modes are defined statically; the tables are
 * modified if more supported cache modes become available.
 *
 * The index into __cachemode2pte_tbl is the cache mode.
 * The index into __pte2cachemode_tbl is built from the caching attribute
 * bits of the pte (_PAGE_PWT, _PAGE_PCD, _PAGE_PAT) at bit positions 0, 1, 2.
 */
uint16_t __cachemode2pte_tbl[_PAGE_CACHE_MODE_NUM] = {
	[_PAGE_CACHE_MODE_WB]		= 0,
	[_PAGE_CACHE_MODE_WC]		= _PAGE_PWT,
	[_PAGE_CACHE_MODE_UC_MINUS]	= _PAGE_PCD,
	[_PAGE_CACHE_MODE_UC]		= _PAGE_PCD | _PAGE_PWT,
	[_PAGE_CACHE_MODE_WT]		= _PAGE_PCD,
	[_PAGE_CACHE_MODE_WP]		= _PAGE_PCD,
};
EXPORT_SYMBOL(__cachemode2pte_tbl);
uint8_t __pte2cachemode_tbl[8] = {
	[__pte2cm_idx(0)] = _PAGE_CACHE_MODE_WB,
	[__pte2cm_idx(_PAGE_PWT)] = _PAGE_CACHE_MODE_WC,
	[__pte2cm_idx(_PAGE_PCD)] = _PAGE_CACHE_MODE_UC_MINUS,
	[__pte2cm_idx(_PAGE_PWT | _PAGE_PCD)] = _PAGE_CACHE_MODE_UC,
	[__pte2cm_idx(_PAGE_PAT)] = _PAGE_CACHE_MODE_WB,
	[__pte2cm_idx(_PAGE_PWT | _PAGE_PAT)] = _PAGE_CACHE_MODE_WC,
	[__pte2cm_idx(_PAGE_PCD | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC_MINUS,
	[__pte2cm_idx(_PAGE_PWT | _PAGE_PCD | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC,
};
EXPORT_SYMBOL(__pte2cachemode_tbl);

static unsigned long __initdata pgt_buf_start;
static unsigned long __initdata pgt_buf_end;
static unsigned long __initdata pgt_buf_top;

static unsigned long min_pfn_mapped;

static bool __initdata can_use_brk_pgt = true;

/*
 * Pages returned are already directly mapped.
 *
 * Changing that is likely to break Xen, see commit:
 *
 *    279b706 x86,xen: introduce x86_init.mapping.pagetable_reserve
 *
 * for detailed information.
 */
__ref void *alloc_low_pages(unsigned int num)
{
	unsigned long pfn;
	int i;

	if (after_bootmem) {
		unsigned int order;

		order = get_order((unsigned long)num << PAGE_SHIFT);
		return (void *)__get_free_pages(GFP_ATOMIC | __GFP_NOTRACK |
						__GFP_ZERO, order);
	}

	if ((pgt_buf_end + num) > pgt_buf_top || !can_use_brk_pgt) {
		unsigned long ret;
		if (min_pfn_mapped >= max_pfn_mapped)
			panic("alloc_low_pages: ran out of memory");
		ret = memblock_find_in_range(min_pfn_mapped << PAGE_SHIFT,
					max_pfn_mapped << PAGE_SHIFT,
					PAGE_SIZE * num, PAGE_SIZE);
		if (!ret)
			panic("alloc_low_pages: can not alloc memory");
		memblock_reserve(ret, PAGE_SIZE * num);
		pfn = ret >> PAGE_SHIFT;
	} else {
		pfn = pgt_buf_end;
		pgt_buf_end += num;
		printk(KERN_DEBUG "BRK [%#010lx, %#010lx] PGTABLE\n",
			pfn << PAGE_SHIFT, (pgt_buf_end << PAGE_SHIFT) - 1);
	}

	for (i = 0; i < num; i++) {
		void *adr;

		adr = __va((pfn + i) << PAGE_SHIFT);
		clear_page(adr);
	}

	return __va(pfn << PAGE_SHIFT);
}

/* need 3 4k pages for the initial PMD_SIZE mapping, 3 4k pages for 0-ISA_END_ADDRESS */
#define INIT_PGT_BUF_SIZE	(6 * PAGE_SIZE)
RESERVE_BRK(early_pgt_alloc, INIT_PGT_BUF_SIZE);
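/*
 * Carve the early page-table buffer out of the kernel's brk area and
 * initialize pgt_buf_{start,end,top} so alloc_low_pages() can hand out
 * pages from it before memblock-backed allocations are usable.
 */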
void __init early_alloc_pgt_buf(void)
{
	unsigned long tables = INIT_PGT_BUF_SIZE;
	phys_addr_t base;

	base = __pa(extend_brk(tables, PAGE_SIZE));

	pgt_buf_start = base >> PAGE_SHIFT;
	pgt_buf_end = pgt_buf_start;
	pgt_buf_top = pgt_buf_start + (tables >> PAGE_SHIFT);
}

int after_bootmem;

static int page_size_mask;

int direct_gbpages = IS_ENABLED(CONFIG_DIRECT_GBPAGES);

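/*
 * Decide whether 1G pages may be used for the direct mapping and, if so,
 * record that in page_size_mask.
 */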
static void __init init_gbpages(void)
{
	if (!IS_ENABLED(CONFIG_ENABLE_DIRECT_GBPAGES)) {
		direct_gbpages = 0;
		return;
	}
	if (direct_gbpages && cpu_has_gbpages) {
		printk(KERN_INFO "Using GB pages for direct mapping\n");
		page_size_mask |= 1 << PG_LEVEL_1G;
	} else
		direct_gbpages = 0;
}

struct map_range {
	unsigned long start;
	unsigned long end;
	unsigned page_size_mask;
};

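/*
 * Work out which page sizes (2M/1G) may be used for the direct mapping
 * and enable the matching CPU features (PSE, PGE) when they are present.
 */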
static void __init probe_page_size_mask(void)
{
	init_gbpages();

#if !defined(CONFIG_DEBUG_PAGEALLOC) && !defined(CONFIG_KMEMCHECK)
	/*
	 * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
	 * This will simplify cpa(), which otherwise needs to support splitting
	 * large pages into small in interrupt context, etc.
	 */
	if (cpu_has_pse)
		page_size_mask |= 1 << PG_LEVEL_2M;
#endif

	/* Enable PSE if available */
	if (cpu_has_pse)
		cr4_set_bits_and_update_boot(X86_CR4_PSE);

	/* Enable PGE if available */
	if (cpu_has_pge) {
		cr4_set_bits_and_update_boot(X86_CR4_PGE);
		__supported_pte_mask |= _PAGE_GLOBAL;
	}
}

#ifdef CONFIG_X86_32
#define NR_RANGE_MR 3
#else /* CONFIG_X86_64 */
#define NR_RANGE_MR 5
#endif

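/*
 * Record one [start_pfn, end_pfn) range and its page size mask in mr[],
 * returning the updated number of ranges.
 */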
static int __meminit save_mr(struct map_range *mr, int nr_range,
			     unsigned long start_pfn, unsigned long end_pfn,
			     unsigned long page_size_mask)
{
	if (start_pfn < end_pfn) {
		if (nr_range >= NR_RANGE_MR)
			panic("run out of range for init_memory_mapping\n");
		mr[nr_range].start = start_pfn<<PAGE_SHIFT;
		mr[nr_range].end   = end_pfn<<PAGE_SHIFT;
		mr[nr_range].page_size_mask = page_size_mask;
		nr_range++;
	}

	return nr_range;
}

/*
 * Adjust the page_size_mask of a small range so that it uses a big page
 * size instead of a small one if the surrounding memory is RAM as well.
 */
static void __init_refok adjust_range_page_size_mask(struct map_range *mr,
						      int nr_range)
{
	int i;

	for (i = 0; i < nr_range; i++) {
		if ((page_size_mask & (1<<PG_LEVEL_2M)) &&
		    !(mr[i].page_size_mask & (1<<PG_LEVEL_2M))) {
			unsigned long start = round_down(mr[i].start, PMD_SIZE);
			unsigned long end = round_up(mr[i].end, PMD_SIZE);

#ifdef CONFIG_X86_32
			if ((end >> PAGE_SHIFT) > max_low_pfn)
				continue;
#endif

			if (memblock_is_region_memory(start, end - start))
				mr[i].page_size_mask |= 1<<PG_LEVEL_2M;
		}
		if ((page_size_mask & (1<<PG_LEVEL_1G)) &&
		    !(mr[i].page_size_mask & (1<<PG_LEVEL_1G))) {
			unsigned long start = round_down(mr[i].start, PUD_SIZE);
			unsigned long end = round_up(mr[i].end, PUD_SIZE);

			if (memblock_is_region_memory(start, end - start))
				mr[i].page_size_mask |= 1<<PG_LEVEL_1G;
		}
	}
}

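/* Return a human-readable name for the largest page size a range will use. */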
static const char *page_size_string(struct map_range *mr)
{
	static const char str_1g[] = "1G";
	static const char str_2m[] = "2M";
	static const char str_4m[] = "4M";
	static const char str_4k[] = "4k";

	if (mr->page_size_mask & (1<<PG_LEVEL_1G))
		return str_1g;
	/*
	 * 32-bit without PAE has a 4M large page size.
	 * PG_LEVEL_2M is misnamed, but we can at least
	 * print out the right size in the string.
	 */
	if (IS_ENABLED(CONFIG_X86_32) &&
	    !IS_ENABLED(CONFIG_X86_PAE) &&
	    mr->page_size_mask & (1<<PG_LEVEL_2M))
		return str_4m;

	if (mr->page_size_mask & (1<<PG_LEVEL_2M))
		return str_2m;

	return str_4k;
}

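/*
 * Split [start, end) into ranges that can be mapped with 4k, 2M and
 * (on 64-bit) 1G pages: only the properly aligned middle portions get
 * the large page sizes, while the unaligned head and tail fall back to
 * smaller pages.
 */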
static int __meminit split_mem_range(struct map_range *mr, int nr_range,
				     unsigned long start,
				     unsigned long end)
{
	unsigned long start_pfn, end_pfn, limit_pfn;
	unsigned long pfn;
	int i;

	limit_pfn = PFN_DOWN(end);

	/* head chunk if the start is not big page aligned */
	pfn = start_pfn = PFN_DOWN(start);
#ifdef CONFIG_X86_32
	/*
	 * Don't use a large page for the first 2/4MB of memory
	 * because there are often fixed size MTRRs in there
	 * and overlapping MTRRs into large pages can cause
	 * slowdowns.
	 */
	if (pfn == 0)
		end_pfn = PFN_DOWN(PMD_SIZE);
	else
		end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
#else /* CONFIG_X86_64 */
	end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
#endif
	if (end_pfn > limit_pfn)
		end_pfn = limit_pfn;
	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
		pfn = end_pfn;
	}

	/* big page (2M) range */
	start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
#ifdef CONFIG_X86_32
	end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE));
#else /* CONFIG_X86_64 */
	end_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE));
	if (end_pfn > round_down(limit_pfn, PFN_DOWN(PMD_SIZE)))
		end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE));
#endif

	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
				page_size_mask & (1<<PG_LEVEL_2M));
		pfn = end_pfn;
	}

#ifdef CONFIG_X86_64
	/* big page (1G) range */
	start_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE));
	end_pfn = round_down(limit_pfn, PFN_DOWN(PUD_SIZE));
	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
				page_size_mask &
				 ((1<<PG_LEVEL_2M)|(1<<PG_LEVEL_1G)));
		pfn = end_pfn;
	}

	/* tail chunk that is not big page (1G) aligned */
	start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
	end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE));
	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
				page_size_mask & (1<<PG_LEVEL_2M));
		pfn = end_pfn;
	}
#endif

	/* tail chunk that is not big page (2M) aligned */
	start_pfn = pfn;
	end_pfn = limit_pfn;
	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);

	if (!after_bootmem)
		adjust_range_page_size_mask(mr, nr_range);

	/* try to merge contiguous ranges with the same page size */
	for (i = 0; nr_range > 1 && i < nr_range - 1; i++) {
		unsigned long old_start;
		if (mr[i].end != mr[i+1].start ||
		    mr[i].page_size_mask != mr[i+1].page_size_mask)
			continue;
		/* move it */
		old_start = mr[i].start;
		memmove(&mr[i], &mr[i+1],
			(nr_range - 1 - i) * sizeof(struct map_range));
		mr[i--].start = old_start;
		nr_range--;
	}

	for (i = 0; i < nr_range; i++)
		printk(KERN_DEBUG " [mem %#010lx-%#010lx] page %s\n",
				mr[i].start, mr[i].end - 1,
				page_size_string(&mr[i]));

	return nr_range;
}


struct range pfn_mapped[E820_X_MAX];
int nr_pfn_mapped;

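/*
 * Track which pfn ranges have already been added to the direct mapping,
 * and keep max_pfn_mapped/max_low_pfn_mapped up to date.
 */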
static void add_pfn_range_mapped(unsigned long start_pfn, unsigned long end_pfn)
{
	nr_pfn_mapped = add_range_with_merge(pfn_mapped, E820_X_MAX,
					     nr_pfn_mapped, start_pfn, end_pfn);
	nr_pfn_mapped = clean_sort_range(pfn_mapped, E820_X_MAX);

	max_pfn_mapped = max(max_pfn_mapped, end_pfn);

	if (start_pfn < (1UL<<(32-PAGE_SHIFT)))
		max_low_pfn_mapped = max(max_low_pfn_mapped,
					 min(end_pfn, 1UL<<(32-PAGE_SHIFT)));
}

bool pfn_range_is_mapped(unsigned long start_pfn, unsigned long end_pfn)
{
	int i;

	for (i = 0; i < nr_pfn_mapped; i++)
		if ((start_pfn >= pfn_mapped[i].start) &&
		    (end_pfn <= pfn_mapped[i].end))
			return true;

	return false;
}

/*
 * Set up the direct mapping of the physical memory at PAGE_OFFSET.
 * This runs before bootmem is initialized and gets pages directly from
 * the physical memory. To access them they are temporarily mapped.
 */
unsigned long __init_refok init_memory_mapping(unsigned long start,
					       unsigned long end)
{
	struct map_range mr[NR_RANGE_MR];
	unsigned long ret = 0;
	int nr_range, i;

	pr_info("init_memory_mapping: [mem %#010lx-%#010lx]\n",
	       start, end - 1);

	memset(mr, 0, sizeof(mr));
	nr_range = split_mem_range(mr, 0, start, end);

	for (i = 0; i < nr_range; i++)
		ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
						   mr[i].page_size_mask);

	add_pfn_range_mapped(start >> PAGE_SHIFT, ret >> PAGE_SHIFT);

	return ret >> PAGE_SHIFT;
}

/*
 * We need to iterate through the E820 memory map and create direct mappings
 * for only E820_RAM and E820_KERN_RESERVED regions. We cannot simply
 * create direct mappings for all pfns from [0 to max_low_pfn) and
 * [4GB to max_pfn) because of possible memory holes in high addresses
 * that cannot be marked as UC by fixed/variable range MTRRs.
 * Depending on the alignment of E820 ranges, this may possibly result
 * in using smaller size (i.e. 4K instead of 2M or 1G) page tables.
 *
 * init_mem_mapping() calls init_range_memory_mapping() with a big range.
 * That range may have holes in the middle or at its ends, and only the
 * RAM parts will be mapped by init_range_memory_mapping().
 */
static unsigned long __init init_range_memory_mapping(
					   unsigned long r_start,
					   unsigned long r_end)
{
	unsigned long start_pfn, end_pfn;
	unsigned long mapped_ram_size = 0;
	int i;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) {
		u64 start = clamp_val(PFN_PHYS(start_pfn), r_start, r_end);
		u64 end = clamp_val(PFN_PHYS(end_pfn), r_start, r_end);
		if (start >= end)
			continue;

		/*
		 * If this chunk overlaps the brk page-table buffer, the
		 * pgt buffer must be allocated from memblock instead.
		 */
		can_use_brk_pgt = max(start, (u64)pgt_buf_end<<PAGE_SHIFT) >=
				    min(end, (u64)pgt_buf_top<<PAGE_SHIFT);
		init_memory_mapping(start, end);
		mapped_ram_size += end - start;
		can_use_brk_pgt = true;
	}

	return mapped_ram_size;
}

static unsigned long __init get_new_step_size(unsigned long step_size)
{
	/*
	 * The initial mapped size is PMD_SIZE (2M).
	 * We cannot set step_size to PUD_SIZE (1G) yet.
	 * In the worst case, when we cross a 1G boundary and PG_LEVEL_2M
	 * is not set, we will need 1+1+512 pages (2M + 8k) to map a 1G
	 * range with PTEs. Hence we use one less than the difference of
	 * the page table level shifts.
	 *
	 * There is no need to worry about overflow in the top-down case:
	 * on 32-bit, when step_size is 0, round_down() returns 0 for start,
	 * and that turns it into 0x100000000ULL.
	 * In the bottom-up case, round_up(x, 0) returns 0 too, which needs
	 * to be taken into consideration by the code below.
	 */
	return step_size << (PMD_SHIFT - PAGE_SHIFT - 1);
}

/**
 * memory_map_top_down - Map [map_start, map_end) top down
 * @map_start: start address of the target memory range
 * @map_end: end address of the target memory range
 *
 * This function will set up the direct mapping for the memory range
 * [map_start, map_end) top-down. That is, the page tables will be
 * allocated at the end of the memory, and we map the memory top-down.
 */
static void __init memory_map_top_down(unsigned long map_start,
				       unsigned long map_end)
{
	unsigned long real_end, start, last_start;
	unsigned long step_size;
	unsigned long addr;
	unsigned long mapped_ram_size = 0;

	/* Xen has a big reserved range near the end of ram, skip it at first. */
	addr = memblock_find_in_range(map_start, map_end, PMD_SIZE, PMD_SIZE);
	real_end = addr + PMD_SIZE;

	/* step_size needs to be small so the pgt_buf from BRK can cover it */
	step_size = PMD_SIZE;
	max_pfn_mapped = 0; /* will get exact value next */
	min_pfn_mapped = real_end >> PAGE_SHIFT;
	last_start = start = real_end;

	/*
	 * We start from the top (end of memory) and go to the bottom.
	 * The memblock_find_in_range() gets us a block of RAM from the
	 * end of RAM in [min_pfn_mapped, max_pfn_mapped) used as new pages
	 * for page table.
	 */
	while (last_start > map_start) {
		if (last_start > step_size) {
			start = round_down(last_start - 1, step_size);
			if (start < map_start)
				start = map_start;
		} else
			start = map_start;
		mapped_ram_size += init_range_memory_mapping(start,
							last_start);
		last_start = start;
		min_pfn_mapped = last_start >> PAGE_SHIFT;
		if (mapped_ram_size >= step_size)
			step_size = get_new_step_size(step_size);
	}

	if (real_end < map_end)
		init_range_memory_mapping(real_end, map_end);
}

/**
 * memory_map_bottom_up - Map [map_start, map_end) bottom up
 * @map_start: start address of the target memory range
 * @map_end: end address of the target memory range
 *
 * This function will set up the direct mapping for the memory range
 * [map_start, map_end) bottom-up. Since we have limited the bottom-up
 * allocation to the area above the kernel, the page tables will be
 * allocated just above the kernel and we map the memory in
 * [map_start, map_end) bottom-up.
 */
static void __init memory_map_bottom_up(unsigned long map_start,
					unsigned long map_end)
{
	unsigned long next, start;
	unsigned long mapped_ram_size = 0;
	/* step_size needs to be small so the pgt_buf from BRK can cover it */
	unsigned long step_size = PMD_SIZE;

	start = map_start;
	min_pfn_mapped = start >> PAGE_SHIFT;

	/*
	 * We start from the bottom (@map_start) and go to the top (@map_end).
	 * The memblock_find_in_range() gets us a block of RAM from the
	 * end of RAM in [min_pfn_mapped, max_pfn_mapped) used as new pages
	 * for page table.
	 */
	while (start < map_end) {
		if (step_size && map_end - start > step_size) {
			next = round_up(start + 1, step_size);
			if (next > map_end)
				next = map_end;
		} else {
			next = map_end;
		}

		mapped_ram_size += init_range_memory_mapping(start, next);
		start = next;

		if (mapped_ram_size >= step_size)
			step_size = get_new_step_size(step_size);
	}
}

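/*
 * Set up the kernel's direct mapping of all usable RAM. The ISA range is
 * always mapped first; the rest of memory is then mapped either bottom-up
 * or top-down depending on the memblock allocation direction.
 */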
void __init init_mem_mapping(void)
{
	unsigned long end;

	probe_page_size_mask();

#ifdef CONFIG_X86_64
	end = max_pfn << PAGE_SHIFT;
#else
	end = max_low_pfn << PAGE_SHIFT;
#endif

	/* the ISA range is always mapped regardless of memory holes */
	init_memory_mapping(0, ISA_END_ADDRESS);

	/*
	 * If the allocation is in the bottom-up direction, we set up the
	 * direct mapping bottom-up; otherwise we set it up top-down.
	 */
	if (memblock_bottom_up()) {
		unsigned long kernel_end = __pa_symbol(_end);

		/*
		 * We need two separate calls here. This is because we want to
		 * allocate page tables above the kernel. So we first map
		 * [kernel_end, end) so that memory above the kernel is mapped
		 * as soon as possible, and then use the page tables allocated
		 * above the kernel to map [ISA_END_ADDRESS, kernel_end).
		 */
		memory_map_bottom_up(kernel_end, end);
		memory_map_bottom_up(ISA_END_ADDRESS, kernel_end);
	} else {
		memory_map_top_down(ISA_END_ADDRESS, end);
	}

#ifdef CONFIG_X86_64
	if (max_pfn > max_low_pfn) {
		/* can we preserve max_low_pfn ? */
		max_low_pfn = max_pfn;
	}
#else
	early_ioremap_page_table_range_init();
#endif

	load_cr3(swapper_pg_dir);
	__flush_tlb_all();

	early_memtest(0, max_pfn_mapped << PAGE_SHIFT);
}

/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * On x86, access has to be given to the first megabyte of RAM because that
 * area contains BIOS code and data regions used by X, dosemu and similar apps.
 * Access has to be given to non-kernel-RAM areas as well; these contain the
 * PCI mmio resources as well as potential bios/acpi data regions.
 */
int devmem_is_allowed(unsigned long pagenr)
{
	if (pagenr < 256)
		return 1;
	if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
		return 0;
	if (!page_is_ram(pagenr))
		return 1;
	return 0;
}

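/*
 * Free the pages in [begin, end) back to the page allocator, or, with
 * CONFIG_DEBUG_PAGEALLOC, mark them not-present instead so that any stale
 * access to freed init memory faults immediately.
 */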
void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
	unsigned long begin_aligned, end_aligned;

	/* Make sure boundaries are page aligned */
	begin_aligned = PAGE_ALIGN(begin);
	end_aligned   = end & PAGE_MASK;

	if (WARN_ON(begin_aligned != begin || end_aligned != end)) {
		begin = begin_aligned;
		end   = end_aligned;
	}

	if (begin >= end)
		return;

	/*
	 * If debugging page accesses then do not free this memory but
	 * mark them not present - any buggy init-section access will
	 * create a kernel page fault:
	 */
#ifdef CONFIG_DEBUG_PAGEALLOC
	printk(KERN_INFO "debug: unmapping init [mem %#010lx-%#010lx]\n",
		begin, end - 1);
	set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
#else
	/*
	 * We just marked the kernel text read only above; now that we are
	 * going to free part of that, we need to make it writable and
	 * non-executable first.
	 */
	set_memory_nx(begin, (end - begin) >> PAGE_SHIFT);
	set_memory_rw(begin, (end - begin) >> PAGE_SHIFT);

	free_reserved_area((void *)begin, (void *)end, POISON_FREE_INITMEM, what);
#endif
}

void free_initmem(void)
{
	free_init_pages("unused kernel",
			(unsigned long)(&__init_begin),
			(unsigned long)(&__init_end));
}

#ifdef CONFIG_BLK_DEV_INITRD
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
#ifdef CONFIG_MICROCODE_EARLY
	/*
	 * Remember, initrd memory may contain microcode or other useful things.
	 * Before we lose initrd mem, we need to find a place to hold them
	 * now that normal virtual memory is enabled.
	 */
	save_microcode_in_initrd();
#endif

	/*
	 * end may not be page aligned, and we cannot align it here, as the
	 * decompressor could be confused by an aligned initrd_end.
	 * We already reserved the trailing partial page earlier in
	 *    - i386_start_kernel()
	 *    - x86_64_start_kernel()
	 *    - relocate_initrd()
	 * So here we can do PAGE_ALIGN() safely to get the partial page freed.
	 */
	free_init_pages("initrd", start, PAGE_ALIGN(end));
}
#endif

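/*
 * Report the highest pfn of each memory zone (DMA, DMA32, NORMAL, HIGHMEM)
 * so the node/zone free lists can be built.
 */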
void __init zone_sizes_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA]		= min(MAX_DMA_PFN, max_low_pfn);
#endif
#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32]	= min(MAX_DMA32_PFN, max_low_pfn);
#endif
	max_zone_pfns[ZONE_NORMAL]	= max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM]	= max_pfn;
#endif

	free_area_init_nodes(max_zone_pfns);
}

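/*
 * Per-CPU TLB state: the mm currently loaded on this CPU (on SMP) plus a
 * shadow copy of CR4.
 */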
DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = {
#ifdef CONFIG_SMP
	.active_mm = &init_mm,
	.state = 0,
#endif
	.cr4 = ~0UL,	/* fail hard if we screw up cr4 shadow initialization */
};
EXPORT_SYMBOL_GPL(cpu_tlbstate);

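/*
 * Update both translation tables for one cache-mode/pte-encoding pair,
 * keeping __cachemode2pte_tbl and __pte2cachemode_tbl consistent with
 * each other.
 */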
void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache)
{
	/* entry 0 MUST be WB (hardwired to speed up translations) */
	BUG_ON(!entry && cache != _PAGE_CACHE_MODE_WB);

	__cachemode2pte_tbl[cache] = __cm_idx2pte(entry);
	__pte2cachemode_tbl[entry] = cache;
}