/*
 *
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */
7
Linus Torvalds1da177e2005-04-16 15:20:36 -07008#include <linux/signal.h>
9#include <linux/sched.h>
10#include <linux/kernel.h>
11#include <linux/errno.h>
12#include <linux/string.h>
13#include <linux/types.h>
14#include <linux/ptrace.h>
15#include <linux/mman.h>
16#include <linux/mm.h>
17#include <linux/hugetlb.h>
18#include <linux/swap.h>
19#include <linux/smp.h>
20#include <linux/init.h>
21#include <linux/highmem.h>
22#include <linux/pagemap.h>
Jeremy Fitzhardingecfb80c92008-12-16 12:17:36 -080023#include <linux/pci.h>
Jan Beulich6fb14752007-05-02 19:27:10 +020024#include <linux/pfn.h>
Randy Dunlapc9cf5522006-06-27 02:53:52 -070025#include <linux/poison.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070026#include <linux/bootmem.h>
Yinghai Lua9ce6bc2010-08-25 13:39:17 -070027#include <linux/memblock.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070028#include <linux/proc_fs.h>
Dave Hansen05039b92005-10-29 18:16:57 -070029#include <linux/memory_hotplug.h>
Adrian Bunk27d99f72005-11-13 16:06:51 -080030#include <linux/initrd.h>
Shaohua Li55b23552006-06-23 02:04:49 -070031#include <linux/cpumask.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090032#include <linux/gfp.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070033
#include <asm/asm.h>
#include <asm/bios_ebda.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/bugs.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/olpc_ofw.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/paravirt.h>
#include <asm/setup.h>
#include <asm/cacheflush.h>
#include <asm/page_types.h>
#include <asm/init.h>

#include "mm_internal.h"

unsigned long highstart_pfn, highend_pfn;

static noinline int do_test_wp_bit(void);

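/*
 * True once the lowmem boundary (and therefore VMALLOC_START) is known;
 * set once lowmem is sized, see initmem_init() below.
 */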
bool __read_mostly __vmalloc_start_set = false;

/*
 * Creates a middle page table and puts a pointer to it in the
 * given global directory entry. This only returns the gd entry
 * in non-PAE compilation mode, since the middle layer is folded.
 */
static pmd_t * __init one_md_table_init(pgd_t *pgd)
{
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
        if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
                pmd_table = (pmd_t *)alloc_low_page();
                paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
                set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
                p4d = p4d_offset(pgd, 0);
                pud = pud_offset(p4d, 0);
                BUG_ON(pmd_table != pmd_offset(pud, 0));

                return pmd_table;
        }
#endif
        p4d = p4d_offset(pgd, 0);
        pud = pud_offset(p4d, 0);
        pmd_table = pmd_offset(pud, 0);

        return pmd_table;
}

/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry:
 */
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
        if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
                pte_t *page_table = (pte_t *)alloc_low_page();

                paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
                set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
                BUG_ON(page_table != pte_offset_kernel(pmd, 0));
        }

        return pte_offset_kernel(pmd, 0);
}

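/*
 * The two helpers below install extra kernel mappings: they allocate any
 * missing intermediate levels for @vaddr and return the pmd/pte slot.
 * Used, for example, by the early per-CPU setup.
 */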
pmd_t * __init populate_extra_pmd(unsigned long vaddr)
{
        int pgd_idx = pgd_index(vaddr);
        int pmd_idx = pmd_index(vaddr);

        return one_md_table_init(swapper_pg_dir + pgd_idx) + pmd_idx;
}

pte_t * __init populate_extra_pte(unsigned long vaddr)
{
        int pte_idx = pte_index(vaddr);
        pmd_t *pmd;

        pmd = populate_extra_pmd(vaddr);
        return one_page_table_init(pmd) + pte_idx;
}

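/*
 * Pre-count how many pte pages page_table_kmap_check() will have to
 * replace in the kmap range, so that page_table_range_init() can grab
 * them all contiguously up front via alloc_low_pages().
 */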
static unsigned long __init
page_table_range_init_count(unsigned long start, unsigned long end)
{
        unsigned long count = 0;
#ifdef CONFIG_HIGHMEM
        int pmd_idx_kmap_begin = fix_to_virt(FIX_KMAP_END) >> PMD_SHIFT;
        int pmd_idx_kmap_end = fix_to_virt(FIX_KMAP_BEGIN) >> PMD_SHIFT;
        int pgd_idx, pmd_idx;
        unsigned long vaddr;

        if (pmd_idx_kmap_begin == pmd_idx_kmap_end)
                return 0;

        vaddr = start;
        pgd_idx = pgd_index(vaddr);
        pmd_idx = pmd_index(vaddr);

        for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd_idx++) {
                for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
                                                        pmd_idx++) {
                        if ((vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin &&
                            (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end)
                                count++;
                        vaddr += PMD_SIZE;
                }
                pmd_idx = 0;
        }
#endif
        return count;
}

static pte_t *__init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
                                           unsigned long vaddr, pte_t *lastpte,
                                           void **adr)
{
#ifdef CONFIG_HIGHMEM
        /*
         * Something (early fixmap) may already have put a pte
         * page here, which causes the page table allocation
         * to become nonlinear. Attempt to fix it, and if it
         * is still nonlinear then we have to bug.
         */
        int pmd_idx_kmap_begin = fix_to_virt(FIX_KMAP_END) >> PMD_SHIFT;
        int pmd_idx_kmap_end = fix_to_virt(FIX_KMAP_BEGIN) >> PMD_SHIFT;

        if (pmd_idx_kmap_begin != pmd_idx_kmap_end
            && (vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin
            && (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end) {
                pte_t *newpte;
                int i;

                BUG_ON(after_bootmem);
                newpte = *adr;
                for (i = 0; i < PTRS_PER_PTE; i++)
                        set_pte(newpte + i, pte[i]);
                *adr = (void *)(((unsigned long)(*adr)) + PAGE_SIZE);

                paravirt_alloc_pte(&init_mm, __pa(newpte) >> PAGE_SHIFT);
                set_pmd(pmd, __pmd(__pa(newpte)|_PAGE_TABLE));
                BUG_ON(newpte != pte_offset_kernel(pmd, 0));
                __flush_tlb_all();

                paravirt_release_pte(__pa(pte) >> PAGE_SHIFT);
                pte = newpte;
        }
        BUG_ON(vaddr < fix_to_virt(FIX_KMAP_BEGIN - 1)
               && vaddr > fix_to_virt(FIX_KMAP_END)
               && lastpte && lastpte + PTRS_PER_PTE != pte);
#endif
        return pte;
}

/*
 * This function initializes a certain range of kernel virtual memory
 * with new bootmem page tables, everywhere page tables are missing in
 * the given range.
 *
 * NOTE: The pagetables are allocated contiguously in physical space,
 * so we can cache the place of the first one and move around without
 * checking the pgd every time.
 */
static void __init
page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
{
        int pgd_idx, pmd_idx;
        unsigned long vaddr;
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte = NULL;
        unsigned long count = page_table_range_init_count(start, end);
        void *adr = NULL;

        if (count)
                adr = alloc_low_pages(count);

        vaddr = start;
        pgd_idx = pgd_index(vaddr);
        pmd_idx = pmd_index(vaddr);
        pgd = pgd_base + pgd_idx;

        for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
                pmd = one_md_table_init(pgd);
                pmd = pmd + pmd_index(vaddr);
                for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
                                                        pmd++, pmd_idx++) {
                        pte = page_table_kmap_check(one_page_table_init(pmd),
                                                    pmd, vaddr, pte, &adr);

                        vaddr += PMD_SIZE;
                }
                pmd_idx = 0;
        }
}

static inline int is_kernel_text(unsigned long addr)
{
        if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
                return 1;
        return 0;
}

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET:
 */
unsigned long __init
kernel_physical_mapping_init(unsigned long start,
                             unsigned long end,
                             unsigned long page_size_mask)
{
        int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
        unsigned long last_map_addr = end;
        unsigned long start_pfn, end_pfn;
        pgd_t *pgd_base = swapper_pg_dir;
        int pgd_idx, pmd_idx, pte_ofs;
        unsigned long pfn;
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;
        unsigned pages_2m, pages_4k;
        int mapping_iter;

        start_pfn = start >> PAGE_SHIFT;
        end_pfn = end >> PAGE_SHIFT;

        /*
         * The first iteration will set up the identity mapping using
         * large/small pages based on use_pse, with the other attributes
         * the same as set by the early code in head_32.S.
         *
         * The second iteration will set up the appropriate attributes
         * (NX, GLOBAL..) as desired for the kernel identity mapping.
         *
         * This two-pass mechanism conforms to the TLB app note which says:
         *
         *     "Software should not write to a paging-structure entry in a
         *      way that would change, for any linear address, both the
         *      page size and either the page frame or attributes."
         */
        mapping_iter = 1;

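        /* Without PSE the CPU cannot map 2M pages; fall back to 4k pages. */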
        if (!boot_cpu_has(X86_FEATURE_PSE))
                use_pse = 0;

repeat:
        pages_2m = pages_4k = 0;
        pfn = start_pfn;
        pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
        pgd = pgd_base + pgd_idx;
        for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
                pmd = one_md_table_init(pgd);

                if (pfn >= end_pfn)
                        continue;
#ifdef CONFIG_X86_PAE
                pmd_idx = pmd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
                pmd += pmd_idx;
#else
                pmd_idx = 0;
#endif
                for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
                     pmd++, pmd_idx++) {
                        unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;

                        /*
                         * Map with big pages if possible, otherwise
                         * create normal page tables:
                         */
                        if (use_pse) {
                                unsigned int addr2;
                                pgprot_t prot = PAGE_KERNEL_LARGE;
                                /*
                                 * first pass will use the same initial
                                 * identity mapping attribute + _PAGE_PSE.
                                 */
                                pgprot_t init_prot =
                                        __pgprot(PTE_IDENT_ATTR |
                                                 _PAGE_PSE);

                                pfn &= PMD_MASK >> PAGE_SHIFT;
                                addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
                                        PAGE_OFFSET + PAGE_SIZE-1;

                                if (is_kernel_text(addr) ||
                                    is_kernel_text(addr2))
                                        prot = PAGE_KERNEL_LARGE_EXEC;

                                pages_2m++;
                                if (mapping_iter == 1)
                                        set_pmd(pmd, pfn_pmd(pfn, init_prot));
                                else
                                        set_pmd(pmd, pfn_pmd(pfn, prot));

                                pfn += PTRS_PER_PTE;
                                continue;
                        }
                        pte = one_page_table_init(pmd);

                        pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
                        pte += pte_ofs;
                        for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
                             pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
                                pgprot_t prot = PAGE_KERNEL;
                                /*
                                 * first pass will use the same initial
                                 * identity mapping attribute.
                                 */
                                pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);

                                if (is_kernel_text(addr))
                                        prot = PAGE_KERNEL_EXEC;

                                pages_4k++;
                                if (mapping_iter == 1) {
                                        set_pte(pte, pfn_pte(pfn, init_prot));
                                        last_map_addr = (pfn << PAGE_SHIFT) + PAGE_SIZE;
                                } else
                                        set_pte(pte, pfn_pte(pfn, prot));
                        }
                }
        }
        if (mapping_iter == 1) {
                /*
                 * update direct mapping page count only in the first
                 * iteration.
                 */
                update_page_count(PG_LEVEL_2M, pages_2m);
                update_page_count(PG_LEVEL_4K, pages_4k);

                /*
                 * Do a local, global TLB flush, which flushes the previous
                 * mappings from both the small- and large-page TLBs.
                 */
                __flush_tlb_all();

                /*
                 * Second iteration will set the actual desired PTE attributes.
                 */
                mapping_iter = 2;
                goto repeat;
        }
        return last_map_addr;
}

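/*
 * Cached kernel pte for the first kmap fixmap slot (FIX_KMAP_BEGIN);
 * the atomic kmap helpers index from it.
 */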
pte_t *kmap_pte;

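/* Walk the kernel page tables (pgd -> p4d -> pud -> pmd) down to the pte for @vaddr. */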
static inline pte_t *kmap_get_fixmap_pte(unsigned long vaddr)
{
        pgd_t *pgd = pgd_offset_k(vaddr);
        p4d_t *p4d = p4d_offset(pgd, vaddr);
        pud_t *pud = pud_offset(p4d, vaddr);
        pmd_t *pmd = pmd_offset(pud, vaddr);
        return pte_offset_kernel(pmd, vaddr);
}

static void __init kmap_init(void)
{
        unsigned long kmap_vstart;

        /*
         * Cache the first kmap pte:
         */
        kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
        kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
}

#ifdef CONFIG_HIGHMEM
static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
        unsigned long vaddr;
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        vaddr = PKMAP_BASE;
        page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

        pgd = swapper_pg_dir + pgd_index(vaddr);
        p4d = p4d_offset(pgd, vaddr);
        pud = pud_offset(p4d, vaddr);
        pmd = pmd_offset(pud, vaddr);
        pte = pte_offset_kernel(pmd, vaddr);
        pkmap_page_table = pte;
}

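/*
 * Hand every usable highmem page in [start_pfn, end_pfn) on this node
 * to the buddy allocator.
 */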
void __init add_highpages_with_active_regions(int nid,
                         unsigned long start_pfn, unsigned long end_pfn)
{
        phys_addr_t start, end;
        u64 i;

        for_each_free_mem_range(i, nid, MEMBLOCK_NONE, &start, &end, NULL) {
                unsigned long pfn = clamp_t(unsigned long, PFN_UP(start),
                                            start_pfn, end_pfn);
                unsigned long e_pfn = clamp_t(unsigned long, PFN_DOWN(end),
                                              start_pfn, end_pfn);
                for ( ; pfn < e_pfn; pfn++)
                        if (pfn_valid(pfn))
                                free_highmem_page(pfn_to_page(pfn));
        }
}
#else
static inline void permanent_kmaps_init(pgd_t *pgd_base)
{
}
#endif /* CONFIG_HIGHMEM */

void __init native_pagetable_init(void)
{
        unsigned long pfn, va;
        pgd_t *pgd, *base = swapper_pg_dir;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        /*
         * Remove any mappings which extend past the end of physical
         * memory from the boot time page table.
         * In virtual address space, we should have at least two pages
         * from VMALLOC_END to pkmap or fixmap according to the VMALLOC_END
         * definition, and max_low_pfn is set to the physical address
         * corresponding to VMALLOC_END. If the initial memory mapping did
         * its job, we should find either a pte in use near max_low_pfn or
         * a pmd that is not present.
         */
        for (pfn = max_low_pfn; pfn < 1<<(32-PAGE_SHIFT); pfn++) {
                va = PAGE_OFFSET + (pfn<<PAGE_SHIFT);
                pgd = base + pgd_index(va);
                if (!pgd_present(*pgd))
                        break;

                p4d = p4d_offset(pgd, va);
                pud = pud_offset(p4d, va);
                pmd = pmd_offset(pud, va);
                if (!pmd_present(*pmd))
                        break;

                /* should not be large page here */
                if (pmd_large(*pmd)) {
                        pr_warn("try to clear pte for ram above max_low_pfn: pfn: %lx pmd: %p pmd phys: %lx, but pmd is big page and is not using pte !\n",
                                pfn, pmd, __pa(pmd));
                        BUG_ON(1);
                }

                pte = pte_offset_kernel(pmd, va);
                if (!pte_present(*pte))
                        break;

                printk(KERN_DEBUG "clearing pte for ram above max_low_pfn: pfn: %lx pmd: %p pmd phys: %lx pte: %p pte phys: %lx\n",
                        pfn, pmd, __pa(pmd), pte, __pa(pte));
                pte_clear(NULL, va, pte);
        }
        paravirt_alloc_pmd(&init_mm, __pa(base) >> PAGE_SHIFT);
        paging_init();
}

/*
 * Build a proper pagetable for the kernel mappings. Up until this
 * point, we've been running on some set of pagetables constructed by
 * the boot process.
 *
 * If we're booting on native hardware, this will be a pagetable
 * constructed in arch/x86/kernel/head_32.S. The root of the
 * pagetable will be swapper_pg_dir.
 *
 * If we're booting paravirtualized under a hypervisor, then there are
 * more options: we may already be running PAE, and the pagetable may
 * or may not be based in swapper_pg_dir. In any case,
 * paravirt_pagetable_init() will set up swapper_pg_dir
 * appropriately for the rest of the initialization to work.
 *
 * In general, pagetable_init() assumes that the pagetable may already
 * be partially populated, and so it avoids stomping on any existing
 * mappings.
 */
void __init early_ioremap_page_table_range_init(void)
{
        pgd_t *pgd_base = swapper_pg_dir;
        unsigned long vaddr, end;

        /*
         * Fixed mappings, only the page table structure has to be
         * created - mappings will be set by set_fixmap():
         */
        vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
        end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
        page_table_range_init(vaddr, end, pgd_base);
        early_ioremap_reset();
}

static void __init pagetable_init(void)
{
        pgd_t *pgd_base = swapper_pg_dir;

        permanent_kmaps_init(pgd_base);
}

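/*
 * Mask of pte bits the kernel may use. _PAGE_NX and _PAGE_GLOBAL start
 * out masked off here and are enabled elsewhere once CPU feature
 * detection confirms the processor supports them.
 */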
pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL);
EXPORT_SYMBOL_GPL(__supported_pte_mask);

/* user-defined highmem size */
static unsigned int highmem_pages = -1;

/*
 * highmem=size forces highmem to be exactly 'size' bytes.
 * This works even on boxes that have no highmem otherwise.
 * This also works to reduce highmem size on bigger boxes.
 */
static int __init parse_highmem(char *arg)
{
        if (!arg)
                return -EINVAL;

        highmem_pages = memparse(arg, &arg) >> PAGE_SHIFT;
        return 0;
}
early_param("highmem", parse_highmem);
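/*
 * Example: booting with "highmem=512m" forces exactly 512 MB of highmem
 * (memparse() accepts the usual k/m/g suffixes).
 */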

#define MSG_HIGHMEM_TOO_BIG \
        "highmem size (%luMB) is bigger than pages available (%luMB)!\n"

#define MSG_LOWMEM_TOO_SMALL \
        "highmem size (%luMB) results in <64MB lowmem, ignoring it!\n"
/*
 * All of RAM fits into lowmem - but if user wants highmem
 * artificially via the highmem=x boot parameter then create
 * it:
 */
static void __init lowmem_pfn_init(void)
{
        /* max_low_pfn is 0, we already have early_res support */
        max_low_pfn = max_pfn;

        if (highmem_pages == -1)
                highmem_pages = 0;
#ifdef CONFIG_HIGHMEM
        if (highmem_pages >= max_pfn) {
                printk(KERN_ERR MSG_HIGHMEM_TOO_BIG,
                        pages_to_mb(highmem_pages), pages_to_mb(max_pfn));
                highmem_pages = 0;
        }
        if (highmem_pages) {
                if (max_low_pfn - highmem_pages < 64*1024*1024/PAGE_SIZE) {
                        printk(KERN_ERR MSG_LOWMEM_TOO_SMALL,
                                pages_to_mb(highmem_pages));
                        highmem_pages = 0;
                }
                max_low_pfn -= highmem_pages;
        }
#else
        if (highmem_pages)
                printk(KERN_ERR "ignoring highmem size on non-highmem kernel!\n");
#endif
}

#define MSG_HIGHMEM_TOO_SMALL \
        "only %luMB highmem pages available, ignoring highmem size of %luMB!\n"

#define MSG_HIGHMEM_TRIMMED \
        "Warning: only 4GB will be used. Use a HIGHMEM64G enabled kernel!\n"
/*
 * We have more RAM than fits into lowmem - we try to put it into
 * highmem, also taking the highmem=x boot parameter into account:
 */
static void __init highmem_pfn_init(void)
{
        max_low_pfn = MAXMEM_PFN;

        if (highmem_pages == -1)
                highmem_pages = max_pfn - MAXMEM_PFN;

        if (highmem_pages + MAXMEM_PFN < max_pfn)
                max_pfn = MAXMEM_PFN + highmem_pages;

        if (highmem_pages + MAXMEM_PFN > max_pfn) {
                printk(KERN_WARNING MSG_HIGHMEM_TOO_SMALL,
                        pages_to_mb(max_pfn - MAXMEM_PFN),
                        pages_to_mb(highmem_pages));
                highmem_pages = 0;
        }
#ifndef CONFIG_HIGHMEM
        /* Maximum memory usable is what is directly addressable */
        printk(KERN_WARNING "Warning only %ldMB will be used.\n", MAXMEM>>20);
        if (max_pfn > MAX_NONPAE_PFN)
                printk(KERN_WARNING "Use a HIGHMEM64G enabled kernel.\n");
        else
                printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
        max_pfn = MAXMEM_PFN;
#else /* !CONFIG_HIGHMEM */
#ifndef CONFIG_HIGHMEM64G
        if (max_pfn > MAX_NONPAE_PFN) {
                max_pfn = MAX_NONPAE_PFN;
                printk(KERN_WARNING MSG_HIGHMEM_TRIMMED);
        }
#endif /* !CONFIG_HIGHMEM64G */
#endif /* !CONFIG_HIGHMEM */
}

/*
 * Determine low and high memory ranges:
 */
void __init find_low_pfn_range(void)
{
        /* it could update max_pfn */

        if (max_pfn <= MAXMEM_PFN)
                lowmem_pfn_init();
        else
                highmem_pfn_init();
}

#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init initmem_init(void)
{
#ifdef CONFIG_HIGHMEM
        highstart_pfn = highend_pfn = max_pfn;
        if (max_pfn > max_low_pfn)
                highstart_pfn = max_low_pfn;
        printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
                pages_to_mb(highend_pfn - highstart_pfn));
        high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
#else
        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
#endif

        memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0);
        sparse_memory_present_with_active_regions(0);

#ifdef CONFIG_FLATMEM
        max_mapnr = IS_ENABLED(CONFIG_HIGHMEM) ? highend_pfn : max_low_pfn;
#endif
        __vmalloc_start_set = true;

        printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
                        pages_to_mb(max_low_pfn));

        setup_bootmem_allocator();
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */

void __init setup_bootmem_allocator(void)
{
        printk(KERN_INFO "  mapped low ram: 0 - %08lx\n",
                 max_pfn_mapped<<PAGE_SHIFT);
        printk(KERN_INFO "  low ram: 0 - %08lx\n", max_low_pfn<<PAGE_SHIFT);
}

/*
 * paging_init() sets up the page tables - note that the first 8MB are
 * already mapped by head.S.
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
        pagetable_init();

        __flush_tlb_all();

        kmap_init();

        /*
         * NOTE: at this point the bootmem allocator is fully available.
         */
        olpc_dt_build_devicetree();
        sparse_memory_present_with_active_regions(MAX_NUMNODES);
        sparse_init();
        zone_sizes_init();
}

/*
 * Test if the WP bit works in supervisor mode. It isn't supported on 386's
 * and also on some strange 486's. All 586+'s are OK. This used to involve
 * black magic jumps to work around some nasty CPU bugs, but fortunately the
 * switch to using exceptions got rid of all that.
 */
static void __init test_wp_bit(void)
{
        printk(KERN_INFO
  "Checking if this processor honours the WP bit even in supervisor mode...");

        /* Any page-aligned address will do, the test is non-destructive */
        __set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_KERNEL_RO);
        boot_cpu_data.wp_works_ok = do_test_wp_bit();
        clear_fixmap(FIX_WP_TEST);

        if (!boot_cpu_data.wp_works_ok) {
                printk(KERN_CONT "No.\n");
                panic("Linux doesn't support CPUs with broken WP.");
        } else {
                printk(KERN_CONT "Ok.\n");
        }
}

void __init mem_init(void)
{
        pci_iommu_alloc();

#ifdef CONFIG_FLATMEM
        BUG_ON(!mem_map);
#endif
        /*
         * With CONFIG_DEBUG_PAGEALLOC, initialization of highmem pages has
         * to be done before free_all_bootmem(). Memblock uses free low
         * memory for temporary data (see find_range_array()) and for this
         * purpose can use pages that were already passed to the buddy
         * allocator, and hence are marked not accessible in the page
         * tables when compiled with CONFIG_DEBUG_PAGEALLOC. Otherwise the
         * order of initialization is not important here.
         */
        set_highmem_pages_init();

        /* this will put all low memory onto the freelists */
        free_all_bootmem();

        after_bootmem = 1;

        mem_init_print_info(NULL);
        printk(KERN_INFO "virtual kernel memory layout:\n"
                "    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
                "    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
                "    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
                "    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
                "      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
                "      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
                "      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
                FIXADDR_START, FIXADDR_TOP,
                (FIXADDR_TOP - FIXADDR_START) >> 10,

#ifdef CONFIG_HIGHMEM
                PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
                (LAST_PKMAP*PAGE_SIZE) >> 10,
#endif

                VMALLOC_START, VMALLOC_END,
                (VMALLOC_END - VMALLOC_START) >> 20,

                (unsigned long)__va(0), (unsigned long)high_memory,
                ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,

                (unsigned long)&__init_begin, (unsigned long)&__init_end,
                ((unsigned long)&__init_end -
                 (unsigned long)&__init_begin) >> 10,

                (unsigned long)&_etext, (unsigned long)&_edata,
                ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,

                (unsigned long)&_text, (unsigned long)&_etext,
                ((unsigned long)&_etext - (unsigned long)&_text) >> 10);

        /*
         * Check boundaries twice: Some fundamental inconsistencies can
         * be detected at build time already.
         */
#define __FIXADDR_TOP (-PAGE_SIZE)
#ifdef CONFIG_HIGHMEM
        BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START);
        BUILD_BUG_ON(VMALLOC_END > PKMAP_BASE);
#endif
#define high_memory (-128UL << 20)
        BUILD_BUG_ON(VMALLOC_START >= VMALLOC_END);
#undef high_memory
#undef __FIXADDR_TOP

#ifdef CONFIG_HIGHMEM
        BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START);
        BUG_ON(VMALLOC_END > PKMAP_BASE);
#endif
        BUG_ON(VMALLOC_START >= VMALLOC_END);
        BUG_ON((unsigned long)high_memory > VMALLOC_START);

        if (boot_cpu_data.wp_works_ok < 0)
                test_wp_bit();
}

#ifdef CONFIG_MEMORY_HOTPLUG
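/*
 * Memory hotplug: hot-added memory defaults into ZONE_HIGHMEM here,
 * subject to the zone zone_for_memory() actually picks.
 */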
int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
{
        struct pglist_data *pgdata = NODE_DATA(nid);
        struct zone *zone = pgdata->node_zones +
                zone_for_memory(nid, start, size, ZONE_HIGHMEM, for_device);
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;

        return __add_pages(nid, zone, start_pfn, nr_pages);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
int arch_remove_memory(u64 start, u64 size)
{
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;
        struct zone *zone;

        zone = page_zone(pfn_to_page(start_pfn));
        return __remove_pages(zone, start_pfn, nr_pages);
}
#endif
#endif

/*
 * This function cannot be __init, since exceptions don't work in that
 * section. Put this after the callers, so that it cannot be inlined.
 */
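/*
 * How the test works: flag starts out as 1. The movb at label 1 writes
 * to the read-only FIX_WP_TEST page; if the CPU honours WP in supervisor
 * mode, that write faults and the exception fixup jumps to label 2,
 * skipping the xorl that would clear flag. A return value of 1 therefore
 * means the WP bit works.
 */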
static noinline int do_test_wp_bit(void)
{
        char tmp_reg;
        int flag;

        __asm__ __volatile__(
                "       movb %0, %1     \n"
                "1:     movb %1, %0     \n"
                "       xorl %2, %2     \n"
                "2:                     \n"
                _ASM_EXTABLE(1b,2b)
                :"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
                 "=q" (tmp_reg),
                 "=r" (flag)
                :"2" (1)
                :"memory");

        return flag;
}

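/*
 * Non-zero once mark_rodata_ro() has write-protected the kernel text;
 * gates the set_kernel_text_rw()/set_kernel_text_ro() helpers below.
 */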
int kernel_set_to_readonly __read_mostly;

void set_kernel_text_rw(void)
{
        unsigned long start = PFN_ALIGN(_text);
        unsigned long size = PFN_ALIGN(_etext) - start;

        if (!kernel_set_to_readonly)
                return;

        pr_debug("Set kernel text: %lx - %lx for read write\n",
                 start, start+size);

        set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);
}

void set_kernel_text_ro(void)
{
        unsigned long start = PFN_ALIGN(_text);
        unsigned long size = PFN_ALIGN(_etext) - start;

        if (!kernel_set_to_readonly)
                return;

        pr_debug("Set kernel text: %lx - %lx for read only\n",
                 start, start+size);

        set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
}

static void mark_nxdata_nx(void)
{
        /*
         * When this is called, init has already been executed and
         * released, so everything past _etext should be NX.
         */
        unsigned long start = PFN_ALIGN(_etext);
        /*
         * This comes from the is_kernel_text() upper limit. Round up to
         * HPAGE_SIZE since huge pages may have been used there:
         */
        unsigned long size = (((unsigned long)__init_end + HPAGE_SIZE) & HPAGE_MASK) - start;

        if (__supported_pte_mask & _PAGE_NX)
                printk(KERN_INFO "NX-protecting the kernel data: %luk\n", size >> 10);
        set_pages_nx(virt_to_page(start), size >> PAGE_SHIFT);
}

void mark_rodata_ro(void)
{
        unsigned long start = PFN_ALIGN(_text);
        unsigned long size = PFN_ALIGN(_etext) - start;

        set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
        printk(KERN_INFO "Write protecting the kernel text: %luk\n",
                size >> 10);

        kernel_set_to_readonly = 1;

#ifdef CONFIG_CPA_DEBUG
        printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n",
                start, start+size);
        set_pages_rw(virt_to_page(start), size>>PAGE_SHIFT);

        printk(KERN_INFO "Testing CPA: write protecting again\n");
        set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT);
#endif

        start += size;
        size = (unsigned long)__end_rodata - start;
        set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
        printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
                size >> 10);

#ifdef CONFIG_CPA_DEBUG
        printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, start + size);
        set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);

        printk(KERN_INFO "Testing CPA: write protecting again\n");
        set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
#endif
        mark_nxdata_nx();
        if (__supported_pte_mask & _PAGE_NX)
                debug_checkwx();
}