blob: 2c0f3407bd1f1ae8adcf43cacdb479c91c096669 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
Dave Jones835c34a2007-10-12 21:10:53 -04002 * prepare to run common code
Linus Torvalds1da177e2005-04-16 15:20:36 -07003 *
4 * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
Linus Torvalds1da177e2005-04-16 15:20:36 -07005 */
6
7#include <linux/init.h>
8#include <linux/linkage.h>
9#include <linux/types.h>
10#include <linux/kernel.h>
11#include <linux/string.h>
12#include <linux/percpu.h>
Thomas Gleixnereaf76e82008-01-30 13:30:19 +010013#include <linux/start_kernel.h>
Huang, Ying8b664aa2008-03-28 10:49:44 +080014#include <linux/io.h>
Yinghai Lu72d7c3b2010-08-25 13:39:17 -070015#include <linux/memblock.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070016
17#include <asm/processor.h>
18#include <asm/proto.h>
19#include <asm/smp.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070020#include <asm/setup.h>
21#include <asm/desc.h>
Siddha, Suresh Bf6c2e332005-11-05 17:25:53 +010022#include <asm/pgtable.h>
Vivek Goyalcfd243d2007-05-02 19:27:07 +020023#include <asm/tlbflush.h>
Andi Kleen2bc04142005-11-05 17:25:53 +010024#include <asm/sections.h>
Thomas Gleixner718fc132008-01-30 13:30:17 +010025#include <asm/kdebug.h>
Andi Kleen75175272008-01-30 13:33:17 +010026#include <asm/e820.h>
Thomas Gleixner47a3d5d2009-08-29 15:03:59 +020027#include <asm/bios_ebda.h>
H. Peter Anvin5dcd14e2013-01-29 01:05:24 -080028#include <asm/bootparam_utils.h>
Fenghua Yufeddc9d2012-12-20 23:44:30 -080029#include <asm/microcode.h>
Andrey Ryabininef7f0d62015-02-13 14:39:25 -080030#include <asm/kasan.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070031
/*
 * Manage page tables very early on.
 *
 * The tables themselves (early_level4_pgt, early_dynamic_pgts) are
 * defined in the assembly startup code; only bookkeeping lives here.
 */
extern pgd_t early_level4_pgt[PTRS_PER_PGD];
extern pmd_t early_dynamic_pgts[EARLY_DYNAMIC_PAGE_TABLES][PTRS_PER_PMD];
/*
 * Next free slot in early_dynamic_pgts.  Starts at 2 — the first two
 * pages are presumably already consumed by the assembly startup code
 * (head_64.S); TODO confirm against that file.
 */
static unsigned int __initdata next_early_pgt = 2;
/*
 * Flags used for the early 2M (PMD-level) mappings.  _PAGE_GLOBAL and
 * _PAGE_NX are masked out here; NOTE(review): presumably because the
 * corresponding CPU features may not be enabled yet this early — confirm.
 */
pmdval_t early_pmd_flags = __PAGE_KERNEL_LARGE & ~(_PAGE_GLOBAL | _PAGE_NX);
H. Peter Anvin8170e6b2013-01-24 12:19:52 -080039
/* Wipe all early page tables except for the kernel symbol map */
static void __init reset_early_page_tables(void)
{
	unsigned long i;

	/*
	 * Clear every PGD slot except the last one (index PTRS_PER_PGD-1),
	 * which holds the kernel high mapping (see the early_level4_pgt[511]
	 * copy in x86_64_start_kernel).
	 */
	for (i = 0; i < PTRS_PER_PGD-1; i++)
		early_level4_pgt[i].pgd = 0;

	/* All dynamically-allocated early page-table pages are free again. */
	next_early_pgt = 0;

	/* Reload CR3 so the stale translations are flushed from the TLB. */
	write_cr3(__pa_nodebug(early_level4_pgt));
}
52
/*
 * Create a new PMD entry.
 *
 * Called to demand-populate a 2M mapping for @address (a direct-map
 * virtual address) while the early page tables are still live.  New
 * PUD/PMD pages are carved out of the fixed early_dynamic_pgts pool;
 * if the pool runs dry, the early tables are wiped and the walk is
 * retried from scratch.
 *
 * Returns 0 on success, -1 if @address is out of range or the early
 * page tables are no longer active (CR3 has moved on).
 */
int __init early_make_pgtable(unsigned long address)
{
	/* Direct-map addresses are offset from physical by __PAGE_OFFSET. */
	unsigned long physaddr = address - __PAGE_OFFSET;
	unsigned long i;
	pgdval_t pgd, *pgd_p;
	pudval_t pud, *pud_p;
	pmdval_t pmd, *pmd_p;

	/* Invalid address or early pgt is done ? */
	if (physaddr >= MAXMEM || read_cr3() != __pa_nodebug(early_level4_pgt))
		return -1;

again:
	pgd_p = &early_level4_pgt[pgd_index(address)].pgd;
	pgd = *pgd_p;

	/*
	 * The use of __START_KERNEL_map rather than __PAGE_OFFSET here is
	 * critical -- __PAGE_OFFSET would point us back into the dynamic
	 * range and we might end up looping forever...
	 */
	if (pgd)
		/* Existing PUD page: convert its physical address back to virtual. */
		pud_p = (pudval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
	else {
		/* Pool exhausted: wipe everything and start the walk over. */
		if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
			reset_early_page_tables();
			goto again;
		}

		/* Allocate and zero a fresh PUD page from the static pool. */
		pud_p = (pudval_t *)early_dynamic_pgts[next_early_pgt++];
		for (i = 0; i < PTRS_PER_PUD; i++)
			pud_p[i] = 0;
		/* Link it into the PGD (virtual -> physical translation by hand). */
		*pgd_p = (pgdval_t)pud_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
	}
	pud_p += pud_index(address);
	pud = *pud_p;

	if (pud)
		pmd_p = (pmdval_t *)((pud & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
	else {
		if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
			reset_early_page_tables();
			goto again;
		}

		/* Same dance for the PMD level. */
		pmd_p = (pmdval_t *)early_dynamic_pgts[next_early_pgt++];
		for (i = 0; i < PTRS_PER_PMD; i++)
			pmd_p[i] = 0;
		*pud_p = (pudval_t)pmd_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
	}
	/* Finally install the 2M mapping itself. */
	pmd = (physaddr & PMD_MASK) + early_pmd_flags;
	pmd_p[pmd_index(address)] = pmd;

	return 0;
}
109
Linus Torvalds1da177e2005-04-16 15:20:36 -0700110/* Don't add a printk in there. printk relies on the PDA which is not initialized
111 yet. */
112static void __init clear_bss(void)
113{
Linus Torvalds1da177e2005-04-16 15:20:36 -0700114 memset(__bss_start, 0,
Andi Kleen2bc04142005-11-05 17:25:53 +0100115 (unsigned long) __bss_stop - (unsigned long) __bss_start);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700116}
117
Yinghai Luf1da8342013-01-24 12:19:57 -0800118static unsigned long get_cmd_line_ptr(void)
119{
120 unsigned long cmd_line_ptr = boot_params.hdr.cmd_line_ptr;
121
Yinghai Luee92d812013-01-28 20:16:44 -0800122 cmd_line_ptr |= (u64)boot_params.ext_cmd_line_ptr << 32;
123
Yinghai Luf1da8342013-01-24 12:19:57 -0800124 return cmd_line_ptr;
125}
126
Linus Torvalds1da177e2005-04-16 15:20:36 -0700127static void __init copy_bootdata(char *real_mode_data)
128{
Linus Torvalds1da177e2005-04-16 15:20:36 -0700129 char * command_line;
Yinghai Luf1da8342013-01-24 12:19:57 -0800130 unsigned long cmd_line_ptr;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700131
H. Peter Anvin30c82642007-10-15 17:13:22 -0700132 memcpy(&boot_params, real_mode_data, sizeof boot_params);
H. Peter Anvin5dcd14e2013-01-29 01:05:24 -0800133 sanitize_boot_params(&boot_params);
Yinghai Luf1da8342013-01-24 12:19:57 -0800134 cmd_line_ptr = get_cmd_line_ptr();
135 if (cmd_line_ptr) {
136 command_line = __va(cmd_line_ptr);
H. Peter Anvin30c82642007-10-15 17:13:22 -0700137 memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700138 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700139}
140
/*
 * First C code run on the boot CPU, entered from the assembly startup
 * code with paging already enabled on the early page tables.
 * @real_mode_data is the physical address of the boot_params block.
 *
 * The statement order below is deliberate early-boot sequencing; do not
 * reorder without understanding each dependency.
 */
asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
{
	int i;

	/*
	 * Build-time sanity checks on the kernel image and module
	 * area mappings. (these are purely build-time and produce no code)
	 */
	BUILD_BUG_ON(MODULES_VADDR < __START_KERNEL_map);
	BUILD_BUG_ON(MODULES_VADDR - __START_KERNEL_map < KERNEL_IMAGE_SIZE);
	BUILD_BUG_ON(MODULES_LEN + KERNEL_IMAGE_SIZE > 2*PUD_SIZE);
	BUILD_BUG_ON((__START_KERNEL_map & ~PMD_MASK) != 0);
	BUILD_BUG_ON((MODULES_VADDR & ~PMD_MASK) != 0);
	BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL));
	BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) ==
			(__START_KERNEL & PGDIR_MASK)));
	BUILD_BUG_ON(__fix_to_virt(__end_of_fixed_addresses) <= MODULES_END);

	/* Set up the software shadow of CR4 before anyone reads it. */
	cr4_init_shadow();

	/* Kill off the identity-map trampoline */
	reset_early_page_tables();

	/*
	 * Zero .bss before any C code relies on zero-initialized statics.
	 * NOTE(review): next_early_pgt and early_pmd_flags above are in
	 * .data/.init.data, not .bss, so they survive this — presumably by
	 * design; confirm against the linker script.
	 */
	clear_bss();

	clear_page(init_level4_pgt);

	/* KASAN needs its early shadow before instrumented code runs. */
	kasan_early_init();

	/* Install handlers for all exception vectors, then load the IDT,
	 * so early faults (e.g. the early_make_pgtable path) are caught. */
	for (i = 0; i < NUM_EXCEPTION_VECTORS; i++)
		set_intr_gate(i, early_idt_handler_array[i]);
	load_idt((const struct desc_ptr *)&idt_descr);

	copy_bootdata(__va(real_mode_data));

	/*
	 * Load microcode early on BSP.
	 */
	load_ucode_bsp();

	/* set init_level4_pgt kernel high mapping*/
	init_level4_pgt[511] = early_level4_pgt[511];

	x86_64_start_reservations(real_mode_data);
}
186
/*
 * Final 64-bit-specific setup before handing control to the generic
 * start_kernel().  Also the entry point used by paravirt boot paths
 * that skip x86_64_start_kernel, hence the defensive copy_bootdata
 * fallback below.  Does not return.
 */
void __init x86_64_start_reservations(char *real_mode_data)
{
	/* version is always not zero if it is copied */
	if (!boot_params.hdr.version)
		copy_bootdata(__va(real_mode_data));

	/* Reserve the BIOS Extended BIOS Data Area before memory is handed out. */
	reserve_ebda_region();

	/* Platform-specific early setup keyed off the boot subarch field. */
	switch (boot_params.hdr.hardware_subarch) {
	case X86_SUBARCH_INTEL_MID:
		x86_intel_mid_early_setup();
		break;
	default:
		break;
	}

	start_kernel();
}