/*
 *  linux/arch/x86/kernel/head_64.S -- start in 32bit and switch to 64bit
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *  Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
 *  Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
 *  Copyright (C) 2005 Eric Biederman <ebiederm@xmission.com>
 */


#include <linux/linkage.h>
#include <linux/threads.h>
#include <linux/init.h>
#include <asm/segment.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/cache.h>
#include <asm/processor-flags.h>
#include <asm/percpu.h>
#include <asm/nops.h>
#include "../entry/calling.h"
#include <asm/export.h>
#include <asm/nospec-branch.h>

#ifdef CONFIG_PARAVIRT
#include <asm/asm-offsets.h>
#include <asm/paravirt.h>
#define GET_CR2_INTO(reg) GET_CR2_INTO_RAX ; movq %rax, reg
#else
#define GET_CR2_INTO(reg) movq %cr2, reg
#define INTERRUPT_RETURN iretq
#endif

/*
 * We are not able to switch in one step to the final KERNEL ADDRESS SPACE
 * because we need identity-mapped pages.
 */

#define pud_index(x)	(((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1))

L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET_BASE)
L4_START_KERNEL = pgd_index(__START_KERNEL_map)
L3_START_KERNEL = pud_index(__START_KERNEL_map)
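
/*
 * Illustrative arithmetic (assuming the default __START_KERNEL_map of
 * 0xffffffff80000000): pgd_index() extracts address bits 47..39, giving
 * L4_START_KERNEL = 511, and pud_index() extracts bits 38..30, giving
 * L3_START_KERNEL = 510; these are exactly the slots patched by the
 * fixup code in startup_64 below.
 */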

	.text
	__HEAD
	.code64
	.globl startup_64
startup_64:
	/*
	 * At this point the CPU runs in 64bit mode with CS.L = 1 and
	 * CS.D = 0, and someone has loaded an identity mapped page table
	 * for us.  These identity mapped page tables map all of the
	 * kernel pages and possibly all of memory.
	 *
	 * %rsi holds a physical pointer to real_mode_data.
	 *
	 * We come here either directly from a 64bit bootloader, or from
	 * arch/x86/boot/compressed/head_64.S.
	 *
	 * We only come here initially at boot; nothing else comes here.
	 *
	 * Since we may be loaded at an address different from what we were
	 * compiled to run at, we first fix up the physical addresses in our
	 * page tables and then reload them.
	 */

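	/*
	 * Relocation sketch (illustrative numbers): with the default
	 * CONFIG_PHYSICAL_START of 0x1000000, "$_text - __START_KERNEL_map"
	 * evaluates to 0x1000000.  If the bootloader actually placed us at
	 * physical 0x5000000, the leaq/subq pair further down leaves the
	 * delta 0x4000000 in %rbp, and that delta is what gets added to
	 * every physical address stored in the initial page tables.
	 */
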
	/*
	 * Setup stack for verify_cpu(). "-8" because initial_stack is defined
	 * this way, see below. Our best guess is a NULL ptr for stack
	 * termination heuristics and we don't want to break anything which
	 * might depend on it (kgdb, ...).
	 */
	leaq	(__end_init_task - 8)(%rip), %rsp

	/* Sanitize CPU configuration */
	call	verify_cpu

	/*
	 * Compute the delta between the address I am compiled to run at
	 * and the address I am actually running at.
	 */
	leaq	_text(%rip), %rbp
	subq	$_text - __START_KERNEL_map, %rbp

	/* Is the address not 2M aligned? */
	testl	$~PMD_PAGE_MASK, %ebp
	jnz	bad_address

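	/*
	 * A minimal sketch of the test above: PMD_PAGE_MASK is ~(2MB - 1),
	 * so $~PMD_PAGE_MASK is 0x1fffff and the testl inspects the low 21
	 * bits of the delta.  Any nonzero bit means a load address that the
	 * 2MB mappings below could not represent.
	 */
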
	/*
	 * Is the address too large?
	 */
	leaq	_text(%rip), %rax
	shrq	$MAX_PHYSMEM_BITS, %rax
	jnz	bad_address

	/*
	 * Fixup the physical addresses in the page table
	 */
	addq	%rbp, early_level4_pgt + (L4_START_KERNEL*8)(%rip)

	addq	%rbp, level3_kernel_pgt + (510*8)(%rip)
	addq	%rbp, level3_kernel_pgt + (511*8)(%rip)

	addq	%rbp, level2_fixmap_pgt + (506*8)(%rip)

	/*
	 * Set up the identity mapping for the switchover.  These
	 * entries should *NOT* have the global bit set!  This also
	 * creates a bunch of nonsense entries but that is fine --
	 * it avoids problems around wraparound.
	 */
	leaq	_text(%rip), %rdi
	leaq	early_level4_pgt(%rip), %rbx

	movq	%rdi, %rax
	shrq	$PGDIR_SHIFT, %rax

	leaq	(4096 + _KERNPG_TABLE)(%rbx), %rdx
	movq	%rdx, 0(%rbx,%rax,8)
	movq	%rdx, 8(%rbx,%rax,8)

	addq	$4096, %rdx
	movq	%rdi, %rax
	shrq	$PUD_SHIFT, %rax
	andl	$(PTRS_PER_PUD-1), %eax
	movq	%rdx, 4096(%rbx,%rax,8)
	incl	%eax
	andl	$(PTRS_PER_PUD-1), %eax
	movq	%rdx, 4096(%rbx,%rax,8)

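	/*
	 * Sketch of the stores above (illustrative numbers): early_level4_pgt
	 * is followed by a PUD page at +4096 and a PMD page at +8192.  For a
	 * kernel at physical 0x1000000, pud_index = (0x1000000 >> 30) & 511
	 * = 0, so PUD slots 0 and 1 both point at the PMD page; filling two
	 * adjacent slots keeps an image that crosses a 1GB boundary covered.
	 */
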
	addq	$8192, %rbx
	movq	%rdi, %rax
	shrq	$PMD_SHIFT, %rdi
	addq	$(__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL), %rax
	leaq	(_end - 1)(%rip), %rcx
	shrq	$PMD_SHIFT, %rcx
	subq	%rdi, %rcx
	incl	%ecx

1:
	andq	$(PTRS_PER_PMD - 1), %rdi
	movq	%rax, (%rbx,%rdi,8)
	incq	%rdi
	addq	$PMD_SIZE, %rax
	decl	%ecx
	jnz	1b

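	/*
	 * Loop sketch: %rcx ends up as pmd_index(_end - 1) - pmd_index(_text)
	 * + 1, the number of 2MB pages covering the image.  Each pass stores
	 * one large-page entry (physical address in %rax plus the non-global
	 * flags) and advances %rax by PMD_SIZE; a 20MB image writes ten or
	 * eleven entries depending on alignment.
	 */
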
	/*
	 * Fixup the kernel text+data virtual addresses. Note that
	 * we might write invalid pmds when the kernel is relocated;
	 * cleanup_highmap() fixes this up along with the mappings
	 * beyond _end.
	 */
	leaq	level2_kernel_pgt(%rip), %rdi
	leaq	4096(%rdi), %r8
	/* See if it is a valid page table entry */
1:	testb	$1, 0(%rdi)
	jz	2f
	addq	%rbp, 0(%rdi)
	/* Go to the next page */
2:	addq	$8, %rdi
	cmp	%r8, %rdi
	jne	1b

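	/*
	 * Sketch of the loop above: it walks all 512 entries (4096 bytes) of
	 * level2_kernel_pgt; "testb $1" checks _PAGE_PRESENT, and only
	 * present entries get the relocation delta in %rbp added.  Present
	 * entries past _end may become invalid, which cleanup_highmap()
	 * prunes later.
	 */
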
	/* Fixup phys_base */
	addq	%rbp, phys_base(%rip)

	movq	$(early_level4_pgt - __START_KERNEL_map), %rax
	jmp	1f
ENTRY(secondary_startup_64)
	/*
	 * At this point the CPU runs in 64bit mode with CS.L = 1 and
	 * CS.D = 0, and someone has loaded a mapped page table.
	 *
	 * %rsi holds a physical pointer to real_mode_data.
	 *
	 * We come here either from startup_64 (using physical addresses)
	 * or from trampoline.S (using virtual addresses).
	 *
	 * Using virtual addresses from trampoline.S removes the need
	 * to have any identity mapped pages in the kernel page table
	 * after the boot processor executes this code.
	 */

	/* Sanitize CPU configuration */
	call	verify_cpu

	movq	$(init_level4_pgt - __START_KERNEL_map), %rax
1:

	/* Enable PAE and PSE, but defer PGE until kaiser_enabled is decided */
	movl	$(X86_CR4_PAE | X86_CR4_PSE), %ecx
	movq	%rcx, %cr4

	/* Setup early boot stage 4 level pagetables. */
	addq	phys_base(%rip), %rax
	movq	%rax, %cr3

	/* Ensure I am executing from virtual addresses */
	movq	$1f, %rax
	ANNOTATE_RETPOLINE_SAFE
	jmp	*%rax
1:

	/* Check if nx is implemented */
	movl	$0x80000001, %eax
	cpuid
	movl	%edx,%edi

	/* Setup EFER (Extended Feature Enable Register) */
	movl	$MSR_EFER, %ecx
	rdmsr
	btsl	$_EFER_SCE, %eax	/* Enable System Call */
	btl	$20,%edi		/* No Execute supported? */
	jnc	1f
	btsl	$_EFER_NX, %eax
	btsq	$_PAGE_BIT_NX,early_pmd_flags(%rip)
1:	wrmsr				/* Make changes effective */

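	/*
	 * Bit positions used above (architectural facts): CPUID leaf
	 * 0x80000001 returns feature flags in %edx, where bit 20 is NX;
	 * _EFER_SCE is EFER bit 0 (SYSCALL enable) and _EFER_NX is EFER
	 * bit 11.  The btl/btsl pair forwards the CPUID answer into EFER
	 * through the carry flag.
	 */
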
	/* Setup cr0 */
#define CR0_STATE	(X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | \
			 X86_CR0_NE | X86_CR0_WP | X86_CR0_AM | \
			 X86_CR0_PG)
	movl	$CR0_STATE, %eax
	/* Make changes effective */
	movq	%rax, %cr0

	/* Setup a boot time stack */
	movq	initial_stack(%rip), %rsp

	/* zero EFLAGS after setting rsp */
	pushq	$0
	popfq

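	/*
	 * Note on the sequence above: popping a zero into RFLAGS clears DF,
	 * TF and IF (only the reserved bit 1 reads back as 1), so we run
	 * with a known flag state and interrupts off from here on.
	 */
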
	/*
	 * We must switch to a new descriptor in kernel space for the GDT
	 * because soon the kernel won't have access anymore to the userspace
	 * addresses we're currently running on. We have to do that here
	 * because in 32bit we couldn't load a 64bit linear address.
	 */
	lgdt	early_gdt_descr(%rip)

	/* set up data segments */
	xorl	%eax,%eax
	movl	%eax,%ds
	movl	%eax,%ss
	movl	%eax,%es

	/*
	 * We don't really need to load %fs or %gs, but load them anyway
	 * to kill any stale realmode selectors.  This allows execution
	 * under VT hardware.
	 */
	movl	%eax,%fs
	movl	%eax,%gs

	/* Set up %gs.
	 *
	 * The base of %gs always points to the bottom of the irqstack
	 * union.  If the stack protector canary is enabled, it is
	 * located at %gs:40.  Note that, on SMP, the boot cpu uses
	 * the init data section until the per-cpu areas are set up.
	 */
	movl	$MSR_GS_BASE,%ecx
	movl	initial_gs(%rip),%eax
	movl	initial_gs+4(%rip),%edx
	wrmsr

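	/*
	 * wrmsr writes %edx:%eax into the MSR selected by %ecx, which is
	 * why the 64-bit initial_gs value is loaded as two 32-bit halves
	 * above.
	 */
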
	/* %rsi is a pointer to the real mode structure with interesting
	   info; pass it to C. */
	movq	%rsi, %rdi

	/* Finally jump to run C code and to be on a real kernel address.
	 * Since we are running on identity-mapped space we have to jump
	 * to the full 64bit address; this is only possible with an
	 * indirect jump.  In addition we need to ensure %cs is set, so we
	 * make this a far return.
	 *
	 * Note: do not change to far jump indirect with 64bit offset.
	 *
	 * AMD does not support far jump indirect with 64bit offset.
	 * AMD64 Architecture Programmer's Manual, Volume 3, states only:
	 *	JMP FAR mem16:16 FF /5 Far jump indirect,
	 *		with the target specified by a far pointer in memory.
	 *	JMP FAR mem16:32 FF /5 Far jump indirect,
	 *		with the target specified by a far pointer in memory.
	 *
	 * Intel64 does support 64bit offset.
	 * Software Developer Manual Vol 2 states:
	 *	FF /5 JMP m16:16 Jump far, absolute indirect,
	 *		address given in m16:16.
	 *	FF /5 JMP m16:32 Jump far, absolute indirect,
	 *		address given in m16:32.
	 *	REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
	 *		address given in m16:64.
	 */
	movq	initial_code(%rip),%rax
	pushq	$0		# fake return address to stop unwinder
	pushq	$__KERNEL_CS	# set correct cs
	pushq	%rax		# target address in negative space
	lretq
ENDPROC(secondary_startup_64)

#include "verify_cpu.S"

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Boot CPU0 entry point. It's called from play_dead(). Everything has been
 * set up already except the stack. We just set up the stack here, then call
 * start_secondary().
 */
ENTRY(start_cpu0)
	movq	initial_stack(%rip),%rsp
	movq	initial_code(%rip),%rax
	pushq	$0		# fake return address to stop unwinder
	pushq	$__KERNEL_CS	# set correct cs
	pushq	%rax		# target address in negative space
	lretq
ENDPROC(start_cpu0)
#endif

	/* Both SMP bootup and ACPI suspend change these variables */
	__REFDATA
	.balign	8
	GLOBAL(initial_code)
	.quad	x86_64_start_kernel
	GLOBAL(initial_gs)
	.quad	INIT_PER_CPU_VAR(irq_stack_union)
	GLOBAL(initial_stack)
	.quad	init_thread_union+THREAD_SIZE-8
	__FINITDATA

bad_address:
	jmp	bad_address

	__INIT
ENTRY(early_idt_handler_array)
	# 104(%rsp) %rflags
	#  96(%rsp) %cs
	#  88(%rsp) %rip
	#  80(%rsp) error code
	i = 0
	.rept NUM_EXCEPTION_VECTORS
	.ifeq (EXCEPTION_ERRCODE_MASK >> i) & 1
	pushq $0		# Dummy error code, to make stack frame uniform
	.endif
	pushq $i		# 72(%rsp) Vector number
	jmp early_idt_handler_common
	i = i + 1
	.fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
	.endr
ENDPROC(early_idt_handler_array)

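/*
 * Layout sketch: each .rept iteration emits one stub (an optional dummy
 * error-code push, the vector-number push, and the jmp) and pads it with
 * 0xcc (int3) up to EARLY_IDT_HANDLER_SIZE bytes, so stub N lives at
 * early_idt_handler_array + N*EARLY_IDT_HANDLER_SIZE.  The .ifeq fires
 * when bit i of EXCEPTION_ERRCODE_MASK is clear, i.e. for vectors where
 * the CPU pushes no error code of its own.
 */
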
early_idt_handler_common:
	/*
	 * The stack is the hardware frame, an error code or zero, and the
	 * vector number.
	 */
	cld

	incl early_recursion_flag(%rip)

	/* The vector number is currently in the pt_regs->di slot. */
	pushq %rsi				/* pt_regs->si */
	movq 8(%rsp), %rsi			/* RSI = vector number */
	movq %rdi, 8(%rsp)			/* pt_regs->di = RDI */
	pushq %rdx				/* pt_regs->dx */
	pushq %rcx				/* pt_regs->cx */
	pushq %rax				/* pt_regs->ax */
	pushq %r8				/* pt_regs->r8 */
	pushq %r9				/* pt_regs->r9 */
	pushq %r10				/* pt_regs->r10 */
	pushq %r11				/* pt_regs->r11 */
	pushq %rbx				/* pt_regs->bx */
	pushq %rbp				/* pt_regs->bp */
	pushq %r12				/* pt_regs->r12 */
	pushq %r13				/* pt_regs->r13 */
	pushq %r14				/* pt_regs->r14 */
	pushq %r15				/* pt_regs->r15 */

	cmpq $14,%rsi		/* Page fault? */
	jnz 10f
	GET_CR2_INTO(%rdi)	/* Can clobber any volatile register if pv */
	call early_make_pgtable
	andl %eax,%eax
	jz 20f			/* All good */

10:
	movq %rsp,%rdi		/* RDI = pt_regs; RSI is already trapnr */
	call early_fixup_exception

20:
	decl early_recursion_flag(%rip)
	jmp restore_regs_and_iret
ENDPROC(early_idt_handler_common)

	__INITDATA

	.balign 4
GLOBAL(early_recursion_flag)
	.long 0

#define NEXT_PAGE(name) \
	.balign	PAGE_SIZE; \
GLOBAL(name)

#ifdef CONFIG_PAGE_TABLE_ISOLATION
/*
 * Each PGD needs to be 8k long and 8k aligned.  We do not
 * ever go out to userspace with these, so we do not
 * strictly *need* the second page, but this allows us to
 * have a single set_pgd() implementation that does not
 * need to worry about whether it has 4k or 8k to work
 * with.
 *
 * This ensures PGDs are 8k long:
 */
#define KAISER_USER_PGD_FILL	512
/* This ensures they are 8k-aligned: */
#define NEXT_PGD_PAGE(name) \
	.balign 2 * PAGE_SIZE; \
GLOBAL(name)
#else
#define NEXT_PGD_PAGE(name) NEXT_PAGE(name)
#define KAISER_USER_PGD_FILL	0
#endif
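
/*
 * Size sketch: KAISER_USER_PGD_FILL is 512 quadwords, i.e. one extra 4k
 * page of zeros behind each PGD, and .balign 2 * PAGE_SIZE makes the
 * pair 8k-aligned.  The kernel half occupies the first 4k and the
 * shadow (user) half the second, so switching between them is a single
 * bit flip of the CR3 value.
 */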

/* Automate the creation of 1 to 1 mapping pmd entries */
#define PMDS(START, PERM, COUNT)			\
	i = 0 ;						\
	.rept (COUNT) ;					\
	.quad	(START) + (i << PMD_SHIFT) + (PERM) ;	\
	i = i + 1 ;					\
	.endr

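/*
 * Expansion sketch (illustrative): PMDS(0, perm, 3) assembles to
 *
 *	.quad	0x0000000 + (perm)
 *	.quad	0x0200000 + (perm)
 *	.quad	0x0400000 + (perm)
 *
 * i.e. one 2MB (1 << PMD_SHIFT) large-page entry per iteration.
 */
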
	__INITDATA
NEXT_PGD_PAGE(early_level4_pgt)
	.fill	511,8,0
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
	.fill	KAISER_USER_PGD_FILL,8,0

NEXT_PAGE(early_dynamic_pgts)
	.fill	512*EARLY_DYNAMIC_PAGE_TABLES,8,0

	.data

#ifndef CONFIG_XEN
NEXT_PGD_PAGE(init_level4_pgt)
	.fill	512,8,0
	.fill	KAISER_USER_PGD_FILL,8,0
#else
NEXT_PGD_PAGE(init_level4_pgt)
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.org	init_level4_pgt + L4_PAGE_OFFSET*8, 0
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.org	init_level4_pgt + L4_START_KERNEL*8, 0
	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
	.fill	KAISER_USER_PGD_FILL,8,0

NEXT_PAGE(level3_ident_pgt)
	.quad	level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.fill	511, 8, 0
NEXT_PAGE(level2_ident_pgt)
	/* Since I easily can, map the first 1G.
	 * Don't set NX because code runs from these pages.
	 */
	PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
#endif
	.fill	KAISER_USER_PGD_FILL,8,0

NEXT_PAGE(level3_kernel_pgt)
	.fill	L3_START_KERNEL,8,0
	/* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
	.quad	level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.quad	level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE

NEXT_PAGE(level2_kernel_pgt)
	/*
	 * 512 MB kernel mapping. We spend a full page on this pagetable
	 * anyway.
	 *
	 * The kernel code+data+bss must not be bigger than that.
	 *
	 * (NOTE: at +512MB starts the module area, see MODULES_VADDR.
	 *  If you want to increase this then increase MODULES_VADDR
	 *  too.)
	 */
	PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
		KERNEL_IMAGE_SIZE/PMD_SIZE)

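/*
 * Sizing sketch: with the default KERNEL_IMAGE_SIZE of 512MB and a
 * PMD_SIZE of 2MB, this PMDS() invocation emits 256 large-page entries
 * and leaves the remaining 256 slots of the page zero.
 */
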
NEXT_PAGE(level2_fixmap_pgt)
	.fill	506,8,0
	.quad	level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
	/* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
	.fill	5,8,0

NEXT_PAGE(level1_fixmap_pgt)
	.fill	512,8,0

#undef PMDS

	.data
	.align 16
	.globl early_gdt_descr
early_gdt_descr:
	.word	GDT_ENTRIES*8-1
early_gdt_descr_base:
	.quad	INIT_PER_CPU_VAR(gdt_page)

ENTRY(phys_base)
	/* This must match the first entry in level2_kernel_pgt */
	.quad	0x0000000000000000
EXPORT_SYMBOL(phys_base)

#include "../../x86/xen/xen-head.S"

	__PAGE_ALIGNED_BSS
NEXT_PAGE(empty_zero_page)
	.skip PAGE_SIZE
EXPORT_SYMBOL(empty_zero_page)