/*
 *  linux/arch/x86/kernel/head_64.S -- early 64-bit kernel startup code
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *  Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
 *  Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
 *  Copyright (C) 2005 Eric Biederman <ebiederm@xmission.com>
 */

#include <linux/linkage.h>
#include <linux/threads.h>
#include <linux/init.h>
#include <asm/segment.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/cache.h>
#include <asm/processor-flags.h>
#include <asm/percpu.h>
#include <asm/nops.h>
#include "../entry/calling.h"
#include <asm/export.h>
#include <asm/nospec-branch.h>

#ifdef CONFIG_PARAVIRT
#include <asm/asm-offsets.h>
#include <asm/paravirt.h>
#define GET_CR2_INTO(reg) GET_CR2_INTO_RAX ; movq %rax, reg
#else
#define GET_CR2_INTO(reg) movq %cr2, reg
#define INTERRUPT_RETURN iretq
#endif

/*
 * We are not able to switch in one step to the final KERNEL ADDRESS SPACE
 * because we need identity-mapped pages.
 */

#define pud_index(x)	(((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1))

L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET_BASE)
L4_START_KERNEL = pgd_index(__START_KERNEL_map)
L3_START_KERNEL = pud_index(__START_KERNEL_map)
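/*
 * These indices select the pgd/pud slots of the direct map and of the
 * kernel text mapping; they are used by the .org/.fill directives in the
 * page table definitions at the end of this file.
 */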

	.text
	__HEAD
	.code64
	.globl startup_64
startup_64:
	/*
	 * At this point the CPU runs in 64bit mode with CS.L = 1 and CS.D = 0,
	 * and someone has loaded an identity mapped page table
	 * for us.  These identity mapped page tables map all of the
	 * kernel pages and possibly all of memory.
	 *
	 * %rsi holds a physical pointer to real_mode_data.
	 *
	 * We come here either directly from a 64bit bootloader, or from
	 * arch/x86/boot/compressed/head_64.S.
	 *
	 * We only come here initially at boot; nothing else comes here.
	 *
	 * Since we may be loaded at an address different from what we were
	 * compiled to run at, we first fix up the physical addresses in our
	 * page tables and then reload them.
	 */

	/*
	 * Set up the stack for verify_cpu(). "-8" because initial_stack is
	 * defined this way, see below. Our best guess is a NULL ptr for stack
	 * termination heuristics and we don't want to break anything which
	 * might depend on it (kgdb, ...).
	 */
	leaq	(__end_init_task - 8)(%rip), %rsp

	/* Sanitize CPU configuration */
	call verify_cpu

	/*
	 * Compute the delta between the address I am compiled to run at and the
	 * address I am actually running at.
	 */
	leaq	_text(%rip), %rbp
	subq	$_text - __START_KERNEL_map, %rbp
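	/*
	 * %rbp now holds the difference between the physical address we are
	 * running at and the physical address we were compiled to run at,
	 * i.e. the relocation delta (zero when the kernel is not relocated).
	 */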

	/* Is the address not 2M aligned? */
	testl	$~PMD_PAGE_MASK, %ebp
	jnz	bad_address

	/*
	 * Is the address too large?
	 */
	leaq	_text(%rip), %rax
	shrq	$MAX_PHYSMEM_BITS, %rax
	jnz	bad_address
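	/*
	 * Any bits remaining after shifting right by MAX_PHYSMEM_BITS mean
	 * the load address does not fit in the supported physical address
	 * space.
	 */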

	/*
	 * Fixup the physical addresses in the page table
	 */
	addq	%rbp, early_level4_pgt + (L4_START_KERNEL*8)(%rip)

	addq	%rbp, level3_kernel_pgt + (510*8)(%rip)
	addq	%rbp, level3_kernel_pgt + (511*8)(%rip)

	addq	%rbp, level2_fixmap_pgt + (506*8)(%rip)
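	/*
	 * These entries (defined at the end of this file) were filled at
	 * build time with link-time physical addresses of the next-level
	 * tables; add the load delta to each of them.
	 */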

	/*
	 * Set up the identity mapping for the switchover.  These
	 * entries should *NOT* have the global bit set!  This also
	 * creates a bunch of nonsense entries but that is fine --
	 * it avoids problems around wraparound.
	 */
	leaq	_text(%rip), %rdi
	leaq	early_level4_pgt(%rip), %rbx

	movq	%rdi, %rax
	shrq	$PGDIR_SHIFT, %rax

	leaq	(4096 + _KERNPG_TABLE)(%rbx), %rdx
	movq	%rdx, 0(%rbx,%rax,8)
	movq	%rdx, 8(%rbx,%rax,8)

	addq	$4096, %rdx
	movq	%rdi, %rax
	shrq	$PUD_SHIFT, %rax
	andl	$(PTRS_PER_PUD-1), %eax
	movq	%rdx, 4096(%rbx,%rax,8)
	incl	%eax
	andl	$(PTRS_PER_PUD-1), %eax
	movq	%rdx, 4096(%rbx,%rax,8)
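	/*
	 * Two adjacent PGD slots and two adjacent PUD slots were pointed at
	 * the same next-level table, so the identity mapping still covers
	 * _text.._end even if the kernel image straddles a PUD or PGD
	 * boundary; this is where the "nonsense entries" mentioned above
	 * come from.
	 */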

	addq	$8192, %rbx
	movq	%rdi, %rax
	shrq	$PMD_SHIFT, %rdi
	addq	$(__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL), %rax
	leaq	(_end - 1)(%rip), %rcx
	shrq	$PMD_SHIFT, %rcx
	subq	%rdi, %rcx
	incl	%ecx

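	/*
	 * Fill the PMD page: %rdi holds the first 2M slot covering _text,
	 * %rcx the number of 2M pages needed to reach _end, and %rax the
	 * mapping target (physical address of _text plus the page flags).
	 */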
1:
	andq	$(PTRS_PER_PMD - 1), %rdi
	movq	%rax, (%rbx,%rdi,8)
	incq	%rdi
	addq	$PMD_SIZE, %rax
	decl	%ecx
	jnz	1b

	/*
	 * Fixup the kernel text+data virtual addresses. Note that
	 * we might write invalid pmds when the kernel is relocated;
	 * cleanup_highmap() fixes this up along with the mappings
	 * beyond _end.
	 */
	leaq	level2_kernel_pgt(%rip), %rdi
	leaq	4096(%rdi), %r8
	/* See if it is a valid page table entry */
1:	testb	$1, 0(%rdi)
	jz	2f
	addq	%rbp, 0(%rdi)
	/* Go to the next page */
2:	addq	$8, %rdi
	cmp	%r8, %rdi
	jne	1b

	/* Fixup phys_base */
	addq	%rbp, phys_base(%rip)
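	/*
	 * phys_base now records the load delta; secondary_startup_64 below
	 * adds it when computing the physical address of the page tables,
	 * and __pa() uses it later for kernel-image addresses.
	 */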

	movq	$(early_level4_pgt - __START_KERNEL_map), %rax
	jmp 1f
ENTRY(secondary_startup_64)
	/*
	 * At this point the CPU runs in 64bit mode with CS.L = 1 and CS.D = 0,
	 * and someone has loaded a mapped page table.
	 *
	 * %rsi holds a physical pointer to real_mode_data.
	 *
	 * We come here either from startup_64 (using physical addresses)
	 * or from trampoline.S (using virtual addresses).
	 *
	 * Using virtual addresses from trampoline.S removes the need
	 * to have any identity mapped pages in the kernel page table
	 * after the boot processor executes this code.
	 */

	/* Sanitize CPU configuration */
	call verify_cpu

	movq	$(init_level4_pgt - __START_KERNEL_map), %rax
1:

	/* Enable PAE and PSE, but defer PGE until kaiser_enabled is decided */
	movl	$(X86_CR4_PAE | X86_CR4_PSE), %ecx
	movq	%rcx, %cr4

	/* Set up the early boot 4-level page tables. */
	addq	phys_base(%rip), %rax
	movq	%rax, %cr3

	/* Ensure I am executing from virtual addresses */
	movq	$1f, %rax
	ANNOTATE_RETPOLINE_SAFE
	jmp	*%rax
1:

	/* Check if nx is implemented */
	movl	$0x80000001, %eax
	cpuid
	movl	%edx,%edi

	/* Setup EFER (Extended Feature Enable Register) */
	movl	$MSR_EFER, %ecx
	rdmsr
	btsl	$_EFER_SCE, %eax	/* Enable System Call */
	btl	$20,%edi		/* No Execute supported? */
	jnc     1f
	btsl	$_EFER_NX, %eax
	btsq	$_PAGE_BIT_NX,early_pmd_flags(%rip)
1:	wrmsr				/* Make changes effective */

	/* Setup cr0 */
#define CR0_STATE	(X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | \
			 X86_CR0_NE | X86_CR0_WP | X86_CR0_AM | \
			 X86_CR0_PG)
	movl	$CR0_STATE, %eax
	/* Make changes effective */
	movq	%rax, %cr0

	/* Setup a boot time stack */
	movq initial_stack(%rip), %rsp

	/* zero EFLAGS after setting rsp */
	pushq $0
	popfq

	/*
	 * We must switch to a new descriptor in kernel space for the GDT
	 * because soon the kernel won't have access any more to the userspace
	 * addresses we are currently running at. We have to do this here
	 * because in 32bit mode we couldn't load a 64bit linear address.
	 */
	lgdt	early_gdt_descr(%rip)

	/* set up data segments */
	xorl %eax,%eax
	movl %eax,%ds
	movl %eax,%ss
	movl %eax,%es

	/*
	 * We don't really need to load %fs or %gs, but load them anyway
	 * to kill any stale realmode selectors.  This allows execution
	 * under VT hardware.
	 */
	movl %eax,%fs
	movl %eax,%gs

	/*
	 * Set up %gs.
	 *
	 * The base of %gs always points to the bottom of the irqstack
	 * union.  If the stack protector canary is enabled, it is
	 * located at %gs:40.  Note that, on SMP, the boot cpu uses
	 * the init data section until the per cpu areas are set up.
	 */
	movl	$MSR_GS_BASE,%ecx
	movl	initial_gs(%rip),%eax
	movl	initial_gs+4(%rip),%edx
	wrmsr
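	/* wrmsr writes the 64-bit MSR value from %edx:%eax. */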

	/* rsi is a pointer to the real mode structure with interesting info.
	   Pass it to C. */
	movq	%rsi, %rdi

	/*
	 * Finally jump to run C code and to be on a real kernel address.
	 * Since we are running on identity-mapped space we have to jump
	 * to the full 64bit address; this is only possible as an indirect
	 * jump.  In addition we need to ensure %cs is set so we make this
	 * a far return.
	 *
	 * Note: do not change to far jump indirect with 64bit offset.
	 *
	 * AMD does not support far jump indirect with 64bit offset.
	 * AMD64 Architecture Programmer's Manual, Volume 3: states only
	 *	JMP FAR mem16:16 FF /5 Far jump indirect,
	 *		with the target specified by a far pointer in memory.
	 *	JMP FAR mem16:32 FF /5 Far jump indirect,
	 *		with the target specified by a far pointer in memory.
	 *
	 * Intel64 does support 64bit offset.
	 * Software Developer Manual Vol 2: states:
	 *	FF /5 JMP m16:16 Jump far, absolute indirect,
	 *		address given in m16:16
	 *	FF /5 JMP m16:32 Jump far, absolute indirect,
	 *		address given in m16:32.
	 *	REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
	 *		address given in m16:64.
	 */
	movq	initial_code(%rip),%rax
	pushq	$0		# fake return address to stop unwinder
	pushq	$__KERNEL_CS	# set correct cs
	pushq	%rax		# target address in negative space
	lretq
ENDPROC(secondary_startup_64)

#include "verify_cpu.S"

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Boot CPU0 entry point. It's called from play_dead(). Everything has been
 * set up already except the stack. We just set up the stack here, then call
 * start_secondary().
 */
ENTRY(start_cpu0)
	movq initial_stack(%rip),%rsp
	movq	initial_code(%rip),%rax
	pushq	$0		# fake return address to stop unwinder
	pushq	$__KERNEL_CS	# set correct cs
	pushq	%rax		# target address in negative space
	lretq
ENDPROC(start_cpu0)
#endif

	/* Both SMP bootup and ACPI suspend change these variables */
	__REFDATA
	.balign	8
	GLOBAL(initial_code)
	.quad	x86_64_start_kernel
	GLOBAL(initial_gs)
	.quad	INIT_PER_CPU_VAR(irq_stack_union)
	GLOBAL(initial_stack)
	.quad  init_thread_union+THREAD_SIZE-8
	__FINITDATA

bad_address:
	jmp bad_address

	__INIT
ENTRY(early_idt_handler_array)
	# 104(%rsp) %rflags
	#  96(%rsp) %cs
	#  88(%rsp) %rip
	#  80(%rsp) error code
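	# Each of the NUM_EXCEPTION_VECTORS stubs below pushes a dummy error
	# code when the CPU does not supply one, then pushes its vector number
	# and jumps to the common handler, so the stack layout is uniform.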
	i = 0
	.rept NUM_EXCEPTION_VECTORS
	.ifeq (EXCEPTION_ERRCODE_MASK >> i) & 1
	pushq $0		# Dummy error code, to make stack frame uniform
	.endif
	pushq $i		# 72(%rsp) Vector number
	jmp early_idt_handler_common
	i = i + 1
	.fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
	.endr
ENDPROC(early_idt_handler_array)

early_idt_handler_common:
	/*
	 * The stack is the hardware frame, an error code or zero, and the
	 * vector number.
	 */
	cld

	incl early_recursion_flag(%rip)

	/* The vector number is currently in the pt_regs->di slot. */
	pushq %rsi				/* pt_regs->si */
	movq 8(%rsp), %rsi			/* RSI = vector number */
	movq %rdi, 8(%rsp)			/* pt_regs->di = RDI */
	pushq %rdx				/* pt_regs->dx */
	pushq %rcx				/* pt_regs->cx */
	pushq %rax				/* pt_regs->ax */
	pushq %r8				/* pt_regs->r8 */
	pushq %r9				/* pt_regs->r9 */
	pushq %r10				/* pt_regs->r10 */
	pushq %r11				/* pt_regs->r11 */
	pushq %rbx				/* pt_regs->bx */
	pushq %rbp				/* pt_regs->bp */
	pushq %r12				/* pt_regs->r12 */
	pushq %r13				/* pt_regs->r13 */
	pushq %r14				/* pt_regs->r14 */
	pushq %r15				/* pt_regs->r15 */
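	/* The stack now holds a complete struct pt_regs. */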

	cmpq $14,%rsi		/* Page fault? */
	jnz 10f
	GET_CR2_INTO(%rdi)	/* Can clobber any volatile register if pv */
	call early_make_pgtable
	andl %eax,%eax
	jz 20f			/* All good */

10:
	movq %rsp,%rdi		/* RDI = pt_regs; RSI is already trapnr */
	call early_fixup_exception

20:
	decl early_recursion_flag(%rip)
	jmp restore_regs_and_iret
ENDPROC(early_idt_handler_common)

	__INITDATA

	.balign 4
GLOBAL(early_recursion_flag)
	.long 0

#define NEXT_PAGE(name) \
	.balign	PAGE_SIZE; \
GLOBAL(name)

#ifdef CONFIG_PAGE_TABLE_ISOLATION
/*
 * Each PGD needs to be 8k long and 8k aligned.  We do not
 * ever go out to userspace with these, so we do not
 * strictly *need* the second page, but this allows us to
 * have a single set_pgd() implementation that does not
 * need to worry about whether it has 4k or 8k to work
 * with.
 *
 * This ensures PGDs are 8k long:
 */
#define KAISER_USER_PGD_FILL	512
/* This ensures they are 8k-aligned: */
#define NEXT_PGD_PAGE(name) \
	.balign 2 * PAGE_SIZE; \
GLOBAL(name)
#else
#define NEXT_PGD_PAGE(name) NEXT_PAGE(name)
#define KAISER_USER_PGD_FILL	0
#endif

/* Automate the creation of 1 to 1 mapping pmd entries */
#define PMDS(START, PERM, COUNT)			\
	i = 0 ;						\
	.rept (COUNT) ;					\
	.quad	(START) + (i << PMD_SHIFT) + (PERM) ;	\
	i = i + 1 ;					\
	.endr
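/*
 * For example, level2_ident_pgt below uses
 * PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD) to map the first 1G
 * with 512 2M entries, and level2_kernel_pgt uses KERNEL_IMAGE_SIZE/PMD_SIZE
 * entries to cover the kernel image.
 */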

	__INITDATA
NEXT_PGD_PAGE(early_level4_pgt)
	.fill	511,8,0
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
	.fill	KAISER_USER_PGD_FILL,8,0

NEXT_PAGE(early_dynamic_pgts)
	.fill	512*EARLY_DYNAMIC_PAGE_TABLES,8,0

	.data

#ifndef CONFIG_XEN
NEXT_PGD_PAGE(init_level4_pgt)
	.fill	512,8,0
	.fill	KAISER_USER_PGD_FILL,8,0
#else
NEXT_PGD_PAGE(init_level4_pgt)
	.quad   level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.org    init_level4_pgt + L4_PAGE_OFFSET*8, 0
	.quad   level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.org    init_level4_pgt + L4_START_KERNEL*8, 0
	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
	.quad   level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
	.fill	KAISER_USER_PGD_FILL,8,0

NEXT_PAGE(level3_ident_pgt)
	.quad	level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.fill	511, 8, 0
NEXT_PAGE(level2_ident_pgt)
	/*
	 * Since I easily can, map the first 1G.
	 * Don't set NX because code runs from these pages.
	 */
	PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
#endif
	.fill	KAISER_USER_PGD_FILL,8,0

NEXT_PAGE(level3_kernel_pgt)
	.fill	L3_START_KERNEL,8,0
	/* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
	.quad	level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.quad	level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE

NEXT_PAGE(level2_kernel_pgt)
	/*
	 * 512 MB kernel mapping. We spend a full page on this pagetable
	 * anyway.
	 *
	 * The kernel code+data+bss must not be bigger than that.
	 *
	 * (NOTE: the module area starts at +512MB, see MODULES_VADDR.
	 *  If you want to increase this then increase MODULES_VADDR
	 *  too.)
	 */
	PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
		KERNEL_IMAGE_SIZE/PMD_SIZE)

NEXT_PAGE(level2_fixmap_pgt)
	.fill	506,8,0
	.quad	level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
	/* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
	.fill	5,8,0
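	/* 506 + 1 + 5 = 512 entries, i.e. exactly one page table page. */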

NEXT_PAGE(level1_fixmap_pgt)
	.fill	512,8,0

#undef PMDS

	.data
	.align 16
	.globl early_gdt_descr
early_gdt_descr:
	.word	GDT_ENTRIES*8-1
early_gdt_descr_base:
	.quad	INIT_PER_CPU_VAR(gdt_page)

ENTRY(phys_base)
	/* This must match the first entry in level2_kernel_pgt */
	.quad   0x0000000000000000
EXPORT_SYMBOL(phys_base)

#include "../../x86/xen/xen-head.S"

	__PAGE_ALIGNED_BSS
NEXT_PAGE(empty_zero_page)
	.skip PAGE_SIZE
EXPORT_SYMBOL(empty_zero_page)