/*
 * Copyright (C) 1991,1992 Linus Torvalds
 *
 * entry_32.S contains the system-call and low-level fault and trap handling routines.
 *
 * Stack layout while running C code:
 *	ptrace needs to have all registers on the stack.
 *	If the order here is changed, it needs to be
 *	updated in fork.c:copy_process(), signal.c:do_signal(),
 *	ptrace.c and ptrace.h
 *
 *	 0(%esp) - %ebx
 *	 4(%esp) - %ecx
 *	 8(%esp) - %edx
 *	 C(%esp) - %esi
 *	10(%esp) - %edi
 *	14(%esp) - %ebp
 *	18(%esp) - %eax
 *	1C(%esp) - %ds
 *	20(%esp) - %es
 *	24(%esp) - %fs
 *	28(%esp) - %gs		saved iff !CONFIG_X86_32_LAZY_GS
 *	2C(%esp) - orig_eax
 *	30(%esp) - %eip
 *	34(%esp) - %cs
 *	38(%esp) - %eflags
 *	3C(%esp) - %oldesp
 *	40(%esp) - %oldss
 */
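
/*
 * The PT_* offsets used throughout this file (PT_EBX, PT_EFLAGS,
 * PT_OLDESP, ...) are generated at build time from struct pt_regs by the
 * asm-offsets machinery and correspond one-for-one to the table above.
 */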

#include <linux/linkage.h>
#include <linux/err.h>
#include <asm/thread_info.h>
#include <asm/irqflags.h>
#include <asm/errno.h>
#include <asm/segment.h>
#include <asm/smp.h>
#include <asm/page_types.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>
#include <asm/ftrace.h>
#include <asm/irq_vectors.h>
#include <asm/cpufeatures.h>
#include <asm/alternative-asm.h>
#include <asm/asm.h>
#include <asm/smap.h>
#include <asm/export.h>
#include <asm/frame.h>

	.section .entry.text, "ax"

/*
 * We use macros for low-level operations which need to be overridden
 * for paravirtualization. The following will never clobber any registers:
 *	INTERRUPT_RETURN (aka. "iret")
 *	GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
 *	ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
 *
 * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
 * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
 * Allowing a register to be clobbered can shrink the paravirt replacement
 * enough to patch inline, increasing performance.
 */
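
/*
 * On a non-paravirt build these macros expand directly to the native
 * instructions (cli/sti/iret/sysexit); with CONFIG_PARAVIRT they become
 * patchable call sites that the paravirt code may rewrite inline.
 */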

#ifdef CONFIG_PREEMPT
# define preempt_stop(clobbers)	DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
#else
# define preempt_stop(clobbers)
# define resume_kernel		restore_all
#endif

.macro TRACE_IRQS_IRET
#ifdef CONFIG_TRACE_IRQFLAGS
	testl	$X86_EFLAGS_IF, PT_EFLAGS(%esp)		# interrupts off?
	jz	1f
	TRACE_IRQS_ON
1:
#endif
.endm

/*
 * User gs save/restore
 *
 * %gs is used for userland TLS, and the kernel only uses it for the
 * stack canary, which gcc requires to live at %gs:20. Read the comment
 * at the top of stackprotector.h for more info.
 *
 * Local labels 98 and 99 are used.
 */
#ifdef CONFIG_X86_32_LAZY_GS

 /* unfortunately push/pop can't be no-op */
.macro PUSH_GS
	pushl	$0
.endm
.macro POP_GS pop=0
	addl	$(4 + \pop), %esp
.endm
.macro POP_GS_EX
.endm

 /* all the rest are no-op */
.macro PTGS_TO_GS
.endm
.macro PTGS_TO_GS_EX
.endm
.macro GS_TO_REG reg
.endm
.macro REG_TO_PTGS reg
.endm
.macro SET_KERNEL_GS reg
.endm

#else	/* CONFIG_X86_32_LAZY_GS */

.macro PUSH_GS
	pushl	%gs
.endm

.macro POP_GS pop=0
98:	popl	%gs
  .if \pop <> 0
	add	$\pop, %esp
  .endif
.endm
.macro POP_GS_EX
.pushsection .fixup, "ax"
99:	movl	$0, (%esp)
	jmp	98b
.popsection
	_ASM_EXTABLE(98b, 99b)
.endm

.macro PTGS_TO_GS
98:	mov	PT_GS(%esp), %gs
.endm
.macro PTGS_TO_GS_EX
.pushsection .fixup, "ax"
99:	movl	$0, PT_GS(%esp)
	jmp	98b
.popsection
	_ASM_EXTABLE(98b, 99b)
.endm

.macro GS_TO_REG reg
	movl	%gs, \reg
.endm
.macro REG_TO_PTGS reg
	movl	\reg, PT_GS(%esp)
.endm
.macro SET_KERNEL_GS reg
	movl	$(__KERNEL_STACK_CANARY), \reg
	movl	\reg, %gs
.endm

#endif /* CONFIG_X86_32_LAZY_GS */

.macro SAVE_ALL pt_regs_ax=%eax
	cld
	PUSH_GS
	pushl	%fs
	pushl	%es
	pushl	%ds
	pushl	\pt_regs_ax
	pushl	%ebp
	pushl	%edi
	pushl	%esi
	pushl	%edx
	pushl	%ecx
	pushl	%ebx
	movl	$(__USER_DS), %edx
	movl	%edx, %ds
	movl	%edx, %es
	movl	$(__KERNEL_PERCPU), %edx
	movl	%edx, %fs
	SET_KERNEL_GS %edx
.endm
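
/*
 * After SAVE_ALL completes, %esp points at the base of a struct pt_regs
 * frame, so the PT_* offsets (and the layout table at the top of this
 * file) describe the saved state exactly; \pt_regs_ax is what lands in
 * pt_regs->ax.
 */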

.macro RESTORE_INT_REGS
	popl	%ebx
	popl	%ecx
	popl	%edx
	popl	%esi
	popl	%edi
	popl	%ebp
	popl	%eax
.endm

.macro RESTORE_REGS pop=0
	RESTORE_INT_REGS
1:	popl	%ds
2:	popl	%es
3:	popl	%fs
	POP_GS \pop
.pushsection .fixup, "ax"
4:	movl	$0, (%esp)
	jmp	1b
5:	movl	$0, (%esp)
	jmp	2b
6:	movl	$0, (%esp)
	jmp	3b
.popsection
	_ASM_EXTABLE(1b, 4b)
	_ASM_EXTABLE(2b, 5b)
	_ASM_EXTABLE(3b, 6b)
	POP_GS_EX
.endm

/*
 * %eax: prev task
 * %edx: next task
 */
ENTRY(__switch_to_asm)
	/*
	 * Save callee-saved registers
	 * This must match the order in struct inactive_task_frame
	 */
	pushl	%ebp
	pushl	%ebx
	pushl	%edi
	pushl	%esi

	/* switch stack */
	movl	%esp, TASK_threadsp(%eax)
	movl	TASK_threadsp(%edx), %esp

#ifdef CONFIG_CC_STACKPROTECTOR
	movl	TASK_stack_canary(%edx), %ebx
	movl	%ebx, PER_CPU_VAR(stack_canary)+stack_canary_offset
#endif

	/* restore callee-saved registers */
	popl	%esi
	popl	%edi
	popl	%ebx
	popl	%ebp

	jmp	__switch_to
END(__switch_to_asm)
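
/*
 * For reference (a sketch -- the authoritative declaration lives in the
 * C headers): with the kernel's 32-bit -mregparm=3 ABI, the tail call
 * above behaves like
 *
 *	struct task_struct *__switch_to(struct task_struct *prev,
 *					struct task_struct *next);
 *
 * with prev still in %eax and next in %edx, matching the register
 * comment at the top of this function.
 */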

/*
 * The unwinder expects the last frame on the stack to always be at the same
 * offset from the end of the page, which allows it to validate the stack.
 * Calling schedule_tail() directly would break that convention because it's
 * an asmlinkage function, so its argument has to be pushed on the stack.
 * This wrapper creates a proper "end of stack" frame header before the call.
 */
ENTRY(schedule_tail_wrapper)
	FRAME_BEGIN

	pushl	%eax
	call	schedule_tail
	popl	%eax

	FRAME_END
	ret
ENDPROC(schedule_tail_wrapper)

/*
 * A newly forked process directly context switches into this address.
 *
 * eax: prev task we switched from
 * ebx: kernel thread func (NULL for user thread)
 * edi: kernel thread arg
 */
ENTRY(ret_from_fork)
	call	schedule_tail_wrapper

	testl	%ebx, %ebx
	jnz	1f		/* kernel threads are uncommon */

2:
	/* When we fork, we trace the syscall return in the child, too. */
	movl	%esp, %eax
	call	syscall_return_slowpath
	jmp	restore_all

	/* kernel thread */
1:	movl	%edi, %eax
	call	*%ebx
	/*
	 * A kernel thread is allowed to return here after successfully
	 * calling do_execve().  Exit to userspace to complete the execve()
	 * syscall.
	 */
	movl	$0, PT_EAX(%esp)
	jmp	2b
END(ret_from_fork)

/*
 * Return to user mode is not as complex as all this looks,
 * but we want the default path for a system call return to
 * go as quickly as possible which is why some of this is
 * less clear than it otherwise should be.
 */

	# userspace resumption stub bypassing syscall exit tracing
	ALIGN
ret_from_exception:
	preempt_stop(CLBR_ANY)
ret_from_intr:
#ifdef CONFIG_VM86
	movl	PT_EFLAGS(%esp), %eax		# mix EFLAGS and CS
	movb	PT_CS(%esp), %al
	andl	$(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
#else
	/*
	 * We can be coming here from a child spawned by kernel_thread().
	 */
	movl	PT_CS(%esp), %eax
	andl	$SEGMENT_RPL_MASK, %eax
#endif
	cmpl	$USER_RPL, %eax
	jb	resume_kernel			# not returning to v8086 or userspace

ENTRY(resume_userspace)
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF
	movl	%esp, %eax
	call	prepare_exit_to_usermode
	jmp	restore_all
END(ret_from_exception)

#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
	DISABLE_INTERRUPTS(CLBR_ANY)
.Lneed_resched:
	cmpl	$0, PER_CPU_VAR(__preempt_count)
	jnz	restore_all
	testl	$X86_EFLAGS_IF, PT_EFLAGS(%esp)	# interrupts off (exception path) ?
	jz	restore_all
	call	preempt_schedule_irq
	jmp	.Lneed_resched
END(resume_kernel)
#endif

GLOBAL(__begin_SYSENTER_singlestep_region)
/*
 * All code from here through __end_SYSENTER_singlestep_region is subject
 * to being single-stepped if a user program sets TF and executes SYSENTER.
 * There is absolutely nothing that we can do to prevent this from happening
 * (thanks Intel!).  To keep our handling of this situation as simple as
 * possible, we handle TF just like AC and NT, except that our #DB handler
 * will ignore all of the single-step traps generated in this range.
 */

#ifdef CONFIG_XEN
/*
 * Xen doesn't set %esp to be precisely what the normal SYSENTER
 * entry point expects, so fix it up before using the normal path.
 */
ENTRY(xen_sysenter_target)
	addl	$5*4, %esp			/* remove xen-provided frame */
	jmp	.Lsysenter_past_esp
#endif

/*
 * 32-bit SYSENTER entry.
 *
 * 32-bit system calls through the vDSO's __kernel_vsyscall enter here
 * if X86_FEATURE_SEP is available.  This is the preferred system call
 * entry on 32-bit systems.
 *
 * The SYSENTER instruction, in principle, should *only* occur in the
 * vDSO.  In practice, a small number of Android devices were shipped
 * with a copy of Bionic that inlined a SYSENTER instruction.  This
 * never happened in any of Google's Bionic versions -- it only happened
 * in a narrow range of Intel-provided versions.
 *
 * SYSENTER loads SS, ESP, CS, and EIP from previously programmed MSRs.
 * IF and VM in RFLAGS are cleared (IOW: interrupts are off).
 * SYSENTER does not save anything on the stack,
 * and does not save old EIP (!!!), ESP, or EFLAGS.
 *
 * To avoid losing track of EFLAGS.VM (and thus potentially corrupting
 * user and/or vm86 state), we explicitly disable the SYSENTER
 * instruction in vm86 mode by reprogramming the MSRs.
 *
 * Arguments:
 * eax  system call number
 * ebx  arg1
 * ecx  arg2
 * edx  arg3
 * esi  arg4
 * edi  arg5
 * ebp  user stack
 * 0(%ebp) arg6
 */
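/*
 * For illustration: the vDSO's __kernel_vsyscall reaches this entry with
 * roughly the following sequence (a sketch of the idea, not a verbatim
 * copy of the vDSO sources):
 *
 *	pushl	%ecx		# ecx/edx are clobbered by SYSEXIT
 *	pushl	%edx
 *	pushl	%ebp		# arg6 lives in ebp; the push parks it
 *	movl	%esp, %ebp	# SYSENTER loses ESP, so stash it in EBP
 *	sysenter
 *
 * That is why %ebp carries the user stack pointer here and arg6 sits
 * at 0(%ebp).
 */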
ENTRY(entry_SYSENTER_32)
	movl	TSS_sysenter_sp0(%esp), %esp
.Lsysenter_past_esp:
	pushl	$__USER_DS		/* pt_regs->ss */
	pushl	%ebp			/* pt_regs->sp (stashed in bp) */
	pushfl				/* pt_regs->flags (except IF = 0) */
	orl	$X86_EFLAGS_IF, (%esp)	/* Fix IF */
	pushl	$__USER_CS		/* pt_regs->cs */
	pushl	$0			/* pt_regs->ip = 0 (placeholder) */
	pushl	%eax			/* pt_regs->orig_ax */
	SAVE_ALL pt_regs_ax=$-ENOSYS	/* save rest */

	/*
	 * SYSENTER doesn't filter flags, so we need to clear NT, AC
	 * and TF ourselves.  To save a few cycles, we can check whether
	 * either was set instead of doing an unconditional popfl.
	 * This needs to happen before enabling interrupts so that
	 * we don't get preempted with NT set.
	 *
	 * If TF is set, we will single-step all the way to here -- do_debug
	 * will ignore all the traps.  (Yes, this is slow, but so is
	 * single-stepping in general.  This allows us to avoid having
	 * more complicated code to handle the case where a user program
	 * forces us to single-step through the SYSENTER entry code.)
	 *
	 * NB.: .Lsysenter_fix_flags is a label with the code under it moved
	 * out-of-line as an optimization: NT is unlikely to be set in the
	 * majority of the cases and instead of polluting the I$ unnecessarily,
	 * we're keeping that code behind a branch which will predict as
	 * not-taken and therefore its instructions won't be fetched.
	 */
	testl	$X86_EFLAGS_NT|X86_EFLAGS_AC|X86_EFLAGS_TF, PT_EFLAGS(%esp)
	jnz	.Lsysenter_fix_flags
.Lsysenter_flags_fixed:

	/*
	 * User mode is traced as though IRQs are on, and SYSENTER
	 * turned them off.
	 */
	TRACE_IRQS_OFF

	movl	%esp, %eax
	call	do_fast_syscall_32
	/* XEN PV guests always use IRET path */
	ALTERNATIVE "testl %eax, %eax; jz .Lsyscall_32_done", \
		    "jmp .Lsyscall_32_done", X86_FEATURE_XENPV

	/* Opportunistic SYSEXIT */
	TRACE_IRQS_ON			/* User mode traces as IRQs on. */
	movl	PT_EIP(%esp), %edx	/* pt_regs->ip */
	movl	PT_OLDESP(%esp), %ecx	/* pt_regs->sp */
1:	mov	PT_FS(%esp), %fs
	PTGS_TO_GS
	popl	%ebx			/* pt_regs->bx */
	addl	$2*4, %esp		/* skip pt_regs->cx and pt_regs->dx */
	popl	%esi			/* pt_regs->si */
	popl	%edi			/* pt_regs->di */
	popl	%ebp			/* pt_regs->bp */
	popl	%eax			/* pt_regs->ax */

	/*
	 * Restore all flags except IF. (We restore IF separately because
	 * STI gives a one-instruction window in which we won't be interrupted,
	 * whereas POPF does not.)
	 */
	addl	$PT_EFLAGS-PT_DS, %esp	/* point esp at pt_regs->flags */
	btr	$X86_EFLAGS_IF_BIT, (%esp)
	popfl

	/*
	 * Return back to the vDSO, which will pop ecx and edx.
	 * Don't bother with DS and ES (they already contain __USER_DS).
	 */
	sti
	sysexit
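
/*
 * Note on the SYSEXIT contract: the CPU loads EIP from %edx and ESP from
 * %ecx (with CS/SS derived from the SYSENTER MSRs), which is why the
 * opportunistic exit above stashed pt_regs->ip in %edx and pt_regs->sp
 * in %ecx before popping the other registers.
 */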

.pushsection .fixup, "ax"
2:	movl	$0, PT_FS(%esp)
	jmp	1b
.popsection
	_ASM_EXTABLE(1b, 2b)
	PTGS_TO_GS_EX

.Lsysenter_fix_flags:
	pushl	$X86_EFLAGS_FIXED
	popfl
	jmp	.Lsysenter_flags_fixed
GLOBAL(__end_SYSENTER_singlestep_region)
ENDPROC(entry_SYSENTER_32)

/*
 * 32-bit legacy system call entry.
 *
 * 32-bit x86 Linux system calls traditionally used the INT $0x80
 * instruction.  INT $0x80 lands here.
 *
 * This entry point can be used by any 32-bit program to perform
 * system calls.  Instances of INT $0x80 can be found inline in various
 * programs and libraries.  It is also used by the vDSO's __kernel_vsyscall
 * fallback for hardware that doesn't support a faster entry method.
 * Restarted 32-bit system calls also fall back to INT $0x80
 * regardless of what instruction was originally used to do the system
 * call.  (64-bit programs can use INT $0x80 as well, but they can
 * only run on 64-bit kernels and therefore land in
 * entry_INT80_compat.)
 *
 * This is considered a slow path.  It is not used by most libc
 * implementations on modern hardware except during process startup.
 *
 * Arguments:
 * eax  system call number
 * ebx  arg1
 * ecx  arg2
 * edx  arg3
 * esi  arg4
 * edi  arg5
 * ebp  arg6
 */
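/*
 * A minimal user-space invocation, for reference (illustrative only):
 *
 *	movl	$1, %eax	# __NR_exit on 32-bit x86
 *	xorl	%ebx, %ebx	# exit status 0
 *	int	$0x80
 *
 * The syscall number goes in %eax and the return value comes back in
 * %eax; the entry code below pushes %eax as orig_ax so it can tell
 * "syscall number" apart from "saved ax" on the way out.
 */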
ENTRY(entry_INT80_32)
	ASM_CLAC
	pushl	%eax			/* pt_regs->orig_ax */
	SAVE_ALL pt_regs_ax=$-ENOSYS	/* save rest */

	/*
	 * User mode is traced as though IRQs are on, and the interrupt gate
	 * turned them off.
	 */
	TRACE_IRQS_OFF

	movl	%esp, %eax
	call	do_int80_syscall_32
.Lsyscall_32_done:

restore_all:
	TRACE_IRQS_IRET
.Lrestore_all_notrace:
#ifdef CONFIG_X86_ESPFIX32
	ALTERNATIVE	"jmp .Lrestore_nocheck", "", X86_BUG_ESPFIX

	movl	PT_EFLAGS(%esp), %eax		# mix EFLAGS, SS and CS
	/*
	 * Warning: PT_OLDSS(%esp) contains the wrong/random values if we
	 * are returning to the kernel.
	 * See comments in process.c:copy_thread() for details.
	 */
	movb	PT_OLDSS(%esp), %ah
	movb	PT_CS(%esp), %al
	andl	$(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
	cmpl	$((SEGMENT_LDT << 8) | USER_RPL), %eax
	je	.Lldt_ss			# returning to user-space with LDT SS
#endif
.Lrestore_nocheck:
	RESTORE_REGS 4				# skip orig_eax/error_code
.Lirq_return:
	INTERRUPT_RETURN

.section .fixup, "ax"
ENTRY(iret_exc)
	pushl	$0				# no error code
	pushl	$do_iret_error
	jmp	common_exception
.previous
	_ASM_EXTABLE(.Lirq_return, iret_exc)

#ifdef CONFIG_X86_ESPFIX32
.Lldt_ss:
/*
 * Setup and switch to ESPFIX stack
 *
 * We're returning to userspace with a 16 bit stack. The CPU will not
 * restore the high word of ESP for us on executing iret... This is an
 * "official" bug of all the x86-compatible CPUs, which we can work
 * around to make dosemu and wine happy. We do this by preloading the
 * high word of ESP with the high word of the userspace ESP while
 * compensating for the offset by changing to the ESPFIX segment with
 * a base address that matches for the difference.
 */
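/*
 * Worked example with illustrative numbers: if the kernel %esp is
 * 0x5a881234 and the user %esp was 0x00ef5678, the code below builds
 * %eax = 0x00ef1234 (user high word, kernel low word) and programs the
 * ESPFIX segment base to 0x5a881234 - 0x00ef1234 = 0x59990000, so that
 * base + new %esp still points at the kernel stack while %esp carries
 * the high word userspace expects after the 16-bit IRET.
 */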
#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
	mov	%esp, %edx			/* load kernel esp */
	mov	PT_OLDESP(%esp), %eax		/* load userspace esp */
	mov	%dx, %ax			/* eax: new kernel esp */
	sub	%eax, %edx			/* offset (low word is 0) */
	shr	$16, %edx
	mov	%dl, GDT_ESPFIX_SS + 4		/* bits 16..23 */
	mov	%dh, GDT_ESPFIX_SS + 7		/* bits 24..31 */
	pushl	$__ESPFIX_SS
	pushl	%eax				/* new kernel esp */
	/*
	 * Disable interrupts, but do not irqtrace this section: we
	 * will soon execute iret and the tracer was already set to
	 * the irqstate after the IRET:
	 */
	DISABLE_INTERRUPTS(CLBR_EAX)
	lss	(%esp), %esp			/* switch to espfix segment */
	jmp	.Lrestore_nocheck
#endif
ENDPROC(entry_INT80_32)

.macro FIXUP_ESPFIX_STACK
/*
 * Switch back from the ESPFIX stack to the normal zero-based stack
 *
 * We can't call C functions using the ESPFIX stack. This code reads
 * the high word of the segment base from the GDT and switches to the
 * normal stack and adjusts ESP with the matching offset.
 */
#ifdef CONFIG_X86_ESPFIX32
	/* fixup the stack */
	mov	GDT_ESPFIX_SS + 4, %al		/* bits 16..23 */
	mov	GDT_ESPFIX_SS + 7, %ah		/* bits 24..31 */
	shl	$16, %eax
	addl	%esp, %eax			/* the adjusted stack pointer */
	pushl	$__KERNEL_DS
	pushl	%eax
	lss	(%esp), %esp			/* switch to the normal stack segment */
#endif
.endm
.macro UNWIND_ESPFIX_STACK
#ifdef CONFIG_X86_ESPFIX32
	movl	%ss, %eax
	/* see if on espfix stack */
	cmpw	$__ESPFIX_SS, %ax
	jne	27f
	movl	$__KERNEL_DS, %eax
	movl	%eax, %ds
	movl	%eax, %es
	/* switch to normal stack */
	FIXUP_ESPFIX_STACK
27:
#endif
.endm

/*
 * Build the entry stubs with some assembler magic.
 * We pack 1 stub into every 8-byte block.
 */
	.align 8
ENTRY(irq_entries_start)
    vector=FIRST_EXTERNAL_VECTOR
    .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
	pushl	$(~vector+0x80)			/* Note: always in signed byte range */
    vector=vector+1
	jmp	common_interrupt
	.align	8
    .endr
END(irq_entries_start)
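
/*
 * Encoding note: for vector V the stub pushes (~V + 0x80), which always
 * fits in a signed byte and keeps each stub small enough for the 8-byte
 * slots above. common_interrupt then adds -0x80, leaving ~V (a value in
 * [-256, -1]) in orig_ax for do_IRQ to invert back into the vector number.
 */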

/*
 * the CPU automatically disables interrupts when executing an IRQ vector,
 * so IRQ-flags tracing has to follow that:
 */
	.p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt:
	ASM_CLAC
	addl	$-0x80, (%esp)			/* Adjust vector into the [-256, -1] range */
	SAVE_ALL
	TRACE_IRQS_OFF
	movl	%esp, %eax
	call	do_IRQ
	jmp	ret_from_intr
ENDPROC(common_interrupt)

#define BUILD_INTERRUPT3(name, nr, fn)	\
ENTRY(name)				\
	ASM_CLAC;			\
	pushl	$~(nr);			\
	SAVE_ALL;			\
	TRACE_IRQS_OFF			\
	movl	%esp, %eax;		\
	call	fn;			\
	jmp	ret_from_intr;		\
ENDPROC(name)


#ifdef CONFIG_TRACING
# define TRACE_BUILD_INTERRUPT(name, nr)	BUILD_INTERRUPT3(trace_##name, nr, smp_trace_##name)
#else
# define TRACE_BUILD_INTERRUPT(name, nr)
#endif

#define BUILD_INTERRUPT(name, nr)		\
	BUILD_INTERRUPT3(name, nr, smp_##name);	\
	TRACE_BUILD_INTERRUPT(name, nr)
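
/*
 * Example expansion (assuming the usual entry_arch.h usage):
 * BUILD_INTERRUPT(reschedule_interrupt, RESCHEDULE_VECTOR) emits an
 * ENTRY(reschedule_interrupt) stub that saves pt_regs and calls
 * smp_reschedule_interrupt(), plus a trace_reschedule_interrupt variant
 * when CONFIG_TRACING is enabled.
 */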
Tejun Heo | 02cf94c | 2009-01-21 17:26:06 +0900 | [diff] [blame] | 671 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 672 | /* The include is where all of the SMP etc. interrupts come from */ |
Ingo Molnar | 1164dd0 | 2009-01-28 19:34:09 +0100 | [diff] [blame] | 673 | #include <asm/entry_arch.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 674 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 675 | ENTRY(coprocessor_error) |
H. Peter Anvin | e59d1b0 | 2012-09-21 13:58:10 -0700 | [diff] [blame] | 676 | ASM_CLAC |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 677 | pushl $0 |
| 678 | pushl $do_coprocessor_error |
Josh Poimboeuf | 7252c4c | 2016-09-21 16:04:00 -0500 | [diff] [blame] | 679 | jmp common_exception |
Jan Beulich | 47a55cd | 2007-02-13 13:26:24 +0100 | [diff] [blame] | 680 | END(coprocessor_error) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 681 | |
| 682 | ENTRY(simd_coprocessor_error) |
H. Peter Anvin | e59d1b0 | 2012-09-21 13:58:10 -0700 | [diff] [blame] | 683 | ASM_CLAC |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 684 | pushl $0 |
Brian Gerst | 40d2e76 | 2010-03-21 09:00:43 -0400 | [diff] [blame] | 685 | #ifdef CONFIG_X86_INVD_BUG |
| 686 | /* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */ |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 687 | ALTERNATIVE "pushl $do_general_protection", \ |
| 688 | "pushl $do_simd_coprocessor_error", \ |
Borislav Petkov | 8e65f6e | 2015-01-18 12:35:55 +0100 | [diff] [blame] | 689 | X86_FEATURE_XMM |
Brian Gerst | 40d2e76 | 2010-03-21 09:00:43 -0400 | [diff] [blame] | 690 | #else |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 691 | pushl $do_simd_coprocessor_error |
Brian Gerst | 40d2e76 | 2010-03-21 09:00:43 -0400 | [diff] [blame] | 692 | #endif |
Josh Poimboeuf | 7252c4c | 2016-09-21 16:04:00 -0500 | [diff] [blame] | 693 | jmp common_exception |
Jan Beulich | 47a55cd | 2007-02-13 13:26:24 +0100 | [diff] [blame] | 694 | END(simd_coprocessor_error) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 695 | |
| 696 | ENTRY(device_not_available) |
H. Peter Anvin | e59d1b0 | 2012-09-21 13:58:10 -0700 | [diff] [blame] | 697 | ASM_CLAC |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 698 | pushl $-1 # mark this as an int |
| 699 | pushl $do_device_not_available |
Josh Poimboeuf | 7252c4c | 2016-09-21 16:04:00 -0500 | [diff] [blame] | 700 | jmp common_exception |
Jan Beulich | 47a55cd | 2007-02-13 13:26:24 +0100 | [diff] [blame] | 701 | END(device_not_available) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 702 | |
Rusty Russell | d3561b7 | 2006-12-07 02:14:07 +0100 | [diff] [blame] | 703 | #ifdef CONFIG_PARAVIRT |
| 704 | ENTRY(native_iret) |
Ingo Molnar | 3701d863 | 2008-02-09 23:24:08 +0100 | [diff] [blame] | 705 | iret |
H. Peter Anvin | 6837a54 | 2012-04-20 12:19:50 -0700 | [diff] [blame] | 706 | _ASM_EXTABLE(native_iret, iret_exc) |
Jan Beulich | 47a55cd | 2007-02-13 13:26:24 +0100 | [diff] [blame] | 707 | END(native_iret) |
Rusty Russell | d3561b7 | 2006-12-07 02:14:07 +0100 | [diff] [blame] | 708 | #endif |
| 709 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 710 | ENTRY(overflow) |
H. Peter Anvin | e59d1b0 | 2012-09-21 13:58:10 -0700 | [diff] [blame] | 711 | ASM_CLAC |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 712 | pushl $0 |
| 713 | pushl $do_overflow |
Josh Poimboeuf | 7252c4c | 2016-09-21 16:04:00 -0500 | [diff] [blame] | 714 | jmp common_exception |
Jan Beulich | 47a55cd | 2007-02-13 13:26:24 +0100 | [diff] [blame] | 715 | END(overflow) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 716 | |
| 717 | ENTRY(bounds) |
H. Peter Anvin | e59d1b0 | 2012-09-21 13:58:10 -0700 | [diff] [blame] | 718 | ASM_CLAC |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 719 | pushl $0 |
| 720 | pushl $do_bounds |
Josh Poimboeuf | 7252c4c | 2016-09-21 16:04:00 -0500 | [diff] [blame] | 721 | jmp common_exception |
Jan Beulich | 47a55cd | 2007-02-13 13:26:24 +0100 | [diff] [blame] | 722 | END(bounds) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 723 | |
| 724 | ENTRY(invalid_op) |
H. Peter Anvin | e59d1b0 | 2012-09-21 13:58:10 -0700 | [diff] [blame] | 725 | ASM_CLAC |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 726 | pushl $0 |
| 727 | pushl $do_invalid_op |
Josh Poimboeuf | 7252c4c | 2016-09-21 16:04:00 -0500 | [diff] [blame] | 728 | jmp common_exception |
Jan Beulich | 47a55cd | 2007-02-13 13:26:24 +0100 | [diff] [blame] | 729 | END(invalid_op) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 730 | |
| 731 | ENTRY(coprocessor_segment_overrun) |
H. Peter Anvin | e59d1b0 | 2012-09-21 13:58:10 -0700 | [diff] [blame] | 732 | ASM_CLAC |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 733 | pushl $0 |
| 734 | pushl $do_coprocessor_segment_overrun |
Josh Poimboeuf | 7252c4c | 2016-09-21 16:04:00 -0500 | [diff] [blame] | 735 | jmp common_exception |
Jan Beulich | 47a55cd | 2007-02-13 13:26:24 +0100 | [diff] [blame] | 736 | END(coprocessor_segment_overrun) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 737 | |
| 738 | ENTRY(invalid_TSS) |
H. Peter Anvin | e59d1b0 | 2012-09-21 13:58:10 -0700 | [diff] [blame] | 739 | ASM_CLAC |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 740 | pushl $do_invalid_TSS |
Josh Poimboeuf | 7252c4c | 2016-09-21 16:04:00 -0500 | [diff] [blame] | 741 | jmp common_exception |
Jan Beulich | 47a55cd | 2007-02-13 13:26:24 +0100 | [diff] [blame] | 742 | END(invalid_TSS) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 743 | |
| 744 | ENTRY(segment_not_present) |
H. Peter Anvin | e59d1b0 | 2012-09-21 13:58:10 -0700 | [diff] [blame] | 745 | ASM_CLAC |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 746 | pushl $do_segment_not_present |
Josh Poimboeuf | 7252c4c | 2016-09-21 16:04:00 -0500 | [diff] [blame] | 747 | jmp common_exception |
Jan Beulich | 47a55cd | 2007-02-13 13:26:24 +0100 | [diff] [blame] | 748 | END(segment_not_present) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 749 | |
| 750 | ENTRY(stack_segment) |
H. Peter Anvin | e59d1b0 | 2012-09-21 13:58:10 -0700 | [diff] [blame] | 751 | ASM_CLAC |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 752 | pushl $do_stack_segment |
Josh Poimboeuf | 7252c4c | 2016-09-21 16:04:00 -0500 | [diff] [blame] | 753 | jmp common_exception |
Jan Beulich | 47a55cd | 2007-02-13 13:26:24 +0100 | [diff] [blame] | 754 | END(stack_segment) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 755 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 756 | ENTRY(alignment_check) |
H. Peter Anvin | e59d1b0 | 2012-09-21 13:58:10 -0700 | [diff] [blame] | 757 | ASM_CLAC |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 758 | pushl $do_alignment_check |
Josh Poimboeuf | 7252c4c | 2016-09-21 16:04:00 -0500 | [diff] [blame] | 759 | jmp common_exception |
Jan Beulich | 47a55cd | 2007-02-13 13:26:24 +0100 | [diff] [blame] | 760 | END(alignment_check) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 761 | |
Prasanna S.P | d28c439 | 2006-09-26 10:52:34 +0200 | [diff] [blame] | 762 | ENTRY(divide_error) |
H. Peter Anvin | e59d1b0 | 2012-09-21 13:58:10 -0700 | [diff] [blame] | 763 | ASM_CLAC |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 764 | pushl $0 # no error code |
| 765 | pushl $do_divide_error |
Josh Poimboeuf | 7252c4c | 2016-09-21 16:04:00 -0500 | [diff] [blame] | 766 | jmp common_exception |
Jan Beulich | 47a55cd | 2007-02-13 13:26:24 +0100 | [diff] [blame] | 767 | END(divide_error) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 768 | |
| 769 | #ifdef CONFIG_X86_MCE |
| 770 | ENTRY(machine_check) |
H. Peter Anvin | e59d1b0 | 2012-09-21 13:58:10 -0700 | [diff] [blame] | 771 | ASM_CLAC |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 772 | pushl $0 |
| 773 | pushl machine_check_vector |
Josh Poimboeuf | 7252c4c | 2016-09-21 16:04:00 -0500 | [diff] [blame] | 774 | jmp common_exception |
Jan Beulich | 47a55cd | 2007-02-13 13:26:24 +0100 | [diff] [blame] | 775 | END(machine_check) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 776 | #endif |
| 777 | |
| 778 | ENTRY(spurious_interrupt_bug) |
H. Peter Anvin | e59d1b0 | 2012-09-21 13:58:10 -0700 | [diff] [blame] | 779 | ASM_CLAC |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 780 | pushl $0 |
| 781 | pushl $do_spurious_interrupt_bug |
Josh Poimboeuf | 7252c4c | 2016-09-21 16:04:00 -0500 | [diff] [blame] | 782 | jmp common_exception |
Jan Beulich | 47a55cd | 2007-02-13 13:26:24 +0100 | [diff] [blame] | 783 | END(spurious_interrupt_bug) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 784 | |
Jeremy Fitzhardinge | 5ead97c | 2007-07-17 18:37:04 -0700 | [diff] [blame] | 785 | #ifdef CONFIG_XEN |
| 786 | ENTRY(xen_hypervisor_callback) |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 787 | pushl $-1 /* orig_ax = -1 => not a system call */ |
Jeremy Fitzhardinge | 5ead97c | 2007-07-17 18:37:04 -0700 | [diff] [blame] | 788 | SAVE_ALL |
| 789 | TRACE_IRQS_OFF |
Jeremy Fitzhardinge | 9ec2b80 | 2007-07-17 18:37:07 -0700 | [diff] [blame] | 790 | |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 791 | /* |
| 792 | * Check to see if we got the event in the critical |
| 793 | * region in xen_iret_direct, after we've reenabled |
| 794 | * events and checked for pending events. This simulates |
| 795 | * iret instruction's behaviour where it delivers a |
| 796 | * pending interrupt when enabling interrupts: |
| 797 | */ |
| 798 | movl PT_EIP(%esp), %eax |
| 799 | cmpl $xen_iret_start_crit, %eax |
| 800 | jb 1f |
| 801 | cmpl $xen_iret_end_crit, %eax |
| 802 | jae 1f |
Jeremy Fitzhardinge | 9ec2b80 | 2007-07-17 18:37:07 -0700 | [diff] [blame] | 803 | |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 804 | jmp xen_iret_crit_fixup |
Jeremy Fitzhardinge | 9ec2b80 | 2007-07-17 18:37:07 -0700 | [diff] [blame] | 805 | |
Jeremy Fitzhardinge | e2a81ba | 2008-03-17 16:37:17 -0700 | [diff] [blame] | 806 | ENTRY(xen_do_upcall) |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 807 | 1: mov %esp, %eax |
| 808 | call xen_evtchn_do_upcall |
David Vrabel | fdfd811 | 2015-02-19 15:23:17 +0000 | [diff] [blame] | 809 | #ifndef CONFIG_PREEMPT |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 810 | call xen_maybe_preempt_hcall |
David Vrabel | fdfd811 | 2015-02-19 15:23:17 +0000 | [diff] [blame] | 811 | #endif |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 812 | jmp ret_from_intr |
Jeremy Fitzhardinge | 5ead97c | 2007-07-17 18:37:04 -0700 | [diff] [blame] | 813 | ENDPROC(xen_hypervisor_callback) |
| 814 | |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 815 | /* |
| 816 | * Hypervisor uses this for application faults while it executes. |
| 817 | * We get here for two reasons: |
| 818 | * 1. Fault while reloading DS, ES, FS or GS |
| 819 | * 2. Fault while executing IRET |
| 820 | * Category 1 we fix up by reattempting the load, and zeroing the segment |
| 821 | * register if the load fails. |
| 822 | * Category 2 we fix up by jumping to do_iret_error. We cannot use the |
| 823 | * normal Linux return path in this case because if we use the IRET hypercall |
| 824 | * to pop the stack frame we end up in an infinite loop of failsafe callbacks. |
| 825 | * We distinguish between categories by maintaining a status value in EAX. |
| 826 | */ |
Jeremy Fitzhardinge | 5ead97c | 2007-07-17 18:37:04 -0700 | [diff] [blame] | 827 | ENTRY(xen_failsafe_callback) |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 828 | pushl %eax |
| 829 | movl $1, %eax |
| 830 | 1: mov 4(%esp), %ds |
| 831 | 2: mov 8(%esp), %es |
| 832 | 3: mov 12(%esp), %fs |
| 833 | 4: mov 16(%esp), %gs |
David Vrabel | a349e23d1 | 2012-10-19 17:29:07 +0100 | [diff] [blame] | 834 | /* EAX == 0 => Category 1 (Bad segment) |
| 835 | EAX != 0 => Category 2 (Bad IRET) */ |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 836 | testl %eax, %eax |
| 837 | popl %eax |
| 838 | lea 16(%esp), %esp |
| 839 | jz 5f |
| 840 | jmp iret_exc |
| 841 | 5: pushl $-1 /* orig_ax = -1 => not a system call */ |
Jeremy Fitzhardinge | 5ead97c | 2007-07-17 18:37:04 -0700 | [diff] [blame] | 842 | SAVE_ALL |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 843 | jmp ret_from_exception |
Jeremy Fitzhardinge | 5ead97c | 2007-07-17 18:37:04 -0700 | [diff] [blame] | 844 | |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 845 | .section .fixup, "ax" |
| 846 | 6: xorl %eax, %eax |
| 847 | movl %eax, 4(%esp) |
| 848 | jmp 1b |
| 849 | 7: xorl %eax, %eax |
| 850 | movl %eax, 8(%esp) |
| 851 | jmp 2b |
| 852 | 8: xorl %eax, %eax |
| 853 | movl %eax, 12(%esp) |
| 854 | jmp 3b |
| 855 | 9: xorl %eax, %eax |
| 856 | movl %eax, 16(%esp) |
| 857 | jmp 4b |
Jeremy Fitzhardinge | 5ead97c | 2007-07-17 18:37:04 -0700 | [diff] [blame] | 858 | .previous |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 859 | _ASM_EXTABLE(1b, 6b) |
| 860 | _ASM_EXTABLE(2b, 7b) |
| 861 | _ASM_EXTABLE(3b, 8b) |
| 862 | _ASM_EXTABLE(4b, 9b) |
Jeremy Fitzhardinge | 5ead97c | 2007-07-17 18:37:04 -0700 | [diff] [blame] | 863 | ENDPROC(xen_failsafe_callback) |
| 864 | |
K. Y. Srinivasan | bc2b033 | 2013-02-03 17:22:39 -0800 | [diff] [blame] | 865 | BUILD_INTERRUPT3(xen_hvm_callback_vector, HYPERVISOR_CALLBACK_VECTOR, |
Sheng Yang | 38e20b0 | 2010-05-14 12:40:51 +0100 | [diff] [blame] | 866 | xen_evtchn_do_upcall) |
| 867 | |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 868 | #endif /* CONFIG_XEN */ |
Jeremy Fitzhardinge | 5ead97c | 2007-07-17 18:37:04 -0700 | [diff] [blame] | 869 | |
K. Y. Srinivasan | bc2b033 | 2013-02-03 17:22:39 -0800 | [diff] [blame] | 870 | #if IS_ENABLED(CONFIG_HYPERV) |
| 871 | |
| 872 | BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR, |
| 873 | hyperv_vector_handler) |
| 874 | |
| 875 | #endif /* CONFIG_HYPERV */ |
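/*
 * In both hypervisor cases above, BUILD_INTERRUPT3 (defined earlier in
 * this file) expands to an entry stub that saves a pt_regs frame and
 * calls the named C handler for the given vector.
 */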
| 876 | |
Steven Rostedt | 606576c | 2008-10-06 19:06:12 -0400 | [diff] [blame] | 877 | #ifdef CONFIG_FUNCTION_TRACER |
Steven Rostedt | d61f82d | 2008-05-12 21:20:43 +0200 | [diff] [blame] | 878 | #ifdef CONFIG_DYNAMIC_FTRACE |
| 879 | |
| 880 | ENTRY(mcount) |
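	/*
	 * With CONFIG_DYNAMIC_FTRACE, mcount call sites are patched at
	 * runtime (NOPs when idle, calls to ftrace_caller while tracing),
	 * so the default mcount can simply return.
	 */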
Steven Rostedt | d61f82d | 2008-05-12 21:20:43 +0200 | [diff] [blame] | 881 | ret |
| 882 | END(mcount) |
| 883 | |
| 884 | ENTRY(ftrace_caller) |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 885 | pushl %eax |
| 886 | pushl %ecx |
| 887 | pushl %edx |
| 888 | pushl $0 /* Pass NULL as regs pointer */ |
	movl	4*4(%esp), %eax		/* mcount's return address: ip in the traced function */
	movl	0x4(%ebp), %edx		/* parent's return address */
	movl	function_trace_op, %ecx	/* current ftrace_ops (3rd parameter) */
	subl	$MCOUNT_INSN_SIZE, %eax	/* point %eax back at the call site */
Steven Rostedt | d61f82d | 2008-05-12 21:20:43 +0200 | [diff] [blame] | 893 | |
| 894 | .globl ftrace_call |
| 895 | ftrace_call: |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 896 | call ftrace_stub |
Steven Rostedt | d61f82d | 2008-05-12 21:20:43 +0200 | [diff] [blame] | 897 | |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 898 | addl $4, %esp /* skip NULL pointer */ |
| 899 | popl %edx |
| 900 | popl %ecx |
| 901 | popl %eax |
Josh Poimboeuf | 1b00255 | 2016-09-21 16:03:59 -0500 | [diff] [blame] | 902 | .Lftrace_ret: |
Steven Rostedt | 5a45cfe | 2008-11-26 00:16:24 -0500 | [diff] [blame] | 903 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
| 904 | .globl ftrace_graph_call |
| 905 | ftrace_graph_call: |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 906 | jmp ftrace_stub |
Steven Rostedt | 5a45cfe | 2008-11-26 00:16:24 -0500 | [diff] [blame] | 907 | #endif |
Steven Rostedt | d61f82d | 2008-05-12 21:20:43 +0200 | [diff] [blame] | 908 | |
| 909 | .globl ftrace_stub |
| 910 | ftrace_stub: |
| 911 | ret |
| 912 | END(ftrace_caller) |
| 913 | |
Steven Rostedt | 4de7239 | 2012-06-05 20:00:11 -0400 | [diff] [blame] | 914 | ENTRY(ftrace_regs_caller) |
	pushf				/* push flags first; they land in the cs slot for now */
Steven Rostedt | 4de7239 | 2012-06-05 20:00:11 -0400 | [diff] [blame] | 916 | |
| 917 | /* |
| 918 | * i386 does not save SS and ESP when coming from kernel. |
 * Instead, to get sp, &regs->sp is used (see ptrace.h).
| 920 | * Unfortunately, that means eflags must be at the same location |
| 921 | * as the current return ip is. We move the return ip into the |
| 922 | * ip location, and move flags into the return ip location. |
| 923 | */ |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 924 | pushl 4(%esp) /* save return ip into ip slot */ |
Steven Rostedt | 4de7239 | 2012-06-05 20:00:11 -0400 | [diff] [blame] | 925 | |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 926 | pushl $0 /* Load 0 into orig_ax */ |
| 927 | pushl %gs |
| 928 | pushl %fs |
| 929 | pushl %es |
| 930 | pushl %ds |
| 931 | pushl %eax |
| 932 | pushl %ebp |
| 933 | pushl %edi |
| 934 | pushl %esi |
| 935 | pushl %edx |
| 936 | pushl %ecx |
| 937 | pushl %ebx |
Steven Rostedt | 4de7239 | 2012-06-05 20:00:11 -0400 | [diff] [blame] | 938 | |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 939 | movl 13*4(%esp), %eax /* Get the saved flags */ |
| 940 | movl %eax, 14*4(%esp) /* Move saved flags into regs->flags location */ |
| 941 | /* clobbering return ip */ |
| 942 | movl $__KERNEL_CS, 13*4(%esp) |
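	/*
	 * Sketch of the frame at this point, assuming the 32-bit pt_regs
	 * layout (offsets in 4-byte words from %esp):
	 *
	 *	12*4(%esp): return ip		(regs->ip)
	 *	13*4(%esp): __KERNEL_CS		(regs->cs)
	 *	14*4(%esp): saved flags		(regs->flags)
	 */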
Steven Rostedt | 4de7239 | 2012-06-05 20:00:11 -0400 | [diff] [blame] | 943 | |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 944 | movl 12*4(%esp), %eax /* Load ip (1st parameter) */ |
| 945 | subl $MCOUNT_INSN_SIZE, %eax /* Adjust ip */ |
| 946 | movl 0x4(%ebp), %edx /* Load parent ip (2nd parameter) */ |
	movl	function_trace_op, %ecx	/* Save ftrace_ops in 3rd parameter */
| 948 | pushl %esp /* Save pt_regs as 4th parameter */ |
Steven Rostedt | 4de7239 | 2012-06-05 20:00:11 -0400 | [diff] [blame] | 949 | |
| 950 | GLOBAL(ftrace_regs_call) |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 951 | call ftrace_stub |
Steven Rostedt | 4de7239 | 2012-06-05 20:00:11 -0400 | [diff] [blame] | 952 | |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 953 | addl $4, %esp /* Skip pt_regs */ |
| 954 | movl 14*4(%esp), %eax /* Move flags back into cs */ |
| 955 | movl %eax, 13*4(%esp) /* Needed to keep addl from modifying flags */ |
| 956 | movl 12*4(%esp), %eax /* Get return ip from regs->ip */ |
| 957 | movl %eax, 14*4(%esp) /* Put return ip back for ret */ |
Steven Rostedt | 4de7239 | 2012-06-05 20:00:11 -0400 | [diff] [blame] | 958 | |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 959 | popl %ebx |
| 960 | popl %ecx |
| 961 | popl %edx |
| 962 | popl %esi |
| 963 | popl %edi |
| 964 | popl %ebp |
| 965 | popl %eax |
| 966 | popl %ds |
| 967 | popl %es |
| 968 | popl %fs |
| 969 | popl %gs |
| 970 | addl $8, %esp /* Skip orig_ax and ip */ |
| 971 | popf /* Pop flags at end (no addl to corrupt flags) */ |
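	/* Only the original return ip remains; .Lftrace_ret ends in a ret. */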
Josh Poimboeuf | 1b00255 | 2016-09-21 16:03:59 -0500 | [diff] [blame] | 972 | jmp .Lftrace_ret |
Steven Rostedt | 4de7239 | 2012-06-05 20:00:11 -0400 | [diff] [blame] | 973 | |
	/* Unreachable: nothing jumps here; leftover from an earlier exit path. */
	popf
	jmp	ftrace_stub
Steven Rostedt | d61f82d | 2008-05-12 21:20:43 +0200 | [diff] [blame] | 976 | #else /* ! CONFIG_DYNAMIC_FTRACE */ |
| 977 | |
Arnaldo Carvalho de Melo | 16444a8 | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 978 | ENTRY(mcount) |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 979 | cmpl $__PAGE_OFFSET, %esp |
| 980 | jb ftrace_stub /* Paging not enabled yet? */ |
H. Peter Anvin | af058ab | 2013-08-30 17:29:29 -0700 | [diff] [blame] | 981 | |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 982 | cmpl $ftrace_stub, ftrace_trace_function |
Josh Poimboeuf | 1b00255 | 2016-09-21 16:03:59 -0500 | [diff] [blame] | 983 | jnz .Ltrace |
Frederic Weisbecker | fb52607 | 2008-11-25 21:07:04 +0100 | [diff] [blame] | 984 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 985 | cmpl $ftrace_stub, ftrace_graph_return |
| 986 | jnz ftrace_graph_caller |
Steven Rostedt | e49dc19 | 2008-12-02 23:50:05 -0500 | [diff] [blame] | 987 | |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 988 | cmpl $ftrace_graph_entry_stub, ftrace_graph_entry |
| 989 | jnz ftrace_graph_caller |
Frederic Weisbecker | caf4b32 | 2008-11-11 07:03:45 +0100 | [diff] [blame] | 990 | #endif |
Arnaldo Carvalho de Melo | 16444a8 | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 991 | .globl ftrace_stub |
| 992 | ftrace_stub: |
| 993 | ret |
| 994 | |
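/*
 * Rough C equivalent of the dispatch above (a sketch, not kernel code):
 *
 *	if (ftrace_trace_function != ftrace_stub)
 *		goto trace;
 *	if (ftrace_graph_return != ftrace_stub ||
 *	    ftrace_graph_entry != ftrace_graph_entry_stub)
 *		goto ftrace_graph_caller;
 *	return;
 */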
| 995 | /* taken from glibc */ |
Josh Poimboeuf | 1b00255 | 2016-09-21 16:03:59 -0500 | [diff] [blame] | 996 | .Ltrace: |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 997 | pushl %eax |
| 998 | pushl %ecx |
| 999 | pushl %edx |
| 1000 | movl 0xc(%esp), %eax |
| 1001 | movl 0x4(%ebp), %edx |
| 1002 | subl $MCOUNT_INSN_SIZE, %eax |
Arnaldo Carvalho de Melo | 16444a8 | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1003 | |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 1004 | call *ftrace_trace_function |
Arnaldo Carvalho de Melo | 16444a8 | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1005 | |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 1006 | popl %edx |
| 1007 | popl %ecx |
| 1008 | popl %eax |
| 1009 | jmp ftrace_stub |
Arnaldo Carvalho de Melo | 16444a8 | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1010 | END(mcount) |
Steven Rostedt | d61f82d | 2008-05-12 21:20:43 +0200 | [diff] [blame] | 1011 | #endif /* CONFIG_DYNAMIC_FTRACE */ |
Al Viro | 784d569 | 2016-01-11 11:04:34 -0500 | [diff] [blame] | 1012 | EXPORT_SYMBOL(mcount) |
Steven Rostedt | 606576c | 2008-10-06 19:06:12 -0400 | [diff] [blame] | 1013 | #endif /* CONFIG_FUNCTION_TRACER */ |
Arnaldo Carvalho de Melo | 16444a8 | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1014 | |
Frederic Weisbecker | fb52607 | 2008-11-25 21:07:04 +0100 | [diff] [blame] | 1015 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
| 1016 | ENTRY(ftrace_graph_caller) |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 1017 | pushl %eax |
| 1018 | pushl %ecx |
| 1019 | pushl %edx |
	movl	0xc(%esp), %eax		/* mcount's return address (self ip) */
	lea	0x4(%ebp), %edx		/* address of the parent return-address slot */
	movl	(%ebp), %ecx		/* the caller's frame pointer */
	subl	$MCOUNT_INSN_SIZE, %eax	/* adjust to the call site */
| 1024 | call prepare_ftrace_return |
| 1025 | popl %edx |
| 1026 | popl %ecx |
| 1027 | popl %eax |
Frederic Weisbecker | e7d3737 | 2008-11-16 06:02:06 +0100 | [diff] [blame] | 1028 | ret |
Frederic Weisbecker | fb52607 | 2008-11-25 21:07:04 +0100 | [diff] [blame] | 1029 | END(ftrace_graph_caller) |
Frederic Weisbecker | caf4b32 | 2008-11-11 07:03:45 +0100 | [diff] [blame] | 1030 | |
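/*
 * Every traced function's return address was redirected here by
 * prepare_ftrace_return(); ftrace_return_to_handler() hands back the
 * original return address, which we then jump to.
 */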
| 1031 | .globl return_to_handler |
| 1032 | return_to_handler: |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 1033 | pushl %eax |
| 1034 | pushl %edx |
| 1035 | movl %ebp, %eax |
| 1036 | call ftrace_return_to_handler |
| 1037 | movl %eax, %ecx |
| 1038 | popl %edx |
| 1039 | popl %eax |
| 1040 | jmp *%ecx |
Frederic Weisbecker | e7d3737 | 2008-11-16 06:02:06 +0100 | [diff] [blame] | 1041 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1042 | |
Seiji Aguchi | 25c74b1 | 2013-10-30 16:37:00 -0400 | [diff] [blame] | 1043 | #ifdef CONFIG_TRACING |
| 1044 | ENTRY(trace_page_fault) |
Seiji Aguchi | 25c74b1 | 2013-10-30 16:37:00 -0400 | [diff] [blame] | 1045 | ASM_CLAC |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 1046 | pushl $trace_do_page_fault |
Josh Poimboeuf | 7252c4c | 2016-09-21 16:04:00 -0500 | [diff] [blame] | 1047 | jmp common_exception |
Seiji Aguchi | 25c74b1 | 2013-10-30 16:37:00 -0400 | [diff] [blame] | 1048 | END(trace_page_fault) |
| 1049 | #endif |
| 1050 | |
Alexander van Heukelum | d211af0 | 2008-11-24 15:38:45 +0100 | [diff] [blame] | 1051 | ENTRY(page_fault) |
H. Peter Anvin | e59d1b0 | 2012-09-21 13:58:10 -0700 | [diff] [blame] | 1052 | ASM_CLAC |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 1053 | pushl $do_page_fault |
Alexander van Heukelum | d211af0 | 2008-11-24 15:38:45 +0100 | [diff] [blame] | 1054 | ALIGN |
Josh Poimboeuf | 7252c4c | 2016-09-21 16:04:00 -0500 | [diff] [blame] | 1055 | jmp common_exception |
| 1056 | END(page_fault) |
| 1057 | |
| 1058 | common_exception: |
Tejun Heo | ccbeed3 | 2009-02-09 22:17:40 +0900 | [diff] [blame] | 1059 | /* the function address is in %gs's slot on the stack */ |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 1060 | pushl %fs |
| 1061 | pushl %es |
| 1062 | pushl %ds |
| 1063 | pushl %eax |
| 1064 | pushl %ebp |
| 1065 | pushl %edi |
| 1066 | pushl %esi |
| 1067 | pushl %edx |
| 1068 | pushl %ecx |
| 1069 | pushl %ebx |
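	/*
	 * Together with the hardware frame, the error code in the orig_eax
	 * slot and the handler address parked in the gs slot, the pushes
	 * above complete a struct pt_regs.
	 */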
Alexander van Heukelum | d211af0 | 2008-11-24 15:38:45 +0100 | [diff] [blame] | 1070 | cld |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 1071 | movl $(__KERNEL_PERCPU), %ecx |
| 1072 | movl %ecx, %fs |
Alexander van Heukelum | d211af0 | 2008-11-24 15:38:45 +0100 | [diff] [blame] | 1073 | UNWIND_ESPFIX_STACK |
Tejun Heo | ccbeed3 | 2009-02-09 22:17:40 +0900 | [diff] [blame] | 1074 | GS_TO_REG %ecx |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 1075 | movl PT_GS(%esp), %edi # get the function address |
| 1076 | movl PT_ORIG_EAX(%esp), %edx # get the error code |
| 1077 | movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart |
Tejun Heo | ccbeed3 | 2009-02-09 22:17:40 +0900 | [diff] [blame] | 1078 | REG_TO_PTGS %ecx |
| 1079 | SET_KERNEL_GS %ecx |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 1080 | movl $(__USER_DS), %ecx |
| 1081 | movl %ecx, %ds |
| 1082 | movl %ecx, %es |
Alexander van Heukelum | d211af0 | 2008-11-24 15:38:45 +0100 | [diff] [blame] | 1083 | TRACE_IRQS_OFF |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 1084 | movl %esp, %eax # pt_regs pointer |
| 1085 | call *%edi |
| 1086 | jmp ret_from_exception |
Josh Poimboeuf | 7252c4c | 2016-09-21 16:04:00 -0500 | [diff] [blame] | 1087 | END(common_exception) |
Alexander van Heukelum | d211af0 | 2008-11-24 15:38:45 +0100 | [diff] [blame] | 1088 | |
Alexander van Heukelum | d211af0 | 2008-11-24 15:38:45 +0100 | [diff] [blame] | 1089 | ENTRY(debug) |
Andy Lutomirski | 7536656 | 2016-03-09 19:00:32 -0800 | [diff] [blame] | 1090 | /* |
| 1091 | * #DB can happen at the first instruction of |
| 1092 | * entry_SYSENTER_32 or in Xen's SYSENTER prologue. If this |
| 1093 | * happens, then we will be running on a very small stack. We |
| 1094 | * need to detect this condition and switch to the thread |
| 1095 | * stack before calling any C code at all. |
| 1096 | * |
| 1097 | * If you edit this code, keep in mind that NMIs can happen in here. |
| 1098 | */ |
H. Peter Anvin | e59d1b0 | 2012-09-21 13:58:10 -0700 | [diff] [blame] | 1099 | ASM_CLAC |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 1100 | pushl $-1 # mark this as an int |
Alexander van Heukelum | d211af0 | 2008-11-24 15:38:45 +0100 | [diff] [blame] | 1101 | SAVE_ALL |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 1102 | xorl %edx, %edx # error code 0 |
| 1103 | movl %esp, %eax # pt_regs pointer |
Andy Lutomirski | 7536656 | 2016-03-09 19:00:32 -0800 | [diff] [blame] | 1104 | |
| 1105 | /* Are we currently on the SYSENTER stack? */ |
| 1106 | PER_CPU(cpu_tss + CPU_TSS_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx) |
| 1107 | subl %eax, %ecx /* ecx = (end of SYSENTER_stack) - esp */ |
| 1108 | cmpl $SIZEOF_SYSENTER_stack, %ecx |
| 1109 | jb .Ldebug_from_sysenter_stack |
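	/*
	 * The unsigned comparison above checks whether (stack_end - %esp)
	 * falls in [0, SIZEOF_SYSENTER_stack), i.e. whether %esp currently
	 * points into the SYSENTER stack.
	 */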
| 1110 | |
| 1111 | TRACE_IRQS_OFF |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 1112 | call do_debug |
| 1113 | jmp ret_from_exception |
Andy Lutomirski | 7536656 | 2016-03-09 19:00:32 -0800 | [diff] [blame] | 1114 | |
| 1115 | .Ldebug_from_sysenter_stack: |
| 1116 | /* We're on the SYSENTER stack. Switch off. */ |
| 1117 | movl %esp, %ebp |
| 1118 | movl PER_CPU_VAR(cpu_current_top_of_stack), %esp |
| 1119 | TRACE_IRQS_OFF |
| 1120 | call do_debug |
| 1121 | movl %ebp, %esp |
| 1122 | jmp ret_from_exception |
Alexander van Heukelum | d211af0 | 2008-11-24 15:38:45 +0100 | [diff] [blame] | 1123 | END(debug) |
| 1124 | |
| 1125 | /* |
Andy Lutomirski | 7536656 | 2016-03-09 19:00:32 -0800 | [diff] [blame] | 1126 | * NMI is doubly nasty. It can happen on the first instruction of |
| 1127 | * entry_SYSENTER_32 (just like #DB), but it can also interrupt the beginning |
| 1128 | * of the #DB handler even if that #DB in turn hit before entry_SYSENTER_32 |
| 1129 | * switched stacks. We handle both conditions by simply checking whether we |
| 1130 | * interrupted kernel code running on the SYSENTER stack. |
Alexander van Heukelum | d211af0 | 2008-11-24 15:38:45 +0100 | [diff] [blame] | 1131 | */ |
| 1132 | ENTRY(nmi) |
H. Peter Anvin | e59d1b0 | 2012-09-21 13:58:10 -0700 | [diff] [blame] | 1133 | ASM_CLAC |
H. Peter Anvin | 34273f4 | 2014-05-04 10:36:22 -0700 | [diff] [blame] | 1134 | #ifdef CONFIG_X86_ESPFIX32 |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 1135 | pushl %eax |
| 1136 | movl %ss, %eax |
| 1137 | cmpw $__ESPFIX_SS, %ax |
| 1138 | popl %eax |
Josh Poimboeuf | 1b00255 | 2016-09-21 16:03:59 -0500 | [diff] [blame] | 1139 | je .Lnmi_espfix_stack |
H. Peter Anvin | 34273f4 | 2014-05-04 10:36:22 -0700 | [diff] [blame] | 1140 | #endif |
Andy Lutomirski | 7536656 | 2016-03-09 19:00:32 -0800 | [diff] [blame] | 1141 | |
| 1142 | pushl %eax # pt_regs->orig_ax |
Alexander van Heukelum | d211af0 | 2008-11-24 15:38:45 +0100 | [diff] [blame] | 1143 | SAVE_ALL |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 1144 | xorl %edx, %edx # zero error code |
| 1145 | movl %esp, %eax # pt_regs pointer |
Andy Lutomirski | 7536656 | 2016-03-09 19:00:32 -0800 | [diff] [blame] | 1146 | |
| 1147 | /* Are we currently on the SYSENTER stack? */ |
| 1148 | PER_CPU(cpu_tss + CPU_TSS_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx) |
| 1149 | subl %eax, %ecx /* ecx = (end of SYSENTER_stack) - esp */ |
| 1150 | cmpl $SIZEOF_SYSENTER_stack, %ecx |
| 1151 | jb .Lnmi_from_sysenter_stack |
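	/* Same unsigned range check as in the #DB handler above. */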
| 1152 | |
| 1153 | /* Not on SYSENTER stack. */ |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 1154 | call do_nmi |
Josh Poimboeuf | 1b00255 | 2016-09-21 16:03:59 -0500 | [diff] [blame] | 1155 | jmp .Lrestore_all_notrace |
Alexander van Heukelum | d211af0 | 2008-11-24 15:38:45 +0100 | [diff] [blame] | 1156 | |
Andy Lutomirski | 7536656 | 2016-03-09 19:00:32 -0800 | [diff] [blame] | 1157 | .Lnmi_from_sysenter_stack: |
| 1158 | /* |
| 1159 | * We're on the SYSENTER stack. Switch off. No one (not even debug) |
| 1160 | * is using the thread stack right now, so it's safe for us to use it. |
| 1161 | */ |
| 1162 | movl %esp, %ebp |
| 1163 | movl PER_CPU_VAR(cpu_current_top_of_stack), %esp |
| 1164 | call do_nmi |
| 1165 | movl %ebp, %esp |
Josh Poimboeuf | 1b00255 | 2016-09-21 16:03:59 -0500 | [diff] [blame] | 1166 | jmp .Lrestore_all_notrace |
Alexander van Heukelum | d211af0 | 2008-11-24 15:38:45 +0100 | [diff] [blame] | 1167 | |
H. Peter Anvin | 34273f4 | 2014-05-04 10:36:22 -0700 | [diff] [blame] | 1168 | #ifdef CONFIG_X86_ESPFIX32 |
Josh Poimboeuf | 1b00255 | 2016-09-21 16:03:59 -0500 | [diff] [blame] | 1169 | .Lnmi_espfix_stack: |
	/*
	 * Build the %esp/%ss pair that the 'lss' below will use to switch
	 * back to the espfix stack.
	 */
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 1173 | pushl %ss |
| 1174 | pushl %esp |
| 1175 | addl $4, (%esp) |
	/* copy the 12-byte iret frame (eip, cs, eflags) */
| 1177 | .rept 3 |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 1178 | pushl 16(%esp) |
Alexander van Heukelum | d211af0 | 2008-11-24 15:38:45 +0100 | [diff] [blame] | 1179 | .endr |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 1180 | pushl %eax |
Alexander van Heukelum | d211af0 | 2008-11-24 15:38:45 +0100 | [diff] [blame] | 1181 | SAVE_ALL |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 1182 | FIXUP_ESPFIX_STACK # %eax == %esp |
| 1183 | xorl %edx, %edx # zero error code |
| 1184 | call do_nmi |
Alexander van Heukelum | d211af0 | 2008-11-24 15:38:45 +0100 | [diff] [blame] | 1185 | RESTORE_REGS |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 1186 | lss 12+4(%esp), %esp # back to espfix stack |
Josh Poimboeuf | 1b00255 | 2016-09-21 16:03:59 -0500 | [diff] [blame] | 1187 | jmp .Lirq_return |
H. Peter Anvin | 34273f4 | 2014-05-04 10:36:22 -0700 | [diff] [blame] | 1188 | #endif |
Alexander van Heukelum | d211af0 | 2008-11-24 15:38:45 +0100 | [diff] [blame] | 1189 | END(nmi) |
| 1190 | |
| 1191 | ENTRY(int3) |
H. Peter Anvin | e59d1b0 | 2012-09-21 13:58:10 -0700 | [diff] [blame] | 1192 | ASM_CLAC |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 1193 | pushl $-1 # mark this as an int |
Alexander van Heukelum | d211af0 | 2008-11-24 15:38:45 +0100 | [diff] [blame] | 1194 | SAVE_ALL |
| 1195 | TRACE_IRQS_OFF |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 1196 | xorl %edx, %edx # zero error code |
| 1197 | movl %esp, %eax # pt_regs pointer |
| 1198 | call do_int3 |
| 1199 | jmp ret_from_exception |
Alexander van Heukelum | d211af0 | 2008-11-24 15:38:45 +0100 | [diff] [blame] | 1200 | END(int3) |
| 1201 | |
| 1202 | ENTRY(general_protection) |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 1203 | pushl $do_general_protection |
Josh Poimboeuf | 7252c4c | 2016-09-21 16:04:00 -0500 | [diff] [blame] | 1204 | jmp common_exception |
Alexander van Heukelum | d211af0 | 2008-11-24 15:38:45 +0100 | [diff] [blame] | 1205 | END(general_protection) |
| 1206 | |
Gleb Natapov | 631bc48 | 2010-10-14 11:22:52 +0200 | [diff] [blame] | 1207 | #ifdef CONFIG_KVM_GUEST |
| 1208 | ENTRY(async_page_fault) |
H. Peter Anvin | e59d1b0 | 2012-09-21 13:58:10 -0700 | [diff] [blame] | 1209 | ASM_CLAC |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 1210 | pushl $do_async_page_fault |
Josh Poimboeuf | 7252c4c | 2016-09-21 16:04:00 -0500 | [diff] [blame] | 1211 | jmp common_exception |
Sedat Dilek | 2ae9d29 | 2011-03-08 22:39:24 +0100 | [diff] [blame] | 1212 | END(async_page_fault) |
Gleb Natapov | 631bc48 | 2010-10-14 11:22:52 +0200 | [diff] [blame] | 1213 | #endif |
Andy Lutomirski | 2deb4be | 2016-07-14 13:22:55 -0700 | [diff] [blame] | 1214 | |
| 1215 | ENTRY(rewind_stack_do_exit) |
| 1216 | /* Prevent any naive code from trying to unwind to our caller. */ |
| 1217 | xorl %ebp, %ebp |
| 1218 | |
| 1219 | movl PER_CPU_VAR(cpu_current_top_of_stack), %esi |
| 1220 | leal -TOP_OF_KERNEL_STACK_PADDING-PTREGS_SIZE(%esi), %esp |
| 1221 | |
| 1222 | call do_exit |
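	/* do_exit() never returns; the loop below is a just-in-case guard. */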
| 1223 | 1: jmp 1b |
| 1224 | END(rewind_stack_do_exit) |