/*
 * Copyright (C) 1991,1992 Linus Torvalds
 *
 * entry_32.S contains the system-call and low-level fault and trap handling routines.
 *
 * Stack layout in 'syscall_exit':
 *	ptrace needs to have all registers on the stack.
 *	If the order here is changed, it needs to be
 *	updated in fork.c:copy_process(), signal.c:do_signal(),
 *	ptrace.c and ptrace.h
 *
 *	 0(%esp) - %ebx
 *	 4(%esp) - %ecx
 *	 8(%esp) - %edx
 *	 C(%esp) - %esi
 *	10(%esp) - %edi
 *	14(%esp) - %ebp
 *	18(%esp) - %eax
 *	1C(%esp) - %ds
 *	20(%esp) - %es
 *	24(%esp) - %fs
 *	28(%esp) - %gs		saved iff !CONFIG_X86_32_LAZY_GS
 *	2C(%esp) - orig_eax
 *	30(%esp) - %eip
 *	34(%esp) - %cs
 *	38(%esp) - %eflags
 *	3C(%esp) - %oldesp
 *	40(%esp) - %oldss
 */

#include <linux/linkage.h>
#include <linux/err.h>
#include <asm/thread_info.h>
#include <asm/irqflags.h>
#include <asm/errno.h>
#include <asm/segment.h>
#include <asm/smp.h>
#include <asm/page_types.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>
#include <asm/ftrace.h>
#include <asm/irq_vectors.h>
#include <asm/cpufeature.h>
#include <asm/alternative-asm.h>
#include <asm/asm.h>
#include <asm/smap.h>

/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
#include <linux/elf-em.h>
#define AUDIT_ARCH_I386		(EM_386|__AUDIT_ARCH_LE)
#define __AUDIT_ARCH_LE		0x40000000

#ifndef CONFIG_AUDITSYSCALL
# define sysenter_audit		syscall_trace_entry
# define sysexit_audit		syscall_exit_work
#endif

	.section .entry.text, "ax"

/*
 * We use macros for low-level operations which need to be overridden
 * for paravirtualization. The following will never clobber any registers:
 *   INTERRUPT_RETURN (aka. "iret")
 *   GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
 *   ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
 *
 * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
 * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
 * Allowing a register to be clobbered can shrink the paravirt replacement
 * enough to patch inline, increasing performance.
 */

#ifdef CONFIG_PREEMPT
# define preempt_stop(clobbers)	DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
#else
# define preempt_stop(clobbers)
# define resume_kernel		restore_all
#endif

.macro TRACE_IRQS_IRET
#ifdef CONFIG_TRACE_IRQFLAGS
	testl $X86_EFLAGS_IF, PT_EFLAGS(%esp)	# interrupts off?
	jz 1f
	TRACE_IRQS_ON
1:
#endif
.endm

/*
 * User gs save/restore
 *
 * %gs is used for userland TLS; the kernel uses it only for the stack
 * canary, which gcc requires to live at %gs:20. Read the comment
 * at the top of stackprotector.h for more info.
 *
 * Local labels 98 and 99 are used.
 */
#ifdef CONFIG_X86_32_LAZY_GS

/* unfortunately push/pop can't be no-op */
.macro PUSH_GS
	pushl $0
.endm
.macro POP_GS pop=0
	addl $(4 + \pop), %esp
.endm
.macro POP_GS_EX
.endm

/* all the rest are no-op */
.macro PTGS_TO_GS
.endm
.macro PTGS_TO_GS_EX
.endm
.macro GS_TO_REG reg
.endm
.macro REG_TO_PTGS reg
.endm
.macro SET_KERNEL_GS reg
.endm

#else /* CONFIG_X86_32_LAZY_GS */

.macro PUSH_GS
	pushl %gs
.endm

.macro POP_GS pop=0
98:	popl %gs
	.if \pop <> 0
	add $\pop, %esp
	.endif
.endm
.macro POP_GS_EX
.pushsection .fixup, "ax"
99:	movl $0, (%esp)
	jmp 98b
.popsection
	_ASM_EXTABLE(98b, 99b)
.endm

.macro PTGS_TO_GS
98:	mov PT_GS(%esp), %gs
.endm
.macro PTGS_TO_GS_EX
.pushsection .fixup, "ax"
99:	movl $0, PT_GS(%esp)
	jmp 98b
.popsection
	_ASM_EXTABLE(98b, 99b)
.endm

.macro GS_TO_REG reg
	movl %gs, \reg
.endm
.macro REG_TO_PTGS reg
	movl \reg, PT_GS(%esp)
.endm
.macro SET_KERNEL_GS reg
	movl $(__KERNEL_STACK_CANARY), \reg
	movl \reg, %gs
.endm

#endif /* CONFIG_X86_32_LAZY_GS */

.macro SAVE_ALL
	cld
	PUSH_GS
	pushl %fs
	pushl %es
	pushl %ds
	pushl %eax
	pushl %ebp
	pushl %edi
	pushl %esi
	pushl %edx
	pushl %ecx
	pushl %ebx
	movl $(__USER_DS), %edx
	movl %edx, %ds
	movl %edx, %es
	movl $(__KERNEL_PERCPU), %edx
	movl %edx, %fs
	SET_KERNEL_GS %edx
.endm

.macro RESTORE_INT_REGS
	popl %ebx
	popl %ecx
	popl %edx
	popl %esi
	popl %edi
	popl %ebp
	popl %eax
.endm

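/*
 * RESTORE_REGS pops the general registers in the reverse of the SAVE_ALL
 * push order and then restores the user segment registers.  The .fixup
 * entries (labels 4, 5 and 6) zero the word on the stack and retry the
 * pop, so a bad user %ds/%es/%fs is silently cleared instead of faulting
 * the kernel.
 */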
.macro RESTORE_REGS pop=0
	RESTORE_INT_REGS
1:	popl %ds
2:	popl %es
3:	popl %fs
	POP_GS \pop
.pushsection .fixup, "ax"
4:	movl $0, (%esp)
	jmp 1b
5:	movl $0, (%esp)
	jmp 2b
6:	movl $0, (%esp)
	jmp 3b
.popsection
	_ASM_EXTABLE(1b, 4b)
	_ASM_EXTABLE(2b, 5b)
	_ASM_EXTABLE(3b, 6b)
	POP_GS_EX
.endm

ENTRY(ret_from_fork)
	pushl %eax
	call schedule_tail
	GET_THREAD_INFO(%ebp)
	popl %eax
	pushl $0x0202			# Reset kernel eflags
	popfl
	jmp syscall_exit
END(ret_from_fork)

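/*
 * A kernel thread resumes here: the function pointer saved in PT_EBX is
 * called with the argument from PT_EBP in %eax, then we leave through
 * syscall_exit with a zeroed "return value" in PT_EAX.
 */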
ENTRY(ret_from_kernel_thread)
	pushl %eax
	call schedule_tail
	GET_THREAD_INFO(%ebp)
	popl %eax
	pushl $0x0202			# Reset kernel eflags
	popfl
	movl PT_EBP(%esp), %eax
	call *PT_EBX(%esp)
	movl $0, PT_EAX(%esp)
	jmp syscall_exit
ENDPROC(ret_from_kernel_thread)

/*
 * Return to user mode is not as complex as all this looks,
 * but we want the default path for a system call return to
 * go as quickly as possible which is why some of this is
 * less clear than it otherwise should be.
 */

	# userspace resumption stub bypassing syscall exit tracing
	ALIGN
ret_from_exception:
	preempt_stop(CLBR_ANY)
ret_from_intr:
	GET_THREAD_INFO(%ebp)
#ifdef CONFIG_VM86
	movl PT_EFLAGS(%esp), %eax	# mix EFLAGS and CS
	movb PT_CS(%esp), %al
	andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
#else
	/*
	 * We can be coming here from a child spawned by kernel_thread().
	 */
	movl PT_CS(%esp), %eax
	andl $SEGMENT_RPL_MASK, %eax
#endif
	cmpl $USER_RPL, %eax
	jb resume_kernel		# not returning to v8086 or userspace

ENTRY(resume_userspace)
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done on
					# int/exception return?
	jne work_pending
	jmp restore_all
END(ret_from_exception)

#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
	DISABLE_INTERRUPTS(CLBR_ANY)
need_resched:
	cmpl $0, PER_CPU_VAR(__preempt_count)
	jnz restore_all
	testl $X86_EFLAGS_IF, PT_EFLAGS(%esp)	# interrupts off (exception path) ?
	jz restore_all
	call preempt_schedule_irq
	jmp need_resched
END(resume_kernel)
#endif

/*
 * SYSENTER_RETURN points to after the SYSENTER instruction
 * in the vsyscall page. See vsyscall-sysenter.S, which defines
 * the symbol.
 */

	# SYSENTER call handler stub
ENTRY(entry_SYSENTER_32)
	movl TSS_sysenter_sp0(%esp), %esp
sysenter_past_esp:
	/*
	 * Interrupts are disabled here, but we can't trace that until
	 * we have set up enough kernel state to call TRACE_IRQS_OFF - and
	 * we immediately enable interrupts at that point anyway.
	 */
	pushl $__USER_DS
	pushl %ebp
	pushfl
	orl $X86_EFLAGS_IF, (%esp)
	pushl $__USER_CS
	/*
	 * Push current_thread_info()->sysenter_return to the stack.
	 * A tiny bit of offset fixup is necessary: TI_sysenter_return
	 * is relative to thread_info, which is at the bottom of the
	 * kernel stack page.  4*4 means the 4 words pushed above;
	 * TOP_OF_KERNEL_STACK_PADDING takes us to the top of the stack;
	 * and THREAD_SIZE takes us to the bottom.
	 */
	pushl ((TI_sysenter_return) - THREAD_SIZE + TOP_OF_KERNEL_STACK_PADDING + 4*4)(%esp)

	pushl %eax
	SAVE_ALL
	ENABLE_INTERRUPTS(CLBR_NONE)

/*
 * Load the potential sixth argument from user stack.
 * Careful about security.
 */
	cmpl $__PAGE_OFFSET-3, %ebp
	jae syscall_fault
	ASM_STAC
1:	movl (%ebp), %ebp
	ASM_CLAC
	movl %ebp, PT_EBP(%esp)
	_ASM_EXTABLE(1b, syscall_fault)

	GET_THREAD_INFO(%ebp)

	testl $_TIF_WORK_SYSCALL_ENTRY, TI_flags(%ebp)
	jnz sysenter_audit
sysenter_do_call:
	cmpl $(NR_syscalls), %eax
	jae sysenter_badsys
	call *sys_call_table(, %eax, 4)
sysenter_after_call:
	movl %eax, PT_EAX(%esp)
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	testl $_TIF_ALLWORK_MASK, %ecx
	jnz sysexit_audit
sysenter_exit:
/* if something modifies registers it must also disable sysexit */
	movl PT_EIP(%esp), %edx
	movl PT_OLDESP(%esp), %ecx
	xorl %ebp, %ebp
	TRACE_IRQS_ON
1:	mov PT_FS(%esp), %fs
	PTGS_TO_GS
	ENABLE_INTERRUPTS_SYSEXIT

#ifdef CONFIG_AUDITSYSCALL
sysenter_audit:
	testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT), TI_flags(%ebp)
	jnz syscall_trace_entry
	/* movl PT_EAX(%esp), %eax already set, syscall number: 1st arg to audit */
	movl PT_EBX(%esp), %edx		/* ebx/a0: 2nd arg to audit */
	/* movl PT_ECX(%esp), %ecx already set, a1: 3rd arg to audit */
	pushl PT_ESI(%esp)		/* a3: 5th arg */
	pushl PT_EDX+4(%esp)		/* a2: 4th arg */
	call __audit_syscall_entry
	popl %ecx			/* get that remapped edx off the stack */
	popl %ecx			/* get that remapped esi off the stack */
	movl PT_EAX(%esp), %eax		/* reload syscall number */
	jmp sysenter_do_call

sysexit_audit:
	testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
	jnz syscall_exit_work
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_ANY)
	movl %eax, %edx			/* second arg, syscall return value */
	cmpl $-MAX_ERRNO, %eax		/* is it an error ? */
	setbe %al			/* 1 if so, 0 if not */
	movzbl %al, %eax		/* zero-extend that */
	call __audit_syscall_exit
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
	jnz syscall_exit_work
	movl PT_EAX(%esp), %eax		/* reload syscall return value */
	jmp sysenter_exit
#endif

.pushsection .fixup, "ax"
2:	movl $0, PT_FS(%esp)
	jmp 1b
.popsection
	_ASM_EXTABLE(1b, 2b)
	PTGS_TO_GS_EX
ENDPROC(entry_SYSENTER_32)

	# system call handler stub
ENTRY(entry_INT80_32)
	ASM_CLAC
	pushl %eax			# save orig_eax
	SAVE_ALL
	GET_THREAD_INFO(%ebp)
					# system call tracing in operation / emulation
	testl $_TIF_WORK_SYSCALL_ENTRY, TI_flags(%ebp)
	jnz syscall_trace_entry
	cmpl $(NR_syscalls), %eax
	jae syscall_badsys
syscall_call:
	call *sys_call_table(, %eax, 4)
syscall_after_call:
	movl %eax, PT_EAX(%esp)		# store the return value
syscall_exit:
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	testl $_TIF_ALLWORK_MASK, %ecx	# current->work
	jnz syscall_exit_work

restore_all:
	TRACE_IRQS_IRET
restore_all_notrace:
#ifdef CONFIG_X86_ESPFIX32
	movl PT_EFLAGS(%esp), %eax	# mix EFLAGS, SS and CS
	/*
	 * Warning: PT_OLDSS(%esp) contains the wrong/random values if we
	 * are returning to the kernel.
	 * See comments in process.c:copy_thread() for details.
	 */
	movb PT_OLDSS(%esp), %ah
	movb PT_CS(%esp), %al
	andl $(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
	cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
	je ldt_ss			# returning to user-space with LDT SS
#endif
restore_nocheck:
	RESTORE_REGS 4			# skip orig_eax/error_code
irq_return:
	INTERRUPT_RETURN
.section .fixup, "ax"
ENTRY(iret_exc)
	pushl $0			# no error code
	pushl $do_iret_error
	jmp error_code
.previous
	_ASM_EXTABLE(irq_return, iret_exc)

#ifdef CONFIG_X86_ESPFIX32
ldt_ss:
#ifdef CONFIG_PARAVIRT
	/*
	 * The kernel can't run on a non-flat stack if paravirt mode
	 * is active. Rather than try to fixup the high bits of
	 * ESP, bypass this code entirely. This may break DOSemu
	 * and/or Wine support in a paravirt VM, although the option
	 * is still available to implement the setting of the high
	 * 16-bits in the INTERRUPT_RETURN paravirt-op.
	 */
	cmpl $0, pv_info+PARAVIRT_enabled
	jne restore_nocheck
#endif

/*
 * Setup and switch to ESPFIX stack
 *
 * We're returning to userspace with a 16 bit stack. The CPU will not
 * restore the high word of ESP for us on executing iret... This is an
 * "official" bug of all the x86-compatible CPUs, which we can work
 * around to make dosemu and wine happy. We do this by preloading the
 * high word of ESP with the high word of the userspace ESP while
 * compensating for the offset by changing to the ESPFIX segment with
 * a base address that matches for the difference.
 */
#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
	mov %esp, %edx			/* load kernel esp */
	mov PT_OLDESP(%esp), %eax	/* load userspace esp */
	mov %dx, %ax			/* eax: new kernel esp */
	sub %eax, %edx			/* offset (low word is 0) */
	shr $16, %edx
	mov %dl, GDT_ESPFIX_SS + 4	/* bits 16..23 */
	mov %dh, GDT_ESPFIX_SS + 7	/* bits 24..31 */
	pushl $__ESPFIX_SS
	pushl %eax			/* new kernel esp */
	/*
	 * Disable interrupts, but do not irqtrace this section: we
	 * will soon execute iret and the tracer was already set to
	 * the irqstate after the IRET:
	 */
	DISABLE_INTERRUPTS(CLBR_EAX)
	lss (%esp), %esp		/* switch to espfix segment */
	jmp restore_nocheck
#endif
ENDPROC(entry_INT80_32)

	# perform work that needs to be done immediately before resumption
	ALIGN
work_pending:
	testb $_TIF_NEED_RESCHED, %cl
	jz work_notifysig
work_resched:
	call schedule
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done other
					# than syscall tracing?
	jz restore_all
	testb $_TIF_NEED_RESCHED, %cl
	jnz work_resched

work_notifysig:				# deal with pending signals and
					# notify-resume requests
#ifdef CONFIG_VM86
	testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
	movl %esp, %eax
	jnz work_notifysig_v86		# returning to kernel-space or
					# vm86-space
1:
#else
	movl %esp, %eax
#endif
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	movb PT_CS(%esp), %bl
	andb $SEGMENT_RPL_MASK, %bl
	cmpb $USER_RPL, %bl
	jb resume_kernel
	xorl %edx, %edx
	call do_notify_resume
	jmp resume_userspace

#ifdef CONFIG_VM86
	ALIGN
work_notifysig_v86:
	pushl %ecx			# save ti_flags for do_notify_resume
	call save_v86_state		# %eax contains pt_regs pointer
	popl %ecx
	movl %eax, %esp
	jmp 1b
#endif
END(work_pending)

| 558 | # perform syscall exit tracing |
	ALIGN
syscall_trace_entry:
	movl $-ENOSYS, PT_EAX(%esp)
	movl %esp, %eax
	call syscall_trace_enter
	/* What it returned is what we'll actually use. */
	cmpl $(NR_syscalls), %eax
	jnae syscall_call
	jmp syscall_exit
END(syscall_trace_entry)

	# perform syscall exit tracing
	ALIGN
syscall_exit_work:
	testl $_TIF_WORK_SYSCALL_EXIT, %ecx
	jz work_pending
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_ANY)	# could let syscall_trace_leave() call
					# schedule() instead
	movl %esp, %eax
	call syscall_trace_leave
	jmp resume_userspace
END(syscall_exit_work)

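/*
 * Reached when the sixth SYSENTER argument cannot be read from the user
 * stack (see the range check and exception-table entry after
 * sysenter_past_esp): fail the system call with -EFAULT.
 */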
syscall_fault:
	ASM_CLAC
	GET_THREAD_INFO(%ebp)
	movl $-EFAULT, PT_EAX(%esp)
	jmp resume_userspace
END(syscall_fault)

syscall_badsys:
	movl $-ENOSYS, %eax
	jmp syscall_after_call
END(syscall_badsys)

sysenter_badsys:
	movl $-ENOSYS, %eax
	jmp sysenter_after_call
END(sysenter_badsys)

.macro FIXUP_ESPFIX_STACK
/*
 * Switch back from the ESPFIX stack to the normal zero-based stack.
 *
 * We can't call C functions using the ESPFIX stack. This code reads
 * the high word of the segment base from the GDT, switches to the
 * normal stack and adjusts ESP with the matching offset.
 */
#ifdef CONFIG_X86_ESPFIX32
	/* fixup the stack */
	mov GDT_ESPFIX_SS + 4, %al	/* bits 16..23 */
	mov GDT_ESPFIX_SS + 7, %ah	/* bits 24..31 */
	shl $16, %eax
	addl %esp, %eax			/* the adjusted stack pointer */
	pushl $__KERNEL_DS
	pushl %eax
	lss (%esp), %esp		/* switch to the normal stack segment */
#endif
.endm
.macro UNWIND_ESPFIX_STACK
#ifdef CONFIG_X86_ESPFIX32
	movl %ss, %eax
	/* see if on espfix stack */
	cmpw $__ESPFIX_SS, %ax
	jne 27f
	movl $__KERNEL_DS, %eax
	movl %eax, %ds
	movl %eax, %es
	/* switch to normal stack */
	FIXUP_ESPFIX_STACK
27:
#endif
.endm

/*
 * Build the entry stubs with some assembler magic.
 * We pack 1 stub into every 8-byte block.
 */
	.align 8
ENTRY(irq_entries_start)
    vector=FIRST_EXTERNAL_VECTOR
    .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
	pushl $(~vector+0x80)		/* Note: always in signed byte range */
    vector=vector+1
	jmp common_interrupt
	.align 8
    .endr
END(irq_entries_start)

/*
 * the CPU automatically disables interrupts when executing an IRQ vector,
 * so IRQ-flags tracing has to follow that:
 */
	.p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt:
	ASM_CLAC
	addl $-0x80, (%esp)		/* Adjust vector into the [-256, -1] range */
	SAVE_ALL
	TRACE_IRQS_OFF
	movl %esp, %eax
	call do_IRQ
	jmp ret_from_intr
ENDPROC(common_interrupt)

#define BUILD_INTERRUPT3(name, nr, fn)	\
ENTRY(name)				\
	ASM_CLAC;			\
	pushl $~(nr);			\
	SAVE_ALL;			\
	TRACE_IRQS_OFF			\
	movl %esp, %eax;		\
	call fn;			\
	jmp ret_from_intr;		\
ENDPROC(name)


#ifdef CONFIG_TRACING
# define TRACE_BUILD_INTERRUPT(name, nr)	BUILD_INTERRUPT3(trace_##name, nr, smp_trace_##name)
#else
# define TRACE_BUILD_INTERRUPT(name, nr)
#endif

#define BUILD_INTERRUPT(name, nr)		\
	BUILD_INTERRUPT3(name, nr, smp_##name);	\
	TRACE_BUILD_INTERRUPT(name, nr)

/* The include is where all of the SMP etc. interrupts come from */
#include <asm/entry_arch.h>

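/*
 * Low-level exception entry points.  Each stub pushes a dummy error code
 * when the CPU does not supply one, then pushes the address of its C
 * handler and jumps to the common error_code path.
 */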
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 689 | ENTRY(coprocessor_error) |
H. Peter Anvin | e59d1b0 | 2012-09-21 13:58:10 -0700 | [diff] [blame] | 690 | ASM_CLAC |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 691 | pushl $0 |
| 692 | pushl $do_coprocessor_error |
| 693 | jmp error_code |
Jan Beulich | 47a55cd | 2007-02-13 13:26:24 +0100 | [diff] [blame] | 694 | END(coprocessor_error) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 695 | |
| 696 | ENTRY(simd_coprocessor_error) |
H. Peter Anvin | e59d1b0 | 2012-09-21 13:58:10 -0700 | [diff] [blame] | 697 | ASM_CLAC |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 698 | pushl $0 |
Brian Gerst | 40d2e76 | 2010-03-21 09:00:43 -0400 | [diff] [blame] | 699 | #ifdef CONFIG_X86_INVD_BUG |
| 700 | /* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */ |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 701 | ALTERNATIVE "pushl $do_general_protection", \ |
| 702 | "pushl $do_simd_coprocessor_error", \ |
Borislav Petkov | 8e65f6e | 2015-01-18 12:35:55 +0100 | [diff] [blame] | 703 | X86_FEATURE_XMM |
Brian Gerst | 40d2e76 | 2010-03-21 09:00:43 -0400 | [diff] [blame] | 704 | #else |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 705 | pushl $do_simd_coprocessor_error |
Brian Gerst | 40d2e76 | 2010-03-21 09:00:43 -0400 | [diff] [blame] | 706 | #endif |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 707 | jmp error_code |
Jan Beulich | 47a55cd | 2007-02-13 13:26:24 +0100 | [diff] [blame] | 708 | END(simd_coprocessor_error) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 709 | |
| 710 | ENTRY(device_not_available) |
H. Peter Anvin | e59d1b0 | 2012-09-21 13:58:10 -0700 | [diff] [blame] | 711 | ASM_CLAC |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 712 | pushl $-1 # mark this as an int |
| 713 | pushl $do_device_not_available |
| 714 | jmp error_code |
Jan Beulich | 47a55cd | 2007-02-13 13:26:24 +0100 | [diff] [blame] | 715 | END(device_not_available) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 716 | |
Rusty Russell | d3561b7 | 2006-12-07 02:14:07 +0100 | [diff] [blame] | 717 | #ifdef CONFIG_PARAVIRT |
| 718 | ENTRY(native_iret) |
Ingo Molnar | 3701d863 | 2008-02-09 23:24:08 +0100 | [diff] [blame] | 719 | iret |
H. Peter Anvin | 6837a54 | 2012-04-20 12:19:50 -0700 | [diff] [blame] | 720 | _ASM_EXTABLE(native_iret, iret_exc) |
Jan Beulich | 47a55cd | 2007-02-13 13:26:24 +0100 | [diff] [blame] | 721 | END(native_iret) |
Rusty Russell | d3561b7 | 2006-12-07 02:14:07 +0100 | [diff] [blame] | 722 | |
Jeremy Fitzhardinge | d75cd22f | 2008-06-25 00:19:26 -0400 | [diff] [blame] | 723 | ENTRY(native_irq_enable_sysexit) |
Rusty Russell | d3561b7 | 2006-12-07 02:14:07 +0100 | [diff] [blame] | 724 | sti |
| 725 | sysexit |
Jeremy Fitzhardinge | d75cd22f | 2008-06-25 00:19:26 -0400 | [diff] [blame] | 726 | END(native_irq_enable_sysexit) |
Rusty Russell | d3561b7 | 2006-12-07 02:14:07 +0100 | [diff] [blame] | 727 | #endif |
| 728 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 729 | ENTRY(overflow) |
H. Peter Anvin | e59d1b0 | 2012-09-21 13:58:10 -0700 | [diff] [blame] | 730 | ASM_CLAC |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 731 | pushl $0 |
| 732 | pushl $do_overflow |
| 733 | jmp error_code |
Jan Beulich | 47a55cd | 2007-02-13 13:26:24 +0100 | [diff] [blame] | 734 | END(overflow) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 735 | |
| 736 | ENTRY(bounds) |
H. Peter Anvin | e59d1b0 | 2012-09-21 13:58:10 -0700 | [diff] [blame] | 737 | ASM_CLAC |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 738 | pushl $0 |
| 739 | pushl $do_bounds |
| 740 | jmp error_code |
Jan Beulich | 47a55cd | 2007-02-13 13:26:24 +0100 | [diff] [blame] | 741 | END(bounds) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 742 | |
| 743 | ENTRY(invalid_op) |
H. Peter Anvin | e59d1b0 | 2012-09-21 13:58:10 -0700 | [diff] [blame] | 744 | ASM_CLAC |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 745 | pushl $0 |
| 746 | pushl $do_invalid_op |
| 747 | jmp error_code |
Jan Beulich | 47a55cd | 2007-02-13 13:26:24 +0100 | [diff] [blame] | 748 | END(invalid_op) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 749 | |
| 750 | ENTRY(coprocessor_segment_overrun) |
H. Peter Anvin | e59d1b0 | 2012-09-21 13:58:10 -0700 | [diff] [blame] | 751 | ASM_CLAC |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 752 | pushl $0 |
| 753 | pushl $do_coprocessor_segment_overrun |
| 754 | jmp error_code |
Jan Beulich | 47a55cd | 2007-02-13 13:26:24 +0100 | [diff] [blame] | 755 | END(coprocessor_segment_overrun) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 756 | |
| 757 | ENTRY(invalid_TSS) |
H. Peter Anvin | e59d1b0 | 2012-09-21 13:58:10 -0700 | [diff] [blame] | 758 | ASM_CLAC |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 759 | pushl $do_invalid_TSS |
| 760 | jmp error_code |
Jan Beulich | 47a55cd | 2007-02-13 13:26:24 +0100 | [diff] [blame] | 761 | END(invalid_TSS) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 762 | |
| 763 | ENTRY(segment_not_present) |
H. Peter Anvin | e59d1b0 | 2012-09-21 13:58:10 -0700 | [diff] [blame] | 764 | ASM_CLAC |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 765 | pushl $do_segment_not_present |
| 766 | jmp error_code |
Jan Beulich | 47a55cd | 2007-02-13 13:26:24 +0100 | [diff] [blame] | 767 | END(segment_not_present) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 768 | |
| 769 | ENTRY(stack_segment) |
H. Peter Anvin | e59d1b0 | 2012-09-21 13:58:10 -0700 | [diff] [blame] | 770 | ASM_CLAC |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 771 | pushl $do_stack_segment |
| 772 | jmp error_code |
Jan Beulich | 47a55cd | 2007-02-13 13:26:24 +0100 | [diff] [blame] | 773 | END(stack_segment) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 774 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 775 | ENTRY(alignment_check) |
H. Peter Anvin | e59d1b0 | 2012-09-21 13:58:10 -0700 | [diff] [blame] | 776 | ASM_CLAC |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 777 | pushl $do_alignment_check |
| 778 | jmp error_code |
Jan Beulich | 47a55cd | 2007-02-13 13:26:24 +0100 | [diff] [blame] | 779 | END(alignment_check) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 780 | |
Prasanna S.P | d28c439 | 2006-09-26 10:52:34 +0200 | [diff] [blame] | 781 | ENTRY(divide_error) |
H. Peter Anvin | e59d1b0 | 2012-09-21 13:58:10 -0700 | [diff] [blame] | 782 | ASM_CLAC |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 783 | pushl $0 # no error code |
| 784 | pushl $do_divide_error |
| 785 | jmp error_code |
Jan Beulich | 47a55cd | 2007-02-13 13:26:24 +0100 | [diff] [blame] | 786 | END(divide_error) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 787 | |
| 788 | #ifdef CONFIG_X86_MCE |
| 789 | ENTRY(machine_check) |
H. Peter Anvin | e59d1b0 | 2012-09-21 13:58:10 -0700 | [diff] [blame] | 790 | ASM_CLAC |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 791 | pushl $0 |
| 792 | pushl machine_check_vector |
| 793 | jmp error_code |
Jan Beulich | 47a55cd | 2007-02-13 13:26:24 +0100 | [diff] [blame] | 794 | END(machine_check) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 795 | #endif |
| 796 | |
| 797 | ENTRY(spurious_interrupt_bug) |
H. Peter Anvin | e59d1b0 | 2012-09-21 13:58:10 -0700 | [diff] [blame] | 798 | ASM_CLAC |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 799 | pushl $0 |
| 800 | pushl $do_spurious_interrupt_bug |
| 801 | jmp error_code |
Jan Beulich | 47a55cd | 2007-02-13 13:26:24 +0100 | [diff] [blame] | 802 | END(spurious_interrupt_bug) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 803 | |
Jeremy Fitzhardinge | 5ead97c | 2007-07-17 18:37:04 -0700 | [diff] [blame] | 804 | #ifdef CONFIG_XEN |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 805 | /* |
| 806 | * Xen doesn't set %esp to be precisely what the normal SYSENTER |
| 807 | * entry point expects, so fix it up before using the normal path. |
| 808 | */ |
Jeremy Fitzhardinge | e2a81ba | 2008-03-17 16:37:17 -0700 | [diff] [blame] | 809 | ENTRY(xen_sysenter_target) |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 810 | addl $5*4, %esp /* remove xen-provided frame */ |
| 811 | jmp sysenter_past_esp |
Jeremy Fitzhardinge | e2a81ba | 2008-03-17 16:37:17 -0700 | [diff] [blame] | 812 | |
Jeremy Fitzhardinge | 5ead97c | 2007-07-17 18:37:04 -0700 | [diff] [blame] | 813 | ENTRY(xen_hypervisor_callback) |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 814 | pushl $-1 /* orig_ax = -1 => not a system call */ |
Jeremy Fitzhardinge | 5ead97c | 2007-07-17 18:37:04 -0700 | [diff] [blame] | 815 | SAVE_ALL |
| 816 | TRACE_IRQS_OFF |
Jeremy Fitzhardinge | 9ec2b80 | 2007-07-17 18:37:07 -0700 | [diff] [blame] | 817 | |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 818 | /* |
| 819 | * Check to see if we got the event in the critical |
| 820 | * region in xen_iret_direct, after we've reenabled |
| 821 | * events and checked for pending events. This simulates |
| 822 | * iret instruction's behaviour where it delivers a |
| 823 | * pending interrupt when enabling interrupts: |
| 824 | */ |
| 825 | movl PT_EIP(%esp), %eax |
| 826 | cmpl $xen_iret_start_crit, %eax |
| 827 | jb 1f |
| 828 | cmpl $xen_iret_end_crit, %eax |
| 829 | jae 1f |
Jeremy Fitzhardinge | 9ec2b80 | 2007-07-17 18:37:07 -0700 | [diff] [blame] | 830 | |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 831 | jmp xen_iret_crit_fixup |
Jeremy Fitzhardinge | 9ec2b80 | 2007-07-17 18:37:07 -0700 | [diff] [blame] | 832 | |
Jeremy Fitzhardinge | e2a81ba | 2008-03-17 16:37:17 -0700 | [diff] [blame] | 833 | ENTRY(xen_do_upcall) |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 834 | 1: mov %esp, %eax |
| 835 | call xen_evtchn_do_upcall |
David Vrabel | fdfd811 | 2015-02-19 15:23:17 +0000 | [diff] [blame] | 836 | #ifndef CONFIG_PREEMPT |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 837 | call xen_maybe_preempt_hcall |
David Vrabel | fdfd811 | 2015-02-19 15:23:17 +0000 | [diff] [blame] | 838 | #endif |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 839 | jmp ret_from_intr |
Jeremy Fitzhardinge | 5ead97c | 2007-07-17 18:37:04 -0700 | [diff] [blame] | 840 | ENDPROC(xen_hypervisor_callback) |
| 841 | |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 842 | /* |
| 843 | * Hypervisor uses this for application faults while it executes. |
| 844 | * We get here for two reasons: |
| 845 | * 1. Fault while reloading DS, ES, FS or GS |
| 846 | * 2. Fault while executing IRET |
| 847 | * Category 1 we fix up by reattempting the load, and zeroing the segment |
| 848 | * register if the load fails. |
| 849 | * Category 2 we fix up by jumping to do_iret_error. We cannot use the |
| 850 | * normal Linux return path in this case because if we use the IRET hypercall |
| 851 | * to pop the stack frame we end up in an infinite loop of failsafe callbacks. |
| 852 | * We distinguish between categories by maintaining a status value in EAX. |
| 853 | */ |
Jeremy Fitzhardinge | 5ead97c | 2007-07-17 18:37:04 -0700 | [diff] [blame] | 854 | ENTRY(xen_failsafe_callback) |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 855 | pushl %eax |
| 856 | movl $1, %eax |
| 857 | 1: mov 4(%esp), %ds |
| 858 | 2: mov 8(%esp), %es |
| 859 | 3: mov 12(%esp), %fs |
| 860 | 4: mov 16(%esp), %gs |
David Vrabel | a349e23d1 | 2012-10-19 17:29:07 +0100 | [diff] [blame] | 861 | /* EAX == 0 => Category 1 (Bad segment) |
| 862 | EAX != 0 => Category 2 (Bad IRET) */ |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 863 | testl %eax, %eax |
| 864 | popl %eax |
| 865 | lea 16(%esp), %esp |
| 866 | jz 5f |
| 867 | jmp iret_exc |
| 868 | 5: pushl $-1 /* orig_ax = -1 => not a system call */ |
Jeremy Fitzhardinge | 5ead97c | 2007-07-17 18:37:04 -0700 | [diff] [blame] | 869 | SAVE_ALL |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 870 | jmp ret_from_exception |
Jeremy Fitzhardinge | 5ead97c | 2007-07-17 18:37:04 -0700 | [diff] [blame] | 871 | |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 872 | .section .fixup, "ax" |
| 873 | 6: xorl %eax, %eax |
| 874 | movl %eax, 4(%esp) |
| 875 | jmp 1b |
| 876 | 7: xorl %eax, %eax |
| 877 | movl %eax, 8(%esp) |
| 878 | jmp 2b |
| 879 | 8: xorl %eax, %eax |
| 880 | movl %eax, 12(%esp) |
| 881 | jmp 3b |
| 882 | 9: xorl %eax, %eax |
| 883 | movl %eax, 16(%esp) |
| 884 | jmp 4b |
Jeremy Fitzhardinge | 5ead97c | 2007-07-17 18:37:04 -0700 | [diff] [blame] | 885 | .previous |
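	/*
	 * Assumed behaviour of _ASM_EXTABLE(from, to) from <asm/asm.h>:
	 * it records an exception-table entry so that a fault at 'from'
	 * resumes execution at 'to'.  Here, each segment load that faults
	 * is restarted by its fixup once the saved selector has been
	 * zeroed, and loading a null selector cannot fault again.
	 */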
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 886 | _ASM_EXTABLE(1b, 6b) |
| 887 | _ASM_EXTABLE(2b, 7b) |
| 888 | _ASM_EXTABLE(3b, 8b) |
| 889 | _ASM_EXTABLE(4b, 9b) |
Jeremy Fitzhardinge | 5ead97c | 2007-07-17 18:37:04 -0700 | [diff] [blame] | 890 | ENDPROC(xen_failsafe_callback) |
| 891 | |
K. Y. Srinivasan | bc2b033 | 2013-02-03 17:22:39 -0800 | [diff] [blame] | 892 | BUILD_INTERRUPT3(xen_hvm_callback_vector, HYPERVISOR_CALLBACK_VECTOR, |
Sheng Yang | 38e20b0 | 2010-05-14 12:40:51 +0100 | [diff] [blame] | 893 | xen_evtchn_do_upcall) |
| 894 | |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 895 | #endif /* CONFIG_XEN */ |
Jeremy Fitzhardinge | 5ead97c | 2007-07-17 18:37:04 -0700 | [diff] [blame] | 896 | |
K. Y. Srinivasan | bc2b033 | 2013-02-03 17:22:39 -0800 | [diff] [blame] | 897 | #if IS_ENABLED(CONFIG_HYPERV) |
| 898 | |
| 899 | BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR, |
| 900 | hyperv_vector_handler) |
| 901 | |
| 902 | #endif /* CONFIG_HYPERV */ |
| 903 | |
Steven Rostedt | 606576c | 2008-10-06 19:06:12 -0400 | [diff] [blame] | 904 | #ifdef CONFIG_FUNCTION_TRACER |
Steven Rostedt | d61f82d | 2008-05-12 21:20:43 +0200 | [diff] [blame] | 905 | #ifdef CONFIG_DYNAMIC_FTRACE |
| 906 | |
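/*
 * Hedged note (not in the original source): with CONFIG_DYNAMIC_FTRACE the
 * compiler-emitted mcount call sites are patched at runtime - to NOPs while
 * tracing is off, and to calls into ftrace_caller/ftrace_regs_caller while it
 * is on - so the default mcount below only needs to return immediately.
 */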
| 907 | ENTRY(mcount) |
Steven Rostedt | d61f82d | 2008-05-12 21:20:43 +0200 | [diff] [blame] | 908 | ret |
| 909 | END(mcount) |
| 910 | |
| 911 | ENTRY(ftrace_caller) |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 912 | pushl %eax |
| 913 | pushl %ecx |
| 914 | pushl %edx |
| 915 | pushl $0 /* Pass NULL as regs pointer */ |
| 916 | movl 4*4(%esp), %eax |
| 917 | movl 0x4(%ebp), %edx |
| 918 | movl function_trace_op, %ecx |
| 919 | subl $MCOUNT_INSN_SIZE, %eax |
Steven Rostedt | d61f82d | 2008-05-12 21:20:43 +0200 | [diff] [blame] | 920 | |
| 921 | .globl ftrace_call |
| 922 | ftrace_call: |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 923 | call ftrace_stub |
Steven Rostedt | d61f82d | 2008-05-12 21:20:43 +0200 | [diff] [blame] | 924 | |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 925 | addl $4, %esp /* skip NULL pointer */ |
| 926 | popl %edx |
| 927 | popl %ecx |
| 928 | popl %eax |
Steven Rostedt | 4de7239 | 2012-06-05 20:00:11 -0400 | [diff] [blame] | 929 | ftrace_ret: |
Steven Rostedt | 5a45cfe | 2008-11-26 00:16:24 -0500 | [diff] [blame] | 930 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
| 931 | .globl ftrace_graph_call |
| 932 | ftrace_graph_call: |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 933 | jmp ftrace_stub |
Steven Rostedt | 5a45cfe | 2008-11-26 00:16:24 -0500 | [diff] [blame] | 934 | #endif |
Steven Rostedt | d61f82d | 2008-05-12 21:20:43 +0200 | [diff] [blame] | 935 | |
| 936 | .globl ftrace_stub |
| 937 | ftrace_stub: |
| 938 | ret |
| 939 | END(ftrace_caller) |
| 940 | |
Steven Rostedt | 4de7239 | 2012-06-05 20:00:11 -0400 | [diff] [blame] | 941 | ENTRY(ftrace_regs_caller) |
| 942 | pushf /* push flags before compare (in cs location) */ |
Steven Rostedt | 4de7239 | 2012-06-05 20:00:11 -0400 | [diff] [blame] | 943 | |
| 944 | /* |
| 945 | * i386 does not save SS and ESP when coming from kernel. |
| 946 | * Instead, to get sp, &regs->sp is used (see ptrace.h). |
| 947 | * Unfortunately, that means eflags must be at the same location |
| 948 | * as the current return ip is. We move the return ip into the |
| 949 | * ip location, and move flags into the return ip location. |
| 950 | */ |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 951 | pushl 4(%esp) /* save return ip into ip slot */ |
Steven Rostedt | 4de7239 | 2012-06-05 20:00:11 -0400 | [diff] [blame] | 952 | |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 953 | pushl $0 /* Load 0 into orig_ax */ |
| 954 | pushl %gs |
| 955 | pushl %fs |
| 956 | pushl %es |
| 957 | pushl %ds |
| 958 | pushl %eax |
| 959 | pushl %ebp |
| 960 | pushl %edi |
| 961 | pushl %esi |
| 962 | pushl %edx |
| 963 | pushl %ecx |
| 964 | pushl %ebx |
Steven Rostedt | 4de7239 | 2012-06-05 20:00:11 -0400 | [diff] [blame] | 965 | |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 966 | movl 13*4(%esp), %eax /* Get the saved flags */ |
| 967 | movl %eax, 14*4(%esp) /* Move saved flags into regs->flags location */ |
| 968 | /* clobbering return ip */ |
| 969 | movl $__KERNEL_CS, 13*4(%esp) |
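	/*
	 * Sketch of the frame at this point, derived from the pushes above
	 * (offsets in 4-byte words from %esp, matching struct pt_regs):
	 *   0-10: ebx, ecx, edx, esi, edi, ebp, eax, ds, es, fs, gs
	 *   11:   orig_ax (0)
	 *   12:   ip    (return address of the mcount call site)
	 *   13:   cs    (__KERNEL_CS; the flags that pushf left here were
	 *                moved out just above)
	 *   14:   flags (saved by the pushf at entry)
	 */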
Steven Rostedt | 4de7239 | 2012-06-05 20:00:11 -0400 | [diff] [blame] | 970 | |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 971 | movl 12*4(%esp), %eax /* Load ip (1st parameter) */ |
| 972 | subl $MCOUNT_INSN_SIZE, %eax /* Adjust ip */ |
| 973 | movl 0x4(%ebp), %edx /* Load parent ip (2nd parameter) */ |
| 974 | movl function_trace_op, %ecx /* Save ftrace_pos in 3rd parameter */ |
| 975 | pushl %esp /* Save pt_regs as 4th parameter */ |
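	/*
	 * Assumption about the callback's shape: with the 32-bit kernel's
	 * regparm(3) convention the patched-in call below is expected to
	 * reach something like
	 *   void func(unsigned long ip, unsigned long parent_ip,
	 *             struct ftrace_ops *op, struct pt_regs *regs);
	 * i.e. %eax/%edx/%ecx carry the first three arguments and the
	 * pt_regs pointer just pushed is the fourth, on the stack.
	 */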
Steven Rostedt | 4de7239 | 2012-06-05 20:00:11 -0400 | [diff] [blame] | 976 | |
| 977 | GLOBAL(ftrace_regs_call) |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 978 | call ftrace_stub |
Steven Rostedt | 4de7239 | 2012-06-05 20:00:11 -0400 | [diff] [blame] | 979 | |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 980 | addl $4, %esp /* Skip pt_regs */ |
| 981 | movl 14*4(%esp), %eax /* Move flags back into cs */ |
| 982 | movl %eax, 13*4(%esp) /* Needed to keep addl from modifying flags */ |
| 983 | movl 12*4(%esp), %eax /* Get return ip from regs->ip */ |
| 984 | movl %eax, 14*4(%esp) /* Put return ip back for ret */ |
Steven Rostedt | 4de7239 | 2012-06-05 20:00:11 -0400 | [diff] [blame] | 985 | |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 986 | popl %ebx |
| 987 | popl %ecx |
| 988 | popl %edx |
| 989 | popl %esi |
| 990 | popl %edi |
| 991 | popl %ebp |
| 992 | popl %eax |
| 993 | popl %ds |
| 994 | popl %es |
| 995 | popl %fs |
| 996 | popl %gs |
| 997 | addl $8, %esp /* Skip orig_ax and ip */ |
| 998 | popf /* Pop flags at end (no addl to corrupt flags) */ |
| 999 | jmp ftrace_ret |
Steven Rostedt | 4de7239 | 2012-06-05 20:00:11 -0400 | [diff] [blame] | 1000 | |
Steven Rostedt | 4de7239 | 2012-06-05 20:00:11 -0400 | [diff] [blame] | 1001 | popf |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 1002 | jmp ftrace_stub |
Steven Rostedt | d61f82d | 2008-05-12 21:20:43 +0200 | [diff] [blame] | 1003 | #else /* ! CONFIG_DYNAMIC_FTRACE */ |
| 1004 | |
Arnaldo Carvalho de Melo | 16444a8 | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1005 | ENTRY(mcount) |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 1006 | cmpl $__PAGE_OFFSET, %esp |
| 1007 | jb ftrace_stub /* Paging not enabled yet? */ |
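	/*
	 * Hedged note: very early in boot the kernel still runs on
	 * identity-mapped low addresses, so a stack pointer below
	 * __PAGE_OFFSET means it is too early to trace safely.
	 */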
H. Peter Anvin | af058ab | 2013-08-30 17:29:29 -0700 | [diff] [blame] | 1008 | |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 1009 | cmpl $ftrace_stub, ftrace_trace_function |
| 1010 | jnz trace |
Frederic Weisbecker | fb52607 | 2008-11-25 21:07:04 +0100 | [diff] [blame] | 1011 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 1012 | cmpl $ftrace_stub, ftrace_graph_return |
| 1013 | jnz ftrace_graph_caller |
Steven Rostedt | e49dc19 | 2008-12-02 23:50:05 -0500 | [diff] [blame] | 1014 | |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 1015 | cmpl $ftrace_graph_entry_stub, ftrace_graph_entry |
| 1016 | jnz ftrace_graph_caller |
Frederic Weisbecker | caf4b32 | 2008-11-11 07:03:45 +0100 | [diff] [blame] | 1017 | #endif |
Arnaldo Carvalho de Melo | 16444a8 | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1018 | .globl ftrace_stub |
| 1019 | ftrace_stub: |
| 1020 | ret |
| 1021 | |
| 1022 | /* taken from glibc */ |
| 1023 | trace: |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 1024 | pushl %eax |
| 1025 | pushl %ecx |
| 1026 | pushl %edx |
| 1027 | movl 0xc(%esp), %eax |
| 1028 | movl 0x4(%ebp), %edx |
| 1029 | subl $MCOUNT_INSN_SIZE, %eax |
Arnaldo Carvalho de Melo | 16444a8 | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1030 | |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 1031 | call *ftrace_trace_function |
Arnaldo Carvalho de Melo | 16444a8 | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1032 | |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 1033 | popl %edx |
| 1034 | popl %ecx |
| 1035 | popl %eax |
| 1036 | jmp ftrace_stub |
Arnaldo Carvalho de Melo | 16444a8 | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1037 | END(mcount) |
Steven Rostedt | d61f82d | 2008-05-12 21:20:43 +0200 | [diff] [blame] | 1038 | #endif /* CONFIG_DYNAMIC_FTRACE */ |
Steven Rostedt | 606576c | 2008-10-06 19:06:12 -0400 | [diff] [blame] | 1039 | #endif /* CONFIG_FUNCTION_TRACER */ |
Arnaldo Carvalho de Melo | 16444a8 | 2008-05-12 21:20:42 +0200 | [diff] [blame] | 1040 | |
Frederic Weisbecker | fb52607 | 2008-11-25 21:07:04 +0100 | [diff] [blame] | 1041 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
| 1042 | ENTRY(ftrace_graph_caller) |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 1043 | pushl %eax |
| 1044 | pushl %ecx |
| 1045 | pushl %edx |
| 1046 | movl 0xc(%esp), %eax |
| 1047 | lea 0x4(%ebp), %edx |
| 1048 | movl (%ebp), %ecx |
| 1049 | subl $MCOUNT_INSN_SIZE, %eax |
| 1050 | call prepare_ftrace_return |
| 1051 | popl %edx |
| 1052 | popl %ecx |
| 1053 | popl %eax |
Frederic Weisbecker | e7d3737 | 2008-11-16 06:02:06 +0100 | [diff] [blame] | 1054 | ret |
Frederic Weisbecker | fb52607 | 2008-11-25 21:07:04 +0100 | [diff] [blame] | 1055 | END(ftrace_graph_caller) |
Frederic Weisbecker | caf4b32 | 2008-11-11 07:03:45 +0100 | [diff] [blame] | 1056 | |
| 1057 | .globl return_to_handler |
| 1058 | return_to_handler: |
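	/*
	 * Hedged note: the function-graph tracer replaces the traced
	 * function's return address with this label, so we arrive here when
	 * that function returns.  ftrace_return_to_handler() (given the
	 * frame pointer in %eax below) records the exit and hands back the
	 * original return address, which we then jump to.
	 */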
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 1059 | pushl %eax |
| 1060 | pushl %edx |
| 1061 | movl %ebp, %eax |
| 1062 | call ftrace_return_to_handler |
| 1063 | movl %eax, %ecx |
| 1064 | popl %edx |
| 1065 | popl %eax |
| 1066 | jmp *%ecx |
Frederic Weisbecker | e7d3737 | 2008-11-16 06:02:06 +0100 | [diff] [blame] | 1067 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1068 | |
Seiji Aguchi | 25c74b1 | 2013-10-30 16:37:00 -0400 | [diff] [blame] | 1069 | #ifdef CONFIG_TRACING |
| 1070 | ENTRY(trace_page_fault) |
Seiji Aguchi | 25c74b1 | 2013-10-30 16:37:00 -0400 | [diff] [blame] | 1071 | ASM_CLAC |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 1072 | pushl $trace_do_page_fault |
| 1073 | jmp error_code |
Seiji Aguchi | 25c74b1 | 2013-10-30 16:37:00 -0400 | [diff] [blame] | 1074 | END(trace_page_fault) |
| 1075 | #endif |
| 1076 | |
Alexander van Heukelum | d211af0 | 2008-11-24 15:38:45 +0100 | [diff] [blame] | 1077 | ENTRY(page_fault) |
H. Peter Anvin | e59d1b0 | 2012-09-21 13:58:10 -0700 | [diff] [blame] | 1078 | ASM_CLAC |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 1079 | pushl $do_page_fault |
Alexander van Heukelum | d211af0 | 2008-11-24 15:38:45 +0100 | [diff] [blame] | 1080 | ALIGN |
| 1081 | error_code: |
Tejun Heo | ccbeed3 | 2009-02-09 22:17:40 +0900 | [diff] [blame] | 1082 | /* the function address is in %gs's slot on the stack */ |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 1083 | pushl %fs |
| 1084 | pushl %es |
| 1085 | pushl %ds |
| 1086 | pushl %eax |
| 1087 | pushl %ebp |
| 1088 | pushl %edi |
| 1089 | pushl %esi |
| 1090 | pushl %edx |
| 1091 | pushl %ecx |
| 1092 | pushl %ebx |
Alexander van Heukelum | d211af0 | 2008-11-24 15:38:45 +0100 | [diff] [blame] | 1093 | cld |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 1094 | movl $(__KERNEL_PERCPU), %ecx |
| 1095 | movl %ecx, %fs |
Alexander van Heukelum | d211af0 | 2008-11-24 15:38:45 +0100 | [diff] [blame] | 1096 | UNWIND_ESPFIX_STACK |
Tejun Heo | ccbeed3 | 2009-02-09 22:17:40 +0900 | [diff] [blame] | 1097 | GS_TO_REG %ecx |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 1098 | movl PT_GS(%esp), %edi # get the function address |
| 1099 | movl PT_ORIG_EAX(%esp), %edx # get the error code |
| 1100 | movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart |
Tejun Heo | ccbeed3 | 2009-02-09 22:17:40 +0900 | [diff] [blame] | 1101 | REG_TO_PTGS %ecx |
| 1102 | SET_KERNEL_GS %ecx |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 1103 | movl $(__USER_DS), %ecx |
| 1104 | movl %ecx, %ds |
| 1105 | movl %ecx, %es |
Alexander van Heukelum | d211af0 | 2008-11-24 15:38:45 +0100 | [diff] [blame] | 1106 | TRACE_IRQS_OFF |
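	/*
	 * Assumption about the dispatch below: with the 32-bit kernel's
	 * register-argument convention the handler reached through *%edi
	 * takes the pt_regs pointer in %eax and the error code in %edx
	 * (loaded from PT_ORIG_EAX above).
	 */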
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 1107 | movl %esp, %eax # pt_regs pointer |
| 1108 | call *%edi |
| 1109 | jmp ret_from_exception |
Alexander van Heukelum | d211af0 | 2008-11-24 15:38:45 +0100 | [diff] [blame] | 1110 | END(page_fault) |
| 1111 | |
| 1112 | /* |
| 1113 | * Debug traps and NMI can happen at the one SYSENTER instruction |
| 1114 | * that sets up the real kernel stack. Check here, since we can't |
| 1115 | * allow the wrong stack to be used. |
| 1116 | * |
| 1117 | * "TSS_sysenter_sp0+12" is because the NMI/debug handler will have |
| 1118 | * already pushed 3 words if it hits on the sysenter instruction: |
| 1119 | * eflags, cs and eip. |
| 1120 | * |
| 1121 | * We just load the right stack, and push the three (known) values |
| 1122 | * by hand onto the new stack - while updating the return eip past |
| 1123 | * the instruction that would have done it for sysenter. |
| 1124 | */ |
Tejun Heo | f0d9611 | 2009-02-09 22:17:40 +0900 | [diff] [blame] | 1125 | .macro FIX_STACK offset ok label |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 1126 | cmpw $__KERNEL_CS, 4(%esp) |
| 1127 | jne \ok |
Tejun Heo | f0d9611 | 2009-02-09 22:17:40 +0900 | [diff] [blame] | 1128 | \label: |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 1129 | movl TSS_sysenter_sp0 + \offset(%esp), %esp |
Ingo Molnar | 131484c | 2015-05-28 12:21:47 +0200 | [diff] [blame] | 1130 | pushfl |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 1131 | pushl $__KERNEL_CS |
| 1132 | pushl $sysenter_past_esp |
Tejun Heo | f0d9611 | 2009-02-09 22:17:40 +0900 | [diff] [blame] | 1133 | .endm |
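/*
 * Illustrative reading of the first use below, "FIX_STACK 12,
 * debug_stack_correct, debug_esp_fix_insn": if 4(%esp) is not __KERNEL_CS we
 * were not interrupted on the tiny SYSENTER stack, so nothing needs fixing;
 * otherwise switch %esp to the real kernel stack taken from the TSS (the +12
 * compensates for the eflags/cs/eip words the exception already pushed on the
 * wrong stack) and rebuild that three-word frame by hand, with the saved eip
 * replaced by sysenter_past_esp.
 */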
Alexander van Heukelum | d211af0 | 2008-11-24 15:38:45 +0100 | [diff] [blame] | 1134 | |
| 1135 | ENTRY(debug) |
H. Peter Anvin | e59d1b0 | 2012-09-21 13:58:10 -0700 | [diff] [blame] | 1136 | ASM_CLAC |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 1137 | cmpl $entry_SYSENTER_32, (%esp) |
| 1138 | jne debug_stack_correct |
Tejun Heo | f0d9611 | 2009-02-09 22:17:40 +0900 | [diff] [blame] | 1139 | FIX_STACK 12, debug_stack_correct, debug_esp_fix_insn |
Alexander van Heukelum | d211af0 | 2008-11-24 15:38:45 +0100 | [diff] [blame] | 1140 | debug_stack_correct: |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 1141 | pushl $-1 # mark this as an int |
Alexander van Heukelum | d211af0 | 2008-11-24 15:38:45 +0100 | [diff] [blame] | 1142 | SAVE_ALL |
| 1143 | TRACE_IRQS_OFF |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 1144 | xorl %edx, %edx # error code 0 |
| 1145 | movl %esp, %eax # pt_regs pointer |
| 1146 | call do_debug |
| 1147 | jmp ret_from_exception |
Alexander van Heukelum | d211af0 | 2008-11-24 15:38:45 +0100 | [diff] [blame] | 1148 | END(debug) |
| 1149 | |
| 1150 | /* |
| 1151 | * NMI is doubly nasty. It can happen _while_ we're handling |
| 1152 | * a debug fault, and the debug fault hasn't yet been able to |
| 1153 | * clear up the stack. So we first check whether we got an |
| 1154 | * NMI on the sysenter entry path, but after that we need to |
| 1155 | * check whether we got an NMI on the debug path where the debug |
| 1156 | * fault happened on the sysenter path. |
| 1157 | */ |
| 1158 | ENTRY(nmi) |
H. Peter Anvin | e59d1b0 | 2012-09-21 13:58:10 -0700 | [diff] [blame] | 1159 | ASM_CLAC |
H. Peter Anvin | 34273f4 | 2014-05-04 10:36:22 -0700 | [diff] [blame] | 1160 | #ifdef CONFIG_X86_ESPFIX32 |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 1161 | pushl %eax |
| 1162 | movl %ss, %eax |
| 1163 | cmpw $__ESPFIX_SS, %ax |
| 1164 | popl %eax |
| 1165 | je nmi_espfix_stack |
H. Peter Anvin | 34273f4 | 2014-05-04 10:36:22 -0700 | [diff] [blame] | 1166 | #endif |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 1167 | cmpl $entry_SYSENTER_32, (%esp) |
| 1168 | je nmi_stack_fixup |
| 1169 | pushl %eax |
| 1170 | movl %esp, %eax |
| 1171 | /* |
| 1172 | * Do not access memory above the end of our stack page, |
Alexander van Heukelum | d211af0 | 2008-11-24 15:38:45 +0100 | [diff] [blame] | 1173 | * it might not exist. |
| 1174 | */ |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 1175 | andl $(THREAD_SIZE-1), %eax |
| 1176 | cmpl $(THREAD_SIZE-20), %eax |
| 1177 | popl %eax |
| 1178 | jae nmi_stack_correct |
| 1179 | cmpl $entry_SYSENTER_32, 12(%esp) |
| 1180 | je nmi_debug_stack_check |
Alexander van Heukelum | d211af0 | 2008-11-24 15:38:45 +0100 | [diff] [blame] | 1181 | nmi_stack_correct: |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 1182 | pushl %eax |
Alexander van Heukelum | d211af0 | 2008-11-24 15:38:45 +0100 | [diff] [blame] | 1183 | SAVE_ALL |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 1184 | xorl %edx, %edx # zero error code |
| 1185 | movl %esp, %eax # pt_regs pointer |
| 1186 | call do_nmi |
| 1187 | jmp restore_all_notrace |
Alexander van Heukelum | d211af0 | 2008-11-24 15:38:45 +0100 | [diff] [blame] | 1188 | |
| 1189 | nmi_stack_fixup: |
Tejun Heo | f0d9611 | 2009-02-09 22:17:40 +0900 | [diff] [blame] | 1190 | FIX_STACK 12, nmi_stack_correct, 1 |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 1191 | jmp nmi_stack_correct |
Alexander van Heukelum | d211af0 | 2008-11-24 15:38:45 +0100 | [diff] [blame] | 1192 | |
| 1193 | nmi_debug_stack_check: |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 1194 | cmpw $__KERNEL_CS, 16(%esp) |
| 1195 | jne nmi_stack_correct |
| 1196 | cmpl $debug, (%esp) |
| 1197 | jb nmi_stack_correct |
| 1198 | cmpl $debug_esp_fix_insn, (%esp) |
| 1199 | ja nmi_stack_correct |
Tejun Heo | f0d9611 | 2009-02-09 22:17:40 +0900 | [diff] [blame] | 1200 | FIX_STACK 24, nmi_stack_correct, 1 |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 1201 | jmp nmi_stack_correct |
Alexander van Heukelum | d211af0 | 2008-11-24 15:38:45 +0100 | [diff] [blame] | 1202 | |
H. Peter Anvin | 34273f4 | 2014-05-04 10:36:22 -0700 | [diff] [blame] | 1203 | #ifdef CONFIG_X86_ESPFIX32 |
Alexander van Heukelum | d211af0 | 2008-11-24 15:38:45 +0100 | [diff] [blame] | 1204 | nmi_espfix_stack: |
Ingo Molnar | 131484c | 2015-05-28 12:21:47 +0200 | [diff] [blame] | 1205 | /* |
Alexander van Heukelum | d211af0 | 2008-11-24 15:38:45 +0100 | [diff] [blame] | 1206 | * Create an ss:esp far pointer for the "lss" below, pointing back at the espfix stack we are on |
| 1207 | */ |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 1208 | pushl %ss |
| 1209 | pushl %esp |
| 1210 | addl $4, (%esp) |
Alexander van Heukelum | d211af0 | 2008-11-24 15:38:45 +0100 | [diff] [blame] | 1211 | /* copy the iret frame of 12 bytes */ |
| 1212 | .rept 3 |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 1213 | pushl 16(%esp) |
Alexander van Heukelum | d211af0 | 2008-11-24 15:38:45 +0100 | [diff] [blame] | 1214 | .endr |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 1215 | pushl %eax |
Alexander van Heukelum | d211af0 | 2008-11-24 15:38:45 +0100 | [diff] [blame] | 1216 | SAVE_ALL |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 1217 | FIXUP_ESPFIX_STACK # %eax == %esp |
| 1218 | xorl %edx, %edx # zero error code |
| 1219 | call do_nmi |
Alexander van Heukelum | d211af0 | 2008-11-24 15:38:45 +0100 | [diff] [blame] | 1220 | RESTORE_REGS |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 1221 | lss 12+4(%esp), %esp # back to espfix stack |
| 1222 | jmp irq_return |
H. Peter Anvin | 34273f4 | 2014-05-04 10:36:22 -0700 | [diff] [blame] | 1223 | #endif |
Alexander van Heukelum | d211af0 | 2008-11-24 15:38:45 +0100 | [diff] [blame] | 1224 | END(nmi) |
| 1225 | |
| 1226 | ENTRY(int3) |
H. Peter Anvin | e59d1b0 | 2012-09-21 13:58:10 -0700 | [diff] [blame] | 1227 | ASM_CLAC |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 1228 | pushl $-1 # mark this as an int |
Alexander van Heukelum | d211af0 | 2008-11-24 15:38:45 +0100 | [diff] [blame] | 1229 | SAVE_ALL |
| 1230 | TRACE_IRQS_OFF |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 1231 | xorl %edx, %edx # zero error code |
| 1232 | movl %esp, %eax # pt_regs pointer |
| 1233 | call do_int3 |
| 1234 | jmp ret_from_exception |
Alexander van Heukelum | d211af0 | 2008-11-24 15:38:45 +0100 | [diff] [blame] | 1235 | END(int3) |
| 1236 | |
| 1237 | ENTRY(general_protection) |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 1238 | pushl $do_general_protection |
| 1239 | jmp error_code |
Alexander van Heukelum | d211af0 | 2008-11-24 15:38:45 +0100 | [diff] [blame] | 1240 | END(general_protection) |
| 1241 | |
Gleb Natapov | 631bc48 | 2010-10-14 11:22:52 +0200 | [diff] [blame] | 1242 | #ifdef CONFIG_KVM_GUEST |
| 1243 | ENTRY(async_page_fault) |
H. Peter Anvin | e59d1b0 | 2012-09-21 13:58:10 -0700 | [diff] [blame] | 1244 | ASM_CLAC |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame^] | 1245 | pushl $do_async_page_fault |
| 1246 | jmp error_code |
Sedat Dilek | 2ae9d29 | 2011-03-08 22:39:24 +0100 | [diff] [blame] | 1247 | END(async_page_fault) |
Gleb Natapov | 631bc48 | 2010-10-14 11:22:52 +0200 | [diff] [blame] | 1248 | #endif |