/*
 * Compatibility mode system call entry point for x86-64.
 *
 * Copyright 2000-2002 Andi Kleen, SuSE Labs.
 */
#include "calling.h"
#include <asm/asm-offsets.h>
#include <asm/current.h>
#include <asm/errno.h>
#include <asm/ia32_unistd.h>
#include <asm/thread_info.h>
#include <asm/segment.h>
#include <asm/irqflags.h>
#include <asm/asm.h>
#include <asm/smap.h>
#include <linux/linkage.h>
#include <linux/err.h>

	.section .entry.text, "ax"

/*
 * 32-bit SYSENTER entry.
 *
 * 32-bit system calls through the vDSO's __kernel_vsyscall enter here
 * on 64-bit kernels running on Intel CPUs.
 *
 * The SYSENTER instruction, in principle, should *only* occur in the
 * vDSO.  In practice, a small number of Android devices were shipped
 * with a copy of Bionic that inlined a SYSENTER instruction.  This
 * never happened in any of Google's Bionic versions -- it only happened
 * in a narrow range of Intel-provided versions.
 *
 * SYSENTER loads SS, RSP, CS, and RIP from previously programmed MSRs.
 * IF and VM in RFLAGS are cleared (IOW: interrupts are off).
 * SYSENTER does not save anything on the stack,
 * and does not save old RIP (!!!), RSP, or RFLAGS.
 *
 * Arguments:
 * eax  system call number
 * ebx  arg1
 * ecx  arg2
 * edx  arg3
 * esi  arg4
 * edi  arg5
 * ebp  user stack
 * 0(%ebp) arg6
 */
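/*
 * Illustration only, not part of this file: a 32-bit program normally
 * reaches this entry through the vDSO, which saves the registers that
 * SYSENTER clobbers and stashes the user stack pointer in EBP so the
 * kernel can find arg6.  A rough sketch of that user-side sequence,
 * assuming the __kernel_vsyscall behavior described above (the real
 * vDSO selects SYSENTER, SYSCALL, or INT80 at runtime):
 *
 *	__kernel_vsyscall:
 *		pushl	%ecx
 *		pushl	%edx
 *		pushl	%ebp
 *		movl	%esp, %ebp	# user ESP -> EBP ("user stack" above)
 *		sysenter
 *		...			# execution resumes at the vDSO's
 *		popl	%ebp		# landing pad, which restores the
 *		popl	%edx		# saved registers
 *		popl	%ecx
 *		ret
 */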
ENTRY(entry_SYSENTER_compat)
	/* Interrupts are off on entry. */
	SWAPGS_UNSAFE_STACK
	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp

	/*
	 * User tracing code (ptrace or signal handlers) might assume that
	 * the saved RAX contains a 32-bit number when we're invoking a 32-bit
	 * syscall.  Just in case the high bits are nonzero, zero-extend
	 * the syscall number.  (This could almost certainly be deleted
	 * with no ill effects.)
	 */
	movl	%eax, %eax

	/* Construct struct pt_regs on stack */
	pushq	$__USER32_DS		/* pt_regs->ss */
	pushq	%rbp			/* pt_regs->sp (stashed in bp) */

	/*
	 * Push flags.  This is nasty.  First, interrupts are currently
	 * off, but we need pt_regs->flags to have IF set.  Second, even
	 * if TF was set when SYSENTER started, it's clear by now.  We fix
	 * that later using TIF_SINGLESTEP.
	 */
	pushfq				/* pt_regs->flags (except IF = 0) */
	orl	$X86_EFLAGS_IF, (%rsp)	/* Fix saved flags */
	pushq	$__USER32_CS		/* pt_regs->cs */
	xorq	%r8, %r8
	pushq	%r8			/* pt_regs->ip = 0 (placeholder) */
	pushq	%rax			/* pt_regs->orig_ax */
	pushq	%rdi			/* pt_regs->di */
	pushq	%rsi			/* pt_regs->si */
	pushq	%rdx			/* pt_regs->dx */
	pushq	%rcx			/* pt_regs->cx */
	pushq	$-ENOSYS		/* pt_regs->ax */
	pushq	%r8			/* pt_regs->r8 = 0 */
	pushq	%r8			/* pt_regs->r9 = 0 */
	pushq	%r8			/* pt_regs->r10 = 0 */
	pushq	%r8			/* pt_regs->r11 = 0 */
	pushq	%rbx			/* pt_regs->rbx */
	pushq	%rbp			/* pt_regs->rbp (will be overwritten) */
	pushq	%r8			/* pt_regs->r12 = 0 */
	pushq	%r8			/* pt_regs->r13 = 0 */
	pushq	%r8			/* pt_regs->r14 = 0 */
	pushq	%r8			/* pt_regs->r15 = 0 */
	cld

	/*
	 * SYSENTER doesn't filter flags, so we need to clear NT and AC
	 * ourselves.  To save a few cycles, we can check whether
	 * either was set instead of doing an unconditional popfq.
	 * This needs to happen before enabling interrupts so that
	 * we don't get preempted with NT set.
	 *
	 * If TF is set, we will single-step all the way to here -- do_debug
	 * will ignore all the traps.  (Yes, this is slow, but so is
	 * single-stepping in general.  This allows us to avoid having
	 * more complicated code to handle the case where a user program
	 * forces us to single-step through the SYSENTER entry code.)
	 *
	 * NB.: .Lsysenter_fix_flags is a label with the code under it moved
	 * out-of-line as an optimization: NT is unlikely to be set in the
	 * majority of the cases and instead of polluting the I$ unnecessarily,
	 * we're keeping that code behind a branch which will predict as
	 * not-taken and therefore its instructions won't be fetched.
	 */
	testl	$X86_EFLAGS_NT|X86_EFLAGS_AC|X86_EFLAGS_TF, EFLAGS(%rsp)
	jnz	.Lsysenter_fix_flags
.Lsysenter_flags_fixed:

	/*
	 * User mode is traced as though IRQs are on, and SYSENTER
	 * turned them off.
	 */
	TRACE_IRQS_OFF

	movq	%rsp, %rdi
	call	do_fast_syscall_32
	/* XEN PV guests always use IRET path */
	ALTERNATIVE "testl %eax, %eax; jz .Lsyscall_32_done", \
		    "jmp .Lsyscall_32_done", X86_FEATURE_XENPV
	jmp	sysret32_from_system_call

.Lsysenter_fix_flags:
	pushq	$X86_EFLAGS_FIXED
	popfq
	jmp	.Lsysenter_flags_fixed
GLOBAL(__end_entry_SYSENTER_compat)
ENDPROC(entry_SYSENTER_compat)

/*
 * 32-bit SYSCALL entry.
 *
 * 32-bit system calls through the vDSO's __kernel_vsyscall enter here
 * on 64-bit kernels running on AMD CPUs.
 *
 * The SYSCALL instruction, in principle, should *only* occur in the
 * vDSO.  In practice, it appears that this really is the case.
 * As evidence:
 *
 *  - The calling convention for SYSCALL has changed several times without
 *    anyone noticing.
 *
 *  - Prior to the in-kernel X86_BUG_SYSRET_SS_ATTRS fixup, any
 *    user task that did SYSCALL without immediately reloading SS
 *    would randomly crash.
 *
 *  - Most programmers do not directly target AMD CPUs, and the 32-bit
 *    SYSCALL instruction does not exist on Intel CPUs.  Even on AMD
 *    CPUs, Linux disables the SYSCALL instruction on 32-bit kernels
 *    because the SYSCALL instruction in legacy/native 32-bit mode (as
 *    opposed to compat mode) is sufficiently poorly designed as to be
 *    essentially unusable.
 *
 * 32-bit SYSCALL saves RIP to RCX, clears RFLAGS.RF, then saves
 * RFLAGS to R11, then loads new SS, CS, and RIP from previously
 * programmed MSRs.  RFLAGS gets masked by a value from another MSR
 * (so CLD and CLAC are not needed).  SYSCALL does not save anything on
 * the stack and does not change RSP.
 *
 * Note: RFLAGS saving+masking-with-MSR happens only in Long mode
 * (in legacy 32-bit mode, IF, RF and VM bits are cleared and that's it).
 * Don't get confused: RFLAGS saving+masking depends on Long Mode Active bit
 * (EFER.LMA=1), NOT on bitness of userspace where SYSCALL executes
 * or target CS descriptor's L bit (SYSCALL does not read segment descriptors).
 *
 * Arguments:
 * eax  system call number
 * ecx  return address
 * ebx  arg1
 * ebp  arg2	(note: not saved in the stack frame, should not be touched)
 * edx  arg3
 * esi  arg4
 * edi  arg5
 * esp  user stack
 * 0(%esp) arg6
 */
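/*
 * Illustration only, not part of this file: SYSCALL itself overwrites
 * ECX (with the return RIP), which is where arg2 would otherwise live,
 * so the vDSO moves arg2 into EBP before issuing SYSCALL and this entry
 * recovers it from there into pt_regs->cx below.  A rough sketch of
 * that user-side sequence, assuming the __kernel_vsyscall behavior
 * described above:
 *
 *	__kernel_vsyscall:
 *		pushl	%ecx
 *		pushl	%edx
 *		pushl	%ebp
 *		movl	%ecx, %ebp	# arg2 -> EBP ("ebp  arg2" above)
 *		syscall
 *		...			# resumes at the vDSO landing pad,
 *		popl	%ebp		# which restores the saved registers
 *		popl	%edx
 *		popl	%ecx
 *		ret
 */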
ENTRY(entry_SYSCALL_compat)
	/* Interrupts are off on entry. */
	SWAPGS_UNSAFE_STACK

	/* Stash user ESP and switch to the kernel stack. */
	movl	%esp, %r8d
	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp

	/* Zero-extending 32-bit regs, do not remove */
	movl	%eax, %eax

	/* Construct struct pt_regs on stack */
	pushq	$__USER32_DS		/* pt_regs->ss */
	pushq	%r8			/* pt_regs->sp */
	pushq	%r11			/* pt_regs->flags */
	pushq	$__USER32_CS		/* pt_regs->cs */
	pushq	%rcx			/* pt_regs->ip */
	pushq	%rax			/* pt_regs->orig_ax */
	pushq	%rdi			/* pt_regs->di */
	pushq	%rsi			/* pt_regs->si */
	pushq	%rdx			/* pt_regs->dx */
	pushq	%rbp			/* pt_regs->cx (stashed in bp) */
	pushq	$-ENOSYS		/* pt_regs->ax */
	xorq	%r8, %r8
	pushq	%r8			/* pt_regs->r8 = 0 */
	pushq	%r8			/* pt_regs->r9 = 0 */
	pushq	%r8			/* pt_regs->r10 = 0 */
	pushq	%r8			/* pt_regs->r11 = 0 */
	pushq	%rbx			/* pt_regs->rbx */
	pushq	%rbp			/* pt_regs->rbp (will be overwritten) */
	pushq	%r8			/* pt_regs->r12 = 0 */
	pushq	%r8			/* pt_regs->r13 = 0 */
	pushq	%r8			/* pt_regs->r14 = 0 */
	pushq	%r8			/* pt_regs->r15 = 0 */

	/*
	 * User mode is traced as though IRQs are on, and SYSCALL
	 * turned them off.
	 */
	TRACE_IRQS_OFF

	movq	%rsp, %rdi
	call	do_fast_syscall_32
	/* XEN PV guests always use IRET path */
	ALTERNATIVE "testl %eax, %eax; jz .Lsyscall_32_done", \
		    "jmp .Lsyscall_32_done", X86_FEATURE_XENPV

	/* Opportunistic SYSRET */
sysret32_from_system_call:
	TRACE_IRQS_ON			/* User mode traces as IRQs on. */
	movq	RBX(%rsp), %rbx		/* pt_regs->rbx */
	movq	RBP(%rsp), %rbp		/* pt_regs->rbp */
	movq	EFLAGS(%rsp), %r11	/* pt_regs->flags (in r11) */
	movq	RIP(%rsp), %rcx		/* pt_regs->ip (in rcx) */
	addq	$RAX, %rsp		/* Skip r8-r15 */
	popq	%rax			/* pt_regs->rax */
	popq	%rdx			/* Skip pt_regs->cx */
	popq	%rdx			/* pt_regs->dx */
	popq	%rsi			/* pt_regs->si */
	popq	%rdi			/* pt_regs->di */

	/*
	 * USERGS_SYSRET32 does:
	 *  GSBASE = user's GS base
	 *  EIP = ECX
	 *  RFLAGS = R11
	 *  CS = __USER32_CS
	 *  SS = __USER_DS
	 *
	 * ECX will not match pt_regs->cx, but we're returning to a vDSO
	 * trampoline that will fix up RCX, so this is okay.
	 *
	 * R12-R15 are callee-saved, so they contain whatever was in them
	 * when the system call started, which is already known to user
	 * code.  We zero R8-R10 to avoid info leaks.
	 */
	xorq	%r8, %r8
	xorq	%r9, %r9
	xorq	%r10, %r10
	movq	RSP-ORIG_RAX(%rsp), %rsp
	swapgs
	sysretl
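	/*
	 * Illustration only, not part of this file: assuming the usual
	 * fast-syscall setup, the saved IP points at the vDSO trampoline's
	 * landing pad, so after SYSRETL the user-side sequence sketched in
	 * the SYSCALL comment above resumes roughly like this and repairs
	 * the clobbered registers:
	 *
	 *		popl	%ebp
	 *		popl	%edx
	 *		popl	%ecx
	 *		ret
	 */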
END(entry_SYSCALL_compat)

/*
 * 32-bit legacy system call entry.
 *
 * 32-bit x86 Linux system calls traditionally used the INT $0x80
 * instruction.  INT $0x80 lands here.
 *
 * This entry point can be used by 32-bit and 64-bit programs to perform
 * 32-bit system calls.  Instances of INT $0x80 can be found inline in
 * various programs and libraries.  It is also used by the vDSO's
 * __kernel_vsyscall fallback for hardware that doesn't support a faster
 * entry method.  Restarted 32-bit system calls also fall back to INT
 * $0x80 regardless of what instruction was originally used to do the
 * system call.
 *
 * This is considered a slow path.  It is not used by most libc
 * implementations on modern hardware except during process startup.
 *
 * Arguments:
 * eax  system call number
 * ebx  arg1
 * ecx  arg2
 * edx  arg3
 * esi  arg4
 * edi  arg5
 * ebp  arg6
 */
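/*
 * Illustration only, not part of this file: a minimal 32-bit user-space
 * sketch of the register convention above, using write(2) (__NR_write
 * is 4 in the 32-bit syscall table; "msg" and "len" are hypothetical
 * symbols):
 *
 *		movl	$4, %eax	# system call number
 *		movl	$1, %ebx	# arg1: fd = stdout
 *		movl	$msg, %ecx	# arg2: buffer
 *		movl	$len, %edx	# arg3: count
 *		int	$0x80
 *		# return value (or -errno) comes back in %eax
 */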
ENTRY(entry_INT80_compat)
	/*
	 * Interrupts are off on entry.
	 */
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	ASM_CLAC			/* Do this early to minimize exposure */
	SWAPGS

	/*
	 * User tracing code (ptrace or signal handlers) might assume that
	 * the saved RAX contains a 32-bit number when we're invoking a 32-bit
	 * syscall.  Just in case the high bits are nonzero, zero-extend
	 * the syscall number.  (This could almost certainly be deleted
	 * with no ill effects.)
	 */
	movl	%eax, %eax

	/* Construct struct pt_regs on stack (iret frame is already on stack) */
	pushq	%rax			/* pt_regs->orig_ax */
	pushq	%rdi			/* pt_regs->di */
	pushq	%rsi			/* pt_regs->si */
	pushq	%rdx			/* pt_regs->dx */
	pushq	%rcx			/* pt_regs->cx */
	pushq	$-ENOSYS		/* pt_regs->ax */
	xorq	%r8, %r8
	pushq	%r8			/* pt_regs->r8 = 0 */
	pushq	%r8			/* pt_regs->r9 = 0 */
	pushq	%r8			/* pt_regs->r10 = 0 */
	pushq	%r8			/* pt_regs->r11 = 0 */
	pushq	%rbx			/* pt_regs->rbx */
	pushq	%rbp			/* pt_regs->rbp */
	pushq	%r12			/* pt_regs->r12 */
	pushq	%r13			/* pt_regs->r13 */
	pushq	%r14			/* pt_regs->r14 */
	pushq	%r15			/* pt_regs->r15 */
	cld

	/*
	 * User mode is traced as though IRQs are on, and the interrupt
	 * gate turned them off.
	 */
	TRACE_IRQS_OFF

	movq	%rsp, %rdi
	call	do_int80_syscall_32
.Lsyscall_32_done:

	/* Go back to user mode. */
	TRACE_IRQS_ON
	SWAPGS
	jmp	restore_regs_and_iret
END(entry_INT80_compat)

	ALIGN
GLOBAL(stub32_clone)
	/*
	 * The 32-bit clone ABI is: clone(..., int tls_val, int *child_tidptr).
	 * The 64-bit clone ABI is: clone(..., int *child_tidptr, int tls_val).
	 *
	 * The native 64-bit kernel's sys_clone() implements the latter,
	 * so we need to swap arguments here before calling it:
	 */
	xchg	%r8, %rcx
	jmp	sys_clone
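	/*
	 * Illustration only, not part of this file: assuming the compat
	 * dispatcher invokes syscall-table entries as ordinary C calls
	 * (the 32-bit args in ebx/ecx/edx/esi/edi/ebp arriving in
	 * rdi/rsi/rdx/rcx/r8/r9), the xchg above lines the last two
	 * arguments up with sys_clone():
	 *
	 *	32-bit caller:	arg4 = tls_val      in %rcx,
	 *			arg5 = child_tidptr in %r8
	 *	sys_clone():	arg4 = child_tidptr in %rcx,
	 *			arg5 = tls_val      in %r8
	 */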