/*
 * common.c - C code for kernel entry and exit
 * Copyright (c) 2015 Andrew Lutomirski
 * GPL v2
 *
 * Based on asm and ptrace code by many authors. The code here originated
 * in ptrace.c and signal.c.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/tracehook.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/signal.h>
#include <linux/export.h>
#include <linux/context_tracking.h>
#include <linux/user-return-notifier.h>
#include <linux/uprobes.h>

#include <asm/desc.h>
#include <asm/traps.h>
#include <asm/vdso.h>
#include <asm/uaccess.h>
#include <asm/cpufeature.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

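/*
 * pt_regs sits at the top of the kernel stack, immediately below
 * TOP_OF_KERNEL_STACK_PADDING, so (regs + 1) plus the padding is the
 * top of the stack; thread_info lives THREAD_SIZE below that, at the
 * bottom of the stack (typically 16K lower on x86_64, where THREAD_SIZE
 * is four pages).
 */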
static struct thread_info *pt_regs_to_thread_info(struct pt_regs *regs)
{
	unsigned long top_of_stack =
		(unsigned long)(regs + 1) + TOP_OF_KERNEL_STACK_PADDING;
	return (struct thread_info *)(top_of_stack - THREAD_SIZE);
}

#ifdef CONFIG_CONTEXT_TRACKING
/* Called on entry from user mode with IRQs off. */
__visible inline void enter_from_user_mode(void)
{
	CT_WARN_ON(ct_state() != CONTEXT_USER);
	user_exit_irqoff();
}
#else
static inline void enter_from_user_mode(void) {}
#endif

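/*
 * Syscall argument registers, for reference (used here and in the
 * seccomp_data setup below):
 *
 *   64-bit: rdi, rsi, rdx, r10, r8, r9
 *   32-bit: ebx, ecx, edx, esi, edi, ebp
 */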
static void do_audit_syscall_entry(struct pt_regs *regs, u32 arch)
{
#ifdef CONFIG_X86_64
	if (arch == AUDIT_ARCH_X86_64) {
		audit_syscall_entry(regs->orig_ax, regs->di,
				    regs->si, regs->dx, regs->r10);
	} else
#endif
	{
		audit_syscall_entry(regs->orig_ax, regs->bx,
				    regs->cx, regs->dx, regs->si);
	}
}

/*
 * We can return 0 to resume the syscall or anything else to go to phase
 * 2. If we resume the syscall, we need to put something appropriate in
 * regs->orig_ax.
 *
 * NB: We don't have full pt_regs here, but regs->orig_ax and regs->ax
 * are fully functional.
 *
 * For phase 2's benefit, our return value is:
 * 0:			resume the syscall
 * 1:			go to phase 2; no seccomp phase 2 needed
 * anything else:	go to phase 2; pass return value to seccomp
 */
unsigned long syscall_trace_enter_phase1(struct pt_regs *regs, u32 arch)
{
	struct thread_info *ti = pt_regs_to_thread_info(regs);
	unsigned long ret = 0;
	u32 work;

	if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
		BUG_ON(regs != task_pt_regs(current));

	work = ACCESS_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY;

#ifdef CONFIG_SECCOMP
	/*
	 * Do seccomp first -- it should minimize exposure of other
	 * code, and keeping seccomp fast is probably more valuable
	 * than the rest of this.
	 */
	if (work & _TIF_SECCOMP) {
		struct seccomp_data sd;

		sd.arch = arch;
		sd.nr = regs->orig_ax;
		sd.instruction_pointer = regs->ip;
#ifdef CONFIG_X86_64
		if (arch == AUDIT_ARCH_X86_64) {
			sd.args[0] = regs->di;
			sd.args[1] = regs->si;
			sd.args[2] = regs->dx;
			sd.args[3] = regs->r10;
			sd.args[4] = regs->r8;
			sd.args[5] = regs->r9;
		} else
#endif
		{
			sd.args[0] = regs->bx;
			sd.args[1] = regs->cx;
			sd.args[2] = regs->dx;
			sd.args[3] = regs->si;
			sd.args[4] = regs->di;
			sd.args[5] = regs->bp;
		}

		BUILD_BUG_ON(SECCOMP_PHASE1_OK != 0);
		BUILD_BUG_ON(SECCOMP_PHASE1_SKIP != 1);

		ret = seccomp_phase1(&sd);
		if (ret == SECCOMP_PHASE1_SKIP) {
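			/*
			 * -1 is not a valid syscall number, so the
			 * syscall is never dispatched; seccomp has
			 * already set up the result it wants the
			 * task to see.
			 */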
			regs->orig_ax = -1;
			ret = 0;
		} else if (ret != SECCOMP_PHASE1_OK) {
			return ret;  /* Go directly to phase 2 */
		}

		work &= ~_TIF_SECCOMP;
	}
#endif

	/* Do our best to finish without phase 2. */
	if (work == 0)
		return ret;  /* seccomp and/or nohz only (ret == 0 here) */

#ifdef CONFIG_AUDITSYSCALL
	if (work == _TIF_SYSCALL_AUDIT) {
		/*
		 * If there is no more work to be done except auditing,
		 * then audit in phase 1. Phase 2 always audits, so, if
		 * we audit here, then we can't go on to phase 2.
		 */
		do_audit_syscall_entry(regs, arch);
		return 0;
	}
#endif

	return 1;  /* Something is enabled that we can't handle in phase 1 */
}

/* Returns the syscall nr to run (which should match regs->orig_ax). */
long syscall_trace_enter_phase2(struct pt_regs *regs, u32 arch,
				unsigned long phase1_result)
{
	struct thread_info *ti = pt_regs_to_thread_info(regs);
	long ret = 0;
	u32 work = ACCESS_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY;

	if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
		BUG_ON(regs != task_pt_regs(current));

#ifdef CONFIG_SECCOMP
	/*
	 * Call seccomp_phase2 before running the other hooks so that
	 * they can see any changes made by a seccomp tracer.
	 */
	if (phase1_result > 1 && seccomp_phase2(phase1_result)) {
		/* seccomp failures shouldn't expose any additional code. */
		return -1;
	}
#endif

	if (unlikely(work & _TIF_SYSCALL_EMU))
		ret = -1L;

	if ((ret || test_thread_flag(TIF_SYSCALL_TRACE)) &&
	    tracehook_report_syscall_entry(regs))
		ret = -1L;

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->orig_ax);

	do_audit_syscall_entry(regs, arch);

	return ret ?: regs->orig_ax;
}

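/*
 * Phase 1 handles the common cases (seccomp-only, audit-only) without
 * invoking the full ptrace/tracepoint machinery; e.g. a task with only
 * _TIF_SYSCALL_AUDIT set is audited in phase 1 and never reaches
 * phase 2. Anything phase 1 can't finish falls through to phase 2.
 */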
long syscall_trace_enter(struct pt_regs *regs)
{
	u32 arch = in_ia32_syscall() ? AUDIT_ARCH_I386 : AUDIT_ARCH_X86_64;
	unsigned long phase1_result = syscall_trace_enter_phase1(regs, arch);

	if (phase1_result == 0)
		return regs->orig_ax;
	else
		return syscall_trace_enter_phase2(regs, arch, phase1_result);
}

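/*
 * Work flags that can be raised again at any point while we handle
 * them with IRQs on (see the comment in exit_to_usermode_loop()), so
 * the loop below re-reads ti->flags after each pass.
 */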
#define EXIT_TO_USERMODE_LOOP_FLAGS				\
	(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE |	\
	 _TIF_NEED_RESCHED | _TIF_USER_RETURN_NOTIFY)

static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
{
	/*
	 * In order to return to user mode, we need to have IRQs off with
	 * none of _TIF_SIGPENDING, _TIF_NOTIFY_RESUME, _TIF_USER_RETURN_NOTIFY,
	 * _TIF_UPROBE, or _TIF_NEED_RESCHED set. Several of these flags
	 * can be set at any time on preemptible kernels if we have IRQs on,
	 * so we need to loop. Disabling preemption wouldn't help: doing the
	 * work to clear some of the flags can sleep.
	 */
	while (true) {
		/* We have work to do. */
		local_irq_enable();

		if (cached_flags & _TIF_NEED_RESCHED)
			schedule();

		if (cached_flags & _TIF_UPROBE)
			uprobe_notify_resume(regs);

		/* Deal with pending signal delivery. */
		if (cached_flags & _TIF_SIGPENDING)
			do_signal(regs);

		if (cached_flags & _TIF_NOTIFY_RESUME) {
			clear_thread_flag(TIF_NOTIFY_RESUME);
			tracehook_notify_resume(regs);
		}

		if (cached_flags & _TIF_USER_RETURN_NOTIFY)
			fire_user_return_notifiers();

		/* Disable IRQs and retry */
		local_irq_disable();

		cached_flags = READ_ONCE(pt_regs_to_thread_info(regs)->flags);

		if (!(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS))
			break;
	}
}

/* Called with IRQs disabled. */
__visible inline void prepare_exit_to_usermode(struct pt_regs *regs)
{
	struct thread_info *ti = pt_regs_to_thread_info(regs);
	u32 cached_flags;

	if (IS_ENABLED(CONFIG_PROVE_LOCKING) && WARN_ON(!irqs_disabled()))
		local_irq_disable();

	lockdep_sys_exit();

	cached_flags = READ_ONCE(ti->flags);

	if (unlikely(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS))
		exit_to_usermode_loop(regs, cached_flags);

#ifdef CONFIG_COMPAT
	/*
	 * Compat syscalls set TS_COMPAT. Make sure we clear it before
	 * returning to user mode. We need to clear it *after* signal
	 * handling, because syscall restart has a fixup for compat
	 * syscalls. The fixup is exercised by the ptrace_syscall_32
	 * selftest.
	 *
	 * We also need to clear TS_I386_REGS_POKED: the 32-bit tracer
	 * special case only applies after poking regs and before the
	 * very next return to user mode.
	 */
	ti->status &= ~(TS_COMPAT|TS_I386_REGS_POKED);
#endif

	user_enter_irqoff();
}

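/*
 * One-time syscall-exit work; unlike EXIT_TO_USERMODE_LOOP_FLAGS, these
 * items run exactly once per syscall exit, with IRQs on, before the
 * exit-to-usermode loop.
 */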
#define SYSCALL_EXIT_WORK_FLAGS				\
	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT |	\
	 _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT)

static void syscall_slow_exit_work(struct pt_regs *regs, u32 cached_flags)
{
	bool step;

	audit_syscall_exit(regs);

	if (cached_flags & _TIF_SYSCALL_TRACEPOINT)
		trace_sys_exit(regs, regs->ax);

	/*
	 * If TIF_SYSCALL_EMU is set, we only get here because of
	 * TIF_SINGLESTEP (i.e. this is PTRACE_SYSEMU_SINGLESTEP).
	 * We already reported this syscall instruction in
	 * syscall_trace_enter().
	 */
	step = unlikely(
		(cached_flags & (_TIF_SINGLESTEP | _TIF_SYSCALL_EMU))
		== _TIF_SINGLESTEP);
	if (step || cached_flags & _TIF_SYSCALL_TRACE)
		tracehook_report_syscall_exit(regs, step);
}

/*
 * Called with IRQs on and fully valid regs. Returns with IRQs off in a
 * state such that we can immediately switch to user mode.
 */
__visible inline void syscall_return_slowpath(struct pt_regs *regs)
{
	struct thread_info *ti = pt_regs_to_thread_info(regs);
	u32 cached_flags = READ_ONCE(ti->flags);

	CT_WARN_ON(ct_state() != CONTEXT_KERNEL);

	if (IS_ENABLED(CONFIG_PROVE_LOCKING) &&
	    WARN(irqs_disabled(), "syscall %ld left IRQs disabled", regs->orig_ax))
		local_irq_enable();

	/*
	 * First do one-time work. If these work items are enabled, we
	 * want to run them exactly once per syscall exit with IRQs on.
	 */
	if (unlikely(cached_flags & SYSCALL_EXIT_WORK_FLAGS))
		syscall_slow_exit_work(regs, cached_flags);

	local_irq_disable();
	prepare_exit_to_usermode(regs);
}

#ifdef CONFIG_X86_64
__visible void do_syscall_64(struct pt_regs *regs)
{
	struct thread_info *ti = pt_regs_to_thread_info(regs);
	unsigned long nr = regs->orig_ax;

	enter_from_user_mode();
	local_irq_enable();

	if (READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY)
		nr = syscall_trace_enter(regs);

	/*
	 * NB: Native and x32 syscalls are dispatched from the same
	 * table. The only functional difference is the x32 bit in
	 * regs->orig_ax, which changes the behavior of some syscalls.
	 */
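	/*
	 * With CONFIG_X86_X32_ABI, __SYSCALL_MASK clears
	 * __X32_SYSCALL_BIT (bit 30), so e.g. an x32 and a native
	 * read(2) index the same table entry; without x32, the mask
	 * is ~0 and this is a plain range check.
	 */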
	if (likely((nr & __SYSCALL_MASK) < NR_syscalls)) {
		regs->ax = sys_call_table[nr & __SYSCALL_MASK](
			regs->di, regs->si, regs->dx,
			regs->r10, regs->r8, regs->r9);
	}

	syscall_return_slowpath(regs);
}
#endif

#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
/*
 * Does a 32-bit syscall. Called with IRQs on in CONTEXT_KERNEL. Does
 * all entry and exit work and returns with IRQs off. This function is
 * extremely hot in workloads that use it, and it's usually called from
 * do_fast_syscall_32, so forcibly inline it to improve performance.
 */
static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs)
{
	struct thread_info *ti = pt_regs_to_thread_info(regs);
	unsigned int nr = (unsigned int)regs->orig_ax;

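	/*
	 * Mark the task as being in a compat syscall so that, e.g.,
	 * syscall restart and seccomp see the 32-bit ABI; cleared again
	 * in prepare_exit_to_usermode() above.
	 */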
#ifdef CONFIG_IA32_EMULATION
	ti->status |= TS_COMPAT;
#endif

	if (READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY) {
		/*
		 * Subtlety here: if ptrace pokes something larger than
		 * 2^32-1 into orig_ax, this truncates it. This may or
		 * may not be necessary, but it matches the old asm
		 * behavior.
		 */
		nr = syscall_trace_enter(regs);
	}

	if (likely(nr < IA32_NR_syscalls)) {
		/*
		 * It's possible that a 32-bit syscall implementation
		 * takes a 64-bit parameter but nonetheless assumes that
		 * the high bits are zero. Make sure we zero-extend all
		 * of the args.
		 */
		regs->ax = ia32_sys_call_table[nr](
			(unsigned int)regs->bx, (unsigned int)regs->cx,
			(unsigned int)regs->dx, (unsigned int)regs->si,
			(unsigned int)regs->di, (unsigned int)regs->bp);
	}

	syscall_return_slowpath(regs);
}

/* Handles int $0x80 */
__visible void do_int80_syscall_32(struct pt_regs *regs)
{
	enter_from_user_mode();
	local_irq_enable();
	do_syscall_32_irqs_on(regs);
}

/* Returns 0 to return using IRET or 1 to return using SYSEXIT/SYSRETL. */
__visible long do_fast_syscall_32(struct pt_regs *regs)
{
	/*
	 * Called using the internal vDSO SYSENTER/SYSCALL32 calling
	 * convention. Adjust regs so it looks like we entered using int80.
	 */

	unsigned long landing_pad = (unsigned long)current->mm->context.vdso +
		vdso_image_32.sym_int80_landing_pad;

	/*
	 * SYSENTER loses EIP, and even SYSCALL32 needs us to skip forward
	 * so that 'regs->ip -= 2' lands back on an int $0x80 instruction.
	 * Fix it up.
	 */
	regs->ip = landing_pad;

	enter_from_user_mode();

	local_irq_enable();

	/* Fetch EBP from where the vDSO stashed it. */
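	/*
	 * (The vDSO's __kernel_vsyscall pushes the user's %ebp -- the
	 * sixth syscall argument -- onto the user stack before entering
	 * the kernel, so regs->sp still points at the saved value.)
	 */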
	if (
#ifdef CONFIG_X86_64
		/*
		 * Micro-optimization: the pointer we're following is explicitly
		 * 32 bits, so it can't be out of range.
		 */
		__get_user(*(u32 *)&regs->bp,
			   (u32 __user __force *)(unsigned long)(u32)regs->sp)
#else
		get_user(*(u32 *)&regs->bp,
			 (u32 __user __force *)(unsigned long)(u32)regs->sp)
#endif
		) {

		/* User code screwed up. */
		local_irq_disable();
		regs->ax = -EFAULT;
		prepare_exit_to_usermode(regs);
		return 0;	/* Keep it simple: use IRET. */
	}

	/* Now this is just like a normal syscall. */
	do_syscall_32_irqs_on(regs);

#ifdef CONFIG_X86_64
	/*
	 * Opportunistic SYSRETL: if possible, try to return using SYSRETL.
	 * SYSRETL is available on all 64-bit CPUs, so we don't need to
	 * bother with SYSEXIT.
	 *
	 * Unlike 64-bit opportunistic SYSRET, we can't check that CX == IP,
	 * because the ECX fixup above will ensure that this is essentially
	 * never the case.
	 */
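	/*
	 * RF and TF force the IRET path: SYSRET can't restore RF, and
	 * restoring TF would trap immediately after the return, which
	 * could loop if #DB hits with SYSRET-compatible register state.
	 */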
	return regs->cs == __USER32_CS && regs->ss == __USER_DS &&
		regs->ip == landing_pad &&
		(regs->flags & (X86_EFLAGS_RF | X86_EFLAGS_TF)) == 0;
#else
	/*
	 * Opportunistic SYSEXIT: if possible, try to return using SYSEXIT.
	 *
	 * Unlike 64-bit opportunistic SYSRET, we can't check that CX == IP,
	 * because the ECX fixup above will ensure that this is essentially
	 * never the case.
	 *
	 * We don't allow syscalls at all from VM86 mode, but we still
	 * need to check VM, because we might be returning from sys_vm86.
	 */
	return static_cpu_has(X86_FEATURE_SEP) &&
		regs->cs == __USER_CS && regs->ss == __USER_DS &&
		regs->ip == landing_pad &&
		(regs->flags & (X86_EFLAGS_RF | X86_EFLAGS_TF | X86_EFLAGS_VM)) == 0;
#endif
}
#endif