/*
 * common.c - C code for kernel entry and exit
 * Copyright (c) 2015 Andrew Lutomirski
 * GPL v2
 *
 * Based on asm and ptrace code by many authors. The code here originated
 * in ptrace.c and signal.c.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/tracehook.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/signal.h>
#include <linux/export.h>
#include <linux/context_tracking.h>
#include <linux/user-return-notifier.h>
#include <linux/uprobes.h>

#include <asm/desc.h>
#include <asm/traps.h>
#include <asm/vdso.h>
#include <asm/uaccess.h>
#include <asm/cpufeature.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

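/*
 * On this kernel, pt_regs lives at the top of the kernel stack (just
 * below TOP_OF_KERNEL_STACK_PADDING) and thread_info lives at the
 * bottom of the same THREAD_SIZE-aligned stack, so one can be
 * recovered from the other with plain arithmetic.
 */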
static struct thread_info *pt_regs_to_thread_info(struct pt_regs *regs)
{
	unsigned long top_of_stack =
		(unsigned long)(regs + 1) + TOP_OF_KERNEL_STACK_PADDING;
	return (struct thread_info *)(top_of_stack - THREAD_SIZE);
}

#ifdef CONFIG_CONTEXT_TRACKING
/* Called on entry from user mode with IRQs off. */
__visible void enter_from_user_mode(void)
{
	CT_WARN_ON(ct_state() != CONTEXT_USER);
	user_exit();
}
#else
static inline void enter_from_user_mode(void) {}
#endif

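/*
 * The audit record only captures the syscall nr plus the first four
 * arguments, hence four registers per ABI below: x86_64 passes args in
 * di, si, dx, r10 (then r8, r9); the i386 ABI uses bx, cx, dx, si
 * (then di, bp).
 */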
static void do_audit_syscall_entry(struct pt_regs *regs, u32 arch)
{
#ifdef CONFIG_X86_64
	if (arch == AUDIT_ARCH_X86_64) {
		audit_syscall_entry(regs->orig_ax, regs->di,
				    regs->si, regs->dx, regs->r10);
	} else
#endif
	{
		audit_syscall_entry(regs->orig_ax, regs->bx,
				    regs->cx, regs->dx, regs->si);
	}
}

/*
 * Returns the syscall nr to run (which should match regs->orig_ax) or -1
 * to skip the syscall.
 */
static long syscall_trace_enter(struct pt_regs *regs)
{
	u32 arch = in_ia32_syscall() ? AUDIT_ARCH_I386 : AUDIT_ARCH_X86_64;

	struct thread_info *ti = pt_regs_to_thread_info(regs);
	unsigned long ret = 0;
	u32 work;

	if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
		BUG_ON(regs != task_pt_regs(current));

	work = ACCESS_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY;

#ifdef CONFIG_SECCOMP
	/*
	 * Do seccomp first -- it should minimize exposure of other
	 * code, and keeping seccomp fast is probably more valuable
	 * than the rest of this.
	 */
	if (work & _TIF_SECCOMP) {
		struct seccomp_data sd;

		sd.arch = arch;
		sd.nr = regs->orig_ax;
		sd.instruction_pointer = regs->ip;
#ifdef CONFIG_X86_64
		if (arch == AUDIT_ARCH_X86_64) {
			sd.args[0] = regs->di;
			sd.args[1] = regs->si;
			sd.args[2] = regs->dx;
			sd.args[3] = regs->r10;
			sd.args[4] = regs->r8;
			sd.args[5] = regs->r9;
		} else
#endif
		{
			sd.args[0] = regs->bx;
			sd.args[1] = regs->cx;
			sd.args[2] = regs->dx;
			sd.args[3] = regs->si;
			sd.args[4] = regs->di;
			sd.args[5] = regs->bp;
		}

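		/*
		 * __secure_computing() returns 0 when the filter allows the
		 * syscall; -1 means it was trapped, errored, or killed and
		 * the syscall must be skipped.
		 */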
		ret = __secure_computing(&sd);
		if (ret == -1)
			return ret;
	}
#endif

	if (unlikely(work & _TIF_SYSCALL_EMU))
		ret = -1L;

	if ((ret || test_thread_flag(TIF_SYSCALL_TRACE)) &&
	    tracehook_report_syscall_entry(regs))
		ret = -1L;

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->orig_ax);

	do_audit_syscall_entry(regs, arch);

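	/* GCC's "x ?: y" evaluates to x if x is nonzero, else to y. */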
	return ret ?: regs->orig_ax;
}

#define EXIT_TO_USERMODE_LOOP_FLAGS				\
	(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE |	\
	 _TIF_NEED_RESCHED | _TIF_USER_RETURN_NOTIFY)

static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
{
	/*
	 * In order to return to user mode, we need to have IRQs off with
	 * none of _TIF_SIGPENDING, _TIF_NOTIFY_RESUME, _TIF_USER_RETURN_NOTIFY,
	 * _TIF_UPROBE, or _TIF_NEED_RESCHED set. Several of these flags
	 * can be set at any time on preemptable kernels if we have IRQs on,
	 * so we need to loop. Disabling preemption wouldn't help: doing the
	 * work to clear some of the flags can sleep.
	 */
	while (true) {
		/* We have work to do. */
		local_irq_enable();

		if (cached_flags & _TIF_NEED_RESCHED)
			schedule();

		if (cached_flags & _TIF_UPROBE)
			uprobe_notify_resume(regs);

		/* deal with pending signal delivery */
		if (cached_flags & _TIF_SIGPENDING)
			do_signal(regs);

		if (cached_flags & _TIF_NOTIFY_RESUME) {
			clear_thread_flag(TIF_NOTIFY_RESUME);
			tracehook_notify_resume(regs);
		}

		if (cached_flags & _TIF_USER_RETURN_NOTIFY)
			fire_user_return_notifiers();

		/* Disable IRQs and retry */
		local_irq_disable();

		cached_flags = READ_ONCE(pt_regs_to_thread_info(regs)->flags);

		if (!(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS))
			break;
	}
}

/* Called with IRQs disabled. */
__visible inline void prepare_exit_to_usermode(struct pt_regs *regs)
{
	struct thread_info *ti = pt_regs_to_thread_info(regs);
	u32 cached_flags;

	if (IS_ENABLED(CONFIG_PROVE_LOCKING) && WARN_ON(!irqs_disabled()))
		local_irq_disable();

	lockdep_sys_exit();

	cached_flags = READ_ONCE(ti->flags);

	if (unlikely(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS))
		exit_to_usermode_loop(regs, cached_flags);

#ifdef CONFIG_COMPAT
	/*
	 * Compat syscalls set TS_COMPAT. Make sure we clear it before
	 * returning to user mode. We need to clear it *after* signal
	 * handling, because syscall restart has a fixup for compat
	 * syscalls. The fixup is exercised by the ptrace_syscall_32
	 * selftest.
	 */
	ti->status &= ~TS_COMPAT;
#endif

	user_enter();
}

#define SYSCALL_EXIT_WORK_FLAGS				\
	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT |	\
	 _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT)

static void syscall_slow_exit_work(struct pt_regs *regs, u32 cached_flags)
{
	bool step;

	audit_syscall_exit(regs);

	if (cached_flags & _TIF_SYSCALL_TRACEPOINT)
		trace_sys_exit(regs, regs->ax);

	/*
	 * If TIF_SYSCALL_EMU is set, we only get here because of
	 * TIF_SINGLESTEP (i.e. this is PTRACE_SYSEMU_SINGLESTEP).
	 * We already reported this syscall instruction in
	 * syscall_trace_enter().
	 */
	step = unlikely(
		(cached_flags & (_TIF_SINGLESTEP | _TIF_SYSCALL_EMU))
		== _TIF_SINGLESTEP);
	if (step || cached_flags & _TIF_SYSCALL_TRACE)
		tracehook_report_syscall_exit(regs, step);
}

/*
 * Called with IRQs on and fully valid regs. Returns with IRQs off in a
 * state such that we can immediately switch to user mode.
 */
__visible inline void syscall_return_slowpath(struct pt_regs *regs)
{
	struct thread_info *ti = pt_regs_to_thread_info(regs);
	u32 cached_flags = READ_ONCE(ti->flags);

	CT_WARN_ON(ct_state() != CONTEXT_KERNEL);

	if (IS_ENABLED(CONFIG_PROVE_LOCKING) &&
	    WARN(irqs_disabled(), "syscall %ld left IRQs disabled", regs->orig_ax))
		local_irq_enable();

	/*
	 * First do one-time work. If these work items are enabled, we
	 * want to run them exactly once per syscall exit with IRQs on.
	 */
	if (unlikely(cached_flags & SYSCALL_EXIT_WORK_FLAGS))
		syscall_slow_exit_work(regs, cached_flags);

	local_irq_disable();
	prepare_exit_to_usermode(regs);
}

#ifdef CONFIG_X86_64
__visible void do_syscall_64(struct pt_regs *regs)
{
	struct thread_info *ti = pt_regs_to_thread_info(regs);
	unsigned long nr = regs->orig_ax;

	enter_from_user_mode();
	local_irq_enable();

	if (READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY)
		nr = syscall_trace_enter(regs);

	/*
	 * NB: Native and x32 syscalls are dispatched from the same
	 * table. The only functional difference is the x32 bit in
	 * regs->orig_ax, which changes the behavior of some syscalls.
	 */
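	/*
	 * Illustrative example (not from this file): an x32 write() arrives
	 * as nr == (__X32_SYSCALL_BIT | 1).  With CONFIG_X86_X32_ABI,
	 * __SYSCALL_MASK is ~__X32_SYSCALL_BIT, so the masking below strips
	 * the bit before indexing the table; without x32, __SYSCALL_MASK is
	 * ~0 and the masking is a no-op.
	 */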
	if (likely((nr & __SYSCALL_MASK) < NR_syscalls)) {
		regs->ax = sys_call_table[nr & __SYSCALL_MASK](
			regs->di, regs->si, regs->dx,
			regs->r10, regs->r8, regs->r9);
	}

	syscall_return_slowpath(regs);
}
#endif

#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
/*
 * Does a 32-bit syscall. Called with IRQs on in CONTEXT_KERNEL. Does
 * all entry and exit work and returns with IRQs off. This function is
 * extremely hot in workloads that use it, and it's usually called from
 * do_fast_syscall_32, so forcibly inline it to improve performance.
 */
static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs)
{
	struct thread_info *ti = pt_regs_to_thread_info(regs);
	unsigned int nr = (unsigned int)regs->orig_ax;

#ifdef CONFIG_IA32_EMULATION
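	/*
	 * Mark the task as being in a compat syscall so that
	 * in_ia32_syscall() reports true; prepare_exit_to_usermode()
	 * clears the flag again before we return to user mode.
	 */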
	ti->status |= TS_COMPAT;
#endif

	if (READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY) {
		/*
		 * Subtlety here: if ptrace pokes something larger than
		 * 2^32-1 into orig_ax, this truncates it. This may or
		 * may not be necessary, but it matches the old asm
		 * behavior.
		 */
		nr = syscall_trace_enter(regs);
	}

	if (likely(nr < IA32_NR_syscalls)) {
		/*
		 * It's possible that a 32-bit syscall implementation
		 * takes a 64-bit parameter but nonetheless assumes that
		 * the high bits are zero. Make sure we zero-extend all
		 * of the args.
		 */
		regs->ax = ia32_sys_call_table[nr](
			(unsigned int)regs->bx, (unsigned int)regs->cx,
			(unsigned int)regs->dx, (unsigned int)regs->si,
			(unsigned int)regs->di, (unsigned int)regs->bp);
	}

	syscall_return_slowpath(regs);
}

/* Handles int $0x80 */
__visible void do_int80_syscall_32(struct pt_regs *regs)
{
	enter_from_user_mode();
	local_irq_enable();
	do_syscall_32_irqs_on(regs);
}

/* Returns 0 to return using IRET or 1 to return using SYSEXIT/SYSRETL. */
__visible long do_fast_syscall_32(struct pt_regs *regs)
{
	/*
	 * Called using the internal vDSO SYSENTER/SYSCALL32 calling
	 * convention. Adjust regs so it looks like we entered using int80.
	 */

	unsigned long landing_pad = (unsigned long)current->mm->context.vdso +
		vdso_image_32.sym_int80_landing_pad;

	/*
	 * SYSENTER loses EIP, and even SYSCALL32 needs us to skip forward
	 * so that 'regs->ip -= 2' lands back on an int $0x80 instruction.
	 * Fix it up.
	 */
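	/*
	 * For reference, a sketch of the 32-bit vDSO entry stub (see
	 * arch/x86/entry/vdso/vdso32/system_call.S for the real thing):
	 * __kernel_vsyscall pushes %ecx, %edx and %ebp, then runs SYSENTER
	 * (or SYSCALL on AMD), with an int $0x80 instruction immediately
	 * before the int80_landing_pad symbol.  Pointing regs->ip at the
	 * landing pad therefore makes the generic "regs->ip -= 2 and retry"
	 * restart logic re-execute that int $0x80.
	 */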
357 regs->ip = landing_pad;
358
Andy Lutomirski9999c8c02016-03-09 13:24:33 -0800359 enter_from_user_mode();
360
Andy Lutomirski710246d2015-10-05 17:48:10 -0700361 local_irq_enable();
Andy Lutomirski9999c8c02016-03-09 13:24:33 -0800362
363 /* Fetch EBP from where the vDSO stashed it. */
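	/*
	 * The sixth 32-bit syscall argument normally lives in %ebp, but the
	 * vDSO stub pushed the caller's %ebp last, so the real arg6 is the
	 * word at the top of the user stack that regs->sp points to.
	 */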
	if (
#ifdef CONFIG_X86_64
		/*
		 * Micro-optimization: the pointer we're following is explicitly
		 * 32 bits, so it can't be out of range.
		 */
		__get_user(*(u32 *)&regs->bp,
			   (u32 __user __force *)(unsigned long)(u32)regs->sp)
#else
		get_user(*(u32 *)&regs->bp,
			 (u32 __user __force *)(unsigned long)(u32)regs->sp)
#endif
	   ) {

		/* User code screwed up. */
		local_irq_disable();
		regs->ax = -EFAULT;
		prepare_exit_to_usermode(regs);
		return 0;	/* Keep it simple: use IRET. */
	}

	/* Now this is just like a normal syscall. */
	do_syscall_32_irqs_on(regs);

#ifdef CONFIG_X86_64
	/*
	 * Opportunistic SYSRETL: if possible, try to return using SYSRETL.
	 * SYSRETL is available on all 64-bit CPUs, so we don't need to
	 * bother with SYSEXIT.
	 *
	 * Unlike 64-bit opportunistic SYSRET, we can't check that CX == IP,
	 * because the ECX fixup above will ensure that this is essentially
	 * never the case.
	 */
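	/*
	 * RF and TF force the IRET path: per the opportunistic-SYSRET
	 * notes in entry_64.S, SYSRET can't restore RF, and restoring TF
	 * triggers the resulting trap differently than IRET would.
	 */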
	return regs->cs == __USER32_CS && regs->ss == __USER_DS &&
		regs->ip == landing_pad &&
		(regs->flags & (X86_EFLAGS_RF | X86_EFLAGS_TF)) == 0;
#else
	/*
	 * Opportunistic SYSEXIT: if possible, try to return using SYSEXIT.
	 *
	 * Unlike 64-bit opportunistic SYSRET, we can't check that CX == IP,
	 * because the ECX fixup above will ensure that this is essentially
	 * never the case.
	 *
	 * We don't allow syscalls at all from VM86 mode, but we still
	 * need to check VM, because we might be returning from sys_vm86.
	 */
	return static_cpu_has(X86_FEATURE_SEP) &&
		regs->cs == __USER_CS && regs->ss == __USER_DS &&
		regs->ip == landing_pad &&
		(regs->flags & (X86_EFLAGS_RF | X86_EFLAGS_TF | X86_EFLAGS_VM)) == 0;
#endif
}
#endif