/*
 * Copyright (C) 1991, 1992  Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 *
 * Pentium III FXSR, SSE support
 * Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * Handle hardware traps and faults.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/context_tracking.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/spinlock.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/kgdb.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/uprobes.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kexec.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/bug.h>
#include <linux/nmi.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/io.h>

#ifdef CONFIG_EISA
#include <linux/ioport.h>
#include <linux/eisa.h>
#endif

#if defined(CONFIG_EDAC)
#include <linux/edac.h>
#endif

#include <asm/kmemcheck.h>
#include <asm/stacktrace.h>
#include <asm/processor.h>
#include <asm/debugreg.h>
#include <linux/atomic.h>
#include <asm/ftrace.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/fpu/internal.h>
#include <asm/mce.h>
#include <asm/fixmap.h>
#include <asm/mach_traps.h>
#include <asm/alternative.h>
#include <asm/fpu/xstate.h>
#include <asm/trace/mpx.h>
#include <asm/mpx.h>
#include <asm/vm86.h>

#ifdef CONFIG_X86_64
#include <asm/x86_init.h>
#include <asm/pgalloc.h>
#include <asm/proto.h>

/* No need to be aligned, but done to keep all IDTs defined the same way. */
gate_desc debug_idt_table[NR_VECTORS] __page_aligned_bss;
#else
#include <asm/processor-flags.h>
#include <asm/setup.h>
#include <asm/proto.h>
#endif

/* Must be page-aligned because the real IDT is used in a fixmap. */
gate_desc idt_table[NR_VECTORS] __page_aligned_bss;

DECLARE_BITMAP(used_vectors, NR_VECTORS);
EXPORT_SYMBOL_GPL(used_vectors);

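/*
 * Mirror the interrupted context: only re-enable (or re-disable) IRQs
 * if they were enabled when the trap hit, as recorded in regs->flags.
 */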
static inline void cond_local_irq_enable(struct pt_regs *regs)
{
        if (regs->flags & X86_EFLAGS_IF)
                local_irq_enable();
}

static inline void cond_local_irq_disable(struct pt_regs *regs)
{
        if (regs->flags & X86_EFLAGS_IF)
                local_irq_disable();
}

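/*
 * Entry bookkeeping for exceptions that run on an IST stack: make sure
 * RCU is watching and mark the context as atomic until ist_exit().
 */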
void ist_enter(struct pt_regs *regs)
{
        if (user_mode(regs)) {
                RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
        } else {
                /*
                 * We might have interrupted pretty much anything.  In
                 * fact, if we're a machine check, we can even interrupt
                 * NMI processing.  We don't want in_nmi() to return true,
                 * but we need to notify RCU.
                 */
                rcu_nmi_enter();
        }

        /*
         * We are atomic because we're on the IST stack; or we're on
         * x86_32, in which case we still shouldn't schedule; or we're
         * on x86_64 and entered from user mode, in which case we're
         * still atomic unless ist_begin_non_atomic is called.
         */
        preempt_count_add(HARDIRQ_OFFSET);

        /* This code is a bit fragile.  Test it. */
        RCU_LOCKDEP_WARN(!rcu_is_watching(), "ist_enter didn't work");
}

void ist_exit(struct pt_regs *regs)
{
        preempt_count_sub(HARDIRQ_OFFSET);

        if (!user_mode(regs))
                rcu_nmi_exit();
}

/**
 * ist_begin_non_atomic() - begin a non-atomic section in an IST exception
 * @regs:       regs passed to the IST exception handler
 *
 * IST exception handlers normally cannot schedule.  As a special
 * exception, if the exception interrupted userspace code (i.e.
 * user_mode(regs) would return true) and the exception was not
 * a double fault, it can be safe to schedule.  ist_begin_non_atomic()
 * begins a non-atomic section within an ist_enter()/ist_exit() region.
 * Callers are responsible for enabling interrupts themselves inside
 * the non-atomic section, and callers must call ist_end_non_atomic()
 * before ist_exit().
 */
void ist_begin_non_atomic(struct pt_regs *regs)
{
        BUG_ON(!user_mode(regs));

        /*
         * Sanity check: we need to be on the normal thread stack.  This
         * will catch asm bugs and any attempt to use ist_preempt_enable
         * from double_fault.
         */
        BUG_ON((unsigned long)(current_top_of_stack() -
                               current_stack_pointer()) >= THREAD_SIZE);

        preempt_count_sub(HARDIRQ_OFFSET);
}

/**
 * ist_end_non_atomic() - end a non-atomic section in an IST exception
 *
 * Ends a non-atomic section started with ist_begin_non_atomic().
 */
void ist_end_non_atomic(void)
{
        preempt_count_add(HARDIRQ_OFFSET);
}

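/*
 * Returns 0 if the trap was fully handled here (or the task was killed
 * via die()) and no signal should be sent; returns -1 if the caller
 * should go on to deliver a signal.
 */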
static nokprobe_inline int
do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
                  struct pt_regs *regs, long error_code)
{
        if (v8086_mode(regs)) {
                /*
                 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
                 * On nmi (interrupt 2), do_trap should not be called.
                 */
                if (trapnr < X86_TRAP_UD) {
                        if (!handle_vm86_trap((struct kernel_vm86_regs *) regs,
                                              error_code, trapnr))
                                return 0;
                }
                return -1;
        }

        if (!user_mode(regs)) {
                if (!fixup_exception(regs)) {
                        tsk->thread.error_code = error_code;
                        tsk->thread.trap_nr = trapnr;
                        die(str, regs, error_code);
                }
                return 0;
        }

        return -1;
}

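/*
 * Build the siginfo for traps that report a meaningful fault address or
 * si_code (#DE, #UD, #AC); everything else falls back to SEND_SIG_PRIV.
 */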
static siginfo_t *fill_trap_info(struct pt_regs *regs, int signr, int trapnr,
                                 siginfo_t *info)
{
        unsigned long siaddr;
        int sicode;

        switch (trapnr) {
        default:
                return SEND_SIG_PRIV;

        case X86_TRAP_DE:
                sicode = FPE_INTDIV;
                siaddr = uprobe_get_trap_addr(regs);
                break;
        case X86_TRAP_UD:
                sicode = ILL_ILLOPN;
                siaddr = uprobe_get_trap_addr(regs);
                break;
        case X86_TRAP_AC:
                sicode = BUS_ADRALN;
                siaddr = 0;
                break;
        }

        info->si_signo = signr;
        info->si_errno = 0;
        info->si_code = sicode;
        info->si_addr = (void __user *)siaddr;
        return info;
}

static void
do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
        long error_code, siginfo_t *info)
{
        struct task_struct *tsk = current;

        if (!do_trap_no_signal(tsk, trapnr, str, regs, error_code))
                return;
        /*
         * We want error_code and trap_nr set for userspace faults and
         * kernelspace faults which result in die(), but not
         * kernelspace faults which are fixed up.  die() gives the
         * process no chance to handle the signal and notice the
         * kernel fault information, so that won't result in polluting
         * the information about previously queued, but not yet
         * delivered, faults.  See also do_general_protection below.
         */
        tsk->thread.error_code = error_code;
        tsk->thread.trap_nr = trapnr;

#ifdef CONFIG_X86_64
        if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
            printk_ratelimit()) {
                pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
                        tsk->comm, tsk->pid, str,
                        regs->ip, regs->sp, error_code);
                print_vma_addr(" in ", regs->ip);
                pr_cont("\n");
        }
#endif

        force_sig_info(signr, info ?: SEND_SIG_PRIV, tsk);
}
NOKPROBE_SYMBOL(do_trap);

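/*
 * Common body for the DO_ERROR() trap entry points below: give
 * notify_die() a chance to veto, then deliver the trap with the
 * siginfo filled in by fill_trap_info().
 */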
static void do_error_trap(struct pt_regs *regs, long error_code, char *str,
                          unsigned long trapnr, int signr)
{
        siginfo_t info;

        RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");

        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) !=
                        NOTIFY_STOP) {
                cond_local_irq_enable(regs);
                do_trap(trapnr, signr, str, regs, error_code,
                        fill_trap_info(regs, signr, trapnr, &info));
        }
}

#define DO_ERROR(trapnr, signr, str, name)                              \
dotraplinkage void do_##name(struct pt_regs *regs, long error_code)     \
{                                                                       \
        do_error_trap(regs, error_code, str, trapnr, signr);            \
}

DO_ERROR(X86_TRAP_DE,     SIGFPE,  "divide error",              divide_error)
DO_ERROR(X86_TRAP_OF,     SIGSEGV, "overflow",                  overflow)
DO_ERROR(X86_TRAP_UD,     SIGILL,  "invalid opcode",            invalid_op)
DO_ERROR(X86_TRAP_OLD_MF, SIGFPE,  "coprocessor segment overrun", coprocessor_segment_overrun)
DO_ERROR(X86_TRAP_TS,     SIGSEGV, "invalid TSS",               invalid_TSS)
DO_ERROR(X86_TRAP_NP,     SIGBUS,  "segment not present",       segment_not_present)
DO_ERROR(X86_TRAP_SS,     SIGBUS,  "stack segment",             stack_segment)
DO_ERROR(X86_TRAP_AC,     SIGBUS,  "alignment check",           alignment_check)

#ifdef CONFIG_X86_64
/* Runs on IST stack */
dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
{
        static const char str[] = "double fault";
        struct task_struct *tsk = current;

#ifdef CONFIG_X86_ESPFIX64
        extern unsigned char native_irq_return_iret[];

        /*
         * If IRET takes a non-IST fault on the espfix64 stack, then we
         * end up promoting it to a doublefault.  In that case, modify
         * the stack to make it look like we just entered the #GP
         * handler from user space, similar to bad_iret.
         *
         * No need for ist_enter here because we don't use RCU.
         */
        if (((long)regs->sp >> PGDIR_SHIFT) == ESPFIX_PGD_ENTRY &&
                regs->cs == __KERNEL_CS &&
                regs->ip == (unsigned long)native_irq_return_iret)
        {
                struct pt_regs *normal_regs = task_pt_regs(current);

                /* Fake a #GP(0) from userspace. */
                memmove(&normal_regs->ip, (void *)regs->sp, 5*8);
                normal_regs->orig_ax = 0;  /* Missing (lost) #GP error code */
                regs->ip = (unsigned long)general_protection;
                regs->sp = (unsigned long)&normal_regs->orig_ax;

                return;
        }
#endif

        ist_enter(regs);
        notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);

        tsk->thread.error_code = error_code;
        tsk->thread.trap_nr = X86_TRAP_DF;

#ifdef CONFIG_DOUBLEFAULT
        df_debug(regs, error_code);
#endif
        /*
         * This is always a kernel trap and never fixable (and thus must
         * never return).
         */
        for (;;)
                die(str, regs, error_code);
}
#endif

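/*
 * #BR (bound range exceeded): on MPX-capable hardware, decode BNDSTATUS
 * to tell a bound violation apart from a bound-directory fault; anything
 * we can't attribute to MPX is reported to userspace as a plain SIGSEGV.
 */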
dotraplinkage void do_bounds(struct pt_regs *regs, long error_code)
{
        const struct mpx_bndcsr *bndcsr;
        siginfo_t *info;

        RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
        if (notify_die(DIE_TRAP, "bounds", regs, error_code,
                        X86_TRAP_BR, SIGSEGV) == NOTIFY_STOP)
                return;
        cond_local_irq_enable(regs);

        if (!user_mode(regs))
                die("bounds", regs, error_code);

        if (!cpu_feature_enabled(X86_FEATURE_MPX)) {
                /* The exception is not from Intel MPX */
                goto exit_trap;
        }

        /*
         * We need to look at BNDSTATUS to resolve this exception.
         * A NULL here might mean that it is in its 'init state',
         * which is all zeros, which indicates MPX was not
         * responsible for the exception.
         */
        bndcsr = get_xsave_field_ptr(XFEATURE_MASK_BNDCSR);
        if (!bndcsr)
                goto exit_trap;

        trace_bounds_exception_mpx(bndcsr);
        /*
         * The error code field of the BNDSTATUS register communicates status
         * information of a bound range exception #BR or operation involving
         * bound directory.
         */
        switch (bndcsr->bndstatus & MPX_BNDSTA_ERROR_CODE) {
        case 2: /* Bound directory has invalid entry. */
                if (mpx_handle_bd_fault())
                        goto exit_trap;
                break; /* Success, it was handled */
        case 1: /* Bound violation. */
                info = mpx_generate_siginfo(regs);
                if (IS_ERR(info)) {
                        /*
                         * We failed to decode the MPX instruction.  Act as if
                         * the exception was not caused by MPX.
                         */
                        goto exit_trap;
                }
                /*
                 * Success, we decoded the instruction and retrieved
                 * an 'info' containing the address being accessed
                 * which caused the exception.  This information
                 * allows an application to possibly handle the
                 * #BR exception itself.
                 */
                do_trap(X86_TRAP_BR, SIGSEGV, "bounds", regs, error_code, info);
                kfree(info);
                break;
        case 0: /* No exception caused by Intel MPX operations. */
                goto exit_trap;
        default:
                die("bounds", regs, error_code);
        }

        return;

exit_trap:
        /*
         * This path out is for all the cases where we could not
         * handle the exception in some way (like allocating a
         * table or telling userspace about it).  We will also end
         * up here if the kernel has MPX turned off at compile
         * time.
         */
        do_trap(X86_TRAP_BR, SIGSEGV, "bounds", regs, error_code, NULL);
}

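/*
 * #GP covers a broad class of protection violations (bad segment
 * selectors, privileged instructions in user mode, and so on).
 * Kernel-mode faults get a chance at an exception fixup before die().
 */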
dotraplinkage void
do_general_protection(struct pt_regs *regs, long error_code)
{
        struct task_struct *tsk;

        RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
        cond_local_irq_enable(regs);

        if (v8086_mode(regs)) {
                local_irq_enable();
                handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
                return;
        }

        tsk = current;
        if (!user_mode(regs)) {
                if (fixup_exception(regs))
                        return;

                tsk->thread.error_code = error_code;
                tsk->thread.trap_nr = X86_TRAP_GP;
                if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
                               X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
                        die("general protection fault", regs, error_code);
                return;
        }

        tsk->thread.error_code = error_code;
        tsk->thread.trap_nr = X86_TRAP_GP;

        if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
                        printk_ratelimit()) {
                pr_info("%s[%d] general protection ip:%lx sp:%lx error:%lx",
                        tsk->comm, task_pid_nr(tsk),
                        regs->ip, regs->sp, error_code);
                print_vma_addr(" in ", regs->ip);
                pr_cont("\n");
        }

        force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk);
}
NOKPROBE_SYMBOL(do_general_protection);

/* May run on IST stack. */
dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code)
{
#ifdef CONFIG_DYNAMIC_FTRACE
        /*
         * ftrace must be first, everything else may cause a recursive crash.
         * See note by declaration of modifying_ftrace_code in ftrace.c
         */
        if (unlikely(atomic_read(&modifying_ftrace_code)) &&
            ftrace_int3_handler(regs))
                return;
#endif
        if (poke_int3_handler(regs))
                return;

        ist_enter(regs);
        RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
        if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
                                SIGTRAP) == NOTIFY_STOP)
                goto exit;
#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */

#ifdef CONFIG_KPROBES
        if (kprobe_int3_handler(regs))
                goto exit;
#endif

        if (notify_die(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
                        SIGTRAP) == NOTIFY_STOP)
                goto exit;

        /*
         * Let others (NMI) know that the debug stack is in use
         * as we may switch to the interrupt stack.
         */
        debug_stack_usage_inc();
        preempt_disable();
        cond_local_irq_enable(regs);
        do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL);
        cond_local_irq_disable(regs);
        preempt_enable_no_resched();
        debug_stack_usage_dec();
exit:
        ist_exit(regs);
}
NOKPROBE_SYMBOL(do_int3);

#ifdef CONFIG_X86_64
/*
 * Help handler running on IST stack to switch off the IST stack if the
 * interrupted code was in user mode. The actual stack switch is done in
 * entry_64.S
 */
asmlinkage __visible notrace struct pt_regs *sync_regs(struct pt_regs *eregs)
{
        struct pt_regs *regs = task_pt_regs(current);
        *regs = *eregs;
        return regs;
}
NOKPROBE_SYMBOL(sync_regs);

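/*
 * Stack layout that the entry code hands to fixup_bad_iret(): the return
 * address pushed by the call, immediately followed by the register frame.
 */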
struct bad_iret_stack {
        void *error_entry_ret;
        struct pt_regs regs;
};

asmlinkage __visible notrace
struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
{
        /*
         * This is called from entry_64.S early in handling a fault
         * caused by a bad iret to user mode.  To handle the fault
         * correctly, we want to move our stack frame to task_pt_regs
         * and we want to pretend that the exception came from the
         * iret target.
         */
        struct bad_iret_stack *new_stack =
                container_of(task_pt_regs(current),
                             struct bad_iret_stack, regs);

        /* Copy the IRET target to the new stack. */
        memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8);

        /* Copy the remainder of the stack from the current stack. */
        memmove(new_stack, s, offsetof(struct bad_iret_stack, regs.ip));

        BUG_ON(!user_mode(&new_stack->regs));
        return new_stack;
}
NOKPROBE_SYMBOL(fixup_bad_iret);
#endif

static bool is_sysenter_singlestep(struct pt_regs *regs)
{
        /*
         * We don't try for precision here.  If we're anywhere in the region of
         * code that can be single-stepped in the SYSENTER entry path, then
         * assume that this is a useless single-step trap due to SYSENTER
         * being invoked with TF set.  (We don't know in advance exactly
         * which instructions will be hit because BTF could plausibly
         * be set.)
         */
#ifdef CONFIG_X86_32
        return (regs->ip - (unsigned long)__begin_SYSENTER_singlestep_region) <
                (unsigned long)__end_SYSENTER_singlestep_region -
                (unsigned long)__begin_SYSENTER_singlestep_region;
#elif defined(CONFIG_IA32_EMULATION)
        return (regs->ip - (unsigned long)entry_SYSENTER_compat) <
                (unsigned long)__end_entry_SYSENTER_compat -
                (unsigned long)entry_SYSENTER_compat;
#else
        return false;
#endif
}

/*
 * Our handling of the processor debug registers is non-trivial.
 * We do not clear them on entry and exit from the kernel.  Therefore
 * it is possible to get a watchpoint trap here from inside the kernel.
 * However, the code in ./ptrace.c has ensured that the user can
 * only set watchpoints on userspace addresses.  Therefore the in-kernel
 * watchpoint trap can only occur in code which is reading/writing
 * from user space.  Such code must not hold kernel locks (since it
 * can equally take a page fault), therefore it is safe to call
 * force_sig_info even though that claims and releases locks.
 *
 * Code in ./signal.c ensures that the debug control register
 * is restored before we deliver any signal, and therefore that
 * user code runs with the correct debug control register even though
 * we clear it here.
 *
 * Being careful here means that we don't have to be as careful in a
 * lot of more complicated places (task switching can be a bit lazy
 * about restoring all the debug state, and ptrace doesn't have to
 * find every occurrence of the TF bit that could be saved away even
 * by user code)
 *
 * May run on IST stack.
 */
dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
{
        struct task_struct *tsk = current;
        int user_icebp = 0;
        unsigned long dr6;
        int si_code;

        ist_enter(regs);

        get_debugreg(dr6, 6);
        /*
         * The Intel SDM says:
         *
         *   Certain debug exceptions may clear bits 0-3. The remaining
         *   contents of the DR6 register are never cleared by the
         *   processor. To avoid confusion in identifying debug
         *   exceptions, debug handlers should clear the register before
         *   returning to the interrupted task.
         *
         * Keep it simple: clear DR6 immediately.
         */
        set_debugreg(0, 6);

        /* Filter out all the reserved bits which are preset to 1 */
        dr6 &= ~DR6_RESERVED;

        /*
         * The SDM says "The processor clears the BTF flag when it
         * generates a debug exception."  Clear TIF_BLOCKSTEP to keep
         * TIF_BLOCKSTEP in sync with the hardware BTF flag.
         */
        clear_tsk_thread_flag(tsk, TIF_BLOCKSTEP);

        if (unlikely(!user_mode(regs) && (dr6 & DR_STEP) &&
                     is_sysenter_singlestep(regs))) {
                dr6 &= ~DR_STEP;
                if (!dr6)
                        goto exit;
                /*
                 * else we might have gotten a single-step trap and hit a
                 * watchpoint at the same time, in which case we should fall
                 * through and handle the watchpoint.
                 */
        }

        /*
         * If dr6 has no reason to give us about the origin of this trap,
         * then it's very likely the result of an icebp/int01 trap.
         * User wants a sigtrap for that.
         */
        if (!dr6 && user_mode(regs))
                user_icebp = 1;

        /* Catch kmemcheck conditions! */
        if ((dr6 & DR_STEP) && kmemcheck_trap(regs))
                goto exit;

        /* Store the virtualized DR6 value */
        tsk->thread.debugreg6 = dr6;

#ifdef CONFIG_KPROBES
        if (kprobe_debug_handler(regs))
                goto exit;
#endif

        if (notify_die(DIE_DEBUG, "debug", regs, (long)&dr6, error_code,
                       SIGTRAP) == NOTIFY_STOP)
                goto exit;

        /*
         * Let others (NMI) know that the debug stack is in use
         * as we may switch to the interrupt stack.
         */
        debug_stack_usage_inc();

        /* It's safe to allow irq's after DR6 has been saved */
        preempt_disable();
        cond_local_irq_enable(regs);

        if (v8086_mode(regs)) {
                handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
                                 X86_TRAP_DB);
                cond_local_irq_disable(regs);
                preempt_enable_no_resched();
                debug_stack_usage_dec();
                goto exit;
        }

        if (WARN_ON_ONCE((dr6 & DR_STEP) && !user_mode(regs))) {
                /*
                 * Historical junk that used to handle SYSENTER single-stepping.
                 * This should be unreachable now.  If we survive for a while
                 * without anyone hitting this warning, we'll turn this into
                 * an oops.
                 */
                tsk->thread.debugreg6 &= ~DR_STEP;
                set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
                regs->flags &= ~X86_EFLAGS_TF;
        }
        si_code = get_si_code(tsk->thread.debugreg6);
        if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp)
                send_sigtrap(tsk, regs, error_code, si_code);
        cond_local_irq_disable(regs);
        preempt_enable_no_resched();
        debug_stack_usage_dec();

exit:
#if defined(CONFIG_X86_32)
        /*
         * This is the most likely code path that involves non-trivial use
         * of the SYSENTER stack.  Check that we haven't overrun it.
         */
        WARN(this_cpu_read(cpu_tss.SYSENTER_stack_canary) != STACK_END_MAGIC,
             "Overran or corrupted SYSENTER stack\n");
#endif
        ist_exit(regs);
}
NOKPROBE_SYMBOL(do_debug);

/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour
 */
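/*
 * Common body for do_coprocessor_error() (#MF) and
 * do_simd_coprocessor_error() (#XF): kernel faults go through the
 * exception fixups, user faults get a SIGFPE with a si_code derived
 * from the FPU status.
 */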
static void math_error(struct pt_regs *regs, int error_code, int trapnr)
{
        struct task_struct *task = current;
        struct fpu *fpu = &task->thread.fpu;
        siginfo_t info;
        char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" :
                                              "simd exception";

        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP)
                return;
        cond_local_irq_enable(regs);

        if (!user_mode(regs)) {
                if (!fixup_exception(regs)) {
                        task->thread.error_code = error_code;
                        task->thread.trap_nr = trapnr;
                        die(str, regs, error_code);
                }
                return;
        }

        /*
         * Save the info for the exception handler and clear the error.
         */
        fpu__save(fpu);

        task->thread.trap_nr = trapnr;
        task->thread.error_code = error_code;
        info.si_signo = SIGFPE;
        info.si_errno = 0;
        info.si_addr = (void __user *)uprobe_get_trap_addr(regs);

        info.si_code = fpu__exception_code(fpu, trapnr);

        /* Retry when we get spurious exceptions: */
        if (!info.si_code)
                return;

        force_sig_info(SIGFPE, &info, task);
}

dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code)
{
        RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
        math_error(regs, error_code, X86_TRAP_MF);
}

dotraplinkage void
do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
{
        RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
        math_error(regs, error_code, X86_TRAP_XF);
}

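/*
 * X86_TRAP_SPURIOUS: nothing to do beyond restoring the interrupt state
 * of the interrupted context.
 */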
dotraplinkage void
do_spurious_interrupt_bug(struct pt_regs *regs, long error_code)
{
        cond_local_irq_enable(regs);
}

dotraplinkage void
do_device_not_available(struct pt_regs *regs, long error_code)
{
        RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
        BUG_ON(use_eager_fpu());

#ifdef CONFIG_MATH_EMULATION
        if (read_cr0() & X86_CR0_EM) {
                struct math_emu_info info = { };

                cond_local_irq_enable(regs);

                info.regs = regs;
                math_emulate(&info);
                return;
        }
#endif
        fpu__restore(&current->thread.fpu); /* interrupts still off */
#ifdef CONFIG_X86_32
        cond_local_irq_enable(regs);
#endif
}
NOKPROBE_SYMBOL(do_device_not_available);

Alexander van Heukelum | 081f75b | 2008-10-03 22:00:39 +0200 | [diff] [blame] | 817 | #ifdef CONFIG_X86_32 |
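 | | /*
 | | * Reached from the 32-bit entry code when an "iret" back to user
 | | * space faults, e.g. because of a corrupted segment in the saved
 | | * frame; the task is given SIGILL with ILL_BADSTK.
 | | */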
Alexander van Heukelum | e407d620 | 2008-09-30 18:41:36 +0200 | [diff] [blame] | 818 | dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code) |
Alexander van Heukelum | f8e0870 | 2008-09-09 21:56:13 +0200 | [diff] [blame] | 819 | { |
| 820 | siginfo_t info; |
Frederic Weisbecker | 6ba3c97 | 2012-07-11 20:26:35 +0200 | [diff] [blame] | 821 | |
Linus Torvalds | 5778077 | 2015-09-01 08:40:25 -0700 | [diff] [blame] | 822 | RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU"); |
Alexander van Heukelum | f8e0870 | 2008-09-09 21:56:13 +0200 | [diff] [blame] | 823 | local_irq_enable(); |
| 824 | |
| 825 | info.si_signo = SIGILL; |
| 826 | info.si_errno = 0; |
| 827 | info.si_code = ILL_BADSTK; |
Hannes Eder | fc6fcdf | 2009-02-22 01:00:57 +0100 | [diff] [blame] | 828 | info.si_addr = NULL; |
Kees Cook | c940826 | 2012-03-09 16:07:10 -0800 | [diff] [blame] | 829 | if (notify_die(DIE_TRAP, "iret exception", regs, error_code, |
Frederic Weisbecker | 6ba3c97 | 2012-07-11 20:26:35 +0200 | [diff] [blame] | 830 | X86_TRAP_IRET, SIGILL) != NOTIFY_STOP) { |
| 831 | do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, error_code, |
| 832 | &info); |
| 833 | } |
Alexander van Heukelum | f8e0870 | 2008-09-09 21:56:13 +0200 | [diff] [blame] | 834 | } |
Alexander van Heukelum | 081f75b | 2008-10-03 22:00:39 +0200 | [diff] [blame] | 835 | #endif |
Alexander van Heukelum | f8e0870 | 2008-09-09 21:56:13 +0200 | [diff] [blame] | 836 | |
Jan Kiszka | 29c8439 | 2010-05-20 21:04:29 -0500 | [diff] [blame] | 837 | /* Set of traps needed for early debugging. */ |
| 838 | void __init early_trap_init(void) |
| 839 | { |
Wang Nan | b4d8327 | 2015-02-26 13:49:39 +0800 | [diff] [blame] | 840 | /* |
Wang Nan | 5eca745 | 2015-02-27 12:19:49 +0800 | [diff] [blame] | 841 | * Don't use IST to set DEBUG_STACK: it doesn't work until the TSS
 | 842 | * is set up in cpu_init(), which is called from trap_init(). Before
 | 843 | * trap_init(), the CPU runs only at ring 0, so it cannot hit an
 | 844 | * invalid stack; the original stack works well enough at this
 | 845 | * early stage. DEBUG_STACK is installed after cpu_init(), in
Wang Nan | b4d8327 | 2015-02-26 13:49:39 +0800 | [diff] [blame] | 846 | * trap_init().
Wang Nan | 5eca745 | 2015-02-27 12:19:49 +0800 | [diff] [blame] | 847 | * |
 | 848 | * Unlike set_intr_gate(), there is no need to touch
 | 849 | * trace_idt_table here: no trace_debug handler exists, and the
 | 850 | * entry is reset to 'debug' by set_intr_gate_ist() in trap_init().
Wang Nan | b4d8327 | 2015-02-26 13:49:39 +0800 | [diff] [blame] | 851 | */ |
Wang Nan | 5eca745 | 2015-02-27 12:19:49 +0800 | [diff] [blame] | 852 | set_intr_gate_notrace(X86_TRAP_DB, debug); |
Jan Kiszka | 29c8439 | 2010-05-20 21:04:29 -0500 | [diff] [blame] | 853 | /* int3 (#BP) has DPL 3, so it can be triggered from user space */
Wang Nan | 5eca745 | 2015-02-27 12:19:49 +0800 | [diff] [blame] | 854 | set_system_intr_gate(X86_TRAP_BP, &int3); |
H. Peter Anvin | 8170e6b | 2013-01-24 12:19:52 -0800 | [diff] [blame] | 855 | #ifdef CONFIG_X86_32 |
Seiji Aguchi | 25c74b1 | 2013-10-30 16:37:00 -0400 | [diff] [blame] | 856 | set_intr_gate(X86_TRAP_PF, page_fault); |
H. Peter Anvin | 8170e6b | 2013-01-24 12:19:52 -0800 | [diff] [blame] | 857 | #endif |
Jan Kiszka | 29c8439 | 2010-05-20 21:04:29 -0500 | [diff] [blame] | 858 | load_idt(&idt_descr); |
| 859 | } |
| 860 | |
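 | | /*
 | | * On 64-bit kernels, early page faults are handled by the boot-time
 | | * handler, which builds early mappings on demand; once the real page
 | | * tables are set up, this installs the final page_fault handler.
 | | */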
H. Peter Anvin | 8170e6b | 2013-01-24 12:19:52 -0800 | [diff] [blame] | 861 | void __init early_trap_pf_init(void) |
| 862 | { |
| 863 | #ifdef CONFIG_X86_64 |
Seiji Aguchi | 25c74b1 | 2013-10-30 16:37:00 -0400 | [diff] [blame] | 864 | set_intr_gate(X86_TRAP_PF, page_fault); |
H. Peter Anvin | 8170e6b | 2013-01-24 12:19:52 -0800 | [diff] [blame] | 865 | #endif |
| 866 | } |
| 867 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 868 | void __init trap_init(void) |
| 869 | { |
Rusty Russell | dbeb2be | 2007-10-19 20:35:03 +0200 | [diff] [blame] | 870 | int i; |
| 871 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 872 | #ifdef CONFIG_EISA |
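 | | /*
 | | * EISA machines carry the ASCII signature "EISA" at physical
 | | * address 0x0FFFD9; the constant below is that string assembled
 | | * as a little-endian 32-bit value.
 | | */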
Ingo Molnar | 927222b | 2008-01-30 13:33:49 +0100 | [diff] [blame] | 873 | void __iomem *p = early_ioremap(0x0FFFD9, 4); |
Ingo Molnar | b596440 | 2008-02-26 11:15:50 +0100 | [diff] [blame] | 874 | |
| 875 | if (readl(p) == 'E' + ('I'<<8) + ('S'<<16) + ('A'<<24)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 876 | EISA_bus = 1; |
Ingo Molnar | 927222b | 2008-01-30 13:33:49 +0100 | [diff] [blame] | 877 | early_iounmap(p, 4); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 878 | #endif |
| 879 | |
Seiji Aguchi | 25c74b1 | 2013-10-30 16:37:00 -0400 | [diff] [blame] | 880 | set_intr_gate(X86_TRAP_DE, divide_error); |
Kees Cook | c940826 | 2012-03-09 16:07:10 -0800 | [diff] [blame] | 881 | set_intr_gate_ist(X86_TRAP_NMI, &nmi, NMI_STACK); |
Alexander van Heukelum | 699d293 | 2008-10-03 22:00:32 +0200 | [diff] [blame] | 882 | /* int4 (#OF, raised by the INTO instruction) can be triggered from user space */
Kees Cook | c940826 | 2012-03-09 16:07:10 -0800 | [diff] [blame] | 883 | set_system_intr_gate(X86_TRAP_OF, &overflow); |
Seiji Aguchi | 25c74b1 | 2013-10-30 16:37:00 -0400 | [diff] [blame] | 884 | set_intr_gate(X86_TRAP_BR, bounds); |
| 885 | set_intr_gate(X86_TRAP_UD, invalid_op); |
| 886 | set_intr_gate(X86_TRAP_NM, device_not_available); |
Alexander van Heukelum | 081f75b | 2008-10-03 22:00:39 +0200 | [diff] [blame] | 887 | #ifdef CONFIG_X86_32 |
Kees Cook | c940826 | 2012-03-09 16:07:10 -0800 | [diff] [blame] | 888 | set_task_gate(X86_TRAP_DF, GDT_ENTRY_DOUBLEFAULT_TSS); |
Alexander van Heukelum | 081f75b | 2008-10-03 22:00:39 +0200 | [diff] [blame] | 889 | #else |
Kees Cook | c940826 | 2012-03-09 16:07:10 -0800 | [diff] [blame] | 890 | set_intr_gate_ist(X86_TRAP_DF, &double_fault, DOUBLEFAULT_STACK); |
Alexander van Heukelum | 081f75b | 2008-10-03 22:00:39 +0200 | [diff] [blame] | 891 | #endif |
Seiji Aguchi | 25c74b1 | 2013-10-30 16:37:00 -0400 | [diff] [blame] | 892 | set_intr_gate(X86_TRAP_OLD_MF, coprocessor_segment_overrun); |
| 893 | set_intr_gate(X86_TRAP_TS, invalid_TSS); |
| 894 | set_intr_gate(X86_TRAP_NP, segment_not_present); |
Andy Lutomirski | 6f442be | 2014-11-22 18:00:32 -0800 | [diff] [blame] | 895 | set_intr_gate(X86_TRAP_SS, stack_segment); |
Seiji Aguchi | 25c74b1 | 2013-10-30 16:37:00 -0400 | [diff] [blame] | 896 | set_intr_gate(X86_TRAP_GP, general_protection); |
| 897 | set_intr_gate(X86_TRAP_SPURIOUS, spurious_interrupt_bug); |
| 898 | set_intr_gate(X86_TRAP_MF, coprocessor_error); |
| 899 | set_intr_gate(X86_TRAP_AC, alignment_check); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 900 | #ifdef CONFIG_X86_MCE |
Kees Cook | c940826 | 2012-03-09 16:07:10 -0800 | [diff] [blame] | 901 | set_intr_gate_ist(X86_TRAP_MC, &machine_check, MCE_STACK); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 902 | #endif |
Seiji Aguchi | 25c74b1 | 2013-10-30 16:37:00 -0400 | [diff] [blame] | 903 | set_intr_gate(X86_TRAP_XF, simd_coprocessor_error); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 904 | |
Yinghai Lu | bb3f0b5 | 2009-01-25 02:38:09 -0800 | [diff] [blame] | 905 | /* Reserve all the built-in exception vectors and, below, the syscall vector: */
| 906 | for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++) |
| 907 | set_bit(i, used_vectors); |
| 908 | |
Alexander van Heukelum | 081f75b | 2008-10-03 22:00:39 +0200 | [diff] [blame] | 909 | #ifdef CONFIG_IA32_EMULATION |
Ingo Molnar | 2cd2355 | 2015-06-08 08:28:07 +0200 | [diff] [blame] | 910 | set_system_intr_gate(IA32_SYSCALL_VECTOR, entry_INT80_compat); |
Yinghai Lu | bb3f0b5 | 2009-01-25 02:38:09 -0800 | [diff] [blame] | 911 | set_bit(IA32_SYSCALL_VECTOR, used_vectors); |
Alexander van Heukelum | 081f75b | 2008-10-03 22:00:39 +0200 | [diff] [blame] | 912 | #endif |
| 913 | |
| 914 | #ifdef CONFIG_X86_32 |
Andy Lutomirski | a798f09 | 2016-03-09 13:24:32 -0800 | [diff] [blame] | 915 | set_system_intr_gate(IA32_SYSCALL_VECTOR, entry_INT80_32);
Brian Gerst | 51bb928 | 2015-05-09 11:36:52 -0400 | [diff] [blame] | 916 | set_bit(IA32_SYSCALL_VECTOR, used_vectors); |
Alexander van Heukelum | 081f75b | 2008-10-03 22:00:39 +0200 | [diff] [blame] | 917 | #endif |
Yinghai Lu | bb3f0b5 | 2009-01-25 02:38:09 -0800 | [diff] [blame] | 918 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 919 | /* |
Kees Cook | 4eefbe7 | 2013-04-10 12:24:22 -0700 | [diff] [blame] | 920 | * Set the IDT descriptor to a fixed read-only location, so that the |
| 921 | * "sidt" instruction will not leak the location of the kernel, and |
| 922 | * to defend the IDT against arbitrary memory write vulnerabilities. |
 | 923 | * It will be reloaded in cpu_init().
 | | */
| 924 | __set_fixmap(FIX_RO_IDT, __pa_symbol(idt_table), PAGE_KERNEL_RO); |
| 925 | idt_descr.address = fix_to_virt(FIX_RO_IDT); |
| 926 | |
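 | | /*
 | | * A minimal sketch of the leak this defends against: even
 | | * unprivileged code may execute SIDT, e.g.
 | | *
 | | *	struct { u16 limit; unsigned long base; } __packed idtr;
 | | *	asm volatile("sidt %0" : "=m" (idtr));
 | | *
 | | * With the fixmap alias in place, idtr.base points at the fixed
 | | * read-only mapping instead of revealing where the kernel placed
 | | * idt_table.
 | | */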
| 927 | /* |
Ingo Molnar | b596440 | 2008-02-26 11:15:50 +0100 | [diff] [blame] | 928 | * Should be a barrier for any external CPU state: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 929 | */ |
| 930 | cpu_init(); |
| 931 | |
Wang Nan | b4d8327 | 2015-02-26 13:49:39 +0800 | [diff] [blame] | 932 | /* |
 | 933 | * X86_TRAP_DB and X86_TRAP_BP were already set
Wang Nan | 5eca745 | 2015-02-27 12:19:49 +0800 | [diff] [blame] | 934 | * in early_trap_init(). However, IST works only after
Wang Nan | b4d8327 | 2015-02-26 13:49:39 +0800 | [diff] [blame] | 935 | * cpu_init() has loaded the TSS. See the comments in early_trap_init().
| 936 | */ |
| 937 | set_intr_gate_ist(X86_TRAP_DB, &debug, DEBUG_STACK); |
 | 938 | /* int3 (#BP) has DPL 3, so it can be triggered from user space */
| 939 | set_system_intr_gate_ist(X86_TRAP_BP, &int3, DEBUG_STACK); |
| 940 | |
Thomas Gleixner | 428cf90 | 2009-08-20 10:35:46 +0200 | [diff] [blame] | 941 | x86_init.irqs.trap_init(); |
Steven Rostedt | 228bdaa | 2011-12-09 03:02:19 -0500 | [diff] [blame] | 942 | |
| 943 | #ifdef CONFIG_X86_64 |
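 | | /*
 | | * The debug IDT starts as a byte-for-byte copy of the live IDT
 | | * (16 bytes per 64-bit gate descriptor). #DB and #BP are then
 | | * re-pointed at non-IST gates so that a breakpoint taken while
 | | * already on an IST stack (e.g. inside an NMI) does not reset
 | | * that stack.
 | | */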
Seiji Aguchi | 629f4f9 | 2013-06-20 11:45:44 -0400 | [diff] [blame] | 944 | memcpy(&debug_idt_table, &idt_table, IDT_ENTRIES * 16); |
Kees Cook | c940826 | 2012-03-09 16:07:10 -0800 | [diff] [blame] | 945 | set_nmi_gate(X86_TRAP_DB, &debug); |
| 946 | set_nmi_gate(X86_TRAP_BP, &int3); |
Steven Rostedt | 228bdaa | 2011-12-09 03:02:19 -0500 | [diff] [blame] | 947 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 948 | } |