Alexander van Heukelum | 6fcbede | 2008-09-30 13:12:15 +0200 | [diff] [blame] | 1 | /* |
| 2 | * Copyright (C) 1991, 1992 Linus Torvalds |
| 3 | * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs |
| 4 | */ |
| 5 | #include <linux/kallsyms.h> |
| 6 | #include <linux/kprobes.h> |
| 7 | #include <linux/uaccess.h> |
Alexander van Heukelum | 6fcbede | 2008-09-30 13:12:15 +0200 | [diff] [blame] | 8 | #include <linux/hardirq.h> |
| 9 | #include <linux/kdebug.h> |
Paul Gortmaker | 186f436 | 2016-07-13 20:18:56 -0400 | [diff] [blame] | 10 | #include <linux/export.h> |
Alexander van Heukelum | 6fcbede | 2008-09-30 13:12:15 +0200 | [diff] [blame] | 11 | #include <linux/ptrace.h> |
| 12 | #include <linux/kexec.h> |
Ingo Molnar | b803090 | 2009-11-26 08:17:31 +0100 | [diff] [blame] | 13 | #include <linux/sysfs.h> |
Alexander van Heukelum | 6fcbede | 2008-09-30 13:12:15 +0200 | [diff] [blame] | 14 | #include <linux/bug.h> |
| 15 | #include <linux/nmi.h> |
| 16 | |
| 17 | #include <asm/stacktrace.h> |
| 18 | |
/*
 * Human-readable names for the per-cpu IST exception stacks, indexed by
 * IST slot number (the *_STACK constants are 1-based, hence the -1).
 */
static char *exception_stack_names[N_EXCEPTION_STACKS] = {
		[ DOUBLEFAULT_STACK-1	]	= "#DF",
		[ NMI_STACK-1		]	= "NMI",
		[ DEBUG_STACK-1		]	= "#DB",
		[ MCE_STACK-1		]	= "#MC",
};
Alexander van Heukelum | 6fcbede | 2008-09-30 13:12:15 +0200 | [diff] [blame] | 25 | |
/*
 * Size of each IST exception stack, indexed like exception_stack_names.
 * All default to EXCEPTION_STKSZ; the debug stack is larger (DEBUG_STKSZ).
 */
static unsigned long exception_stack_sizes[N_EXCEPTION_STACKS] = {
	[0 ... N_EXCEPTION_STACKS - 1]		= EXCEPTION_STKSZ,
	[DEBUG_STACK - 1]			= DEBUG_STKSZ
};
Frederic Weisbecker | 0406ca6 | 2009-07-01 21:02:09 +0200 | [diff] [blame] | 30 | |
Josh Poimboeuf | cb76c93 | 2016-09-14 21:07:42 -0500 | [diff] [blame] | 31 | void stack_type_str(enum stack_type type, const char **begin, const char **end) |
Frederic Weisbecker | 0406ca6 | 2009-07-01 21:02:09 +0200 | [diff] [blame] | 32 | { |
Josh Poimboeuf | cb76c93 | 2016-09-14 21:07:42 -0500 | [diff] [blame] | 33 | BUILD_BUG_ON(N_EXCEPTION_STACKS != 4); |
| 34 | |
| 35 | switch (type) { |
| 36 | case STACK_TYPE_IRQ: |
| 37 | *begin = "IRQ"; |
| 38 | *end = "EOI"; |
| 39 | break; |
| 40 | case STACK_TYPE_EXCEPTION ... STACK_TYPE_EXCEPTION_LAST: |
| 41 | *begin = exception_stack_names[type - STACK_TYPE_EXCEPTION]; |
| 42 | *end = "EOE"; |
| 43 | break; |
| 44 | default: |
| 45 | *begin = NULL; |
| 46 | *end = NULL; |
| 47 | } |
| 48 | } |
| 49 | |
/*
 * Determine whether @stack points into one of this CPU's IST exception
 * stacks (#DF, NMI, #DB, #MC).  On a hit, fill in @info with the stack
 * type, its bounds, and the interrupted context's stack pointer, then
 * return true; otherwise return false.
 */
static bool in_exception_stack(unsigned long *stack, struct stack_info *info)
{
	unsigned long *begin, *end;
	struct pt_regs *regs;
	unsigned k;

	BUILD_BUG_ON(N_EXCEPTION_STACKS != 4);

	for (k = 0; k < N_EXCEPTION_STACKS; k++) {
		/* orig_ist holds the top-of-stack address of each IST slot */
		end = (unsigned long *)raw_cpu_ptr(&orig_ist)->ist[k];
		begin = end - (exception_stack_sizes[k] / sizeof(long));
		/* the entry code places a pt_regs frame at the stack top */
		regs = (struct pt_regs *)end - 1;

		if (stack < begin || stack >= end)
			continue;

		info->type = STACK_TYPE_EXCEPTION + k;
		info->begin = begin;
		info->end = end;
		/* regs->sp points at the stack we interrupted */
		info->next_sp = (unsigned long *)regs->sp;

		return true;
	}

	return false;
}
| 76 | |
Josh Poimboeuf | cb76c93 | 2016-09-14 21:07:42 -0500 | [diff] [blame] | 77 | static bool in_irq_stack(unsigned long *stack, struct stack_info *info) |
Frederic Weisbecker | af2d828 | 2009-12-06 05:34:27 +0100 | [diff] [blame] | 78 | { |
Josh Poimboeuf | cb76c93 | 2016-09-14 21:07:42 -0500 | [diff] [blame] | 79 | unsigned long *end = (unsigned long *)this_cpu_read(irq_stack_ptr); |
| 80 | unsigned long *begin = end - (IRQ_STACK_SIZE / sizeof(long)); |
| 81 | |
Josh Poimboeuf | 5fe599e | 2016-09-14 21:07:43 -0500 | [diff] [blame] | 82 | /* |
| 83 | * This is a software stack, so 'end' can be a valid stack pointer. |
| 84 | * It just means the stack is empty. |
| 85 | */ |
| 86 | if (stack < begin || stack > end) |
Josh Poimboeuf | cb76c93 | 2016-09-14 21:07:42 -0500 | [diff] [blame] | 87 | return false; |
| 88 | |
| 89 | info->type = STACK_TYPE_IRQ; |
| 90 | info->begin = begin; |
| 91 | info->end = end; |
| 92 | |
| 93 | /* |
| 94 | * The next stack pointer is the first thing pushed by the entry code |
| 95 | * after switching to the irq stack. |
| 96 | */ |
| 97 | info->next_sp = (unsigned long *)*(end - 1); |
| 98 | |
| 99 | return true; |
Frederic Weisbecker | af2d828 | 2009-12-06 05:34:27 +0100 | [diff] [blame] | 100 | } |
| 101 | |
Josh Poimboeuf | cb76c93 | 2016-09-14 21:07:42 -0500 | [diff] [blame] | 102 | int get_stack_info(unsigned long *stack, struct task_struct *task, |
| 103 | struct stack_info *info, unsigned long *visit_mask) |
Steven Rostedt | 2223f6f | 2014-02-06 09:41:32 -0500 | [diff] [blame] | 104 | { |
Josh Poimboeuf | cb76c93 | 2016-09-14 21:07:42 -0500 | [diff] [blame] | 105 | if (!stack) |
| 106 | goto unknown; |
Steven Rostedt | 2223f6f | 2014-02-06 09:41:32 -0500 | [diff] [blame] | 107 | |
Josh Poimboeuf | cb76c93 | 2016-09-14 21:07:42 -0500 | [diff] [blame] | 108 | task = task ? : current; |
Steven Rostedt | 2223f6f | 2014-02-06 09:41:32 -0500 | [diff] [blame] | 109 | |
Josh Poimboeuf | cb76c93 | 2016-09-14 21:07:42 -0500 | [diff] [blame] | 110 | if (in_task_stack(stack, task, info)) |
Josh Poimboeuf | fcd709e | 2016-09-14 21:07:44 -0500 | [diff] [blame] | 111 | goto recursion_check; |
Steven Rostedt | 2223f6f | 2014-02-06 09:41:32 -0500 | [diff] [blame] | 112 | |
Josh Poimboeuf | cb76c93 | 2016-09-14 21:07:42 -0500 | [diff] [blame] | 113 | if (task != current) |
| 114 | goto unknown; |
Steven Rostedt | 2223f6f | 2014-02-06 09:41:32 -0500 | [diff] [blame] | 115 | |
Josh Poimboeuf | fcd709e | 2016-09-14 21:07:44 -0500 | [diff] [blame] | 116 | if (in_exception_stack(stack, info)) |
| 117 | goto recursion_check; |
Steven Rostedt | 2223f6f | 2014-02-06 09:41:32 -0500 | [diff] [blame] | 118 | |
Josh Poimboeuf | cb76c93 | 2016-09-14 21:07:42 -0500 | [diff] [blame] | 119 | if (in_irq_stack(stack, info)) |
Josh Poimboeuf | fcd709e | 2016-09-14 21:07:44 -0500 | [diff] [blame] | 120 | goto recursion_check; |
| 121 | |
| 122 | goto unknown; |
| 123 | |
| 124 | recursion_check: |
| 125 | /* |
| 126 | * Make sure we don't iterate through any given stack more than once. |
| 127 | * If it comes up a second time then there's something wrong going on: |
| 128 | * just break out and report an unknown stack type. |
| 129 | */ |
| 130 | if (visit_mask) { |
| 131 | if (*visit_mask & (1UL << info->type)) |
| 132 | goto unknown; |
| 133 | *visit_mask |= 1UL << info->type; |
| 134 | } |
Steven Rostedt | 2223f6f | 2014-02-06 09:41:32 -0500 | [diff] [blame] | 135 | |
Josh Poimboeuf | cb76c93 | 2016-09-14 21:07:42 -0500 | [diff] [blame] | 136 | return 0; |
| 137 | |
| 138 | unknown: |
| 139 | info->type = STACK_TYPE_UNKNOWN; |
| 140 | return -EINVAL; |
Steven Rostedt | 2223f6f | 2014-02-06 09:41:32 -0500 | [diff] [blame] | 141 | } |
| 142 | |
/*
 * Dump raw stack contents starting at @sp (or at @task's current stack
 * pointer when @sp is NULL), following the hop from the irq stack back
 * to the interrupted stack, then print the symbolic backtrace.
 * @log_lvl is prepended to each output line.
 */
void show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
			unsigned long *sp, char *log_lvl)
{
	unsigned long *irq_stack_end;
	unsigned long *irq_stack;
	unsigned long *stack;
	int i;

	/* pin the stack so it can't be freed out from under us */
	if (!try_get_task_stack(task))
		return;

	irq_stack_end = (unsigned long *)this_cpu_read(irq_stack_ptr);
	irq_stack = irq_stack_end - (IRQ_STACK_SIZE / sizeof(long));

	sp = sp ? : get_stack_pointer(task, regs);

	stack = sp;
	for (i = 0; i < kstack_depth_to_print; i++) {
		unsigned long word;

		if (stack >= irq_stack && stack <= irq_stack_end) {
			/*
			 * Reached the top of the irq stack: the last word
			 * there is the previous context's stack pointer;
			 * continue the dump from that stack.
			 */
			if (stack == irq_stack_end) {
				stack = (unsigned long *) (irq_stack_end[-1]);
				pr_cont(" <EOI> ");
			}
		} else {
		if (kstack_end(stack))
			break;
		}

		/* read defensively: the stack may be corrupt */
		if (probe_kernel_address(stack, word))
			break;

		if ((i % STACKSLOTS_PER_LINE) == 0) {
			if (i != 0)
				pr_cont("\n");
			printk("%s %016lx", log_lvl, word);
		} else
			pr_cont(" %016lx", word);

		stack++;
		/* dumping can be slow; don't trip the NMI watchdog */
		touch_nmi_watchdog();
	}

	pr_cont("\n");
	show_trace_log_lvl(task, regs, sp, log_lvl);

	put_task_stack(task);
}
| 192 | |
/*
 * Print the register state and, for kernel-mode faults, the raw stack
 * contents and a hex dump of the code bytes around the faulting RIP.
 */
void show_regs(struct pt_regs *regs)
{
	int i;

	show_regs_print_info(KERN_DEFAULT);
	__show_regs(regs, 1);

	/*
	 * When in-kernel, we also print out the stack and code at the
	 * time of the fault..
	 */
	if (!user_mode(regs)) {
		/* show roughly 2/3 of the code bytes before RIP, 1/3 after */
		unsigned int code_prologue = code_bytes * 43 / 64;
		unsigned int code_len = code_bytes;
		unsigned char c;
		u8 *ip;

		printk(KERN_DEFAULT "Stack:\n");
		show_stack_log_lvl(current, regs, NULL, KERN_DEFAULT);

		printk(KERN_DEFAULT "Code: ");

		ip = (u8 *)regs->ip - code_prologue;
		if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
			/* try starting at IP */
			ip = (u8 *)regs->ip;
			code_len = code_len - code_prologue + 1;
		}
		for (i = 0; i < code_len; i++, ip++) {
			/* each byte is probed: RIP may sit near a bad page */
			if (ip < (u8 *)PAGE_OFFSET ||
					probe_kernel_address(ip, c)) {
				pr_cont(" Bad RIP value.");
				break;
			}
			/* bracket the byte at the faulting instruction */
			if (ip == (u8 *)regs->ip)
				pr_cont("<%02x> ", c);
			else
				pr_cont("%02x ", c);
		}
	}
	pr_cont("\n");
}
| 235 | |
/*
 * Check whether @ip points at a 'ud2' instruction (opcode 0f 0b), which
 * is what BUG() compiles to.  Called from the invalid-opcode trap
 * handler to decide whether the fault is a genuine BUG().
 *
 * @ip is a kernel text address, so read it with probe_kernel_address()
 * -- the same safe kernel-read primitive used elsewhere in this file --
 * rather than __copy_from_user(), which is meant for user pointers and
 * whose result here depended on the current addr_limit.
 */
int is_valid_bugaddr(unsigned long ip)
{
	unsigned short ud2;

	if (probe_kernel_address((unsigned short *)ip, ud2))
		return 0;

	/* 0x0b0f is '0f 0b' (ud2) read as a little-endian u16 */
	return ud2 == 0x0b0f;
}