Russell King | b2c0b2c | 2014-09-03 23:57:13 +0100 | [diff] [blame] | 1 | /* |
| 2 | * NMI backtrace support |
| 3 | * |
| 4 | * Gratuitously copied from arch/x86/kernel/apic/hw_nmi.c by Russell King, |
| 5 | * with the following header: |
| 6 | * |
| 7 | * HW NMI watchdog support |
| 8 | * |
| 9 | * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc. |
| 10 | * |
| 11 | * Arch specific calls to support NMI watchdog |
| 12 | * |
| 13 | * Bits copied from original nmi.c file |
| 14 | */ |
| 15 | #include <linux/cpumask.h> |
| 16 | #include <linux/delay.h> |
| 17 | #include <linux/kprobes.h> |
| 18 | #include <linux/nmi.h> |
Chris Metcalf | 6727ad9 | 2016-10-07 17:02:55 -0700 | [diff] [blame] | 19 | #include <linux/cpu.h> |
Ingo Molnar | b17b015 | 2017-02-08 18:51:35 +0100 | [diff] [blame] | 20 | #include <linux/sched/debug.h> |
Russell King | b2c0b2c | 2014-09-03 23:57:13 +0100 | [diff] [blame] | 21 | |
#ifdef arch_trigger_cpumask_backtrace
/*
 * Mask of CPUs that still owe us a backtrace.  For reliability, we're
 * prepared to waste bits here: sized at NR_CPUS rather than nr_cpu_ids,
 * since this is pure diagnostic state and correctness beats compactness.
 */
static DECLARE_BITMAP(backtrace_mask, NR_CPUS) __read_mostly;

/*
 * "in progress" flag of arch_trigger_cpumask_backtrace; bit 0 acts as a
 * trylock so concurrent backtrace requests don't interleave their output.
 */
static unsigned long backtrace_flag;
| 28 | |
/*
 * Ask every CPU in @mask (optionally minus ourselves) to dump a backtrace
 * via an architecture-provided @raise callback, then wait for completion.
 *
 * When raise() is called it will be passed a pointer to the
 * backtrace_mask. Architectures that call nmi_cpu_backtrace()
 * directly from their raise() functions may rely on the mask
 * they are passed being updated as a side effect of this call.
 */
void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
				   bool exclude_self,
				   void (*raise)(cpumask_t *mask))
{
	/* get_cpu() disables preemption, so this_cpu stays valid below. */
	int i, this_cpu = get_cpu();

	if (test_and_set_bit(0, &backtrace_flag)) {
		/*
		 * If there is already a trigger_all_cpu_backtrace() in progress
		 * (backtrace_flag == 1), don't output double cpu dump infos.
		 */
		put_cpu();
		return;
	}

	/* Each target CPU clears its own bit when done; empty == all done. */
	cpumask_copy(to_cpumask(backtrace_mask), mask);
	if (exclude_self)
		cpumask_clear_cpu(this_cpu, to_cpumask(backtrace_mask));

	/*
	 * Don't try to send an NMI to this cpu; it may work on some
	 * architectures, but on others it may not, and we'll get
	 * information at least as useful just by doing a dump_stack() here.
	 * Note that nmi_cpu_backtrace(NULL) will clear the cpu bit.
	 */
	if (cpumask_test_cpu(this_cpu, to_cpumask(backtrace_mask)))
		nmi_cpu_backtrace(NULL);

	if (!cpumask_empty(to_cpumask(backtrace_mask))) {
		pr_info("Sending NMI from CPU %d to CPUs %*pbl:\n",
			this_cpu, nr_cpumask_bits, to_cpumask(backtrace_mask));
		raise(to_cpumask(backtrace_mask));
	}

	/* Wait for up to 10 seconds for all CPUs to do the backtrace */
	for (i = 0; i < 10 * 1000; i++) {
		if (cpumask_empty(to_cpumask(backtrace_mask)))
			break;
		mdelay(1);
		/* We busy-wait with preemption off; keep the watchdog quiet. */
		touch_softlockup_watchdog();
	}

	/*
	 * Force flush any remote buffers that might be stuck in IRQ context
	 * and therefore could not run their irq_work.
	 */
	printk_safe_flush();

	/* Release-ordered clear: all mask/printk work is visible first. */
	clear_bit_unlock(0, &backtrace_flag);
	put_cpu();
}
| 86 | |
Russell King | b2c0b2c | 2014-09-03 23:57:13 +0100 | [diff] [blame] | 87 | bool nmi_cpu_backtrace(struct pt_regs *regs) |
| 88 | { |
| 89 | int cpu = smp_processor_id(); |
| 90 | |
| 91 | if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) { |
Chris Metcalf | 6727ad9 | 2016-10-07 17:02:55 -0700 | [diff] [blame] | 92 | if (regs && cpu_in_idle(instruction_pointer(regs))) { |
| 93 | pr_warn("NMI backtrace for cpu %d skipped: idling at pc %#lx\n", |
| 94 | cpu, instruction_pointer(regs)); |
| 95 | } else { |
| 96 | pr_warn("NMI backtrace for cpu %d\n", cpu); |
| 97 | if (regs) |
| 98 | show_regs(regs); |
| 99 | else |
| 100 | dump_stack(); |
| 101 | } |
Russell King | b2c0b2c | 2014-09-03 23:57:13 +0100 | [diff] [blame] | 102 | cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask)); |
| 103 | return true; |
| 104 | } |
| 105 | |
| 106 | return false; |
| 107 | } |
| 108 | NOKPROBE_SYMBOL(nmi_cpu_backtrace); |
| 109 | #endif |