/*
 * Detect hard lockups on a system
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Note: Most of this code is borrowed heavily from the original softlockup
 * detector, so thanks to Ingo for the initial implementation.
 * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
 * to those contributors as well.
 */

#define pr_fmt(fmt) "NMI watchdog: " fmt

#include <linux/nmi.h>
#include <linux/module.h>
#include <linux/sched/debug.h>

#include <asm/irq_regs.h>
#include <linux/perf_event.h>

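/*
 * Per-CPU state for the hard lockup detector:
 *  - hard_watchdog_warn:  set once a hard lockup has been reported on this
 *                         CPU so the warning is printed only once.
 *  - watchdog_nmi_touch:  set by arch_touch_nmi_watchdog() to make the next
 *                         NMI check skip the lockup test.
 *  - watchdog_ev:         the pinned perf event driving the NMI on this CPU.
 */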
static DEFINE_PER_CPU(bool, hard_watchdog_warn);
static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);

static unsigned long hardlockup_allcpu_dumped;

void arch_touch_nmi_watchdog(void)
{
	/*
	 * Using __raw here because some code paths have
	 * preemption enabled.  If preemption is enabled
	 * then interrupts should be enabled too, in which
	 * case we shouldn't have to worry about the watchdog
	 * going off.
	 */
	raw_cpu_write(watchdog_nmi_touch, true);
}
EXPORT_SYMBOL(arch_touch_nmi_watchdog);

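/*
 * Template for the per-CPU perf event: a pinned hardware cycle counter,
 * created disabled; the sample period is filled in from watchdog_thresh
 * in watchdog_nmi_enable() before the event is registered.
 */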
static struct perf_event_attr wd_hw_attr = {
	.type		= PERF_TYPE_HARDWARE,
	.config		= PERF_COUNT_HW_CPU_CYCLES,
	.size		= sizeof(struct perf_event_attr),
	.pinned		= 1,
	.disabled	= 1,
};

/* Callback function for perf event subsystem */
static void watchdog_overflow_callback(struct perf_event *event,
				       struct perf_sample_data *data,
				       struct pt_regs *regs)
{
	/* Ensure the watchdog never gets throttled */
	event->hw.interrupts = 0;

	if (atomic_read(&watchdog_park_in_progress) != 0)
		return;

	if (__this_cpu_read(watchdog_nmi_touch) == true) {
		__this_cpu_write(watchdog_nmi_touch, false);
		return;
	}

	/*
	 * Check for a hardlockup by making sure our timer interrupt
	 * is incrementing.  The timer interrupt should have fired
	 * multiple times before we overflowed.  If it hasn't, that is
	 * a good indication the cpu is stuck.
	 */
	if (is_hardlockup()) {
		int this_cpu = smp_processor_id();

		/* only print hardlockups once */
		if (__this_cpu_read(hard_watchdog_warn) == true)
			return;

		pr_emerg("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
		print_modules();
		print_irqtrace_events(current);
		if (regs)
			show_regs(regs);
		else
			dump_stack();

		/*
		 * Perform all-CPU dump only once to avoid multiple
		 * hardlockups generating interleaving traces
		 */
		if (sysctl_hardlockup_all_cpu_backtrace &&
				!test_and_set_bit(0, &hardlockup_allcpu_dumped))
			trigger_allbutself_cpu_backtrace();

		if (hardlockup_panic)
			nmi_panic(regs, "Hard LOCKUP");

		__this_cpu_write(hard_watchdog_warn, true);
		return;
	}

	__this_cpu_write(hard_watchdog_warn, false);
	return;
}

/*
 * People like the simple clean cpu node info on boot.
 * Reduce the watchdog noise by only printing messages
 * that are different from what cpu0 displayed.
 */
static unsigned long firstcpu_err;
static atomic_t watchdog_cpus;

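/*
 * Set up and enable the hard lockup detector's perf event on @cpu.
 * Returns 0 on success or when the detector is disabled; on failure the
 * NMI_WATCHDOG_ENABLED bit is cleared and the perf error code is returned.
 */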
int watchdog_nmi_enable(unsigned int cpu)
{
	struct perf_event_attr *wd_attr;
	struct perf_event *event = per_cpu(watchdog_ev, cpu);
	int firstcpu = 0;

	/* nothing to do if the hard lockup detector is disabled */
	if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
		goto out;

	/* is it already setup and enabled? */
	if (event && event->state > PERF_EVENT_STATE_OFF)
		goto out;

	/* it is setup but not enabled */
	if (event != NULL)
		goto out_enable;

	if (atomic_inc_return(&watchdog_cpus) == 1)
		firstcpu = 1;

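	/*
	 * Turn the watchdog threshold (in seconds) into a sample period for
	 * the cycle counter; hw_nmi_get_sample_period() is provided by the
	 * architecture, so the exact conversion is arch-specific.
	 */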
	wd_attr = &wd_hw_attr;
	wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh);

	/* Try to register using hardware perf events */
	event = perf_event_create_kernel_counter(wd_attr, cpu, NULL,
						 watchdog_overflow_callback, NULL);

	/* save the first cpu's error for future comparison */
	if (firstcpu && IS_ERR(event))
		firstcpu_err = PTR_ERR(event);

	if (!IS_ERR(event)) {
		/* only print for the first cpu initialized */
		if (firstcpu || firstcpu_err)
			pr_info("enabled on all CPUs, permanently consumes one hw-PMU counter.\n");
		goto out_save;
	}

	/*
	 * Disable the hard lockup detector if _any_ CPU fails to set up
	 * the hardware perf event. The watchdog() function checks the
	 * NMI_WATCHDOG_ENABLED bit periodically.
	 *
	 * The barriers are for syncing up watchdog_enabled across all the
	 * cpus, as clear_bit() does not use barriers.
	 */
	smp_mb__before_atomic();
	clear_bit(NMI_WATCHDOG_ENABLED_BIT, &watchdog_enabled);
	smp_mb__after_atomic();

	/* skip displaying the same error again */
	if (!firstcpu && (PTR_ERR(event) == firstcpu_err))
		return PTR_ERR(event);

	/* vary the KERN level based on the returned errno */
	if (PTR_ERR(event) == -EOPNOTSUPP)
		pr_info("disabled (cpu%i): not supported (no LAPIC?)\n", cpu);
	else if (PTR_ERR(event) == -ENOENT)
		pr_warn("disabled (cpu%i): hardware events not enabled\n",
			cpu);
	else
		pr_err("disabled (cpu%i): unable to create perf event: %ld\n",
		       cpu, PTR_ERR(event));

	pr_info("Shutting down hard lockup detector on all cpus\n");

	return PTR_ERR(event);

	/* success path */
out_save:
	per_cpu(watchdog_ev, cpu) = event;
out_enable:
	perf_event_enable(per_cpu(watchdog_ev, cpu));
out:
	return 0;
}

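/*
 * Disable and release the perf event backing the hard lockup detector on
 * @cpu, and reset the saved first-CPU error once the last event is gone.
 */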
void watchdog_nmi_disable(unsigned int cpu)
{
	struct perf_event *event = per_cpu(watchdog_ev, cpu);

	if (event) {
		perf_event_disable(event);
		per_cpu(watchdog_ev, cpu) = NULL;

		/* should be in cleanup, but blocks oprofile */
		perf_event_release_kernel(event);

		/* watchdog_nmi_enable() expects this to be zero initially. */
		if (atomic_dec_and_test(&watchdog_cpus))
			firstcpu_err = 0;
	}
}