/*
 * Detect hard and soft lockups on a system
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Note: Most of this code is borrowed heavily from the original softlockup
 * detector, so thanks to Ingo for the initial implementation.
 * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
 * to those contributors as well.
 */

#define pr_fmt(fmt) "NMI watchdog: " fmt

#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/smpboot.h>
#include <linux/sched/rt.h>

#include <asm/irq_regs.h>
#include <linux/kvm_para.h>
#include <linux/perf_event.h>

/*
 * The run state of the lockup detectors is controlled by the content of the
 * 'watchdog_enabled' variable. Each lockup detector has its dedicated bit -
 * bit 0 for the hard lockup detector and bit 1 for the soft lockup detector.
 *
 * 'watchdog_user_enabled', 'nmi_watchdog_enabled' and 'soft_watchdog_enabled'
 * are variables that are only used as an 'interface' between the parameters
 * in /proc/sys/kernel and the internal state bits in 'watchdog_enabled'. The
 * 'watchdog_thresh' variable is handled differently because its value is not
 * boolean, and the lockup detectors are 'suspended' while 'watchdog_thresh'
 * is equal to zero.
 */
#define NMI_WATCHDOG_ENABLED_BIT   0
#define SOFT_WATCHDOG_ENABLED_BIT  1
#define NMI_WATCHDOG_ENABLED      (1 << NMI_WATCHDOG_ENABLED_BIT)
#define SOFT_WATCHDOG_ENABLED     (1 << SOFT_WATCHDOG_ENABLED_BIT)
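
/*
 * NMI_WATCHDOG_ENABLED is 0x1 and SOFT_WATCHDOG_ENABLED is 0x2, so a
 * 'watchdog_enabled' value of 3 means that both detectors are turned on.
 * The boot parameters and /proc handlers below only flip these bits; the
 * detectors themselves test 'watchdog_enabled' at run time.
 */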

#ifdef CONFIG_HARDLOCKUP_DETECTOR
static unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED|NMI_WATCHDOG_ENABLED;
#else
static unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED;
#endif
int __read_mostly nmi_watchdog_enabled;
int __read_mostly soft_watchdog_enabled;
int __read_mostly watchdog_user_enabled;
int __read_mostly watchdog_thresh = 10;

#ifdef CONFIG_SMP
int __read_mostly sysctl_softlockup_all_cpu_backtrace;
#else
#define sysctl_softlockup_all_cpu_backtrace 0
#endif

static int __read_mostly watchdog_running;
static u64 __read_mostly sample_period;

static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static DEFINE_PER_CPU(bool, soft_watchdog_warn);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
static DEFINE_PER_CPU(unsigned long, soft_lockup_hrtimer_cnt);
static DEFINE_PER_CPU(struct task_struct *, softlockup_task_ptr_saved);
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static DEFINE_PER_CPU(bool, hard_watchdog_warn);
static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
#endif
static unsigned long soft_lockup_nmi_warn;

/* boot commands */
/*
 * Should we panic when a soft-lockup or hard-lockup occurs:
 */
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static int hardlockup_panic =
			CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;
/*
 * We may not want to enable hard lockup detection by default in all cases,
 * for example when running the kernel as a guest on a hypervisor. In these
 * cases this function can be called to disable hard lockup detection. This
 * function should only be executed once by the boot processor before the
 * kernel command line parameters are parsed, because otherwise it is not
 * possible to override this in hardlockup_panic_setup().
 */
void hardlockup_detector_disable(void)
{
	watchdog_enabled &= ~NMI_WATCHDOG_ENABLED;
}

static int __init hardlockup_panic_setup(char *str)
{
	if (!strncmp(str, "panic", 5))
		hardlockup_panic = 1;
	else if (!strncmp(str, "nopanic", 7))
		hardlockup_panic = 0;
	else if (!strncmp(str, "0", 1))
		watchdog_enabled &= ~NMI_WATCHDOG_ENABLED;
	else if (!strncmp(str, "1", 1))
		watchdog_enabled |= NMI_WATCHDOG_ENABLED;
	return 1;
}
__setup("nmi_watchdog=", hardlockup_panic_setup);
#endif

unsigned int __read_mostly softlockup_panic =
			CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;

static int __init softlockup_panic_setup(char *str)
{
	softlockup_panic = simple_strtoul(str, NULL, 0);

	return 1;
}
__setup("softlockup_panic=", softlockup_panic_setup);

static int __init nowatchdog_setup(char *str)
{
	watchdog_enabled = 0;
	return 1;
}
__setup("nowatchdog", nowatchdog_setup);

static int __init nosoftlockup_setup(char *str)
{
	watchdog_enabled &= ~SOFT_WATCHDOG_ENABLED;
	return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);

#ifdef CONFIG_SMP
static int __init softlockup_all_cpu_backtrace_setup(char *str)
{
	sysctl_softlockup_all_cpu_backtrace =
		!!simple_strtol(str, NULL, 0);
	return 1;
}
__setup("softlockup_all_cpu_backtrace=", softlockup_all_cpu_backtrace_setup);
#endif

/*
 * Hard-lockup warnings should be triggered after just a few seconds. Soft-
 * lockups can have false positives under extreme conditions. So we generally
 * want a higher threshold for soft lockups than for hard lockups. So we couple
 * the thresholds with a factor: we make the soft threshold twice the amount of
 * time the hard threshold is.
 */
static int get_softlockup_thresh(void)
{
	return watchdog_thresh * 2;
}

/*
 * Returns seconds, approximately.  We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30ns == 1.074s.
 */
static unsigned long get_timestamp(void)
{
	return running_clock() >> 30LL;  /* 2^30 ~= 10^9 */
}

static void set_sample_period(void)
{
	/*
	 * convert watchdog_thresh from seconds to ns
	 * the divide by 5 is to give hrtimer several chances (two
	 * or three with the current relation between the soft
	 * and hard thresholds) to increment before the
	 * hardlockup detector generates a warning
	 */
	sample_period = get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
}
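
/*
 * For example, with the default watchdog_thresh of 10 seconds the soft
 * lockup threshold is 20 seconds and sample_period works out to 4 seconds,
 * i.e. the per-cpu hrtimer fires every 4 seconds.
 */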

/* Commands for resetting the watchdog */
static void __touch_watchdog(void)
{
	__this_cpu_write(watchdog_touch_ts, get_timestamp());
}

void touch_softlockup_watchdog(void)
{
	/*
	 * Preemption can be enabled.  It doesn't matter which CPU's timestamp
	 * gets zeroed here, so use the raw_ operation.
	 */
	raw_cpu_write(watchdog_touch_ts, 0);
}
EXPORT_SYMBOL(touch_softlockup_watchdog);

void touch_all_softlockup_watchdogs(void)
{
	int cpu;

	/*
	 * this is done lockless
	 * do we care if a 0 races with a timestamp?
	 * all it means is the softlockup check starts one cycle later
	 */
	for_each_online_cpu(cpu)
		per_cpu(watchdog_touch_ts, cpu) = 0;
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR
void touch_nmi_watchdog(void)
{
	/*
	 * Using __raw here because some code paths have
	 * preemption enabled.  If preemption is enabled
	 * then interrupts should be enabled too, in which
	 * case we shouldn't have to worry about the watchdog
	 * going off.
	 */
	raw_cpu_write(watchdog_nmi_touch, true);
	touch_softlockup_watchdog();
}
EXPORT_SYMBOL(touch_nmi_watchdog);

#endif

void touch_softlockup_watchdog_sync(void)
{
	__this_cpu_write(softlockup_touch_sync, true);
	__this_cpu_write(watchdog_touch_ts, 0);
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR
/* watchdog detector functions */
static int is_hardlockup(void)
{
	unsigned long hrint = __this_cpu_read(hrtimer_interrupts);

	if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
		return 1;

	__this_cpu_write(hrtimer_interrupts_saved, hrint);
	return 0;
}
#endif

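/*
 * Returns the (approximate) number of seconds since the watchdog was last
 * touched if that exceeds the soft lockup threshold and the soft lockup
 * detector is enabled, 0 otherwise.
 */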
static int is_softlockup(unsigned long touch_ts)
{
	unsigned long now = get_timestamp();

	if (watchdog_enabled & SOFT_WATCHDOG_ENABLED) {
		/* Warn about unreasonable delays. */
		if (time_after(now, touch_ts + get_softlockup_thresh()))
			return now - touch_ts;
	}
	return 0;
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR

static struct perf_event_attr wd_hw_attr = {
	.type		= PERF_TYPE_HARDWARE,
	.config		= PERF_COUNT_HW_CPU_CYCLES,
	.size		= sizeof(struct perf_event_attr),
	.pinned		= 1,
	.disabled	= 1,
};

/* Callback function for perf event subsystem */
static void watchdog_overflow_callback(struct perf_event *event,
		 struct perf_sample_data *data,
		 struct pt_regs *regs)
{
	/* Ensure the watchdog never gets throttled */
	event->hw.interrupts = 0;

	if (__this_cpu_read(watchdog_nmi_touch) == true) {
		__this_cpu_write(watchdog_nmi_touch, false);
		return;
	}

	/* check for a hardlockup
	 * This is done by making sure our timer interrupt
	 * is incrementing.  The timer interrupt should have
	 * fired multiple times before we overflow'd.  If it hasn't
	 * then this is a good indication the cpu is stuck
	 */
	if (is_hardlockup()) {
		int this_cpu = smp_processor_id();

		/* only print hardlockups once */
		if (__this_cpu_read(hard_watchdog_warn) == true)
			return;

		if (hardlockup_panic)
			panic("Watchdog detected hard LOCKUP on cpu %d",
			      this_cpu);
		else
			WARN(1, "Watchdog detected hard LOCKUP on cpu %d",
			     this_cpu);

		__this_cpu_write(hard_watchdog_warn, true);
		return;
	}

	__this_cpu_write(hard_watchdog_warn, false);
	return;
}
#endif /* CONFIG_HARDLOCKUP_DETECTOR */

static void watchdog_interrupt_count(void)
{
	__this_cpu_inc(hrtimer_interrupts);
}

static int watchdog_nmi_enable(unsigned int cpu);
static void watchdog_nmi_disable(unsigned int cpu);

/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
	unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
	struct pt_regs *regs = get_irq_regs();
	int duration;
	int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;

	/* kick the hardlockup detector */
	watchdog_interrupt_count();

	/* kick the softlockup detector */
	wake_up_process(__this_cpu_read(softlockup_watchdog));

	/* .. and repeat */
	hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));

	if (touch_ts == 0) {
		if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
			/*
			 * If the time stamp was touched atomically
			 * make sure the scheduler tick is up to date.
			 */
			__this_cpu_write(softlockup_touch_sync, false);
			sched_clock_tick();
		}

		/* Clear the guest paused flag on watchdog reset */
		kvm_check_and_clear_guest_paused();
		__touch_watchdog();
		return HRTIMER_RESTART;
	}

	/* check for a softlockup
	 * This is done by making sure a high priority task is
	 * being scheduled.  The task touches the watchdog to
	 * indicate it is getting cpu time.  If it hasn't then
	 * this is a good indication some task is hogging the cpu
	 */
	duration = is_softlockup(touch_ts);
	if (unlikely(duration)) {
		/*
		 * If a virtual machine is stopped by the host it can look to
		 * the watchdog like a soft lockup, check to see if the host
		 * stopped the vm before we issue the warning
		 */
		if (kvm_check_and_clear_guest_paused())
			return HRTIMER_RESTART;

		/* only warn once */
		if (__this_cpu_read(soft_watchdog_warn) == true) {
			/*
			 * When multiple processes are causing softlockups the
			 * softlockup detector only warns on the first one
			 * because the code relies on a full quiet cycle to
			 * re-arm.  The second process prevents the quiet cycle
			 * and never gets reported.  Use task pointers to detect
			 * this.
			 */
			if (__this_cpu_read(softlockup_task_ptr_saved) !=
			    current) {
				__this_cpu_write(soft_watchdog_warn, false);
				__touch_watchdog();
			}
			return HRTIMER_RESTART;
		}

		if (softlockup_all_cpu_backtrace) {
			/* Prevent multiple soft-lockup reports if one cpu is already
			 * engaged in dumping cpu back traces
			 */
			if (test_and_set_bit(0, &soft_lockup_nmi_warn)) {
				/* Someone else will report us. Let's give up */
				__this_cpu_write(soft_watchdog_warn, true);
				return HRTIMER_RESTART;
			}
		}

		pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
			smp_processor_id(), duration,
			current->comm, task_pid_nr(current));
		__this_cpu_write(softlockup_task_ptr_saved, current);
		print_modules();
		print_irqtrace_events(current);
		if (regs)
			show_regs(regs);
		else
			dump_stack();

		if (softlockup_all_cpu_backtrace) {
			/* Avoid generating two back traces for current
			 * given that one is already made above
			 */
			trigger_allbutself_cpu_backtrace();

			clear_bit(0, &soft_lockup_nmi_warn);
			/* Barrier to sync with other cpus */
			smp_mb__after_atomic();
		}

		add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
		if (softlockup_panic)
			panic("softlockup: hung tasks");
		__this_cpu_write(soft_watchdog_warn, true);
	} else
		__this_cpu_write(soft_watchdog_warn, false);

	return HRTIMER_RESTART;
}

static void watchdog_set_prio(unsigned int policy, unsigned int prio)
{
	struct sched_param param = { .sched_priority = prio };

	sched_setscheduler(current, policy, &param);
}

static void watchdog_enable(unsigned int cpu)
{
	struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer);

	/* kick off the timer for the hardlockup detector */
	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer->function = watchdog_timer_fn;

	/* Enable the perf event */
	watchdog_nmi_enable(cpu);

	/* done here because hrtimer_start can only pin to smp_processor_id() */
	hrtimer_start(hrtimer, ns_to_ktime(sample_period),
		      HRTIMER_MODE_REL_PINNED);

	/* initialize timestamp */
	watchdog_set_prio(SCHED_FIFO, MAX_RT_PRIO - 1);
	__touch_watchdog();
}

static void watchdog_disable(unsigned int cpu)
{
	struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer);

	watchdog_set_prio(SCHED_NORMAL, 0);
	hrtimer_cancel(hrtimer);
	/* disable the perf event */
	watchdog_nmi_disable(cpu);
}

static void watchdog_cleanup(unsigned int cpu, bool online)
{
	watchdog_disable(cpu);
}

static int watchdog_should_run(unsigned int cpu)
{
	return __this_cpu_read(hrtimer_interrupts) !=
		__this_cpu_read(soft_lockup_hrtimer_cnt);
}

/*
 * The watchdog thread function - touches the timestamp.
 *
 * It only runs once every sample_period seconds (4 seconds by
 * default) to reset the softlockup timestamp. If this gets delayed
 * for more than 2*watchdog_thresh seconds then the debug-printout
 * triggers in watchdog_timer_fn().
 */
static void watchdog(unsigned int cpu)
{
	__this_cpu_write(soft_lockup_hrtimer_cnt,
			 __this_cpu_read(hrtimer_interrupts));
	__touch_watchdog();

	/*
	 * watchdog_nmi_enable() clears the NMI_WATCHDOG_ENABLED bit in the
	 * failure path. Check for failures that can occur asynchronously -
	 * for example, when CPUs are on-lined - and shut down the hardware
	 * perf event on each CPU accordingly.
	 *
	 * The only non-obvious place this bit can be cleared is through
	 * watchdog_nmi_enable(), so a pr_info() is placed there.  Placing a
	 * pr_info here would be too noisy as it would result in a message
	 * every few seconds if the hardlockup was disabled but the softlockup
	 * enabled.
	 */
	if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
		watchdog_nmi_disable(cpu);
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR
/*
 * People like the simple clean cpu node info on boot.
 * Reduce the watchdog noise by only printing messages
 * that are different from what cpu0 displayed.
 */
static unsigned long cpu0_err;

static int watchdog_nmi_enable(unsigned int cpu)
{
	struct perf_event_attr *wd_attr;
	struct perf_event *event = per_cpu(watchdog_ev, cpu);

	/* nothing to do if the hard lockup detector is disabled */
	if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
		goto out;

	/* is it already setup and enabled? */
	if (event && event->state > PERF_EVENT_STATE_OFF)
		goto out;

	/* it is setup but not enabled */
	if (event != NULL)
		goto out_enable;

	wd_attr = &wd_hw_attr;
	wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh);

	/* Try to register using hardware perf events */
	event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback, NULL);

	/* save cpu0 error for future comparison */
	if (cpu == 0 && IS_ERR(event))
		cpu0_err = PTR_ERR(event);

	if (!IS_ERR(event)) {
		/* only print for cpu0 or different than cpu0 */
		if (cpu == 0 || cpu0_err)
			pr_info("enabled on all CPUs, permanently consumes one hw-PMU counter.\n");
		goto out_save;
	}

	/*
	 * Disable the hard lockup detector if _any_ CPU fails to set up
	 * the hardware perf event. The watchdog() function checks
	 * the NMI_WATCHDOG_ENABLED bit periodically.
	 *
	 * The barriers are for syncing up watchdog_enabled across all the
	 * cpus, as clear_bit() does not use barriers.
	 */
	smp_mb__before_atomic();
	clear_bit(NMI_WATCHDOG_ENABLED_BIT, &watchdog_enabled);
	smp_mb__after_atomic();

	/* skip displaying the same error again */
	if (cpu > 0 && (PTR_ERR(event) == cpu0_err))
		return PTR_ERR(event);

	/* vary the KERN level based on the returned errno */
	if (PTR_ERR(event) == -EOPNOTSUPP)
		pr_info("disabled (cpu%i): not supported (no LAPIC?)\n", cpu);
	else if (PTR_ERR(event) == -ENOENT)
		pr_warn("disabled (cpu%i): hardware events not enabled\n",
			cpu);
	else
		pr_err("disabled (cpu%i): unable to create perf event: %ld\n",
			cpu, PTR_ERR(event));

	pr_info("Shutting down hard lockup detector on all cpus\n");

	return PTR_ERR(event);

	/* success path */
out_save:
	per_cpu(watchdog_ev, cpu) = event;
out_enable:
	perf_event_enable(per_cpu(watchdog_ev, cpu));
out:
	return 0;
}

static void watchdog_nmi_disable(unsigned int cpu)
{
	struct perf_event *event = per_cpu(watchdog_ev, cpu);

	if (event) {
		perf_event_disable(event);
		per_cpu(watchdog_ev, cpu) = NULL;

		/* should be in cleanup, but blocks oprofile */
		perf_event_release_kernel(event);
	}
	if (cpu == 0) {
		/* watchdog_nmi_enable() expects this to be zero initially. */
		cpu0_err = 0;
	}
}
#else
static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
static void watchdog_nmi_disable(unsigned int cpu) { return; }
#endif /* CONFIG_HARDLOCKUP_DETECTOR */

static struct smp_hotplug_thread watchdog_threads = {
	.store			= &softlockup_watchdog,
	.thread_should_run	= watchdog_should_run,
	.thread_fn		= watchdog,
	.thread_comm		= "watchdog/%u",
	.setup			= watchdog_enable,
	.cleanup		= watchdog_cleanup,
	.park			= watchdog_disable,
	.unpark			= watchdog_enable,
};

static void restart_watchdog_hrtimer(void *info)
{
	struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer);
	int ret;

	/*
	 * No need to cancel and restart hrtimer if it is currently executing
	 * because it will reprogram itself with the new period now.
	 * We should never see it unqueued here because we are running per-cpu
	 * with interrupts disabled.
	 */
	ret = hrtimer_try_to_cancel(hrtimer);
	if (ret == 1)
		hrtimer_start(hrtimer, ns_to_ktime(sample_period),
				HRTIMER_MODE_REL_PINNED);
}

static void update_watchdog(int cpu)
{
	/*
	 * Make sure that perf event counter will adapt to a new
	 * sampling period. Updating the sampling period directly would
	 * be much nicer but we do not have an API for that now so
	 * let's use a big hammer.
	 * Hrtimer will adopt the new period on the next tick but this
	 * might be late already so we have to restart the timer as well.
	 */
	watchdog_nmi_disable(cpu);
	smp_call_function_single(cpu, restart_watchdog_hrtimer, NULL, 1);
	watchdog_nmi_enable(cpu);
}

static void update_watchdog_all_cpus(void)
{
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu)
		update_watchdog(cpu);
	put_online_cpus();
}

static int watchdog_enable_all_cpus(void)
{
	int err = 0;

	if (!watchdog_running) {
		err = smpboot_register_percpu_thread(&watchdog_threads);
		if (err)
			pr_err("Failed to create watchdog threads, disabled\n");
		else
			watchdog_running = 1;
	} else {
		/*
		 * Enable/disable the lockup detectors or
		 * change the sample period 'on the fly'.
		 */
		update_watchdog_all_cpus();
	}

	return err;
}

/* prepare/enable/disable routines */
/* sysctl functions */
#ifdef CONFIG_SYSCTL
static void watchdog_disable_all_cpus(void)
{
	if (watchdog_running) {
		watchdog_running = 0;
		smpboot_unregister_percpu_thread(&watchdog_threads);
	}
}

/*
 * Update the run state of the lockup detectors.
 */
static int proc_watchdog_update(void)
{
	int err = 0;

	/*
	 * Watchdog threads won't be started if they are already active.
	 * The 'watchdog_running' variable in watchdog_*_all_cpus() takes
	 * care of this. If those threads are already active, the sample
	 * period will be updated and the lockup detectors will be enabled
	 * or disabled 'on the fly'.
	 */
	if (watchdog_enabled && watchdog_thresh)
		err = watchdog_enable_all_cpus();
	else
		watchdog_disable_all_cpus();

	return err;
}

static DEFINE_MUTEX(watchdog_proc_mutex);

/*
 * common function for watchdog, nmi_watchdog and soft_watchdog parameter
 *
 * caller             | table->data points to | 'which' contains the flag(s)
 * -------------------|-----------------------|-----------------------------
 * proc_watchdog      | watchdog_user_enabled | NMI_WATCHDOG_ENABLED or'ed
 *                    |                       | with SOFT_WATCHDOG_ENABLED
 * -------------------|-----------------------|-----------------------------
 * proc_nmi_watchdog  | nmi_watchdog_enabled  | NMI_WATCHDOG_ENABLED
 * -------------------|-----------------------|-----------------------------
 * proc_soft_watchdog | soft_watchdog_enabled | SOFT_WATCHDOG_ENABLED
 */
static int proc_watchdog_common(int which, struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err, old, new;
	int *watchdog_param = (int *)table->data;

	mutex_lock(&watchdog_proc_mutex);

	/*
	 * If the parameter is being read return the state of the corresponding
	 * bit(s) in 'watchdog_enabled', else update 'watchdog_enabled' and the
	 * run state of the lockup detectors.
	 */
	if (!write) {
		*watchdog_param = (watchdog_enabled & which) != 0;
		err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	} else {
		err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
		if (err)
			goto out;

		/*
		 * There is a race window between fetching the current value
		 * from 'watchdog_enabled' and storing the new value. During
		 * this race window, watchdog_nmi_enable() can sneak in and
		 * clear the NMI_WATCHDOG_ENABLED bit in 'watchdog_enabled'.
		 * The 'cmpxchg' detects this race and the loop retries.
		 */
		do {
			old = watchdog_enabled;
			/*
			 * If the parameter value is not zero set the
			 * corresponding bit(s), else clear it(them).
			 */
			if (*watchdog_param)
				new = old | which;
			else
				new = old & ~which;
		} while (cmpxchg(&watchdog_enabled, old, new) != old);

		/*
		 * Update the run state of the lockup detectors.
		 * Restore 'watchdog_enabled' on failure.
		 */
		err = proc_watchdog_update();
		if (err)
			watchdog_enabled = old;
	}
out:
	mutex_unlock(&watchdog_proc_mutex);
	return err;
}

/*
 * /proc/sys/kernel/watchdog
 */
int proc_watchdog(struct ctl_table *table, int write,
		  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(NMI_WATCHDOG_ENABLED|SOFT_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/nmi_watchdog
 */
int proc_nmi_watchdog(struct ctl_table *table, int write,
		      void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(NMI_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/soft_watchdog
 */
int proc_soft_watchdog(struct ctl_table *table, int write,
			void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(SOFT_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/watchdog_thresh
 */
int proc_watchdog_thresh(struct ctl_table *table, int write,
			 void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err, old;

	mutex_lock(&watchdog_proc_mutex);

	old = ACCESS_ONCE(watchdog_thresh);
	err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

	if (err || !write)
		goto out;

	/*
	 * Update the sample period.
	 * Restore 'watchdog_thresh' on failure.
	 */
	set_sample_period();
	err = proc_watchdog_update();
	if (err)
		watchdog_thresh = old;
out:
	mutex_unlock(&watchdog_proc_mutex);
	return err;
}
#endif /* CONFIG_SYSCTL */

void __init lockup_detector_init(void)
{
	set_sample_period();

	if (watchdog_enabled)
		watchdog_enable_all_cpus();
}