/*
 * linux/include/linux/nmi.h
 */
#ifndef LINUX_NMI_H
#define LINUX_NMI_H

#include <linux/sched.h>
#include <asm/irq.h>
#if defined(CONFIG_HAVE_NMI_WATCHDOG)
#include <asm/nmi.h>
#endif

#ifdef CONFIG_LOCKUP_DETECTOR
void lockup_detector_init(void);
void lockup_detector_soft_poweroff(void);
void lockup_detector_cleanup(void);
bool is_hardlockup(void);

extern int watchdog_user_enabled;
extern int nmi_watchdog_enabled;
extern int soft_watchdog_enabled;
extern int watchdog_thresh;
extern unsigned long watchdog_enabled;

extern struct cpumask watchdog_cpumask;
extern unsigned long *watchdog_cpumask_bits;
#ifdef CONFIG_SMP
extern int sysctl_softlockup_all_cpu_backtrace;
extern int sysctl_hardlockup_all_cpu_backtrace;
#else
#define sysctl_softlockup_all_cpu_backtrace 0
#define sysctl_hardlockup_all_cpu_backtrace 0
#endif /* !CONFIG_SMP */

#else /* CONFIG_LOCKUP_DETECTOR */
static inline void lockup_detector_init(void) { }
static inline void lockup_detector_soft_poweroff(void) { }
static inline void lockup_detector_cleanup(void) { }
#endif /* !CONFIG_LOCKUP_DETECTOR */

#ifdef CONFIG_SOFTLOCKUP_DETECTOR
extern void touch_softlockup_watchdog_sched(void);
extern void touch_softlockup_watchdog(void);
extern void touch_softlockup_watchdog_sync(void);
extern void touch_all_softlockup_watchdogs(void);
extern unsigned int softlockup_panic;
#else
static inline void touch_softlockup_watchdog_sched(void) { }
static inline void touch_softlockup_watchdog(void) { }
static inline void touch_softlockup_watchdog_sync(void) { }
static inline void touch_all_softlockup_watchdogs(void) { }
#endif

#ifdef CONFIG_DETECT_HUNG_TASK
void reset_hung_task_detector(void);
#else
static inline void reset_hung_task_detector(void) { }
#endif

/*
 * The run state of the lockup detectors is controlled by the contents of the
 * 'watchdog_enabled' variable. Each lockup detector has its own dedicated bit:
 * bit 0 for the hard lockup detector and bit 1 for the soft lockup detector.
 *
 * 'watchdog_user_enabled', 'nmi_watchdog_enabled' and 'soft_watchdog_enabled'
 * are variables that are only used as an 'interface' between the parameters
 * in /proc/sys/kernel and the internal state bits in 'watchdog_enabled'. The
 * 'watchdog_thresh' variable is handled differently because its value is not
 * boolean, and the lockup detectors are 'suspended' while 'watchdog_thresh'
 * equals zero.
 */
#define NMI_WATCHDOG_ENABLED_BIT	0
#define SOFT_WATCHDOG_ENABLED_BIT	1
#define NMI_WATCHDOG_ENABLED		(1 << NMI_WATCHDOG_ENABLED_BIT)
#define SOFT_WATCHDOG_ENABLED		(1 << SOFT_WATCHDOG_ENABLED_BIT)

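/*
 * Illustrative sketch (not part of the original header): given the bit
 * layout above, enabling or testing a detector is a plain bit operation
 * on 'watchdog_enabled', e.g.
 *
 *	if (watchdog_enabled & SOFT_WATCHDOG_ENABLED)
 *		... the soft lockup detector is enabled ...
 *	watchdog_enabled |= NMI_WATCHDOG_ENABLED;	// enable hard detector
 */
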
#if defined(CONFIG_HARDLOCKUP_DETECTOR)
extern void hardlockup_detector_disable(void);
extern unsigned int hardlockup_panic;
#else
static inline void hardlockup_detector_disable(void) {}
#endif

#if defined(CONFIG_HARDLOCKUP_DETECTOR_PERF)
extern void arch_touch_nmi_watchdog(void);
extern void hardlockup_detector_perf_stop(void);
extern void hardlockup_detector_perf_restart(void);
extern void hardlockup_detector_perf_disable(void);
extern void hardlockup_detector_perf_cleanup(void);
#else
static inline void hardlockup_detector_perf_stop(void) { }
static inline void hardlockup_detector_perf_restart(void) { }
static inline void hardlockup_detector_perf_disable(void) { }
static inline void hardlockup_detector_perf_cleanup(void) { }
#if !defined(CONFIG_HAVE_NMI_WATCHDOG)
static inline void arch_touch_nmi_watchdog(void) {}
#endif
#endif
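
/*
 * Hedged sketch (an assumption drawn from the names above, not taken
 * from this header): the stop/restart pair is meant to bracket
 * operations the perf-based NMI detector must not observe, e.g.
 *
 *	hardlockup_detector_perf_stop();
 *	... reconfigure hardware that the perf event depends on ...
 *	hardlockup_detector_perf_restart();
 */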

/**
 * touch_nmi_watchdog - restart NMI watchdog timeout.
 *
 * If the architecture supports the NMI watchdog, touch_nmi_watchdog()
 * may be used to reset the timeout - for code which intentionally
 * disables interrupts for a long time. This call is stateless.
 */
static inline void touch_nmi_watchdog(void)
{
	arch_touch_nmi_watchdog();
	touch_softlockup_watchdog();
}
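
/*
 * Illustrative usage sketch (not part of the original header): a loop
 * that legitimately keeps interrupts off for a long time pokes the
 * watchdogs on each iteration; slow_hw_poll() is a hypothetical helper.
 *
 *	local_irq_disable();
 *	while (slow_hw_poll())
 *		touch_nmi_watchdog();
 *	local_irq_enable();
 */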

/*
 * Create the trigger_*_backtrace() helpers out of the arch-provided
 * base function. They return whether such support is available, to
 * allow calling code to fall back to some other mechanism:
 */
#ifdef arch_trigger_cpumask_backtrace
static inline bool trigger_all_cpu_backtrace(void)
{
	arch_trigger_cpumask_backtrace(cpu_online_mask, false);
	return true;
}

static inline bool trigger_allbutself_cpu_backtrace(void)
{
	arch_trigger_cpumask_backtrace(cpu_online_mask, true);
	return true;
}

static inline bool trigger_cpumask_backtrace(struct cpumask *mask)
{
	arch_trigger_cpumask_backtrace(mask, false);
	return true;
}

static inline bool trigger_single_cpu_backtrace(int cpu)
{
	arch_trigger_cpumask_backtrace(cpumask_of(cpu), false);
	return true;
}

/* generic implementation */
void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
				   bool exclude_self,
				   void (*raise)(cpumask_t *mask));
bool nmi_cpu_backtrace(struct pt_regs *regs);

#else
static inline bool trigger_all_cpu_backtrace(void)
{
	return false;
}
static inline bool trigger_allbutself_cpu_backtrace(void)
{
	return false;
}
static inline bool trigger_cpumask_backtrace(struct cpumask *mask)
{
	return false;
}
static inline bool trigger_single_cpu_backtrace(int cpu)
{
	return false;
}
#endif
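
/*
 * Illustrative fallback sketch (not part of the original header):
 * because every trigger_*() helper reports whether arch support
 * exists, callers can degrade gracefully, e.g.
 *
 *	if (!trigger_all_cpu_backtrace())
 *		dump_stack();	// at least dump the current CPU
 */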

#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
u64 hw_nmi_get_sample_period(int watchdog_thresh);
#endif
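
/*
 * Hedged note (arch-specific behaviour, stated as an assumption): the
 * returned value is the PMU sample period that makes the hard lockup
 * check fire after roughly 'watchdog_thresh' seconds; on x86 it works
 * out to about that many seconds' worth of CPU cycles:
 *
 *	period = (u64)cpu_khz * 1000 * watchdog_thresh;
 */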

#if defined(CONFIG_HARDLOCKUP_CHECK_TIMESTAMP) && \
    defined(CONFIG_HARDLOCKUP_DETECTOR)
void watchdog_update_hrtimer_threshold(u64 period);
#else
static inline void watchdog_update_hrtimer_threshold(u64 period) { }
#endif

struct ctl_table;
extern int proc_watchdog(struct ctl_table *, int,
			 void __user *, size_t *, loff_t *);
extern int proc_nmi_watchdog(struct ctl_table *, int,
			     void __user *, size_t *, loff_t *);
extern int proc_soft_watchdog(struct ctl_table *, int,
			      void __user *, size_t *, loff_t *);
extern int proc_watchdog_thresh(struct ctl_table *, int,
				void __user *, size_t *, loff_t *);
extern int proc_watchdog_cpumask(struct ctl_table *, int,
				 void __user *, size_t *, loff_t *);
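
/*
 * These handlers back the lockup-detector sysctls under
 * /proc/sys/kernel/ (watchdog, nmi_watchdog, soft_watchdog,
 * watchdog_thresh, watchdog_cpumask). Illustrative shell usage
 * (not part of the original header):
 *
 *	# disable both lockup detectors at once
 *	echo 0 > /proc/sys/kernel/watchdog
 */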

#ifdef CONFIG_HAVE_ACPI_APEI_NMI
#include <asm/nmi.h>
#endif

#endif