/*
 * linux/include/linux/nmi.h
 */
4#ifndef LINUX_NMI_H
5#define LINUX_NMI_H
6
Michal Schmidt99384062006-09-29 01:59:03 -07007#include <linux/sched.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -07008#include <asm/irq.h>
9
/*
 * The run state of the lockup detectors is controlled by the content of the
 * 'watchdog_enabled' variable. Each lockup detector has its dedicated bit -
 * bit 0 for the hard lockup detector and bit 1 for the soft lockup detector.
 *
 * 'watchdog_user_enabled', 'nmi_watchdog_enabled' and 'soft_watchdog_enabled'
 * are variables that are only used as an 'interface' between the parameters
 * in /proc/sys/kernel and the internal state bits in 'watchdog_enabled'. The
 * 'watchdog_thresh' variable is handled differently because its value is not
 * boolean, and the lockup detectors are 'suspended' while 'watchdog_thresh'
 * is equal to zero.
 */
22#define NMI_WATCHDOG_ENABLED_BIT 0
23#define SOFT_WATCHDOG_ENABLED_BIT 1
24#define NMI_WATCHDOG_ENABLED (1 << NMI_WATCHDOG_ENABLED_BIT)
25#define SOFT_WATCHDOG_ENABLED (1 << SOFT_WATCHDOG_ENABLED_BIT)
26
Kyle Yanbd448742017-08-21 15:10:31 -070027DECLARE_PER_CPU(unsigned long, hrtimer_interrupts);
28DECLARE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
29
/**
 * touch_nmi_watchdog - restart NMI watchdog timeout.
 *
 * If the architecture supports the NMI watchdog, touch_nmi_watchdog()
 * may be used to reset the timeout - for code which intentionally
 * disables interrupts for a long time. This call is stateless.
 */
Kyle Yanbd448742017-08-21 15:10:31 -070037#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR_NMI)
Andrew Mortonbb81a092006-12-07 02:14:01 +010038#include <asm/nmi.h>
Kyle Yanbd448742017-08-21 15:10:31 -070039#endif
40
41#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR)
Don Zickus96a84c22010-11-29 17:07:16 -050042extern void touch_nmi_watchdog(void);
43#else
/*
 * No hard lockup detector is available: poking the soft lockup
 * watchdog is the best we can do, so long-running interrupt-disabled
 * sections at least do not trip the soft lockup detector.
 */
static inline void touch_nmi_watchdog(void)
{
	touch_softlockup_watchdog();
}
Don Zickus96a84c22010-11-29 17:07:16 -050048#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -070049
Ulrich Obergfell6e7458a2014-10-13 15:55:35 -070050#if defined(CONFIG_HARDLOCKUP_DETECTOR)
Ulrich Obergfell692297d2015-04-14 15:44:19 -070051extern void hardlockup_detector_disable(void);
Ulrich Obergfell6e7458a2014-10-13 15:55:35 -070052#else
Guenter Roeckaacfbe62015-09-04 15:45:12 -070053static inline void hardlockup_detector_disable(void) {}
Ulrich Obergfell6e7458a2014-10-13 15:55:35 -070054#endif
55
/*
 * Create trigger_all_cpu_backtrace() out of the arch-provided
 * base function. Return whether such support was available,
 * to allow calling code to fall back to some other mechanism:
 */
Chris Metcalf9a01c3e2016-10-07 17:02:45 -070061#ifdef arch_trigger_cpumask_backtrace
Ingo Molnar47cab6a2009-08-03 09:31:54 +020062static inline bool trigger_all_cpu_backtrace(void)
63{
Jeevan Shriramf444a872017-02-15 22:12:54 -080064 #if defined(CONFIG_ARM64)
Rohit Vaswanibe186fd2014-06-26 23:35:09 -070065 arch_trigger_all_cpu_backtrace();
66 else
67 arch_trigger_cpumask_backtrace(cpu_online_mask, false);
68 #endif
69
Ingo Molnar47cab6a2009-08-03 09:31:54 +020070 return true;
71}
Chris Metcalf9a01c3e2016-10-07 17:02:45 -070072
Aaron Tomlinf3aca3d2014-06-23 13:22:05 -070073static inline bool trigger_allbutself_cpu_backtrace(void)
74{
Jeevan Shriramf444a872017-02-15 22:12:54 -080075 #if defined(CONFIG_ARM64)
Rohit Vaswanibe186fd2014-06-26 23:35:09 -070076 arch_trigger_all_cpu_backtrace();
77 else
78 arch_trigger_cpumask_backtrace(cpu_online_mask, true);
79 #endif
80
Chris Metcalf9a01c3e2016-10-07 17:02:45 -070081 return true;
82}
83
/* Backtrace every CPU in @mask (the caller is not excluded). */
static inline bool trigger_cpumask_backtrace(struct cpumask *mask)
{
	arch_trigger_cpumask_backtrace(mask, false);
	return true;
}
90
/* Backtrace a single CPU, identified by @cpu. */
static inline bool trigger_single_cpu_backtrace(int cpu)
{
	arch_trigger_cpumask_backtrace(cpumask_of(cpu), false);
	return true;
}
Russell Kingb2c0b2c2014-09-03 23:57:13 +010097
/* generic implementation */
Chris Metcalf9a01c3e2016-10-07 17:02:45 -070099void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
100 bool exclude_self,
Russell Kingb2c0b2c2014-09-03 23:57:13 +0100101 void (*raise)(cpumask_t *mask));
102bool nmi_cpu_backtrace(struct pt_regs *regs);
103
Ingo Molnar47cab6a2009-08-03 09:31:54 +0200104#else
/*
 * No arch-specific backtrace support: every trigger reports failure
 * so that callers can fall back to some other mechanism.
 */
static inline bool trigger_all_cpu_backtrace(void)
{
	return false;
}
static inline bool trigger_allbutself_cpu_backtrace(void)
{
	return false;
}
static inline bool trigger_cpumask_backtrace(struct cpumask *mask)
{
	return false;
}
static inline bool trigger_single_cpu_backtrace(int cpu)
{
	return false;
}
Andrew Mortonbb81a092006-12-07 02:14:01 +0100121#endif
122
Don Zickus58687ac2010-05-07 17:11:44 -0400123#ifdef CONFIG_LOCKUP_DETECTOR
Mandeep Singh Baines4eec42f2011-05-22 22:10:23 -0700124u64 hw_nmi_get_sample_period(int watchdog_thresh);
Ulrich Obergfell84d56e62015-04-14 15:43:55 -0700125extern int nmi_watchdog_enabled;
126extern int soft_watchdog_enabled;
Frederic Weisbecker3c00ea82013-05-19 20:45:15 +0200127extern int watchdog_user_enabled;
Mandeep Singh Baines586692a2011-05-22 22:10:22 -0700128extern int watchdog_thresh;
Babu Moger0ce66ee2016-12-14 15:06:21 -0800129extern unsigned long watchdog_enabled;
Chris Metcalffe4ba3c2015-06-24 16:55:45 -0700130extern unsigned long *watchdog_cpumask_bits;
Don Zickusb13b3b72017-01-24 15:17:53 -0800131extern atomic_t watchdog_park_in_progress;
Babu Moger0ce66ee2016-12-14 15:06:21 -0800132#ifdef CONFIG_SMP
Aaron Tomlined235872014-06-23 13:22:05 -0700133extern int sysctl_softlockup_all_cpu_backtrace;
Jiri Kosina55537872015-11-05 18:44:41 -0800134extern int sysctl_hardlockup_all_cpu_backtrace;
Babu Moger0ce66ee2016-12-14 15:06:21 -0800135#else
136#define sysctl_softlockup_all_cpu_backtrace 0
137#define sysctl_hardlockup_all_cpu_backtrace 0
138#endif
Kyle Yanbd448742017-08-21 15:10:31 -0700139
Babu Moger0ce66ee2016-12-14 15:06:21 -0800140extern bool is_hardlockup(void);
Don Zickus504d7cf2010-02-12 17:19:19 -0500141struct ctl_table;
Ulrich Obergfell83a80a32015-04-14 15:44:08 -0700142extern int proc_watchdog(struct ctl_table *, int ,
143 void __user *, size_t *, loff_t *);
144extern int proc_nmi_watchdog(struct ctl_table *, int ,
145 void __user *, size_t *, loff_t *);
146extern int proc_soft_watchdog(struct ctl_table *, int ,
147 void __user *, size_t *, loff_t *);
148extern int proc_watchdog_thresh(struct ctl_table *, int ,
149 void __user *, size_t *, loff_t *);
Chris Metcalffe4ba3c2015-06-24 16:55:45 -0700150extern int proc_watchdog_cpumask(struct ctl_table *, int,
151 void __user *, size_t *, loff_t *);
Ulrich Obergfellec6a9062015-09-04 15:45:28 -0700152extern int lockup_detector_suspend(void);
153extern void lockup_detector_resume(void);
Ulrich Obergfell999bbe42015-09-04 15:45:25 -0700154#else
Ulrich Obergfellec6a9062015-09-04 15:45:28 -0700155static inline int lockup_detector_suspend(void)
Ulrich Obergfell999bbe42015-09-04 15:45:25 -0700156{
157 return 0;
158}
159
Ulrich Obergfellec6a9062015-09-04 15:45:28 -0700160static inline void lockup_detector_resume(void)
Ulrich Obergfell999bbe42015-09-04 15:45:25 -0700161{
162}
Don Zickus84e478c2010-02-05 21:47:05 -0500163#endif
164
Tomasz Nowicki44a69f62014-07-22 11:20:12 +0200165#ifdef CONFIG_HAVE_ACPI_APEI_NMI
166#include <asm/nmi.h>
167#endif
168
Linus Torvalds1da177e2005-04-16 15:20:36 -0700169#endif