/*
 * linux/include/linux/nmi.h
 */
#ifndef LINUX_NMI_H
#define LINUX_NMI_H

#include <linux/sched.h>
#include <asm/irq.h>

/**
 * touch_nmi_watchdog - restart NMI watchdog timeout.
 *
 * If the architecture supports the NMI watchdog, touch_nmi_watchdog()
 * may be used to reset the timeout - for code which intentionally
 * disables interrupts for a long time. This call is stateless.
 */
#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR)
#include <asm/nmi.h>
extern void touch_nmi_watchdog(void);
#else
static inline void touch_nmi_watchdog(void)
{
	touch_softlockup_watchdog();
}
#endif
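
/*
 * Usage sketch (illustrative only, not part of this header): a long-running
 * loop with interrupts disabled can pet the watchdog on each iteration so
 * the NMI watchdog does not fire. poll_hw_done() below is a hypothetical
 * helper standing in for whatever condition the caller is waiting on:
 *
 *	local_irq_disable();
 *	while (!poll_hw_done()) {
 *		cpu_relax();
 *		touch_nmi_watchdog();
 *	}
 *	local_irq_enable();
 */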

#if defined(CONFIG_HARDLOCKUP_DETECTOR)
extern void hardlockup_detector_disable(void);
#else
static inline void hardlockup_detector_disable(void) {}
#endif

/*
 * Create trigger_all_cpu_backtrace() out of the arch-provided
 * base function. Return whether such support was available,
 * to allow calling code to fall back to some other mechanism:
 */
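/*
 * Typical caller pattern (a minimal sketch; dump_stack() stands in for
 * whatever local fallback the caller prefers when the architecture does
 * not provide arch_trigger_cpumask_backtrace()):
 *
 *	if (!trigger_all_cpu_backtrace())
 *		dump_stack();
 */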
#ifdef arch_trigger_cpumask_backtrace
static inline bool trigger_all_cpu_backtrace(void)
{
	arch_trigger_cpumask_backtrace(cpu_online_mask, false);
	return true;
}

static inline bool trigger_allbutself_cpu_backtrace(void)
{
	arch_trigger_cpumask_backtrace(cpu_online_mask, true);
	return true;
}

static inline bool trigger_cpumask_backtrace(struct cpumask *mask)
{
	arch_trigger_cpumask_backtrace(mask, false);
	return true;
}

static inline bool trigger_single_cpu_backtrace(int cpu)
{
	arch_trigger_cpumask_backtrace(cpumask_of(cpu), false);
	return true;
}

/* generic implementation */
void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
				   bool exclude_self,
				   void (*raise)(cpumask_t *mask));
bool nmi_cpu_backtrace(struct pt_regs *regs);

#else
static inline bool trigger_all_cpu_backtrace(void)
{
	return false;
}
static inline bool trigger_allbutself_cpu_backtrace(void)
{
	return false;
}
static inline bool trigger_cpumask_backtrace(struct cpumask *mask)
{
	return false;
}
static inline bool trigger_single_cpu_backtrace(int cpu)
{
	return false;
}
#endif

#ifdef CONFIG_LOCKUP_DETECTOR
u64 hw_nmi_get_sample_period(int watchdog_thresh);
extern int nmi_watchdog_enabled;
extern int soft_watchdog_enabled;
extern int watchdog_user_enabled;
extern int watchdog_thresh;
extern unsigned long *watchdog_cpumask_bits;
extern int sysctl_softlockup_all_cpu_backtrace;
extern int sysctl_hardlockup_all_cpu_backtrace;
struct ctl_table;
extern int proc_watchdog(struct ctl_table *, int,
			 void __user *, size_t *, loff_t *);
extern int proc_nmi_watchdog(struct ctl_table *, int,
			     void __user *, size_t *, loff_t *);
extern int proc_soft_watchdog(struct ctl_table *, int,
			      void __user *, size_t *, loff_t *);
extern int proc_watchdog_thresh(struct ctl_table *, int,
				void __user *, size_t *, loff_t *);
extern int proc_watchdog_cpumask(struct ctl_table *, int,
				 void __user *, size_t *, loff_t *);
extern int lockup_detector_suspend(void);
extern void lockup_detector_resume(void);
#else
static inline int lockup_detector_suspend(void)
{
	return 0;
}

static inline void lockup_detector_resume(void)
{
}
#endif

#ifdef CONFIG_HAVE_ACPI_APEI_NMI
#include <asm/nmi.h>
#endif

#endif