#ifndef LINUX_HARDIRQ_H
#define LINUX_HARDIRQ_H

#include <linux/preempt_mask.h>
#include <linux/lockdep.h>
#include <linux/ftrace_irq.h>
#include <linux/vtime.h>
#include <asm/hardirq.h>

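/*
 * synchronize_irq() waits for all in-flight handlers of @irq, threaded
 * ones included, to complete; synchronize_hardirq() waits only for the
 * hard IRQ part and returns false if a threaded handler is still
 * running.  Neither may be called while holding a resource the handler
 * might need (see the kerneldoc in kernel/irq/manage.c).
 */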
extern void synchronize_irq(unsigned int irq);
extern bool synchronize_hardirq(unsigned int irq);

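/*
 * rcu_nmi_enter()/rcu_nmi_exit() let RCU fix up its dynticks-idle
 * bookkeeping when an NMI arrives.  Tiny RCU (the !SMP implementation)
 * has no such state to repair, so the hooks reduce to empty stubs.
 */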
#if defined(CONFIG_TINY_RCU)

static inline void rcu_nmi_enter(void)
{
}

static inline void rcu_nmi_exit(void)
{
}

#else
extern void rcu_nmi_enter(void);
extern void rcu_nmi_exit(void);
#endif

/*
 * It is safe to do non-atomic ops on ->hardirq_context,
 * because NMI handlers may not preempt and the ops are
 * always balanced, so the interrupted value of ->hardirq_context
 * will always be restored.
 */
#define __irq_enter()					\
	do {						\
		account_irq_enter_time(current);	\
		preempt_count_add(HARDIRQ_OFFSET);	\
		trace_hardirq_enter();			\
	} while (0)
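/*
 * Concretely: if an NMI lands in the middle of the read-modify-write
 * on preempt_count above, the NMI's own nmi_enter()/nmi_exit() pair
 * adds and then subtracts the same offsets, so the count is back to
 * the value the interrupted code originally read before that code
 * resumes and writes its result.
 */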

/*
 * Enter irq context (on NO_HZ, update jiffies):
 */
extern void irq_enter(void);
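/*
 * Roughly (see kernel/softirq.c), irq_enter() amounts to:
 *
 *	rcu_irq_enter();
 *	if (is_idle_task(current) && !in_interrupt())
 *		tick_irq_enter();	// catch up jiffies after idle
 *	__irq_enter();
 */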

/*
 * Exit irq context without processing softirqs:
 */
#define __irq_exit()					\
	do {						\
		trace_hardirq_exit();			\
		account_irq_exit_time(current);		\
		preempt_count_sub(HARDIRQ_OFFSET);	\
	} while (0)

/*
 * Exit irq context and process softirqs if needed:
 */
extern void irq_exit(void);

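/*
 * Illustrative use, assuming a hypothetical arch-level dispatch path:
 * every hardware interrupt brackets its handler so that preempt_count,
 * time accounting and irq tracing stay balanced:
 *
 *	irq_enter();
 *	generic_handle_irq(irq);
 *	irq_exit();
 */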
#define nmi_enter()						\
	do {							\
		lockdep_off();					\
		ftrace_nmi_enter();				\
		BUG_ON(in_nmi());				\
		preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET);	\
		rcu_nmi_enter();				\
		trace_hardirq_enter();				\
	} while (0)

#define nmi_exit()						\
	do {							\
		trace_hardirq_exit();				\
		rcu_nmi_exit();					\
		BUG_ON(!in_nmi());				\
		preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET);	\
		ftrace_nmi_exit();				\
		lockdep_on();					\
	} while (0)
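/*
 * Note that nmi_exit() undoes nmi_enter() in exactly the reverse
 * order, and that the BUG_ON()s enforce that NMIs do not nest.  A
 * sketch of a hypothetical arch NMI entry point:
 *
 *	nmi_enter();
 *	handle_nmi(regs);	// hypothetical; must not sleep or
 *				// take locks the interrupted code holds
 *	nmi_exit();
 */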
Ingo Molnar | de30a2b | 2006-07-03 00:24:42 -0700 | [diff] [blame] | 81 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 82 | #endif /* LINUX_HARDIRQ_H */ |