Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1 | #ifndef LINUX_HARDIRQ_H |
| 2 | #define LINUX_HARDIRQ_H |
| 3 | |
Frederic Weisbecker | 2d4b847 | 2013-07-29 20:29:43 +0200 | [diff] [blame] | 4 | #include <linux/preempt_mask.h> |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 5 | #include <linux/lockdep.h> |
Steven Rostedt | 6a60dd1 | 2008-11-06 15:55:21 -0500 | [diff] [blame] | 6 | #include <linux/ftrace_irq.h> |
Frederic Weisbecker | dcbf832 | 2012-10-05 23:07:19 +0200 | [diff] [blame] | 7 | #include <linux/vtime.h> |
Peter Zijlstra | 0bd3a17 | 2013-11-19 16:13:38 +0100 | [diff] [blame] | 8 | #include <asm/hardirq.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 10 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 11 | extern void synchronize_irq(unsigned int irq); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 12 | |
#if defined(CONFIG_TINY_RCU)

/*
 * Tiny RCU: no per-CPU NMI bookkeeping is needed, so the NMI
 * entry/exit hooks are empty stubs that compile away.
 */
static inline void rcu_nmi_enter(void)
{
}

static inline void rcu_nmi_exit(void)
{
}

#else
/*
 * Tree RCU: real implementations live in the RCU core and must be
 * called on every NMI entry/exit (see nmi_enter()/nmi_exit() below).
 */
extern void rcu_nmi_enter(void);
extern void rcu_nmi_exit(void);
#endif
Steven Rostedt | 2232c2d | 2008-02-29 18:46:50 +0100 | [diff] [blame] | 27 | |
/*
 * It is safe to do non-atomic ops on ->hardirq_context,
 * because NMI handlers may not preempt and the ops are
 * always balanced, so the interrupted value of ->hardirq_context
 * will always be restored.
 */
/*
 * __irq_enter() - raw hardirq entry: account interrupted task time,
 * raise HARDIRQ_OFFSET in the preempt count (making in_irq() true),
 * then tell lockdep/tracing we are in hardirq context.  Most callers
 * should use irq_enter() instead, which additionally handles NO_HZ.
 */
#define __irq_enter()					\
	do {						\
		account_irq_enter_time(current);	\
		preempt_count_add(HARDIRQ_OFFSET);	\
		trace_hardirq_enter();			\
	} while (0)
| 40 | |
/*
 * Enter irq context (on NO_HZ, update jiffies).
 * Pairs with irq_exit() below.
 */
extern void irq_enter(void);
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 45 | |
/*
 * Exit irq context without processing softirqs.
 * Exact mirror of __irq_enter(): tracing hook first, then time
 * accounting, then drop HARDIRQ_OFFSET from the preempt count.
 */
#define __irq_exit()					\
	do {						\
		trace_hardirq_exit();		\
		account_irq_exit_time(current);	\
		preempt_count_sub(HARDIRQ_OFFSET);	\
	} while (0)
| 55 | |
/*
 * Exit irq context and process softirqs if needed.
 * Pairs with irq_enter() above.
 */
extern void irq_exit(void);
| 60 | |
/*
 * nmi_enter() - mark entry into NMI context.  Statement order matters:
 *  - lockdep_off() first: lockdep is disabled for the whole NMI window
 *    (presumably because its machinery is not NMI-safe — the matching
 *    lockdep_on() in nmi_exit() is the very last statement);
 *  - ftrace_nmi_enter() before the preempt-count update so ftrace sees
 *    the transition;
 *  - BUG_ON(in_nmi()) asserts NMIs do not nest;
 *  - both NMI_OFFSET and HARDIRQ_OFFSET are added, so in_nmi() and
 *    in_irq() both become true;
 *  - rcu_nmi_enter() and the tracing hook run once the counts reflect
 *    NMI context.
 */
#define nmi_enter()						\
	do {							\
		lockdep_off();					\
		ftrace_nmi_enter();				\
		BUG_ON(in_nmi());				\
		preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET);	\
		rcu_nmi_enter();				\
		trace_hardirq_enter();				\
	} while (0)
Linus Torvalds | 5f34fe1 | 2008-12-30 16:10:19 -0800 | [diff] [blame] | 70 | |
/*
 * nmi_exit() - mark exit from NMI context.  Runs the exact reverse
 * of nmi_enter(): tracing and RCU hooks first (while still in NMI
 * context), BUG_ON(!in_nmi()) asserts balanced usage, then both
 * offsets are dropped, ftrace is notified, and lockdep is re-enabled
 * as the final step.
 */
#define nmi_exit()						\
	do {							\
		trace_hardirq_exit();				\
		rcu_nmi_exit();					\
		BUG_ON(!in_nmi());				\
		preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET);	\
		ftrace_nmi_exit();				\
		lockdep_on();					\
	} while (0)
Ingo Molnar | de30a2b | 2006-07-03 00:24:42 -0700 | [diff] [blame] | 80 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 81 | #endif /* LINUX_HARDIRQ_H */ |