#ifndef LINUX_HARDIRQ_H
#define LINUX_HARDIRQ_H

#include <linux/preempt.h>
#ifdef CONFIG_PREEMPT
#include <linux/smp_lock.h>
#endif
#include <linux/lockdep.h>
#include <linux/ftrace_irq.h>
#include <asm/hardirq.h>

/*
 * We put the hardirq and softirq counter into the preemption
 * counter. The bitmask has the following meaning:
 *
 * - bits 0-7 are the preemption count (max preemption depth: 256)
 * - bits 8-15 are the softirq count (max # of softirqs: 256)
 *
 * The hardirq count can in theory reach the same value as NR_IRQS.
 * In reality, the number of nested IRQs is limited by the stack
 * size as well. For archs with over 1000 IRQs it is not practical
 * to expect that they will all nest. We give a max of 10 bits for
 * hardirq nesting. An arch may choose to give less than 10 bits.
 * m68k expects it to be 8.
 *
 * - bits 16-25 are the hardirq count (max # of nested hardirqs: 1024)
 * - bit 26 is the NMI_MASK
 * - bit 28 is the PREEMPT_ACTIVE flag (arch-defined; the generic
 *   fallback below puts it at bit 27)
 *
 * PREEMPT_MASK: 0x000000ff
 * SOFTIRQ_MASK: 0x0000ff00
 * HARDIRQ_MASK: 0x03ff0000
 *     NMI_MASK: 0x04000000
 */
#define PREEMPT_BITS	8
#define SOFTIRQ_BITS	8
#define NMI_BITS	1

#define MAX_HARDIRQ_BITS 10

#ifndef HARDIRQ_BITS
# define HARDIRQ_BITS	MAX_HARDIRQ_BITS
#endif

#if HARDIRQ_BITS > MAX_HARDIRQ_BITS
#error HARDIRQ_BITS too high!
#endif

#define PREEMPT_SHIFT	0
#define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + SOFTIRQ_BITS)
#define NMI_SHIFT	(HARDIRQ_SHIFT + HARDIRQ_BITS)

#define __IRQ_MASK(x)	((1UL << (x))-1)

#define PREEMPT_MASK	(__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
#define SOFTIRQ_MASK	(__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
#define HARDIRQ_MASK	(__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
#define NMI_MASK	(__IRQ_MASK(NMI_BITS) << NMI_SHIFT)

#define PREEMPT_OFFSET	(1UL << PREEMPT_SHIFT)
#define SOFTIRQ_OFFSET	(1UL << SOFTIRQ_SHIFT)
#define HARDIRQ_OFFSET	(1UL << HARDIRQ_SHIFT)
#define NMI_OFFSET	(1UL << NMI_SHIFT)
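
/*
 * Worked example (the values follow directly from the defaults above,
 * with HARDIRQ_BITS == 10):
 *
 *    __IRQ_MASK(10)              == (1UL << 10) - 1 == 0x3ff
 *    0x3ff << HARDIRQ_SHIFT (16) == HARDIRQ_MASK    == 0x03ff0000
 *    1UL << HARDIRQ_SHIFT        == HARDIRQ_OFFSET  == 0x00010000
 *
 * Each *_OFFSET is the increment that bumps its field by one, e.g.
 * nested hardirq entries each add HARDIRQ_OFFSET, and the nesting
 * depth, when needed, is
 * (preempt_count() & HARDIRQ_MASK) >> HARDIRQ_SHIFT.
 */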

#define SOFTIRQ_DISABLE_OFFSET	(2 * SOFTIRQ_OFFSET)
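
/*
 * Note: local_bh_disable() raises the softirq field by
 * SOFTIRQ_DISABLE_OFFSET (two counts), while actually serving a softirq
 * adds a single SOFTIRQ_OFFSET. Keeping the two apart is what lets
 * in_serving_softirq() below test the SOFTIRQ_OFFSET bit alone to
 * distinguish "running a softirq handler" from "merely has bottom
 * halves disabled".
 */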

#ifndef PREEMPT_ACTIVE
#define PREEMPT_ACTIVE_BITS	1
#define PREEMPT_ACTIVE_SHIFT	(NMI_SHIFT + NMI_BITS)
#define PREEMPT_ACTIVE	(__IRQ_MASK(PREEMPT_ACTIVE_BITS) << PREEMPT_ACTIVE_SHIFT)
#endif

#if PREEMPT_ACTIVE < (1 << (NMI_SHIFT + NMI_BITS))
#error PREEMPT_ACTIVE is too low!
#endif

#define hardirq_count()	(preempt_count() & HARDIRQ_MASK)
#define softirq_count()	(preempt_count() & SOFTIRQ_MASK)
#define irq_count()	(preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
				 | NMI_MASK))

/*
 * Are we doing bottom half or hardware interrupt processing?
 *
 * in_irq()             - hardirq handler is running
 * in_softirq()         - softirq is being processed or bottom halves
 *                        are disabled
 * in_interrupt()       - hardirq, softirq or NMI context
 * in_serving_softirq() - softirq handler is actually running
 */
#define in_irq()		(hardirq_count())
#define in_softirq()		(softirq_count())
#define in_interrupt()		(irq_count())
#define in_serving_softirq()	(softirq_count() & SOFTIRQ_OFFSET)
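
/*
 * Example (a sketch, not part of this header): code reachable from both
 * process and interrupt context may use these tests to pick a
 * non-sleeping allocation mode:
 *
 *    gfp_t gfp = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;
 *    void *buf = kmalloc(size, gfp);
 *
 * in_softirq() is also true when bottom halves are merely disabled
 * (see SOFTIRQ_DISABLE_OFFSET above); in_serving_softirq() is true
 * only while a softirq handler is actually executing.
 */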

/*
 * Are we in NMI context?
 */
#define in_nmi()	(preempt_count() & NMI_MASK)

#if defined(CONFIG_PREEMPT)
# define PREEMPT_INATOMIC_BASE kernel_locked()
# define PREEMPT_CHECK_OFFSET 1
#else
# define PREEMPT_INATOMIC_BASE 0
# define PREEMPT_CHECK_OFFSET 0
#endif
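
/*
 * Note: under CONFIG_PREEMPT the big kernel lock keeps preempt_count()
 * raised by one while held, yet the BKL is released automatically when
 * its holder sleeps. PREEMPT_INATOMIC_BASE is therefore kernel_locked()
 * (1 while the BKL is held), so that in_atomic() below does not report
 * a task that merely holds the BKL as atomic.
 */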

/*
 * Are we running in atomic context? WARNING: this macro cannot
 * always detect atomic context; in particular, it cannot know about
 * held spinlocks in non-preemptible kernels. Thus it should not be
 * used in the general case to determine whether sleeping is possible.
 * Do not use in_atomic() in driver code.
 */
#define in_atomic()	((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_INATOMIC_BASE)
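
/*
 * Concretely: with CONFIG_PREEMPT disabled, spin_lock() does not touch
 * preempt_count() at all, so after
 *
 *    spin_lock(&lock);
 *
 * in_atomic() still reads false even though sleeping here would be a
 * bug. This is the main reason the macro cannot be used as a general
 * "may I sleep?" test.
 */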

/*
 * Check whether we were atomic before we did preempt_disable():
 * (used by the scheduler, *after* releasing the kernel lock)
 */
#define in_atomic_preempt_off() \
		((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_CHECK_OFFSET)
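
/*
 * Note: after a bare preempt_disable() the count is expected to be
 * exactly PREEMPT_CHECK_OFFSET (1 on CONFIG_PREEMPT, where the
 * increment is real; 0 otherwise); any other value means the caller
 * was already atomic before disabling preemption.
 */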

#ifdef CONFIG_PREEMPT
# define preemptible()	(preempt_count() == 0 && !irqs_disabled())
# define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
#else
# define preemptible()	0
# define IRQ_EXIT_OFFSET HARDIRQ_OFFSET
#endif
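
/*
 * Note: irq_exit() subtracts IRQ_EXIT_OFFSET rather than
 * HARDIRQ_OFFSET. On CONFIG_PREEMPT that leaves one count behind,
 * keeping preemption disabled while any pending softirqs are run; the
 * remaining count is then dropped with preempt_enable_no_resched() at
 * the end of irq_exit().
 */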

#if defined(CONFIG_SMP) || defined(CONFIG_GENERIC_HARDIRQS)
extern void synchronize_irq(unsigned int irq);
#else
# define synchronize_irq(irq)	barrier()
#endif
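
/*
 * Example (a sketch; free_dev_buffers() is a hypothetical helper): a
 * driver tear-down path typically quiesces its interrupt before
 * freeing data the handler touches:
 *
 *    disable_irq_nosync(dev->irq);
 *    synchronize_irq(dev->irq);
 *    free_dev_buffers(dev);
 *
 * disable_irq_nosync() stops new invocations without waiting;
 * synchronize_irq() then waits for any handler still running on
 * another CPU. Where it is defined as barrier() there is no other CPU
 * for a handler to be running on.
 */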

struct task_struct;

#if !defined(CONFIG_VIRT_CPU_ACCOUNTING) && !defined(CONFIG_IRQ_TIME_ACCOUNTING)
static inline void account_system_vtime(struct task_struct *tsk)
{
}
#else
extern void account_system_vtime(struct task_struct *tsk);
#endif

#if defined(CONFIG_NO_HZ)
#if defined(CONFIG_TINY_RCU) || defined(CONFIG_TINY_PREEMPT_RCU)
extern void rcu_enter_nohz(void);
extern void rcu_exit_nohz(void);

static inline void rcu_irq_enter(void)
{
	rcu_exit_nohz();
}

static inline void rcu_irq_exit(void)
{
	rcu_enter_nohz();
}

static inline void rcu_nmi_enter(void)
{
}

static inline void rcu_nmi_exit(void)
{
}

#else
extern void rcu_irq_enter(void);
extern void rcu_irq_exit(void);
extern void rcu_nmi_enter(void);
extern void rcu_nmi_exit(void);
#endif
#else
# define rcu_irq_enter() do { } while (0)
# define rcu_irq_exit() do { } while (0)
# define rcu_nmi_enter() do { } while (0)
# define rcu_nmi_exit() do { } while (0)
#endif /* #if defined(CONFIG_NO_HZ) */
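
/*
 * Note: on NO_HZ kernels an idle CPU enters an RCU "nohz" state in
 * which it reports no quiescent states. For Tiny RCU, interrupt
 * entry/exit simply maps onto leaving/re-entering that state, since
 * taking an interrupt means the CPU is awake again; Tree RCU provides
 * real functions that track nesting. Without NO_HZ there is nothing
 * to track and the hooks compile away.
 */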

/*
 * It is safe to do non-atomic ops on ->hardirq_context,
 * because NMI handlers may not preempt and the ops are
 * always balanced, so the interrupted value of ->hardirq_context
 * will always be restored.
 */
#define __irq_enter()					\
	do {						\
		account_system_vtime(current);		\
		add_preempt_count(HARDIRQ_OFFSET);	\
		trace_hardirq_enter();			\
	} while (0)

/*
 * Enter irq context (on NO_HZ, update jiffies):
 */
extern void irq_enter(void);
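
/*
 * Example (a sketch): an arch-level interrupt entry path is expected
 * to bracket its handler dispatch like this:
 *
 *    irq_enter();
 *    generic_handle_irq(irq);
 *    irq_exit();
 *
 * irq_enter() additionally wakes up RCU and, on NO_HZ, catches up
 * jiffies when the CPU was idle, which is why most code should prefer
 * it over the bare __irq_enter().
 */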

/*
 * Exit irq context without processing softirqs:
 */
#define __irq_exit()					\
	do {						\
		trace_hardirq_exit();			\
		account_system_vtime(current);		\
		sub_preempt_count(HARDIRQ_OFFSET);	\
	} while (0)

/*
 * Exit irq context and process softirqs if needed:
 */
extern void irq_exit(void);

#define nmi_enter()						\
	do {							\
		ftrace_nmi_enter();				\
		BUG_ON(in_nmi());				\
		add_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET);	\
		lockdep_off();					\
		rcu_nmi_enter();				\
		trace_hardirq_enter();				\
	} while (0)

#define nmi_exit()						\
	do {							\
		trace_hardirq_exit();				\
		rcu_nmi_exit();					\
		lockdep_on();					\
		BUG_ON(!in_nmi());				\
		sub_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET);	\
		ftrace_nmi_exit();				\
	} while (0)
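
/*
 * Note: nmi_exit() undoes nmi_enter() step by step in exact reverse
 * order, so the interrupted context is restored precisely. lockdep is
 * switched off for the duration because its tracking is not NMI-safe,
 * and NMI_OFFSET and HARDIRQ_OFFSET are both added so that in_nmi(),
 * in_irq() and in_interrupt() all read true inside an NMI handler.
 */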

#endif /* LINUX_HARDIRQ_H */