#ifndef LINUX_HARDIRQ_H
#define LINUX_HARDIRQ_H

#include <linux/preempt.h>
#include <linux/lockdep.h>
#include <linux/ftrace_irq.h>
#include <asm/hardirq.h>

/*
 * We put the hardirq and softirq counter into the preemption
 * counter. The bitmask has the following meaning:
 *
 * - bits 0-7 are the preemption count (max preemption depth: 256)
 * - bits 8-15 are the softirq count (max # of softirqs: 256)
 *
 * The hardirq count could in theory be as large as NR_IRQS.
 * In reality, the number of nested IRQs is also limited by the
 * stack size. For archs with over 1000 IRQs it is not practical
 * to expect that they will all nest. We give a max of 10 bits for
 * hardirq nesting. An arch may choose to give fewer than 10 bits;
 * m68k expects it to be 8.
 *
 * - bits 16-25 are the hardirq count (max # of nested hardirqs: 1024)
 * - bit 26 is the NMI flag (NMI_MASK)
 * - bit 27 is the PREEMPT_ACTIVE flag per the generic definition
 *   below (an arch may override PREEMPT_ACTIVE and place it higher,
 *   e.g. at bit 28)
 *
 * PREEMPT_MASK: 0x000000ff
 * SOFTIRQ_MASK: 0x0000ff00
 * HARDIRQ_MASK: 0x03ff0000
 *     NMI_MASK: 0x04000000
 */
#define PREEMPT_BITS	8
#define SOFTIRQ_BITS	8
#define NMI_BITS	1

#define MAX_HARDIRQ_BITS 10

#ifndef HARDIRQ_BITS
# define HARDIRQ_BITS	MAX_HARDIRQ_BITS
#endif

#if HARDIRQ_BITS > MAX_HARDIRQ_BITS
#error HARDIRQ_BITS too high!
#endif

#define PREEMPT_SHIFT	0
#define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + SOFTIRQ_BITS)
#define NMI_SHIFT	(HARDIRQ_SHIFT + HARDIRQ_BITS)

#define __IRQ_MASK(x)	((1UL << (x))-1)

#define PREEMPT_MASK	(__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
#define SOFTIRQ_MASK	(__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
#define HARDIRQ_MASK	(__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
#define NMI_MASK	(__IRQ_MASK(NMI_BITS) << NMI_SHIFT)

#define PREEMPT_OFFSET	(1UL << PREEMPT_SHIFT)
#define SOFTIRQ_OFFSET	(1UL << SOFTIRQ_SHIFT)
#define HARDIRQ_OFFSET	(1UL << HARDIRQ_SHIFT)
#define NMI_OFFSET	(1UL << NMI_SHIFT)

#define SOFTIRQ_DISABLE_OFFSET	(2 * SOFTIRQ_OFFSET)

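/*
 * Illustrative sketch, not part of the kernel API: extracting the
 * individual fields from a raw preempt_count() snapshot with the masks
 * above. For example, under the generic layout a count of 0x00010201
 * decodes as hardirq depth 1, softirq count 2 (i.e. one
 * local_bh_disable(), which adds SOFTIRQ_DISABLE_OFFSET) and
 * preemption disabled once.
 */
static inline unsigned long __example_hardirq_depth(unsigned long count)
{
	/* The hardirq nesting depth lives in bits 16-25. */
	return (count & HARDIRQ_MASK) >> HARDIRQ_SHIFT;
}

static inline unsigned long __example_softirq_count(unsigned long count)
{
	/* The softirq count lives in bits 8-15. */
	return (count & SOFTIRQ_MASK) >> SOFTIRQ_SHIFT;
}
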
#ifndef PREEMPT_ACTIVE
#define PREEMPT_ACTIVE_BITS	1
#define PREEMPT_ACTIVE_SHIFT	(NMI_SHIFT + NMI_BITS)
#define PREEMPT_ACTIVE	(__IRQ_MASK(PREEMPT_ACTIVE_BITS) << PREEMPT_ACTIVE_SHIFT)
#endif

#if PREEMPT_ACTIVE < (1 << (NMI_SHIFT + NMI_BITS))
#error PREEMPT_ACTIVE is too low!
#endif

#define hardirq_count()	(preempt_count() & HARDIRQ_MASK)
#define softirq_count()	(preempt_count() & SOFTIRQ_MASK)
#define irq_count()	(preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
				 | NMI_MASK))

/*
 * Are we doing bottom half or hardware interrupt processing?
 *
 * in_irq()             - hardirq context
 * in_softirq()         - processing a softirq or bottom halves disabled
 * in_interrupt()       - NMI, hardirq or softirq context, or bh disabled
 * in_serving_softirq() - actually processing a softirq
 */
#define in_irq()		(hardirq_count())
#define in_softirq()		(softirq_count())
#define in_interrupt()		(irq_count())
#define in_serving_softirq()	(softirq_count() & SOFTIRQ_OFFSET)

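/*
 * A minimal sketch of the in_softirq()/in_serving_softirq() distinction
 * (example only; assumes local_bh_disable() from <linux/bottom_half.h>):
 */
#if 0	/* illustrative, not built */
static void example_bh_vs_serving(void)
{
	local_bh_disable();		/* adds SOFTIRQ_DISABLE_OFFSET */
	WARN_ON(!in_softirq());		/* in_softirq() is now true */
	WARN_ON(in_serving_softirq());	/* but no softirq is actually running */
	local_bh_enable();
}
#endif
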
/*
 * Are we in NMI context?
 */
#define in_nmi()	(preempt_count() & NMI_MASK)

#if defined(CONFIG_PREEMPT) && defined(CONFIG_BKL)
# include <linux/sched.h>
# define PREEMPT_INATOMIC_BASE (current->lock_depth >= 0)
#else
# define PREEMPT_INATOMIC_BASE 0
#endif

#if defined(CONFIG_PREEMPT)
# define PREEMPT_CHECK_OFFSET 1
#else
# define PREEMPT_CHECK_OFFSET 0
#endif

/*
 * Are we running in atomic context? WARNING: this macro cannot
 * always detect atomic context; in particular, it cannot know about
 * held spinlocks in non-preemptible kernels. Thus it should not be
 * used in the general case to determine whether sleeping is possible.
 * Do not use in_atomic() in driver code.
 */
#define in_atomic()	((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_INATOMIC_BASE)

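/*
 * A sketch of the blind spot described above (example only; assumes
 * <linux/spinlock.h>): on a non-preemptible kernel, spin_lock() does
 * not touch preempt_count(), so in_atomic() can read as false even
 * though sleeping under the lock would be a bug.
 */
#if 0	/* illustrative, not built */
static void example_in_atomic_blind_spot(spinlock_t *lock)
{
	spin_lock(lock);
	/* On !CONFIG_PREEMPT, in_atomic() may be 0 here despite the lock. */
	spin_unlock(lock);
}
#endif
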
/*
 * Check whether we were atomic before we did preempt_disable():
 * (used by the scheduler, *after* releasing the kernel lock)
 */
#define in_atomic_preempt_off() \
		((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_CHECK_OFFSET)

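/*
 * Note on IRQ_EXIT_OFFSET: on preemptible kernels it is HARDIRQ_OFFSET-1,
 * so irq_exit() leaves the preempt count one above zero while it decides
 * whether to run pending softirqs, closing a preemption window there;
 * the leftover count is dropped with preempt_enable_no_resched() in
 * kernel/softirq.c.
 */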
#ifdef CONFIG_PREEMPT
# define preemptible()	(preempt_count() == 0 && !irqs_disabled())
# define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
#else
# define preemptible()	0
# define IRQ_EXIT_OFFSET HARDIRQ_OFFSET
#endif

#if defined(CONFIG_SMP) || defined(CONFIG_GENERIC_HARDIRQS)
extern void synchronize_irq(unsigned int irq);
#else
# define synchronize_irq(irq)	barrier()
#endif

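/*
 * Typical use (sketch): a driver waits for any in-flight handler of
 * its IRQ to finish before freeing data the handler touches. The
 * example_dev type, its fields and example_hw_mask_irqs() are
 * hypothetical; assumes <linux/slab.h>.
 */
#if 0	/* illustrative, not built */
static void example_shutdown(struct example_dev *dev)
{
	example_hw_mask_irqs(dev);	/* stop the device raising IRQs */
	synchronize_irq(dev->irq);	/* wait out a handler on another CPU */
	kfree(dev->rx_buf);		/* now safe to free handler data */
}
#endif
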
struct task_struct;

#if !defined(CONFIG_VIRT_CPU_ACCOUNTING) && !defined(CONFIG_IRQ_TIME_ACCOUNTING)
static inline void account_system_vtime(struct task_struct *tsk)
{
}
#else
extern void account_system_vtime(struct task_struct *tsk);
#endif

#if defined(CONFIG_NO_HZ)
#if defined(CONFIG_TINY_RCU) || defined(CONFIG_TINY_PREEMPT_RCU)
extern void rcu_enter_nohz(void);
extern void rcu_exit_nohz(void);

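/*
 * Note the inversion below: taking an interrupt pulls the CPU out of
 * RCU's nohz (dyntick-idle) state, so rcu_irq_enter() maps to
 * rcu_exit_nohz() and rcu_irq_exit() maps back to rcu_enter_nohz().
 */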
static inline void rcu_irq_enter(void)
{
	rcu_exit_nohz();
}

static inline void rcu_irq_exit(void)
{
	rcu_enter_nohz();
}

static inline void rcu_nmi_enter(void)
{
}

static inline void rcu_nmi_exit(void)
{
}

#else
extern void rcu_irq_enter(void);
extern void rcu_irq_exit(void);
extern void rcu_nmi_enter(void);
extern void rcu_nmi_exit(void);
#endif
#else
# define rcu_irq_enter() do { } while (0)
# define rcu_irq_exit() do { } while (0)
# define rcu_nmi_enter() do { } while (0)
# define rcu_nmi_exit() do { } while (0)
#endif /* #if defined(CONFIG_NO_HZ) */

/*
 * It is safe to do non-atomic ops on ->hardirq_context,
 * because NMI handlers may not preempt and the ops are
 * always balanced, so the interrupted value of ->hardirq_context
 * will always be restored.
 */
#define __irq_enter()					\
	do {						\
		account_system_vtime(current);		\
		add_preempt_count(HARDIRQ_OFFSET);	\
		trace_hardirq_enter();			\
	} while (0)

/*
 * Enter irq context (on NO_HZ, update jiffies):
 */
extern void irq_enter(void);

/*
 * Exit irq context without processing softirqs:
 */
#define __irq_exit()					\
	do {						\
		trace_hardirq_exit();			\
		account_system_vtime(current);		\
		sub_preempt_count(HARDIRQ_OFFSET);	\
	} while (0)

/*
 * Exit irq context and process softirqs if needed:
 */
extern void irq_exit(void);

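/*
 * A minimal sketch of how an arch's interrupt entry path brackets
 * handler dispatch with the helpers above (the dispatch call varies
 * per arch; generic_handle_irq() is from <linux/irq.h>):
 */
#if 0	/* illustrative, not built */
void example_do_IRQ(unsigned int irq)
{
	irq_enter();			/* enter hardirq context, fix jiffies on NO_HZ */
	generic_handle_irq(irq);	/* run the flow handler for this irq */
	irq_exit();			/* leave hardirq context, maybe run softirqs */
}
#endif
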
#define nmi_enter()						\
	do {							\
		ftrace_nmi_enter();				\
		BUG_ON(in_nmi());				\
		add_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET);	\
		lockdep_off();					\
		rcu_nmi_enter();				\
		trace_hardirq_enter();				\
	} while (0)

#define nmi_exit()						\
	do {							\
		trace_hardirq_exit();				\
		rcu_nmi_exit();					\
		lockdep_on();					\
		BUG_ON(!in_nmi());				\
		sub_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET);	\
		ftrace_nmi_exit();				\
	} while (0)

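/*
 * Sketch of an arch NMI path (cf. the x86 do_nmi() pattern): the body
 * runs with in_nmi() true between nmi_enter() and nmi_exit().
 */
#if 0	/* illustrative, not built */
void example_do_nmi(void)
{
	nmi_enter();
	/* ... handle the NMI source; in_nmi() is true here ... */
	nmi_exit();
}
#endif
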
#endif /* LINUX_HARDIRQ_H */