blob: 12d5f972f23f46cf1dbf83c0e7fc93333cf49015 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001#ifndef LINUX_HARDIRQ_H
2#define LINUX_HARDIRQ_H
3
Frederic Weisbecker2d4b8472013-07-29 20:29:43 +02004#include <linux/preempt_mask.h>
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07005#include <linux/lockdep.h>
Steven Rostedt6a60dd12008-11-06 15:55:21 -05006#include <linux/ftrace_irq.h>
Frederic Weisbeckerdcbf8322012-10-05 23:07:19 +02007#include <linux/vtime.h>
Peter Zijlstra0bd3a172013-11-19 16:13:38 +01008#include <asm/hardirq.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -07009
Linus Torvalds1da177e2005-04-16 15:20:36 -070010
/*
 * NOTE(review): presumably waits until all in-flight handlers of @irq
 * have completed (implemented in the generic irq core) — confirm
 * against kernel/irq/manage.c.
 */
extern void synchronize_irq(unsigned int irq);
Linus Torvalds1da177e2005-04-16 15:20:36 -070012
#if defined(CONFIG_TINY_RCU)

/*
 * Tiny RCU: NMI entry/exit notification is not needed, so these
 * degenerate to no-ops.
 */
static inline void rcu_nmi_enter(void)
{
}

static inline void rcu_nmi_exit(void)
{
}

#else
/*
 * Tree RCU needs to be told about NMI entry/exit (implementations live
 * in the RCU core); called from nmi_enter()/nmi_exit() below.
 */
extern void rcu_nmi_enter(void);
extern void rcu_nmi_exit(void);
#endif
Steven Rostedt2232c2d2008-02-29 18:46:50 +010027
/*
 * It is safe to do non-atomic ops on ->hardirq_context,
 * because NMI handlers may not preempt and the ops are
 * always balanced, so the interrupted value of ->hardirq_context
 * will always be restored.
 */
/*
 * Enter hardirq context without the extra bookkeeping that irq_enter()
 * does.  Order matters:
 *  1. charge irq entry time to the interrupted task (vtime accounting,
 *     see <linux/vtime.h>),
 *  2. mark hardirq context by adding HARDIRQ_OFFSET to preempt_count,
 *  3. tell the irq-state tracer we are now in hardirq context.
 */
#define __irq_enter()					\
	do {						\
		account_irq_enter_time(current);	\
		preempt_count_add(HARDIRQ_OFFSET);	\
		trace_hardirq_enter();			\
	} while (0)
40
/*
 * Enter irq context (on NO_HZ, update jiffies):
 * full-featured counterpart of __irq_enter(), defined out of line.
 */
extern void irq_enter(void);
Linus Torvalds1da177e2005-04-16 15:20:36 -070045
/*
 * Exit irq context without processing softirqs:
 * exact mirror of __irq_enter(), with the steps undone in reverse
 * order (tracer first, then time accounting, then preempt count).
 */
#define __irq_exit()					\
	do {						\
		trace_hardirq_exit();			\
		account_irq_exit_time(current);		\
		preempt_count_sub(HARDIRQ_OFFSET);	\
	} while (0)
55
/*
 * Exit irq context and process softirqs if needed:
 * full-featured counterpart of __irq_exit(), defined out of line.
 */
extern void irq_exit(void);
60
/*
 * Enter NMI context.  The ordering is deliberate:
 *  - lockdep_off() first — NOTE(review): presumably because lockdep is
 *    not NMI-safe; confirm against kernel/locking/lockdep.c,
 *  - ftrace is notified while lockdep is already off,
 *  - BUG_ON(in_nmi()) catches (unsupported) nested NMIs,
 *  - preempt_count gains both NMI_OFFSET and HARDIRQ_OFFSET so the
 *    in_nmi()/in_irq() tests (see <linux/preempt_mask.h>) see this
 *    context,
 *  - RCU and the irq-state tracer are told last, once the context is
 *    fully established.  nmi_exit() below undoes these in reverse.
 */
#define nmi_enter()						\
	do {							\
		lockdep_off();					\
		ftrace_nmi_enter();				\
		BUG_ON(in_nmi());				\
		preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET);	\
		rcu_nmi_enter();				\
		trace_hardirq_enter();				\
	} while (0)
Linus Torvalds5f34fe12008-12-30 16:10:19 -080070
/*
 * Exit NMI context: exact mirror of nmi_enter(), steps undone in
 * reverse order.  BUG_ON(!in_nmi()) verifies the enter/exit pairing
 * before the NMI bits are removed from preempt_count; lockdep is
 * re-enabled only as the very last step.
 */
#define nmi_exit()						\
	do {							\
		trace_hardirq_exit();				\
		rcu_nmi_exit();					\
		BUG_ON(!in_nmi());				\
		preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET);	\
		ftrace_nmi_exit();				\
		lockdep_on();					\
	} while (0)
Ingo Molnarde30a2b2006-07-03 00:24:42 -070080
Linus Torvalds1da177e2005-04-16 15:20:36 -070081#endif /* LINUX_HARDIRQ_H */