blob: ccfe17c5c8da1d78f62d652da8bb845d00dfc1f9 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001#ifndef LINUX_HARDIRQ_H
2#define LINUX_HARDIRQ_H
3
Frederic Weisbecker2d4b8472013-07-29 20:29:43 +02004#include <linux/preempt_mask.h>
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07005#include <linux/lockdep.h>
Steven Rostedt6a60dd12008-11-06 15:55:21 -05006#include <linux/ftrace_irq.h>
Frederic Weisbeckerdcbf8322012-10-05 23:07:19 +02007#include <linux/vtime.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -07008
Linus Torvalds1da177e2005-04-16 15:20:36 -07009
Thomas Gleixner3aa551c2009-03-23 18:28:15 +010010#if defined(CONFIG_SMP) || defined(CONFIG_GENERIC_HARDIRQS)
Linus Torvalds1da177e2005-04-16 15:20:36 -070011extern void synchronize_irq(unsigned int irq);
12#else
13# define synchronize_irq(irq) barrier()
14#endif
15
/*
 * RCU NMI entry/exit hooks. Tiny RCU needs no NMI bookkeeping, so the
 * hooks collapse to empty inlines; all other RCU flavours provide real
 * implementations.
 */
#if defined(CONFIG_TINY_RCU)

static inline void rcu_nmi_enter(void)
{
}

static inline void rcu_nmi_exit(void)
{
}

#else
extern void rcu_nmi_enter(void);
extern void rcu_nmi_exit(void);
#endif

Ingo Molnarde30a2b2006-07-03 00:24:42 -070031/*
32 * It is safe to do non-atomic ops on ->hardirq_context,
33 * because NMI handlers may not preempt and the ops are
34 * always balanced, so the interrupted value of ->hardirq_context
35 * will always be restored.
36 */
Thomas Gleixner79bf2bb2007-02-16 01:28:03 -080037#define __irq_enter() \
38 do { \
Frederic Weisbecker6a616712012-12-16 20:00:34 +010039 account_irq_enter_time(current); \
Thomas Gleixner79bf2bb2007-02-16 01:28:03 -080040 add_preempt_count(HARDIRQ_OFFSET); \
41 trace_hardirq_enter(); \
42 } while (0)
43
44/*
45 * Enter irq context (on NO_HZ, update jiffies):
46 */
Ingo Molnardde4b2b2007-02-16 01:27:45 -080047extern void irq_enter(void);
Linus Torvalds1da177e2005-04-16 15:20:36 -070048
/*
 * Exit irq context without processing softirqs.
 * Tears down in the exact reverse order of __irq_enter():
 * tracing off first, then time accounting, then the preempt count.
 */
#define __irq_exit()					\
	do {						\
		trace_hardirq_exit();			\
		account_irq_exit_time(current);		\
		sub_preempt_count(HARDIRQ_OFFSET);	\
	} while (0)

/*
 * Exit irq context and process softirqs if needed:
 */
extern void irq_exit(void);

/*
 * Mark entry into NMI context: disable lockdep (it is not NMI-safe),
 * notify ftrace, bump the preempt count by both the NMI and hardirq
 * offsets, and inform RCU and irq tracing. BUG_ON(in_nmi()) catches
 * unexpected NMI nesting before the count is raised.
 */
#define nmi_enter()						\
	do {							\
		lockdep_off();					\
		ftrace_nmi_enter();				\
		BUG_ON(in_nmi());				\
		add_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET);	\
		rcu_nmi_enter();				\
		trace_hardirq_enter();				\
	} while (0)

Steven Rostedt2a7b8df2009-02-12 14:16:46 -050074#define nmi_exit() \
75 do { \
76 trace_hardirq_exit(); \
77 rcu_nmi_exit(); \
Steven Rostedt2a7b8df2009-02-12 14:16:46 -050078 BUG_ON(!in_nmi()); \
79 sub_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET); \
80 ftrace_nmi_exit(); \
Steven Rostedt0f1ac8f2013-01-15 22:11:19 -050081 lockdep_on(); \
Steven Rostedt17666f02008-10-30 16:08:32 -040082 } while (0)
Ingo Molnarde30a2b2006-07-03 00:24:42 -070083
Linus Torvalds1da177e2005-04-16 15:20:36 -070084#endif /* LINUX_HARDIRQ_H */