blob: c683996110b15f1ca45e9b7499fc983a036c5b96 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001#ifndef LINUX_HARDIRQ_H
2#define LINUX_HARDIRQ_H
3
Frederic Weisbecker92cf2112015-05-12 16:41:46 +02004#include <linux/preempt.h>
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07005#include <linux/lockdep.h>
Steven Rostedt6a60dd12008-11-06 15:55:21 -05006#include <linux/ftrace_irq.h>
Frederic Weisbeckerdcbf8322012-10-05 23:07:19 +02007#include <linux/vtime.h>
Peter Zijlstra0bd3a172013-11-19 16:13:38 +01008#include <asm/hardirq.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -07009
Linus Torvalds1da177e2005-04-16 15:20:36 -070010
Linus Torvalds1da177e2005-04-16 15:20:36 -070011extern void synchronize_irq(unsigned int irq);
Peter Zijlstra02cea392015-02-05 14:06:23 +010012extern bool synchronize_hardirq(unsigned int irq);
Linus Torvalds1da177e2005-04-16 15:20:36 -070013
#if defined(CONFIG_TINY_RCU)

/*
 * Under CONFIG_TINY_RCU the RCU implementation provides no NMI
 * entry/exit hooks, so these are empty inline stubs that compile
 * away entirely.
 */
static inline void rcu_nmi_enter(void)
{
}

static inline void rcu_nmi_exit(void)
{
}

#else
/* Real hooks supplied by the full RCU implementation elsewhere. */
extern void rcu_nmi_enter(void);
extern void rcu_nmi_exit(void);
#endif
Steven Rostedt2232c2d2008-02-29 18:46:50 +010028
/*
 * It is safe to do non-atomic ops on ->hardirq_context,
 * because NMI handlers may not preempt and the ops are
 * always balanced, so the interrupted value of ->hardirq_context
 * will always be restored.
 */
/*
 * Low-level hardirq entry: charge the interrupted task's CPU time,
 * raise the hardirq nesting level in the preempt count, then run the
 * hardirq-entry tracing/lockdep hook.  Unlike irq_enter() below, this
 * performs no NO_HZ/jiffies bookkeeping.
 */
#define __irq_enter()					\
	do {						\
		account_irq_enter_time(current);	\
		preempt_count_add(HARDIRQ_OFFSET);	\
		trace_hardirq_enter();			\
	} while (0)
41
42/*
43 * Enter irq context (on NO_HZ, update jiffies):
44 */
Ingo Molnardde4b2b2007-02-16 01:27:45 -080045extern void irq_enter(void);
Linus Torvalds1da177e2005-04-16 15:20:36 -070046
/*
 * Exit irq context without processing softirqs (use irq_exit() below
 * when softirqs should be run).  Exact mirror of __irq_enter(), with
 * each step undone in reverse order.
 */
#define __irq_exit()					\
	do {						\
		trace_hardirq_exit();			\
		account_irq_exit_time(current);		\
		preempt_count_sub(HARDIRQ_OFFSET);	\
	} while (0)
56
57/*
58 * Exit irq context and process softirqs if needed:
59 */
Linus Torvalds1da177e2005-04-16 15:20:36 -070060extern void irq_exit(void);
61
/*
 * Mark the CPU as being in NMI context: switch printk to its NMI-safe
 * path, disable lockdep, notify ftrace, then bump the preempt count by
 * both NMI_OFFSET and HARDIRQ_OFFSET so in_nmi() becomes true, and
 * finally inform RCU and irq tracing.  The BUG_ON() catches nested
 * NMIs, which this accounting does not support.  Statement order is
 * deliberate and must mirror nmi_exit() exactly in reverse.
 */
#define nmi_enter()						\
	do {							\
		printk_nmi_enter();				\
		lockdep_off();					\
		ftrace_nmi_enter();				\
		BUG_ON(in_nmi());				\
		preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET);	\
		rcu_nmi_enter();				\
		trace_hardirq_enter();				\
	} while (0)
Linus Torvalds5f34fe12008-12-30 16:10:19 -080072
/*
 * Leave NMI context: exact mirror of nmi_enter(), undoing each step in
 * reverse order.  The BUG_ON() fires if the CPU was not actually in
 * NMI context (unbalanced enter/exit).
 */
#define nmi_exit()						\
	do {							\
		trace_hardirq_exit();				\
		rcu_nmi_exit();					\
		BUG_ON(!in_nmi());				\
		preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET);	\
		ftrace_nmi_exit();				\
		lockdep_on();					\
		printk_nmi_exit();				\
	} while (0)
Ingo Molnarde30a2b2006-07-03 00:24:42 -070083
Linus Torvalds1da177e2005-04-16 15:20:36 -070084#endif /* LINUX_HARDIRQ_H */