#ifndef __LINUX_PREEMPT_H
#define __LINUX_PREEMPT_H

/*
 * include/linux/preempt.h - macros for accessing and manipulating
 * preempt_count (used for kernel preemption, interrupt count, etc.)
 */

#include <linux/linkage.h>
#include <linux/list.h>

/*
 * We use the MSB mostly because it's available; see <linux/preempt_mask.h> for
 * the other bits -- can't include that header due to inclusion hell.
 */
#define PREEMPT_NEED_RESCHED	0x80000000
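/*
 * Architectures that keep preempt_count in a per-CPU word (e.g. x86)
 * may store this flag inverted within the count, so that a single
 * decrement-and-test of the whole word checks "count hit zero and a
 * reschedule is needed" in one go; see asm/preempt.h for the details.
 */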

#include <asm/preempt.h>

#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
extern void preempt_count_add(int val);
extern void preempt_count_sub(int val);
#define preempt_count_dec_and_test() ({ preempt_count_sub(1); should_resched(); })
#else
#define preempt_count_add(val)	__preempt_count_add(val)
#define preempt_count_sub(val)	__preempt_count_sub(val)
#define preempt_count_dec_and_test() __preempt_count_dec_and_test()
#endif

#define __preempt_count_inc() __preempt_count_add(1)
#define __preempt_count_dec() __preempt_count_sub(1)

#define preempt_count_inc() preempt_count_add(1)
#define preempt_count_dec() preempt_count_sub(1)

#ifdef CONFIG_PREEMPT_COUNT

#define preempt_disable() \
do { \
	preempt_count_inc(); \
	barrier(); \
} while (0)

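/*
 * Drop the preempt count without checking whether a reschedule is due.
 * This is meant for scheduler-internal use; modules have both of the
 * no_resched variants taken away again by the #undef block at the end
 * of this header.
 */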
#define sched_preempt_enable_no_resched() \
do { \
	barrier(); \
	preempt_count_dec(); \
} while (0)

#define preempt_enable_no_resched() sched_preempt_enable_no_resched()

#ifdef CONFIG_PREEMPT
#define preempt_enable() \
do { \
	barrier(); \
	if (unlikely(preempt_count_dec_and_test())) \
		__preempt_schedule(); \
} while (0)

#define preempt_check_resched() \
do { \
	if (should_resched()) \
		__preempt_schedule(); \
} while (0)

#else
#define preempt_enable() \
do { \
	barrier(); \
	preempt_count_dec(); \
} while (0)
#define preempt_check_resched() do { } while (0)
#endif
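/*
 * Example (an illustrative sketch, not part of the original header):
 * a typical non-migration critical section around per-CPU data, where
 * my_percpu_counter is a hypothetical per-CPU variable:
 *
 *	preempt_disable();
 *	__this_cpu_inc(my_percpu_counter);
 *	preempt_enable();
 *
 * The disable/enable pairs nest; preemption is actually re-enabled (and
 * a pending reschedule acted upon) only when the count returns to zero.
 */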
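/*
 * The _notrace variants below use the raw __preempt_count_* helpers and
 * therefore skip the debug/tracer hooks that preempt_count_add()/_sub()
 * may carry; they exist so that the tracing code itself can toggle
 * preemption without recursing into the tracer.
 */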
#define preempt_disable_notrace() \
do { \
	__preempt_count_inc(); \
	barrier(); \
} while (0)

#define preempt_enable_no_resched_notrace() \
do { \
	barrier(); \
	__preempt_count_dec(); \
} while (0)

#ifdef CONFIG_PREEMPT

#ifndef CONFIG_CONTEXT_TRACKING
#define __preempt_schedule_context() __preempt_schedule()
#endif

#define preempt_enable_notrace() \
do { \
	barrier(); \
	if (unlikely(__preempt_count_dec_and_test())) \
		__preempt_schedule_context(); \
} while (0)
#else
#define preempt_enable_notrace() \
do { \
	barrier(); \
	__preempt_count_dec(); \
} while (0)
#endif

#else /* !CONFIG_PREEMPT_COUNT */

/*
 * Even if we don't have any preemption, we need preempt disable/enable
 * to be barriers, so that things like get_user/put_user -- which can
 * fault and schedule -- do not migrate into our preempt-protected
 * region.
 */
#define preempt_disable()			barrier()
#define sched_preempt_enable_no_resched()	barrier()
#define preempt_enable_no_resched()		barrier()
#define preempt_enable()			barrier()
#define preempt_check_resched()			do { } while (0)

#define preempt_disable_notrace()		barrier()
#define preempt_enable_no_resched_notrace()	barrier()
#define preempt_enable_notrace()		barrier()

#endif /* CONFIG_PREEMPT_COUNT */

#ifdef MODULE
/*
 * Modules have no business playing preemption tricks.
 */
#undef sched_preempt_enable_no_resched
#undef preempt_enable_no_resched
#undef preempt_enable_no_resched_notrace
#undef preempt_check_resched
#endif

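/*
 * Fold the thread_info TIF_NEED_RESCHED state into the
 * PREEMPT_NEED_RESCHED bit of preempt_count, so that a later
 * preempt_count_dec_and_test() can observe the pending reschedule
 * without looking at the thread flags.
 */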
#define preempt_set_need_resched() \
do { \
	set_preempt_need_resched(); \
} while (0)
#define preempt_fold_need_resched() \
do { \
	if (tif_need_resched()) \
		set_preempt_need_resched(); \
} while (0)

#ifdef CONFIG_PREEMPT_NOTIFIERS

struct preempt_notifier;

/**
 * preempt_ops - notifiers called when a task is preempted and rescheduled
 * @sched_in: we're about to be rescheduled:
 *	notifier: struct preempt_notifier for the task being scheduled
 *	cpu: cpu we're scheduled on
 * @sched_out: we've just been preempted
 *	notifier: struct preempt_notifier for the task being preempted
 *	next: the task that's kicking us out
 *
 * Please note that sched_in and sched_out are called in different
 * contexts: sched_out is called with the rq lock held and irqs disabled,
 * while sched_in is called without the rq lock and with irqs enabled.
 * This difference is intentional and depended upon by its users.
 */
struct preempt_ops {
	void (*sched_in)(struct preempt_notifier *notifier, int cpu);
	void (*sched_out)(struct preempt_notifier *notifier,
			  struct task_struct *next);
};

/**
 * preempt_notifier - key for installing preemption notifiers
 * @link: internal use
 * @ops: defines the notifier functions to be called
 *
 * Usually used in conjunction with container_of().
 */
struct preempt_notifier {
	struct hlist_node link;
	struct preempt_ops *ops;
};

void preempt_notifier_register(struct preempt_notifier *notifier);
void preempt_notifier_unregister(struct preempt_notifier *notifier);

static inline void preempt_notifier_init(struct preempt_notifier *notifier,
				     struct preempt_ops *ops)
{
	INIT_HLIST_NODE(&notifier->link);
	notifier->ops = ops;
}
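
/*
 * Example (an illustrative sketch, not part of the original header):
 * a hypothetical user embeds a preempt_notifier in its own structure
 * and recovers it with container_of() from the callbacks:
 *
 *	struct my_vcpu {
 *		struct preempt_notifier pn;
 *		...
 *	};
 *
 *	static void my_sched_in(struct preempt_notifier *pn, int cpu)
 *	{
 *		struct my_vcpu *vcpu = container_of(pn, struct my_vcpu, pn);
 *		... reload per-cpu state for vcpu ...
 *	}
 *
 *	static struct preempt_ops my_preempt_ops = {
 *		.sched_in  = my_sched_in,
 *		.sched_out = my_sched_out,
 *	};
 *
 *	preempt_notifier_init(&vcpu->pn, &my_preempt_ops);
 *	preempt_notifier_register(&vcpu->pn);
 */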

#endif

#endif /* __LINUX_PREEMPT_H */