#ifndef __LINUX_PREEMPT_H
#define __LINUX_PREEMPT_H

/*
 * include/linux/preempt.h - macros for accessing and manipulating
 * preempt_count (used for kernel preemption, interrupt count, etc.)
 */

#include <linux/linkage.h>
#include <linux/list.h>

/*
 * We put the hardirq and softirq counter into the preemption
 * counter. The bitmask has the following meaning:
 *
 * - bits 0-7 are the preemption count (max preemption depth: 256)
 * - bits 8-15 are the softirq count (max # of softirqs: 256)
 *
 * The hardirq count could in theory be the same as the number of
 * interrupts in the system, but we run all interrupt handlers with
 * interrupts disabled, so we cannot have nesting interrupts. Though
 * there are a few palaeontologic drivers which reenable interrupts in
 * the handler, so we need more than one bit here.
 *
 *          PREEMPT_MASK: 0x000000ff
 *          SOFTIRQ_MASK: 0x0000ff00
 *          HARDIRQ_MASK: 0x000f0000
 *              NMI_MASK: 0x00100000
 *        PREEMPT_ACTIVE: 0x00200000
 *  PREEMPT_NEED_RESCHED: 0x80000000
 */
#define PREEMPT_BITS	8
#define SOFTIRQ_BITS	8
#define HARDIRQ_BITS	4
#define NMI_BITS	1

#define PREEMPT_SHIFT	0
#define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + SOFTIRQ_BITS)
#define NMI_SHIFT	(HARDIRQ_SHIFT + HARDIRQ_BITS)

#define __IRQ_MASK(x)	((1UL << (x))-1)

#define PREEMPT_MASK	(__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
#define SOFTIRQ_MASK	(__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
#define HARDIRQ_MASK	(__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
#define NMI_MASK	(__IRQ_MASK(NMI_BITS) << NMI_SHIFT)

#define PREEMPT_OFFSET	(1UL << PREEMPT_SHIFT)
#define SOFTIRQ_OFFSET	(1UL << SOFTIRQ_SHIFT)
#define HARDIRQ_OFFSET	(1UL << HARDIRQ_SHIFT)
#define NMI_OFFSET	(1UL << NMI_SHIFT)

#define SOFTIRQ_DISABLE_OFFSET	(2 * SOFTIRQ_OFFSET)

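/*
 * Worked example (illustrative, value hypothetical): a preempt_count of
 * 0x00010102 decodes with the masks above as
 *
 *   (0x00010102 & PREEMPT_MASK) >> PREEMPT_SHIFT == 2   two preempt_disable()s
 *   (0x00010102 & SOFTIRQ_MASK) >> SOFTIRQ_SHIFT == 1   softirq part
 *   (0x00010102 & HARDIRQ_MASK) >> HARDIRQ_SHIFT == 1   one hardirq level
 *
 * i.e. a hardirq arrived while a softirq was running under two levels of
 * preempt_disable(). The count is non-zero, so the context is atomic.
 */
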
#define PREEMPT_ACTIVE_BITS	1
#define PREEMPT_ACTIVE_SHIFT	(NMI_SHIFT + NMI_BITS)
#define PREEMPT_ACTIVE	(__IRQ_MASK(PREEMPT_ACTIVE_BITS) << PREEMPT_ACTIVE_SHIFT)

/* We use the MSB mostly because it's available */
#define PREEMPT_NEED_RESCHED	0x80000000

/* preempt_count() and related functions, which depend on PREEMPT_NEED_RESCHED */
#include <asm/preempt.h>

#define hardirq_count()	(preempt_count() & HARDIRQ_MASK)
#define softirq_count()	(preempt_count() & SOFTIRQ_MASK)
#define irq_count()	(preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
				 | NMI_MASK))

/*
 * Are we doing bottom half or hardware interrupt processing?
 *
 * in_irq()             - we're in hardirq context
 * in_softirq()         - we're processing a softirq or have bottom
 *                        halves disabled
 * in_interrupt()       - we're in hardirq, softirq or NMI context,
 *                        or have bottom halves disabled
 * in_serving_softirq() - we're actually processing a softirq
 */
#define in_irq()		(hardirq_count())
#define in_softirq()		(softirq_count())
#define in_interrupt()		(irq_count())
#define in_serving_softirq()	(softirq_count() & SOFTIRQ_OFFSET)

/*
 * Are we in NMI context?
 */
#define in_nmi()	(preempt_count() & NMI_MASK)
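
/*
 * Sketch of the softirq/bottom-half distinction (illustrative values):
 * local_bh_disable() raises the softirq byte by SOFTIRQ_DISABLE_OFFSET
 * (0x200), while actually entering softirq processing raises it by
 * SOFTIRQ_OFFSET (0x100). After a bare local_bh_disable():
 *
 *   in_softirq()         -> 0x200, true
 *   in_serving_softirq() -> 0x200 & 0x100 == 0, false
 *
 * whereas while a softirq handler runs, the SOFTIRQ_OFFSET bit is set
 * and both are true.
 */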

/*
 * The preempt_count offset after preempt_disable();
 */
#if defined(CONFIG_PREEMPT_COUNT)
# define PREEMPT_DISABLE_OFFSET	PREEMPT_OFFSET
#else
# define PREEMPT_DISABLE_OFFSET	0
#endif

/*
 * The preempt_count offset after spin_lock().
 */
#define PREEMPT_LOCK_OFFSET	PREEMPT_DISABLE_OFFSET

/*
 * The preempt_count offset needed for things like:
 *
 *  spin_lock_bh()
 *
 * which need to disable both preemption (CONFIG_PREEMPT_COUNT) and
 * softirqs, such that unlock sequences of:
 *
 *  spin_unlock();
 *  local_bh_enable();
 *
 * work as expected.
 */
#define SOFTIRQ_LOCK_OFFSET	(SOFTIRQ_DISABLE_OFFSET + PREEMPT_LOCK_OFFSET)
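
/*
 * Quick arithmetic check (illustrative): with CONFIG_PREEMPT_COUNT,
 * SOFTIRQ_LOCK_OFFSET == 0x200 + 0x1 == 0x201, covering both the
 * bottom-half-disable part and the preemption part. The mixed unlock
 * sequence above then subtracts the same total: spin_unlock() drops
 * the 0x1, local_bh_enable() drops the 0x200.
 */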

/*
 * Are we running in atomic context? WARNING: this macro cannot
 * always detect atomic context; in particular, it cannot know about
 * held spinlocks in non-preemptible kernels. Thus it should not be
 * used in the general case to determine whether sleeping is possible.
 * Do not use in_atomic() in driver code.
 */
#define in_atomic()	(preempt_count() != 0)

/*
 * Check whether we were atomic before we did preempt_disable():
 * (used by the scheduler)
 */
#define in_atomic_preempt_off() \
		((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_DISABLE_OFFSET)
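
/*
 * Reading the test above (illustrative): after one bare preempt_disable(),
 * the count with PREEMPT_ACTIVE masked off is exactly
 * PREEMPT_DISABLE_OFFSET and the test is false; any additional
 * preempt/softirq/hardirq/NMI contribution makes it true, i.e. we were
 * already atomic before preempt_disable() was called.
 */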

#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
extern void preempt_count_add(int val);
extern void preempt_count_sub(int val);
#define preempt_count_dec_and_test() \
	({ preempt_count_sub(1); should_resched(0); })
#else
#define preempt_count_add(val)	__preempt_count_add(val)
#define preempt_count_sub(val)	__preempt_count_sub(val)
#define preempt_count_dec_and_test() __preempt_count_dec_and_test()
#endif

#define __preempt_count_inc() __preempt_count_add(1)
#define __preempt_count_dec() __preempt_count_sub(1)

#define preempt_count_inc() preempt_count_add(1)
#define preempt_count_dec() preempt_count_sub(1)

#define preempt_active_enter() \
do { \
	preempt_count_add(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET); \
	barrier(); \
} while (0)

#define preempt_active_exit() \
do { \
	barrier(); \
	preempt_count_sub(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET); \
} while (0)

#ifdef CONFIG_PREEMPT_COUNT

#define preempt_disable() \
do { \
	preempt_count_inc(); \
	barrier(); \
} while (0)

#define sched_preempt_enable_no_resched() \
do { \
	barrier(); \
	preempt_count_dec(); \
} while (0)

#define preempt_enable_no_resched() sched_preempt_enable_no_resched()

#define preemptible()	(preempt_count() == 0 && !irqs_disabled())

#ifdef CONFIG_PREEMPT
#define preempt_enable() \
do { \
	barrier(); \
	if (unlikely(preempt_count_dec_and_test())) \
		__preempt_schedule(); \
} while (0)
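
/*
 * Typical usage sketch (illustrative; 'counter' is a hypothetical per-CPU
 * variable): keep the task on one CPU across a read-modify-write:
 *
 *	preempt_disable();
 *	v = __this_cpu_read(counter);
 *	__this_cpu_write(counter, v + 1);
 *	preempt_enable();
 *
 * If a reschedule became pending inside the section, the final
 * preempt_enable() notices via preempt_count_dec_and_test() and calls
 * __preempt_schedule() immediately.
 */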

#define preempt_enable_notrace() \
do { \
	barrier(); \
	if (unlikely(__preempt_count_dec_and_test())) \
		__preempt_schedule_notrace(); \
} while (0)

#define preempt_check_resched() \
do { \
	if (should_resched(0)) \
		__preempt_schedule(); \
} while (0)

#else /* !CONFIG_PREEMPT */
#define preempt_enable() \
do { \
	barrier(); \
	preempt_count_dec(); \
} while (0)

#define preempt_enable_notrace() \
do { \
	barrier(); \
	__preempt_count_dec(); \
} while (0)

#define preempt_check_resched() do { } while (0)
#endif /* CONFIG_PREEMPT */

#define preempt_disable_notrace() \
do { \
	__preempt_count_inc(); \
	barrier(); \
} while (0)

#define preempt_enable_no_resched_notrace() \
do { \
	barrier(); \
	__preempt_count_dec(); \
} while (0)

#else /* !CONFIG_PREEMPT_COUNT */

/*
 * Even if we don't have any preemption, we need preempt disable/enable
 * to be barriers, so that things like get_user()/put_user(), which can
 * fault and schedule, do not migrate into our preempt-protected region.
 */
#define preempt_disable()			barrier()
#define sched_preempt_enable_no_resched()	barrier()
#define preempt_enable_no_resched()		barrier()
#define preempt_enable()			barrier()
#define preempt_check_resched()			do { } while (0)
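
/*
 * Illustrative sketch of why barrier() is still needed ('stat' and 'uptr'
 * are hypothetical): given
 *
 *	preempt_disable();
 *	this_cpu_inc(stat);
 *	preempt_enable();
 *	ret = get_user(v, uptr);
 *
 * truly empty no-op macros would let the compiler reorder the get_user()
 * (which can fault and schedule) into the middle of the section;
 * barrier() pins the surrounding code on its own side of each marker
 * even though no count is maintained.
 */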

#define preempt_disable_notrace()		barrier()
#define preempt_enable_no_resched_notrace()	barrier()
#define preempt_enable_notrace()		barrier()
#define preemptible()				0

#endif /* CONFIG_PREEMPT_COUNT */

#ifdef MODULE
/*
 * Modules have no business playing preemption tricks.
 */
#undef sched_preempt_enable_no_resched
#undef preempt_enable_no_resched
#undef preempt_enable_no_resched_notrace
#undef preempt_check_resched
#endif

#define preempt_set_need_resched() \
do { \
	set_preempt_need_resched(); \
} while (0)
#define preempt_fold_need_resched() \
do { \
	if (tif_need_resched()) \
		set_preempt_need_resched(); \
} while (0)

#ifdef CONFIG_PREEMPT_NOTIFIERS

struct preempt_notifier;

/**
 * preempt_ops - notifiers called when a task is preempted and rescheduled
 * @sched_in: we're about to be rescheduled:
 *    notifier: struct preempt_notifier for the task being scheduled
 *    cpu:  cpu we're scheduled on
 * @sched_out: we've just been preempted
 *    notifier: struct preempt_notifier for the task being preempted
 *    next: the task that's kicking us out
 *
 * Please note that sched_in and sched_out are called under different
 * contexts: sched_out is called with the rq lock held and irqs disabled,
 * while sched_in is called without the rq lock and with irqs enabled.
 * This difference is intentional and depended upon by its users.
 */
struct preempt_ops {
	void (*sched_in)(struct preempt_notifier *notifier, int cpu);
	void (*sched_out)(struct preempt_notifier *notifier,
			  struct task_struct *next);
};

/**
 * preempt_notifier - key for installing preemption notifiers
 * @link: internal use
 * @ops: defines the notifier functions to be called
 *
 * Usually used in conjunction with container_of().
 */
struct preempt_notifier {
	struct hlist_node link;
	struct preempt_ops *ops;
};

void preempt_notifier_inc(void);
void preempt_notifier_dec(void);
void preempt_notifier_register(struct preempt_notifier *notifier);
void preempt_notifier_unregister(struct preempt_notifier *notifier);

static inline void preempt_notifier_init(struct preempt_notifier *notifier,
				     struct preempt_ops *ops)
{
	INIT_HLIST_NODE(&notifier->link);
	notifier->ops = ops;
}
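
/*
 * Usage sketch (illustrative; 'my_vcpu', 'my_sched_in' and friends are
 * hypothetical): embed the notifier in a containing object and recover
 * it with container_of() from the callbacks:
 *
 *	struct my_vcpu {
 *		struct preempt_notifier pn;
 *	};
 *
 *	static void my_sched_in(struct preempt_notifier *n, int cpu)
 *	{
 *		struct my_vcpu *v = container_of(n, struct my_vcpu, pn);
 *		... reload per-CPU state for v on this cpu ...
 *	}
 *
 *	static struct preempt_ops my_ops = {
 *		.sched_in  = my_sched_in,
 *		.sched_out = my_sched_out,
 *	};
 *
 *	preempt_notifier_inc();
 *	preempt_notifier_init(&v->pn, &my_ops);
 *	preempt_notifier_register(&v->pn);	(registers for current task)
 */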

#endif /* CONFIG_PREEMPT_NOTIFIERS */

#endif /* __LINUX_PREEMPT_H */