/* interrupt.h */
#ifndef _LINUX_INTERRUPT_H
#define _LINUX_INTERRUPT_H

#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/preempt.h>
#include <linux/cpumask.h>
#include <linux/irqreturn.h>
#include <linux/irqnr.h>
#include <linux/hardirq.h>
#include <linux/sched.h>
#include <linux/irqflags.h>
#include <asm/atomic.h>
#include <asm/ptrace.h>
#include <asm/system.h>

/*
 * These correspond to the IORESOURCE_IRQ_* defines in
 * linux/ioport.h to select the interrupt line behaviour. When
 * requesting an interrupt without specifying an IRQF_TRIGGER, the
 * setting should be assumed to be "as already configured", which
 * may be as per machine or firmware initialisation.
 */
#define IRQF_TRIGGER_NONE	0x00000000
#define IRQF_TRIGGER_RISING	0x00000001
#define IRQF_TRIGGER_FALLING	0x00000002
#define IRQF_TRIGGER_HIGH	0x00000004
#define IRQF_TRIGGER_LOW	0x00000008
#define IRQF_TRIGGER_MASK	(IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | \
				 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)
#define IRQF_TRIGGER_PROBE	0x00000010
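
/*
 * A minimal usage sketch (the names foo_isr and foo are illustrative,
 * not part of this header): request an edge-triggered line explicitly:
 *
 *	if (request_irq(irq, foo_isr, IRQF_TRIGGER_FALLING, "foo", foo))
 *		goto err_no_irq;
 *
 * Omitting the IRQF_TRIGGER_* bits leaves the line configured as set up
 * by the machine or firmware, as described above.
 */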

/*
 * These flags are used only by the kernel as part of the
 * irq handling routines.
 *
 * IRQF_DISABLED - keep irqs disabled when calling the action handler
 * IRQF_SAMPLE_RANDOM - irq is used to feed the random generator
 * IRQF_SHARED - allow sharing the irq among several devices
 * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur
 * IRQF_TIMER - Flag to mark this interrupt as a timer interrupt
 * IRQF_PERCPU - Interrupt is per cpu
 * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
 * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
 *                registered first in a shared interrupt is considered for
 *                performance reasons)
 */
#define IRQF_DISABLED		0x00000020
#define IRQF_SAMPLE_RANDOM	0x00000040
#define IRQF_SHARED		0x00000080
#define IRQF_PROBE_SHARED	0x00000100
#define IRQF_TIMER		0x00000200
#define IRQF_PERCPU		0x00000400
#define IRQF_NOBALANCING	0x00000800
#define IRQF_IRQPOLL		0x00001000
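
/*
 * As an illustration (hypothetical, not taken from any particular
 * architecture), a timer tick handler that must run with irqs disabled
 * and must never be migrated by the irq balancer could be registered as:
 *
 *	request_irq(0, foo_timer_interrupt,
 *		    IRQF_DISABLED | IRQF_TIMER | IRQF_NOBALANCING,
 *		    "timer", NULL);
 */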

typedef irqreturn_t (*irq_handler_t)(int, void *);

struct irqaction {
	irq_handler_t handler;
	unsigned long flags;
	cpumask_t mask;
	const char *name;
	void *dev_id;
	struct irqaction *next;
	int irq;
	struct proc_dir_entry *dir;
};

extern irqreturn_t no_action(int cpl, void *dev_id);
extern int __must_check request_irq(unsigned int, irq_handler_t handler,
		       unsigned long, const char *, void *);
extern void free_irq(unsigned int, void *);
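
/*
 * A minimal sketch of the request/free life cycle on a shared line (the
 * foo_* names are illustrative). A handler on a shared line must detect
 * whether its own device raised the interrupt and return IRQ_NONE if not:
 *
 *	static irqreturn_t foo_isr(int irq, void *dev_id)
 *	{
 *		struct foo_device *foo = dev_id;
 *
 *		if (!foo_irq_pending(foo))	// hypothetical helper
 *			return IRQ_NONE;	// not ours, try next action
 *		foo_ack_irq(foo);		// hypothetical helper
 *		return IRQ_HANDLED;
 *	}
 *
 *	err = request_irq(foo->irq, foo_isr, IRQF_SHARED, "foo", foo);
 *	...
 *	free_irq(foo->irq, foo);	// dev_id picks our action on the line
 */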

struct device;

extern int __must_check devm_request_irq(struct device *dev, unsigned int irq,
			    irq_handler_t handler, unsigned long irqflags,
			    const char *devname, void *dev_id);
extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
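
/*
 * The devm_* variants tie the registration to the lifetime of a struct
 * device, so the IRQ is released automatically on driver detach or on a
 * failed probe. A sketch for a probe routine (names illustrative):
 *
 *	err = devm_request_irq(&pdev->dev, foo->irq, foo_isr, 0, "foo", foo);
 *	if (err)
 *		return err;	// no explicit free needed on error paths
 */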

/*
 * On lockdep we don't want to enable hardirqs in hardirq
 * context. Use local_irq_enable_in_hardirq() to annotate
 * kernel code that has to do this nevertheless (pretty much
 * the only valid case is for old/broken hardware that is
 * insanely slow).
 *
 * NOTE: in theory this might break fragile code that relies
 * on hardirq delivery - in practice we don't seem to have such
 * places left. So the only effect should be slightly increased
 * irqs-off latencies.
 */
#ifdef CONFIG_LOCKDEP
# define local_irq_enable_in_hardirq()	do { } while (0)
#else
# define local_irq_enable_in_hardirq()	local_irq_enable()
#endif
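
/*
 * Sketch of the intended (rare) use, assuming a hypothetical ancient
 * device that takes milliseconds to respond while other interrupts
 * must still be serviced:
 *
 *	local_irq_enable_in_hardirq();
 *	foo_wait_for_response(foo);	// hypothetical slow poll
 *	local_irq_disable();
 */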

extern void disable_irq_nosync(unsigned int irq);
extern void disable_irq(unsigned int irq);
extern void enable_irq(unsigned int irq);

#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)

extern cpumask_t irq_default_affinity;

extern int irq_set_affinity(unsigned int irq, cpumask_t cpumask);
extern int irq_can_set_affinity(unsigned int irq);
extern int irq_select_affinity(unsigned int irq);

#else /* CONFIG_SMP */

static inline int irq_set_affinity(unsigned int irq, cpumask_t cpumask)
{
	return -EINVAL;
}

static inline int irq_can_set_affinity(unsigned int irq)
{
	return 0;
}

static inline int irq_select_affinity(unsigned int irq) { return 0; }

#endif /* CONFIG_SMP && CONFIG_GENERIC_HARDIRQS */
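
/*
 * A sketch of pinning an interrupt to CPU 0 (assuming the irq chip
 * supports affinity at all; treat failure as advisory):
 *
 *	if (irq_can_set_affinity(irq))
 *		irq_set_affinity(irq, cpumask_of_cpu(0));
 */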

#ifdef CONFIG_GENERIC_HARDIRQS
/*
 * Special lockdep variants of irq disabling/enabling.
 * These should be used for locking constructs that know
 * that a particular irq context is disabled and is the
 * only irq-context user of a lock, so that it is safe to
 * take the lock in the irq-disabled section without
 * disabling hardirqs.
 *
 * On !CONFIG_LOCKDEP they are equivalent to the normal
 * irq disable/enable methods.
 */
static inline void disable_irq_nosync_lockdep(unsigned int irq)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_save(*flags);
#endif
}

static inline void disable_irq_lockdep(unsigned int irq)
{
	disable_irq(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void enable_irq_lockdep(unsigned int irq)
{
#ifdef CONFIG_LOCKDEP
	local_irq_enable();
#endif
	enable_irq(irq);
}

static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags)
{
#ifdef CONFIG_LOCKDEP
	local_irq_restore(*flags);
#endif
	enable_irq(irq);
}
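
/*
 * Sketch of the pattern the variants above are meant for (illustrative):
 * a driver whose lock is taken from exactly one interrupt handler can
 * skip disabling hardirqs around the lock once that irq is disabled:
 *
 *	disable_irq_nosync_lockdep(foo->irq);
 *	spin_lock(&foo->lock);		// safe: only foo's irq takes it
 *	...
 *	spin_unlock(&foo->lock);
 *	enable_irq_lockdep(foo->irq);
 */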

/* IRQ wakeup (PM) control: */
extern int set_irq_wake(unsigned int irq, unsigned int on);

static inline int enable_irq_wake(unsigned int irq)
{
	return set_irq_wake(irq, 1);
}

static inline int disable_irq_wake(unsigned int irq)
{
	return set_irq_wake(irq, 0);
}
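
/*
 * A sketch of marking an interrupt as a wakeup source around suspend
 * (illustrative suspend hook; the matching disable_irq_wake() belongs
 * in the resume path):
 *
 *	if (device_may_wakeup(&pdev->dev))
 *		enable_irq_wake(foo->irq);
 */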

#else /* !CONFIG_GENERIC_HARDIRQS */
/*
 * NOTE: non-genirq architectures that want to support the lock
 * validator need to define the methods below in their asm/irq.h
 * files, under an #ifdef CONFIG_LOCKDEP section.
 */
#ifndef CONFIG_LOCKDEP
#  define disable_irq_nosync_lockdep(irq)	disable_irq_nosync(irq)
#  define disable_irq_nosync_lockdep_irqsave(irq, flags) \
						disable_irq_nosync(irq)
#  define disable_irq_lockdep(irq)		disable_irq(irq)
#  define enable_irq_lockdep(irq)		enable_irq(irq)
#  define enable_irq_lockdep_irqrestore(irq, flags) \
						enable_irq(irq)
# endif

static inline int enable_irq_wake(unsigned int irq)
{
	return 0;
}

static inline int disable_irq_wake(unsigned int irq)
{
	return 0;
}
#endif /* CONFIG_GENERIC_HARDIRQS */

#ifndef __ARCH_SET_SOFTIRQ_PENDING
#define set_softirq_pending(x) (local_softirq_pending() = (x))
#define or_softirq_pending(x)  (local_softirq_pending() |= (x))
#endif

/* Some architectures might implement lazy enabling/disabling of
 * interrupts. In some cases, such as stop_machine, we might want
 * to ensure that after a local_irq_disable(), interrupts have
 * really been disabled in hardware. Such architectures need to
 * implement the following hook.
 */
#ifndef hard_irq_disable
#define hard_irq_disable()	do { } while(0)
#endif

/* Please avoid allocating new softirqs unless you really need
   high-frequency threaded job scheduling. For almost all purposes
   tasklets are more than enough. E.g. all serial device BHs et
   al. should be converted to tasklets, not to softirqs.
 */

enum
{
	HI_SOFTIRQ=0,
	TIMER_SOFTIRQ,
	NET_TX_SOFTIRQ,
	NET_RX_SOFTIRQ,
	BLOCK_SOFTIRQ,
	TASKLET_SOFTIRQ,
	SCHED_SOFTIRQ,
#ifdef CONFIG_HIGH_RES_TIMERS
	HRTIMER_SOFTIRQ,
#endif
	RCU_SOFTIRQ,	/* Preferably RCU should always be the last softirq */
};

/* softirq mask and active fields moved to irq_cpustat_t in
 * asm/hardirq.h to get better cache usage. KAO
 */

struct softirq_action
{
	void	(*action)(struct softirq_action *);
};

asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void);
extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
#define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);
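
/*
 * A sketch of how a subsystem wires up one of the softirqs above at
 * boot and raises it later (mirroring what the tasklet code does;
 * the foo_* names are illustrative, and FOO_SOFTIRQ stands for a slot
 * in the enum above):
 *
 *	static void foo_softirq_action(struct softirq_action *a)
 *	{
 *		// drain the per-cpu work queued before raise_softirq()
 *	}
 *
 *	// boot time:
 *	open_softirq(FOO_SOFTIRQ, foo_softirq_action);
 *
 *	// from irq context, after queueing work:
 *	raise_softirq(FOO_SOFTIRQ);
 */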


/* Tasklets --- multithreaded analogue of BHs.

   The main feature distinguishing them from generic softirqs: a given
   tasklet runs on only one CPU at a time.

   The main feature distinguishing them from BHs: different tasklets
   may run simultaneously on different CPUs.

   Properties:
   * If tasklet_schedule() is called, then the tasklet is guaranteed
     to be executed on some cpu at least once after this.
   * If the tasklet is already scheduled, but its execution has not yet
     started, it will be executed only once.
   * If this tasklet is already running on another CPU (or schedule is called
     from the tasklet itself), it is rescheduled for later.
   * A tasklet is strictly serialized with respect to itself, but not
     with respect to other tasklets. If a client needs inter-tasklet
     synchronization, it must provide it with spinlocks.
 */

struct tasklet_struct
{
	struct tasklet_struct *next;
	unsigned long state;
	atomic_t count;
	void (*func)(unsigned long);
	unsigned long data;
};

#define DECLARE_TASKLET(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }

#define DECLARE_TASKLET_DISABLED(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }
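
/*
 * A minimal sketch of static declaration and scheduling (names are
 * illustrative). The function runs once, on one CPU, some time after
 * tasklet_schedule() returns:
 *
 *	static void foo_do_work(unsigned long data)
 *	{
 *		struct foo_device *foo = (struct foo_device *)data;
 *		// ... bottom-half work ...
 *	}
 *	static DECLARE_TASKLET(foo_tasklet, foo_do_work,
 *			       (unsigned long)&foo_dev);
 *
 *	// typically from foo's hard interrupt handler:
 *	tasklet_schedule(&foo_tasklet);
 */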


enum
{
	TASKLET_STATE_SCHED,	/* Tasklet is scheduled for execution */
	TASKLET_STATE_RUN	/* Tasklet is running (SMP only) */
};

#ifdef CONFIG_SMP
static inline int tasklet_trylock(struct tasklet_struct *t)
{
	return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock(struct tasklet_struct *t)
{
	smp_mb__before_clear_bit();
	clear_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock_wait(struct tasklet_struct *t)
{
	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
}
#else
#define tasklet_trylock(t) 1
#define tasklet_unlock_wait(t) do { } while (0)
#define tasklet_unlock(t) do { } while (0)
#endif

extern void __tasklet_schedule(struct tasklet_struct *t);

static inline void tasklet_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_schedule(t);
}

extern void __tasklet_hi_schedule(struct tasklet_struct *t);

static inline void tasklet_hi_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_hi_schedule(t);
}


static inline void tasklet_disable_nosync(struct tasklet_struct *t)
{
	atomic_inc(&t->count);
	smp_mb__after_atomic_inc();
}

static inline void tasklet_disable(struct tasklet_struct *t)
{
	tasklet_disable_nosync(t);
	tasklet_unlock_wait(t);
	smp_mb();
}

static inline void tasklet_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic_dec();
	atomic_dec(&t->count);
}

static inline void tasklet_hi_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic_dec();
	atomic_dec(&t->count);
}

extern void tasklet_kill(struct tasklet_struct *t);
extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
extern void tasklet_init(struct tasklet_struct *t,
			 void (*func)(unsigned long), unsigned long data);

/*
 * Autoprobing for irqs:
 *
 * probe_irq_on() and probe_irq_off() provide robust primitives
 * for accurate IRQ probing during kernel initialization. They are
 * reasonably simple to use, are not "fooled" by spurious interrupts,
 * and, unlike other attempts at IRQ probing, they do not get hung on
 * stuck interrupts (such as unused PS/2 mouse interfaces on ASUS boards).
 *
 * For reasonably foolproof probing, use them as follows:
 *
 * 1. clear and/or mask the device's internal interrupt.
 * 2. sti();
 * 3. irqs = probe_irq_on();	// "take over" all unassigned idle IRQs
 * 4. enable the device and cause it to trigger an interrupt.
 * 5. wait for the device to interrupt, using non-intrusive polling or a delay.
 * 6. irq = probe_irq_off(irqs);	// get IRQ number, 0=none, negative=multiple
 * 7. service the device to clear its pending interrupt.
 * 8. loop again if paranoia is required.
 *
 * probe_irq_on() returns a mask of allocated irqs.
 *
 * probe_irq_off() takes the mask as a parameter,
 * and returns the irq number which occurred,
 * or zero if none occurred, or a negative irq number
 * if more than one irq occurred.
 */
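
/*
 * A sketch of the recipe above (the foo_* helpers are illustrative;
 * interrupts are assumed enabled per step 2):
 *
 *	unsigned long irqs;
 *	int irq;
 *
 *	foo_mask_irq(foo);		// step 1: quiesce the device
 *	irqs = probe_irq_on();		// step 3
 *	foo_trigger_test_irq(foo);	// step 4
 *	mdelay(10);			// step 5: give it time to fire
 *	irq = probe_irq_off(irqs);	// step 6
 *	foo_ack_irq(foo);		// step 7
 *	if (irq <= 0)
 *		return -ENODEV;		// none or multiple irqs seen
 */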

#if defined(CONFIG_GENERIC_HARDIRQS) && !defined(CONFIG_GENERIC_IRQ_PROBE)
static inline unsigned long probe_irq_on(void)
{
	return 0;
}
static inline int probe_irq_off(unsigned long val)
{
	return 0;
}
static inline unsigned int probe_irq_mask(unsigned long val)
{
	return 0;
}
#else
extern unsigned long probe_irq_on(void);	/* returns 0 on failure */
extern int probe_irq_off(unsigned long);	/* returns 0 or negative on failure */
extern unsigned int probe_irq_mask(unsigned long);	/* returns mask of ISA interrupts */
#endif

#ifdef CONFIG_PROC_FS
/* Initialize /proc/irq/ */
extern void init_irq_proc(void);
#else
static inline void init_irq_proc(void)
{
}
#endif

int show_interrupts(struct seq_file *p, void *v);

#endif /* _LINUX_INTERRUPT_H */