/* interrupt.h */
#ifndef _LINUX_INTERRUPT_H
#define _LINUX_INTERRUPT_H

#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/preempt.h>
#include <linux/cpumask.h>
#include <linux/irqreturn.h>
#include <linux/irqnr.h>
#include <linux/hardirq.h>
#include <linux/sched.h>
#include <linux/irqflags.h>
#include <linux/smp.h>
#include <linux/percpu.h>

#include <asm/atomic.h>
#include <asm/ptrace.h>
#include <asm/system.h>

/*
 * These correspond to the IORESOURCE_IRQ_* defines in
 * linux/ioport.h to select the interrupt line behaviour.  When
 * requesting an interrupt without specifying an IRQF_TRIGGER, the
 * setting should be assumed to be "as already configured", which
 * may be as per machine or firmware initialisation.
 */
#define IRQF_TRIGGER_NONE	0x00000000
#define IRQF_TRIGGER_RISING	0x00000001
#define IRQF_TRIGGER_FALLING	0x00000002
#define IRQF_TRIGGER_HIGH	0x00000004
#define IRQF_TRIGGER_LOW	0x00000008
#define IRQF_TRIGGER_MASK	(IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | \
				 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)
#define IRQF_TRIGGER_PROBE	0x00000010
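
/*
 * Illustrative sketch (not part of the original header): a driver that
 * knows its interrupt line fires on the falling edge can pass one of the
 * trigger flags above to request_irq() (declared further below).  The
 * names "button_isr" and "button_dev" are hypothetical.
 *
 *	err = request_irq(irq, button_isr, IRQF_TRIGGER_FALLING,
 *			  "button", button_dev);
 *
 * Omitting any IRQF_TRIGGER_* bit keeps whatever trigger the machine or
 * firmware has already configured.
 */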

/*
 * These flags are used only by the kernel as part of the
 * irq handling routines.
 *
 * IRQF_DISABLED - keep irqs disabled when calling the action handler
 * IRQF_SAMPLE_RANDOM - irq is used to feed the random generator
 * IRQF_SHARED - allow sharing the irq among several devices
 * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur
 * IRQF_TIMER - Flag to mark this interrupt as timer interrupt
 * IRQF_PERCPU - Interrupt is per cpu
 * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
 * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
 *                registered first in a shared interrupt is considered for
 *                performance reasons)
 */
#define IRQF_DISABLED		0x00000020
#define IRQF_SAMPLE_RANDOM	0x00000040
#define IRQF_SHARED		0x00000080
#define IRQF_PROBE_SHARED	0x00000100
#define IRQF_TIMER		0x00000200
#define IRQF_PERCPU		0x00000400
#define IRQF_NOBALANCING	0x00000800
#define IRQF_IRQPOLL		0x00001000

typedef irqreturn_t (*irq_handler_t)(int, void *);

/**
 * struct irqaction - per interrupt action descriptor
 * @handler:	interrupt handler function
 * @flags:	flags (see IRQF_* above)
 * @mask:	no comment as it is useless and about to be removed
 * @name:	name of the device
 * @dev_id:	cookie to identify the device
 * @next:	pointer to the next irqaction for shared interrupts
 * @irq:	interrupt number
 * @dir:	pointer to the proc/irq/NN/name entry
 */
struct irqaction {
	irq_handler_t handler;
	unsigned long flags;
	cpumask_t mask;
	const char *name;
	void *dev_id;
	struct irqaction *next;
	int irq;
	struct proc_dir_entry *dir;
};

extern irqreturn_t no_action(int cpl, void *dev_id);
extern int __must_check request_irq(unsigned int, irq_handler_t handler,
		       unsigned long, const char *, void *);
extern void free_irq(unsigned int, void *);
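
/*
 * Illustrative sketch (not part of the original header): the usual pattern
 * for claiming and releasing an interrupt line.  A handler returns
 * IRQ_HANDLED when the interrupt came from its device and IRQ_NONE
 * otherwise, which is what makes IRQF_SHARED lines workable.  The names
 * "foo_interrupt", "foo" and the foo_*() helpers are hypothetical.
 *
 *	static irqreturn_t foo_interrupt(int irq, void *dev_id)
 *	{
 *		struct foo *foo = dev_id;
 *
 *		if (!foo_irq_pending(foo))
 *			return IRQ_NONE;
 *		foo_ack_irq(foo);
 *		return IRQ_HANDLED;
 *	}
 *
 *	err = request_irq(foo->irq, foo_interrupt, IRQF_SHARED, "foo", foo);
 *	...
 *	free_irq(foo->irq, foo);	// dev_id must match the request
 */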

struct device;

extern int __must_check devm_request_irq(struct device *dev, unsigned int irq,
			    irq_handler_t handler, unsigned long irqflags,
			    const char *devname, void *dev_id);
extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
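
/*
 * Illustrative sketch (not part of the original header): devm_request_irq()
 * ties the irq to a struct device, so it is released automatically when the
 * device is detached and no explicit free_irq() is needed in the error or
 * remove paths.  "pdev", "foo" and "foo_interrupt" are hypothetical.
 *
 *	err = devm_request_irq(&pdev->dev, irq, foo_interrupt, 0,
 *			       dev_name(&pdev->dev), foo);
 *	if (err)
 *		return err;	// nothing to unwind for the irq itself
 */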

/*
 * On lockdep we don't want to enable hardirqs in hardirq
 * context. Use local_irq_enable_in_hardirq() to annotate
 * kernel code that has to do this nevertheless (pretty much
 * the only valid case is for old/broken hardware that is
 * insanely slow).
 *
 * NOTE: in theory this might break fragile code that relies
 * on hardirq delivery - in practice we don't seem to have such
 * places left. So the only effect should be slightly increased
 * irqs-off latencies.
 */
#ifdef CONFIG_LOCKDEP
# define local_irq_enable_in_hardirq()	do { } while (0)
#else
# define local_irq_enable_in_hardirq()	local_irq_enable()
#endif

extern void disable_irq_nosync(unsigned int irq);
extern void disable_irq(unsigned int irq);
extern void enable_irq(unsigned int irq);

/* The following three functions are for the core kernel use only. */
extern void suspend_device_irqs(void);
extern void resume_device_irqs(void);
#ifdef CONFIG_PM_SLEEP
extern int check_wakeup_irqs(void);
#else
static inline int check_wakeup_irqs(void) { return 0; }
#endif

#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)

extern cpumask_var_t irq_default_affinity;

extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask);
extern int irq_can_set_affinity(unsigned int irq);
extern int irq_select_affinity(unsigned int irq);

#else /* CONFIG_SMP */

static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
{
	return -EINVAL;
}

static inline int irq_can_set_affinity(unsigned int irq)
{
	return 0;
}

static inline int irq_select_affinity(unsigned int irq)  { return 0; }

#endif /* CONFIG_SMP && CONFIG_GENERIC_HARDIRQS */
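
/*
 * Illustrative sketch (not part of the original header): pinning an irq to
 * one CPU where the architecture allows it.  On uniprocessor or non-genirq
 * builds the stubs above simply report that affinity cannot be set.
 *
 *	if (irq_can_set_affinity(irq))
 *		irq_set_affinity(irq, cpumask_of(cpu));
 */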

#ifdef CONFIG_GENERIC_HARDIRQS
/*
 * Special lockdep variants of irq disabling/enabling.
 * These should be used for locking constructs that
 * know that a particular irq context is disabled,
 * and is the only irq-context user of a lock,
 * so that it's safe to take the lock in the irq-disabled
 * section without disabling hardirqs.
 *
 * On !CONFIG_LOCKDEP they are equivalent to the normal
 * irq disable/enable methods.
 */
static inline void disable_irq_nosync_lockdep(unsigned int irq)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_save(*flags);
#endif
}

static inline void disable_irq_lockdep(unsigned int irq)
{
	disable_irq(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void enable_irq_lockdep(unsigned int irq)
{
#ifdef CONFIG_LOCKDEP
	local_irq_enable();
#endif
	enable_irq(irq);
}

static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags)
{
#ifdef CONFIG_LOCKDEP
	local_irq_restore(*flags);
#endif
	enable_irq(irq);
}

/* IRQ wakeup (PM) control: */
extern int set_irq_wake(unsigned int irq, unsigned int on);

static inline int enable_irq_wake(unsigned int irq)
{
	return set_irq_wake(irq, 1);
}

static inline int disable_irq_wake(unsigned int irq)
{
	return set_irq_wake(irq, 0);
}
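
/*
 * Illustrative sketch (not part of the original header): a driver whose
 * interrupt should wake the system typically brackets its suspend/resume
 * callbacks like this.  "foo", "foo_suspend" and "foo_resume" are
 * hypothetical.
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		enable_irq_wake(foo->irq);
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		disable_irq_wake(foo->irq);
 *		return 0;
 *	}
 */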

#else /* !CONFIG_GENERIC_HARDIRQS */
/*
 * NOTE: non-genirq architectures, if they want to support the lock
 * validator need to define the methods below in their asm/irq.h
 * files, under an #ifdef CONFIG_LOCKDEP section.
 */
#ifndef CONFIG_LOCKDEP
# define disable_irq_nosync_lockdep(irq)	disable_irq_nosync(irq)
# define disable_irq_nosync_lockdep_irqsave(irq, flags) \
						disable_irq_nosync(irq)
# define disable_irq_lockdep(irq)		disable_irq(irq)
# define enable_irq_lockdep(irq)		enable_irq(irq)
# define enable_irq_lockdep_irqrestore(irq, flags) \
						enable_irq(irq)
# endif

static inline int enable_irq_wake(unsigned int irq)
{
	return 0;
}

static inline int disable_irq_wake(unsigned int irq)
{
	return 0;
}
#endif /* CONFIG_GENERIC_HARDIRQS */

#ifndef __ARCH_SET_SOFTIRQ_PENDING
#define set_softirq_pending(x) (local_softirq_pending() = (x))
#define or_softirq_pending(x)  (local_softirq_pending() |= (x))
#endif

/* Some architectures might implement lazy enabling/disabling of
 * interrupts. In some cases, such as stop_machine, we might want
 * to ensure that after a local_irq_disable(), interrupts have
 * really been disabled in hardware. Such architectures need to
 * implement the following hook.
 */
#ifndef hard_irq_disable
#define hard_irq_disable()	do { } while(0)
#endif

/* PLEASE, avoid allocating new softirqs unless you really need
   high-frequency threaded job scheduling. For almost all purposes
   tasklets are more than enough. E.g. all serial device BHs et
   al. should be converted to tasklets, not to softirqs.
 */

enum
{
	HI_SOFTIRQ=0,
	TIMER_SOFTIRQ,
	NET_TX_SOFTIRQ,
	NET_RX_SOFTIRQ,
	BLOCK_SOFTIRQ,
	TASKLET_SOFTIRQ,
	SCHED_SOFTIRQ,
	HRTIMER_SOFTIRQ,
	RCU_SOFTIRQ,	/* Preferably RCU should always be the last softirq */

	NR_SOFTIRQS
};

/* softirq mask and active fields moved to irq_cpustat_t in
 * asm/hardirq.h to get better cache usage.  KAO
 */

struct softirq_action
{
	void	(*action)(struct softirq_action *);
};

asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void);
extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
#define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);
extern void wakeup_softirqd(void);
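
/*
 * Illustrative sketch (not part of the original header): registering and
 * raising a softirq.  Softirq handlers run with hardirqs enabled but must
 * not sleep.  "FOO_SOFTIRQ" and "foo_softirq_action" are hypothetical; as
 * the comment above says, new softirqs should almost never be added.
 *
 *	static void foo_softirq_action(struct softirq_action *h)
 *	{
 *		// drain a per-cpu work list, etc.
 *	}
 *
 *	open_softirq(FOO_SOFTIRQ, foo_softirq_action);	// at init time
 *	...
 *	raise_softirq(FOO_SOFTIRQ);	// from irq context, to run it soon
 */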

/* This is the worklist that queues up per-cpu softirq work.
 *
 * send_remote_softirq() adds work to these lists, and
 * the softirq handler itself dequeues from them.  The queues
 * are protected by disabling local cpu interrupts and they must
 * only be accessed by the local cpu that they are for.
 */
DECLARE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);

/* Try to send a softirq to a remote cpu.  If this cannot be done, the
 * work will be queued to the local cpu.
 */
extern void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq);

/* Like send_remote_softirq(), but the caller must disable local cpu interrupts
 * and compute the current cpu, passed in as 'this_cpu'.
 */
extern void __send_remote_softirq(struct call_single_data *cp, int cpu,
				  int this_cpu, int softirq);

/* Tasklets --- multithreaded analogue of BHs.

   The main feature distinguishing them from generic softirqs: a tasklet
   runs on only one CPU at a time.

   The main feature distinguishing them from BHs: different tasklets
   may be run simultaneously on different CPUs.

   Properties:
   * If tasklet_schedule() is called, then the tasklet is guaranteed
     to be executed on some cpu at least once after this.
   * If the tasklet is already scheduled, but its execution is still not
     started, it will be executed only once.
   * If this tasklet is already running on another CPU (or schedule is called
     from the tasklet itself), it is rescheduled for later.
   * A tasklet is strictly serialized wrt itself, but not
     wrt other tasklets.  If a client needs some intertask synchronization,
     it has to do it with spinlocks.
 */

struct tasklet_struct
{
	struct tasklet_struct *next;
	unsigned long state;
	atomic_t count;
	void (*func)(unsigned long);
	unsigned long data;
};

#define DECLARE_TASKLET(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }

#define DECLARE_TASKLET_DISABLED(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }

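/*
 * Illustrative sketch (not part of the original header): a statically
 * declared tasklet.  The function runs in softirq context, so it must not
 * sleep.  "foo_dev", "foo_tasklet" and "foo_tasklet_fn" are hypothetical.
 *
 *	static void foo_tasklet_fn(unsigned long data)
 *	{
 *		struct foo *foo = (struct foo *)data;
 *		// bottom-half work deferred from the hard irq handler
 *	}
 *
 *	static struct foo foo_dev;
 *	static DECLARE_TASKLET(foo_tasklet, foo_tasklet_fn,
 *			       (unsigned long)&foo_dev);
 *
 *	// typically from the interrupt handler:
 *	tasklet_schedule(&foo_tasklet);
 */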

enum
{
	TASKLET_STATE_SCHED,	/* Tasklet is scheduled for execution */
	TASKLET_STATE_RUN	/* Tasklet is running (SMP only) */
};

#ifdef CONFIG_SMP
static inline int tasklet_trylock(struct tasklet_struct *t)
{
	return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock(struct tasklet_struct *t)
{
	smp_mb__before_clear_bit();
	clear_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock_wait(struct tasklet_struct *t)
{
	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
}
#else
#define tasklet_trylock(t) 1
#define tasklet_unlock_wait(t) do { } while (0)
#define tasklet_unlock(t) do { } while (0)
#endif

extern void __tasklet_schedule(struct tasklet_struct *t);

static inline void tasklet_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_schedule(t);
}

extern void __tasklet_hi_schedule(struct tasklet_struct *t);

static inline void tasklet_hi_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_hi_schedule(t);
}


static inline void tasklet_disable_nosync(struct tasklet_struct *t)
{
	atomic_inc(&t->count);
	smp_mb__after_atomic_inc();
}

static inline void tasklet_disable(struct tasklet_struct *t)
{
	tasklet_disable_nosync(t);
	tasklet_unlock_wait(t);
	smp_mb();
}

static inline void tasklet_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic_dec();
	atomic_dec(&t->count);
}

static inline void tasklet_hi_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic_dec();
	atomic_dec(&t->count);
}

extern void tasklet_kill(struct tasklet_struct *t);
extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
extern void tasklet_init(struct tasklet_struct *t,
			 void (*func)(unsigned long), unsigned long data);
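
/*
 * Illustrative sketch (not part of the original header): tasklets embedded
 * in a dynamically allocated structure are set up with tasklet_init() and
 * must be killed before the structure is freed.  "foo" and
 * "foo_tasklet_fn" are hypothetical.
 *
 *	tasklet_init(&foo->tasklet, foo_tasklet_fn, (unsigned long)foo);
 *	...
 *	tasklet_kill(&foo->tasklet);	// wait for any scheduled run to finish
 *	kfree(foo);
 */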

/*
 * Autoprobing for irqs:
 *
 * probe_irq_on() and probe_irq_off() provide robust primitives
 * for accurate IRQ probing during kernel initialization.  They are
 * reasonably simple to use, are not "fooled" by spurious interrupts,
 * and, unlike other attempts at IRQ probing, they do not get hung on
 * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards).
 *
 * For reasonably foolproof probing, use them as follows:
 *
 * 1. clear and/or mask the device's internal interrupt.
 * 2. sti();
 * 3. irqs = probe_irq_on();	// "take over" all unassigned idle IRQs
 * 4. enable the device and cause it to trigger an interrupt.
 * 5. wait for the device to interrupt, using non-intrusive polling or a delay.
 * 6. irq = probe_irq_off(irqs);	// get IRQ number, 0=none, negative=multiple
 * 7. service the device to clear its pending interrupt.
 * 8. loop again if paranoia is required.
 *
 * probe_irq_on() returns a mask of allocated irqs.
 *
 * probe_irq_off() takes the mask as a parameter,
 * and returns the irq number which occurred,
 * or zero if none occurred, or a negative irq number
 * if more than one irq occurred.
 */

#if defined(CONFIG_GENERIC_HARDIRQS) && !defined(CONFIG_GENERIC_IRQ_PROBE)
static inline unsigned long probe_irq_on(void)
{
	return 0;
}
static inline int probe_irq_off(unsigned long val)
{
	return 0;
}
static inline unsigned int probe_irq_mask(unsigned long val)
{
	return 0;
}
#else
extern unsigned long probe_irq_on(void);	/* returns 0 on failure */
extern int probe_irq_off(unsigned long);	/* returns 0 or negative on failure */
extern unsigned int probe_irq_mask(unsigned long);	/* returns mask of ISA interrupts */
#endif
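
/*
 * Illustrative sketch (not part of the original header): the probing steps
 * described above expressed in code.  "foo_trigger_irq" and "foo_ack_irq"
 * are hypothetical device-specific helpers.
 *
 *	unsigned long mask;
 *	int irq;
 *
 *	mask = probe_irq_on();
 *	foo_trigger_irq(foo);		// make the device raise its interrupt
 *	mdelay(10);			// give it time to arrive
 *	irq = probe_irq_off(mask);	// 0 = none seen, <0 = more than one
 *	foo_ack_irq(foo);
 *	if (irq > 0)
 *		foo->irq = irq;
 */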

#ifdef CONFIG_PROC_FS
/* Initialize /proc/irq/ */
extern void init_irq_proc(void);
#else
static inline void init_irq_proc(void)
{
}
#endif

#if defined(CONFIG_GENERIC_HARDIRQS) && defined(CONFIG_DEBUG_SHIRQ)
extern void debug_poll_all_shared_irqs(void);
#else
static inline void debug_poll_all_shared_irqs(void) { }
#endif

int show_interrupts(struct seq_file *p, void *v);

struct irq_desc;

extern int early_irq_init(void);
extern int arch_probe_nr_irqs(void);
extern int arch_early_irq_init(void);
extern int arch_init_chip_data(struct irq_desc *desc, int cpu);

#endif