/* interrupt.h */
#ifndef _LINUX_INTERRUPT_H
#define _LINUX_INTERRUPT_H

#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/preempt.h>
#include <linux/cpumask.h>
#include <linux/irqreturn.h>
#include <linux/irqnr.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/hrtimer.h>
#include <linux/kref.h>
#include <linux/workqueue.h>

#include <linux/atomic.h>
#include <asm/ptrace.h>
#include <asm/irq.h>

/*
 * These correspond to the IORESOURCE_IRQ_* defines in
 * linux/ioport.h to select the interrupt line behaviour. When
 * requesting an interrupt without specifying an IRQF_TRIGGER, the
 * setting should be assumed to be "as already configured", which
 * may be as per machine or firmware initialisation.
 */
#define IRQF_TRIGGER_NONE	0x00000000
#define IRQF_TRIGGER_RISING	0x00000001
#define IRQF_TRIGGER_FALLING	0x00000002
#define IRQF_TRIGGER_HIGH	0x00000004
#define IRQF_TRIGGER_LOW	0x00000008
#define IRQF_TRIGGER_MASK	(IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | \
				 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)
#define IRQF_TRIGGER_PROBE	0x00000010

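/*
 * Illustrative sketch (not part of this header): a driver that knows its
 * line is edge-triggered passes the trigger type together with the other
 * IRQF_* flags when requesting the interrupt.  The handler, device name
 * and cookie below are hypothetical.
 *
 *	err = request_irq(irq, my_handler,
 *			  IRQF_TRIGGER_FALLING | IRQF_SHARED,
 *			  "my-device", my_dev);
 *	if (err)
 *		goto fail;
 */
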
/*
 * These flags are used only by the kernel as part of the
 * irq handling routines.
 *
 * IRQF_DISABLED - keep irqs disabled when calling the action handler.
 *                 DEPRECATED. This flag is a NOOP and is scheduled to be removed
 * IRQF_SHARED - allow sharing the irq among several devices
 * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur
 * IRQF_TIMER - Flag to mark this interrupt as timer interrupt
 * IRQF_PERCPU - Interrupt is per cpu
 * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
 * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
 *                registered first in a shared interrupt is considered for
 *                performance reasons)
 * IRQF_ONESHOT - Interrupt is not re-enabled after the hardirq handler has
 *                finished. Used by threaded interrupts which need to keep the
 *                irq line disabled until the threaded handler has been run.
 * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend
 * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set
 * IRQF_NO_THREAD - Interrupt cannot be threaded
 * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device
 *                     resume time.
 */
#define IRQF_DISABLED		0x00000020
#define IRQF_SHARED		0x00000080
#define IRQF_PROBE_SHARED	0x00000100
#define __IRQF_TIMER		0x00000200
#define IRQF_PERCPU		0x00000400
#define IRQF_NOBALANCING	0x00000800
#define IRQF_IRQPOLL		0x00001000
#define IRQF_ONESHOT		0x00002000
#define IRQF_NO_SUSPEND		0x00004000
#define IRQF_FORCE_RESUME	0x00008000
#define IRQF_NO_THREAD		0x00010000
#define IRQF_EARLY_RESUME	0x00020000

#define IRQF_TIMER		(__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)

/*
 * These values can be returned by request_any_context_irq() and
 * describe the context the interrupt will be run in.
 *
 * IRQC_IS_HARDIRQ - interrupt runs in hardirq context
 * IRQC_IS_NESTED - interrupt runs in a nested threaded context
 */
enum {
	IRQC_IS_HARDIRQ	= 0,
	IRQC_IS_NESTED,
};

typedef irqreturn_t (*irq_handler_t)(int, void *);

/**
 * struct irqaction - per interrupt action descriptor
 * @handler:	interrupt handler function
 * @name:	name of the device
 * @dev_id:	cookie to identify the device
 * @percpu_dev_id:	cookie to identify the device (per-CPU variant)
 * @next:	pointer to the next irqaction for shared interrupts
 * @irq:	interrupt number
 * @flags:	flags (see IRQF_* above)
 * @thread_fn:	interrupt handler function for threaded interrupts
 * @thread:	thread pointer for threaded interrupts
 * @thread_flags:	flags related to @thread
 * @thread_mask:	bitmask for keeping track of @thread activity
 * @dir:	pointer to the proc/irq/NN/name entry
 */
struct irqaction {
	irq_handler_t		handler;
	void			*dev_id;
	void __percpu		*percpu_dev_id;
	struct irqaction	*next;
	irq_handler_t		thread_fn;
	struct task_struct	*thread;
	unsigned int		irq;
	unsigned int		flags;
	unsigned long		thread_flags;
	unsigned long		thread_mask;
	const char		*name;
	struct proc_dir_entry	*dir;
} ____cacheline_internodealigned_in_smp;

extern irqreturn_t no_action(int cpl, void *dev_id);

extern int __must_check
request_threaded_irq(unsigned int irq, irq_handler_t handler,
		     irq_handler_t thread_fn,
		     unsigned long flags, const char *name, void *dev);

static inline int __must_check
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
	    const char *name, void *dev)
{
	return request_threaded_irq(irq, handler, NULL, flags, name, dev);
}

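/*
 * Illustrative sketch (not part of this header): a threaded interrupt
 * splits the work between a quick hardirq check and a sleepable thread
 * function.  IRQF_ONESHOT keeps the line masked until the thread has run.
 * All my_* names below are hypothetical.
 *
 *	static irqreturn_t my_quick_check(int irq, void *dev_id)
 *	{
 *		if (!my_device_asserted_irq(dev_id))
 *			return IRQ_NONE;
 *		return IRQ_WAKE_THREAD;
 *	}
 *
 *	static irqreturn_t my_slow_work(int irq, void *dev_id)
 *	{
 *		// may sleep, e.g. talk to the device over i2c
 *		return IRQ_HANDLED;
 *	}
 *
 *	err = request_threaded_irq(irq, my_quick_check, my_slow_work,
 *				   IRQF_ONESHOT, "my-device", my_dev);
 */
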
extern int __must_check
request_any_context_irq(unsigned int irq, irq_handler_t handler,
			unsigned long flags, const char *name, void *dev_id);

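/*
 * Illustrative sketch (not part of this header): on success
 * request_any_context_irq() returns one of the IRQC_* values above, so a
 * caller that cares about the execution context can check which one it
 * got.  The names are hypothetical.
 *
 *	ret = request_any_context_irq(irq, my_handler, 0, "my-device", my_dev);
 *	if (ret < 0)
 *		return ret;
 *	if (ret == IRQC_IS_NESTED)
 *		// handler will run in a nested thread, not hardirq context
 */
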
extern int __must_check
request_percpu_irq(unsigned int irq, irq_handler_t handler,
		   const char *devname, void __percpu *percpu_dev_id);

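/*
 * Illustrative sketch (not part of this header): per-cpu interrupts take a
 * __percpu cookie, and each CPU then enables its own copy of the line via
 * enable_percpu_irq() (declared below).  The names and the IRQ_TYPE_NONE
 * type argument (from linux/irq.h) are assumptions for the example.
 *
 *	static DEFINE_PER_CPU(struct my_percpu_data, my_data);
 *
 *	err = request_percpu_irq(irq, my_percpu_handler, "my-timer", &my_data);
 *	// later, on each CPU that should receive it:
 *	enable_percpu_irq(irq, IRQ_TYPE_NONE);
 */
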
extern void free_irq(unsigned int, void *);
extern void free_percpu_irq(unsigned int, void __percpu *);

struct device;

extern int __must_check
devm_request_threaded_irq(struct device *dev, unsigned int irq,
			  irq_handler_t handler, irq_handler_t thread_fn,
			  unsigned long irqflags, const char *devname,
			  void *dev_id);

static inline int __must_check
devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler,
		 unsigned long irqflags, const char *devname, void *dev_id)
{
	return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags,
					 devname, dev_id);
}

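/*
 * Illustrative sketch (not part of this header): with the devm_ variants
 * the interrupt is released automatically when the device is unbound, so a
 * typical probe() needs no matching free_irq().  Names are hypothetical.
 *
 *	static int my_probe(struct platform_device *pdev)
 *	{
 *		...
 *		err = devm_request_irq(&pdev->dev, irq, my_handler, 0,
 *				       dev_name(&pdev->dev), my_dev);
 *		if (err)
 *			return err;
 *		...
 *	}
 */
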
extern int __must_check
devm_request_any_context_irq(struct device *dev, unsigned int irq,
			     irq_handler_t handler, unsigned long irqflags,
			     const char *devname, void *dev_id);

extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);

/*
 * On lockdep we don't want to enable hardirqs in hardirq
 * context. Use local_irq_enable_in_hardirq() to annotate
 * kernel code that has to do this nevertheless (pretty much
 * the only valid case is for old/broken hardware that is
 * insanely slow).
 *
 * NOTE: in theory this might break fragile code that relies
 * on hardirq delivery - in practice we don't seem to have such
 * places left. So the only effect should be slightly increased
 * irqs-off latencies.
 */
#ifdef CONFIG_LOCKDEP
# define local_irq_enable_in_hardirq()	do { } while (0)
#else
# define local_irq_enable_in_hardirq()	local_irq_enable()
#endif

extern void disable_irq_nosync(unsigned int irq);
extern void disable_irq(unsigned int irq);
extern void disable_percpu_irq(unsigned int irq);
extern void enable_irq(unsigned int irq);
extern void enable_percpu_irq(unsigned int irq, unsigned int type);
extern void irq_wake_thread(unsigned int irq, void *dev_id);

/* The following three functions are for core kernel use only. */
extern void suspend_device_irqs(void);
extern void resume_device_irqs(void);
#ifdef CONFIG_PM_SLEEP
extern int check_wakeup_irqs(void);
#else
static inline int check_wakeup_irqs(void) { return 0; }
#endif

#if defined(CONFIG_SMP)

extern cpumask_var_t irq_default_affinity;

/* Internal implementation. Use the helpers below. */
extern int __irq_set_affinity(unsigned int irq, const struct cpumask *cpumask,
			      bool force);

/**
 * irq_set_affinity - Set the irq affinity of a given irq
 * @irq:	Interrupt to set affinity
 * @cpumask:	cpumask to set the affinity to
 *
 * Fails if cpumask does not contain an online CPU
 */
static inline int
irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	return __irq_set_affinity(irq, cpumask, false);
}

/**
 * irq_force_affinity - Force the irq affinity of a given irq
 * @irq:	Interrupt to set affinity
 * @cpumask:	cpumask to set the affinity to
 *
 * Same as irq_set_affinity, but without checking the mask against
 * online cpus.
 *
 * Solely for low-level cpu hotplug code, where we need to make per-cpu
 * interrupts affine before the cpu becomes online.
 */
static inline int
irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	return __irq_set_affinity(irq, cpumask, true);
}

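/*
 * Illustrative sketch (not part of this header): pinning an interrupt to a
 * single CPU with the helpers above.  The check against
 * irq_can_set_affinity() (declared just below) is optional but cheap.
 *
 *	if (irq_can_set_affinity(irq))
 *		err = irq_set_affinity(irq, cpumask_of(2));
 */
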
extern int irq_can_set_affinity(unsigned int irq);
extern int irq_select_affinity(unsigned int irq);

extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);

/**
 * struct irq_affinity_notify - context for notification of IRQ affinity changes
 * @irq:	Interrupt to which notification applies
 * @kref:	Reference count, for internal use
 * @work:	Work item, for internal use
 * @notify:	Function to be called on change. This will be
 *		called in process context.
 * @release:	Function to be called on release. This will be
 *		called in process context. Once registered, the
 *		structure must only be freed when this function is
 *		called or later.
 */
struct irq_affinity_notify {
	unsigned int irq;
	struct kref kref;
	struct work_struct work;
	void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
	void (*release)(struct kref *ref);
};

extern int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);

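/*
 * Illustrative sketch (not part of this header): a driver that keeps
 * per-queue state aligned with the IRQ's affinity embeds the notify
 * structure and registers it.  The callbacks run in process context.
 * All my_* names are hypothetical.
 *
 *	static void my_affinity_changed(struct irq_affinity_notify *notify,
 *					const cpumask_t *mask)
 *	{
 *		struct my_queue *q = container_of(notify, struct my_queue,
 *						  affinity_notify);
 *		// re-home queue resources according to *mask
 *	}
 *
 *	q->affinity_notify.notify = my_affinity_changed;
 *	q->affinity_notify.release = my_affinity_release;
 *	err = irq_set_affinity_notifier(irq, &q->affinity_notify);
 */
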
#else /* CONFIG_SMP */

static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
{
	return -EINVAL;
}

static inline int irq_can_set_affinity(unsigned int irq)
{
	return 0;
}

static inline int irq_select_affinity(unsigned int irq)  { return 0; }

static inline int irq_set_affinity_hint(unsigned int irq,
					const struct cpumask *m)
{
	return -EINVAL;
}
#endif /* CONFIG_SMP */

/*
 * Special lockdep variants of irq disabling/enabling.
 * These should be used for locking constructs that know that a
 * particular irq context is disabled, and that it is the only
 * irq-context user of a lock, so that it's safe to take the lock
 * in the irq-disabled section without disabling hardirqs.
 *
 * On !CONFIG_LOCKDEP they are equivalent to the normal
 * irq disable/enable methods.
 */
static inline void disable_irq_nosync_lockdep(unsigned int irq)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_save(*flags);
#endif
}

static inline void disable_irq_lockdep(unsigned int irq)
{
	disable_irq(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void enable_irq_lockdep(unsigned int irq)
{
#ifdef CONFIG_LOCKDEP
	local_irq_enable();
#endif
	enable_irq(irq);
}

static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags)
{
#ifdef CONFIG_LOCKDEP
	local_irq_restore(*flags);
#endif
	enable_irq(irq);
}

/* IRQ wakeup (PM) control: */
extern int irq_set_irq_wake(unsigned int irq, unsigned int on);

static inline int enable_irq_wake(unsigned int irq)
{
	return irq_set_irq_wake(irq, 1);
}

static inline int disable_irq_wake(unsigned int irq)
{
	return irq_set_irq_wake(irq, 0);
}

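/*
 * Illustrative sketch (not part of this header): a driver whose interrupt
 * should wake the system from suspend arms it in its suspend callback and
 * disarms it on resume.  Names are hypothetical.
 *
 *	static int my_suspend(struct device *dev)
 *	{
 *		if (device_may_wakeup(dev))
 *			enable_irq_wake(my_dev->irq);
 *		return 0;
 *	}
 *
 *	static int my_resume(struct device *dev)
 *	{
 *		if (device_may_wakeup(dev))
 *			disable_irq_wake(my_dev->irq);
 *		return 0;
 *	}
 */
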

#ifdef CONFIG_IRQ_FORCED_THREADING
extern bool force_irqthreads;
#else
#define force_irqthreads	(0)
#endif

#ifndef __ARCH_SET_SOFTIRQ_PENDING
#define set_softirq_pending(x) (local_softirq_pending() = (x))
#define or_softirq_pending(x)  (local_softirq_pending() |= (x))
#endif

/* Some architectures might implement lazy enabling/disabling of
 * interrupts. In some cases, such as stop_machine, we might want
 * to ensure that after a local_irq_disable(), interrupts have
 * really been disabled in hardware. Such architectures need to
 * implement the following hook.
 */
#ifndef hard_irq_disable
#define hard_irq_disable()	do { } while(0)
#endif

/* PLEASE, avoid allocating new softirqs unless you really need
   high-frequency threaded job scheduling. For almost all purposes
   tasklets are more than enough. E.g. all serial device BHs et
   al. should be converted to tasklets, not to softirqs.
 */

enum
{
	HI_SOFTIRQ=0,
	TIMER_SOFTIRQ,
	NET_TX_SOFTIRQ,
	NET_RX_SOFTIRQ,
	BLOCK_SOFTIRQ,
	BLOCK_IOPOLL_SOFTIRQ,
	TASKLET_SOFTIRQ,
	SCHED_SOFTIRQ,
	HRTIMER_SOFTIRQ,
	RCU_SOFTIRQ,	/* Preferably RCU should always be the last softirq */

	NR_SOFTIRQS
};

#define SOFTIRQ_STOP_IDLE_MASK (~(1 << RCU_SOFTIRQ))

/* map softirq index to softirq name. update 'softirq_to_name' in
 * kernel/softirq.c when adding a new softirq.
 */
extern const char * const softirq_to_name[NR_SOFTIRQS];

/* softirq mask and active fields moved to irq_cpustat_t in
 * asm/hardirq.h to get better cache usage.  KAO
 */

struct softirq_action
{
	void	(*action)(struct softirq_action *);
};

asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void);

#ifdef __ARCH_HAS_DO_SOFTIRQ
void do_softirq_own_stack(void);
#else
static inline void do_softirq_own_stack(void)
{
	__do_softirq();
}
#endif

extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
extern void __raise_softirq_irqoff(unsigned int nr);

extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);

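/*
 * Illustrative sketch (not part of this header): the core kernel wires up
 * each softirq once at init time with open_softirq() and raises it later,
 * typically from (hard)irq context.  MY_SOFTIRQ stands in for one of the
 * enum slots above; new slots are discouraged, see the note above the enum.
 *
 *	static void my_softirq_action(struct softirq_action *h)
 *	{
 *		// runs in softirq context, must not sleep
 *	}
 *
 *	open_softirq(MY_SOFTIRQ, my_softirq_action);	// once, at boot
 *	...
 *	raise_softirq(MY_SOFTIRQ);			// marks it pending
 */
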
DECLARE_PER_CPU(struct task_struct *, ksoftirqd);

static inline struct task_struct *this_cpu_ksoftirqd(void)
{
	return this_cpu_read(ksoftirqd);
}

/* Tasklets --- multithreaded analogue of BHs.

   Main feature differing them from generic softirqs: a tasklet
   runs on only one CPU at a time.

   Main feature differing them from BHs: different tasklets
   may be run simultaneously on different CPUs.

   Properties:
   * If tasklet_schedule() is called, then the tasklet is guaranteed
     to be executed on some cpu at least once after this.
   * If the tasklet is already scheduled, but its execution has not yet
     started, it will be executed only once.
   * If this tasklet is already running on another CPU (or schedule is called
     from the tasklet itself), it is rescheduled for later.
   * A tasklet is strictly serialized wrt itself, but not
     wrt other tasklets. If the client needs some intertask synchronization,
     it must provide it, e.g. with spinlocks.
 */

struct tasklet_struct
{
	struct tasklet_struct *next;
	unsigned long state;
	atomic_t count;
	void (*func)(unsigned long);
	unsigned long data;
};

#define DECLARE_TASKLET(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }

#define DECLARE_TASKLET_DISABLED(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }


enum
{
	TASKLET_STATE_SCHED,	/* Tasklet is scheduled for execution */
	TASKLET_STATE_RUN	/* Tasklet is running (SMP only) */
};

#ifdef CONFIG_SMP
static inline int tasklet_trylock(struct tasklet_struct *t)
{
	return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock(struct tasklet_struct *t)
{
	smp_mb__before_clear_bit();
	clear_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock_wait(struct tasklet_struct *t)
{
	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
}
#else
#define tasklet_trylock(t) 1
#define tasklet_unlock_wait(t) do { } while (0)
#define tasklet_unlock(t) do { } while (0)
#endif

extern void __tasklet_schedule(struct tasklet_struct *t);

static inline void tasklet_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_schedule(t);
}

extern void __tasklet_hi_schedule(struct tasklet_struct *t);

static inline void tasklet_hi_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_hi_schedule(t);
}

extern void __tasklet_hi_schedule_first(struct tasklet_struct *t);

/*
 * This version avoids touching any other tasklets. Needed for kmemcheck
 * in order not to take any page faults while enqueueing this tasklet;
 * consider VERY carefully whether you really need this or
 * tasklet_hi_schedule()...
 */
static inline void tasklet_hi_schedule_first(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_hi_schedule_first(t);
}


static inline void tasklet_disable_nosync(struct tasklet_struct *t)
{
	atomic_inc(&t->count);
	smp_mb__after_atomic_inc();
}

static inline void tasklet_disable(struct tasklet_struct *t)
{
	tasklet_disable_nosync(t);
	tasklet_unlock_wait(t);
	smp_mb();
}

static inline void tasklet_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic_dec();
	atomic_dec(&t->count);
}

static inline void tasklet_hi_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic_dec();
	atomic_dec(&t->count);
}

extern void tasklet_kill(struct tasklet_struct *t);
extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
extern void tasklet_init(struct tasklet_struct *t,
			 void (*func)(unsigned long), unsigned long data);

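/*
 * Illustrative sketch (not part of this header): the usual tasklet life
 * cycle - initialize (or DECLARE_TASKLET), schedule from irq context, and
 * kill before the owning device goes away.  Names are hypothetical.
 *
 *	static void my_tasklet_fn(unsigned long data)
 *	{
 *		struct my_dev *dev = (struct my_dev *)data;
 *		// deferred work, runs in softirq context, must not sleep
 *	}
 *
 *	tasklet_init(&dev->tasklet, my_tasklet_fn, (unsigned long)dev);
 *
 *	// from the interrupt handler:
 *	tasklet_schedule(&dev->tasklet);
 *
 *	// on teardown:
 *	tasklet_kill(&dev->tasklet);
 */
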
struct tasklet_hrtimer {
	struct hrtimer		timer;
	struct tasklet_struct	tasklet;
	enum hrtimer_restart	(*function)(struct hrtimer *);
};

extern void
tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
		     enum hrtimer_restart (*function)(struct hrtimer *),
		     clockid_t which_clock, enum hrtimer_mode mode);

static inline
int tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time,
			  const enum hrtimer_mode mode)
{
	return hrtimer_start(&ttimer->timer, time, mode);
}

static inline
void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer)
{
	hrtimer_cancel(&ttimer->timer);
	tasklet_kill(&ttimer->tasklet);
}

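/*
 * Illustrative sketch (not part of this header): a tasklet_hrtimer defers
 * the timer callback into tasklet (softirq) context, so the callback must
 * not sleep.  The names and the 10ms period are hypothetical.
 *
 *	static enum hrtimer_restart my_timer_fn(struct hrtimer *timer)
 *	{
 *		// periodic softirq-context work here
 *		hrtimer_forward_now(timer, ms_to_ktime(10));
 *		return HRTIMER_RESTART;
 *	}
 *
 *	tasklet_hrtimer_init(&dev->ttimer, my_timer_fn,
 *			     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *	tasklet_hrtimer_start(&dev->ttimer, ms_to_ktime(10),
 *			      HRTIMER_MODE_REL);
 */
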
/*
 * Autoprobing for irqs:
 *
 * probe_irq_on() and probe_irq_off() provide robust primitives
 * for accurate IRQ probing during kernel initialization.  They are
 * reasonably simple to use, are not "fooled" by spurious interrupts,
 * and, unlike other attempts at IRQ probing, they do not get hung on
 * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards).
 *
 * For reasonably foolproof probing, use them as follows:
 *
 * 1. clear and/or mask the device's internal interrupt.
 * 2. sti();
 * 3. irqs = probe_irq_on();	// "take over" all unassigned idle IRQs
 * 4. enable the device and cause it to trigger an interrupt.
 * 5. wait for the device to interrupt, using non-intrusive polling or a delay.
 * 6. irq = probe_irq_off(irqs);	// get IRQ number, 0=none, negative=multiple
 * 7. service the device to clear its pending interrupt.
 * 8. loop again if paranoia is required.
 *
 * probe_irq_on() returns a mask of allocated irq's.
 *
 * probe_irq_off() takes the mask as a parameter,
 * and returns the irq number which occurred,
 * or zero if none occurred, or a negative irq number
 * if more than one irq occurred.
 */
#if !defined(CONFIG_GENERIC_IRQ_PROBE)
static inline unsigned long probe_irq_on(void)
{
	return 0;
}
static inline int probe_irq_off(unsigned long val)
{
	return 0;
}
static inline unsigned int probe_irq_mask(unsigned long val)
{
	return 0;
}
#else
extern unsigned long probe_irq_on(void);	/* returns 0 on failure */
extern int probe_irq_off(unsigned long);	/* returns 0 or negative on failure */
extern unsigned int probe_irq_mask(unsigned long);	/* returns mask of ISA interrupts */
#endif

#ifdef CONFIG_PROC_FS
/* Initialize /proc/irq/ */
extern void init_irq_proc(void);
#else
static inline void init_irq_proc(void)
{
}
#endif

struct seq_file;
int show_interrupts(struct seq_file *p, void *v);
int arch_show_interrupts(struct seq_file *p, int prec);

extern int early_irq_init(void);
extern int arch_probe_nr_irqs(void);
extern int arch_early_irq_init(void);

#endif