#ifndef _LINUX_IRQ_H
#define _LINUX_IRQ_H

/*
 * Please do not include this file in generic code. There is currently
 * no requirement for any architecture to implement anything held
 * within this file.
 *
 * Thanks. --rmk
 */

#include <linux/smp.h>

#ifndef CONFIG_S390

#include <linux/linkage.h>
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/irqreturn.h>
#include <linux/irqnr.h>
#include <linux/errno.h>
#include <linux/topology.h>
#include <linux/wait.h>

#include <asm/irq.h>
#include <asm/ptrace.h>
#include <asm/irq_regs.h>

struct irq_desc;
typedef	void (*irq_flow_handler_t)(unsigned int irq,
					    struct irq_desc *desc);


/*
 * IRQ line status.
 *
 * Bits 0-7 are reserved for the IRQF_* bits in linux/interrupt.h
 *
 * IRQ types
 */
#define IRQ_TYPE_NONE		0x00000000	/* Default, unspecified type */
#define IRQ_TYPE_EDGE_RISING	0x00000001	/* Edge rising type */
#define IRQ_TYPE_EDGE_FALLING	0x00000002	/* Edge falling type */
#define IRQ_TYPE_EDGE_BOTH	(IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING)
#define IRQ_TYPE_LEVEL_HIGH	0x00000004	/* Level high type */
#define IRQ_TYPE_LEVEL_LOW	0x00000008	/* Level low type */
#define IRQ_TYPE_SENSE_MASK	0x0000000f	/* Mask of the above */
#define IRQ_TYPE_PROBE		0x00000010	/* Probing in progress */
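
/*
 * Illustrative note: drivers rarely use these IRQ_TYPE_* constants
 * directly; they pass the corresponding IRQF_TRIGGER_* flags (which
 * occupy the same low bits, see linux/interrupt.h) to request_irq(),
 * and core/platform code ends up calling set_irq_type(), declared
 * further down in this file, along the lines of:
 *
 *	set_irq_type(irq, IRQ_TYPE_EDGE_FALLING);
 */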

/* Internal flags */
#define IRQ_INPROGRESS		0x00000100	/* IRQ handler active - do not enter! */
#define IRQ_DISABLED		0x00000200	/* IRQ disabled - do not enter! */
#define IRQ_PENDING		0x00000400	/* IRQ pending - replay on enable */
#define IRQ_REPLAY		0x00000800	/* IRQ has been replayed but not acked yet */
#define IRQ_AUTODETECT		0x00001000	/* IRQ is being autodetected */
#define IRQ_WAITING		0x00002000	/* IRQ not yet seen - for autodetection */
#define IRQ_LEVEL		0x00004000	/* IRQ level triggered */
#define IRQ_MASKED		0x00008000	/* IRQ masked - shouldn't be seen again */
#define IRQ_PER_CPU		0x00010000	/* IRQ is per CPU */
#define IRQ_NOPROBE		0x00020000	/* IRQ is not valid for probing */
#define IRQ_NOREQUEST		0x00040000	/* IRQ cannot be requested */
#define IRQ_NOAUTOEN		0x00080000	/* IRQ will not be enabled on request_irq() */
#define IRQ_WAKEUP		0x00100000	/* IRQ triggers system wakeup */
#define IRQ_MOVE_PENDING	0x00200000	/* need to re-target IRQ destination */
#define IRQ_NO_BALANCING	0x00400000	/* IRQ is excluded from balancing */
#define IRQ_SPURIOUS_DISABLED	0x00800000	/* IRQ was disabled by the spurious trap */
#define IRQ_MOVE_PCNTXT		0x01000000	/* IRQ migration from process context */
#define IRQ_AFFINITY_SET	0x02000000	/* IRQ affinity was set from userspace */
#define IRQ_SUSPENDED		0x04000000	/* IRQ has gone through suspend sequence */

#ifdef CONFIG_IRQ_PER_CPU
# define CHECK_IRQ_PER_CPU(var)		((var) & IRQ_PER_CPU)
# define IRQ_NO_BALANCING_MASK		(IRQ_PER_CPU | IRQ_NO_BALANCING)
#else
# define CHECK_IRQ_PER_CPU(var)		0
# define IRQ_NO_BALANCING_MASK		IRQ_NO_BALANCING
#endif

struct proc_dir_entry;
struct msi_desc;

/**
 * struct irq_chip - hardware interrupt chip descriptor
 *
 * @name:		name for /proc/interrupts
 * @startup:		start up the interrupt (defaults to ->enable if NULL)
 * @shutdown:		shut down the interrupt (defaults to ->disable if NULL)
 * @enable:		enable the interrupt (defaults to chip->unmask if NULL)
 * @disable:		disable the interrupt (defaults to chip->mask if NULL)
 * @ack:		start of a new interrupt
 * @mask:		mask an interrupt source
 * @mask_ack:		ack and mask an interrupt source
 * @unmask:		unmask an interrupt source
 * @eoi:		end of interrupt - chip level
 * @end:		end of interrupt - flow level
 * @set_affinity:	set the CPU affinity on SMP machines
 * @retrigger:		resend an IRQ to the CPU
 * @set_type:		set the flow type (IRQ_TYPE_LEVEL/etc.) of an IRQ
 * @set_wake:		enable/disable power-management wake-on of an IRQ
 *
 * @release:		release function solely used by UML
 * @typename:		obsoleted by name, kept as migration helper
 */
struct irq_chip {
	const char	*name;
	unsigned int	(*startup)(unsigned int irq);
	void		(*shutdown)(unsigned int irq);
	void		(*enable)(unsigned int irq);
	void		(*disable)(unsigned int irq);

	void		(*ack)(unsigned int irq);
	void		(*mask)(unsigned int irq);
	void		(*mask_ack)(unsigned int irq);
	void		(*unmask)(unsigned int irq);
	void		(*eoi)(unsigned int irq);

	void		(*end)(unsigned int irq);
	int		(*set_affinity)(unsigned int irq,
					const struct cpumask *dest);
	int		(*retrigger)(unsigned int irq);
	int		(*set_type)(unsigned int irq, unsigned int flow_type);
	int		(*set_wake)(unsigned int irq, unsigned int on);

	/* Currently used only by UML, might disappear one day. */
#ifdef CONFIG_IRQ_RELEASE_METHOD
	void		(*release)(unsigned int irq, void *dev_id);
#endif
	/*
	 * For compatibility, ->typename is copied into ->name.
	 * Will disappear.
	 */
	const char	*typename;
};
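
/*
 * A minimal, illustrative irq_chip for a hypothetical memory-mapped
 * controller; the FOO_* registers and foo_* names are assumptions for
 * the sketch, not a real driver. Only the callbacks the chosen flow
 * handler needs must be filled in (handle_level_irq(), for example,
 * masks/acks on entry and unmasks on exit):
 *
 *	static void foo_irq_mask(unsigned int irq)
 *	{
 *		writel(1 << (irq - FOO_IRQ_BASE), FOO_IMR_SET);
 *	}
 *
 *	static void foo_irq_unmask(unsigned int irq)
 *	{
 *		writel(1 << (irq - FOO_IRQ_BASE), FOO_IMR_CLR);
 *	}
 *
 *	static void foo_irq_ack(unsigned int irq)
 *	{
 *		writel(1 << (irq - FOO_IRQ_BASE), FOO_ISR);
 *	}
 *
 *	static struct irq_chip foo_irq_chip = {
 *		.name	= "FOO",
 *		.ack	= foo_irq_ack,
 *		.mask	= foo_irq_mask,
 *		.unmask	= foo_irq_unmask,
 *	};
 */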

struct timer_rand_state;
struct irq_2_iommu;
/**
 * struct irq_desc - interrupt descriptor
 * @irq:		interrupt number for this descriptor
 * @timer_rand_state:	pointer to timer rand state struct
 * @kstat_irqs:		irq stats per cpu
 * @irq_2_iommu:	iommu with this irq
 * @handle_irq:		highlevel irq-events handler [if NULL, __do_IRQ()]
 * @chip:		low level interrupt hardware access
 * @msi_desc:		MSI descriptor
 * @handler_data:	per-IRQ data for the irq_chip methods
 * @chip_data:		platform-specific per-chip private data for the chip
 *			methods, to allow shared chip implementations
 * @action:		the irq action chain
 * @status:		status information
 * @depth:		disable-depth, for nested irq_disable() calls
 * @wake_depth:		enable depth, for multiple set_irq_wake() callers
 * @irq_count:		stats field to detect stalled irqs
 * @last_unhandled:	aging timer for unhandled count
 * @irqs_unhandled:	stats field for spurious unhandled interrupts
 * @lock:		locking for SMP
 * @affinity:		IRQ affinity on SMP
 * @node:		node index useful for balancing
 * @pending_mask:	pending rebalanced interrupts
 * @threads_active:	number of irqaction threads currently running
 * @wait_for_threads:	wait queue for sync_irq to wait for threaded handlers
 * @dir:		/proc/irq/ procfs entry
 * @name:		flow handler name for /proc/interrupts output
 */
struct irq_desc {
	unsigned int		irq;
	struct timer_rand_state *timer_rand_state;
	unsigned int		*kstat_irqs;
#ifdef CONFIG_INTR_REMAP
	struct irq_2_iommu	*irq_2_iommu;
#endif
	irq_flow_handler_t	handle_irq;
	struct irq_chip		*chip;
	struct msi_desc		*msi_desc;
	void			*handler_data;
	void			*chip_data;
	struct irqaction	*action;	/* IRQ action list */
	unsigned int		status;		/* IRQ status */

	unsigned int		depth;		/* nested irq disables */
	unsigned int		wake_depth;	/* nested wake enables */
	unsigned int		irq_count;	/* For detecting broken IRQs */
	unsigned long		last_unhandled;	/* Aging timer for unhandled count */
	unsigned int		irqs_unhandled;
	spinlock_t		lock;
#ifdef CONFIG_SMP
	cpumask_var_t		affinity;
	unsigned int		node;
#ifdef CONFIG_GENERIC_PENDING_IRQ
	cpumask_var_t		pending_mask;
#endif
#endif
	atomic_t		threads_active;
	wait_queue_head_t	wait_for_threads;
#ifdef CONFIG_PROC_FS
	struct proc_dir_entry	*dir;
#endif
	const char		*name;
} ____cacheline_internodealigned_in_smp;
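
/*
 * Illustrative sketch: core code reaches a descriptor through
 * irq_to_desc() (from linux/irqnr.h, included above) and takes
 * desc->lock before inspecting or changing its state, e.g.:
 *
 *	struct irq_desc *desc = irq_to_desc(irq);
 *	unsigned long flags;
 *	int disabled;
 *
 *	spin_lock_irqsave(&desc->lock, flags);
 *	disabled = desc->status & IRQ_DISABLED;
 *	spin_unlock_irqrestore(&desc->lock, flags);
 */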

extern void arch_init_copy_chip_data(struct irq_desc *old_desc,
					struct irq_desc *desc, int node);
extern void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc);

#ifndef CONFIG_SPARSE_IRQ
extern struct irq_desc irq_desc[NR_IRQS];
#endif

#ifdef CONFIG_NUMA_IRQ_DESC
extern struct irq_desc *move_irq_desc(struct irq_desc *old_desc, int node);
#else
static inline struct irq_desc *move_irq_desc(struct irq_desc *desc, int node)
{
	return desc;
}
#endif

extern struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node);

/*
 * Migration helpers for obsolete names; they will go away:
 */
#define hw_interrupt_type	irq_chip
#define no_irq_type		no_irq_chip
typedef struct irq_desc		irq_desc_t;

/*
 * Pick up the arch-dependent methods:
 */
#include <asm/hw_irq.h>

extern int setup_irq(unsigned int irq, struct irqaction *new);
extern void remove_irq(unsigned int irq, struct irqaction *act);

#ifdef CONFIG_GENERIC_HARDIRQS

#ifdef CONFIG_SMP

#ifdef CONFIG_GENERIC_PENDING_IRQ

void move_native_irq(int irq);
void move_masked_irq(int irq);

#else /* CONFIG_GENERIC_PENDING_IRQ */

static inline void move_irq(int irq)
{
}

static inline void move_native_irq(int irq)
{
}

static inline void move_masked_irq(int irq)
{
}

#endif /* CONFIG_GENERIC_PENDING_IRQ */

#else /* CONFIG_SMP */

#define move_native_irq(x)
#define move_masked_irq(x)

#endif /* CONFIG_SMP */

extern int no_irq_affinity;

static inline int irq_balancing_disabled(unsigned int irq)
{
	struct irq_desc *desc;

	desc = irq_to_desc(irq);
	return desc->status & IRQ_NO_BALANCING_MASK;
}

/* Handle irq action chains: */
extern irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action);

/*
 * Built-in IRQ handlers for various IRQ types,
 * callable via desc->handle_irq()
 */
extern void handle_level_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_edge_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_simple_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_percpu_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc);

/*
 * Monolithic do_IRQ implementation.
 */
#ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
extern unsigned int __do_IRQ(unsigned int irq);
#endif

/*
 * Architectures call this to let the generic IRQ layer
 * handle an interrupt. If the descriptor is attached to an
 * irqchip-style controller then we call its ->handle_irq() flow
 * handler; if it is attached to an irqtype-style controller we
 * fall back to __do_IRQ().
 */
static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *desc)
{
#ifdef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
	desc->handle_irq(irq, desc);
#else
	if (likely(desc->handle_irq))
		desc->handle_irq(irq, desc);
	else
		__do_IRQ(irq);
#endif
}

static inline void generic_handle_irq(unsigned int irq)
{
	generic_handle_irq_desc(irq, irq_to_desc(irq));
}
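
/*
 * Illustrative sketch of the entry path described above, modeled
 * loosely on the ARM asm_do_IRQ() glue; irq_enter()/irq_exit() come
 * from linux/hardirq.h and the function name is an assumption:
 *
 *	asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
 *	{
 *		struct pt_regs *old_regs = set_irq_regs(regs);
 *
 *		irq_enter();
 *		generic_handle_irq(irq);
 *		irq_exit();
 *		set_irq_regs(old_regs);
 *	}
 */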

/* Handling of unhandled and spurious interrupts: */
extern void note_interrupt(unsigned int irq, struct irq_desc *desc,
			   irqreturn_t action_ret);

/* Resending of interrupts: */
void check_irq_resend(struct irq_desc *desc, unsigned int irq);

/* Enable/disable irq debugging output: */
extern int noirqdebug_setup(char *str);

/* Checks whether the interrupt can be requested by request_irq(): */
extern int can_request_irq(unsigned int irq, unsigned long irqflags);

/* Dummy irq-chip implementations: */
extern struct irq_chip no_irq_chip;
extern struct irq_chip dummy_irq_chip;

extern void
set_irq_chip_and_handler(unsigned int irq, struct irq_chip *chip,
			 irq_flow_handler_t handle);
extern void
set_irq_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
			      irq_flow_handler_t handle, const char *name);

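/*
 * Typical (illustrative) wiring in platform setup code: bind a chip,
 * such as the hypothetical foo_irq_chip sketched earlier, together
 * with the flow handler matching the line's trigger mode:
 *
 *	for (irq = FOO_IRQ_BASE; irq < FOO_IRQ_BASE + FOO_NR_IRQS; irq++)
 *		set_irq_chip_and_handler(irq, &foo_irq_chip,
 *					 handle_level_irq);
 */
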
extern void
__set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
		  const char *name);

/* caller has locked the irq_desc and both params are valid */
static inline void __set_irq_handler_unlocked(int irq,
					      irq_flow_handler_t handler)
{
	struct irq_desc *desc;

	desc = irq_to_desc(irq);
	desc->handle_irq = handler;
}

/*
 * Set a highlevel flow handler for a given IRQ:
 */
static inline void
set_irq_handler(unsigned int irq, irq_flow_handler_t handle)
{
	__set_irq_handler(irq, handle, 0, NULL);
}

/*
 * Set a highlevel chained flow handler for a given IRQ.
 * (a chained handler is automatically enabled and set to
 *  IRQ_NOREQUEST and IRQ_NOPROBE)
 */
static inline void
set_irq_chained_handler(unsigned int irq,
			irq_flow_handler_t handle)
{
	__set_irq_handler(irq, handle, 1, NULL);
}
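
/*
 * Illustrative chained demultiplexer for a hypothetical secondary
 * controller hanging off one parent line; FOO_PENDING and the other
 * foo_* names are assumptions. The flow handler reads the pending
 * register and feeds each asserted child line back into the generic
 * layer:
 *
 *	static void foo_demux_handler(unsigned int irq, struct irq_desc *desc)
 *	{
 *		u32 pending;
 *
 *		desc->chip->mask_ack(irq);
 *		pending = readl(FOO_PENDING);
 *		while (pending) {
 *			unsigned int bit = __ffs(pending);
 *
 *			generic_handle_irq(FOO_IRQ_BASE + bit);
 *			pending &= ~(1 << bit);
 *		}
 *		desc->chip->unmask(irq);
 *	}
 *
 *	set_irq_chained_handler(parent_irq, foo_demux_handler);
 */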

extern void set_irq_noprobe(unsigned int irq);
extern void set_irq_probe(unsigned int irq);

/* Handle dynamic irq creation and destruction */
extern unsigned int create_irq_nr(unsigned int irq_want, int node);
extern int create_irq(void);
extern void destroy_irq(unsigned int irq);
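
/*
 * Illustrative use of the dynamic helpers, in the spirit of MSI-style
 * code (error handling abbreviated, foo_irq_chip is hypothetical):
 *
 *	int irq = create_irq();
 *
 *	if (irq < 0)
 *		return irq;
 *	set_irq_chip_and_handler(irq, &foo_irq_chip, handle_edge_irq);
 *
 * and later, when the vector is torn down:
 *
 *	destroy_irq(irq);
 */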

/* Test to see if a driver has successfully requested an irq */
static inline int irq_has_action(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	return desc->action != NULL;
}

/* Dynamic irq helper functions */
extern void dynamic_irq_init(unsigned int irq);
extern void dynamic_irq_cleanup(unsigned int irq);

/* Set/get chip/data for an IRQ: */
extern int set_irq_chip(unsigned int irq, struct irq_chip *chip);
extern int set_irq_data(unsigned int irq, void *data);
extern int set_irq_chip_data(unsigned int irq, void *data);
extern int set_irq_type(unsigned int irq, unsigned int type);
extern int set_irq_msi(unsigned int irq, struct msi_desc *entry);

#define get_irq_chip(irq)	(irq_to_desc(irq)->chip)
#define get_irq_chip_data(irq)	(irq_to_desc(irq)->chip_data)
#define get_irq_data(irq)	(irq_to_desc(irq)->handler_data)
#define get_irq_msi(irq)	(irq_to_desc(irq)->msi_desc)
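
/*
 * Illustrative pattern: platform code can stash a per-controller
 * private structure at setup time and retrieve it in the irq_chip
 * callbacks instead of relying on globals ("struct foo_priv" and the
 * FOO_EOI offset are hypothetical):
 *
 *	set_irq_chip_data(irq, priv);
 *
 *	static void foo_irq_eoi(unsigned int irq)
 *	{
 *		struct foo_priv *priv = get_irq_chip_data(irq);
 *
 *		writel(1, priv->base + FOO_EOI);
 *	}
 */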

#define get_irq_desc_chip(desc)		((desc)->chip)
#define get_irq_desc_chip_data(desc)	((desc)->chip_data)
#define get_irq_desc_data(desc)		((desc)->handler_data)
#define get_irq_desc_msi(desc)		((desc)->msi_desc)

#endif /* CONFIG_GENERIC_HARDIRQS */

#endif /* !CONFIG_S390 */

#ifdef CONFIG_SMP
/**
 * alloc_desc_masks - allocate cpumasks for irq_desc
 * @desc:	pointer to irq_desc struct
 * @node:	node which will be handling the cpumasks
 * @boot:	true if called during early boot (allocates with GFP_NOWAIT)
 *
 * Allocates affinity and pending_mask cpumask if required.
 * Returns true if successful (or not required).
 */
static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
				    bool boot)
{
	gfp_t gfp = GFP_ATOMIC;

	if (boot)
		gfp = GFP_NOWAIT;

#ifdef CONFIG_CPUMASK_OFFSTACK
	if (!alloc_cpumask_var_node(&desc->affinity, gfp, node))
		return false;

#ifdef CONFIG_GENERIC_PENDING_IRQ
	if (!alloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
		free_cpumask_var(desc->affinity);
		return false;
	}
#endif
#endif
	return true;
}
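
/*
 * Sketch (an assumption, simplified) of how the irq core would use
 * the helpers above when constructing a descriptor:
 *
 *	if (!alloc_desc_masks(desc, node, false))
 *		return NULL;
 *	init_desc_masks(desc);
 */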

static inline void init_desc_masks(struct irq_desc *desc)
{
	cpumask_setall(desc->affinity);
#ifdef CONFIG_GENERIC_PENDING_IRQ
	cpumask_clear(desc->pending_mask);
#endif
}

/**
 * init_copy_desc_masks - copy cpumasks for irq_desc
 * @old_desc:	pointer to old irq_desc struct
 * @new_desc:	pointer to new irq_desc struct
 *
 * Ensures affinity and pending_mask are copied to new irq_desc.
 * If !CONFIG_CPUMASK_OFFSTACK the cpumasks are embedded in the
 * irq_desc struct so the copy is redundant.
 */

static inline void init_copy_desc_masks(struct irq_desc *old_desc,
					struct irq_desc *new_desc)
{
#ifdef CONFIG_CPUMASK_OFFSTACK
	cpumask_copy(new_desc->affinity, old_desc->affinity);

#ifdef CONFIG_GENERIC_PENDING_IRQ
	cpumask_copy(new_desc->pending_mask, old_desc->pending_mask);
#endif
#endif
}

static inline void free_desc_masks(struct irq_desc *old_desc,
				   struct irq_desc *new_desc)
{
	free_cpumask_var(old_desc->affinity);

#ifdef CONFIG_GENERIC_PENDING_IRQ
	free_cpumask_var(old_desc->pending_mask);
#endif
}

#else /* !CONFIG_SMP */

static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
				    bool boot)
{
	return true;
}

static inline void init_desc_masks(struct irq_desc *desc)
{
}

static inline void init_copy_desc_masks(struct irq_desc *old_desc,
					struct irq_desc *new_desc)
{
}

static inline void free_desc_masks(struct irq_desc *old_desc,
				   struct irq_desc *new_desc)
{
}
#endif /* CONFIG_SMP */

#endif /* _LINUX_IRQ_H */