#ifndef _LINUX_IRQ_H
#define _LINUX_IRQ_H

/*
 * Please do not include this file in generic code. There is currently
 * no requirement for any architecture to implement anything held
 * within this file.
 *
 * Thanks. --rmk
 */

#include <linux/smp.h>
#include <linux/linkage.h>
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/irqreturn.h>
#include <linux/irqnr.h>
#include <linux/errno.h>
#include <linux/topology.h>
#include <linux/wait.h>

#include <asm/irq.h>
#include <asm/ptrace.h>
#include <asm/irq_regs.h>

struct seq_file;
struct module;
struct irq_desc;
struct irq_data;
typedef void (*irq_flow_handler_t)(unsigned int irq,
				   struct irq_desc *desc);
typedef void (*irq_preflow_handler_t)(struct irq_data *data);

/*
 * IRQ line status.
 *
 * Bits 0-7 are the same as the IRQF_* bits in linux/interrupt.h
 *
 * IRQ_TYPE_NONE		- default, unspecified type
 * IRQ_TYPE_EDGE_RISING		- rising edge triggered
 * IRQ_TYPE_EDGE_FALLING	- falling edge triggered
 * IRQ_TYPE_EDGE_BOTH		- rising and falling edge triggered
 * IRQ_TYPE_LEVEL_HIGH		- high level triggered
 * IRQ_TYPE_LEVEL_LOW		- low level triggered
 * IRQ_TYPE_LEVEL_MASK		- Mask to filter out the level bits
 * IRQ_TYPE_SENSE_MASK		- Mask for all the above bits
 * IRQ_TYPE_DEFAULT		- For use by some PICs to ask irq_set_type
 *				  to setup the HW to a sane default (used
 *				  by irqdomain map() callbacks to synchronize
 *				  the HW state and SW flags for a newly
 *				  allocated descriptor).
 *
 * IRQ_TYPE_PROBE		- Special flag for probing in progress
 *
 * Bits which can be modified via irq_set/clear/modify_status_flags()
 * IRQ_LEVEL			- Interrupt is level type. Will be also
 *				  updated in the code when the above trigger
 *				  bits are modified via irq_set_irq_type()
 * IRQ_PER_CPU			- Mark an interrupt PER_CPU. Will protect
 *				  it from affinity setting
 * IRQ_NOPROBE			- Interrupt cannot be probed by autoprobing
 * IRQ_NOREQUEST		- Interrupt cannot be requested via
 *				  request_irq()
 * IRQ_NOTHREAD			- Interrupt cannot be threaded
 * IRQ_NOAUTOEN			- Interrupt is not automatically enabled in
 *				  request/setup_irq()
 * IRQ_NO_BALANCING		- Interrupt cannot be balanced (affinity set)
 * IRQ_MOVE_PCNTXT		- Interrupt can be migrated from process context
 * IRQ_NESTED_THREAD		- Interrupt nests into another thread
 * IRQ_PER_CPU_DEVID		- Dev_id is a per-cpu variable
 * IRQ_IS_POLLED		- Always polled by another interrupt. Exclude
 *				  it from the spurious interrupt detection
 *				  mechanism and from core side polling.
 */
enum {
	IRQ_TYPE_NONE		= 0x00000000,
	IRQ_TYPE_EDGE_RISING	= 0x00000001,
	IRQ_TYPE_EDGE_FALLING	= 0x00000002,
	IRQ_TYPE_EDGE_BOTH	= (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING),
	IRQ_TYPE_LEVEL_HIGH	= 0x00000004,
	IRQ_TYPE_LEVEL_LOW	= 0x00000008,
	IRQ_TYPE_LEVEL_MASK	= (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH),
	IRQ_TYPE_SENSE_MASK	= 0x0000000f,
	IRQ_TYPE_DEFAULT	= IRQ_TYPE_SENSE_MASK,

	IRQ_TYPE_PROBE		= 0x00000010,

	IRQ_LEVEL		= (1 << 8),
	IRQ_PER_CPU		= (1 << 9),
	IRQ_NOPROBE		= (1 << 10),
	IRQ_NOREQUEST		= (1 << 11),
	IRQ_NOAUTOEN		= (1 << 12),
	IRQ_NO_BALANCING	= (1 << 13),
	IRQ_MOVE_PCNTXT		= (1 << 14),
	IRQ_NESTED_THREAD	= (1 << 15),
	IRQ_NOTHREAD		= (1 << 16),
	IRQ_PER_CPU_DEVID	= (1 << 17),
	IRQ_IS_POLLED		= (1 << 18),
};
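
/*
 * Example (illustrative sketch, not part of the original header): a board
 * setup path might pick a trigger type and adjust the status flags before
 * a driver requests the line. The irq number below is hypothetical.
 *
 *	irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
 *	irq_set_status_flags(irq, IRQ_NOAUTOEN | IRQ_NOPROBE);
 *
 * Only bits contained in IRQF_MODIFY_MASK (defined below) can be changed
 * this way; see irq_modify_status() further down.
 */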

#define IRQF_MODIFY_MASK	\
	(IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \
	 IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \
	 IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \
	 IRQ_IS_POLLED)

#define IRQ_NO_BALANCING_MASK	(IRQ_PER_CPU | IRQ_NO_BALANCING)

/*
 * Return value for chip->irq_set_affinity()
 *
 * IRQ_SET_MASK_OK	- OK, core updates irq_data.affinity
 * IRQ_SET_MASK_OK_NOCOPY - OK, chip did update irq_data.affinity
 */
enum {
	IRQ_SET_MASK_OK = 0,
	IRQ_SET_MASK_OK_NOCOPY,
};
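
/*
 * Example (sketch, not from the original file): a chip callback that
 * programs the routing itself and also updates irq_data.affinity on its
 * own would return IRQ_SET_MASK_OK_NOCOPY so the core skips the copy.
 * my_irq_set_affinity() and my_hw_route() are made-up driver helpers.
 *
 *	static int my_irq_set_affinity(struct irq_data *d,
 *				       const struct cpumask *mask, bool force)
 *	{
 *		my_hw_route(irqd_to_hwirq(d), cpumask_first(mask));
 *		cpumask_copy(d->affinity, mask);
 *		return IRQ_SET_MASK_OK_NOCOPY;
 *	}
 */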

struct msi_desc;
struct irq_domain;

/**
 * struct irq_data - per irq and irq chip data passed down to chip functions
 * @mask:		precomputed bitmask for accessing the chip registers
 * @irq:		interrupt number
 * @hwirq:		hardware interrupt number, local to the interrupt domain
 * @node:		node index useful for balancing
 * @state_use_accessors: status information for irq chip functions.
 *			Use accessor functions to deal with it
 * @chip:		low level interrupt hardware access
 * @domain:		Interrupt translation domain; responsible for mapping
 *			between hwirq number and linux irq number.
 * @handler_data:	per-IRQ data for the irq_chip methods
 * @chip_data:		platform-specific per-chip private data for the chip
 *			methods, to allow shared chip implementations
 * @msi_desc:		MSI descriptor
 * @affinity:		IRQ affinity on SMP
 *
 * The fields here need to overlay the ones in irq_desc until we have
 * cleaned up the direct references and switched everything over to
 * irq_data.
 */
struct irq_data {
	u32			mask;
	unsigned int		irq;
	unsigned long		hwirq;
	unsigned int		node;
	unsigned int		state_use_accessors;
	struct irq_chip		*chip;
	struct irq_domain	*domain;
	void			*handler_data;
	void			*chip_data;
	struct msi_desc		*msi_desc;
	cpumask_var_t		affinity;
};

/*
 * Bit masks for irq_data.state
 *
 * IRQD_TRIGGER_MASK		- Mask for the trigger type bits
 * IRQD_SETAFFINITY_PENDING	- Affinity setting is pending
 * IRQD_NO_BALANCING		- Balancing disabled for this IRQ
 * IRQD_PER_CPU			- Interrupt is per cpu
 * IRQD_AFFINITY_SET		- Interrupt affinity was set
 * IRQD_LEVEL			- Interrupt is level triggered
 * IRQD_WAKEUP_STATE		- Interrupt is configured for wakeup
 *				  from suspend
 * IRQD_MOVE_PCNTXT		- Interrupt can be moved in process
 *				  context
 * IRQD_IRQ_DISABLED		- Disabled state of the interrupt
 * IRQD_IRQ_MASKED		- Masked state of the interrupt
 * IRQD_IRQ_INPROGRESS		- In progress state of the interrupt
 */
enum {
	IRQD_TRIGGER_MASK		= 0xf,
	IRQD_SETAFFINITY_PENDING	= (1 << 8),
	IRQD_NO_BALANCING		= (1 << 10),
	IRQD_PER_CPU			= (1 << 11),
	IRQD_AFFINITY_SET		= (1 << 12),
	IRQD_LEVEL			= (1 << 13),
	IRQD_WAKEUP_STATE		= (1 << 14),
	IRQD_MOVE_PCNTXT		= (1 << 15),
	IRQD_IRQ_DISABLED		= (1 << 16),
	IRQD_IRQ_MASKED			= (1 << 17),
	IRQD_IRQ_INPROGRESS		= (1 << 18),
};

static inline bool irqd_is_setaffinity_pending(struct irq_data *d)
{
	return d->state_use_accessors & IRQD_SETAFFINITY_PENDING;
}

static inline bool irqd_is_per_cpu(struct irq_data *d)
{
	return d->state_use_accessors & IRQD_PER_CPU;
}

static inline bool irqd_can_balance(struct irq_data *d)
{
	return !(d->state_use_accessors & (IRQD_PER_CPU | IRQD_NO_BALANCING));
}

static inline bool irqd_affinity_was_set(struct irq_data *d)
{
	return d->state_use_accessors & IRQD_AFFINITY_SET;
}

static inline void irqd_mark_affinity_was_set(struct irq_data *d)
{
	d->state_use_accessors |= IRQD_AFFINITY_SET;
}

static inline u32 irqd_get_trigger_type(struct irq_data *d)
{
	return d->state_use_accessors & IRQD_TRIGGER_MASK;
}

/*
 * Must only be called inside irq_chip.irq_set_type() functions.
 */
static inline void irqd_set_trigger_type(struct irq_data *d, u32 type)
{
	d->state_use_accessors &= ~IRQD_TRIGGER_MASK;
	d->state_use_accessors |= type & IRQD_TRIGGER_MASK;
}
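
/*
 * Example (sketch, not part of the original header): an
 * irq_chip.irq_set_type() implementation is the one place that may update
 * the trigger bits directly. my_hw_set_polarity() is a made-up hardware
 * helper.
 *
 *	static int my_irq_set_type(struct irq_data *d, unsigned int type)
 *	{
 *		if (type & ~IRQ_TYPE_EDGE_BOTH)
 *			return -EINVAL;
 *		my_hw_set_polarity(irqd_to_hwirq(d), type);
 *		irqd_set_trigger_type(d, type);
 *		return 0;
 *	}
 */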

static inline bool irqd_is_level_type(struct irq_data *d)
{
	return d->state_use_accessors & IRQD_LEVEL;
}

static inline bool irqd_is_wakeup_set(struct irq_data *d)
{
	return d->state_use_accessors & IRQD_WAKEUP_STATE;
}

static inline bool irqd_can_move_in_process_context(struct irq_data *d)
{
	return d->state_use_accessors & IRQD_MOVE_PCNTXT;
}

static inline bool irqd_irq_disabled(struct irq_data *d)
{
	return d->state_use_accessors & IRQD_IRQ_DISABLED;
}

static inline bool irqd_irq_masked(struct irq_data *d)
{
	return d->state_use_accessors & IRQD_IRQ_MASKED;
}

static inline bool irqd_irq_inprogress(struct irq_data *d)
{
	return d->state_use_accessors & IRQD_IRQ_INPROGRESS;
}

/*
 * Functions for chained handlers which can be enabled/disabled by the
 * standard disable_irq/enable_irq calls. Must be called with
 * irq_desc->lock held.
 */
static inline void irqd_set_chained_irq_inprogress(struct irq_data *d)
{
	d->state_use_accessors |= IRQD_IRQ_INPROGRESS;
}

static inline void irqd_clr_chained_irq_inprogress(struct irq_data *d)
{
	d->state_use_accessors &= ~IRQD_IRQ_INPROGRESS;
}

static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
{
	return d->hwirq;
}

/**
 * struct irq_chip - hardware interrupt chip descriptor
 *
 * @name:		name for /proc/interrupts
 * @irq_startup:	start up the interrupt (defaults to ->irq_enable if NULL)
 * @irq_shutdown:	shut down the interrupt (defaults to ->irq_disable if NULL)
 * @irq_enable:		enable the interrupt (defaults to ->irq_unmask if NULL)
 * @irq_disable:	disable the interrupt
 * @irq_ack:		start of a new interrupt
 * @irq_mask:		mask an interrupt source
 * @irq_mask_ack:	ack and mask an interrupt source
 * @irq_unmask:		unmask an interrupt source
 * @irq_eoi:		end of interrupt
 * @irq_set_affinity:	set the CPU affinity on SMP machines
 * @irq_retrigger:	resend an IRQ to the CPU
 * @irq_set_type:	set the flow type (IRQ_TYPE_LEVEL/etc.) of an IRQ
 * @irq_set_wake:	enable/disable power-management wake-on of an IRQ
 * @irq_bus_lock:	function to lock access to slow bus (i2c) chips
 * @irq_bus_sync_unlock:function to sync and unlock slow bus (i2c) chips
 * @irq_cpu_online:	configure an interrupt source for a secondary CPU
 * @irq_cpu_offline:	un-configure an interrupt source for a secondary CPU
 * @irq_suspend:	function called from core code on suspend once per chip
 * @irq_resume:		function called from core code on resume once per chip
 * @irq_pm_shutdown:	function called from core code on shutdown once per chip
 * @irq_calc_mask:	Optional function to set irq_data.mask for special cases
 * @irq_print_chip:	optional to print special chip info in show_interrupts
 * @flags:		chip specific flags
 */
struct irq_chip {
	const char	*name;
	unsigned int	(*irq_startup)(struct irq_data *data);
	void		(*irq_shutdown)(struct irq_data *data);
	void		(*irq_enable)(struct irq_data *data);
	void		(*irq_disable)(struct irq_data *data);

	void		(*irq_ack)(struct irq_data *data);
	void		(*irq_mask)(struct irq_data *data);
	void		(*irq_mask_ack)(struct irq_data *data);
	void		(*irq_unmask)(struct irq_data *data);
	void		(*irq_eoi)(struct irq_data *data);

	int		(*irq_set_affinity)(struct irq_data *data, const struct cpumask *dest, bool force);
	int		(*irq_retrigger)(struct irq_data *data);
	int		(*irq_set_type)(struct irq_data *data, unsigned int flow_type);
	int		(*irq_set_wake)(struct irq_data *data, unsigned int on);

	void		(*irq_bus_lock)(struct irq_data *data);
	void		(*irq_bus_sync_unlock)(struct irq_data *data);

	void		(*irq_cpu_online)(struct irq_data *data);
	void		(*irq_cpu_offline)(struct irq_data *data);

	void		(*irq_suspend)(struct irq_data *data);
	void		(*irq_resume)(struct irq_data *data);
	void		(*irq_pm_shutdown)(struct irq_data *data);

	void		(*irq_calc_mask)(struct irq_data *data);

	void		(*irq_print_chip)(struct irq_data *data, struct seq_file *p);

	unsigned long	flags;
};

/*
 * irq_chip specific flags
 *
 * IRQCHIP_SET_TYPE_MASKED:	Mask before calling chip.irq_set_type()
 * IRQCHIP_EOI_IF_HANDLED:	Only issue irq_eoi() when irq was handled
 * IRQCHIP_MASK_ON_SUSPEND:	Mask non wake irqs in the suspend path
 * IRQCHIP_ONOFFLINE_ENABLED:	Only call irq_on/off_line callbacks
 *				when irq enabled
 * IRQCHIP_SKIP_SET_WAKE:	Skip chip.irq_set_wake() for this irq chip
 * IRQCHIP_ONESHOT_SAFE:	One shot does not require mask/unmask
 */
enum {
	IRQCHIP_SET_TYPE_MASKED		= (1 << 0),
	IRQCHIP_EOI_IF_HANDLED		= (1 << 1),
	IRQCHIP_MASK_ON_SUSPEND		= (1 << 2),
	IRQCHIP_ONOFFLINE_ENABLED	= (1 << 3),
	IRQCHIP_SKIP_SET_WAKE		= (1 << 4),
	IRQCHIP_ONESHOT_SAFE		= (1 << 5),
};
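
/*
 * Example (sketch, not part of the original header): a minimal chip for a
 * simple level triggered controller. The my_irq_* callbacks are assumed to
 * exist in the driver; only the callbacks that the chosen flow handler
 * actually needs have to be filled in.
 *
 *	static struct irq_chip my_irq_chip = {
 *		.name		= "MYPIC",
 *		.irq_mask	= my_irq_mask,
 *		.irq_unmask	= my_irq_unmask,
 *		.irq_ack	= my_irq_ack,
 *		.irq_set_type	= my_irq_set_type,
 *		.flags		= IRQCHIP_SET_TYPE_MASKED,
 *	};
 */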

/* This include will go away once we isolated irq_desc usage to core code */
#include <linux/irqdesc.h>

/*
 * Pick up the arch-dependent methods:
 */
#include <asm/hw_irq.h>

#ifndef NR_IRQS_LEGACY
# define NR_IRQS_LEGACY 0
#endif

#ifndef ARCH_IRQ_INIT_FLAGS
# define ARCH_IRQ_INIT_FLAGS	0
#endif

#define IRQ_DEFAULT_INIT_FLAGS	ARCH_IRQ_INIT_FLAGS

struct irqaction;
extern int setup_irq(unsigned int irq, struct irqaction *new);
extern void remove_irq(unsigned int irq, struct irqaction *act);
extern int setup_percpu_irq(unsigned int irq, struct irqaction *new);
extern void remove_percpu_irq(unsigned int irq, struct irqaction *act);

extern void irq_cpu_online(void);
extern void irq_cpu_offline(void);
extern int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *cpumask);

#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ)
void irq_move_irq(struct irq_data *data);
void irq_move_masked_irq(struct irq_data *data);
#else
static inline void irq_move_irq(struct irq_data *data) { }
static inline void irq_move_masked_irq(struct irq_data *data) { }
#endif

extern int no_irq_affinity;

#ifdef CONFIG_HARDIRQS_SW_RESEND
int irq_set_parent(int irq, int parent_irq);
#else
static inline int irq_set_parent(int irq, int parent_irq)
{
	return 0;
}
#endif

/*
 * Built-in IRQ handlers for various IRQ types,
 * callable via desc->handle_irq()
 */
extern void handle_level_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_edge_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_simple_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_percpu_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_nested_irq(unsigned int irq);

/* Handling of unhandled and spurious interrupts: */
extern void note_interrupt(unsigned int irq, struct irq_desc *desc,
			   irqreturn_t action_ret);


/* Enable/disable irq debugging output: */
extern int noirqdebug_setup(char *str);

/* Checks whether the interrupt can be requested by request_irq(): */
extern int can_request_irq(unsigned int irq, unsigned long irqflags);

/* Dummy irq-chip implementations: */
extern struct irq_chip no_irq_chip;
extern struct irq_chip dummy_irq_chip;

extern void
irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
			      irq_flow_handler_t handle, const char *name);

static inline void irq_set_chip_and_handler(unsigned int irq, struct irq_chip *chip,
					    irq_flow_handler_t handle)
{
	irq_set_chip_and_handler_name(irq, chip, handle, NULL);
}
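
/*
 * Example (sketch): binding the hypothetical chip from the sketch above to
 * a linux irq number together with the level flow handler. my_ctrl is a
 * made-up driver pointer; irq_set_chip_data() is declared further down.
 *
 *	irq_set_chip_and_handler(irq, &my_irq_chip, handle_level_irq);
 *	irq_set_chip_data(irq, my_ctrl);
 */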

extern int irq_set_percpu_devid(unsigned int irq);

extern void
__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
		  const char *name);

static inline void
irq_set_handler(unsigned int irq, irq_flow_handler_t handle)
{
	__irq_set_handler(irq, handle, 0, NULL);
}

/*
 * Set a highlevel chained flow handler for a given IRQ.
 * (a chained handler is automatically enabled and set to
 *  IRQ_NOREQUEST, IRQ_NOPROBE, and IRQ_NOTHREAD)
 */
static inline void
irq_set_chained_handler(unsigned int irq, irq_flow_handler_t handle)
{
	__irq_set_handler(irq, handle, 1, NULL);
}
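
/*
 * Example (sketch, not from the original file): a chained demultiplex
 * handler for a secondary controller hanging off one parent line.
 * my_read_pending() and my_irq_base are made up. A real handler would
 * typically also bracket the loop with the chained_irq_enter() and
 * chained_irq_exit() helpers from <linux/irqchip/chained_irq.h>.
 *
 *	static void my_demux_handler(unsigned int irq, struct irq_desc *desc)
 *	{
 *		u32 pending = my_read_pending();
 *
 *		while (pending) {
 *			unsigned int bit = __ffs(pending);
 *
 *			generic_handle_irq(my_irq_base + bit);
 *			pending &= ~(1U << bit);
 *		}
 *	}
 *
 *	irq_set_chained_handler(parent_irq, my_demux_handler);
 */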

void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set);

static inline void irq_set_status_flags(unsigned int irq, unsigned long set)
{
	irq_modify_status(irq, 0, set);
}

static inline void irq_clear_status_flags(unsigned int irq, unsigned long clr)
{
	irq_modify_status(irq, clr, 0);
}

static inline void irq_set_noprobe(unsigned int irq)
{
	irq_modify_status(irq, 0, IRQ_NOPROBE);
}

static inline void irq_set_probe(unsigned int irq)
{
	irq_modify_status(irq, IRQ_NOPROBE, 0);
}

static inline void irq_set_nothread(unsigned int irq)
{
	irq_modify_status(irq, 0, IRQ_NOTHREAD);
}

static inline void irq_set_thread(unsigned int irq)
{
	irq_modify_status(irq, IRQ_NOTHREAD, 0);
}

static inline void irq_set_nested_thread(unsigned int irq, bool nest)
{
	if (nest)
		irq_set_status_flags(irq, IRQ_NESTED_THREAD);
	else
		irq_clear_status_flags(irq, IRQ_NESTED_THREAD);
}

static inline void irq_set_percpu_devid_flags(unsigned int irq)
{
	irq_set_status_flags(irq,
			     IRQ_NOAUTOEN | IRQ_PER_CPU | IRQ_NOTHREAD |
			     IRQ_NOPROBE | IRQ_PER_CPU_DEVID);
}
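
/*
 * Example (sketch): a per-cpu interrupt such as a local timer is marked
 * with the per-cpu devid flags and given the matching flow handler; users
 * then request it with request_percpu_irq() from linux/interrupt.h.
 * my_percpu_chip is hypothetical.
 *
 *	irq_set_percpu_devid(irq);
 *	irq_set_chip_and_handler(irq, &my_percpu_chip, handle_percpu_devid_irq);
 */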

/* Handle dynamic irq creation and destruction */
extern unsigned int create_irq_nr(unsigned int irq_want, int node);
extern unsigned int __create_irqs(unsigned int from, unsigned int count,
				  int node);
extern int create_irq(void);
extern void destroy_irq(unsigned int irq);
extern void destroy_irqs(unsigned int irq, unsigned int count);

/*
 * Dynamic irq helper functions. Obsolete. Use irq_alloc_desc* and
 * irq_free_desc instead.
 */
extern void dynamic_irq_cleanup(unsigned int irq);
static inline void dynamic_irq_init(unsigned int irq)
{
	dynamic_irq_cleanup(irq);
}

/* Set/get chip/data for an IRQ: */
extern int irq_set_chip(unsigned int irq, struct irq_chip *chip);
extern int irq_set_handler_data(unsigned int irq, void *data);
extern int irq_set_chip_data(unsigned int irq, void *data);
extern int irq_set_irq_type(unsigned int irq, unsigned int type);
extern int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry);
extern int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset,
				struct msi_desc *entry);
extern struct irq_data *irq_get_irq_data(unsigned int irq);

static inline struct irq_chip *irq_get_chip(unsigned int irq)
{
	struct irq_data *d = irq_get_irq_data(irq);
	return d ? d->chip : NULL;
}

static inline struct irq_chip *irq_data_get_irq_chip(struct irq_data *d)
{
	return d->chip;
}

static inline void *irq_get_chip_data(unsigned int irq)
{
	struct irq_data *d = irq_get_irq_data(irq);
	return d ? d->chip_data : NULL;
}

static inline void *irq_data_get_irq_chip_data(struct irq_data *d)
{
	return d->chip_data;
}

static inline void *irq_get_handler_data(unsigned int irq)
{
	struct irq_data *d = irq_get_irq_data(irq);
	return d ? d->handler_data : NULL;
}

static inline void *irq_data_get_irq_handler_data(struct irq_data *d)
{
	return d->handler_data;
}

static inline struct msi_desc *irq_get_msi_desc(unsigned int irq)
{
	struct irq_data *d = irq_get_irq_data(irq);
	return d ? d->msi_desc : NULL;
}

static inline struct msi_desc *irq_data_get_msi(struct irq_data *d)
{
	return d->msi_desc;
}

static inline u32 irq_get_trigger_type(unsigned int irq)
{
	struct irq_data *d = irq_get_irq_data(irq);
	return d ? irqd_get_trigger_type(d) : 0;
}
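
/*
 * Example (sketch): chip_data usually carries per-controller state so the
 * irq_chip callbacks can reach their registers. struct my_ctrl and
 * MY_MASK_REG are hypothetical; writel() comes from the io headers.
 * irq_set_chip_data(irq, ctrl) is called at setup time, and a callback
 * then looks the pointer up again:
 *
 *	static void my_irq_mask(struct irq_data *d)
 *	{
 *		struct my_ctrl *ctrl = irq_data_get_irq_chip_data(d);
 *
 *		writel(1 << irqd_to_hwirq(d), ctrl->base + MY_MASK_REG);
 *	}
 */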

int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
		      struct module *owner);

/* use macros to avoid needing export.h for THIS_MODULE */
#define irq_alloc_descs(irq, from, cnt, node)	\
	__irq_alloc_descs(irq, from, cnt, node, THIS_MODULE)

#define irq_alloc_desc(node)			\
	irq_alloc_descs(-1, 0, 1, node)

#define irq_alloc_desc_at(at, node)		\
	irq_alloc_descs(at, at, 1, node)

#define irq_alloc_desc_from(from, node)		\
	irq_alloc_descs(-1, from, 1, node)

#define irq_alloc_descs_from(from, cnt, node)	\
	irq_alloc_descs(-1, from, cnt, node)

void irq_free_descs(unsigned int irq, unsigned int cnt);
int irq_reserve_irqs(unsigned int from, unsigned int cnt);

static inline void irq_free_desc(unsigned int irq)
{
	irq_free_descs(irq, 1);
}

static inline int irq_reserve_irq(unsigned int irq)
{
	return irq_reserve_irqs(irq, 1);
}
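
/*
 * Example (sketch): a driver that needs a contiguous range of linux irq
 * numbers for eight hypothetical hardware lines, allocated on the local
 * node and freed again on teardown:
 *
 *	int irq_base = irq_alloc_descs(-1, 0, 8, numa_node_id());
 *
 *	if (irq_base < 0)
 *		return irq_base;
 *	...
 *	irq_free_descs(irq_base, 8);
 */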

#ifndef irq_reg_writel
# define irq_reg_writel(val, addr)	writel(val, addr)
#endif
#ifndef irq_reg_readl
# define irq_reg_readl(addr)		readl(addr)
#endif

/**
 * struct irq_chip_regs - register offsets for struct irq_chip_generic
 * @enable:	Enable register offset to reg_base
 * @disable:	Disable register offset to reg_base
 * @mask:	Mask register offset to reg_base
 * @ack:	Ack register offset to reg_base
 * @eoi:	Eoi register offset to reg_base
 * @type:	Type configuration register offset to reg_base
 * @polarity:	Polarity configuration register offset to reg_base
 */
struct irq_chip_regs {
	unsigned long		enable;
	unsigned long		disable;
	unsigned long		mask;
	unsigned long		ack;
	unsigned long		eoi;
	unsigned long		type;
	unsigned long		polarity;
};

/**
 * struct irq_chip_type - Generic interrupt chip instance for a flow type
 * @chip:		The real interrupt chip which provides the callbacks
 * @regs:		Register offsets for this chip
 * @handler:		Flow handler associated with this chip
 * @type:		Chip can handle these flow types
 * @mask_cache_priv:	Cached mask register private to the chip type
 * @mask_cache:		Pointer to cached mask register
 *
 * An irq_chip_generic can have several instances of irq_chip_type when
 * it requires different functions and register offsets for different
 * flow types.
 */
struct irq_chip_type {
	struct irq_chip		chip;
	struct irq_chip_regs	regs;
	irq_flow_handler_t	handler;
	u32			type;
	u32			mask_cache_priv;
	u32			*mask_cache;
};

/**
 * struct irq_chip_generic - Generic irq chip data structure
 * @lock:		Lock to protect register and cache data access
 * @reg_base:		Register base address (virtual)
 * @irq_base:		Interrupt base nr for this chip
 * @irq_cnt:		Number of interrupts handled by this chip
 * @mask_cache:		Cached mask register shared between all chip types
 * @type_cache:		Cached type register
 * @polarity_cache:	Cached polarity register
 * @wake_enabled:	Interrupt can wakeup from suspend
 * @wake_active:	Interrupt is marked as a wakeup from suspend source
 * @num_ct:		Number of available irq_chip_type instances (usually 1)
 * @private:		Private data for non generic chip callbacks
 * @installed:		bitfield to denote installed interrupts
 * @unused:		bitfield to denote unused interrupts
 * @domain:		irq domain pointer
 * @list:		List head for keeping track of instances
 * @chip_types:		Array of interrupt irq_chip_types
 *
 * Note that irq_chip_generic can have multiple irq_chip_type
 * implementations which can be associated with a particular irq line of
 * an irq_chip_generic instance. That allows sharing and protecting
 * state in an irq_chip_generic instance when we need to implement
 * different flow mechanisms (level/edge) for it.
 */
struct irq_chip_generic {
	raw_spinlock_t		lock;
	void __iomem		*reg_base;
	unsigned int		irq_base;
	unsigned int		irq_cnt;
	u32			mask_cache;
	u32			type_cache;
	u32			polarity_cache;
	u32			wake_enabled;
	u32			wake_active;
	unsigned int		num_ct;
	void			*private;
	unsigned long		installed;
	unsigned long		unused;
	struct irq_domain	*domain;
	struct list_head	list;
	struct irq_chip_type	chip_types[0];
};

/**
 * enum irq_gc_flags - Initialization flags for generic irq chips
 * @IRQ_GC_INIT_MASK_CACHE:	Initialize the mask_cache by reading mask reg
 * @IRQ_GC_INIT_NESTED_LOCK:	Set the lock class of the irqs to nested for
 *				irq chips which need to call irq_set_wake() on
 *				the parent irq. Usually GPIO implementations
 * @IRQ_GC_MASK_CACHE_PER_TYPE:	Mask cache is chip type private
 * @IRQ_GC_NO_MASK:		Do not calculate irq_data->mask
 */
enum irq_gc_flags {
	IRQ_GC_INIT_MASK_CACHE		= 1 << 0,
	IRQ_GC_INIT_NESTED_LOCK		= 1 << 1,
	IRQ_GC_MASK_CACHE_PER_TYPE	= 1 << 2,
	IRQ_GC_NO_MASK			= 1 << 3,
};

/*
 * struct irq_domain_chip_generic - Generic irq chip data structure for irq domains
 * @irqs_per_chip:	Number of interrupts per chip
 * @num_chips:		Number of chips
 * @irq_flags_to_set:	IRQ* flags to set on irq setup
 * @irq_flags_to_clear:	IRQ* flags to clear on irq setup
 * @gc_flags:		Generic chip specific setup flags
 * @gc:			Array of pointers to generic interrupt chips
 */
struct irq_domain_chip_generic {
	unsigned int		irqs_per_chip;
	unsigned int		num_chips;
	unsigned int		irq_flags_to_clear;
	unsigned int		irq_flags_to_set;
	enum irq_gc_flags	gc_flags;
	struct irq_chip_generic	*gc[0];
};

/* Generic chip callback functions */
void irq_gc_noop(struct irq_data *d);
void irq_gc_mask_disable_reg(struct irq_data *d);
void irq_gc_mask_set_bit(struct irq_data *d);
void irq_gc_mask_clr_bit(struct irq_data *d);
void irq_gc_unmask_enable_reg(struct irq_data *d);
void irq_gc_ack_set_bit(struct irq_data *d);
void irq_gc_ack_clr_bit(struct irq_data *d);
void irq_gc_mask_disable_reg_and_ack(struct irq_data *d);
void irq_gc_eoi(struct irq_data *d);
int irq_gc_set_wake(struct irq_data *d, unsigned int on);

/* Setup functions for irq_chip_generic */
struct irq_chip_generic *
irq_alloc_generic_chip(const char *name, int nr_ct, unsigned int irq_base,
		       void __iomem *reg_base, irq_flow_handler_t handler);
void irq_setup_generic_chip(struct irq_chip_generic *gc, u32 msk,
			    enum irq_gc_flags flags, unsigned int clr,
			    unsigned int set);
int irq_setup_alt_chip(struct irq_data *d, unsigned int type);
void irq_remove_generic_chip(struct irq_chip_generic *gc, u32 msk,
			     unsigned int clr, unsigned int set);
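
/*
 * Example (sketch, not from the original file): setting up a generic chip
 * for a bank of 32 interrupts with a single chip type. irq_base, reg_base
 * and MY_MASK_OFFSET are made up; IRQ_MSK() is defined further down in
 * this file.
 *
 *	struct irq_chip_generic *gc;
 *	struct irq_chip_type *ct;
 *
 *	gc = irq_alloc_generic_chip("MYINTC", 1, irq_base, reg_base,
 *				    handle_level_irq);
 *	ct = gc->chip_types;
 *	ct->chip.irq_mask = irq_gc_mask_set_bit;
 *	ct->chip.irq_unmask = irq_gc_mask_clr_bit;
 *	ct->regs.mask = MY_MASK_OFFSET;
 *	irq_setup_generic_chip(gc, IRQ_MSK(32), IRQ_GC_INIT_MASK_CACHE,
 *			       IRQ_NOREQUEST, 0);
 */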

struct irq_chip_generic *irq_get_domain_generic_chip(struct irq_domain *d, unsigned int hw_irq);
int irq_alloc_domain_generic_chips(struct irq_domain *d, int irqs_per_chip,
				   int num_ct, const char *name,
				   irq_flow_handler_t handler,
				   unsigned int clr, unsigned int set,
				   enum irq_gc_flags flags);


static inline struct irq_chip_type *irq_data_get_chip_type(struct irq_data *d)
{
	return container_of(d->chip, struct irq_chip_type, chip);
}

#define IRQ_MSK(n) (u32)((n) < 32 ? ((1 << (n)) - 1) : UINT_MAX)

#ifdef CONFIG_SMP
static inline void irq_gc_lock(struct irq_chip_generic *gc)
{
	raw_spin_lock(&gc->lock);
}

static inline void irq_gc_unlock(struct irq_chip_generic *gc)
{
	raw_spin_unlock(&gc->lock);
}
#else
static inline void irq_gc_lock(struct irq_chip_generic *gc) { }
static inline void irq_gc_unlock(struct irq_chip_generic *gc) { }
#endif

#endif /* _LINUX_IRQ_H */