/*
 * Support for virtual IRQ subgroups.
 *
 * Copyright (C) 2010 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#define pr_fmt(fmt) "intc: " fmt

#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include "internals.h"

static struct intc_map_entry intc_irq_xlate[INTC_NR_IRQS];

struct intc_virq_list {
	unsigned int irq;
	struct intc_virq_list *next;
};

#define for_each_virq(entry, head) \
	for (entry = head; entry; entry = entry->next)

/*
 * Tags for the radix tree
 */
#define INTC_TAG_VIRQ_NEEDS_ALLOC	0

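/*
 * Record the enum ID and owning controller descriptor for @irq in the
 * xlate table, under intc_big_lock.
 */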
void intc_irq_xlate_set(unsigned int irq, intc_enum id, struct intc_desc_int *d)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&intc_big_lock, flags);
	intc_irq_xlate[irq].enum_id = id;
	intc_irq_xlate[irq].desc = d;
	raw_spin_unlock_irqrestore(&intc_big_lock, flags);
}

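/* Fetch the xlate table entry for @irq. */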
struct intc_map_entry *intc_irq_xlate_get(unsigned int irq)
{
	return intc_irq_xlate + irq;
}

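/*
 * Translate a (controller name, enum ID) pair into a Linux IRQ number.
 * Returns -1 if no mapping exists, or if the enum ID refers to a
 * subgroup VIRQ that has not been allocated yet.
 *
 * A hypothetical caller (the chip name and enum ID below are
 * illustrative only):
 *
 *	irq = intc_irq_lookup("sh7786-intc", TMU0);
 *	if (irq >= 0)
 *		ret = request_irq(irq, tmu_handler, 0, "tmu", dev);
 */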
int intc_irq_lookup(const char *chipname, intc_enum enum_id)
{
	struct intc_map_entry *ptr;
	struct intc_desc_int *d;
	int irq = -1;

	list_for_each_entry(d, &intc_list, list) {
		int tagged;

		if (strcmp(d->chip.name, chipname) != 0)
			continue;

		/*
		 * Catch early lookups for subgroup VIRQs that have not
		 * yet been allocated an IRQ. This already includes a
		 * fast-path out if the tree is untagged, so there is no
		 * need to explicitly test the root tree.
		 */
		tagged = radix_tree_tag_get(&d->tree, enum_id,
					    INTC_TAG_VIRQ_NEEDS_ALLOC);
		if (unlikely(tagged))
			break;

		ptr = radix_tree_lookup(&d->tree, enum_id);
		if (ptr) {
			irq = ptr - intc_irq_xlate;
			break;
		}
	}

	return irq;
}
EXPORT_SYMBOL_GPL(intc_irq_lookup);

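/*
 * Link @virq onto the list of subgroup VIRQs hanging off the parent
 * IRQ's handler data, skipping duplicates. Runs under the descriptor
 * lock, hence the GFP_ATOMIC allocation.
 */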
static int add_virq_to_pirq(unsigned int irq, unsigned int virq)
{
	struct intc_virq_list **last, *entry;
	struct irq_data *data = irq_get_irq_data(irq);

	/* scan for duplicates */
	last = (struct intc_virq_list **)&data->handler_data;
	for_each_virq(entry, data->handler_data) {
		if (entry->irq == virq)
			return 0;
		last = &entry->next;
	}

	entry = kzalloc(sizeof(struct intc_virq_list), GFP_ATOMIC);
	if (!entry) {
		pr_err("can't allocate VIRQ mapping for %d\n", virq);
		return -ENOMEM;
	}

	entry->irq = virq;

	*last = entry;

	return 0;
}

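/*
 * Chained handler for a parent IRQ: mask and ack the parent, poll each
 * registered VIRQ's source bit in the subgroup register, and hand the
 * asserted sources to generic_handle_irq() before unmasking again.
 */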
static void intc_virq_handler(unsigned int irq, struct irq_desc *desc)
{
	struct irq_data *data = irq_get_irq_data(irq);
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	struct intc_virq_list *entry, *vlist = irq_data_get_irq_handler_data(data);
	struct intc_desc_int *d = get_intc_desc(irq);

	chip->irq_mask_ack(data);

	for_each_virq(entry, vlist) {
		unsigned long addr, handle;

		handle = (unsigned long)irq_get_handler_data(entry->irq);
		addr = INTC_REG(d, _INTC_ADDR_E(handle), 0);

		if (intc_reg_fns[_INTC_FN(handle)](addr, handle, 0))
			generic_handle_irq(entry->irq);
	}

	chip->irq_unmask(data);
}

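/*
 * Pack an access handle for subgroup member @index: a 1-bit test
 * operation on the subgroup register, with bits numbered from the
 * MSB end.
 */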
static unsigned long __init intc_subgroup_data(struct intc_subgroup *subgroup,
					       struct intc_desc_int *d,
					       unsigned int index)
{
	unsigned int fn = REG_FN_TEST_BASE + (subgroup->reg_width >> 3) - 1;

	return _INTC_MK(fn, MODE_ENABLE_REG, intc_get_reg(d, subgroup->reg),
			0, 1, (subgroup->reg_width - 1) - index);
}

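/*
 * Seed the radix tree with an entry per subgroup member, keyed by
 * enum ID and tagged INTC_TAG_VIRQ_NEEDS_ALLOC so that the actual
 * VIRQ allocation can be deferred to intc_subgroup_map().
 */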
static void __init intc_subgroup_init_one(struct intc_desc *desc,
					  struct intc_desc_int *d,
					  struct intc_subgroup *subgroup)
{
	struct intc_map_entry *mapped;
	unsigned int pirq;
	unsigned long flags;
	int i;

	mapped = radix_tree_lookup(&d->tree, subgroup->parent_id);
	if (!mapped) {
		WARN_ON(1);
		return;
	}

	pirq = mapped - intc_irq_xlate;

	raw_spin_lock_irqsave(&d->lock, flags);

	for (i = 0; i < ARRAY_SIZE(subgroup->enum_ids); i++) {
		struct intc_subgroup_entry *entry;
		int err;

		if (!subgroup->enum_ids[i])
			continue;

		entry = kmalloc(sizeof(*entry), GFP_NOWAIT);
		if (!entry)
			break;

		entry->pirq = pirq;
		entry->enum_id = subgroup->enum_ids[i];
		entry->handle = intc_subgroup_data(subgroup, d, i);

		err = radix_tree_insert(&d->tree, entry->enum_id, entry);
		if (unlikely(err < 0))
			break;

		radix_tree_tag_set(&d->tree, entry->enum_id,
				   INTC_TAG_VIRQ_NEEDS_ALLOC);
	}

	raw_spin_unlock_irqrestore(&d->lock, flags);
}

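/* Register every subgroup described by the hardware descriptor. */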
void __init intc_subgroup_init(struct intc_desc *desc, struct intc_desc_int *d)
{
	int i;

	if (!desc->hw.subgroups)
		return;

	for (i = 0; i < desc->hw.nr_subgroups; i++)
		intc_subgroup_init_one(desc, d, desc->hw.subgroups + i);
}

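/*
 * Walk the entries still tagged for allocation, create a VIRQ for
 * each, wire it up as a chained child of its parent IRQ, and replace
 * the placeholder radix tree entry with the final xlate mapping.
 */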
static void __init intc_subgroup_map(struct intc_desc_int *d)
{
	struct intc_subgroup_entry *entries[32];
	unsigned long flags;
	unsigned int nr_found;
	int i;

	raw_spin_lock_irqsave(&d->lock, flags);

restart:
	nr_found = radix_tree_gang_lookup_tag_slot(&d->tree,
			(void ***)entries, 0, ARRAY_SIZE(entries),
			INTC_TAG_VIRQ_NEEDS_ALLOC);

	for (i = 0; i < nr_found; i++) {
		struct intc_subgroup_entry *entry;
		int irq;

		entry = radix_tree_deref_slot((void **)entries[i]);
		if (unlikely(!entry))
			continue;
		if (radix_tree_deref_retry(entry))
			goto restart;

		irq = create_irq();
		if (unlikely(irq < 0)) {
			pr_err("no more free IRQs, bailing..\n");
			break;
		}

		pr_info("Setting up a chained VIRQ from %d -> %d\n",
			irq, entry->pirq);

		intc_irq_xlate_set(irq, entry->enum_id, d);

		irq_set_chip_and_handler_name(irq, irq_get_chip(entry->pirq),
					      handle_simple_irq, "virq");
		irq_set_chip_data(irq, irq_get_chip_data(entry->pirq));

		irq_set_handler_data(irq, (void *)entry->handle);

		/*
		 * Set the virtual IRQ as non-threadable.
		 */
		irq_set_nothread(irq);

		irq_set_chained_handler(entry->pirq, intc_virq_handler);
		add_virq_to_pirq(entry->pirq, irq);

		radix_tree_tag_clear(&d->tree, entry->enum_id,
				     INTC_TAG_VIRQ_NEEDS_ALLOC);
		radix_tree_replace_slot((void **)entries[i],
					&intc_irq_xlate[irq]);
	}

	raw_spin_unlock_irqrestore(&d->lock, flags);
}

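/*
 * Once all controllers have been registered, map any subgroup VIRQs
 * that are still pending allocation.
 */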
void __init intc_finalize(void)
{
	struct intc_desc_int *d;

	list_for_each_entry(d, &intc_list, list)
		if (radix_tree_tagged(&d->tree, INTC_TAG_VIRQ_NEEDS_ALLOC))
			intc_subgroup_map(d);
}