/*
 * Copyright 2008-2011 IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/xics.h>

#include "wsp.h"
#include "ics.h"


/* WSP ICS */

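/*
 * Per-instance state for one source controller. Each instance owns a
 * contiguous range of hardware interrupt numbers (hwirq_start up to
 * hwirq_start + count) and a bitmap tracking which of them are allocated.
 */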
struct wsp_ics {
	struct ics ics;
	struct device_node *dn;
	void __iomem *regs;
	spinlock_t lock;
	unsigned long *bitmap;
	u32 chip_id;
	u32 lsi_base;
	u32 lsi_count;
	u64 hwirq_start;
	u64 count;
#ifdef CONFIG_SMP
	int *hwirq_cpu_map;
#endif
};

#define to_wsp_ics(ics)	container_of(ics, struct wsp_ics, ics)

#define INT_SRC_LAYER_BUID_REG(base)	((base) + 0x00)
#define IODA_TBL_ADDR_REG(base)		((base) + 0x18)
#define IODA_TBL_DATA_REG(base)		((base) + 0x20)
#define XIVE_UPDATE_REG(base)		((base) + 0x28)
#define ICS_INT_CAPS_REG(base)		((base) + 0x30)

#define TBL_AUTO_INCREMENT	((1UL << 63) | (1UL << 15))
#define TBL_SELECT_XIST		(1UL << 48)
#define TBL_SELECT_XIVT		(1UL << 49)

#define IODA_IRQ(irq)		((irq) & (0x7FFULL))	/* HRM 5.1.3.4 */

#define XIST_REQUIRED		0x8
#define XIST_REJECTED		0x4
#define XIST_PRESENTED		0x2
#define XIST_PENDING		0x1

#define XIVE_SERVER_SHIFT	42
#define XIVE_SERVER_MASK	0xFFFFULL
#define XIVE_PRIORITY_MASK	0xFFULL
#define XIVE_PRIORITY_SHIFT	32
#define XIVE_WRITE_ENABLE	(1ULL << 63)

/*
 * The docs refer to a 6 bit field called ChipID, which consists of a
 * 3 bit NodeID and a 3 bit ChipID. On WSP the ChipID is always zero
 * so we ignore it, and everywhere we use "chip id" in this code we
 * mean the NodeID.
 */
#define WSP_ICS_CHIP_SHIFT		17


static struct wsp_ics *ics_list;
static int num_ics;

/* ICS Source controller accessors */

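/*
 * Read an interrupt's XIVE (its server/priority routing entry) by
 * selecting the XIVT entry for this irq in the IODA table address
 * register and reading it back through the data register, under the
 * per-ICS lock.
 */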
static u64 wsp_ics_get_xive(struct wsp_ics *ics, unsigned int irq)
{
	unsigned long flags;
	u64 xive;

	spin_lock_irqsave(&ics->lock, flags);
	out_be64(IODA_TBL_ADDR_REG(ics->regs), TBL_SELECT_XIVT | IODA_IRQ(irq));
	xive = in_be64(IODA_TBL_DATA_REG(ics->regs));
	spin_unlock_irqrestore(&ics->lock, flags);

	return xive;
}

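/*
 * Write a XIVE through the update register: the low bits carry the
 * interrupt number and the write-enable bit commits the new
 * server/priority in one store.
 */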
static void wsp_ics_set_xive(struct wsp_ics *ics, unsigned int irq, u64 xive)
{
	xive &= ~XIVE_ADDR_MASK;
	xive |= (irq & XIVE_ADDR_MASK);
	xive |= XIVE_WRITE_ENABLE;

	out_be64(XIVE_UPDATE_REG(ics->regs), xive);
}

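/* Helpers to splice a new server or priority field into a XIVE value */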
static u64 xive_set_server(u64 xive, unsigned int server)
{
	u64 mask = ~(XIVE_SERVER_MASK << XIVE_SERVER_SHIFT);

	xive &= mask;
	xive |= (server & XIVE_SERVER_MASK) << XIVE_SERVER_SHIFT;

	return xive;
}

static u64 xive_set_priority(u64 xive, unsigned int priority)
{
	u64 mask = ~(XIVE_PRIORITY_MASK << XIVE_PRIORITY_SHIFT);

	xive &= mask;
	xive |= (priority & XIVE_PRIORITY_MASK) << XIVE_PRIORITY_SHIFT;

	return xive;
}


#ifdef CONFIG_SMP
/* Find logical CPUs within mask on a given chip and store result in ret */
void cpus_on_chip(int chip_id, cpumask_t *mask, cpumask_t *ret)
{
	int cpu, chip;
	struct device_node *cpu_dn, *dn;
	const u32 *prop;

	cpumask_clear(ret);
	for_each_cpu(cpu, mask) {
		cpu_dn = of_get_cpu_node(cpu, NULL);
		if (!cpu_dn)
			continue;

		prop = of_get_property(cpu_dn, "at-node", NULL);
		if (!prop) {
			of_node_put(cpu_dn);
			continue;
		}

		dn = of_find_node_by_phandle(*prop);
		of_node_put(cpu_dn);
		if (!dn)
			continue;

		chip = wsp_get_chip_id(dn);
		if (chip == chip_id)
			cpumask_set_cpu(cpu, ret);

		of_node_put(dn);
	}
}

/* Store a suitable CPU to handle a hwirq in the ics->hwirq_cpu_map cache */
static int cache_hwirq_map(struct wsp_ics *ics, unsigned int hwirq,
			   const cpumask_t *affinity)
{
	cpumask_var_t avail, newmask;
	int ret = -ENOMEM, cpu, cpu_rover = 0, target;
	int index = hwirq - ics->hwirq_start;
	unsigned int nodeid;

	BUG_ON(index < 0 || index >= ics->count);

	if (!ics->hwirq_cpu_map)
		return -ENOMEM;

	if (!distribute_irqs) {
		ics->hwirq_cpu_map[index] = xics_default_server;
		return 0;
	}

	/* Allocate needed CPU masks */
	if (!alloc_cpumask_var(&avail, GFP_KERNEL))
		goto ret;
	if (!alloc_cpumask_var(&newmask, GFP_KERNEL))
		goto freeavail;

	/* Find PBus attached to the source of this IRQ */
	nodeid = (hwirq >> WSP_ICS_CHIP_SHIFT) & 0x3; /* 12:14 */

	/* Find CPUs that could handle this IRQ */
	if (affinity)
		cpumask_and(avail, cpu_online_mask, affinity);
	else
		cpumask_copy(avail, cpu_online_mask);

	/* Narrow selection down to logical CPUs on the same chip */
	cpus_on_chip(nodeid, avail, newmask);

	/* Ensure we haven't narrowed it down to 0 */
	if (unlikely(cpumask_empty(newmask))) {
		if (unlikely(cpumask_empty(avail))) {
			ret = -1;
			goto out;
		}
		cpumask_copy(newmask, avail);
	}

	/* Choose a CPU out of those we narrowed it down to in round robin */
	target = hwirq % cpumask_weight(newmask);
	for_each_cpu(cpu, newmask) {
		if (cpu_rover++ >= target) {
			ics->hwirq_cpu_map[index] = get_hard_smp_processor_id(cpu);
			ret = 0;
			goto out;
		}
	}

	/* Shouldn't happen */
	WARN_ON(1);

out:
	free_cpumask_var(newmask);
freeavail:
	free_cpumask_var(avail);
ret:
	if (ret < 0) {
		ics->hwirq_cpu_map[index] = cpumask_first(cpu_online_mask);
		pr_warning("Error, falling back to routing hwirq 0x%x to CPU %i\n",
			   hwirq, ics->hwirq_cpu_map[index]);
	}
	return ret;
}

static void alloc_irq_map(struct wsp_ics *ics)
{
	int i;

	ics->hwirq_cpu_map = kmalloc(sizeof(int) * ics->count, GFP_KERNEL);
	if (!ics->hwirq_cpu_map) {
		pr_warning("Allocate hwirq_cpu_map failed, "
			   "IRQ balancing disabled\n");
		return;
	}

	for (i = 0; i < ics->count; i++)
		ics->hwirq_cpu_map[i] = xics_default_server;
}

static int get_irq_server(struct wsp_ics *ics, unsigned int hwirq)
{
	int index = hwirq - ics->hwirq_start;

	BUG_ON(index < 0 || index >= ics->count);

	if (!ics->hwirq_cpu_map)
		return xics_default_server;

	return ics->hwirq_cpu_map[index];
}
#else /* !CONFIG_SMP */
static int cache_hwirq_map(struct wsp_ics *ics, unsigned int hwirq,
			   const cpumask_t *affinity)
{
	return 0;
}

static int get_irq_server(struct wsp_ics *ics, unsigned int hwirq)
{
	return xics_default_server;
}

static void alloc_irq_map(struct wsp_ics *ics) { }
#endif

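/*
 * Unmask by routing the interrupt to its cached server at the default
 * priority. IPIs and the spurious vector are not ours to touch.
 */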
static void wsp_chip_unmask_irq(struct irq_data *d)
{
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
	struct wsp_ics *ics;
	int server;
	u64 xive;

	if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS)
		return;

	ics = d->chip_data;
	if (WARN_ON(!ics))
		return;

	server = get_irq_server(ics, hw_irq);

	xive = wsp_ics_get_xive(ics, hw_irq);
	xive = xive_set_server(xive, server);
	xive = xive_set_priority(xive, DEFAULT_PRIORITY);
	wsp_ics_set_xive(ics, hw_irq, xive);
}

static unsigned int wsp_chip_startup(struct irq_data *d)
{
	/* unmask it */
	wsp_chip_unmask_irq(d);
	return 0;
}

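/*
 * Mask an interrupt by rewriting its XIVE to point at the default
 * server with LOWEST_PRIORITY, which stops it being presented.
 */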
static void wsp_mask_real_irq(unsigned int hw_irq, struct wsp_ics *ics)
{
	u64 xive;

	if (hw_irq == XICS_IPI)
		return;

	if (WARN_ON(!ics))
		return;
	xive = wsp_ics_get_xive(ics, hw_irq);
	xive = xive_set_server(xive, xics_default_server);
	xive = xive_set_priority(xive, LOWEST_PRIORITY);
	wsp_ics_set_xive(ics, hw_irq, xive);
}

static void wsp_chip_mask_irq(struct irq_data *d)
{
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
	struct wsp_ics *ics = d->chip_data;

	if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS)
		return;

	wsp_mask_real_irq(hw_irq, ics);
}

static int wsp_chip_set_affinity(struct irq_data *d,
				 const struct cpumask *cpumask, bool force)
{
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
	struct wsp_ics *ics;
	int ret;
	u64 xive;

	if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS)
		return -1;

	ics = d->chip_data;
	if (WARN_ON(!ics))
		return -1;
	xive = wsp_ics_get_xive(ics, hw_irq);

	/*
	 * For the moment only implement delivery to all cpus or one cpu.
	 * Get current irq_server for the given irq
	 */
	ret = cache_hwirq_map(ics, hw_irq, cpumask);
	if (ret == -1) {
		char cpulist[128];
		cpumask_scnprintf(cpulist, sizeof(cpulist), cpumask);
		pr_warning("%s: No online cpus in the mask %s for irq %d\n",
			   __func__, cpulist, d->irq);
		return -1;
	} else if (ret == -ENOMEM) {
		pr_warning("%s: Out of memory\n", __func__);
		return -1;
	}

	xive = xive_set_server(xive, get_irq_server(ics, hw_irq));
	wsp_ics_set_xive(ics, hw_irq, xive);

	return IRQ_SET_MASK_OK;
}

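/*
 * irq_chip for WSP ICS sources; irq_eoi is patched in at boot time,
 * see wsp_init_irq() below.
 */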
static struct irq_chip wsp_irq_chip = {
	.name = "WSP ICS",
	.irq_startup = wsp_chip_startup,
	.irq_mask = wsp_chip_mask_irq,
	.irq_unmask = wsp_chip_unmask_irq,
	.irq_set_affinity = wsp_chip_set_affinity
};

static int wsp_ics_host_match(struct ics *ics, struct device_node *dn)
{
	/* All ICSs in the system implement a global irq number space,
	 * so match against them all. */
	return of_device_is_compatible(dn, "ibm,ppc-xics");
}

static int wsp_ics_match_hwirq(struct wsp_ics *wsp_ics, unsigned int hwirq)
{
	if (hwirq >= wsp_ics->hwirq_start &&
	    hwirq < wsp_ics->hwirq_start + wsp_ics->count)
		return 1;

	return 0;
}

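/* ics->map callback: wire up our irq_chip and reserve the hwirq's bit */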
static int wsp_ics_map(struct ics *ics, unsigned int virq)
{
	struct wsp_ics *wsp_ics = to_wsp_ics(ics);
	unsigned int hw_irq = virq_to_hw(virq);
	unsigned long flags;

	if (!wsp_ics_match_hwirq(wsp_ics, hw_irq))
		return -ENOENT;

	irq_set_chip_and_handler(virq, &wsp_irq_chip, handle_fasteoi_irq);

	irq_set_chip_data(virq, wsp_ics);

	spin_lock_irqsave(&wsp_ics->lock, flags);
	bitmap_allocate_region(wsp_ics->bitmap, hw_irq - wsp_ics->hwirq_start, 0);
	spin_unlock_irqrestore(&wsp_ics->lock, flags);

	return 0;
}

static void wsp_ics_mask_unknown(struct ics *ics, unsigned long hw_irq)
{
	struct wsp_ics *wsp_ics = to_wsp_ics(ics);

	if (!wsp_ics_match_hwirq(wsp_ics, hw_irq))
		return;

	pr_err("%s: IRQ %lu (real) is invalid, disabling it.\n", __func__, hw_irq);
	wsp_mask_real_irq(hw_irq, wsp_ics);
}

static long wsp_ics_get_server(struct ics *ics, unsigned long hw_irq)
{
	struct wsp_ics *wsp_ics = to_wsp_ics(ics);

	if (!wsp_ics_match_hwirq(wsp_ics, hw_irq))
		return -ENOENT;

	return get_irq_server(wsp_ics, hw_irq);
}

/* HW Number allocation API */

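/* Walk ics_list looking for the ICS that is dn's interrupt parent */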
static struct wsp_ics *wsp_ics_find_dn_ics(struct device_node *dn)
{
	struct device_node *iparent;
	int i;

	iparent = of_irq_find_parent(dn);
	if (!iparent) {
		pr_err("wsp_ics: Failed to find interrupt parent!\n");
		return NULL;
	}

	for (i = 0; i < num_ics; i++) {
		if (ics_list[i].dn == iparent)
			break;
	}

	if (i >= num_ics) {
		pr_err("wsp_ics: Unable to find parent bitmap!\n");
		return NULL;
	}

	return &ics_list[i];
}

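/*
 * Allocate an order-aligned block of num interrupts from this node's
 * ICS and return the first global hwirq number, or a negative error.
 */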
int wsp_ics_alloc_irq(struct device_node *dn, int num)
{
	struct wsp_ics *ics;
	int order, offset;

	ics = wsp_ics_find_dn_ics(dn);
	if (!ics)
		return -ENODEV;

	/* Fast, but overly strict if num isn't a power of two */
	order = get_count_order(num);

	spin_lock_irq(&ics->lock);
	offset = bitmap_find_free_region(ics->bitmap, ics->count, order);
	spin_unlock_irq(&ics->lock);

	if (offset < 0)
		return offset;

	return offset + ics->hwirq_start;
}

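/* Return a previously allocated hwirq to this node's ICS bitmap */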
void wsp_ics_free_irq(struct device_node *dn, unsigned int irq)
{
	struct wsp_ics *ics;

	ics = wsp_ics_find_dn_ics(dn);
	if (WARN_ON(!ics))
		return;

	spin_lock_irq(&ics->lock);
	/* The bitmap is indexed relative to the start of our hwirq range */
	bitmap_release_region(ics->bitmap, irq - ics->hwirq_start, 0);
	spin_unlock_irq(&ics->lock);
}

/* Initialisation */

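/*
 * Build the allocation bitmap: mark everything in use, then free the
 * ranges the device tree advertises in "available-ranges", and pin the
 * LSI range so it can never be handed out by wsp_ics_alloc_irq().
 */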
static int __init wsp_ics_bitmap_setup(struct wsp_ics *ics,
				       struct device_node *dn)
{
	int len, i, j, size;
	u32 start, count;
	const u32 *p;

	size = BITS_TO_LONGS(ics->count) * sizeof(long);
	ics->bitmap = kzalloc(size, GFP_KERNEL);
	if (!ics->bitmap) {
		pr_err("wsp_ics: ENOMEM allocating IRQ bitmap!\n");
		return -ENOMEM;
	}

	spin_lock_init(&ics->lock);

	p = of_get_property(dn, "available-ranges", &len);
	if (!p || !len) {
		/* FIXME this should be a WARN() once mambo is updated */
		pr_err("wsp_ics: No available-ranges defined for %s\n",
		       dn->full_name);
		return 0;
	}

	if (len % (2 * sizeof(u32)) != 0) {
		/* FIXME this should be a WARN() once mambo is updated */
		pr_err("wsp_ics: Invalid available-ranges for %s\n",
		       dn->full_name);
		return 0;
	}

	bitmap_fill(ics->bitmap, ics->count);

	for (i = 0; i < len / sizeof(u32); i += 2) {
		start = of_read_number(p + i, 1);
		count = of_read_number(p + i + 1, 1);

		pr_devel("%s: start: %d count: %d\n", __func__, start, count);

		if ((start + count) > (ics->hwirq_start + ics->count) ||
		    start < ics->hwirq_start) {
			pr_err("wsp_ics: Invalid range! -> %d to %d\n",
			       start, start + count);
			break;
		}

		for (j = 0; j < count; j++)
			bitmap_release_region(ics->bitmap,
					      (start + j) - ics->hwirq_start, 0);
	}

	/* Ensure LSIs are not available for allocation */
	bitmap_allocate_region(ics->bitmap, ics->lsi_base,
			       get_count_order(ics->lsi_count));

	return 0;
}

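/*
 * Probe one ICS node: map its registers, read the interrupt-ranges
 * property and capability registers, derive the LSI/MSI bases from the
 * BUID, sanity check the layout, mask every source and register the
 * controller with the XICS core.
 */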
static int __init wsp_ics_setup(struct wsp_ics *ics, struct device_node *dn)
{
	u32 lsi_buid, msi_buid, msi_base, msi_count;
	void __iomem *regs;
	const u32 *p;
	int rc, len, i;
	u64 caps, buid;

	p = of_get_property(dn, "interrupt-ranges", &len);
	if (!p || len < (2 * sizeof(u32))) {
		pr_err("wsp_ics: No/bad interrupt-ranges found on %s\n",
		       dn->full_name);
		return -ENOENT;
	}

	if (len > (2 * sizeof(u32))) {
		pr_err("wsp_ics: Multiple ics ranges not supported.\n");
		return -EINVAL;
	}

	regs = of_iomap(dn, 0);
	if (!regs) {
		pr_err("wsp_ics: of_iomap(%s) failed\n", dn->full_name);
		return -ENXIO;
	}

	ics->hwirq_start = of_read_number(p, 1);
	ics->count = of_read_number(p + 1, 1);
	ics->regs = regs;

	/* chip_id is unsigned, so check wsp_get_chip_id()'s return first */
	rc = wsp_get_chip_id(dn);
	if (WARN_ON(rc < 0))
		rc = 0;
	ics->chip_id = rc;

	/* Get some information about the critter */
	caps = in_be64(ICS_INT_CAPS_REG(ics->regs));
	buid = in_be64(INT_SRC_LAYER_BUID_REG(ics->regs));
	ics->lsi_count = caps >> 56;
	msi_count = (caps >> 44) & 0x7ff;

	/* Note: LSI BUID is 9 bits, but really only 3 are BUID and the
	 * rest is mixed in the interrupt number. We store the whole
	 * thing though
	 */
	lsi_buid = (buid >> 48) & 0x1ff;
	ics->lsi_base = (ics->chip_id << WSP_ICS_CHIP_SHIFT) | lsi_buid << 5;
	msi_buid = (buid >> 37) & 0x7;
	msi_base = (ics->chip_id << WSP_ICS_CHIP_SHIFT) | msi_buid << 11;

	pr_info("wsp_ics: Found %s\n", dn->full_name);
	pr_info("wsp_ics: irq range : 0x%06llx..0x%06llx\n",
		ics->hwirq_start, ics->hwirq_start + ics->count - 1);
	pr_info("wsp_ics: %4d LSIs : 0x%06x..0x%06x\n",
		ics->lsi_count, ics->lsi_base,
		ics->lsi_base + ics->lsi_count - 1);
	pr_info("wsp_ics: %4d MSIs : 0x%06x..0x%06x\n",
		msi_count, msi_base,
		msi_base + msi_count - 1);

	/* Let's check the HW config is sane */
	if (ics->lsi_base < ics->hwirq_start ||
	    (ics->lsi_base + ics->lsi_count) > (ics->hwirq_start + ics->count))
		pr_warning("wsp_ics: WARNING ! LSIs out of interrupt-ranges !\n");
	if (msi_base < ics->hwirq_start ||
	    (msi_base + msi_count) > (ics->hwirq_start + ics->count))
		pr_warning("wsp_ics: WARNING ! MSIs out of interrupt-ranges !\n");

	/* We don't check for overlap between LSI and MSI, which will happen
	 * if we use the same BUID, I'm not sure yet how legit that is.
	 */

	rc = wsp_ics_bitmap_setup(ics, dn);
	if (rc) {
		iounmap(regs);
		return rc;
	}

	ics->dn = of_node_get(dn);
	alloc_irq_map(ics);

	for (i = 0; i < ics->count; i++)
		wsp_mask_real_irq(ics->hwirq_start + i, ics);

	ics->ics.map = wsp_ics_map;
	ics->ics.mask_unknown = wsp_ics_mask_unknown;
	ics->ics.get_server = wsp_ics_get_server;
	ics->ics.host_match = wsp_ics_host_match;

	xics_register_ics(&ics->ics);

	return 0;
}

static void __init wsp_ics_set_default_server(void)
{
	struct device_node *np;
	u32 hwid;

	/* Find the server number for the boot cpu. */
	np = of_get_cpu_node(boot_cpuid, NULL);
	BUG_ON(!np);

	hwid = get_hard_smp_processor_id(boot_cpuid);

	pr_info("wsp_ics: default server is %#x, CPU %s\n", hwid, np->full_name);
	xics_default_server = hwid;

	of_node_put(np);
}

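/* Find and set up every ICS in the system, then hand them to XICS */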
static int __init wsp_ics_init(void)
{
	struct device_node *dn;
	struct wsp_ics *ics;
	int rc, found;

	wsp_ics_set_default_server();

	found = 0;
	for_each_compatible_node(dn, NULL, "ibm,ppc-xics")
		found++;

	if (found == 0) {
		pr_err("wsp_ics: No ICS's found!\n");
		return -ENODEV;
	}

	ics_list = kmalloc(sizeof(*ics) * found, GFP_KERNEL);
	if (!ics_list) {
		pr_err("wsp_ics: No memory for structs.\n");
		return -ENOMEM;
	}

	num_ics = 0;
	ics = ics_list;
	for_each_compatible_node(dn, NULL, "ibm,wsp-xics") {
		rc = wsp_ics_setup(ics, dn);
		if (rc == 0) {
			ics++;
			num_ics++;
		}
	}

	if (found != num_ics) {
		pr_err("wsp_ics: Failed setting up %d ICS's\n",
		       found - num_ics);
		return -1;
	}

	return 0;
}

void __init wsp_init_irq(void)
{
	wsp_ics_init();
	xics_init();

	/* We need to patch our irq chip's EOI to point to the right ICP */
	wsp_irq_chip.irq_eoi = icp_ops->eoi;
}

#ifdef CONFIG_PCI_MSI
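/*
 * MSI flavour of the irq_chip: same XIVE routing, but mask/unmask must
 * also flow through the generic MSI mask helpers.
 */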
static void wsp_ics_msi_unmask_irq(struct irq_data *d)
{
	wsp_chip_unmask_irq(d);
	unmask_msi_irq(d);
}

static unsigned int wsp_ics_msi_startup(struct irq_data *d)
{
	wsp_ics_msi_unmask_irq(d);
	return 0;
}

static void wsp_ics_msi_mask_irq(struct irq_data *d)
{
	mask_msi_irq(d);
	wsp_chip_mask_irq(d);
}

/*
 * We do it this way because we reassign the default EOI handling in
 * wsp_init_irq() above.
 */
static void wsp_ics_eoi(struct irq_data *data)
{
	wsp_irq_chip.irq_eoi(data);
}

static struct irq_chip wsp_ics_msi = {
	.name = "WSP ICS MSI",
	.irq_startup = wsp_ics_msi_startup,
	.irq_mask = wsp_ics_msi_mask_irq,
	.irq_unmask = wsp_ics_msi_unmask_irq,
	.irq_eoi = wsp_ics_eoi,
	.irq_set_affinity = wsp_chip_set_affinity
};

void wsp_ics_set_msi_chip(unsigned int irq)
{
	irq_set_chip(irq, &wsp_ics_msi);
}

void wsp_ics_set_std_chip(unsigned int irq)
{
	irq_set_chip(irq, &wsp_irq_chip);
}
#endif /* CONFIG_PCI_MSI */