[ARM] 4108/2:  Allow multiple GIC interrupt controllers in a system

The current implementation assumes that only one GIC is present in the
system. However, there are platforms with more than one cascaded
interrupt controller (RealView/EB MPCore, for example).
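
The new functions take a GIC number, so a platform with a second,
cascaded controller would register it along the lines of the sketch
below. The base address macros, IRQ_GIC1_CASCADE and the choice of 64
as the start of the secondary controller's Linux IRQ range are purely
illustrative, and MAX_GIC_NR has to be defined (to 2 here) before
gic.c is compiled:

	/* in a platform header seen by arch/arm/common/gic.c */
	#define MAX_GIC_NR	2

	/* in the board's IRQ initialisation code */
	#include <asm/hardware/gic.h>

	static void __init board_init_irq(void)
	{
		/* primary GIC, Linux IRQs from 29 upwards as before */
		gic_dist_init(0, GIC0_DIST_BASE, 29);
		gic_cpu_init(0, GIC0_CPU_BASE);

		/* secondary GIC, hardware interrupt 32 maps to Linux IRQ 64 */
		gic_dist_init(1, GIC1_DIST_BASE, 64);
		gic_cpu_init(1, GIC1_CPU_BASE);

		/* route the secondary GIC through one input of the primary */
		gic_cascade_irq(1, IRQ_GIC1_CASCADE);
	}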

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
index 09b9d1b..4deece5 100644
--- a/arch/arm/common/gic.c
+++ b/arch/arm/common/gic.c
@@ -14,7 +14,9 @@
  *
  * o There is one CPU Interface per CPU, which sends interrupts sent
  *   by the Distributor, and interrupts generated locally, to the
- *   associated CPU.
+ *   associated CPU. The base address of the CPU interface is usually
+ *   aliased so that the same address points to different chips depending
+ *   on the CPU it is accessed from.
  *
  * Note that IRQs 0-31 are special - they are local to each CPU.
  * As such, the enable set/clear, pending set/clear and active bit
@@ -31,10 +33,38 @@
 #include <asm/mach/irq.h>
 #include <asm/hardware/gic.h>
 
-static void __iomem *gic_dist_base;
-static void __iomem *gic_cpu_base;
 static DEFINE_SPINLOCK(irq_controller_lock);
 
+struct gic_chip_data {
+	unsigned int irq_offset;
+	void __iomem *dist_base;
+	void __iomem *cpu_base;
+};
+
+#ifndef MAX_GIC_NR
+#define MAX_GIC_NR	1
+#endif
+
+static struct gic_chip_data gic_data[MAX_GIC_NR];
+
+static inline void __iomem *gic_dist_base(unsigned int irq)
+{
+	struct gic_chip_data *gic_data = get_irq_chip_data(irq);
+	return gic_data->dist_base;
+}
+
+static inline void __iomem *gic_cpu_base(unsigned int irq)
+{
+	struct gic_chip_data *gic_data = get_irq_chip_data(irq);
+	return gic_data->cpu_base;
+}
+
+static inline unsigned int gic_irq(unsigned int irq)
+{
+	struct gic_chip_data *gic_data = get_irq_chip_data(irq);
+	return irq - gic_data->irq_offset;
+}
+
 /*
  * Routines to acknowledge, disable and enable interrupts
  *
@@ -55,8 +85,8 @@
 	u32 mask = 1 << (irq % 32);
 
 	spin_lock(&irq_controller_lock);
-	writel(mask, gic_dist_base + GIC_DIST_ENABLE_CLEAR + (irq / 32) * 4);
-	writel(irq, gic_cpu_base + GIC_CPU_EOI);
+	writel(mask, gic_dist_base(irq) + GIC_DIST_ENABLE_CLEAR + (gic_irq(irq) / 32) * 4);
+	writel(gic_irq(irq), gic_cpu_base(irq) + GIC_CPU_EOI);
 	spin_unlock(&irq_controller_lock);
 }
 
@@ -65,7 +95,7 @@
 	u32 mask = 1 << (irq % 32);
 
 	spin_lock(&irq_controller_lock);
-	writel(mask, gic_dist_base + GIC_DIST_ENABLE_CLEAR + (irq / 32) * 4);
+	writel(mask, gic_dist_base(irq) + GIC_DIST_ENABLE_CLEAR + (gic_irq(irq) / 32) * 4);
 	spin_unlock(&irq_controller_lock);
 }
 
@@ -74,14 +104,14 @@
 	u32 mask = 1 << (irq % 32);
 
 	spin_lock(&irq_controller_lock);
-	writel(mask, gic_dist_base + GIC_DIST_ENABLE_SET + (irq / 32) * 4);
+	writel(mask, gic_dist_base(irq) + GIC_DIST_ENABLE_SET + (gic_irq(irq) / 32) * 4);
 	spin_unlock(&irq_controller_lock);
 }
 
 #ifdef CONFIG_SMP
 static void gic_set_cpu(unsigned int irq, cpumask_t mask_val)
 {
-	void __iomem *reg = gic_dist_base + GIC_DIST_TARGET + (irq & ~3);
+	void __iomem *reg = gic_dist_base(irq) + GIC_DIST_TARGET + (gic_irq(irq) & ~3);
 	unsigned int shift = (irq % 4) * 8;
 	unsigned int cpu = first_cpu(mask_val);
 	u32 val;
@@ -95,6 +125,37 @@
 }
 #endif
 
+static void fastcall gic_handle_cascade_irq(unsigned int irq,
+					    struct irq_desc *desc)
+{
+	struct gic_chip_data *chip_data = get_irq_data(irq);
+	struct irq_chip *chip = get_irq_chip(irq);
+	unsigned int cascade_irq;
+	unsigned long status;
+
+	/* primary controller ack'ing */
+	chip->ack(irq);
+
+	spin_lock(&irq_controller_lock);
+	status = readl(chip_data->cpu_base + GIC_CPU_INTACK);
+	spin_unlock(&irq_controller_lock);
+
+	cascade_irq = (status & 0x3ff);
+	if (cascade_irq > 1020)
+		goto out;
+	if (cascade_irq < 32 || cascade_irq >= NR_IRQS) {
+		do_bad_IRQ(cascade_irq, desc);
+		goto out;
+	}
+
+	cascade_irq += chip_data->irq_offset;
+	generic_handle_irq(cascade_irq);
+
+ out:
+	/* primary controller unmasking */
+	chip->unmask(irq);
+}
+
 static struct irq_chip gic_chip = {
 	.name		= "GIC",
 	.ack		= gic_ack_irq,
@@ -105,15 +166,29 @@
 #endif
 };
 
-void __init gic_dist_init(void __iomem *base)
+void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
+{
+	if (gic_nr >= MAX_GIC_NR)
+		BUG();
+	if (set_irq_data(irq, &gic_data[gic_nr]) != 0)
+		BUG();
+	set_irq_chained_handler(irq, gic_handle_cascade_irq);
+}
+
+void __init gic_dist_init(unsigned int gic_nr, void __iomem *base,
+			  unsigned int irq_start)
 {
 	unsigned int max_irq, i;
 	u32 cpumask = 1 << smp_processor_id();
 
+	if (gic_nr >= MAX_GIC_NR)
+		BUG();
+
 	cpumask |= cpumask << 8;
 	cpumask |= cpumask << 16;
 
-	gic_dist_base = base;
+	gic_data[gic_nr].dist_base = base;
+	gic_data[gic_nr].irq_offset = (irq_start - 1) & ~31;
 
 	writel(0, base + GIC_DIST_CTRL);
 
@@ -158,8 +233,9 @@
 	/*
 	 * Setup the Linux IRQ subsystem.
 	 */
-	for (i = 29; i < max_irq; i++) {
+	for (i = irq_start; i < gic_data[gic_nr].irq_offset + max_irq; i++) {
 		set_irq_chip(i, &gic_chip);
+		set_irq_chip_data(i, &gic_data[gic_nr]);
 		set_irq_handler(i, handle_level_irq);
 		set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
 	}
@@ -167,9 +243,13 @@
 	writel(1, base + GIC_DIST_CTRL);
 }
 
-void __cpuinit gic_cpu_init(void __iomem *base)
+void __cpuinit gic_cpu_init(unsigned int gic_nr, void __iomem *base)
 {
-	gic_cpu_base = base;
+	if (gic_nr >= MAX_GIC_NR)
+		BUG();
+
+	gic_data[gic_nr].cpu_base = base;
+
 	writel(0xf0, base + GIC_CPU_PRIMASK);
 	writel(1, base + GIC_CPU_CTRL);
 }
@@ -179,6 +259,7 @@
 {
 	unsigned long map = *cpus_addr(cpumask);
 
-	writel(map << 16 | irq, gic_dist_base + GIC_DIST_SOFTINT);
+	/* this always happens on GIC0 */
+	writel(map << 16 | irq, gic_data[0].dist_base + GIC_DIST_SOFTINT);
 }
 #endif