/*
 * linux/arch/arm/mach-pxa/irq.c
 *
 * Generic PXA IRQ handling
 *
 * Author:	Nicolas Pitre
 * Created:	Jun 15, 2001
 * Copyright:	MontaVista Software Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/bitops.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/syscore_ops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>

#include <asm/exception.h>

#include <mach/hardware.h>
#include <mach/irqs.h>

#include "generic.h"

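/*
 * Interrupt controller register offsets.  ICIP (IRQ pending), ICMR (mask),
 * ICLR (IRQ/FIQ level), ICPR (raw pending), ICCR (control) and ICHP
 * (highest priority pending) follow the names used in the PXA developer
 * manuals; IPR(i) is the offset of the priority register for internal
 * IRQ i from the controller base.
 */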
#define ICIP			(0x000)
#define ICMR			(0x004)
#define ICLR			(0x008)
#define ICFR			(0x00c)
#define ICPR			(0x010)
#define ICCR			(0x014)
#define ICHP			(0x018)
#define IPR(i)			(((i) < 32) ? (0x01c + ((i) << 2)) :		\
				((i) < 64) ? (0x0b0 + (((i) - 32) << 2)) :	\
				      (0x144 + (((i) - 64) << 2)))
#define ICHP_VAL_IRQ		(1 << 31)
#define ICHP_IRQ(i)		(((i) >> 16) & 0x7fff)
#define IPR_VALID		(1 << 31)

#define MAX_INTERNAL_IRQS	128

/*
 * This is for peripheral IRQs internal to the PXA chip.
 */

static void __iomem *pxa_irq_base;
static int pxa_internal_irq_nr;
static bool cpu_has_ipr;
static struct irq_domain *pxa_irq_domain;

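/*
 * Internal IRQs are grouped in banks of 32.  On PXA variants with more
 * than 32 internal interrupts the additional banks of ICIP/ICMR/ICLR
 * registers sit at fixed offsets (0x9c, 0x130) from the first bank,
 * which irq_base() resolves for a given bank index.
 */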
static inline void __iomem *irq_base(int i)
{
	static unsigned long phys_base_offset[] = {
		0x0,
		0x9c,
		0x130,
	};

	return pxa_irq_base + phys_base_offset[i];
}

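/*
 * Mask/unmask operate on the ICMR of the bank stored as the interrupt's
 * chip data (set up in pxa_irq_map()); the low five bits of the hardware
 * IRQ number select the bit within that bank's 32-bit mask register.
 */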
void pxa_mask_irq(struct irq_data *d)
{
	void __iomem *base = irq_data_get_irq_chip_data(d);
	irq_hw_number_t irq = irqd_to_hwirq(d);
	uint32_t icmr = __raw_readl(base + ICMR);

	icmr &= ~BIT(irq & 0x1f);
	__raw_writel(icmr, base + ICMR);
}

void pxa_unmask_irq(struct irq_data *d)
{
	void __iomem *base = irq_data_get_irq_chip_data(d);
	irq_hw_number_t irq = irqd_to_hwirq(d);
	uint32_t icmr = __raw_readl(base + ICMR);

	icmr |= BIT(irq & 0x1f);
	__raw_writel(icmr, base + ICMR);
}

static struct irq_chip pxa_internal_irq_chip = {
	.name		= "SC",
	.irq_ack	= pxa_mask_irq,
	.irq_mask	= pxa_mask_irq,
	.irq_unmask	= pxa_unmask_irq,
};

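/*
 * IRQ entry that polls the pending register: scan ICIP against the mask
 * in ICMR and dispatch the highest set bit, repeating until no masked-in
 * source is left pending.
 */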
asmlinkage void __exception_irq_entry icip_handle_irq(struct pt_regs *regs)
{
	uint32_t icip, icmr, mask;

	do {
		icip = __raw_readl(pxa_irq_base + ICIP);
		icmr = __raw_readl(pxa_irq_base + ICMR);
		mask = icip & icmr;

		if (mask == 0)
			break;

		handle_IRQ(PXA_IRQ(fls(mask) - 1), regs);
	} while (1);
}

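/*
 * IRQ entry using the "highest priority pending" register: the mrc reads
 * ICHP through coprocessor 6 rather than the memory-mapped register, and
 * the valid bit plus IRQ field are decoded with the ICHP_* macros above.
 */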
asmlinkage void __exception_irq_entry ichp_handle_irq(struct pt_regs *regs)
{
	uint32_t ichp;

	do {
		__asm__ __volatile__("mrc p6, 0, %0, c5, c0, 0\n": "=r"(ichp));

		if ((ichp & ICHP_VAL_IRQ) == 0)
			break;

		handle_IRQ(PXA_IRQ(ICHP_IRQ(ichp)), regs);
	} while (1);
}

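/*
 * irq_domain map callback: give the source a default priority equal to
 * its hardware number where priority registers exist, and hook it up to
 * the level-type flow handler with its register bank as chip data.
 */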
static int pxa_irq_map(struct irq_domain *h, unsigned int virq,
		       irq_hw_number_t hw)
{
	void __iomem *base = irq_base(hw / 32);

	/* initialize interrupt priority */
	if (cpu_has_ipr)
		__raw_writel(hw | IPR_VALID, pxa_irq_base + IPR(hw));

	irq_set_chip_and_handler(virq, &pxa_internal_irq_chip,
				 handle_level_irq);
	irq_set_chip_data(virq, base);
	set_irq_flags(virq, IRQF_VALID);

	return 0;
}

static struct irq_domain_ops pxa_irq_ops = {
	.map    = pxa_irq_map,
	.xlate  = irq_domain_xlate_onecell,
};

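/*
 * Common init for the legacy and device-tree paths: register a legacy
 * irq_domain covering all internal IRQs starting at PXA_IRQ(0), mask and
 * route every source to IRQ (not FIQ), and install the platform's
 * set_wake hook.
 */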
static __init void
pxa_init_irq_common(struct device_node *node, int irq_nr,
		    int (*fn)(struct irq_data *, unsigned int))
{
	int n;

	pxa_internal_irq_nr = irq_nr;
	pxa_irq_domain = irq_domain_add_legacy(node, irq_nr,
					       PXA_IRQ(0), 0,
					       &pxa_irq_ops, NULL);
	if (!pxa_irq_domain)
		panic("Unable to add PXA IRQ domain\n");
	irq_set_default_host(pxa_irq_domain);

	for (n = 0; n < irq_nr; n += 32) {
		void __iomem *base = irq_base(n >> 5);

		__raw_writel(0, base + ICMR);	/* disable all IRQs */
		__raw_writel(0, base + ICLR);	/* all IRQs are IRQ, not FIQ */
	}
	/* only unmasked interrupts kick us out of idle */
	__raw_writel(1, irq_base(0) + ICCR);

	pxa_internal_irq_chip.irq_set_wake = fn;
}

void __init pxa_init_irq(int irq_nr, int (*fn)(struct irq_data *, unsigned int))
{
	BUG_ON(irq_nr > MAX_INTERNAL_IRQS);

	pxa_irq_base = io_p2v(0x40d00000);
	cpu_has_ipr = !cpu_is_pxa25x();
	pxa_init_irq_common(NULL, irq_nr, fn);
}

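/*
 * Suspend records each bank's ICMR and every priority register and masks
 * all sources; resume puts the saved values back, routes all sources to
 * IRQ again and re-enables idle wake-up on unmasked interrupts only.
 */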
#ifdef CONFIG_PM
static unsigned long saved_icmr[MAX_INTERNAL_IRQS/32];
static unsigned long saved_ipr[MAX_INTERNAL_IRQS];

static int pxa_irq_suspend(void)
{
	int i;

	for (i = 0; i < pxa_internal_irq_nr / 32; i++) {
		void __iomem *base = irq_base(i);

		saved_icmr[i] = __raw_readl(base + ICMR);
		__raw_writel(0, base + ICMR);
	}

	if (cpu_has_ipr) {
		for (i = 0; i < pxa_internal_irq_nr; i++)
			saved_ipr[i] = __raw_readl(pxa_irq_base + IPR(i));
	}

	return 0;
}

static void pxa_irq_resume(void)
{
	int i;

	for (i = 0; i < pxa_internal_irq_nr / 32; i++) {
		void __iomem *base = irq_base(i);

		__raw_writel(saved_icmr[i], base + ICMR);
		__raw_writel(0, base + ICLR);
	}

	if (cpu_has_ipr)
		for (i = 0; i < pxa_internal_irq_nr; i++)
			__raw_writel(saved_ipr[i], pxa_irq_base + IPR(i));

	__raw_writel(1, pxa_irq_base + ICCR);
}
#else
#define pxa_irq_suspend		NULL
#define pxa_irq_resume		NULL
#endif

struct syscore_ops pxa_irq_syscore_ops = {
	.suspend	= pxa_irq_suspend,
	.resume		= pxa_irq_resume,
};

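/*
 * Device-tree probe path: the controller base address, the number of
 * internal IRQs and the presence of priority registers are all taken
 * from the "marvell,pxa-intc" node rather than hard-coded per SoC.
 */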
#ifdef CONFIG_OF
static const struct of_device_id intc_ids[] __initconst = {
	{ .compatible = "marvell,pxa-intc", },
	{}
};

void __init pxa_dt_irq_init(int (*fn)(struct irq_data *, unsigned int))
{
	struct device_node *node;
	struct resource res;
	int ret;

	node = of_find_matching_node(NULL, intc_ids);
	if (!node) {
		pr_err("Failed to find interrupt controller in arch-pxa\n");
		return;
	}

	ret = of_property_read_u32(node, "marvell,intc-nr-irqs",
				   &pxa_internal_irq_nr);
	if (ret) {
		pr_err("Not found marvell,intc-nr-irqs property\n");
		return;
	}

	ret = of_address_to_resource(node, 0, &res);
	if (ret < 0) {
		pr_err("No registers defined for node\n");
		return;
	}
	pxa_irq_base = io_p2v(res.start);

	if (of_find_property(node, "marvell,intc-priority", NULL))
		cpu_has_ipr = 1;

	ret = irq_alloc_descs(-1, 0, pxa_internal_irq_nr, 0);
	if (ret < 0) {
		pr_err("Failed to allocate IRQ numbers\n");
		return;
	}

	pxa_init_irq_common(node, pxa_internal_irq_nr, fn);
}
#endif /* CONFIG_OF */