/*
 * Synopsys DW APB ICTL irqchip driver.
 *
 * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
 *
 * based on GPL'ed 2.6 kernel sources
 *  (c) Marvell International Ltd.
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2.  This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */
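
/*
 * Bound via IRQCHIP_DECLARE() below to the "snps,dw-apb-ictl" compatible
 * string.  An illustrative (not authoritative) device tree node; the node
 * name, unit address, register size and parent interrupt specifier are
 * placeholders that depend on the SoC integration:
 *
 *	aic: interrupt-controller@3000 {
 *		compatible = "snps,dw-apb-ictl";
 *		reg = <0x3000 0xc00>;
 *		interrupt-controller;
 *		#interrupt-cells = <1>;
 *		interrupts = <3>;	// parent-specific specifier
 *	};
 */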

#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>

#include "irqchip.h"

#define APB_INT_ENABLE_L	0x00
#define APB_INT_ENABLE_H	0x04
#define APB_INT_MASK_L		0x08
#define APB_INT_MASK_H		0x0c
#define APB_INT_FINALSTATUS_L	0x30
#define APB_INT_FINALSTATUS_H	0x34

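/*
 * Chained handler for the parent interrupt: scan the FINALSTATUS register
 * of each 32-bit bank and forward every pending input to the Linux
 * interrupt mapped for it in the domain stashed in gc->private.
 */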
static void dw_apb_ictl_handler(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_get_chip(irq);
	struct irq_chip_generic *gc = irq_get_handler_data(irq);
	struct irq_domain *d = gc->private;
	u32 stat;
	int n;

	chained_irq_enter(chip, desc);

	for (n = 0; n < gc->num_ct; n++) {
		stat = readl_relaxed(gc->reg_base +
				     APB_INT_FINALSTATUS_L + 4 * n);
		while (stat) {
			u32 hwirq = ffs(stat) - 1;

			generic_handle_irq(irq_find_mapping(d,
					gc->irq_base + hwirq + 32 * n));
			stat &= ~(1 << hwirq);
		}
	}

	chained_irq_exit(chip, desc);
}

#ifdef CONFIG_PM
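/*
 * The enable and mask registers may have lost their contents while the
 * block was powered down, so re-enable all inputs handled by this chip
 * type and restore the mask bits from the cached mask value.
 */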
static void dw_apb_ictl_resume(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct irq_chip_type *ct = irq_data_get_chip_type(d);

	irq_gc_lock(gc);
	writel_relaxed(~0, gc->reg_base + ct->regs.enable);
	writel_relaxed(*ct->mask_cache, gc->reg_base + ct->regs.mask);
	irq_gc_unlock(gc);
}
#else
#define dw_apb_ictl_resume	NULL
#endif /* CONFIG_PM */

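/*
 * Probe from the device tree: map the parent interrupt, ioremap the
 * register block, size the controller by probing the enable registers,
 * then register a linear irq domain backed by the generic irq chip
 * helpers and install the chained handler on the parent interrupt.
 */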
static int __init dw_apb_ictl_init(struct device_node *np,
				   struct device_node *parent)
{
	unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
	struct resource r;
	struct irq_domain *domain;
	struct irq_chip_generic *gc;
	void __iomem *iobase;
	int ret, nrirqs, irq;
	u32 reg;

	/* Map the parent interrupt for the chained handler */
	irq = irq_of_parse_and_map(np, 0);
	if (irq <= 0) {
		pr_err("%s: unable to parse irq\n", np->full_name);
		return -EINVAL;
	}

	ret = of_address_to_resource(np, 0, &r);
	if (ret) {
		pr_err("%s: unable to get resource\n", np->full_name);
		return ret;
	}

	if (!request_mem_region(r.start, resource_size(&r), np->full_name)) {
		pr_err("%s: unable to request mem region\n", np->full_name);
		return -ENOMEM;
	}

	iobase = ioremap(r.start, resource_size(&r));
	if (!iobase) {
		pr_err("%s: unable to map resource\n", np->full_name);
		ret = -ENOMEM;
		goto err_release;
	}

	/*
	 * The DW IP can be configured to allow 2-64 irqs. We can determine
	 * the number of irqs supported by writing to the enable registers
	 * and looking for bits that do not get set, as the corresponding
	 * flip-flops will have been removed by the synthesis tool.
	 */
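	/*
	 * Illustrative example (not taken from a specific SoC): with 45
	 * inputs synthesised, APB_INT_ENABLE_H reads back 0x00001fff after
	 * ~0 is written below, so nrirqs = 32 + fls(0x1fff) = 45.
	 */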

	/* mask and enable all interrupts */
	writel_relaxed(~0, iobase + APB_INT_MASK_L);
	writel_relaxed(~0, iobase + APB_INT_MASK_H);
	writel_relaxed(~0, iobase + APB_INT_ENABLE_L);
	writel_relaxed(~0, iobase + APB_INT_ENABLE_H);

	reg = readl_relaxed(iobase + APB_INT_ENABLE_H);
	if (reg)
		nrirqs = 32 + fls(reg);
	else
		nrirqs = fls(readl_relaxed(iobase + APB_INT_ENABLE_L));

	domain = irq_domain_add_linear(np, nrirqs,
				       &irq_generic_chip_ops, NULL);
	if (!domain) {
		pr_err("%s: unable to add irq domain\n", np->full_name);
		ret = -ENOMEM;
		goto err_unmap;
	}

	ret = irq_alloc_domain_generic_chips(domain, 32, (nrirqs > 32) ? 2 : 1,
					     np->name, handle_level_irq, clr, 0,
					     IRQ_GC_MASK_CACHE_PER_TYPE |
					     IRQ_GC_INIT_MASK_CACHE);
	if (ret) {
		pr_err("%s: unable to alloc irq domain gc\n", np->full_name);
		goto err_unmap;
	}

	gc = irq_get_domain_generic_chip(domain, 0);
	gc->private = domain;
	gc->reg_base = iobase;

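	/*
	 * A bit set in the MASK register masks the corresponding input;
	 * masking therefore sets the bit and unmasking clears it, with the
	 * current value tracked in the per-type mask cache.
	 */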
	gc->chip_types[0].regs.mask = APB_INT_MASK_L;
	gc->chip_types[0].regs.enable = APB_INT_ENABLE_L;
	gc->chip_types[0].chip.irq_mask = irq_gc_mask_set_bit;
	gc->chip_types[0].chip.irq_unmask = irq_gc_mask_clr_bit;
	gc->chip_types[0].chip.irq_resume = dw_apb_ictl_resume;

	if (nrirqs > 32) {
		gc->chip_types[1].regs.mask = APB_INT_MASK_H;
		gc->chip_types[1].regs.enable = APB_INT_ENABLE_H;
		gc->chip_types[1].chip.irq_mask = irq_gc_mask_set_bit;
		gc->chip_types[1].chip.irq_unmask = irq_gc_mask_clr_bit;
		gc->chip_types[1].chip.irq_resume = dw_apb_ictl_resume;
	}

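	/* Hook the chained handler onto the parent interrupt */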
	irq_set_handler_data(irq, gc);
	irq_set_chained_handler(irq, dw_apb_ictl_handler);

	return 0;

err_unmap:
	iounmap(iobase);
err_release:
	release_mem_region(r.start, resource_size(&r));
	return ret;
}
IRQCHIP_DECLARE(dw_apb_ictl, "snps,dw-apb-ictl", dw_apb_ictl_init);