/*
 * IRQ chip definitions for INTC IRQs.
 *
 * Copyright (C) 2007, 2008 Magnus Damm
 * Copyright (C) 2009 - 2012 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/cpumask.h>
#include <linux/bsearch.h>
#include <linux/io.h>
#include "internals.h"

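/*
 * Write the enable value encoded in @handle to the enable register for
 * each CPU that services this IRQ (all CPUs on UP, only those in the
 * affinity mask on SMP), then re-enable IRQ balancing for the vector.
 */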
void _intc_enable(struct irq_data *data, unsigned long handle)
{
        unsigned int irq = data->irq;
        struct intc_desc_int *d = get_intc_desc(irq);
        unsigned long addr;
        unsigned int cpu;

        for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_E(handle)); cpu++) {
#ifdef CONFIG_SMP
                if (!cpumask_test_cpu(cpu, data->affinity))
                        continue;
#endif
                addr = INTC_REG(d, _INTC_ADDR_E(handle), cpu);
                intc_enable_fns[_INTC_MODE(handle)](addr, handle,
                                intc_reg_fns[_INTC_FN(handle)], irq);
        }

        intc_balancing_enable(irq);
}

static void intc_enable(struct irq_data *data)
{
        _intc_enable(data, (unsigned long)irq_data_get_irq_chip_data(data));
}

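/*
 * Disable IRQ balancing for the vector, then write the disable value from
 * the handle stashed in the chip data to the disable register for each CPU
 * that services this IRQ.
 */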
static void intc_disable(struct irq_data *data)
{
        unsigned int irq = data->irq;
        struct intc_desc_int *d = get_intc_desc(irq);
        unsigned long handle = (unsigned long)irq_data_get_irq_chip_data(data);
        unsigned long addr;
        unsigned int cpu;

        intc_balancing_disable(irq);

        for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) {
#ifdef CONFIG_SMP
                if (!cpumask_test_cpu(cpu, data->affinity))
                        continue;
#endif
                addr = INTC_REG(d, _INTC_ADDR_D(handle), cpu);
                intc_disable_fns[_INTC_MODE(handle)](addr, handle,
                                intc_reg_fns[_INTC_FN(handle)], irq);
        }
}

#ifdef CONFIG_SMP
/*
 * This is called with the irq desc lock held, so we don't require any
 * additional locking here at the intc desc level. The affinity mask is
 * later tested in the enable/disable paths.
 */
static int intc_set_affinity(struct irq_data *data,
                             const struct cpumask *cpumask,
                             bool force)
{
        if (!cpumask_intersects(cpumask, cpu_online_mask))
                return -1;

        cpumask_copy(data->affinity, cpumask);

        return IRQ_SET_MASK_OK_NOCOPY;
}
#endif

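/*
 * Mask the IRQ and acknowledge any pending event by clearing only the
 * associated bit in the ack register, using the register width encoded
 * in the ack handle.
 */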
static void intc_mask_ack(struct irq_data *data)
{
        unsigned int irq = data->irq;
        struct intc_desc_int *d = get_intc_desc(irq);
        unsigned long handle = intc_get_ack_handle(irq);
        void __iomem *addr;

        intc_disable(data);

        /* read register and write zero only to the associated bit */
        if (handle) {
                unsigned int value;

                addr = (void __iomem *)INTC_REG(d, _INTC_ADDR_D(handle), 0);
                value = intc_set_field_from_handle(0, 1, handle);

                switch (_INTC_FN(handle)) {
                case REG_FN_MODIFY_BASE + 0:    /* 8bit */
                        __raw_readb(addr);
                        __raw_writeb(0xff ^ value, addr);
                        break;
                case REG_FN_MODIFY_BASE + 1:    /* 16bit */
                        __raw_readw(addr);
                        __raw_writew(0xffff ^ value, addr);
                        break;
                case REG_FN_MODIFY_BASE + 3:    /* 32bit */
                        __raw_readl(addr);
                        __raw_writel(0xffffffff ^ value, addr);
                        break;
                default:
                        BUG();
                        break;
                }
        }
}

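/*
 * Look up the handle entry for @irq in one of the controller's handle
 * arrays (priority or sense). The arrays are expected to be sorted by IRQ
 * number at controller registration time, which is what allows bsearch()
 * to be used here.
 */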
static struct intc_handle_int *intc_find_irq(struct intc_handle_int *hp,
                                             unsigned int nr_hp,
                                             unsigned int irq)
{
        struct intc_handle_int key;

        key.irq = irq;
        key.handle = 0;

        return bsearch(&key, hp, nr_hp, sizeof(*hp), intc_handle_int_cmp);
}

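/*
 * Update the priority level for @irq. The new level is recorded for the
 * primary (priority based) masking path and, if the vector also has a
 * directly writable priority register, programmed immediately through the
 * secondary handle.
 */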
int intc_set_priority(unsigned int irq, unsigned int prio)
{
        struct intc_desc_int *d = get_intc_desc(irq);
        struct irq_data *data = irq_get_irq_data(irq);
        struct intc_handle_int *ihp;

        if (!intc_get_prio_level(irq) || prio <= 1)
                return -EINVAL;

        ihp = intc_find_irq(d->prio, d->nr_prio, irq);
        if (ihp) {
                if (prio >= (1 << _INTC_WIDTH(ihp->handle)))
                        return -EINVAL;

                intc_set_prio_level(irq, prio);

                /*
                 * Only set the secondary masking method directly; the
                 * primary masking method uses intc_prio_level[irq], so
                 * its priority level will be applied during the next
                 * enable().
                 */
                if (_INTC_FN(ihp->handle) != REG_FN_ERR)
                        _intc_enable(data, ihp->handle);
        }
        return 0;
}

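/*
 * Map generic IRQ trigger types to hardware sense values. Entries carry
 * SENSE_VALID_FLAG so that a zero lookup result means "trigger type not
 * supported by this part".
 */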
#define SENSE_VALID_FLAG        0x80
#define VALID(x)                (x | SENSE_VALID_FLAG)

static unsigned char intc_irq_sense_table[IRQ_TYPE_SENSE_MASK + 1] = {
        [IRQ_TYPE_EDGE_FALLING] = VALID(0),
        [IRQ_TYPE_EDGE_RISING]  = VALID(1),
        [IRQ_TYPE_LEVEL_LOW]    = VALID(2),
        /* SH7706, SH7707 and SH7709 do not support high level triggered */
#if !defined(CONFIG_CPU_SUBTYPE_SH7706) && \
    !defined(CONFIG_CPU_SUBTYPE_SH7707) && \
    !defined(CONFIG_CPU_SUBTYPE_SH7709)
        [IRQ_TYPE_LEVEL_HIGH]   = VALID(3),
#endif
#if defined(CONFIG_ARM) /* all recent SH-Mobile / R-Mobile ARM support this */
        [IRQ_TYPE_EDGE_BOTH]    = VALID(4),
#endif
};

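/*
 * Set the trigger type for @irq: translate the generic type through the
 * sense table, strip the validity flag, and write the raw sense value to
 * the vector's sense register if one is described for it.
 */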
static int intc_set_type(struct irq_data *data, unsigned int type)
{
        unsigned int irq = data->irq;
        struct intc_desc_int *d = get_intc_desc(irq);
        unsigned char value = intc_irq_sense_table[type & IRQ_TYPE_SENSE_MASK];
        struct intc_handle_int *ihp;
        unsigned long addr;

        if (!value)
                return -EINVAL;

        value &= ~SENSE_VALID_FLAG;

        ihp = intc_find_irq(d->sense, d->nr_sense, irq);
        if (ihp) {
                /* PINT has 2-bit sense registers, should fail on EDGE_BOTH */
                if (value >= (1 << _INTC_WIDTH(ihp->handle)))
                        return -EINVAL;

                addr = INTC_REG(d, _INTC_ADDR_E(ihp->handle), 0);
                intc_reg_fns[_INTC_FN(ihp->handle)](addr, ihp->handle, value);
        }

        return 0;
}

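/*
 * Shared irq_chip for INTC vectors. Each vector's chip data holds its
 * enable/disable handle, which intc_enable()/intc_disable() consume above.
 */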
struct irq_chip intc_irq_chip = {
        .irq_mask               = intc_disable,
        .irq_unmask             = intc_enable,
        .irq_mask_ack           = intc_mask_ack,
        .irq_enable             = intc_enable,
        .irq_disable            = intc_disable,
        .irq_set_type           = intc_set_type,
#ifdef CONFIG_SMP
        .irq_set_affinity       = intc_set_affinity,
#endif
        .flags                  = IRQCHIP_SKIP_SET_WAKE,
};
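
/*
 * Illustrative sketch only (not part of the driver): once a controller
 * using this chip has been registered, platform code could raise the
 * priority of one of its vectors via the intc_set_priority() helper
 * defined above. "MY_PLATFORM_IRQ" is a made-up vector number used
 * purely for the example; the call fails for vectors without a priority
 * level or for out-of-range values.
 *
 *      if (intc_set_priority(MY_PLATFORM_IRQ, 3) < 0)
 *              pr_warn("INTC: unable to set priority for IRQ %d\n",
 *                      MY_PLATFORM_IRQ);
 */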