Duy Truong | 790f06d | 2013-02-13 16:38:12 -0800 | [diff] [blame] | 1 | /* Copyright (c) 2012, The Linux Foundation. All rights reserved. |
Michael Bohan | 115cf65 | 2012-01-05 14:32:59 -0800 | [diff] [blame] | 2 | * |
| 3 | * This program is free software; you can redistribute it and/or modify |
| 4 | * it under the terms of the GNU General Public License version 2 and |
| 5 | * only version 2 as published by the Free Software Foundation. |
| 6 | * |
| 7 | * This program is distributed in the hope that it will be useful, |
| 8 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 9 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 10 | * GNU General Public License for more details. |
| 11 | */ |
| 12 | |
| 13 | #define pr_fmt(fmt) "%s: " fmt, __func__ |
| 14 | |
| 15 | #include <linux/init.h> |
| 16 | #include <linux/kernel.h> |
| 17 | #include <linux/err.h> |
| 18 | #include <linux/module.h> |
| 19 | #include <linux/list.h> |
| 20 | #include <linux/of.h> |
| 21 | #include <linux/of_address.h> |
| 22 | #include <linux/of_irq.h> |
| 23 | #include <linux/irqdomain.h> |
| 24 | #include <linux/interrupt.h> |
| 25 | #include <linux/spmi.h> |
| 26 | #include <linux/radix-tree.h> |
| 27 | #include <linux/slab.h> |
| 28 | #include <linux/printk.h> |
| 29 | |
| 30 | #include <asm/irq.h> |
| 31 | #include <asm/mach/irq.h> |
| 32 | #include <mach/qpnp-int.h> |
| 33 | |
Michael Bohan | 115cf65 | 2012-01-05 14:32:59 -0800 | [diff] [blame] | 34 | /* 16 slave_ids, 256 per_ids per slave, and 8 ints per per_id */ |
| 35 | #define QPNPINT_NR_IRQS (16 * 256 * 8) |
| 36 | |
/* Per-peripheral interrupt register offsets, applied relative to the
 * peripheral's SPMI base (irq_d->spmi_offset). Semantics below are those
 * demonstrated by the irq_chip callbacks in this file.
 */
enum qpnpint_regs {
	QPNPINT_REG_RT_STS		= 0x10, /* real-time status (unused here) */
	QPNPINT_REG_SET_TYPE		= 0x11, /* bitmap: 1 = edge, 0 = level */
	QPNPINT_REG_POLARITY_HIGH	= 0x12, /* bitmap: rising / active-high */
	QPNPINT_REG_POLARITY_LOW	= 0x13, /* bitmap: falling / active-low */
	QPNPINT_REG_LATCHED_CLR		= 0x14, /* write 1s to ack latched ints */
	QPNPINT_REG_EN_SET		= 0x15, /* write 1s to enable ints */
	QPNPINT_REG_EN_CLR		= 0x16, /* write 1s to disable ints */
	QPNPINT_REG_LATCHED_STS		= 0x18, /* latched status (unused here) */
};
| 47 | |
/* Software shadow of one peripheral's interrupt configuration.
 * Shared by all irqs (up to 8) of that peripheral; refcounted via
 * use_count and stored in q_chip_data.per_tree keyed by slave+per id.
 */
struct q_perip_data {
	uint8_t type;	    /* bitmap: 1 = edge triggered, 0 = level */
	uint8_t pol_high;   /* bitmap: rising-edge / active-high enabled */
	uint8_t pol_low;    /* bitmap: falling-edge / active-low enabled */
	uint8_t int_en;	    /* bitmap: interrupt enable state */
	uint8_t use_count;  /* number of q_irq_data referencing this */
};
| 55 | |
/* Per-virq state attached via irq_set_chip_data(). */
struct q_irq_data {
	uint32_t priv_d; /* data to optimize arbiter interactions */
	struct q_chip_data *chip_d;	/* owning chip/bus */
	struct q_perip_data *per_d;	/* shared per-peripheral shadow */
	uint8_t mask_shift;		/* single-bit mask for this irq (1 << irq) */
	uint8_t spmi_slave;		/* SPMI slave id, from the hwirq encoding */
	uint16_t spmi_offset;		/* peripheral base offset (per id << 8) */
};
| 64 | |
/* One qpnp interrupt chip == one SPMI bus worth of interrupts. */
struct q_chip_data {
	int bus_nr;				/* SPMI bus number */
	struct irq_domain *domain;		/* hwirq <-> virq mapping */
	struct qpnp_local_int cb;		/* arbiter mask/unmask callbacks */
	struct spmi_controller *spmi_ctrl;	/* set at controller registration */
	struct radix_tree_root per_tree;	/* q_perip_data keyed by slave+per */
	struct list_head list;			/* link in qpnpint_chips */
};
| 73 | |
/* All chips created by qpnpint_of_init().
 * NOTE(review): qpnpint_chips_mutex is declared but the list walk in
 * qpnpint_register_controller() does not take it — confirm intended locking.
 */
static LIST_HEAD(qpnpint_chips);
static DEFINE_MUTEX(qpnpint_chips_mutex);

#define QPNPINT_MAX_BUSSES 4
/* Fast bus-number -> chip lookup used from the interrupt dispatch path. */
struct q_chip_data *chip_lookup[QPNPINT_MAX_BUSSES];
Michael Bohan | 115cf65 | 2012-01-05 14:32:59 -0800 | [diff] [blame] | 79 | |
| 80 | /** |
| 81 | * qpnpint_encode_hwirq - translate between qpnp_irq_spec and |
| 82 | * hwirq representation. |
| 83 | * |
| 84 | * slave_offset = (addr->slave * 256 * 8); |
| 85 | * perip_offset = slave_offset + (addr->perip * 8); |
| 86 | * return perip_offset + addr->irq; |
| 87 | */ |
| 88 | static inline int qpnpint_encode_hwirq(struct qpnp_irq_spec *spec) |
| 89 | { |
| 90 | uint32_t hwirq; |
| 91 | |
| 92 | if (spec->slave > 15 || spec->irq > 7) |
| 93 | return -EINVAL; |
| 94 | |
| 95 | hwirq = (spec->slave << 11); |
| 96 | hwirq |= (spec->per << 3); |
| 97 | hwirq |= spec->irq; |
| 98 | |
| 99 | return hwirq; |
| 100 | } |
| 101 | /** |
| 102 | * qpnpint_decode_hwirq - translate between hwirq and |
| 103 | * qpnp_irq_spec representation. |
| 104 | */ |
| 105 | static inline int qpnpint_decode_hwirq(unsigned long hwirq, |
| 106 | struct qpnp_irq_spec *spec) |
| 107 | { |
| 108 | if (hwirq > 65535) |
| 109 | return -EINVAL; |
| 110 | |
| 111 | spec->slave = (hwirq >> 11) & 0xF; |
| 112 | spec->per = (hwirq >> 3) & 0xFF; |
| 113 | spec->irq = hwirq & 0x7; |
| 114 | return 0; |
| 115 | } |
| 116 | |
| 117 | static int qpnpint_spmi_write(struct q_irq_data *irq_d, uint8_t reg, |
| 118 | void *buf, uint32_t len) |
| 119 | { |
| 120 | struct q_chip_data *chip_d = irq_d->chip_d; |
| 121 | int rc; |
| 122 | |
| 123 | if (!chip_d->spmi_ctrl) |
| 124 | return -ENODEV; |
| 125 | |
| 126 | rc = spmi_ext_register_writel(chip_d->spmi_ctrl, irq_d->spmi_slave, |
| 127 | irq_d->spmi_offset + reg, buf, len); |
| 128 | return rc; |
| 129 | } |
| 130 | |
| 131 | static void qpnpint_irq_mask(struct irq_data *d) |
| 132 | { |
| 133 | struct q_irq_data *irq_d = irq_data_get_irq_chip_data(d); |
| 134 | struct q_chip_data *chip_d = irq_d->chip_d; |
| 135 | struct q_perip_data *per_d = irq_d->per_d; |
| 136 | struct qpnp_irq_spec q_spec; |
| 137 | int rc; |
| 138 | |
| 139 | pr_debug("hwirq %lu irq: %d\n", d->hwirq, d->irq); |
| 140 | |
| 141 | if (chip_d->cb.mask) { |
| 142 | rc = qpnpint_decode_hwirq(d->hwirq, &q_spec); |
| 143 | if (rc) |
Michael Bohan | bb6b30f | 2012-06-01 13:33:51 -0700 | [diff] [blame] | 144 | pr_err("decode failed on hwirq %lu\n", d->hwirq); |
Michael Bohan | 115cf65 | 2012-01-05 14:32:59 -0800 | [diff] [blame] | 145 | else |
| 146 | chip_d->cb.mask(chip_d->spmi_ctrl, &q_spec, |
| 147 | irq_d->priv_d); |
| 148 | } |
| 149 | |
| 150 | per_d->int_en &= ~irq_d->mask_shift; |
| 151 | |
| 152 | rc = qpnpint_spmi_write(irq_d, QPNPINT_REG_EN_CLR, |
| 153 | (u8 *)&irq_d->mask_shift, 1); |
| 154 | if (rc) |
Michael Bohan | bb6b30f | 2012-06-01 13:33:51 -0700 | [diff] [blame] | 155 | pr_err("spmi failure on irq %d\n", d->irq); |
Michael Bohan | 115cf65 | 2012-01-05 14:32:59 -0800 | [diff] [blame] | 156 | } |
| 157 | |
/* irq_chip .irq_mask_ack: disable the interrupt (EN_CLR) and clear its
 * latched status (LATCHED_CLR) in one callback — qpnpint_irq_mask() plus
 * the acknowledge write, in that order.
 */
static void qpnpint_irq_mask_ack(struct irq_data *d)
{
	struct q_irq_data *irq_d = irq_data_get_irq_chip_data(d);
	struct q_chip_data *chip_d = irq_d->chip_d;
	struct q_perip_data *per_d = irq_d->per_d;
	struct qpnp_irq_spec q_spec;
	int rc;

	pr_debug("hwirq %lu irq: %d mask: 0x%x\n", d->hwirq, d->irq,
							irq_d->mask_shift);

	/* let the arbiter mask at its level first, if registered */
	if (chip_d->cb.mask) {
		rc = qpnpint_decode_hwirq(d->hwirq, &q_spec);
		if (rc)
			pr_err("decode failed on hwirq %lu\n", d->hwirq);
		else
			chip_d->cb.mask(chip_d->spmi_ctrl, &q_spec,
							irq_d->priv_d);
	}

	/* update shadow enable state, then hardware */
	per_d->int_en &= ~irq_d->mask_shift;

	rc = qpnpint_spmi_write(irq_d, QPNPINT_REG_EN_CLR,
					&irq_d->mask_shift, 1);
	if (rc)
		pr_err("spmi failure on irq %d\n", d->irq);

	/* acknowledge: clear the latched status bit for this irq */
	rc = qpnpint_spmi_write(irq_d, QPNPINT_REG_LATCHED_CLR,
					&irq_d->mask_shift, 1);
	if (rc)
		pr_err("spmi failure on irq %d\n", d->irq);
}
| 190 | |
| 191 | static void qpnpint_irq_unmask(struct irq_data *d) |
| 192 | { |
| 193 | struct q_irq_data *irq_d = irq_data_get_irq_chip_data(d); |
| 194 | struct q_chip_data *chip_d = irq_d->chip_d; |
| 195 | struct q_perip_data *per_d = irq_d->per_d; |
| 196 | struct qpnp_irq_spec q_spec; |
| 197 | int rc; |
| 198 | |
| 199 | pr_debug("hwirq %lu irq: %d\n", d->hwirq, d->irq); |
| 200 | |
| 201 | if (chip_d->cb.unmask) { |
| 202 | rc = qpnpint_decode_hwirq(d->hwirq, &q_spec); |
| 203 | if (rc) |
Michael Bohan | bb6b30f | 2012-06-01 13:33:51 -0700 | [diff] [blame] | 204 | pr_err("decode failed on hwirq %lu\n", d->hwirq); |
Michael Bohan | 115cf65 | 2012-01-05 14:32:59 -0800 | [diff] [blame] | 205 | else |
| 206 | chip_d->cb.unmask(chip_d->spmi_ctrl, &q_spec, |
| 207 | irq_d->priv_d); |
| 208 | } |
| 209 | |
| 210 | per_d->int_en |= irq_d->mask_shift; |
| 211 | rc = qpnpint_spmi_write(irq_d, QPNPINT_REG_EN_SET, |
| 212 | &irq_d->mask_shift, 1); |
| 213 | if (rc) |
Michael Bohan | bb6b30f | 2012-06-01 13:33:51 -0700 | [diff] [blame] | 214 | pr_err("spmi failure on irq %d\n", d->irq); |
Michael Bohan | 115cf65 | 2012-01-05 14:32:59 -0800 | [diff] [blame] | 215 | } |
| 216 | |
| 217 | static int qpnpint_irq_set_type(struct irq_data *d, unsigned int flow_type) |
| 218 | { |
| 219 | struct q_irq_data *irq_d = irq_data_get_irq_chip_data(d); |
| 220 | struct q_perip_data *per_d = irq_d->per_d; |
| 221 | int rc; |
| 222 | u8 buf[3]; |
| 223 | |
| 224 | pr_debug("hwirq %lu irq: %d flow: 0x%x\n", d->hwirq, |
| 225 | d->irq, flow_type); |
| 226 | |
| 227 | per_d->pol_high &= ~irq_d->mask_shift; |
| 228 | per_d->pol_low &= ~irq_d->mask_shift; |
| 229 | if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) { |
| 230 | per_d->type |= irq_d->mask_shift; /* edge trig */ |
| 231 | if (flow_type & IRQF_TRIGGER_RISING) |
| 232 | per_d->pol_high |= irq_d->mask_shift; |
| 233 | if (flow_type & IRQF_TRIGGER_FALLING) |
| 234 | per_d->pol_low |= irq_d->mask_shift; |
| 235 | } else { |
| 236 | if ((flow_type & IRQF_TRIGGER_HIGH) && |
| 237 | (flow_type & IRQF_TRIGGER_LOW)) |
| 238 | return -EINVAL; |
| 239 | per_d->type &= ~irq_d->mask_shift; /* level trig */ |
| 240 | if (flow_type & IRQF_TRIGGER_HIGH) |
| 241 | per_d->pol_high |= irq_d->mask_shift; |
| 242 | else |
Michael Bohan | 69701d3 | 2012-06-07 17:05:41 -0700 | [diff] [blame] | 243 | per_d->pol_low |= irq_d->mask_shift; |
Michael Bohan | 115cf65 | 2012-01-05 14:32:59 -0800 | [diff] [blame] | 244 | } |
| 245 | |
| 246 | buf[0] = per_d->type; |
| 247 | buf[1] = per_d->pol_high; |
| 248 | buf[2] = per_d->pol_low; |
| 249 | |
| 250 | rc = qpnpint_spmi_write(irq_d, QPNPINT_REG_SET_TYPE, &buf, 3); |
| 251 | if (rc) |
Michael Bohan | bb6b30f | 2012-06-01 13:33:51 -0700 | [diff] [blame] | 252 | pr_err("spmi failure on irq %d\n", d->irq); |
Michael Bohan | 115cf65 | 2012-01-05 14:32:59 -0800 | [diff] [blame] | 253 | return rc; |
| 254 | } |
| 255 | |
/* irq_chip .irq_set_wake: no hardware wake configuration is needed here;
 * returning 0 lets irq_set_irq_wake() succeed so the genirq core keeps
 * its wake accounting (used together with IRQCHIP_MASK_ON_SUSPEND).
 */
static int qpnpint_irq_set_wake(struct irq_data *d, unsigned int on)
{
	return 0;
}
| 260 | |
/* irq_chip shared by every qpnp interrupt; per-irq state is attached
 * via irq_set_chip_data() (struct q_irq_data). IRQCHIP_MASK_ON_SUSPEND
 * asks the core to mask non-wakeup interrupts across suspend.
 */
static struct irq_chip qpnpint_chip = {
	.name		= "qpnp-int",
	.irq_mask	= qpnpint_irq_mask,
	.irq_mask_ack	= qpnpint_irq_mask_ack,
	.irq_unmask	= qpnpint_irq_unmask,
	.irq_set_type	= qpnpint_irq_set_type,
	.irq_set_wake	= qpnpint_irq_set_wake,
	.flags		= IRQCHIP_MASK_ON_SUSPEND,
};
| 270 | |
| 271 | static int qpnpint_init_irq_data(struct q_chip_data *chip_d, |
| 272 | struct q_irq_data *irq_d, |
| 273 | unsigned long hwirq) |
| 274 | { |
| 275 | struct qpnp_irq_spec q_spec; |
| 276 | int rc; |
| 277 | |
| 278 | irq_d->mask_shift = 1 << (hwirq & 0x7); |
| 279 | rc = qpnpint_decode_hwirq(hwirq, &q_spec); |
| 280 | if (rc < 0) |
| 281 | return rc; |
| 282 | irq_d->spmi_slave = q_spec.slave; |
| 283 | irq_d->spmi_offset = q_spec.per << 8; |
Michael Bohan | 115cf65 | 2012-01-05 14:32:59 -0800 | [diff] [blame] | 284 | irq_d->chip_d = chip_d; |
| 285 | |
| 286 | if (chip_d->cb.register_priv_data) |
| 287 | rc = chip_d->cb.register_priv_data(chip_d->spmi_ctrl, &q_spec, |
| 288 | &irq_d->priv_d); |
Michael Bohan | bb6b30f | 2012-06-01 13:33:51 -0700 | [diff] [blame] | 289 | if (rc) |
| 290 | return rc; |
| 291 | |
| 292 | irq_d->per_d->use_count++; |
| 293 | return 0; |
Michael Bohan | 115cf65 | 2012-01-05 14:32:59 -0800 | [diff] [blame] | 294 | } |
| 295 | |
| 296 | static struct q_irq_data *qpnpint_alloc_irq_data( |
| 297 | struct q_chip_data *chip_d, |
| 298 | unsigned long hwirq) |
| 299 | { |
| 300 | struct q_irq_data *irq_d; |
| 301 | struct q_perip_data *per_d; |
Michael Bohan | 392006f | 2013-01-25 14:29:41 -0800 | [diff] [blame] | 302 | int rc; |
Michael Bohan | 115cf65 | 2012-01-05 14:32:59 -0800 | [diff] [blame] | 303 | |
| 304 | irq_d = kzalloc(sizeof(struct q_irq_data), GFP_KERNEL); |
| 305 | if (!irq_d) |
| 306 | return ERR_PTR(-ENOMEM); |
| 307 | |
| 308 | /** |
| 309 | * The Peripheral Tree is keyed from the slave + per_id. We're |
| 310 | * ignoring the irq bits here since this peripheral structure |
| 311 | * should be common for all irqs on the same peripheral. |
| 312 | */ |
| 313 | per_d = radix_tree_lookup(&chip_d->per_tree, (hwirq & ~0x7)); |
| 314 | if (!per_d) { |
| 315 | per_d = kzalloc(sizeof(struct q_perip_data), GFP_KERNEL); |
Michael Bohan | bb6b30f | 2012-06-01 13:33:51 -0700 | [diff] [blame] | 316 | if (!per_d) { |
Michael Bohan | 392006f | 2013-01-25 14:29:41 -0800 | [diff] [blame] | 317 | rc = -ENOMEM; |
| 318 | goto alloc_fail; |
Michael Bohan | bb6b30f | 2012-06-01 13:33:51 -0700 | [diff] [blame] | 319 | } |
Michael Bohan | 392006f | 2013-01-25 14:29:41 -0800 | [diff] [blame] | 320 | rc = radix_tree_preload(GFP_KERNEL); |
| 321 | if (rc) |
| 322 | goto alloc_fail; |
| 323 | rc = radix_tree_insert(&chip_d->per_tree, |
Michael Bohan | 115cf65 | 2012-01-05 14:32:59 -0800 | [diff] [blame] | 324 | (hwirq & ~0x7), per_d); |
Michael Bohan | 392006f | 2013-01-25 14:29:41 -0800 | [diff] [blame] | 325 | if (rc) |
| 326 | goto alloc_fail; |
| 327 | radix_tree_preload_end(); |
Michael Bohan | 115cf65 | 2012-01-05 14:32:59 -0800 | [diff] [blame] | 328 | } |
| 329 | irq_d->per_d = per_d; |
| 330 | |
| 331 | return irq_d; |
Michael Bohan | 392006f | 2013-01-25 14:29:41 -0800 | [diff] [blame] | 332 | |
| 333 | alloc_fail: |
| 334 | kfree(per_d); |
| 335 | kfree(irq_d); |
| 336 | return ERR_PTR(rc); |
Michael Bohan | 115cf65 | 2012-01-05 14:32:59 -0800 | [diff] [blame] | 337 | } |
| 338 | |
Michael Bohan | 115cf65 | 2012-01-05 14:32:59 -0800 | [diff] [blame] | 339 | static int qpnpint_irq_domain_dt_translate(struct irq_domain *d, |
| 340 | struct device_node *controller, |
| 341 | const u32 *intspec, unsigned int intsize, |
| 342 | unsigned long *out_hwirq, |
| 343 | unsigned int *out_type) |
| 344 | { |
| 345 | struct qpnp_irq_spec addr; |
Michael Bohan | 115cf65 | 2012-01-05 14:32:59 -0800 | [diff] [blame] | 346 | int ret; |
| 347 | |
Michael Bohan | bb6b30f | 2012-06-01 13:33:51 -0700 | [diff] [blame] | 348 | pr_debug("intspec[0] 0x%x intspec[1] 0x%x intspec[2] 0x%x\n", |
| 349 | intspec[0], intspec[1], intspec[2]); |
Michael Bohan | 115cf65 | 2012-01-05 14:32:59 -0800 | [diff] [blame] | 350 | |
| 351 | if (d->of_node != controller) |
| 352 | return -EINVAL; |
| 353 | if (intsize != 3) |
| 354 | return -EINVAL; |
| 355 | |
| 356 | addr.irq = intspec[2] & 0x7; |
| 357 | addr.per = intspec[1] & 0xFF; |
| 358 | addr.slave = intspec[0] & 0xF; |
| 359 | |
| 360 | ret = qpnpint_encode_hwirq(&addr); |
| 361 | if (ret < 0) { |
Michael Bohan | bb6b30f | 2012-06-01 13:33:51 -0700 | [diff] [blame] | 362 | pr_err("invalid intspec\n"); |
Michael Bohan | 115cf65 | 2012-01-05 14:32:59 -0800 | [diff] [blame] | 363 | return ret; |
| 364 | } |
| 365 | *out_hwirq = ret; |
| 366 | *out_type = IRQ_TYPE_NONE; |
| 367 | |
Michael Bohan | 115cf65 | 2012-01-05 14:32:59 -0800 | [diff] [blame] | 368 | return 0; |
| 369 | } |
| 370 | |
Michael Bohan | bb6b30f | 2012-06-01 13:33:51 -0700 | [diff] [blame] | 371 | static void qpnpint_free_irq_data(struct q_irq_data *irq_d) |
| 372 | { |
| 373 | if (irq_d->per_d->use_count == 1) |
| 374 | kfree(irq_d->per_d); |
| 375 | else |
| 376 | irq_d->per_d->use_count--; |
| 377 | kfree(irq_d); |
| 378 | } |
| 379 | |
| 380 | static int qpnpint_irq_domain_map(struct irq_domain *d, |
| 381 | unsigned int virq, irq_hw_number_t hwirq) |
| 382 | { |
| 383 | struct q_chip_data *chip_d = d->host_data; |
| 384 | struct q_irq_data *irq_d; |
| 385 | int rc; |
| 386 | |
| 387 | pr_debug("hwirq = %lu\n", hwirq); |
| 388 | |
| 389 | if (hwirq < 0 || hwirq >= 32768) { |
| 390 | pr_err("hwirq %lu out of bounds\n", hwirq); |
| 391 | return -EINVAL; |
| 392 | } |
| 393 | |
| 394 | irq_radix_revmap_insert(d, virq, hwirq); |
| 395 | |
| 396 | irq_d = qpnpint_alloc_irq_data(chip_d, hwirq); |
| 397 | if (IS_ERR(irq_d)) { |
| 398 | pr_err("failed to alloc irq data for hwirq %lu\n", hwirq); |
| 399 | return PTR_ERR(irq_d); |
| 400 | } |
| 401 | |
| 402 | rc = qpnpint_init_irq_data(chip_d, irq_d, hwirq); |
| 403 | if (rc) { |
| 404 | pr_err("failed to init irq data for hwirq %lu\n", hwirq); |
| 405 | goto map_err; |
| 406 | } |
| 407 | |
| 408 | irq_set_chip_and_handler(virq, |
| 409 | &qpnpint_chip, |
| 410 | handle_level_irq); |
| 411 | irq_set_chip_data(virq, irq_d); |
| 412 | #ifdef CONFIG_ARM |
| 413 | set_irq_flags(virq, IRQF_VALID); |
| 414 | #else |
| 415 | irq_set_noprobe(virq); |
| 416 | #endif |
| 417 | return 0; |
| 418 | |
| 419 | map_err: |
| 420 | qpnpint_free_irq_data(irq_d); |
| 421 | return rc; |
| 422 | } |
| 423 | |
/* irq_domain .unmap: release the per-irq data attached to @virq. */
void qpnpint_irq_domain_unmap(struct irq_domain *d, unsigned int virq)
{
	struct q_irq_data *irq_d = irq_get_chip_data(virq);

	if (!WARN_ON(!irq_d))
		qpnpint_free_irq_data(irq_d);
}
| 433 | |
/* irq_domain operations for the qpnp interrupt tree domain created in
 * qpnpint_of_init(); xlate consumes the 3-cell <slave per irq> binding.
 */
const struct irq_domain_ops qpnpint_irq_domain_ops = {
	.map = qpnpint_irq_domain_map,
	.unmap = qpnpint_irq_domain_unmap,
	.xlate = qpnpint_irq_domain_dt_translate,
};
| 439 | |
Michael Bohan | bb6b30f | 2012-06-01 13:33:51 -0700 | [diff] [blame] | 440 | int qpnpint_register_controller(struct device_node *node, |
| 441 | struct spmi_controller *ctrl, |
Michael Bohan | 115cf65 | 2012-01-05 14:32:59 -0800 | [diff] [blame] | 442 | struct qpnp_local_int *li_cb) |
| 443 | { |
Michael Bohan | bb6b30f | 2012-06-01 13:33:51 -0700 | [diff] [blame] | 444 | struct q_chip_data *chip_d; |
Michael Bohan | 115cf65 | 2012-01-05 14:32:59 -0800 | [diff] [blame] | 445 | |
Michael Bohan | bb6b30f | 2012-06-01 13:33:51 -0700 | [diff] [blame] | 446 | if (!node || !ctrl || ctrl->nr >= QPNPINT_MAX_BUSSES) |
| 447 | return -EINVAL; |
| 448 | |
| 449 | list_for_each_entry(chip_d, &qpnpint_chips, list) |
| 450 | if (node == chip_d->domain->of_node) { |
| 451 | chip_d->cb = *li_cb; |
| 452 | chip_d->spmi_ctrl = ctrl; |
| 453 | chip_lookup[ctrl->nr] = chip_d; |
| 454 | return 0; |
| 455 | } |
| 456 | |
| 457 | return -ENOENT; |
Michael Bohan | 115cf65 | 2012-01-05 14:32:59 -0800 | [diff] [blame] | 458 | } |
| 459 | EXPORT_SYMBOL(qpnpint_register_controller); |
| 460 | |
| 461 | int qpnpint_handle_irq(struct spmi_controller *spmi_ctrl, |
| 462 | struct qpnp_irq_spec *spec) |
| 463 | { |
| 464 | struct irq_domain *domain; |
| 465 | unsigned long hwirq, busno; |
| 466 | int irq; |
| 467 | |
| 468 | pr_debug("spec slave = %u per = %u irq = %u\n", |
| 469 | spec->slave, spec->per, spec->irq); |
| 470 | |
Michael Bohan | 115cf65 | 2012-01-05 14:32:59 -0800 | [diff] [blame] | 471 | busno = spmi_ctrl->nr; |
Michael Bohan | bb6b30f | 2012-06-01 13:33:51 -0700 | [diff] [blame] | 472 | if (!spec || !spmi_ctrl || busno >= QPNPINT_MAX_BUSSES) |
Michael Bohan | 115cf65 | 2012-01-05 14:32:59 -0800 | [diff] [blame] | 473 | return -EINVAL; |
| 474 | |
| 475 | hwirq = qpnpint_encode_hwirq(spec); |
| 476 | if (hwirq < 0) { |
Michael Bohan | bb6b30f | 2012-06-01 13:33:51 -0700 | [diff] [blame] | 477 | pr_err("invalid irq spec passed\n"); |
Michael Bohan | 115cf65 | 2012-01-05 14:32:59 -0800 | [diff] [blame] | 478 | return -EINVAL; |
| 479 | } |
| 480 | |
Michael Bohan | bb6b30f | 2012-06-01 13:33:51 -0700 | [diff] [blame] | 481 | domain = chip_lookup[busno]->domain; |
| 482 | irq = irq_radix_revmap_lookup(domain, hwirq); |
Michael Bohan | 115cf65 | 2012-01-05 14:32:59 -0800 | [diff] [blame] | 483 | |
| 484 | generic_handle_irq(irq); |
| 485 | |
| 486 | return 0; |
| 487 | } |
| 488 | EXPORT_SYMBOL(qpnpint_handle_irq); |
| 489 | |
Michael Bohan | 115cf65 | 2012-01-05 14:32:59 -0800 | [diff] [blame] | 490 | int __init qpnpint_of_init(struct device_node *node, struct device_node *parent) |
| 491 | { |
Michael Bohan | bb6b30f | 2012-06-01 13:33:51 -0700 | [diff] [blame] | 492 | struct q_chip_data *chip_d; |
| 493 | |
| 494 | chip_d = kzalloc(sizeof(struct q_chip_data), GFP_KERNEL); |
| 495 | if (!chip_d) |
| 496 | return -ENOMEM; |
| 497 | |
| 498 | chip_d->domain = irq_domain_add_tree(node, |
| 499 | &qpnpint_irq_domain_ops, chip_d); |
| 500 | if (!chip_d->domain) { |
| 501 | pr_err("Unable to allocate irq_domain\n"); |
| 502 | kfree(chip_d); |
| 503 | return -ENOMEM; |
| 504 | } |
Michael Bohan | 115cf65 | 2012-01-05 14:32:59 -0800 | [diff] [blame] | 505 | |
| 506 | INIT_RADIX_TREE(&chip_d->per_tree, GFP_ATOMIC); |
Michael Bohan | bb6b30f | 2012-06-01 13:33:51 -0700 | [diff] [blame] | 507 | list_add(&chip_d->list, &qpnpint_chips); |
Michael Bohan | 115cf65 | 2012-01-05 14:32:59 -0800 | [diff] [blame] | 508 | |
| 509 | return 0; |
| 510 | } |
| 511 | EXPORT_SYMBOL(qpnpint_of_init); |