blob: aaf599a320b3eeb949137118511cf338460b5186 [file] [log] [blame]
Mark Brownf8beab22011-10-28 23:50:49 +02001/*
2 * regmap based irq_chip
3 *
4 * Copyright 2011 Wolfson Microelectronics plc
5 *
6 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/export.h>
Paul Gortmaker51990e82012-01-22 11:23:42 -050014#include <linux/device.h>
Mark Brownf8beab22011-10-28 23:50:49 +020015#include <linux/regmap.h>
16#include <linux/irq.h>
17#include <linux/interrupt.h>
Mark Brown4af8be62012-05-13 10:59:56 +010018#include <linux/irqdomain.h>
Mark Brown0c00c502012-07-24 15:41:19 +010019#include <linux/pm_runtime.h>
Mark Brownf8beab22011-10-28 23:50:49 +020020#include <linux/slab.h>
21
22#include "internal.h"
23
/*
 * Runtime state for one regmap-based interrupt controller instance.
 * Allocated by regmap_add_irq_chip() and freed by regmap_del_irq_chip().
 */
struct regmap_irq_chip_data {
	struct mutex lock;		/* serialises mask/wake updates; taken via irq_bus_lock */
	struct irq_chip irq_chip;	/* per-instance copy of regmap_irq_chip template */

	struct regmap *map;		/* register map used for all I/O */
	const struct regmap_irq_chip *chip;	/* static description from the driver */

	int irq_base;			/* first Linux IRQ for legacy domains, 0 otherwise */
	struct irq_domain *domain;	/* hwirq -> virq mapping */

	int irq;			/* primary (chip-level) interrupt */
	int wake_count;			/* pending wake enables (+) / disables (-) to push to parent */

	void *status_reg_buf;		/* raw bulk-read buffer, val_bytes * num_regs; NULL if unused */
	unsigned int *status_buf;	/* decoded status, one word per register */
	unsigned int *mask_buf;		/* current mask state, written back on bus unlock */
	unsigned int *mask_buf_def;	/* union of all IRQ mask bits per register */
	unsigned int *wake_buf;		/* wake enable state; NULL when chip has no wake_base */

	unsigned int irq_reg_stride;	/* register spacing, defaults to 1 */
};
45
46static inline const
47struct regmap_irq *irq_to_regmap_irq(struct regmap_irq_chip_data *data,
48 int irq)
49{
Mark Brown4af8be62012-05-13 10:59:56 +010050 return &data->chip->irqs[irq];
Mark Brownf8beab22011-10-28 23:50:49 +020051}
52
53static void regmap_irq_lock(struct irq_data *data)
54{
55 struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
56
57 mutex_lock(&d->lock);
58}
59
/*
 * irq_bus_sync_unlock callback: push any mask and wake changes accumulated
 * under the bus lock out to the hardware, then release the mutex.
 */
static void regmap_irq_sync_unlock(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	int i, ret;
	u32 reg;

	/* Resume the device for the register writes below if required */
	if (d->chip->runtime_pm) {
		ret = pm_runtime_get_sync(map->dev);
		if (ret < 0)
			dev_err(map->dev, "IRQ sync failed to resume: %d\n",
				ret);
	}

	/*
	 * If there's been a change in the mask write it back to the
	 * hardware. We rely on the use of the regmap core cache to
	 * suppress pointless writes.
	 */
	for (i = 0; i < d->chip->num_regs; i++) {
		reg = d->chip->mask_base +
			(i * map->reg_stride * d->irq_reg_stride);
		/* mask_invert chips use set bits to enable rather than mask */
		if (d->chip->mask_invert)
			ret = regmap_update_bits(d->map, reg,
					 d->mask_buf_def[i], ~d->mask_buf[i]);
		else
			ret = regmap_update_bits(d->map, reg,
					 d->mask_buf_def[i], d->mask_buf[i]);
		if (ret != 0)
			dev_err(d->map->dev, "Failed to sync masks in %x\n",
				reg);
	}

	if (d->chip->runtime_pm)
		pm_runtime_put(map->dev);

	/*
	 * If we've changed our wakeup count propagate it to the parent.
	 * wake_count is a net delta, so issue one irq_set_irq_wake() per
	 * outstanding enable or disable to keep the parent's refcount right.
	 */
	if (d->wake_count < 0)
		for (i = d->wake_count; i < 0; i++)
			irq_set_irq_wake(d->irq, 0);
	else if (d->wake_count > 0)
		for (i = 0; i < d->wake_count; i++)
			irq_set_irq_wake(d->irq, 1);

	d->wake_count = 0;

	mutex_unlock(&d->lock);
}
108
109static void regmap_irq_enable(struct irq_data *data)
110{
111 struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
Stephen Warren56806552012-04-10 23:37:22 -0600112 struct regmap *map = d->map;
Mark Brown4af8be62012-05-13 10:59:56 +0100113 const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
Mark Brownf8beab22011-10-28 23:50:49 +0200114
Stephen Warrenf01ee602012-04-09 13:40:24 -0600115 d->mask_buf[irq_data->reg_offset / map->reg_stride] &= ~irq_data->mask;
Mark Brownf8beab22011-10-28 23:50:49 +0200116}
117
118static void regmap_irq_disable(struct irq_data *data)
119{
120 struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
Stephen Warren56806552012-04-10 23:37:22 -0600121 struct regmap *map = d->map;
Mark Brown4af8be62012-05-13 10:59:56 +0100122 const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
Mark Brownf8beab22011-10-28 23:50:49 +0200123
Stephen Warrenf01ee602012-04-09 13:40:24 -0600124 d->mask_buf[irq_data->reg_offset / map->reg_stride] |= irq_data->mask;
Mark Brownf8beab22011-10-28 23:50:49 +0200125}
126
Mark Browna43fd502012-06-05 14:34:03 +0100127static int regmap_irq_set_wake(struct irq_data *data, unsigned int on)
128{
129 struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
130 struct regmap *map = d->map;
131 const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
132
Mark Browna43fd502012-06-05 14:34:03 +0100133 if (on) {
Laxman Dewangan55ac85e2012-12-19 19:42:28 +0530134 if (d->wake_buf)
135 d->wake_buf[irq_data->reg_offset / map->reg_stride]
136 &= ~irq_data->mask;
Mark Browna43fd502012-06-05 14:34:03 +0100137 d->wake_count++;
138 } else {
Laxman Dewangan55ac85e2012-12-19 19:42:28 +0530139 if (d->wake_buf)
140 d->wake_buf[irq_data->reg_offset / map->reg_stride]
141 |= irq_data->mask;
Mark Browna43fd502012-06-05 14:34:03 +0100142 d->wake_count--;
143 }
144
145 return 0;
146}
147
/*
 * Template irq_chip copied into each regmap_irq_chip_data so the name can
 * be set per instance.  All register I/O is deferred to the bus unlock
 * callback, so enable/disable just update the shadow buffers.
 */
static const struct irq_chip regmap_irq_chip = {
	.irq_bus_lock		= regmap_irq_lock,
	.irq_bus_sync_unlock	= regmap_irq_sync_unlock,
	.irq_disable		= regmap_irq_disable,
	.irq_enable		= regmap_irq_enable,
	.irq_set_wake		= regmap_irq_set_wake,
};
155
156static irqreturn_t regmap_irq_thread(int irq, void *d)
157{
158 struct regmap_irq_chip_data *data = d;
Mark Brownb026ddb2012-05-31 21:01:46 +0100159 const struct regmap_irq_chip *chip = data->chip;
Mark Brownf8beab22011-10-28 23:50:49 +0200160 struct regmap *map = data->map;
161 int ret, i;
Mark Brownd23511f2011-11-28 18:50:39 +0000162 bool handled = false;
Stephen Warren16032622012-07-27 13:01:54 -0600163 u32 reg;
Mark Brownf8beab22011-10-28 23:50:49 +0200164
Mark Brown0c00c502012-07-24 15:41:19 +0100165 if (chip->runtime_pm) {
166 ret = pm_runtime_get_sync(map->dev);
167 if (ret < 0) {
168 dev_err(map->dev, "IRQ thread failed to resume: %d\n",
169 ret);
170 return IRQ_NONE;
171 }
172 }
173
Mark Browna7440ea2013-01-03 14:27:15 +0000174 /*
175 * Read in the statuses, using a single bulk read if possible
176 * in order to reduce the I/O overheads.
177 */
178 if (!map->use_single_rw && map->reg_stride == 1 &&
179 data->irq_reg_stride == 1) {
180 u8 *buf8 = data->status_reg_buf;
181 u16 *buf16 = data->status_reg_buf;
182 u32 *buf32 = data->status_reg_buf;
Graeme Gregory022f926a2012-05-14 22:40:43 +0900183
Mark Browna7440ea2013-01-03 14:27:15 +0000184 BUG_ON(!data->status_reg_buf);
185
186 ret = regmap_bulk_read(map, chip->status_base,
187 data->status_reg_buf,
188 chip->num_regs);
Graeme Gregory022f926a2012-05-14 22:40:43 +0900189 if (ret != 0) {
190 dev_err(map->dev, "Failed to read IRQ status: %d\n",
Mark Browna7440ea2013-01-03 14:27:15 +0000191 ret);
Mark Brownf8beab22011-10-28 23:50:49 +0200192 return IRQ_NONE;
193 }
Mark Browna7440ea2013-01-03 14:27:15 +0000194
195 for (i = 0; i < data->chip->num_regs; i++) {
196 switch (map->format.val_bytes) {
197 case 1:
198 data->status_buf[i] = buf8[i];
199 break;
200 case 2:
201 data->status_buf[i] = buf16[i];
202 break;
203 case 4:
204 data->status_buf[i] = buf32[i];
205 break;
206 default:
207 BUG();
208 return IRQ_NONE;
209 }
210 }
211
212 } else {
213 for (i = 0; i < data->chip->num_regs; i++) {
214 ret = regmap_read(map, chip->status_base +
215 (i * map->reg_stride
216 * data->irq_reg_stride),
217 &data->status_buf[i]);
218
219 if (ret != 0) {
220 dev_err(map->dev,
221 "Failed to read IRQ status: %d\n",
222 ret);
223 if (chip->runtime_pm)
224 pm_runtime_put(map->dev);
225 return IRQ_NONE;
226 }
227 }
Mark Brownbbae92c2013-01-03 13:58:33 +0000228 }
Mark Brownf8beab22011-10-28 23:50:49 +0200229
Mark Brownbbae92c2013-01-03 13:58:33 +0000230 /*
231 * Ignore masked IRQs and ack if we need to; we ack early so
232 * there is no race between handling and acknowleding the
233 * interrupt. We assume that typically few of the interrupts
234 * will fire simultaneously so don't worry about overhead from
235 * doing a write per register.
236 */
237 for (i = 0; i < data->chip->num_regs; i++) {
Mark Brownf8beab22011-10-28 23:50:49 +0200238 data->status_buf[i] &= ~data->mask_buf[i];
239
240 if (data->status_buf[i] && chip->ack_base) {
Stephen Warren16032622012-07-27 13:01:54 -0600241 reg = chip->ack_base +
242 (i * map->reg_stride * data->irq_reg_stride);
243 ret = regmap_write(map, reg, data->status_buf[i]);
Mark Brownf8beab22011-10-28 23:50:49 +0200244 if (ret != 0)
245 dev_err(map->dev, "Failed to ack 0x%x: %d\n",
Stephen Warren16032622012-07-27 13:01:54 -0600246 reg, ret);
Mark Brownf8beab22011-10-28 23:50:49 +0200247 }
248 }
249
250 for (i = 0; i < chip->num_irqs; i++) {
Stephen Warrenf01ee602012-04-09 13:40:24 -0600251 if (data->status_buf[chip->irqs[i].reg_offset /
252 map->reg_stride] & chip->irqs[i].mask) {
Mark Brown4af8be62012-05-13 10:59:56 +0100253 handle_nested_irq(irq_find_mapping(data->domain, i));
Mark Brownd23511f2011-11-28 18:50:39 +0000254 handled = true;
Mark Brownf8beab22011-10-28 23:50:49 +0200255 }
256 }
257
Mark Brown0c00c502012-07-24 15:41:19 +0100258 if (chip->runtime_pm)
259 pm_runtime_put(map->dev);
260
Mark Brownd23511f2011-11-28 18:50:39 +0000261 if (handled)
262 return IRQ_HANDLED;
263 else
264 return IRQ_NONE;
Mark Brownf8beab22011-10-28 23:50:49 +0200265}
266
/*
 * irq_domain map callback: wire a freshly created virq up to this
 * controller's irq_chip and mark it as a nested-threaded interrupt
 * (handlers run from our threaded primary handler, not hard IRQ context).
 */
static int regmap_irq_map(struct irq_domain *h, unsigned int virq,
			  irq_hw_number_t hw)
{
	struct regmap_irq_chip_data *data = h->host_data;

	irq_set_chip_data(virq, data);
	irq_set_chip(virq, &data->irq_chip);
	irq_set_nested_thread(virq, 1);

	/* ARM needs us to explicitly flag the IRQ as valid
	 * and will set them noprobe when we do so. */
#ifdef CONFIG_ARM
	set_irq_flags(virq, IRQF_VALID);
#else
	irq_set_noprobe(virq);
#endif

	return 0;
}
286
287static struct irq_domain_ops regmap_domain_ops = {
288 .map = regmap_irq_map,
289 .xlate = irq_domain_xlate_twocell,
290};
291
/**
 * regmap_add_irq_chip(): Use standard regmap IRQ controller handling
 *
 * map:       The regmap for the device.
 * irq:       The IRQ the device uses to signal interrupts
 * irq_flags: The IRQF_ flags to use for the primary interrupt.
 * chip:      Configuration for the interrupt controller.
 * data:      Runtime data structure for the controller, allocated on success
 *
 * Returns 0 on success or an errno on failure.
 *
 * In order for this to be efficient the chip really should use a
 * register cache.  The chip driver is responsible for restoring the
 * register values used by the IRQ controller over suspend and resume.
 */
int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
			int irq_base, const struct regmap_irq_chip *chip,
			struct regmap_irq_chip_data **data)
{
	struct regmap_irq_chip_data *d;
	int i;
	int ret = -ENOMEM;
	u32 reg;

	/* Reject descriptors whose register offsets don't fit the map */
	for (i = 0; i < chip->num_irqs; i++) {
		if (chip->irqs[i].reg_offset % map->reg_stride)
			return -EINVAL;
		if (chip->irqs[i].reg_offset / map->reg_stride >=
		    chip->num_regs)
			return -EINVAL;
	}

	/* Non-zero irq_base requests a legacy (fixed) IRQ range */
	if (irq_base) {
		irq_base = irq_alloc_descs(irq_base, 0, chip->num_irqs, 0);
		if (irq_base < 0) {
			dev_warn(map->dev, "Failed to allocate IRQs: %d\n",
				 irq_base);
			return irq_base;
		}
	}

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	*data = d;

	d->status_buf = kzalloc(sizeof(unsigned int) * chip->num_regs,
				GFP_KERNEL);
	if (!d->status_buf)
		goto err_alloc;

	d->mask_buf = kzalloc(sizeof(unsigned int) * chip->num_regs,
			      GFP_KERNEL);
	if (!d->mask_buf)
		goto err_alloc;

	d->mask_buf_def = kzalloc(sizeof(unsigned int) * chip->num_regs,
				  GFP_KERNEL);
	if (!d->mask_buf_def)
		goto err_alloc;

	/* Wake buffer only needed when the chip has wake registers */
	if (chip->wake_base) {
		d->wake_buf = kzalloc(sizeof(unsigned int) * chip->num_regs,
				      GFP_KERNEL);
		if (!d->wake_buf)
			goto err_alloc;
	}

	/* Per-instance irq_chip so the name can differ between chips */
	d->irq_chip = regmap_irq_chip;
	d->irq_chip.name = chip->name;
	d->irq = irq;
	d->map = map;
	d->chip = chip;
	d->irq_base = irq_base;

	if (chip->irq_reg_stride)
		d->irq_reg_stride = chip->irq_reg_stride;
	else
		d->irq_reg_stride = 1;

	/*
	 * Raw buffer for bulk status reads; only allocated when the
	 * handler's bulk-read fast path (same condition) can be used.
	 */
	if (!map->use_single_rw && map->reg_stride == 1 &&
	    d->irq_reg_stride == 1) {
		d->status_reg_buf = kmalloc(map->format.val_bytes *
					    chip->num_regs, GFP_KERNEL);
		if (!d->status_reg_buf)
			goto err_alloc;
	}

	mutex_init(&d->lock);

	/* Collect the union of all IRQ mask bits for each register */
	for (i = 0; i < chip->num_irqs; i++)
		d->mask_buf_def[chip->irqs[i].reg_offset / map->reg_stride]
			|= chip->irqs[i].mask;

	/* Mask all the interrupts by default */
	for (i = 0; i < chip->num_regs; i++) {
		d->mask_buf[i] = d->mask_buf_def[i];
		reg = chip->mask_base +
			(i * map->reg_stride * d->irq_reg_stride);
		if (chip->mask_invert)
			ret = regmap_update_bits(map, reg,
					 d->mask_buf[i], ~d->mask_buf[i]);
		else
			ret = regmap_update_bits(map, reg,
					 d->mask_buf[i], d->mask_buf[i]);
		if (ret != 0) {
			dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
				reg, ret);
			goto err_alloc;
		}
	}

	/* Wake is disabled by default */
	if (d->wake_buf) {
		for (i = 0; i < chip->num_regs; i++) {
			d->wake_buf[i] = d->mask_buf_def[i];
			reg = chip->wake_base +
				(i * map->reg_stride * d->irq_reg_stride);
			ret = regmap_update_bits(map, reg, d->wake_buf[i],
						 d->wake_buf[i]);
			if (ret != 0) {
				dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
					reg, ret);
				goto err_alloc;
			}
		}
	}

	if (irq_base)
		d->domain = irq_domain_add_legacy(map->dev->of_node,
						  chip->num_irqs, irq_base, 0,
						  &regmap_domain_ops, d);
	else
		d->domain = irq_domain_add_linear(map->dev->of_node,
						  chip->num_irqs,
						  &regmap_domain_ops, d);
	if (!d->domain) {
		dev_err(map->dev, "Failed to create IRQ domain\n");
		ret = -ENOMEM;
		goto err_alloc;
	}

	/* No hard-IRQ handler: all work happens in the thread */
	ret = request_threaded_irq(irq, NULL, regmap_irq_thread, irq_flags,
				   chip->name, d);
	if (ret != 0) {
		dev_err(map->dev, "Failed to request IRQ %d: %d\n", irq, ret);
		goto err_domain;
	}

	return 0;

err_domain:
	/* Should really dispose of the domain but... */
err_alloc:
	kfree(d->wake_buf);
	kfree(d->mask_buf_def);
	kfree(d->mask_buf);
	kfree(d->status_buf);
	kfree(d->status_reg_buf);
	kfree(d);
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_add_irq_chip);
456
457/**
458 * regmap_del_irq_chip(): Stop interrupt handling for a regmap IRQ chip
459 *
460 * @irq: Primary IRQ for the device
461 * @d: regmap_irq_chip_data allocated by regmap_add_irq_chip()
462 */
463void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
464{
465 if (!d)
466 return;
467
468 free_irq(irq, d);
Mark Brown4af8be62012-05-13 10:59:56 +0100469 /* We should unmap the domain but... */
Mark Browna43fd502012-06-05 14:34:03 +0100470 kfree(d->wake_buf);
Mark Brownf8beab22011-10-28 23:50:49 +0200471 kfree(d->mask_buf_def);
472 kfree(d->mask_buf);
Mark Browna7440ea2013-01-03 14:27:15 +0000473 kfree(d->status_reg_buf);
Mark Brownf8beab22011-10-28 23:50:49 +0200474 kfree(d->status_buf);
475 kfree(d);
476}
477EXPORT_SYMBOL_GPL(regmap_del_irq_chip);
Mark Brown209a6002011-12-05 16:10:15 +0000478
/**
 * regmap_irq_chip_get_base(): Retrieve interrupt base for a regmap IRQ chip
 *
 * Useful for drivers to request their own IRQs.
 *
 * @data: regmap_irq controller to operate on.
 */
int regmap_irq_chip_get_base(struct regmap_irq_chip_data *data)
{
	/* Only meaningful for legacy domains; warn if none was allocated */
	WARN_ON(!data->irq_base);
	return data->irq_base;
}
EXPORT_SYMBOL_GPL(regmap_irq_chip_get_base);
Mark Brown4af8be62012-05-13 10:59:56 +0100492
493/**
494 * regmap_irq_get_virq(): Map an interrupt on a chip to a virtual IRQ
495 *
496 * Useful for drivers to request their own IRQs.
497 *
498 * @data: regmap_irq controller to operate on.
499 * @irq: index of the interrupt requested in the chip IRQs
500 */
501int regmap_irq_get_virq(struct regmap_irq_chip_data *data, int irq)
502{
Mark Brownbfd6185d2012-06-05 14:29:36 +0100503 /* Handle holes in the IRQ list */
504 if (!data->chip->irqs[irq].mask)
505 return -EINVAL;
506
Mark Brown4af8be62012-05-13 10:59:56 +0100507 return irq_create_mapping(data->domain, irq);
508}
509EXPORT_SYMBOL_GPL(regmap_irq_get_virq);
Mark Brown90f790d2012-08-20 21:45:05 +0100510
511/**
512 * regmap_irq_get_domain(): Retrieve the irq_domain for the chip
513 *
514 * Useful for drivers to request their own IRQs and for integration
515 * with subsystems. For ease of integration NULL is accepted as a
516 * domain, allowing devices to just call this even if no domain is
517 * allocated.
518 *
519 * @data: regmap_irq controller to operate on.
520 */
521struct irq_domain *regmap_irq_get_domain(struct regmap_irq_chip_data *data)
522{
523 if (data)
524 return data->domain;
525 else
526 return NULL;
527}
528EXPORT_SYMBOL_GPL(regmap_irq_get_domain);