/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/irq.h>
#include <linux/mfd/core.h>
#include <linux/regmap.h>
#include <linux/delay.h>
#include <linux/irqdomain.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <soc/qcom/pm.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>
#include <linux/mfd/wcd9xxx/wcd9xxx_registers.h>
#include "core.h"
#include "wcd9xxx-irq.h"

#define BYTE_BIT_MASK(nr)       (1UL << ((nr) % BITS_PER_BYTE))
#define BIT_BYTE(nr)            ((nr) / BITS_PER_BYTE)
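
/*
 * Worked example (illustrative only): codec interrupt 10 lives in
 * status/mask register byte BIT_BYTE(10) = 10 / 8 = 1, at bit
 * BYTE_BIT_MASK(10) = 1 << (10 % 8) = 0x04; each 8-bit register thus
 * carries eight interrupts, one per bit.
 */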

#define WCD9XXX_SYSTEM_RESUME_TIMEOUT_MS 100

#ifndef NO_IRQ
#define NO_IRQ  (-1)
#endif

#ifdef CONFIG_OF
struct wcd9xxx_irq_drv_data {
        struct irq_domain *domain;
        int irq;
};
#endif

static int virq_to_phyirq(
                struct wcd9xxx_core_resource *wcd9xxx_res, int virq);
static int phyirq_to_virq(
                struct wcd9xxx_core_resource *wcd9xxx_res, int irq);
static unsigned int wcd9xxx_irq_get_upstream_irq(
                struct wcd9xxx_core_resource *wcd9xxx_res);
static void wcd9xxx_irq_put_downstream_irq(
                struct wcd9xxx_core_resource *wcd9xxx_res);
static void wcd9xxx_irq_put_upstream_irq(
                struct wcd9xxx_core_resource *wcd9xxx_res);
static int wcd9xxx_map_irq(
                struct wcd9xxx_core_resource *wcd9xxx_res, int irq);

static void wcd9xxx_irq_lock(struct irq_data *data)
{
        struct wcd9xxx_core_resource *wcd9xxx_res =
                        irq_data_get_irq_chip_data(data);

        mutex_lock(&wcd9xxx_res->irq_lock);
}

static void wcd9xxx_irq_sync_unlock(struct irq_data *data)
{
        struct wcd9xxx_core_resource *wcd9xxx_res =
                        irq_data_get_irq_chip_data(data);
        int i;

        if ((ARRAY_SIZE(wcd9xxx_res->irq_masks_cur) >
             WCD9XXX_MAX_IRQ_REGS) ||
            (ARRAY_SIZE(wcd9xxx_res->irq_masks_cache) >
             WCD9XXX_MAX_IRQ_REGS)) {
                pr_err("%s: Array size out of bounds\n", __func__);
                return;
        }
        if (!wcd9xxx_res->wcd_core_regmap) {
                pr_err("%s: Codec core regmap not defined\n", __func__);
                return;
        }

        for (i = 0; i < ARRAY_SIZE(wcd9xxx_res->irq_masks_cur); i++) {
                /*
                 * If there's been a change in the mask, write it back
                 * to the hardware.
                 */
                if (wcd9xxx_res->irq_masks_cur[i] !=
                    wcd9xxx_res->irq_masks_cache[i]) {
                        wcd9xxx_res->irq_masks_cache[i] =
                                        wcd9xxx_res->irq_masks_cur[i];
                        regmap_write(wcd9xxx_res->wcd_core_regmap,
                                wcd9xxx_res->intr_reg[WCD9XXX_INTR_MASK_BASE] + i,
                                wcd9xxx_res->irq_masks_cur[i]);
                }
        }

        mutex_unlock(&wcd9xxx_res->irq_lock);
}

static void wcd9xxx_irq_enable(struct irq_data *data)
{
        struct wcd9xxx_core_resource *wcd9xxx_res =
                        irq_data_get_irq_chip_data(data);
        int wcd9xxx_irq = virq_to_phyirq(wcd9xxx_res, data->irq);
        int byte = BIT_BYTE(wcd9xxx_irq);
        int size = ARRAY_SIZE(wcd9xxx_res->irq_masks_cur);

        if ((byte < size) && (byte >= 0)) {
                wcd9xxx_res->irq_masks_cur[byte] &=
                                ~(BYTE_BIT_MASK(wcd9xxx_irq));
        } else {
                pr_err("%s: Array size is %d but index is %d: Out of range\n",
                       __func__, size, byte);
        }
}

static void wcd9xxx_irq_disable(struct irq_data *data)
{
        struct wcd9xxx_core_resource *wcd9xxx_res =
                        irq_data_get_irq_chip_data(data);
        int wcd9xxx_irq = virq_to_phyirq(wcd9xxx_res, data->irq);
        int byte = BIT_BYTE(wcd9xxx_irq);
        int size = ARRAY_SIZE(wcd9xxx_res->irq_masks_cur);

        if ((byte < size) && (byte >= 0)) {
                wcd9xxx_res->irq_masks_cur[byte] |=
                                BYTE_BIT_MASK(wcd9xxx_irq);
        } else {
                pr_err("%s: Array size is %d but index is %d: Out of range\n",
                       __func__, size, byte);
        }
}

static void wcd9xxx_irq_ack(struct irq_data *data)
{
        int wcd9xxx_irq = 0;
        struct wcd9xxx_core_resource *wcd9xxx_res =
                        irq_data_get_irq_chip_data(data);

        if (wcd9xxx_res == NULL) {
                pr_err("%s: wcd9xxx_res is NULL\n", __func__);
                return;
        }
        wcd9xxx_irq = virq_to_phyirq(wcd9xxx_res, data->irq);
        pr_debug("%s: IRQ_ACK called for WCD9XXX IRQ: %d\n",
                 __func__, wcd9xxx_irq);
}

static void wcd9xxx_irq_mask(struct irq_data *d)
{
        /*
         * No-op, but still required: the IRQ core calls irq_mask()
         * without checking the callback for NULL.
         */
}

static struct irq_chip wcd9xxx_irq_chip = {
        .name = "wcd9xxx",
        .irq_bus_lock = wcd9xxx_irq_lock,
        .irq_bus_sync_unlock = wcd9xxx_irq_sync_unlock,
        .irq_disable = wcd9xxx_irq_disable,
        .irq_enable = wcd9xxx_irq_enable,
        .irq_mask = wcd9xxx_irq_mask,
        .irq_ack = wcd9xxx_irq_ack,
};
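
/*
 * Note on the irq_chip above: wcd9xxx_irq_enable()/wcd9xxx_irq_disable()
 * only edit the cached mask (irq_masks_cur) while the bus lock is held;
 * the deltas are written to the codec in one pass by
 * wcd9xxx_irq_sync_unlock() when the lock is released, so traffic on
 * the slow control bus is batched.
 */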

bool wcd9xxx_lock_sleep(struct wcd9xxx_core_resource *wcd9xxx_res)
{
        enum wcd9xxx_pm_state os;

        /*
         * wcd9xxx_{lock,unlock}_sleep() are called mostly by
         * wcd9xxx_irq_thread() and its subroutines, but btn0_lpress_fn
         * is not one of those subroutines and can race with
         * wcd9xxx_irq_thread(), so wlock_holders must be protected by
         * the mutex.
         *
         * If the system has not resumed, simply return false so the
         * codec driver's IRQ handler can return without handling the
         * IRQ. As the interrupt line is still active, the codec will
         * raise another IRQ to retry shortly.
         */
        mutex_lock(&wcd9xxx_res->pm_lock);
        if (wcd9xxx_res->wlock_holders++ == 0) {
                pr_debug("%s: holding wake lock\n", __func__);
                pm_qos_update_request(&wcd9xxx_res->pm_qos_req,
                                      msm_cpuidle_get_deep_idle_latency());
                pm_stay_awake(wcd9xxx_res->dev);
        }
        mutex_unlock(&wcd9xxx_res->pm_lock);

        if (!wait_event_timeout(wcd9xxx_res->pm_wq,
                                ((os = wcd9xxx_pm_cmpxchg(wcd9xxx_res,
                                                  WCD9XXX_PM_SLEEPABLE,
                                                  WCD9XXX_PM_AWAKE)) ==
                                 WCD9XXX_PM_SLEEPABLE ||
                                 (os == WCD9XXX_PM_AWAKE)),
                                msecs_to_jiffies(
                                        WCD9XXX_SYSTEM_RESUME_TIMEOUT_MS))) {
                pr_warn("%s: system didn't resume within %dms, s %d, w %d\n",
                        __func__,
                        WCD9XXX_SYSTEM_RESUME_TIMEOUT_MS, wcd9xxx_res->pm_state,
                        wcd9xxx_res->wlock_holders);
                wcd9xxx_unlock_sleep(wcd9xxx_res);
                return false;
        }
        wake_up_all(&wcd9xxx_res->pm_wq);
        return true;
}
EXPORT_SYMBOL(wcd9xxx_lock_sleep);

void wcd9xxx_unlock_sleep(struct wcd9xxx_core_resource *wcd9xxx_res)
{
        mutex_lock(&wcd9xxx_res->pm_lock);
        if (--wcd9xxx_res->wlock_holders == 0) {
                pr_debug("%s: releasing wake lock pm_state %d -> %d\n",
                         __func__, wcd9xxx_res->pm_state, WCD9XXX_PM_SLEEPABLE);
                /*
                 * If wcd9xxx_lock_sleep() failed, pm_state would still
                 * be WCD9XXX_PM_ASLEEP; don't overwrite it.
                 */
                if (likely(wcd9xxx_res->pm_state == WCD9XXX_PM_AWAKE))
                        wcd9xxx_res->pm_state = WCD9XXX_PM_SLEEPABLE;
                pm_qos_update_request(&wcd9xxx_res->pm_qos_req,
                                      PM_QOS_DEFAULT_VALUE);
                pm_relax(wcd9xxx_res->dev);
        }
        mutex_unlock(&wcd9xxx_res->pm_lock);
        wake_up_all(&wcd9xxx_res->pm_wq);
}
EXPORT_SYMBOL(wcd9xxx_unlock_sleep);
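
/*
 * Callers pair the two helpers around interrupt service work; a
 * minimal sketch (the handler name is hypothetical, not from this
 * file):
 *
 *	static irqreturn_t some_codec_handler(int irq, void *data)
 *	{
 *		struct wcd9xxx_core_resource *res = data;
 *
 *		if (!wcd9xxx_lock_sleep(res))
 *			return IRQ_NONE;
 *		(service the interrupt)
 *		wcd9xxx_unlock_sleep(res);
 *		return IRQ_HANDLED;
 *	}
 */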

void wcd9xxx_nested_irq_lock(struct wcd9xxx_core_resource *wcd9xxx_res)
{
        mutex_lock(&wcd9xxx_res->nested_irq_lock);
}

void wcd9xxx_nested_irq_unlock(struct wcd9xxx_core_resource *wcd9xxx_res)
{
        mutex_unlock(&wcd9xxx_res->nested_irq_lock);
}

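/*
 * Dispatch one codec interrupt to its nested handler. When
 * irqdata->clear_first is set, the hardware status bit is cleared
 * before the nested handler runs; otherwise it is cleared afterwards.
 * On an I2C-connected codec, each clear must additionally be committed
 * via a write to the CLR_COMMIT register.
 */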
static void wcd9xxx_irq_dispatch(struct wcd9xxx_core_resource *wcd9xxx_res,
                                 struct intr_data *irqdata)
{
        int irqbit = irqdata->intr_num;

        if (!wcd9xxx_res->wcd_core_regmap) {
                pr_err("%s: codec core regmap not defined\n", __func__);
                return;
        }

        if (irqdata->clear_first) {
                wcd9xxx_nested_irq_lock(wcd9xxx_res);
                regmap_write(wcd9xxx_res->wcd_core_regmap,
                             wcd9xxx_res->intr_reg[WCD9XXX_INTR_CLEAR_BASE] +
                             BIT_BYTE(irqbit),
                             BYTE_BIT_MASK(irqbit));

                if (wcd9xxx_get_intf_type() == WCD9XXX_INTERFACE_TYPE_I2C)
                        regmap_write(wcd9xxx_res->wcd_core_regmap,
                                wcd9xxx_res->intr_reg[WCD9XXX_INTR_CLR_COMMIT],
                                0x02);
                handle_nested_irq(phyirq_to_virq(wcd9xxx_res, irqbit));
                wcd9xxx_nested_irq_unlock(wcd9xxx_res);
        } else {
                wcd9xxx_nested_irq_lock(wcd9xxx_res);
                handle_nested_irq(phyirq_to_virq(wcd9xxx_res, irqbit));
                regmap_write(wcd9xxx_res->wcd_core_regmap,
                             wcd9xxx_res->intr_reg[WCD9XXX_INTR_CLEAR_BASE] +
                             BIT_BYTE(irqbit),
                             BYTE_BIT_MASK(irqbit));
                if (wcd9xxx_get_intf_type() == WCD9XXX_INTERFACE_TYPE_I2C)
                        regmap_write(wcd9xxx_res->wcd_core_regmap,
                                wcd9xxx_res->intr_reg[WCD9XXX_INTR_CLR_COMMIT],
                                0x02);

                wcd9xxx_nested_irq_unlock(wcd9xxx_res);
        }
}

static irqreturn_t wcd9xxx_irq_thread(int irq, void *data)
{
        int ret;
        int i;
        struct intr_data irqdata;
        char linebuf[128];
        static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 1);
        struct wcd9xxx_core_resource *wcd9xxx_res = data;
        int num_irq_regs = wcd9xxx_res->num_irq_regs;
        u8 status[4], status1[4] = {0}, unmask_status[4] = {0};

        if (unlikely(wcd9xxx_lock_sleep(wcd9xxx_res) == false)) {
                dev_err(wcd9xxx_res->dev, "Failed to hold suspend\n");
                return IRQ_NONE;
        }

        if (!wcd9xxx_res->wcd_core_regmap) {
                dev_err(wcd9xxx_res->dev,
                        "%s: Codec core regmap not supplied\n", __func__);
                goto err_disable_irq;
        }

        memset(status, 0, sizeof(status));
        ret = regmap_bulk_read(wcd9xxx_res->wcd_core_regmap,
                        wcd9xxx_res->intr_reg[WCD9XXX_INTR_STATUS_BASE],
                        status, num_irq_regs);

        if (ret < 0) {
                dev_err(wcd9xxx_res->dev,
                        "Failed to read interrupt status: %d\n", ret);
                goto err_disable_irq;
        }
        /*
         * If status is all zeroes, return without clearing anything.
         * status holds: HW status minus masked interrupts
         * status1 holds: unhandled interrupts minus masked interrupts
         * unmask_status holds: unhandled interrupts, mask ignored
         */
        if (unlikely(!memcmp(status, status1, sizeof(status)))) {
                pr_debug("%s: status is 0\n", __func__);
                wcd9xxx_unlock_sleep(wcd9xxx_res);
                return IRQ_HANDLED;
        }

        /*
         * Copy status to unmask_status before masking, otherwise SW may
         * miss clearing a masked interrupt in a corner case.
         */
        memcpy(unmask_status, status, sizeof(unmask_status));

        /* Apply masking */
        for (i = 0; i < num_irq_regs; i++)
                status[i] &= ~wcd9xxx_res->irq_masks_cur[i];

        memcpy(status1, status, sizeof(status1));

        /*
         * Find out which interrupts were triggered and call each one's
         * handler function.
         *
         * The codec has only one hardware irq line, shared by all of
         * its internal interrupts, so the master irq handler may
         * dispatch several nested irq handlers in one pass, out of
         * arrival order. Dispatch them in the order maintained by the
         * interrupt table.
         */
        for (i = 0; i < wcd9xxx_res->intr_table_size; i++) {
                irqdata = wcd9xxx_res->intr_table[i];
                if (status[BIT_BYTE(irqdata.intr_num)] &
                    BYTE_BIT_MASK(irqdata.intr_num)) {
                        wcd9xxx_irq_dispatch(wcd9xxx_res, &irqdata);
                        status1[BIT_BYTE(irqdata.intr_num)] &=
                                        ~BYTE_BIT_MASK(irqdata.intr_num);
                        unmask_status[BIT_BYTE(irqdata.intr_num)] &=
                                        ~BYTE_BIT_MASK(irqdata.intr_num);
                }
        }

        /*
         * As a failsafe, clear any unhandled irq to prevent an
         * interrupt storm.
         * Note that an irq counts as unhandled only when no irq at all
         * was handled by a nested irq handler, since Taiko can route a
         * few irqs to the qdsp as their destination; the driver must
         * not clear pending irqs when some were handled while others
         * were not.
         */
        if (unlikely(!memcmp(status, status1, sizeof(status)))) {
                if (__ratelimit(&ratelimit)) {
                        pr_warn("%s: Unhandled irq found\n", __func__);
                        hex_dump_to_buffer(status, sizeof(status), 16, 1,
                                           linebuf, sizeof(linebuf), false);
                        pr_warn("%s: status0 : %s\n", __func__, linebuf);
                        hex_dump_to_buffer(status1, sizeof(status1), 16, 1,
                                           linebuf, sizeof(linebuf), false);
                        pr_warn("%s: status1 : %s\n", __func__, linebuf);
                }
                /*
                 * unmask_status contains the unhandled interrupts,
                 * hence clear all of them.
                 */
                ret = regmap_bulk_write(wcd9xxx_res->wcd_core_regmap,
                        wcd9xxx_res->intr_reg[WCD9XXX_INTR_CLEAR_BASE],
                        unmask_status, num_irq_regs);
                if (wcd9xxx_get_intf_type() == WCD9XXX_INTERFACE_TYPE_I2C)
                        regmap_write(wcd9xxx_res->wcd_core_regmap,
                                wcd9xxx_res->intr_reg[WCD9XXX_INTR_CLR_COMMIT],
                                0x02);
        }
        wcd9xxx_unlock_sleep(wcd9xxx_res);

        return IRQ_HANDLED;

err_disable_irq:
        dev_err(wcd9xxx_res->dev, "Disable irq %d\n", wcd9xxx_res->irq);

        disable_irq_wake(wcd9xxx_res->irq);
        disable_irq_nosync(wcd9xxx_res->irq);
        wcd9xxx_unlock_sleep(wcd9xxx_res);
        return IRQ_NONE;
}

/**
 * wcd9xxx_free_irq - free a codec interrupt requested earlier
 * @wcd9xxx_res: pointer to core resource
 * @irq: irq number
 * @data: data pointer
 */
void wcd9xxx_free_irq(struct wcd9xxx_core_resource *wcd9xxx_res,
                      int irq, void *data)
{
        free_irq(phyirq_to_virq(wcd9xxx_res, irq), data);
}
EXPORT_SYMBOL(wcd9xxx_free_irq);

/**
 * wcd9xxx_enable_irq - enable a codec interrupt
 * @wcd9xxx_res: pointer to core resource
 * @irq: irq number
 */
void wcd9xxx_enable_irq(struct wcd9xxx_core_resource *wcd9xxx_res, int irq)
{
        if (wcd9xxx_res->irq)
                enable_irq(phyirq_to_virq(wcd9xxx_res, irq));
}
EXPORT_SYMBOL(wcd9xxx_enable_irq);

/**
 * wcd9xxx_disable_irq - disable a codec interrupt without waiting
 * @wcd9xxx_res: pointer to core resource
 * @irq: irq number
 */
void wcd9xxx_disable_irq(struct wcd9xxx_core_resource *wcd9xxx_res, int irq)
{
        if (wcd9xxx_res->irq)
                disable_irq_nosync(phyirq_to_virq(wcd9xxx_res, irq));
}
EXPORT_SYMBOL(wcd9xxx_disable_irq);

/**
 * wcd9xxx_disable_irq_sync - disable a codec interrupt and wait for any
 * running handler to complete
 * @wcd9xxx_res: pointer to core resource
 * @irq: irq number
 */
void wcd9xxx_disable_irq_sync(struct wcd9xxx_core_resource *wcd9xxx_res,
                              int irq)
{
        if (wcd9xxx_res->irq)
                disable_irq(phyirq_to_virq(wcd9xxx_res, irq));
}
EXPORT_SYMBOL(wcd9xxx_disable_irq_sync);

static int wcd9xxx_irq_setup_downstream_irq(
                struct wcd9xxx_core_resource *wcd9xxx_res)
{
        int irq, virq, ret;

        pr_debug("%s: enter\n", __func__);

        for (irq = 0; irq < wcd9xxx_res->num_irqs; irq++) {
                /* Map OF irq */
                virq = wcd9xxx_map_irq(wcd9xxx_res, irq);
                pr_debug("%s: irq %d -> %d\n", __func__, irq, virq);
                if (virq == NO_IRQ) {
                        pr_err("%s: No interrupt specifier for irq %d\n",
                               __func__, irq);
                        return NO_IRQ;
                }

                ret = irq_set_chip_data(virq, wcd9xxx_res);
                if (ret) {
                        pr_err("%s: Failed to configure irq %d (%d)\n",
                               __func__, irq, ret);
                        return ret;
                }

                if (wcd9xxx_res->irq_level_high[irq])
                        irq_set_chip_and_handler(virq, &wcd9xxx_irq_chip,
                                                 handle_level_irq);
                else
                        irq_set_chip_and_handler(virq, &wcd9xxx_irq_chip,
                                                 handle_edge_irq);

                irq_set_nested_thread(virq, 1);
        }

        pr_debug("%s: leave\n", __func__);

        return 0;
}

/**
 * wcd9xxx_irq_init
 *
 * @wcd9xxx_res: pointer to core resource
 *
 * Returns 0 on success, appropriate error code otherwise
 */
int wcd9xxx_irq_init(struct wcd9xxx_core_resource *wcd9xxx_res)
{
        int i, ret;
        u8 irq_level[wcd9xxx_res->num_irq_regs];
        struct irq_domain *domain;
        struct device_node *pnode;

        mutex_init(&wcd9xxx_res->irq_lock);
        mutex_init(&wcd9xxx_res->nested_irq_lock);

        pnode = of_irq_find_parent(wcd9xxx_res->dev->of_node);
        if (unlikely(!pnode))
                return -EINVAL;

        domain = irq_find_host(pnode);
        if (unlikely(!domain))
                return -EINVAL;

        wcd9xxx_res->domain = domain;

        wcd9xxx_res->irq = wcd9xxx_irq_get_upstream_irq(wcd9xxx_res);
        if (!wcd9xxx_res->irq) {
                pr_warn("%s: irq driver is not yet initialized\n", __func__);
                mutex_destroy(&wcd9xxx_res->irq_lock);
                mutex_destroy(&wcd9xxx_res->nested_irq_lock);
                return -EPROBE_DEFER;
        }
        pr_debug("%s: probed irq %d\n", __func__, wcd9xxx_res->irq);

        /* Setup downstream IRQs */
        ret = wcd9xxx_irq_setup_downstream_irq(wcd9xxx_res);
        if (ret) {
                pr_err("%s: Failed to setup downstream IRQ\n", __func__);
                wcd9xxx_irq_put_upstream_irq(wcd9xxx_res);
                mutex_destroy(&wcd9xxx_res->irq_lock);
                mutex_destroy(&wcd9xxx_res->nested_irq_lock);
                return ret;
        }

        /*
         * Irq 0 is level triggered; all other wcd9xxx interrupts are
         * edge triggered.
         */
        wcd9xxx_res->irq_level_high[0] = true;

        /* Mask all the interrupts */
        memset(irq_level, 0, wcd9xxx_res->num_irq_regs);
        for (i = 0; i < wcd9xxx_res->num_irqs; i++) {
                wcd9xxx_res->irq_masks_cur[BIT_BYTE(i)] |= BYTE_BIT_MASK(i);
                wcd9xxx_res->irq_masks_cache[BIT_BYTE(i)] |= BYTE_BIT_MASK(i);
                irq_level[BIT_BYTE(i)] |=
                        wcd9xxx_res->irq_level_high[i] << (i % BITS_PER_BYTE);
        }

        if (!wcd9xxx_res->wcd_core_regmap) {
                dev_err(wcd9xxx_res->dev,
                        "%s: Codec core regmap not defined\n", __func__);
                ret = -EINVAL;
                goto fail_irq_init;
        }

        for (i = 0; i < wcd9xxx_res->num_irq_regs; i++) {
                /* Initialize interrupt mask and level registers */
                regmap_write(wcd9xxx_res->wcd_core_regmap,
                        wcd9xxx_res->intr_reg[WCD9XXX_INTR_LEVEL_BASE] + i,
                        irq_level[i]);
                regmap_write(wcd9xxx_res->wcd_core_regmap,
                        wcd9xxx_res->intr_reg[WCD9XXX_INTR_MASK_BASE] + i,
                        wcd9xxx_res->irq_masks_cur[i]);
        }

        ret = request_threaded_irq(wcd9xxx_res->irq, NULL, wcd9xxx_irq_thread,
                                   IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
                                   "wcd9xxx", wcd9xxx_res);
        if (ret != 0) {
                dev_err(wcd9xxx_res->dev, "Failed to request IRQ %d: %d\n",
                        wcd9xxx_res->irq, ret);
        } else {
                ret = enable_irq_wake(wcd9xxx_res->irq);
                if (ret) {
                        dev_err(wcd9xxx_res->dev,
                                "Failed to set wake interrupt on IRQ %d: %d\n",
                                wcd9xxx_res->irq, ret);
                        free_irq(wcd9xxx_res->irq, wcd9xxx_res);
                }
        }

        if (ret)
                goto fail_irq_init;

        return ret;

fail_irq_init:
        dev_err(wcd9xxx_res->dev,
                "%s: Failed to init wcd9xxx irq\n", __func__);
        wcd9xxx_irq_put_upstream_irq(wcd9xxx_res);
        mutex_destroy(&wcd9xxx_res->irq_lock);
        mutex_destroy(&wcd9xxx_res->nested_irq_lock);
        return ret;
}
EXPORT_SYMBOL(wcd9xxx_irq_init);

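/**
 * wcd9xxx_request_irq - request a threaded handler for a codec irq
 * @wcd9xxx_res: pointer to core resource
 * @irq: codec (physical) irq number
 * @handler: threaded handler to call
 * @name: name of the interrupt
 * @data: cookie passed back to @handler when it runs
 *
 * Returns 0 on success, otherwise the error from request_threaded_irq().
 */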
int wcd9xxx_request_irq(struct wcd9xxx_core_resource *wcd9xxx_res,
                        int irq, irq_handler_t handler,
                        const char *name, void *data)
{
        int virq;

        virq = phyirq_to_virq(wcd9xxx_res, irq);

        return request_threaded_irq(virq, NULL, handler, IRQF_TRIGGER_RISING,
                                    name, data);
}
EXPORT_SYMBOL(wcd9xxx_request_irq);
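
/*
 * Minimal usage sketch (the irq index, handler, and cookie are
 * hypothetical, not taken from this file):
 *
 *	ret = wcd9xxx_request_irq(res, codec_irq, some_codec_handler,
 *				  "codec irq", priv);
 *	if (!ret) {
 *		...
 *		wcd9xxx_free_irq(res, codec_irq, priv);
 *	}
 */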

void wcd9xxx_irq_exit(struct wcd9xxx_core_resource *wcd9xxx_res)
{
        dev_dbg(wcd9xxx_res->dev, "%s: Cleaning up irq %d\n", __func__,
                wcd9xxx_res->irq);

        if (wcd9xxx_res->irq) {
                disable_irq_wake(wcd9xxx_res->irq);
                free_irq(wcd9xxx_res->irq, wcd9xxx_res);
                wcd9xxx_res->irq = 0;
                wcd9xxx_irq_put_downstream_irq(wcd9xxx_res);
                wcd9xxx_irq_put_upstream_irq(wcd9xxx_res);
        }
        mutex_destroy(&wcd9xxx_res->irq_lock);
        mutex_destroy(&wcd9xxx_res->nested_irq_lock);
}

#ifndef CONFIG_OF
static int phyirq_to_virq(
                struct wcd9xxx_core_resource *wcd9xxx_res, int offset)
{
        return wcd9xxx_res->irq_base + offset;
}

static int virq_to_phyirq(
                struct wcd9xxx_core_resource *wcd9xxx_res, int virq)
{
        return virq - wcd9xxx_res->irq_base;
}

static unsigned int wcd9xxx_irq_get_upstream_irq(
                struct wcd9xxx_core_resource *wcd9xxx_res)
{
        return wcd9xxx_res->irq;
}

/*
 * Declared above and called from wcd9xxx_irq_exit(); without this stub
 * the non-DT build would fail to link.
 */
static void wcd9xxx_irq_put_downstream_irq(
                struct wcd9xxx_core_resource *wcd9xxx_res)
{
        /* Do nothing */
}

static void wcd9xxx_irq_put_upstream_irq(
                struct wcd9xxx_core_resource *wcd9xxx_res)
{
        /* Do nothing */
}

static int wcd9xxx_map_irq(
                struct wcd9xxx_core_resource *wcd9xxx_core_res, int irq)
{
        return phyirq_to_virq(wcd9xxx_core_res, irq);
}
#else
static struct wcd9xxx_irq_drv_data *
wcd9xxx_irq_add_domain(struct device_node *node, struct device_node *parent)
{
        struct wcd9xxx_irq_drv_data *data = NULL;

        pr_debug("%s: node %s, node parent %s\n", __func__,
                 node->name, node->parent->name);

        data = kzalloc(sizeof(*data), GFP_KERNEL);
        if (!data)
                return NULL;

        /*
         * The wcd9xxx_intc interrupt controller supports an N-to-N irq
         * mapping with a single-cell binding that carries only the irq
         * number (offset). Use irq_domain_simple_ops, which provides
         * irq_domain_simple_map and irq_domain_xlate_onetwocell.
         */
        data->domain = irq_domain_add_linear(node, WCD9XXX_MAX_NUM_IRQS,
                                             &irq_domain_simple_ops, data);
        if (!data->domain) {
                kfree(data);
                return NULL;
        }

        return data;
}

static struct wcd9xxx_irq_drv_data *
wcd9xxx_get_irq_drv_d(const struct wcd9xxx_core_resource *wcd9xxx_res)
{
        struct irq_domain *domain;

        domain = wcd9xxx_res->domain;

        if (domain)
                return domain->host_data;
        else
                return NULL;
}

static int phyirq_to_virq(struct wcd9xxx_core_resource *wcd9xxx_res, int offset)
{
        struct wcd9xxx_irq_drv_data *data;

        data = wcd9xxx_get_irq_drv_d(wcd9xxx_res);
        if (!data) {
                pr_warn("%s: not registered to interrupt controller\n",
                        __func__);
                return -EINVAL;
        }
        return irq_linear_revmap(data->domain, offset);
}

static int virq_to_phyirq(struct wcd9xxx_core_resource *wcd9xxx_res, int virq)
{
        struct irq_data *irq_data = irq_get_irq_data(virq);

        if (unlikely(!irq_data)) {
                pr_err("%s: irq_data is NULL\n", __func__);
                return -EINVAL;
        }
        return irq_data->hwirq;
}

static unsigned int wcd9xxx_irq_get_upstream_irq(
                struct wcd9xxx_core_resource *wcd9xxx_res)
{
        struct wcd9xxx_irq_drv_data *data;

        data = wcd9xxx_get_irq_drv_d(wcd9xxx_res);
        if (!data) {
                pr_err("%s: interrupt controller is not registered\n",
                       __func__);
                return 0;
        }

        /* Make sure data is updated before return. */
        rmb();
        return data->irq;
}

static void wcd9xxx_irq_put_downstream_irq(
                struct wcd9xxx_core_resource *wcd9xxx_res)
{
        int irq, virq, ret;

        /*
         * IRQ migration hits errors if the chip data and handlers are
         * not cleared; set the associated data and handlers to NULL at
         * irq_exit time.
         */
        for (irq = 0; irq < wcd9xxx_res->num_irqs; irq++) {
                virq = wcd9xxx_map_irq(wcd9xxx_res, irq);
                pr_debug("%s: irq %d -> %d\n", __func__, irq, virq);
                ret = irq_set_chip_data(virq, NULL);
                if (ret) {
                        pr_err("%s: Failed to configure irq %d (%d)\n",
                               __func__, irq, ret);
                        return;
                }
                irq_set_chip_and_handler(virq, NULL, NULL);
        }
}

static void wcd9xxx_irq_put_upstream_irq(
                struct wcd9xxx_core_resource *wcd9xxx_res)
{
        wcd9xxx_res->domain = NULL;
}

static int wcd9xxx_map_irq(struct wcd9xxx_core_resource *wcd9xxx_res, int irq)
{
        return of_irq_to_resource(wcd9xxx_res->dev->of_node, irq, NULL);
}

static int wcd9xxx_irq_probe(struct platform_device *pdev)
{
        int irq, dir_apps_irq = -EINVAL;
        struct wcd9xxx_irq_drv_data *data;
        struct device_node *node = pdev->dev.of_node;

        irq = of_get_named_gpio(node, "qcom,gpio-connect", 0);
        if (!gpio_is_valid(irq))
                dir_apps_irq = platform_get_irq_byname(pdev, "wcd_irq");

        if (!gpio_is_valid(irq) && dir_apps_irq < 0) {
                dev_err(&pdev->dev, "TLMM connect gpio not found\n");
                return -EPROBE_DEFER;
        }
        if (dir_apps_irq > 0) {
                irq = dir_apps_irq;
        } else {
                irq = gpio_to_irq(irq);
                if (irq < 0) {
                        dev_err(&pdev->dev, "Unable to configure irq\n");
                        return irq;
                }
        }
        dev_dbg(&pdev->dev, "%s: virq = %d\n", __func__, irq);
        data = wcd9xxx_irq_add_domain(node, node->parent);
        if (!data) {
                pr_err("%s: irq_add_domain failed\n", __func__);
                return -EINVAL;
        }
        data->irq = irq;

        /* Make sure irq is saved before return. */
        wmb();

        return 0;
}

static int wcd9xxx_irq_remove(struct platform_device *pdev)
{
        struct irq_domain *domain;
        struct wcd9xxx_irq_drv_data *data;

        domain = irq_find_host(pdev->dev.of_node);
        if (unlikely(!domain)) {
                pr_err("%s: domain is NULL\n", __func__);
                return -EINVAL;
        }
        data = (struct wcd9xxx_irq_drv_data *)domain->host_data;
        data->irq = 0;

        /* Make sure the irq variable is updated in data before irq removal. */
        wmb();
        irq_domain_remove(data->domain);
        kfree(data);

        return 0;
}

static const struct of_device_id of_match[] = {
        { .compatible = "qcom,wcd9xxx-irq" },
        { }
};
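
/*
 * Hypothetical devicetree snippet matching the table above (the
 * property names are the ones parsed in wcd9xxx_irq_probe(); the GPIO
 * phandle, number, and cell values are placeholders, not taken from a
 * real board file):
 *
 *	wcd9xxx_intc: wcd9xxx-irq {
 *		compatible = "qcom,wcd9xxx-irq";
 *		interrupt-controller;
 *		#interrupt-cells = <1>;
 *		qcom,gpio-connect = <&tlmm 54 0>;
 *	};
 */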

static struct platform_driver wcd9xxx_irq_driver = {
        .probe = wcd9xxx_irq_probe,
        .remove = wcd9xxx_irq_remove,
        .driver = {
                .name = "wcd9xxx_intc",
                .owner = THIS_MODULE,
                .of_match_table = of_match_ptr(of_match),
        },
};

int wcd9xxx_irq_drv_init(void)
{
        return platform_driver_register(&wcd9xxx_irq_driver);
}

void wcd9xxx_irq_drv_exit(void)
{
        platform_driver_unregister(&wcd9xxx_irq_driver);
}
#endif /* CONFIG_OF */