// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2019, The Linux Foundation. All rights reserved.
 */
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/irq.h>
#include <linux/mfd/core.h>
#include <linux/regmap.h>
#include <linux/delay.h>
#include <linux/irqdomain.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <soc/qcom/pm.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>
#include <linux/mfd/wcd9xxx/wcd9xxx_registers.h>
#include <asoc/core.h>
#include <asoc/wcd9xxx-irq.h>

#define BYTE_BIT_MASK(nr) (1UL << ((nr) % BITS_PER_BYTE))
#define BIT_BYTE(nr) ((nr) / BITS_PER_BYTE)
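/*
 * Example: codec interrupt 11 lives in status/mask/clear register byte
 * BIT_BYTE(11) == 1, at bit BYTE_BIT_MASK(11) == 0x08.
 */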

#define WCD9XXX_SYSTEM_RESUME_TIMEOUT_MS 100

#ifndef NO_IRQ
#define NO_IRQ (-1)
#endif

#ifdef CONFIG_OF
struct wcd9xxx_irq_drv_data {
	struct irq_domain *domain;
	int irq;
};
#endif

static int virq_to_phyirq(
	struct wcd9xxx_core_resource *wcd9xxx_res, int virq);
static int phyirq_to_virq(
	struct wcd9xxx_core_resource *wcd9xxx_res, int irq);
static unsigned int wcd9xxx_irq_get_upstream_irq(
	struct wcd9xxx_core_resource *wcd9xxx_res);
static void wcd9xxx_irq_put_downstream_irq(
	struct wcd9xxx_core_resource *wcd9xxx_res);
static void wcd9xxx_irq_put_upstream_irq(
	struct wcd9xxx_core_resource *wcd9xxx_res);
static int wcd9xxx_map_irq(
	struct wcd9xxx_core_resource *wcd9xxx_res, int irq);

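/*
 * irq_bus_lock/irq_bus_sync_unlock protocol: mask changes made while
 * the bus lock is held only update irq_masks_cur; the changed bytes
 * are written to the codec over the slow control bus in
 * wcd9xxx_irq_sync_unlock(), outside atomic context.
 */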
static void wcd9xxx_irq_lock(struct irq_data *data)
{
	struct wcd9xxx_core_resource *wcd9xxx_res =
			irq_data_get_irq_chip_data(data);
	mutex_lock(&wcd9xxx_res->irq_lock);
}

static void wcd9xxx_irq_sync_unlock(struct irq_data *data)
{
	struct wcd9xxx_core_resource *wcd9xxx_res =
			irq_data_get_irq_chip_data(data);
	int i;

	if ((ARRAY_SIZE(wcd9xxx_res->irq_masks_cur) >
			WCD9XXX_MAX_IRQ_REGS) ||
	    (ARRAY_SIZE(wcd9xxx_res->irq_masks_cache) >
			WCD9XXX_MAX_IRQ_REGS)) {
		pr_err("%s: Array size out of bounds\n", __func__);
		return;
	}
	if (!wcd9xxx_res->wcd_core_regmap) {
		pr_err("%s: Codec core regmap not defined\n",
			__func__);
		return;
	}

	for (i = 0; i < ARRAY_SIZE(wcd9xxx_res->irq_masks_cur); i++) {
		/*
		 * If there's been a change in the mask, write it back
		 * to the hardware.
		 */
		if (wcd9xxx_res->irq_masks_cur[i] !=
			wcd9xxx_res->irq_masks_cache[i]) {

			wcd9xxx_res->irq_masks_cache[i] =
				wcd9xxx_res->irq_masks_cur[i];
			regmap_write(wcd9xxx_res->wcd_core_regmap,
				wcd9xxx_res->intr_reg[WCD9XXX_INTR_MASK_BASE] + i,
				wcd9xxx_res->irq_masks_cur[i]);
		}
	}

	mutex_unlock(&wcd9xxx_res->irq_lock);
}

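/*
 * Mask register semantics: a set bit in irq_masks_cur masks (disables)
 * that interrupt, so enable clears the bit and disable sets it. The
 * cached masks are flushed to hardware in wcd9xxx_irq_sync_unlock().
 */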
static void wcd9xxx_irq_enable(struct irq_data *data)
{
	struct wcd9xxx_core_resource *wcd9xxx_res =
			irq_data_get_irq_chip_data(data);
	int wcd9xxx_irq = virq_to_phyirq(wcd9xxx_res, data->irq);
	int byte = BIT_BYTE(wcd9xxx_irq);
	int size = ARRAY_SIZE(wcd9xxx_res->irq_masks_cur);

	if ((byte < size) && (byte >= 0)) {
		wcd9xxx_res->irq_masks_cur[byte] &=
			~(BYTE_BIT_MASK(wcd9xxx_irq));
	} else {
		pr_err("%s: Array size is %d but index is %d: Out of range\n",
			__func__, size, byte);
	}
}

static void wcd9xxx_irq_disable(struct irq_data *data)
{
	struct wcd9xxx_core_resource *wcd9xxx_res =
			irq_data_get_irq_chip_data(data);
	int wcd9xxx_irq = virq_to_phyirq(wcd9xxx_res, data->irq);
	int byte = BIT_BYTE(wcd9xxx_irq);
	int size = ARRAY_SIZE(wcd9xxx_res->irq_masks_cur);

	if ((byte < size) && (byte >= 0)) {
		wcd9xxx_res->irq_masks_cur[byte]
			|= BYTE_BIT_MASK(wcd9xxx_irq);
	} else {
		pr_err("%s: Array size is %d but index is %d: Out of range\n",
			__func__, size, byte);
	}
}

static void wcd9xxx_irq_ack(struct irq_data *data)
{
	int wcd9xxx_irq = 0;
	struct wcd9xxx_core_resource *wcd9xxx_res =
			irq_data_get_irq_chip_data(data);

	if (wcd9xxx_res == NULL) {
		pr_err("%s: wcd9xxx_res is NULL\n", __func__);
		return;
	}
	wcd9xxx_irq = virq_to_phyirq(wcd9xxx_res, data->irq);
	pr_debug("%s: IRQ_ACK called for WCD9XXX IRQ: %d\n",
		 __func__, wcd9xxx_irq);
}

static void wcd9xxx_irq_mask(struct irq_data *d)
{
	/*
	 * Intentionally empty: the irq core calls irq_mask without a
	 * NULL check, so a stub must be provided.
	 */
}

static struct irq_chip wcd9xxx_irq_chip = {
	.name = "wcd9xxx",
	.irq_bus_lock = wcd9xxx_irq_lock,
	.irq_bus_sync_unlock = wcd9xxx_irq_sync_unlock,
	.irq_disable = wcd9xxx_irq_disable,
	.irq_enable = wcd9xxx_irq_enable,
	.irq_mask = wcd9xxx_irq_mask,
	.irq_ack = wcd9xxx_irq_ack,
};

bool wcd9xxx_lock_sleep(
	struct wcd9xxx_core_resource *wcd9xxx_res)
{
	enum wcd9xxx_pm_state os;

	/*
	 * wcd9xxx_{lock/unlock}_sleep are called mostly by
	 * wcd9xxx_irq_thread and its subroutines, but btn0_lpress_fn is
	 * not a wcd9xxx_irq_thread subroutine and can race with it, so
	 * wlock_holders must be protected with a mutex.
	 *
	 * If the system didn't resume, simply return false so the codec
	 * driver's IRQ handler can return without handling the IRQ. As
	 * the interrupt line is still active, the codec will raise
	 * another IRQ to retry shortly.
	 */
	mutex_lock(&wcd9xxx_res->pm_lock);
	if (wcd9xxx_res->wlock_holders++ == 0) {
		pr_debug("%s: holding wake lock\n", __func__);
		pm_qos_update_request(&wcd9xxx_res->pm_qos_req,
				      msm_cpuidle_get_deep_idle_latency());
		pm_stay_awake(wcd9xxx_res->dev);
	}
	mutex_unlock(&wcd9xxx_res->pm_lock);

	if (!wait_event_timeout(wcd9xxx_res->pm_wq,
				((os = wcd9xxx_pm_cmpxchg(wcd9xxx_res,
						WCD9XXX_PM_SLEEPABLE,
						WCD9XXX_PM_AWAKE)) ==
						WCD9XXX_PM_SLEEPABLE ||
				 (os == WCD9XXX_PM_AWAKE)),
				msecs_to_jiffies(
					WCD9XXX_SYSTEM_RESUME_TIMEOUT_MS))) {
		pr_warn("%s: system didn't resume within %dms, s %d, w %d\n",
			__func__,
			WCD9XXX_SYSTEM_RESUME_TIMEOUT_MS, wcd9xxx_res->pm_state,
			wcd9xxx_res->wlock_holders);
		wcd9xxx_unlock_sleep(wcd9xxx_res);
		return false;
	}
	wake_up_all(&wcd9xxx_res->pm_wq);
	return true;
}
EXPORT_SYMBOL(wcd9xxx_lock_sleep);
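
/*
 * Typical pairing in an interrupt path (a sketch; the caller and the
 * work in between are illustrative, not taken from this file):
 *
 *	if (!wcd9xxx_lock_sleep(res))
 *		return IRQ_NONE;
 *	... service the interrupt ...
 *	wcd9xxx_unlock_sleep(res);
 */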

void wcd9xxx_unlock_sleep(
	struct wcd9xxx_core_resource *wcd9xxx_res)
{
	mutex_lock(&wcd9xxx_res->pm_lock);
	if (--wcd9xxx_res->wlock_holders == 0) {
		pr_debug("%s: releasing wake lock pm_state %d -> %d\n",
			 __func__, wcd9xxx_res->pm_state, WCD9XXX_PM_SLEEPABLE);
		/*
		 * If wcd9xxx_lock_sleep failed, pm_state would still be
		 * WCD9XXX_PM_ASLEEP; don't overwrite it.
		 */
		if (likely(wcd9xxx_res->pm_state == WCD9XXX_PM_AWAKE))
			wcd9xxx_res->pm_state = WCD9XXX_PM_SLEEPABLE;
		pm_qos_update_request(&wcd9xxx_res->pm_qos_req,
				      PM_QOS_DEFAULT_VALUE);
		pm_relax(wcd9xxx_res->dev);
	}
	mutex_unlock(&wcd9xxx_res->pm_lock);
	wake_up_all(&wcd9xxx_res->pm_wq);
}
EXPORT_SYMBOL(wcd9xxx_unlock_sleep);

void wcd9xxx_nested_irq_lock(struct wcd9xxx_core_resource *wcd9xxx_res)
{
	mutex_lock(&wcd9xxx_res->nested_irq_lock);
}

void wcd9xxx_nested_irq_unlock(struct wcd9xxx_core_resource *wcd9xxx_res)
{
	mutex_unlock(&wcd9xxx_res->nested_irq_lock);
}

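/*
 * Dispatch one codec interrupt to its nested handler. When
 * irqdata->clear_first is set, the status bit is cleared in hardware
 * before the nested handler runs; otherwise it is cleared afterwards.
 * On I2C, a commit write (0x02 to the WCD9XXX_INTR_CLR_COMMIT
 * register) follows each clear.
 */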
static void wcd9xxx_irq_dispatch(struct wcd9xxx_core_resource *wcd9xxx_res,
				 struct intr_data *irqdata)
{
	int irqbit = irqdata->intr_num;

	if (!wcd9xxx_res->wcd_core_regmap) {
		pr_err("%s: codec core regmap not defined\n",
			__func__);
		return;
	}

	if (irqdata->clear_first) {
		wcd9xxx_nested_irq_lock(wcd9xxx_res);
		regmap_write(wcd9xxx_res->wcd_core_regmap,
			wcd9xxx_res->intr_reg[WCD9XXX_INTR_CLEAR_BASE] +
				BIT_BYTE(irqbit),
			BYTE_BIT_MASK(irqbit));

		if (wcd9xxx_get_intf_type() == WCD9XXX_INTERFACE_TYPE_I2C)
			regmap_write(wcd9xxx_res->wcd_core_regmap,
				wcd9xxx_res->intr_reg[WCD9XXX_INTR_CLR_COMMIT],
				0x02);
		handle_nested_irq(phyirq_to_virq(wcd9xxx_res, irqbit));
		wcd9xxx_nested_irq_unlock(wcd9xxx_res);
	} else {
		wcd9xxx_nested_irq_lock(wcd9xxx_res);
		handle_nested_irq(phyirq_to_virq(wcd9xxx_res, irqbit));
		regmap_write(wcd9xxx_res->wcd_core_regmap,
			wcd9xxx_res->intr_reg[WCD9XXX_INTR_CLEAR_BASE] +
				BIT_BYTE(irqbit),
			BYTE_BIT_MASK(irqbit));
		if (wcd9xxx_get_intf_type() == WCD9XXX_INTERFACE_TYPE_I2C)
			regmap_write(wcd9xxx_res->wcd_core_regmap,
				wcd9xxx_res->intr_reg[WCD9XXX_INTR_CLR_COMMIT],
				0x02);

		wcd9xxx_nested_irq_unlock(wcd9xxx_res);
	}
}

static irqreturn_t wcd9xxx_irq_thread(int irq, void *data)
{
	int ret;
	int i;
	struct intr_data irqdata;
	char linebuf[128];
	static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 1);
	struct wcd9xxx_core_resource *wcd9xxx_res = data;
	int num_irq_regs = wcd9xxx_res->num_irq_regs;
	struct wcd9xxx *wcd9xxx;
	u8 status[4], status1[4] = {0}, unmask_status[4] = {0};

	if (unlikely(wcd9xxx_lock_sleep(wcd9xxx_res) == false)) {
		dev_err(wcd9xxx_res->dev, "Failed to hold suspend\n");
		return IRQ_NONE;
	}

	if (!wcd9xxx_res->wcd_core_regmap) {
		dev_err(wcd9xxx_res->dev,
			"%s: Codec core regmap not supplied\n",
			__func__);
		goto err_disable_irq;
	}

	wcd9xxx = (struct wcd9xxx *)wcd9xxx_res->parent;
	if (!wcd9xxx) {
		dev_err(wcd9xxx_res->dev,
			"%s: Codec core not supplied\n", __func__);
		goto err_disable_irq;
	}

	if (!wcd9xxx->dev_up) {
		dev_info_ratelimited(wcd9xxx_res->dev, "wcd9xxx dev not up\n");
		/*
		 * Sleep so that the core is not blocked processing
		 * interrupts while the device is not up (SLIMbus will
		 * not be available).
		 */
		msleep(10);
	}

	memset(status, 0, sizeof(status));
	ret = regmap_bulk_read(wcd9xxx_res->wcd_core_regmap,
		wcd9xxx_res->intr_reg[WCD9XXX_INTR_STATUS_BASE],
		status, num_irq_regs);

	if (ret < 0) {
		dev_err(wcd9xxx_res->dev,
			"Failed to read interrupt status: %d\n", ret);
		goto err_disable_irq;
	}
	/*
	 * If status is 0, return without clearing.
	 * status contains: HW status - masked interrupts
	 * status1 contains: unhandled interrupts - masked interrupts
	 * unmask_status contains: unhandled interrupts
	 */
	if (unlikely(!memcmp(status, status1, sizeof(status)))) {
		pr_debug("%s: status is 0\n", __func__);
		wcd9xxx_unlock_sleep(wcd9xxx_res);
		return IRQ_HANDLED;
	}


	/*
	 * Copy status to unmask_status before masking, otherwise SW may
	 * miss clearing a masked interrupt in a corner case.
	 */
	memcpy(unmask_status, status, sizeof(unmask_status));

	/* Apply masking */
	for (i = 0; i < num_irq_regs; i++)
		status[i] &= ~wcd9xxx_res->irq_masks_cur[i];

	memcpy(status1, status, sizeof(status1));

	/*
	 * Find out which interrupts were triggered and call each
	 * interrupt's handler function.
	 *
	 * The codec has only one hardware irq line, shared by all of
	 * its internal interrupts, so the master irq handler may
	 * dispatch multiple nested irq handlers in one pass. Dispatch
	 * interrupts in the order maintained by the interrupt table.
	 */
	for (i = 0; i < wcd9xxx_res->intr_table_size; i++) {
		irqdata = wcd9xxx_res->intr_table[i];
		if (status[BIT_BYTE(irqdata.intr_num)] &
			BYTE_BIT_MASK(irqdata.intr_num)) {
			wcd9xxx_irq_dispatch(wcd9xxx_res, &irqdata);
			status1[BIT_BYTE(irqdata.intr_num)] &=
					~BYTE_BIT_MASK(irqdata.intr_num);
			unmask_status[BIT_BYTE(irqdata.intr_num)] &=
					~BYTE_BIT_MASK(irqdata.intr_num);
		}
	}

	/*
	 * As a failsafe, if an unhandled irq is found, clear it to
	 * prevent an interrupt storm. Note that an irq can be declared
	 * unhandled only when no irq was handled by a nested handler,
	 * since Taiko supports qdsp as the destination for a few irqs.
	 * The driver therefore must not clear pending irqs when some
	 * were handled and others were not.
	 */
	if (unlikely(!memcmp(status, status1, sizeof(status)))) {
		if (__ratelimit(&ratelimit)) {
			pr_warn("%s: Unhandled irq found\n", __func__);
			hex_dump_to_buffer(status, sizeof(status), 16, 1,
					   linebuf, sizeof(linebuf), false);
			pr_warn("%s: status0 : %s\n", __func__, linebuf);
			hex_dump_to_buffer(status1, sizeof(status1), 16, 1,
					   linebuf, sizeof(linebuf), false);
			pr_warn("%s: status1 : %s\n", __func__, linebuf);
		}
		/*
		 * unmask_status contains unhandled interrupts, hence
		 * clear all unhandled interrupts.
		 */
		ret = regmap_bulk_write(wcd9xxx_res->wcd_core_regmap,
			wcd9xxx_res->intr_reg[WCD9XXX_INTR_CLEAR_BASE],
			unmask_status, num_irq_regs);
		if (wcd9xxx_get_intf_type() == WCD9XXX_INTERFACE_TYPE_I2C)
			regmap_write(wcd9xxx_res->wcd_core_regmap,
				wcd9xxx_res->intr_reg[WCD9XXX_INTR_CLR_COMMIT],
				0x02);
	}
	wcd9xxx_unlock_sleep(wcd9xxx_res);

	return IRQ_HANDLED;

err_disable_irq:
	dev_err(wcd9xxx_res->dev,
		"Disable irq %d\n", wcd9xxx_res->irq);

	disable_irq_wake(wcd9xxx_res->irq);
	disable_irq_nosync(wcd9xxx_res->irq);
	wcd9xxx_unlock_sleep(wcd9xxx_res);
	return IRQ_NONE;
}

/**
 * wcd9xxx_free_irq
 *
 * @wcd9xxx_res: pointer to core resource
 * @irq: irq number
 * @data: data pointer
 *
 */
void wcd9xxx_free_irq(struct wcd9xxx_core_resource *wcd9xxx_res,
		      int irq, void *data)
{
	free_irq(phyirq_to_virq(wcd9xxx_res, irq), data);
}
EXPORT_SYMBOL(wcd9xxx_free_irq);

/**
 * wcd9xxx_enable_irq
 *
 * @wcd9xxx_res: pointer to core resource
 * @irq: irq number
 *
 */
void wcd9xxx_enable_irq(struct wcd9xxx_core_resource *wcd9xxx_res, int irq)
{
	if (wcd9xxx_res->irq)
		enable_irq(phyirq_to_virq(wcd9xxx_res, irq));
}
EXPORT_SYMBOL(wcd9xxx_enable_irq);

/**
 * wcd9xxx_disable_irq
 *
 * @wcd9xxx_res: pointer to core resource
 * @irq: irq number
 *
 */
void wcd9xxx_disable_irq(struct wcd9xxx_core_resource *wcd9xxx_res, int irq)
{
	if (wcd9xxx_res->irq)
		disable_irq_nosync(phyirq_to_virq(wcd9xxx_res, irq));
}
EXPORT_SYMBOL(wcd9xxx_disable_irq);

/**
 * wcd9xxx_disable_irq_sync
 *
 * @wcd9xxx_res: pointer to core resource
 * @irq: irq number
 *
 */
void wcd9xxx_disable_irq_sync(
			struct wcd9xxx_core_resource *wcd9xxx_res, int irq)
{
	if (wcd9xxx_res->irq)
		disable_irq(phyirq_to_virq(wcd9xxx_res, irq));
}
EXPORT_SYMBOL(wcd9xxx_disable_irq_sync);

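/*
 * Map every codec hwirq to a virq in the parent domain and install the
 * wcd9xxx irq_chip with a level- or edge-type flow handler. The virqs
 * are marked nested so their handlers run from the primary IRQ thread.
 */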
static int wcd9xxx_irq_setup_downstream_irq(
			struct wcd9xxx_core_resource *wcd9xxx_res)
{
	int irq, virq, ret;

	pr_debug("%s: enter\n", __func__);

	for (irq = 0; irq < wcd9xxx_res->num_irqs; irq++) {
		/* Map OF irq */
		virq = wcd9xxx_map_irq(wcd9xxx_res, irq);
		pr_debug("%s: irq %d -> %d\n", __func__, irq, virq);
		if (virq == NO_IRQ) {
			pr_err("%s, No interrupt specifier for irq %d\n",
			       __func__, irq);
			return NO_IRQ;
		}

		ret = irq_set_chip_data(virq, wcd9xxx_res);
		if (ret) {
			pr_err("%s: Failed to configure irq %d (%d)\n",
			       __func__, irq, ret);
			return ret;
		}

		if (wcd9xxx_res->irq_level_high[irq])
			irq_set_chip_and_handler(virq, &wcd9xxx_irq_chip,
						 handle_level_irq);
		else
			irq_set_chip_and_handler(virq, &wcd9xxx_irq_chip,
						 handle_edge_irq);

		irq_set_nested_thread(virq, 1);
	}

	pr_debug("%s: leave\n", __func__);

	return 0;
}

/**
 * wcd9xxx_irq_init
 *
 * @wcd9xxx_res: pointer to core resource
 *
 * Returns 0 on success, appropriate error code otherwise
 */
int wcd9xxx_irq_init(struct wcd9xxx_core_resource *wcd9xxx_res)
{
	int i, ret;
	u8 *irq_level = NULL;
	struct irq_domain *domain;
	struct device_node *pnode;

	mutex_init(&wcd9xxx_res->irq_lock);
	mutex_init(&wcd9xxx_res->nested_irq_lock);

	pnode = of_irq_find_parent(wcd9xxx_res->dev->of_node);
	if (unlikely(!pnode))
		return -EINVAL;

	domain = irq_find_host(pnode);
	if (unlikely(!domain))
		return -EINVAL;

	wcd9xxx_res->domain = domain;

	wcd9xxx_res->irq = wcd9xxx_irq_get_upstream_irq(wcd9xxx_res);
	if (!wcd9xxx_res->irq) {
		pr_warn("%s: irq driver is not yet initialized\n", __func__);
		mutex_destroy(&wcd9xxx_res->irq_lock);
		mutex_destroy(&wcd9xxx_res->nested_irq_lock);
		return -EPROBE_DEFER;
	}
	pr_debug("%s: probed irq %d\n", __func__, wcd9xxx_res->irq);

	/* Setup downstream IRQs */
	ret = wcd9xxx_irq_setup_downstream_irq(wcd9xxx_res);
	if (ret) {
		pr_err("%s: Failed to setup downstream IRQ\n", __func__);
		goto fail_irq_level;
	}


	/* All other wcd9xxx interrupts are edge triggered */
	wcd9xxx_res->irq_level_high[0] = true;

	/* Mask all the interrupts */
	irq_level = kzalloc(wcd9xxx_res->num_irq_regs, GFP_KERNEL);
	if (!irq_level) {
		ret = -ENOMEM;
		goto fail_irq_level;
	}
	for (i = 0; i < wcd9xxx_res->num_irqs; i++) {
		wcd9xxx_res->irq_masks_cur[BIT_BYTE(i)] |= BYTE_BIT_MASK(i);
		wcd9xxx_res->irq_masks_cache[BIT_BYTE(i)] |= BYTE_BIT_MASK(i);
		irq_level[BIT_BYTE(i)] |=
			wcd9xxx_res->irq_level_high[i] << (i % BITS_PER_BYTE);
	}

	if (!wcd9xxx_res->wcd_core_regmap) {
		dev_err(wcd9xxx_res->dev,
			"%s: Codec core regmap not defined\n",
			__func__);
		ret = -EINVAL;
		goto fail_irq_init;
	}

	for (i = 0; i < wcd9xxx_res->num_irq_regs; i++) {
		/* Initialize interrupt mask and level registers */
		regmap_write(wcd9xxx_res->wcd_core_regmap,
			wcd9xxx_res->intr_reg[WCD9XXX_INTR_LEVEL_BASE] + i,
			irq_level[i]);
		regmap_write(wcd9xxx_res->wcd_core_regmap,
			wcd9xxx_res->intr_reg[WCD9XXX_INTR_MASK_BASE] + i,
			wcd9xxx_res->irq_masks_cur[i]);
	}

	ret = request_threaded_irq(wcd9xxx_res->irq, NULL, wcd9xxx_irq_thread,
				   IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
				   "wcd9xxx", wcd9xxx_res);
	if (ret != 0) {
		dev_err(wcd9xxx_res->dev, "Failed to request IRQ %d: %d\n",
			wcd9xxx_res->irq, ret);
	} else {
		ret = enable_irq_wake(wcd9xxx_res->irq);
		if (ret) {
			dev_err(wcd9xxx_res->dev,
				"Failed to set wake interrupt on IRQ %d: %d\n",
				wcd9xxx_res->irq, ret);
			free_irq(wcd9xxx_res->irq, wcd9xxx_res);
		}
	}

	if (ret)
		goto fail_irq_init;

	kfree(irq_level);
	return ret;

fail_irq_init:
	dev_err(wcd9xxx_res->dev,
		"%s: Failed to init wcd9xxx irq\n", __func__);
	kfree(irq_level);
fail_irq_level:
	wcd9xxx_irq_put_upstream_irq(wcd9xxx_res);
	mutex_destroy(&wcd9xxx_res->irq_lock);
	mutex_destroy(&wcd9xxx_res->nested_irq_lock);
	return ret;
}
EXPORT_SYMBOL(wcd9xxx_irq_init);

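/*
 * Request a threaded handler for a single codec interrupt. The virq
 * is resolved from the codec's irq domain; because the virqs are
 * marked nested, the handler is invoked from wcd9xxx_irq_thread via
 * handle_nested_irq().
 */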
int wcd9xxx_request_irq(struct wcd9xxx_core_resource *wcd9xxx_res,
			int irq, irq_handler_t handler,
			const char *name, void *data)
{
	int virq;

	virq = phyirq_to_virq(wcd9xxx_res, irq);

	return request_threaded_irq(virq, NULL, handler, IRQF_TRIGGER_RISING,
				    name, data);
}
EXPORT_SYMBOL(wcd9xxx_request_irq);

void wcd9xxx_irq_exit(struct wcd9xxx_core_resource *wcd9xxx_res)
{
	dev_dbg(wcd9xxx_res->dev, "%s: Cleaning up irq %d\n", __func__,
		wcd9xxx_res->irq);

	if (wcd9xxx_res->irq) {
		disable_irq_wake(wcd9xxx_res->irq);
		free_irq(wcd9xxx_res->irq, wcd9xxx_res);
		wcd9xxx_res->irq = 0;
		wcd9xxx_irq_put_downstream_irq(wcd9xxx_res);
		wcd9xxx_irq_put_upstream_irq(wcd9xxx_res);
	}
	mutex_destroy(&wcd9xxx_res->irq_lock);
	mutex_destroy(&wcd9xxx_res->nested_irq_lock);
}

#ifndef CONFIG_OF
static int phyirq_to_virq(
	struct wcd9xxx_core_resource *wcd9xxx_res,
	int offset)
{
	return wcd9xxx_res->irq_base + offset;
}

static int virq_to_phyirq(
	struct wcd9xxx_core_resource *wcd9xxx_res,
	int virq)
{
	return virq - wcd9xxx_res->irq_base;
}

static unsigned int wcd9xxx_irq_get_upstream_irq(
	struct wcd9xxx_core_resource *wcd9xxx_res)
{
	return wcd9xxx_res->irq;
}

static void wcd9xxx_irq_put_upstream_irq(
	struct wcd9xxx_core_resource *wcd9xxx_res)
{
	/* Do nothing */
}

static int wcd9xxx_map_irq(
	struct wcd9xxx_core_resource *wcd9xxx_core_res, int irq)
{
	return phyirq_to_virq(wcd9xxx_core_res, irq);
}
#else
static struct wcd9xxx_irq_drv_data *
wcd9xxx_irq_add_domain(struct device_node *node,
		       struct device_node *parent)
{
	struct wcd9xxx_irq_drv_data *data = NULL;

	pr_debug("%s: node %s, node parent %s\n", __func__,
		 node->name, node->parent->name);

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

	/*
	 * The wcd9xxx_intc interrupt controller supports N-to-N irq
	 * mapping with single-cell binding using irq numbers (offsets)
	 * only. Use irq_domain_simple_ops, which provides
	 * irq_domain_simple_map and irq_domain_xlate_onetwocell.
	 */
	data->domain = irq_domain_add_linear(node, WCD9XXX_MAX_NUM_IRQS,
					     &irq_domain_simple_ops, data);
	if (!data->domain) {
		kfree(data);
		return NULL;
	}

	return data;
}

static struct wcd9xxx_irq_drv_data *
wcd9xxx_get_irq_drv_d(const struct wcd9xxx_core_resource *wcd9xxx_res)
{
	struct irq_domain *domain;

	domain = wcd9xxx_res->domain;

	if (domain)
		return domain->host_data;
	else
		return NULL;
}

static int phyirq_to_virq(struct wcd9xxx_core_resource *wcd9xxx_res, int offset)
{
	struct wcd9xxx_irq_drv_data *data;

	data = wcd9xxx_get_irq_drv_d(wcd9xxx_res);
	if (!data) {
		pr_warn("%s: not registered to interrupt controller\n",
			__func__);
		return -EINVAL;
	}
	return irq_linear_revmap(data->domain, offset);
}

static int virq_to_phyirq(struct wcd9xxx_core_resource *wcd9xxx_res, int virq)
{
	struct irq_data *irq_data = irq_get_irq_data(virq);

	if (unlikely(!irq_data)) {
		pr_err("%s: irq_data is NULL\n", __func__);
		return -EINVAL;
	}
	return irq_data->hwirq;
}

static unsigned int wcd9xxx_irq_get_upstream_irq(
	struct wcd9xxx_core_resource *wcd9xxx_res)
{
	struct wcd9xxx_irq_drv_data *data;

	data = wcd9xxx_get_irq_drv_d(wcd9xxx_res);
	if (!data) {
		pr_err("%s: interrupt controller is not registered\n",
		       __func__);
		return 0;
	}

	/* Make sure data is updated before return. */
	rmb();
	return data->irq;
}

static void wcd9xxx_irq_put_downstream_irq(
	struct wcd9xxx_core_resource *wcd9xxx_res)
{
	int irq, virq, ret;

	/*
	 * IRQ migration hits errors if the chip data and handlers are
	 * not cleared; set the associated data and handlers to NULL at
	 * irq_exit.
	 */
	for (irq = 0; irq < wcd9xxx_res->num_irqs; irq++) {
		virq = wcd9xxx_map_irq(wcd9xxx_res, irq);
		pr_debug("%s: irq %d -> %d\n", __func__, irq, virq);
		ret = irq_set_chip_data(virq, NULL);
		if (ret) {
			pr_err("%s: Failed to configure irq %d (%d)\n",
			       __func__, irq, ret);
			return;
		}
		irq_set_chip_and_handler(virq, NULL, NULL);
	}
}

static void wcd9xxx_irq_put_upstream_irq(
	struct wcd9xxx_core_resource *wcd9xxx_res)
{
	wcd9xxx_res->domain = NULL;
}

static int wcd9xxx_map_irq(struct wcd9xxx_core_resource *wcd9xxx_res, int irq)
{
	return of_irq_to_resource(wcd9xxx_res->dev->of_node, irq, NULL);
}

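/*
 * Resolve the upstream interrupt line: prefer the "qcom,gpio-connect"
 * GPIO when it is present, otherwise fall back to the platform IRQ
 * resource named "wcd_irq".
 */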
static int wcd9xxx_irq_probe(struct platform_device *pdev)
{
	int irq, dir_apps_irq = -EINVAL;
	struct wcd9xxx_irq_drv_data *data;
	struct device_node *node = pdev->dev.of_node;

	irq = of_get_named_gpio(node, "qcom,gpio-connect", 0);
	if (!gpio_is_valid(irq))
		dir_apps_irq = platform_get_irq_byname(pdev, "wcd_irq");

	if (!gpio_is_valid(irq) && dir_apps_irq < 0) {
		dev_err(&pdev->dev, "TLMM connect gpio not found\n");
		return -EPROBE_DEFER;
	}
	if (dir_apps_irq > 0) {
		irq = dir_apps_irq;
	} else {
		irq = gpio_to_irq(irq);
		if (irq < 0) {
			dev_err(&pdev->dev, "Unable to configure irq\n");
			return irq;
		}
	}
	dev_dbg(&pdev->dev, "%s: virq = %d\n", __func__, irq);
	data = wcd9xxx_irq_add_domain(node, node->parent);
	if (!data) {
		pr_err("%s: irq_add_domain failed\n", __func__);
		return -EINVAL;
	}
	data->irq = irq;

	/* Make sure irq is saved before return. */
	wmb();

	return 0;
}

static int wcd9xxx_irq_remove(struct platform_device *pdev)
{
	struct irq_domain *domain;
	struct wcd9xxx_irq_drv_data *data;

	domain = irq_find_host(pdev->dev.of_node);
	if (unlikely(!domain)) {
		pr_err("%s: domain is NULL\n", __func__);
		return -EINVAL;
	}
	data = (struct wcd9xxx_irq_drv_data *)domain->host_data;
	data->irq = 0;

	/* Make sure irq variable is updated in data, before irq removal. */
	wmb();
	irq_domain_remove(data->domain);
	kfree(data);

	return 0;
}

static const struct of_device_id of_match[] = {
	{ .compatible = "qcom,wcd9xxx-irq" },
	{ }
};

static struct platform_driver wcd9xxx_irq_driver = {
	.probe = wcd9xxx_irq_probe,
	.remove = wcd9xxx_irq_remove,
	.driver = {
		.name = "wcd9xxx_intc",
		.owner = THIS_MODULE,
		.of_match_table = of_match_ptr(of_match),
		.suppress_bind_attrs = true,
	},
};

int wcd9xxx_irq_drv_init(void)
{
	return platform_driver_register(&wcd9xxx_irq_driver);
}

void wcd9xxx_irq_drv_exit(void)
{
	platform_driver_unregister(&wcd9xxx_irq_driver);
}
#endif /* CONFIG_OF */