/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/irq.h>
#include <linux/mfd/core.h>
#include <linux/mfd/wcd9xxx/core.h>
#include <linux/mfd/wcd9xxx/wcd9xxx_registers.h>
#include <linux/mfd/wcd9xxx/wcd9310_registers.h>
#include <linux/mfd/wcd9xxx/wcd9xxx-slimslave.h>
#include <linux/delay.h>
#include <linux/irqdomain.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/slab.h>
#include <mach/cpuidle.h>

#define BYTE_BIT_MASK(nr)	(1UL << ((nr) % BITS_PER_BYTE))
#define BIT_BYTE(nr)		((nr) / BITS_PER_BYTE)
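
/*
 * Illustrative example of the mapping above: interrupt bit 10 lives in
 * status/mask byte BIT_BYTE(10) == 1, at bit mask BYTE_BIT_MASK(10) ==
 * (1UL << 2) == 0x04.
 */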

#define WCD9XXX_SYSTEM_RESUME_TIMEOUT_MS 100

#ifdef CONFIG_OF
struct wcd9xxx_irq_drv_data {
	struct irq_domain *domain;
	int irq;
};
#endif

static int virq_to_phyirq(struct wcd9xxx *wcd9xxx, int virq);
static int phyirq_to_virq(struct wcd9xxx *wcd9xxx, int irq);
static unsigned int wcd9xxx_irq_get_upstream_irq(struct wcd9xxx *wcd9xxx);
static void wcd9xxx_irq_put_upstream_irq(struct wcd9xxx *wcd9xxx);
static int wcd9xxx_map_irq(struct wcd9xxx *wcd9xxx, int irq);

static void wcd9xxx_irq_lock(struct irq_data *data)
{
	struct wcd9xxx *wcd9xxx = irq_data_get_irq_chip_data(data);
	mutex_lock(&wcd9xxx->irq_lock);
}

static void wcd9xxx_irq_sync_unlock(struct irq_data *data)
{
	struct wcd9xxx *wcd9xxx = irq_data_get_irq_chip_data(data);
	int i;

	if (ARRAY_SIZE(wcd9xxx->irq_masks_cur) > WCD9XXX_NUM_IRQ_REGS ||
	    ARRAY_SIZE(wcd9xxx->irq_masks_cache) > WCD9XXX_NUM_IRQ_REGS) {
		pr_err("%s: array size out of bounds\n", __func__);
		mutex_unlock(&wcd9xxx->irq_lock);
		return;
	}

	for (i = 0; i < ARRAY_SIZE(wcd9xxx->irq_masks_cur); i++) {
		/*
		 * If there's been a change in the mask, write it back to the
		 * hardware.
		 */
		if (wcd9xxx->irq_masks_cur[i] != wcd9xxx->irq_masks_cache[i]) {
			wcd9xxx->irq_masks_cache[i] = wcd9xxx->irq_masks_cur[i];
			wcd9xxx_reg_write(wcd9xxx,
					  WCD9XXX_A_INTR_MASK0 + i,
					  wcd9xxx->irq_masks_cur[i]);
		}
	}

	mutex_unlock(&wcd9xxx->irq_lock);
}

static void wcd9xxx_irq_enable(struct irq_data *data)
{
	struct wcd9xxx *wcd9xxx = irq_data_get_irq_chip_data(data);
	int wcd9xxx_irq = virq_to_phyirq(wcd9xxx, data->irq);
	wcd9xxx->irq_masks_cur[BIT_BYTE(wcd9xxx_irq)] &=
		~(BYTE_BIT_MASK(wcd9xxx_irq));
}

static void wcd9xxx_irq_disable(struct irq_data *data)
{
	struct wcd9xxx *wcd9xxx = irq_data_get_irq_chip_data(data);
	int wcd9xxx_irq = virq_to_phyirq(wcd9xxx, data->irq);
	wcd9xxx->irq_masks_cur[BIT_BYTE(wcd9xxx_irq)] |=
		BYTE_BIT_MASK(wcd9xxx_irq);
}

static void wcd9xxx_irq_mask(struct irq_data *d)
{
	/* Do nothing; required since genirq calls irq_mask without a NULL check */
}

static struct irq_chip wcd9xxx_irq_chip = {
	.name = "wcd9xxx",
	.irq_bus_lock = wcd9xxx_irq_lock,
	.irq_bus_sync_unlock = wcd9xxx_irq_sync_unlock,
	.irq_disable = wcd9xxx_irq_disable,
	.irq_enable = wcd9xxx_irq_enable,
	.irq_mask = wcd9xxx_irq_mask,
};

enum wcd9xxx_pm_state wcd9xxx_pm_cmpxchg(struct wcd9xxx *wcd9xxx,
					 enum wcd9xxx_pm_state o,
					 enum wcd9xxx_pm_state n)
{
	enum wcd9xxx_pm_state old;
	mutex_lock(&wcd9xxx->pm_lock);
	old = wcd9xxx->pm_state;
	if (old == o)
		wcd9xxx->pm_state = n;
	mutex_unlock(&wcd9xxx->pm_lock);
	return old;
}
EXPORT_SYMBOL_GPL(wcd9xxx_pm_cmpxchg);
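
/*
 * Usage sketch (illustrative, not code from this driver): a system-suspend
 * path can use the compare-and-exchange above to enter ASLEEP only when no
 * IRQ processing currently holds the codec awake:
 *
 *	old = wcd9xxx_pm_cmpxchg(wcd9xxx, WCD9XXX_PM_SLEEPABLE,
 *				 WCD9XXX_PM_ASLEEP);
 *	if (old == WCD9XXX_PM_SLEEPABLE)
 *		pr_debug("codec can now be put to sleep\n");
 */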

bool wcd9xxx_lock_sleep(struct wcd9xxx *wcd9xxx)
{
	enum wcd9xxx_pm_state os;

	/*
	 * wcd9xxx_{lock,unlock}_sleep are mostly called by wcd9xxx_irq_thread
	 * and its subroutines, but btn0_lpress_fn is not a subroutine of
	 * wcd9xxx_irq_thread and can race with it, so accesses to
	 * wlock_holders must be wrapped in the mutex.
	 *
	 * If the system has not resumed, simply return false so the codec
	 * driver's IRQ handler can return without handling the IRQ.  Since
	 * the interrupt line is still active, the codec will raise another
	 * IRQ to retry shortly.
	 */
	mutex_lock(&wcd9xxx->pm_lock);
	if (wcd9xxx->wlock_holders++ == 0) {
		pr_debug("%s: holding wake lock\n", __func__);
		pm_qos_update_request(&wcd9xxx->pm_qos_req,
				      msm_cpuidle_get_deep_idle_latency());
	}
	mutex_unlock(&wcd9xxx->pm_lock);
	if (!wait_event_timeout(wcd9xxx->pm_wq,
				((os = wcd9xxx_pm_cmpxchg(wcd9xxx,
							  WCD9XXX_PM_SLEEPABLE,
							  WCD9XXX_PM_AWAKE)) ==
				 WCD9XXX_PM_SLEEPABLE ||
				 (os == WCD9XXX_PM_AWAKE)),
				msecs_to_jiffies(
					WCD9XXX_SYSTEM_RESUME_TIMEOUT_MS))) {
		pr_warn("%s: system didn't resume within %dms, s %d, w %d\n",
			__func__,
			WCD9XXX_SYSTEM_RESUME_TIMEOUT_MS, wcd9xxx->pm_state,
			wcd9xxx->wlock_holders);
		wcd9xxx_unlock_sleep(wcd9xxx);
		return false;
	}
	wake_up_all(&wcd9xxx->pm_wq);
	return true;
}
EXPORT_SYMBOL_GPL(wcd9xxx_lock_sleep);

void wcd9xxx_unlock_sleep(struct wcd9xxx *wcd9xxx)
{
	mutex_lock(&wcd9xxx->pm_lock);
	if (--wcd9xxx->wlock_holders == 0) {
		pr_debug("%s: releasing wake lock pm_state %d -> %d\n",
			 __func__, wcd9xxx->pm_state, WCD9XXX_PM_SLEEPABLE);
		/*
		 * If wcd9xxx_lock_sleep failed, pm_state is still
		 * WCD9XXX_PM_ASLEEP; don't overwrite it.
		 */
		if (likely(wcd9xxx->pm_state == WCD9XXX_PM_AWAKE))
			wcd9xxx->pm_state = WCD9XXX_PM_SLEEPABLE;
		pm_qos_update_request(&wcd9xxx->pm_qos_req,
				      PM_QOS_DEFAULT_VALUE);
	}
	mutex_unlock(&wcd9xxx->pm_lock);
	wake_up_all(&wcd9xxx->pm_wq);
}
EXPORT_SYMBOL_GPL(wcd9xxx_unlock_sleep);
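
/*
 * Usage sketch (illustrative): any context that is not a subroutine of
 * wcd9xxx_irq_thread, e.g. a deferred button handler, must bracket codec
 * access with the pair above:
 *
 *	if (!wcd9xxx_lock_sleep(wcd9xxx))
 *		return;			system did not resume; retry later
 *	... access the codec ...
 *	wcd9xxx_unlock_sleep(wcd9xxx);
 */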

void wcd9xxx_nested_irq_lock(struct wcd9xxx *wcd9xxx)
{
	mutex_lock(&wcd9xxx->nested_irq_lock);
}

void wcd9xxx_nested_irq_unlock(struct wcd9xxx *wcd9xxx)
{
	mutex_unlock(&wcd9xxx->nested_irq_lock);
}

static void wcd9xxx_irq_dispatch(struct wcd9xxx *wcd9xxx, int irqbit)
{
	if ((irqbit <= WCD9XXX_IRQ_MBHC_INSERTION) &&
	    (irqbit >= WCD9XXX_IRQ_MBHC_REMOVAL)) {
		wcd9xxx_nested_irq_lock(wcd9xxx);
		wcd9xxx_reg_write(wcd9xxx, WCD9XXX_A_INTR_CLEAR0 +
					   BIT_BYTE(irqbit),
				  BYTE_BIT_MASK(irqbit));
		if (wcd9xxx_get_intf_type() == WCD9XXX_INTERFACE_TYPE_I2C)
			wcd9xxx_reg_write(wcd9xxx, WCD9XXX_A_INTR_MODE, 0x02);
		handle_nested_irq(phyirq_to_virq(wcd9xxx, irqbit));
		wcd9xxx_nested_irq_unlock(wcd9xxx);
	} else {
		wcd9xxx_nested_irq_lock(wcd9xxx);
		handle_nested_irq(phyirq_to_virq(wcd9xxx, irqbit));
		wcd9xxx_reg_write(wcd9xxx, WCD9XXX_A_INTR_CLEAR0 +
					   BIT_BYTE(irqbit),
				  BYTE_BIT_MASK(irqbit));
		if (wcd9xxx_get_intf_type() == WCD9XXX_INTERFACE_TYPE_I2C)
			wcd9xxx_reg_write(wcd9xxx, WCD9XXX_A_INTR_MODE, 0x02);
		wcd9xxx_nested_irq_unlock(wcd9xxx);
	}
}

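/*
 * Number of 8-bit interrupt status/mask registers on this codec;
 * equivalent to DIV_ROUND_UP(num_irqs, BITS_PER_BYTE).
 */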
static int wcd9xxx_num_irq_regs(const struct wcd9xxx *wcd9xxx)
{
	return (wcd9xxx->codec_type->num_irqs / 8) +
		((wcd9xxx->codec_type->num_irqs % 8) ? 1 : 0);
}

static irqreturn_t wcd9xxx_irq_thread(int irq, void *data)
{
	int ret;
	int i;
	struct wcd9xxx *wcd9xxx = data;
	int num_irq_regs = wcd9xxx_num_irq_regs(wcd9xxx);
	u8 status[num_irq_regs];

	if (unlikely(wcd9xxx_lock_sleep(wcd9xxx) == false)) {
		dev_err(wcd9xxx->dev, "Failed to hold suspend\n");
		return IRQ_NONE;
	}
	ret = wcd9xxx_bulk_read(wcd9xxx, WCD9XXX_A_INTR_STATUS0,
				num_irq_regs, status);
	if (ret < 0) {
		dev_err(wcd9xxx->dev, "Failed to read interrupt status: %d\n",
			ret);
		dev_err(wcd9xxx->dev, "Disable irq %d\n", wcd9xxx->irq);
		disable_irq_wake(wcd9xxx->irq);
		disable_irq_nosync(wcd9xxx->irq);
		wcd9xxx_unlock_sleep(wcd9xxx);
		return IRQ_NONE;
	}

	/* Apply masking */
	for (i = 0; i < num_irq_regs; i++)
		status[i] &= ~wcd9xxx->irq_masks_cur[i];

	/*
	 * Find out which interrupt was triggered and call that interrupt's
	 * handler function.
	 */
	if (status[BIT_BYTE(WCD9XXX_IRQ_SLIMBUS)] &
	    BYTE_BIT_MASK(WCD9XXX_IRQ_SLIMBUS))
		wcd9xxx_irq_dispatch(wcd9xxx, WCD9XXX_IRQ_SLIMBUS);

	/*
	 * Since the codec has only one hardware IRQ line, shared by all of
	 * its internal interrupts, the master IRQ handler may dispatch the
	 * nested IRQ handlers out of order.  Dispatch the MBHC interrupts
	 * in the order the MBHC state machine expects.
	 */
	for (i = WCD9XXX_IRQ_MBHC_INSERTION;
	     i >= WCD9XXX_IRQ_MBHC_REMOVAL; i--) {
		if (status[BIT_BYTE(i)] & BYTE_BIT_MASK(i))
			wcd9xxx_irq_dispatch(wcd9xxx, i);
	}
	for (i = WCD9XXX_IRQ_BG_PRECHARGE; i < wcd9xxx->codec_type->num_irqs;
	     i++) {
		if (status[BIT_BYTE(i)] & BYTE_BIT_MASK(i))
			wcd9xxx_irq_dispatch(wcd9xxx, i);
	}
	wcd9xxx_unlock_sleep(wcd9xxx);

	return IRQ_HANDLED;
}

void wcd9xxx_free_irq(struct wcd9xxx *wcd9xxx, int irq, void *data)
{
	free_irq(phyirq_to_virq(wcd9xxx, irq), data);
}

void wcd9xxx_enable_irq(struct wcd9xxx *wcd9xxx, int irq)
{
	enable_irq(phyirq_to_virq(wcd9xxx, irq));
}

void wcd9xxx_disable_irq(struct wcd9xxx *wcd9xxx, int irq)
{
	disable_irq_nosync(phyirq_to_virq(wcd9xxx, irq));
}

void wcd9xxx_disable_irq_sync(struct wcd9xxx *wcd9xxx, int irq)
{
	disable_irq(phyirq_to_virq(wcd9xxx, irq));
}

static int wcd9xxx_irq_setup_downstream_irq(struct wcd9xxx *wcd9xxx)
{
	int irq, virq, ret;

	pr_debug("%s: enter\n", __func__);

	for (irq = 0; irq < wcd9xxx->codec_type->num_irqs; irq++) {
		/* Map OF irq */
		virq = wcd9xxx_map_irq(wcd9xxx, irq);
		pr_debug("%s: irq %d -> %d\n", __func__, irq, virq);
		if (virq == NO_IRQ) {
			pr_err("%s: No interrupt specifier for irq %d\n",
			       __func__, irq);
			return NO_IRQ;
		}

		ret = irq_set_chip_data(virq, wcd9xxx);
		if (ret) {
			pr_err("%s: Failed to configure irq %d (%d)\n",
			       __func__, irq, ret);
			return ret;
		}

		if (wcd9xxx->irq_level_high[irq])
			irq_set_chip_and_handler(virq, &wcd9xxx_irq_chip,
						 handle_level_irq);
		else
			irq_set_chip_and_handler(virq, &wcd9xxx_irq_chip,
						 handle_edge_irq);

		irq_set_nested_thread(virq, 1);
	}

	pr_debug("%s: leave\n", __func__);

	return 0;
}

int wcd9xxx_irq_init(struct wcd9xxx *wcd9xxx)
{
	int i, ret;
	u8 irq_level[wcd9xxx_num_irq_regs(wcd9xxx)];

	mutex_init(&wcd9xxx->irq_lock);
	mutex_init(&wcd9xxx->nested_irq_lock);

	wcd9xxx->irq = wcd9xxx_irq_get_upstream_irq(wcd9xxx);
	if (!wcd9xxx->irq) {
		pr_warn("%s: irq driver is not yet initialized\n", __func__);
		mutex_destroy(&wcd9xxx->irq_lock);
		mutex_destroy(&wcd9xxx->nested_irq_lock);
		return -EPROBE_DEFER;
	}
	pr_debug("%s: probed irq %d\n", __func__, wcd9xxx->irq);

	/* Setup downstream IRQs */
	ret = wcd9xxx_irq_setup_downstream_irq(wcd9xxx);
	if (ret) {
		pr_err("%s: Failed to setup downstream IRQ\n", __func__);
		wcd9xxx_irq_put_upstream_irq(wcd9xxx);
		mutex_destroy(&wcd9xxx->irq_lock);
		mutex_destroy(&wcd9xxx->nested_irq_lock);
		return ret;
	}

	/*
	 * Only the first interrupt is level triggered; all other wcd9xxx
	 * interrupts are edge triggered.
	 */
	wcd9xxx->irq_level_high[0] = true;

	/* Mask all the interrupts */
	memset(irq_level, 0, wcd9xxx_num_irq_regs(wcd9xxx));
	for (i = 0; i < wcd9xxx->codec_type->num_irqs; i++) {
		wcd9xxx->irq_masks_cur[BIT_BYTE(i)] |= BYTE_BIT_MASK(i);
		wcd9xxx->irq_masks_cache[BIT_BYTE(i)] |= BYTE_BIT_MASK(i);
		irq_level[BIT_BYTE(i)] |=
			wcd9xxx->irq_level_high[i] << (i % BITS_PER_BYTE);
	}

	for (i = 0; i < wcd9xxx_num_irq_regs(wcd9xxx); i++) {
		/* Initialize interrupt mask and level registers */
		wcd9xxx_reg_write(wcd9xxx, WCD9XXX_A_INTR_LEVEL0 + i,
				  irq_level[i]);
		wcd9xxx_reg_write(wcd9xxx, WCD9XXX_A_INTR_MASK0 + i,
				  wcd9xxx->irq_masks_cur[i]);
	}

	ret = request_threaded_irq(wcd9xxx->irq, NULL, wcd9xxx_irq_thread,
				   IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
				   "wcd9xxx", wcd9xxx);
	if (ret != 0)
		dev_err(wcd9xxx->dev, "Failed to request IRQ %d: %d\n",
			wcd9xxx->irq, ret);
	else {
		ret = enable_irq_wake(wcd9xxx->irq);
		if (ret == 0) {
			ret = device_init_wakeup(wcd9xxx->dev, 1);
			if (ret) {
				dev_err(wcd9xxx->dev,
					"Failed to init device wakeup: %d\n",
					ret);
				disable_irq_wake(wcd9xxx->irq);
			}
		} else
			dev_err(wcd9xxx->dev,
				"Failed to set wake interrupt on IRQ %d: %d\n",
				wcd9xxx->irq, ret);
		if (ret)
			free_irq(wcd9xxx->irq, wcd9xxx);
	}

	if (ret) {
		pr_err("%s: Failed to init wcd9xxx irq\n", __func__);
		wcd9xxx_irq_put_upstream_irq(wcd9xxx);
		mutex_destroy(&wcd9xxx->irq_lock);
		mutex_destroy(&wcd9xxx->nested_irq_lock);
	}

	return ret;
}

int wcd9xxx_request_irq(struct wcd9xxx *wcd9xxx, int irq, irq_handler_t handler,
			const char *name, void *data)
{
	int virq;

	virq = phyirq_to_virq(wcd9xxx, irq);

	/*
	 * ARM needs us to explicitly flag the IRQ as valid
	 * and will set it noprobe when we do so.
	 */
#ifdef CONFIG_ARM
	set_irq_flags(virq, IRQF_VALID);
#else
	set_irq_noprobe(virq);
#endif

	return request_threaded_irq(virq, NULL, handler, IRQF_TRIGGER_RISING,
				    name, data);
}
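
/*
 * Usage sketch (illustrative; the handler and cookie names are made up): a
 * codec driver requests one of the codec's internal interrupts by its
 * physical number, and the handler then runs as a nested thread under
 * wcd9xxx_irq_thread:
 *
 *	ret = wcd9xxx_request_irq(wcd9xxx, WCD9XXX_IRQ_SLIMBUS,
 *				  slimbus_handler, "SLIMBUS Slave", priv);
 *	...
 *	wcd9xxx_free_irq(wcd9xxx, WCD9XXX_IRQ_SLIMBUS, priv);
 */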

void wcd9xxx_irq_exit(struct wcd9xxx *wcd9xxx)
{
	if (wcd9xxx->irq) {
		disable_irq_wake(wcd9xxx->irq);
		free_irq(wcd9xxx->irq, wcd9xxx);
		/* Release parent's of_node */
		wcd9xxx_irq_put_upstream_irq(wcd9xxx);
		device_init_wakeup(wcd9xxx->dev, 0);
	}
	mutex_destroy(&wcd9xxx->irq_lock);
	mutex_destroy(&wcd9xxx->nested_irq_lock);
}

#ifndef CONFIG_OF
static int phyirq_to_virq(struct wcd9xxx *wcd9xxx, int offset)
{
	return wcd9xxx->irq_base + offset;
}

static int virq_to_phyirq(struct wcd9xxx *wcd9xxx, int virq)
{
	return virq - wcd9xxx->irq_base;
}

static unsigned int wcd9xxx_irq_get_upstream_irq(struct wcd9xxx *wcd9xxx)
{
	return wcd9xxx->irq;
}

static void wcd9xxx_irq_put_upstream_irq(struct wcd9xxx *wcd9xxx)
{
	/* Do nothing */
}

static int wcd9xxx_map_irq(struct wcd9xxx *wcd9xxx, int irq)
{
	return phyirq_to_virq(wcd9xxx, irq);
}
#else
int __init wcd9xxx_irq_of_init(struct device_node *node,
			       struct device_node *parent)
{
	struct wcd9xxx_irq_drv_data *data;

	pr_debug("%s: node %s, node parent %s\n", __func__,
		 node->name, node->parent->name);

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/*
	 * The wcd9xxx_intc interrupt controller supports N-to-N IRQ mapping
	 * with a single-cell binding that carries IRQ numbers (offsets) only.
	 * Use irq_domain_simple_ops, which provides irq_domain_simple_map and
	 * irq_domain_xlate_onetwocell.
	 */
	data->domain = irq_domain_add_linear(node, WCD9XXX_MAX_NUM_IRQS,
					     &irq_domain_simple_ops, data);
	if (!data->domain) {
		kfree(data);
		return -ENOMEM;
	}

	return 0;
}
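
/*
 * Device tree sketch (illustrative only, not a binding document): the
 * controller uses a single interrupt cell, so clients refer to codec
 * interrupts by offset alone.
 *
 *	wcd9xxx_intc: wcd9xxx-irq {
 *		compatible = "qcom,wcd9xxx-irq";
 *		interrupt-controller;
 *		#interrupt-cells = <1>;
 *		interrupts = <...>;
 *		interrupt-names = "cdc-int";
 *	};
 */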

static struct wcd9xxx_irq_drv_data *
wcd9xxx_get_irq_drv_d(const struct wcd9xxx *wcd9xxx)
{
	struct device_node *pnode;
	struct irq_domain *domain;

	pnode = of_irq_find_parent(wcd9xxx->dev->of_node);
	/* Shouldn't happen */
	if (unlikely(!pnode))
		return NULL;

	domain = irq_find_host(pnode);
	if (unlikely(!domain))
		return NULL;

	return (struct wcd9xxx_irq_drv_data *)domain->host_data;
}

static int phyirq_to_virq(struct wcd9xxx *wcd9xxx, int offset)
{
	struct wcd9xxx_irq_drv_data *data;

	data = wcd9xxx_get_irq_drv_d(wcd9xxx);
	if (!data) {
		pr_warn("%s: not registered to interrupt controller\n",
			__func__);
		return -EINVAL;
	}
	return irq_linear_revmap(data->domain, offset);
}

static int virq_to_phyirq(struct wcd9xxx *wcd9xxx, int virq)
{
	struct irq_data *irq_data = irq_get_irq_data(virq);
	return irq_data->hwirq;
}

static unsigned int wcd9xxx_irq_get_upstream_irq(struct wcd9xxx *wcd9xxx)
{
	struct wcd9xxx_irq_drv_data *data;

	/* Hold parent's of_node */
	if (!of_node_get(of_irq_find_parent(wcd9xxx->dev->of_node)))
		return -EINVAL;

	data = wcd9xxx_get_irq_drv_d(wcd9xxx);
	if (!data) {
		pr_err("%s: interrupt controller is not registered\n",
		       __func__);
		return 0;
	}

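	/* Pairs with the wmb() in wcd9xxx_irq_probe() publishing data->irq */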
	rmb();
	return data->irq;
}

static void wcd9xxx_irq_put_upstream_irq(struct wcd9xxx *wcd9xxx)
{
	/* Release parent's of_node */
	of_node_put(of_irq_find_parent(wcd9xxx->dev->of_node));
}

static int wcd9xxx_map_irq(struct wcd9xxx *wcd9xxx, int irq)
{
	return of_irq_to_resource(wcd9xxx->dev->of_node, irq, NULL);
}

static int __devinit wcd9xxx_irq_probe(struct platform_device *pdev)
{
	int irq;
	struct irq_domain *domain;
	struct wcd9xxx_irq_drv_data *data;
	int ret = -EINVAL;

	irq = platform_get_irq_byname(pdev, "cdc-int");
	if (irq < 0) {
		dev_err(&pdev->dev, "%s: Couldn't find cdc-int node(%d)\n",
			__func__, irq);
		return -EINVAL;
	} else {
		dev_dbg(&pdev->dev, "%s: virq = %d\n", __func__, irq);
		domain = irq_find_host(pdev->dev.of_node);
		data = (struct wcd9xxx_irq_drv_data *)domain->host_data;
		data->irq = irq;
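		/* Pairs with the rmb() in wcd9xxx_irq_get_upstream_irq() */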
		wmb();
		ret = 0;
	}

	return ret;
}

static int wcd9xxx_irq_remove(struct platform_device *pdev)
{
	struct irq_domain *domain;
	struct wcd9xxx_irq_drv_data *data;

	domain = irq_find_host(pdev->dev.of_node);
	data = (struct wcd9xxx_irq_drv_data *)domain->host_data;
	data->irq = 0;
	wmb();

	return 0;
}

static const struct of_device_id of_match[] = {
	{ .compatible = "qcom,wcd9xxx-irq" },
	{ }
};

static struct platform_driver wcd9xxx_irq_driver = {
	.probe = wcd9xxx_irq_probe,
	.remove = wcd9xxx_irq_remove,
	.driver = {
		.name = "wcd9xxx_intc",
		.owner = THIS_MODULE,
		.of_match_table = of_match_ptr(of_match),
	},
};

static int wcd9xxx_irq_drv_init(void)
{
	return platform_driver_register(&wcd9xxx_irq_driver);
}
subsys_initcall(wcd9xxx_irq_drv_init);

static void wcd9xxx_irq_drv_exit(void)
{
	platform_driver_unregister(&wcd9xxx_irq_driver);
}
module_exit(wcd9xxx_irq_drv_exit);
#endif /* CONFIG_OF */