blob: 5c9a9509c1deff504e9ca87bd197135716440e26 [file] [log] [blame]
Duy Truong790f06d2013-02-13 16:38:12 -08001/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13
14#include <linux/module.h>
15#include <linux/kernel.h>
16#include <linux/types.h>
17#include <linux/bitmap.h>
18#include <linux/bitops.h>
19#include <linux/init.h>
20#include <linux/interrupt.h>
21#include <linux/io.h>
22#include <linux/irq.h>
Praveen Chidambaram78499012011-11-01 17:15:17 -060023#include <linux/slab.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070024#include <linux/spinlock.h>
Praveen Chidambaram78499012011-11-01 17:15:17 -060025#include <asm/hardware/gic.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070026#include <mach/msm_iomap.h>
27#include <mach/gpio.h>
28
Subhash Jadavani909e04f2012-04-12 10:52:50 +053029#include <mach/mpm.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070030
31/******************************************************************************
32 * Debug Definitions
33 *****************************************************************************/
34
/* Bit flags for msm_mpm_debug_mask (runtime module param "debug_mask"). */
enum {
	MSM_MPM_DEBUG_NON_DETECTABLE_IRQ = BIT(0),
	MSM_MPM_DEBUG_PENDING_IRQ = BIT(1),
	MSM_MPM_DEBUG_WRITE = BIT(2),
	MSM_MPM_DEBUG_NON_DETECTABLE_IRQ_IDLE = BIT(3),
};

/* Defaults to MSM_MPM_DEBUG_NON_DETECTABLE_IRQ (== 1) enabled. */
static int msm_mpm_debug_mask = 1;
module_param_named(
	debug_mask, msm_mpm_debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP
);
46
47/******************************************************************************
48 * Request and Status Definitions
49 *****************************************************************************/
50
/* Sub-register banks within the MPM request register area; each bank is
 * MSM_MPM_REG_WIDTH words wide (see msm_mpm_read/msm_mpm_write).
 */
enum {
	MSM_MPM_REQUEST_REG_ENABLE,
	MSM_MPM_REQUEST_REG_DETECT_CTL,
	MSM_MPM_REQUEST_REG_POLARITY,
	MSM_MPM_REQUEST_REG_CLEAR,
};

/* Sub-register banks within the MPM status register area. */
enum {
	MSM_MPM_STATUS_REG_PENDING,
};
61
62/******************************************************************************
63 * IRQ Mapping Definitions
64 *****************************************************************************/
65
/* Total apps-side interrupt space that may map to MPM pins. */
#define MSM_MPM_NR_APPS_IRQS (NR_MSM_IRQS + NR_GPIO_IRQS)

/* Words needed for one bit per MPM interrupt, plus word/bit accessors. */
#define MSM_MPM_REG_WIDTH DIV_ROUND_UP(MSM_MPM_NR_MPM_IRQS, 32)
#define MSM_MPM_IRQ_INDEX(irq) (irq / 32)
#define MSM_MPM_IRQ_MASK(irq) BIT(irq % 32)

/* Platform data (register bases, m2a map, bypass list) copied in at init. */
static struct msm_mpm_device_data msm_mpm_dev_data;
/* Apps irq -> MPM pin lookup; 0 means "no MPM pin for this irq". */
static uint8_t msm_mpm_irqs_a2m[MSM_MPM_NR_APPS_IRQS];

/* Serializes all access to the bitmaps and shadow registers below. */
static DEFINE_SPINLOCK(msm_mpm_lock);

/*
 * Note: the following two bitmaps only mark irqs that are _not_
 * mappable to MPM.
 */
static DECLARE_BITMAP(msm_mpm_enabled_apps_irqs, MSM_MPM_NR_APPS_IRQS);
static DECLARE_BITMAP(msm_mpm_wake_apps_irqs, MSM_MPM_NR_APPS_IRQS);

/* Which apps irq numbers are GPIO irqs (set once in irq_extn_init). */
static DECLARE_BITMAP(msm_mpm_gpio_irqs_mask, MSM_MPM_NR_APPS_IRQS);

/* Shadow copies of the MPM request registers, flushed out by msm_mpm_set. */
static uint32_t msm_mpm_enabled_irq[MSM_MPM_REG_WIDTH];
static uint32_t msm_mpm_wake_irq[MSM_MPM_REG_WIDTH];
static uint32_t msm_mpm_detect_ctl[MSM_MPM_REG_WIDTH];
static uint32_t msm_mpm_polarity[MSM_MPM_REG_WIDTH];
90
91
92/******************************************************************************
93 * Low Level Functions for Accessing MPM
94 *****************************************************************************/
95
96static inline uint32_t msm_mpm_read(
97 unsigned int reg, unsigned int subreg_index)
98{
99 unsigned int offset = reg * MSM_MPM_REG_WIDTH + subreg_index;
100 return __raw_readl(msm_mpm_dev_data.mpm_status_reg_base + offset * 4);
101}
102
103static inline void msm_mpm_write(
104 unsigned int reg, unsigned int subreg_index, uint32_t value)
105{
106 unsigned int offset = reg * MSM_MPM_REG_WIDTH + subreg_index;
107 __raw_writel(value, msm_mpm_dev_data.mpm_request_reg_base + offset * 4);
108
109 if (MSM_MPM_DEBUG_WRITE & msm_mpm_debug_mask)
110 pr_info("%s: reg %u.%u: 0x%08x\n",
111 __func__, reg, subreg_index, value);
112}
113
/* Ring the apps->MPM IPC doorbell so the MPM picks up the new request
 * register contents just written by msm_mpm_set()/msm_mpm_clear().
 */
static inline void msm_mpm_send_interrupt(void)
{
	__raw_writel(msm_mpm_dev_data.mpm_apps_ipc_val,
		msm_mpm_dev_data.mpm_apps_ipc_reg);
	/* Ensure the write is complete before returning. */
	mb();
}
121
/* Handler for the MPM->apps IPC interrupt. The interrupt only needs to be
 * acknowledged (the wakeup side effect is what matters), so no work is done.
 */
static irqreturn_t msm_mpm_irq(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}
126
127/******************************************************************************
128 * MPM Access Functions
129 *****************************************************************************/
130
131static void msm_mpm_set(bool wakeset)
132{
133 uint32_t *irqs;
134 unsigned int reg;
135 int i;
136
137 irqs = wakeset ? msm_mpm_wake_irq : msm_mpm_enabled_irq;
138 for (i = 0; i < MSM_MPM_REG_WIDTH; i++) {
139 reg = MSM_MPM_REQUEST_REG_ENABLE;
140 msm_mpm_write(reg, i, irqs[i]);
141
142 reg = MSM_MPM_REQUEST_REG_DETECT_CTL;
143 msm_mpm_write(reg, i, msm_mpm_detect_ctl[i]);
144
145 reg = MSM_MPM_REQUEST_REG_POLARITY;
146 msm_mpm_write(reg, i, msm_mpm_polarity[i]);
147
148 reg = MSM_MPM_REQUEST_REG_CLEAR;
149 msm_mpm_write(reg, i, 0xffffffff);
150 }
151
152 /* Ensure that the set operation is complete before sending the
153 * interrupt
154 */
155 mb();
156 msm_mpm_send_interrupt();
157}
158
159static void msm_mpm_clear(void)
160{
161 int i;
162
163 for (i = 0; i < MSM_MPM_REG_WIDTH; i++) {
164 msm_mpm_write(MSM_MPM_REQUEST_REG_ENABLE, i, 0);
165 msm_mpm_write(MSM_MPM_REQUEST_REG_CLEAR, i, 0xffffffff);
166 }
167
168 /* Ensure the clear is complete before sending the interrupt */
169 mb();
170 msm_mpm_send_interrupt();
171}
172
173/******************************************************************************
174 * Interrupt Mapping Functions
175 *****************************************************************************/
176
/* True if @irq fits in the apps->MPM mapping table. */
static inline bool msm_mpm_is_valid_apps_irq(unsigned int irq)
{
	return irq < ARRAY_SIZE(msm_mpm_irqs_a2m);
}
181
/* MPM pin for apps @irq; 0 means the irq has no MPM pin.
 * Caller must validate @irq with msm_mpm_is_valid_apps_irq() first.
 */
static inline uint8_t msm_mpm_get_irq_a2m(unsigned int irq)
{
	return msm_mpm_irqs_a2m[irq];
}
186
/* Record the apps irq -> MPM pin mapping (pin is truncated to 8 bits to
 * match the storage width of msm_mpm_irqs_a2m).
 */
static inline void msm_mpm_set_irq_a2m(unsigned int apps_irq,
	unsigned int mpm_irq)
{
	msm_mpm_irqs_a2m[apps_irq] = (uint8_t) mpm_irq;
}
192
/* True if @irq is within the platform-provided MPM->apps mapping table. */
static inline bool msm_mpm_is_valid_mpm_irq(unsigned int irq)
{
	return irq < msm_mpm_dev_data.irqs_m2a_size;
}
197
/* Apps irq for MPM pin @irq; caller must validate @irq with
 * msm_mpm_is_valid_mpm_irq() first.
 */
static inline uint16_t msm_mpm_get_irq_m2a(unsigned int irq)
{
	return msm_mpm_dev_data.irqs_m2a[irq];
}
202
203static bool msm_mpm_bypass_apps_irq(unsigned int irq)
204{
205 int i;
206
207 for (i = 0; i < msm_mpm_dev_data.bypassed_apps_irqs_size; i++)
208 if (irq == msm_mpm_dev_data.bypassed_apps_irqs[i])
209 return true;
210
211 return false;
212}
213
214static int msm_mpm_enable_irq_exclusive(
215 unsigned int irq, bool enable, bool wakeset)
216{
217 uint32_t mpm_irq;
218
219 if (!msm_mpm_is_valid_apps_irq(irq))
220 return -EINVAL;
221
222 if (msm_mpm_bypass_apps_irq(irq))
223 return 0;
224
225 mpm_irq = msm_mpm_get_irq_a2m(irq);
226 if (mpm_irq) {
227 uint32_t *mpm_irq_masks = wakeset ?
228 msm_mpm_wake_irq : msm_mpm_enabled_irq;
229 uint32_t index = MSM_MPM_IRQ_INDEX(mpm_irq);
230 uint32_t mask = MSM_MPM_IRQ_MASK(mpm_irq);
231
232 if (enable)
233 mpm_irq_masks[index] |= mask;
234 else
235 mpm_irq_masks[index] &= ~mask;
236 } else {
237 unsigned long *apps_irq_bitmap = wakeset ?
238 msm_mpm_wake_apps_irqs : msm_mpm_enabled_apps_irqs;
239
240 if (enable)
241 __set_bit(irq, apps_irq_bitmap);
242 else
243 __clear_bit(irq, apps_irq_bitmap);
244 }
245
246 return 0;
247}
248
249static int msm_mpm_set_irq_type_exclusive(
250 unsigned int irq, unsigned int flow_type)
251{
252 uint32_t mpm_irq;
253
254 if (!msm_mpm_is_valid_apps_irq(irq))
255 return -EINVAL;
256
257 if (msm_mpm_bypass_apps_irq(irq))
258 return 0;
259
260 mpm_irq = msm_mpm_get_irq_a2m(irq);
261 if (mpm_irq) {
262 uint32_t index = MSM_MPM_IRQ_INDEX(mpm_irq);
263 uint32_t mask = MSM_MPM_IRQ_MASK(mpm_irq);
264
Praveen Chidambaramfdaef162011-09-28 08:40:05 -0600265 if (index >= MSM_MPM_REG_WIDTH)
266 return -EFAULT;
267
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700268 if (flow_type & IRQ_TYPE_EDGE_BOTH)
269 msm_mpm_detect_ctl[index] |= mask;
270 else
271 msm_mpm_detect_ctl[index] &= ~mask;
272
273 if (flow_type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_LEVEL_HIGH))
274 msm_mpm_polarity[index] |= mask;
275 else
276 msm_mpm_polarity[index] &= ~mask;
277 }
278
279 return 0;
280}
281
282static int __msm_mpm_enable_irq(unsigned int irq, unsigned int enable)
283{
284 unsigned long flags;
285 int rc;
286
287 spin_lock_irqsave(&msm_mpm_lock, flags);
288 rc = msm_mpm_enable_irq_exclusive(irq, (bool)enable, false);
289 spin_unlock_irqrestore(&msm_mpm_lock, flags);
290
291 return rc;
292}
293
/* irq_chip extension hook: start monitoring @d's irq while idle. */
static void msm_mpm_enable_irq(struct irq_data *d)
{
	__msm_mpm_enable_irq(d->irq, 1);
}
298
/* irq_chip extension hook: stop monitoring @d's irq while idle. */
static void msm_mpm_disable_irq(struct irq_data *d)
{
	__msm_mpm_enable_irq(d->irq, 0);
}
303
304static int msm_mpm_set_irq_wake(struct irq_data *d, unsigned int on)
305{
306 unsigned long flags;
307 int rc;
308
309 spin_lock_irqsave(&msm_mpm_lock, flags);
310 rc = msm_mpm_enable_irq_exclusive(d->irq, (bool)on, true);
311 spin_unlock_irqrestore(&msm_mpm_lock, flags);
312
313 return rc;
314}
315
316static int msm_mpm_set_irq_type(struct irq_data *d, unsigned int flow_type)
317{
318 unsigned long flags;
319 int rc;
320
321 spin_lock_irqsave(&msm_mpm_lock, flags);
322 rc = msm_mpm_set_irq_type_exclusive(d->irq, flow_type);
323 spin_unlock_irqrestore(&msm_mpm_lock, flags);
324
325 return rc;
326}
327
328/******************************************************************************
329 * Public functions
330 *****************************************************************************/
Subhash Jadavanife608a22012-04-13 10:45:53 +0530331int msm_mpm_enable_pin(unsigned int pin, unsigned int enable)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700332{
333 uint32_t index = MSM_MPM_IRQ_INDEX(pin);
334 uint32_t mask = MSM_MPM_IRQ_MASK(pin);
335 unsigned long flags;
336
337 spin_lock_irqsave(&msm_mpm_lock, flags);
338
339 if (enable)
340 msm_mpm_enabled_irq[index] |= mask;
341 else
342 msm_mpm_enabled_irq[index] &= ~mask;
343
344 spin_unlock_irqrestore(&msm_mpm_lock, flags);
345 return 0;
346}
347
Subhash Jadavanife608a22012-04-13 10:45:53 +0530348int msm_mpm_set_pin_wake(unsigned int pin, unsigned int on)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700349{
350 uint32_t index = MSM_MPM_IRQ_INDEX(pin);
351 uint32_t mask = MSM_MPM_IRQ_MASK(pin);
352 unsigned long flags;
353
354 spin_lock_irqsave(&msm_mpm_lock, flags);
355
356 if (on)
357 msm_mpm_wake_irq[index] |= mask;
358 else
359 msm_mpm_wake_irq[index] &= ~mask;
360
361 spin_unlock_irqrestore(&msm_mpm_lock, flags);
362 return 0;
363}
364
Subhash Jadavanife608a22012-04-13 10:45:53 +0530365int msm_mpm_set_pin_type(unsigned int pin, unsigned int flow_type)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700366{
367 uint32_t index = MSM_MPM_IRQ_INDEX(pin);
368 uint32_t mask = MSM_MPM_IRQ_MASK(pin);
369 unsigned long flags;
370
371 spin_lock_irqsave(&msm_mpm_lock, flags);
372
373 if (flow_type & IRQ_TYPE_EDGE_BOTH)
374 msm_mpm_detect_ctl[index] |= mask;
375 else
376 msm_mpm_detect_ctl[index] &= ~mask;
377
378 if (flow_type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_LEVEL_HIGH))
379 msm_mpm_polarity[index] |= mask;
380 else
381 msm_mpm_polarity[index] &= ~mask;
382
383 spin_unlock_irqrestore(&msm_mpm_lock, flags);
384 return 0;
385}
386
387bool msm_mpm_irqs_detectable(bool from_idle)
388{
389 unsigned long *apps_irq_bitmap;
390 int debug_mask;
Anji Jonnala51dab622012-09-20 14:56:20 +0530391 int i = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700392
393 if (from_idle) {
394 apps_irq_bitmap = msm_mpm_enabled_apps_irqs;
395 debug_mask = msm_mpm_debug_mask &
396 MSM_MPM_DEBUG_NON_DETECTABLE_IRQ_IDLE;
397 } else {
398 apps_irq_bitmap = msm_mpm_wake_apps_irqs;
399 debug_mask = msm_mpm_debug_mask &
400 MSM_MPM_DEBUG_NON_DETECTABLE_IRQ;
401 }
402
403 if (debug_mask) {
Anji Jonnala51dab622012-09-20 14:56:20 +0530404 i = find_first_bit(apps_irq_bitmap, MSM_MPM_NR_APPS_IRQS);
405 while (i < MSM_MPM_NR_APPS_IRQS) {
406 struct irq_desc *desc = i ?
407 irq_to_desc(i) : NULL;
408 pr_info("%s: cannot monitor irq=%d %s\n",
409 __func__, i, desc->name);
410 i = find_next_bit(apps_irq_bitmap,
411 MSM_MPM_NR_APPS_IRQS, i + 1);
412 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700413
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700414 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700415 return (bool)__bitmap_empty(apps_irq_bitmap, MSM_MPM_NR_APPS_IRQS);
416}
417
418bool msm_mpm_gpio_irqs_detectable(bool from_idle)
419{
420 unsigned long *apps_irq_bitmap = from_idle ?
421 msm_mpm_enabled_apps_irqs : msm_mpm_wake_apps_irqs;
422
423 return !__bitmap_intersects(msm_mpm_gpio_irqs_mask, apps_irq_bitmap,
424 MSM_MPM_NR_APPS_IRQS);
425}
426
/* Program the MPM on the way into a low power mode: the wakeup set for
 * suspend, the enabled set for idle. sclk_count is currently unused.
 */
void msm_mpm_enter_sleep(uint32_t sclk_count, bool from_idle)
{
	msm_mpm_set(!from_idle);
}
431
/*
 * On wakeup, replay interrupts that fired at the MPM while the apps core
 * slept: for each pending MPM pin, mark the mapped apps irq pending (and
 * resend it on the idle path) so no edge is lost, then shut the MPM
 * monitoring back off.
 */
void msm_mpm_exit_sleep(bool from_idle)
{
	unsigned long pending;
	int i;
	int k;

	for (i = 0; i < MSM_MPM_REG_WIDTH; i++) {
		pending = msm_mpm_read(MSM_MPM_STATUS_REG_PENDING, i);

		if (MSM_MPM_DEBUG_PENDING_IRQ & msm_mpm_debug_mask)
			pr_info("%s: pending.%d: 0x%08lx", __func__,
					i, pending);

		/* Walk the set bits of this 32-bit pending word. */
		k = find_first_bit(&pending, 32);
		while (k < 32) {
			unsigned int mpm_irq = 32 * i + k;
			unsigned int apps_irq = msm_mpm_get_irq_m2a(mpm_irq);
			/* apps_irq == 0 means the pin has no apps mapping */
			struct irq_desc *desc = apps_irq ?
						irq_to_desc(apps_irq) : NULL;

			/* Level irqs will re-assert on their own; only edge
			 * irqs need to be marked pending/resent.
			 */
			if (desc && !irqd_is_level_type(&desc->irq_data)) {
				irq_set_pending(apps_irq);
				if (from_idle)
					check_irq_resend(desc, apps_irq);
			}

			k = find_next_bit(&pending, 32, k + 1);
		}
	}

	msm_mpm_clear();
}
464
465static int __init msm_mpm_early_init(void)
466{
467 uint8_t mpm_irq;
468 uint16_t apps_irq;
469
470 for (mpm_irq = 0; msm_mpm_is_valid_mpm_irq(mpm_irq); mpm_irq++) {
471 apps_irq = msm_mpm_get_irq_m2a(mpm_irq);
472 if (apps_irq && msm_mpm_is_valid_apps_irq(apps_irq))
473 msm_mpm_set_irq_a2m(apps_irq, mpm_irq);
474 }
475
476 return 0;
477}
478core_initcall(msm_mpm_early_init);
479
Praveen Chidambaram78499012011-11-01 17:15:17 -0600480void __init msm_mpm_irq_extn_init(struct msm_mpm_device_data *mpm_data)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700481{
482 gic_arch_extn.irq_mask = msm_mpm_disable_irq;
483 gic_arch_extn.irq_unmask = msm_mpm_enable_irq;
484 gic_arch_extn.irq_disable = msm_mpm_disable_irq;
485 gic_arch_extn.irq_set_type = msm_mpm_set_irq_type;
486 gic_arch_extn.irq_set_wake = msm_mpm_set_irq_wake;
487
488 msm_gpio_irq_extn.irq_mask = msm_mpm_disable_irq;
489 msm_gpio_irq_extn.irq_unmask = msm_mpm_enable_irq;
490 msm_gpio_irq_extn.irq_disable = msm_mpm_disable_irq;
491 msm_gpio_irq_extn.irq_set_type = msm_mpm_set_irq_type;
492 msm_gpio_irq_extn.irq_set_wake = msm_mpm_set_irq_wake;
493
494 bitmap_set(msm_mpm_gpio_irqs_mask, NR_MSM_IRQS, NR_GPIO_IRQS);
Praveen Chidambaram78499012011-11-01 17:15:17 -0600495
496 if (!mpm_data) {
497#ifdef CONFIG_MSM_MPM
498 BUG();
499#endif
500 return;
501 }
502
503 memcpy(&msm_mpm_dev_data, mpm_data, sizeof(struct msm_mpm_device_data));
504
505 msm_mpm_dev_data.irqs_m2a =
506 kzalloc(msm_mpm_dev_data.irqs_m2a_size * sizeof(uint16_t),
507 GFP_KERNEL);
508 BUG_ON(!msm_mpm_dev_data.irqs_m2a);
509 memcpy(msm_mpm_dev_data.irqs_m2a, mpm_data->irqs_m2a,
510 msm_mpm_dev_data.irqs_m2a_size * sizeof(uint16_t));
511 msm_mpm_dev_data.bypassed_apps_irqs =
512 kzalloc(msm_mpm_dev_data.bypassed_apps_irqs_size *
513 sizeof(uint16_t), GFP_KERNEL);
514 BUG_ON(!msm_mpm_dev_data.bypassed_apps_irqs);
515 memcpy(msm_mpm_dev_data.bypassed_apps_irqs,
516 mpm_data->bypassed_apps_irqs,
517 msm_mpm_dev_data.bypassed_apps_irqs_size * sizeof(uint16_t));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700518}
519
520static int __init msm_mpm_init(void)
521{
522 unsigned int irq = msm_mpm_dev_data.mpm_ipc_irq;
523 int rc;
524
525 rc = request_irq(irq, msm_mpm_irq,
526 IRQF_TRIGGER_RISING, "mpm_drv", msm_mpm_irq);
527
528 if (rc) {
529 pr_err("%s: failed to request irq %u: %d\n",
530 __func__, irq, rc);
531 goto init_bail;
532 }
533
534 rc = irq_set_irq_wake(irq, 1);
535 if (rc) {
536 pr_err("%s: failed to set wakeup irq %u: %d\n",
537 __func__, irq, rc);
538 goto init_free_bail;
539 }
540
541 return 0;
542
543init_free_bail:
544 free_irq(irq, msm_mpm_irq);
545
546init_bail:
547 return rc;
548}
549device_initcall(msm_mpm_init);