blob: 09f784d41f72938a5ffff2a89b6b0ad5eec2ea9e [file] [log] [blame]
Mahesh Sivasubramanianf1ddf042013-01-08 14:03:32 -07001/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
Mahesh Sivasubramanian102e5962012-06-20 13:12:11 -06002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13
14#include <linux/module.h>
15#include <linux/kernel.h>
16#include <linux/types.h>
17#include <linux/init.h>
18#include <linux/bitmap.h>
19#include <linux/bitops.h>
20#include <linux/interrupt.h>
21#include <linux/io.h>
22#include <linux/irq.h>
23#include <linux/irqdomain.h>
24#include <linux/list.h>
25#include <linux/platform_device.h>
26#include <linux/of.h>
27#include <linux/of_address.h>
28#include <linux/slab.h>
29#include <linux/spinlock.h>
30#include <asm/hardware/gic.h>
Mahesh Sivasubramanian2efbc352012-07-18 14:15:44 -060031#include <asm/arch_timer.h>
Mahesh Sivasubramanian102e5962012-06-20 13:12:11 -060032#include <mach/gpio.h>
33#include <mach/mpm.h>
34
/* Indices of the interrupt-controller domains the MPM can monitor. */
enum {
	MSM_MPM_GIC_IRQ_DOMAIN,
	MSM_MPM_GPIO_IRQ_DOMAIN,
	MSM_MPM_NR_IRQ_DOMAINS,
};
40
/* The two per-pin bookkeeping sets: normally enabled vs. wakeup-armed. */
enum {
	MSM_MPM_SET_ENABLED,
	MSM_MPM_SET_WAKEUP,
	MSM_NR_IRQS_SET,
};
46
/*
 * One apps-to-MPM mapping entry: ties a (domain, hwirq) pair to an MPM
 * pin number.  Entries are chained into irq_hash[] keyed on hwirq.
 */
struct mpm_irqs_a2m {
	struct irq_domain *domain;	/* controller the hwirq belongs to */
	struct device_node *parent;	/* DT node of that controller */
	irq_hw_number_t hwirq;		/* hw irq number within the domain */
	unsigned long pin;		/* MPM pin monitoring this hwirq */
	struct hlist_node node;		/* link in irq_hash[] bucket */
};
#define MAX_DOMAIN_NAME 5
Mahesh Sivasubramanian102e5962012-06-20 13:12:11 -060055
/*
 * Per-domain bitmaps of interrupts that are *not* routed to the MPM
 * ("unlisted").  If any bit is set, the corresponding low-power mode
 * cannot be entered because the MPM would miss that interrupt.
 */
struct mpm_irqs {
	struct irq_domain *domain;
	unsigned long *enabled_irqs;	/* enabled while not suspended (idle) */
	unsigned long *wakeup_irqs;	/* armed as wakeup sources (suspend) */
	unsigned long size;		/* number of hwirqs in the domain */
	char domain_name[MAX_DOMAIN_NAME];
};
63
/* Non-MPM-routed interrupt bookkeeping, one entry per irq domain. */
static struct mpm_irqs unlisted_irqs[MSM_MPM_NR_IRQ_DOMAINS];

/* Hash buckets mapping (domain, hwirq) -> MPM pin (struct mpm_irqs_a2m). */
static struct hlist_head irq_hash[MSM_MPM_NR_MPM_IRQS];
/* Reverse map, MPM pin -> Linux irq; populated lazily by a2m lookups. */
static unsigned int msm_mpm_irqs_m2a[MSM_MPM_NR_MPM_IRQS];
/* Number of 32-bit shadow words needed to hold one bit per MPM pin. */
#define MSM_MPM_REG_WIDTH  DIV_ROUND_UP(MSM_MPM_NR_MPM_IRQS, 32)

/* Word index / bit mask for the 1-bit-per-pin shadow registers. */
#define MSM_MPM_IRQ_INDEX(irq)  (irq / 32)
#define MSM_MPM_IRQ_MASK(irq)  BIT(irq % 32)

/* Word index / bit shift for the 2-bits-per-pin detect-control words. */
#define MSM_MPM_DETECT_CTL_INDEX(irq) (irq / 16)
#define MSM_MPM_DETECT_CTL_SHIFT(irq) ((irq % 16) * 2)

#define hashfn(val) (val % MSM_MPM_NR_MPM_IRQS)
#define SCLK_HZ (32768)			/* sleep clock frequency */
#define ARCH_TIMER_HZ (19200000)	/* arch timer frequency */
static struct msm_mpm_device_data msm_mpm_dev_data;

/* Register banks within the vMPM request area, in MSM_MPM_REG_WIDTH words. */
enum mpm_reg_offsets {
	MSM_MPM_REG_WAKEUP,
	MSM_MPM_REG_ENABLE,
	MSM_MPM_REG_DETECT_CTL,
	MSM_MPM_REG_DETECT_CTL1,
	MSM_MPM_REG_POLARITY,
	MSM_MPM_REG_STATUS,
};

/* Protects all of the shadow words below. */
static DEFINE_SPINLOCK(msm_mpm_lock);

/* Shadow copies of the vMPM registers, flushed to HW by msm_mpm_set(). */
static uint32_t msm_mpm_enabled_irq[MSM_MPM_REG_WIDTH];
static uint32_t msm_mpm_wake_irq[MSM_MPM_REG_WIDTH];
static uint32_t msm_mpm_detect_ctl[MSM_MPM_REG_WIDTH * 2];
static uint32_t msm_mpm_polarity[MSM_MPM_REG_WIDTH];
96
/* Bits for the debug_mask module parameter. */
enum {
	MSM_MPM_DEBUG_NON_DETECTABLE_IRQ = BIT(0),
	MSM_MPM_DEBUG_PENDING_IRQ = BIT(1),
	MSM_MPM_DEBUG_WRITE = BIT(2),
	MSM_MPM_DEBUG_NON_DETECTABLE_IRQ_IDLE = BIT(3),
};

static int msm_mpm_debug_mask = 1;
module_param_named(
	debug_mask, msm_mpm_debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP
);

/* Two independent init phases; both must complete before use. */
enum mpm_state {
	MSM_MPM_IRQ_MAPPING_DONE = BIT(0),	/* of_mpm_init() finished */
	MSM_MPM_DEVICE_PROBED = BIT(1),		/* platform probe finished */
};

static enum mpm_state msm_mpm_initialized;
115
116static inline bool msm_mpm_is_initialized(void)
117{
118 return msm_mpm_initialized &
119 (MSM_MPM_IRQ_MAPPING_DONE | MSM_MPM_DEVICE_PROBED);
120
121}
122
123static inline uint32_t msm_mpm_read(
124 unsigned int reg, unsigned int subreg_index)
125{
126 unsigned int offset = reg * MSM_MPM_REG_WIDTH + subreg_index;
127 return __raw_readl(msm_mpm_dev_data.mpm_request_reg_base + offset * 4);
128}
129
130static inline void msm_mpm_write(
131 unsigned int reg, unsigned int subreg_index, uint32_t value)
132{
133 unsigned int offset = reg * MSM_MPM_REG_WIDTH + subreg_index;
134
135 __raw_writel(value, msm_mpm_dev_data.mpm_request_reg_base + offset * 4);
136 if (MSM_MPM_DEBUG_WRITE & msm_mpm_debug_mask)
137 pr_info("%s: reg %u.%u: 0x%08x\n",
138 __func__, reg, subreg_index, value);
139}
140
/*
 * Ring the apps->RPM IPC doorbell so the RPM picks up the freshly
 * written vMPM request registers.
 */
static inline void msm_mpm_send_interrupt(void)
{
	__raw_writel(msm_mpm_dev_data.mpm_apps_ipc_val,
			msm_mpm_dev_data.mpm_apps_ipc_reg);
	/* Ensure the write is complete before returning. */
	wmb();
}
148
/* IPC interrupt handler; exists only so the wake irq can be claimed. */
static irqreturn_t msm_mpm_irq(int irq, void *dev_id)
{
	/*
	 * When the system resumes from deep sleep mode, the RPM hardware wakes
	 * up the Apps processor by triggering this interrupt. This interrupt
	 * has to be enabled and set as wake for the irq to get SPM out of
	 * sleep. Handle the interrupt here to make sure that it gets cleared.
	 */
	return IRQ_HANDLED;
}
159
/*
 * Flush the shadow state to the vMPM registers and notify the RPM.
 *
 * @wakeup:  absolute arch-timer tick at which the MPM should wake us.
 * @wakeset: true to program the wakeup irq set (suspend path), false to
 *           program the enabled irq set (idle path).
 *
 * The register banks are written for every subword before the IPC
 * doorbell is rung; STATUS is cleared so stale pending bits from the
 * previous sleep cannot cause a spurious wakeup.
 */
static void msm_mpm_set(cycle_t wakeup, bool wakeset)
{
	uint32_t *irqs;
	unsigned int reg;
	int i;
	uint32_t *expiry_timer;

	/*
	 * NOTE(review): reinterprets the 64-bit wakeup tick as an array of
	 * 32-bit words written to MSM_MPM_REG_WAKEUP; assumes
	 * MSM_MPM_REG_WIDTH == 2 and a little-endian word order that the
	 * RPM expects -- TODO confirm against the vMPM layout.
	 */
	expiry_timer = (uint32_t *)&wakeup;

	irqs = wakeset ? msm_mpm_wake_irq : msm_mpm_enabled_irq;
	for (i = 0; i < MSM_MPM_REG_WIDTH; i++) {
		reg = MSM_MPM_REG_WAKEUP;
		msm_mpm_write(reg, i, expiry_timer[i]);

		reg = MSM_MPM_REG_ENABLE;
		msm_mpm_write(reg, i, irqs[i]);

		reg = MSM_MPM_REG_DETECT_CTL;
		msm_mpm_write(reg, i, msm_mpm_detect_ctl[i]);

		/* Second half of the 2-bit-per-pin detect control words. */
		reg = MSM_MPM_REG_DETECT_CTL1;
		msm_mpm_write(reg, i, msm_mpm_detect_ctl[2+i]);

		reg = MSM_MPM_REG_POLARITY;
		msm_mpm_write(reg, i, msm_mpm_polarity[i]);

		/* Clear latched status so old wake pins don't linger. */
		reg = MSM_MPM_REG_STATUS;
		msm_mpm_write(reg, i, 0);
	}

	/*
	 * Ensure that the set operation is complete before sending the
	 * interrupt
	 */
	wmb();
	msm_mpm_send_interrupt();
}
197
/* MPM pin -> Linux irq; 0 if no apps irq has been mapped to this pin yet. */
static inline unsigned int msm_mpm_get_irq_m2a(unsigned int pin)
{
	return msm_mpm_irqs_m2a[pin];
}
202
203static inline uint16_t msm_mpm_get_irq_a2m(struct irq_data *d)
204{
205 struct hlist_node *elem;
206 struct mpm_irqs_a2m *node = NULL;
207
208 hlist_for_each_entry(node, elem, &irq_hash[hashfn(d->hwirq)], node) {
209 if ((node->hwirq == d->hwirq)
210 && (d->domain == node->domain)) {
211 /* Update the linux irq mapping */
212 msm_mpm_irqs_m2a[node->pin] = d->irq;
213 break;
214 }
215 }
216 return node ? node->pin : 0;
217}
218
/*
 * Core enable/disable bookkeeping for one interrupt; caller must hold
 * msm_mpm_lock.
 *
 * @enable:  set or clear the interrupt's bit.
 * @wakeset: operate on the wakeup set (suspend) instead of the enabled
 *           set (idle).
 *
 * MPM-routed interrupts update the shadow register words; unlisted
 * interrupts update the per-domain "blocks sleep" bitmaps instead.
 * Always returns 0.
 */
static int msm_mpm_enable_irq_exclusive(
	struct irq_data *d, bool enable, bool wakeset)
{
	uint16_t mpm_pin;

	WARN_ON(!d);
	if (!d)
		return 0;

	mpm_pin = msm_mpm_get_irq_a2m(d);

	/* 0xff marks an interrupt deliberately excluded from MPM handling. */
	if (mpm_pin == 0xff)
		return 0;

	if (mpm_pin) {
		/* Routed to the MPM: flip its bit in the shadow words. */
		uint32_t *mpm_irq_masks = wakeset ?
				msm_mpm_wake_irq : msm_mpm_enabled_irq;
		uint32_t index = MSM_MPM_IRQ_INDEX(mpm_pin);
		uint32_t mask = MSM_MPM_IRQ_MASK(mpm_pin);

		if (enable)
			mpm_irq_masks[index] |= mask;
		else
			mpm_irq_masks[index] &= ~mask;
	} else {
		/* Unlisted: record it in the owning domain's bitmap. */
		int i;
		unsigned long *irq_apps;

		for (i = 0; i < MSM_MPM_NR_IRQ_DOMAINS; i++) {
			if (d->domain == unlisted_irqs[i].domain)
				break;
		}

		if (i == MSM_MPM_NR_IRQ_DOMAINS)
			return 0;
		irq_apps = wakeset ? unlisted_irqs[i].wakeup_irqs :
					unlisted_irqs[i].enabled_irqs;

		if (enable)
			__set_bit(d->hwirq, irq_apps);
		else
			__clear_bit(d->hwirq, irq_apps);

	}

	return 0;
}
266
267static void msm_mpm_set_detect_ctl(int pin, unsigned int flow_type)
268{
269 uint32_t index;
270 uint32_t val = 0;
271 uint32_t shift;
272
273 index = MSM_MPM_DETECT_CTL_INDEX(pin);
274 shift = MSM_MPM_DETECT_CTL_SHIFT(pin);
275
276 if (flow_type & IRQ_TYPE_EDGE_RISING)
277 val |= 0x02;
278
279 if (flow_type & IRQ_TYPE_EDGE_FALLING)
280 val |= 0x01;
281
282 msm_mpm_detect_ctl[index] &= ~(0x3 << shift);
283 msm_mpm_detect_ctl[index] |= (val & 0x03) << shift;
284}
285
/*
 * Record @flow_type (edge bits + polarity) for an MPM-routed interrupt
 * in the shadow words; a no-op for unlisted (pin == 0) or excluded
 * (pin == 0xff) interrupts.  Caller must hold msm_mpm_lock.
 */
static int msm_mpm_set_irq_type_exclusive(
	struct irq_data *d, unsigned int flow_type)
{
	uint32_t mpm_irq;

	mpm_irq = msm_mpm_get_irq_a2m(d);

	/* 0xff marks an interrupt deliberately excluded from MPM handling. */
	if (mpm_irq == 0xff)
		return 0;

	if (mpm_irq) {
		uint32_t index = MSM_MPM_IRQ_INDEX(mpm_irq);
		uint32_t mask = MSM_MPM_IRQ_MASK(mpm_irq);

		if (index >= MSM_MPM_REG_WIDTH)
			return -EFAULT;

		msm_mpm_set_detect_ctl(mpm_irq, flow_type);

		/* Polarity bit set = active-high / rising. */
		if (flow_type & IRQ_TYPE_LEVEL_HIGH)
			msm_mpm_polarity[index] |= mask;
		else
			msm_mpm_polarity[index] &= ~mask;
	}
	return 0;
}
312
313static int __msm_mpm_enable_irq(struct irq_data *d, bool enable)
314{
315 unsigned long flags;
316 int rc;
317
318 if (!msm_mpm_is_initialized())
319 return -EINVAL;
320
321 spin_lock_irqsave(&msm_mpm_lock, flags);
322
323 rc = msm_mpm_enable_irq_exclusive(d, enable, false);
324 spin_unlock_irqrestore(&msm_mpm_lock, flags);
325
326 return rc;
327}
328
/* irq_chip unmask hook: arm the interrupt in the enabled (idle) set. */
static void msm_mpm_enable_irq(struct irq_data *d)
{
	__msm_mpm_enable_irq(d, true);
}

/* irq_chip mask/disable hook: remove it from the enabled (idle) set. */
static void msm_mpm_disable_irq(struct irq_data *d)
{
	__msm_mpm_enable_irq(d, false);
}
338
339static int msm_mpm_set_irq_wake(struct irq_data *d, unsigned int on)
340{
341 unsigned long flags;
342 int rc;
343
344 if (!msm_mpm_is_initialized())
345 return -EINVAL;
346
347 spin_lock_irqsave(&msm_mpm_lock, flags);
348 rc = msm_mpm_enable_irq_exclusive(d, (bool)on, true);
349 spin_unlock_irqrestore(&msm_mpm_lock, flags);
350
351 return rc;
352}
353
354static int msm_mpm_set_irq_type(struct irq_data *d, unsigned int flow_type)
355{
356 unsigned long flags;
357 int rc;
358
359 if (!msm_mpm_is_initialized())
360 return -EINVAL;
361
362 spin_lock_irqsave(&msm_mpm_lock, flags);
363 rc = msm_mpm_set_irq_type_exclusive(d, flow_type);
364 spin_unlock_irqrestore(&msm_mpm_lock, flags);
365
366 return rc;
367}
368
369/******************************************************************************
370 * Public functions
371 *****************************************************************************/
372int msm_mpm_enable_pin(unsigned int pin, unsigned int enable)
373{
374 uint32_t index = MSM_MPM_IRQ_INDEX(pin);
375 uint32_t mask = MSM_MPM_IRQ_MASK(pin);
376 unsigned long flags;
377
378 if (!msm_mpm_is_initialized())
379 return -EINVAL;
380
381 if (pin > MSM_MPM_NR_MPM_IRQS)
382 return -EINVAL;
383
384 spin_lock_irqsave(&msm_mpm_lock, flags);
385
386 if (enable)
387 msm_mpm_enabled_irq[index] |= mask;
388 else
389 msm_mpm_enabled_irq[index] &= ~mask;
390
391 spin_unlock_irqrestore(&msm_mpm_lock, flags);
392 return 0;
393}
394
395int msm_mpm_set_pin_wake(unsigned int pin, unsigned int on)
396{
397 uint32_t index = MSM_MPM_IRQ_INDEX(pin);
398 uint32_t mask = MSM_MPM_IRQ_MASK(pin);
399 unsigned long flags;
400
401 if (!msm_mpm_is_initialized())
402 return -EINVAL;
403
404 if (pin >= MSM_MPM_NR_MPM_IRQS)
405 return -EINVAL;
406
407 spin_lock_irqsave(&msm_mpm_lock, flags);
408
409 if (on)
410 msm_mpm_wake_irq[index] |= mask;
411 else
412 msm_mpm_wake_irq[index] &= ~mask;
413
414 spin_unlock_irqrestore(&msm_mpm_lock, flags);
415 return 0;
416}
417
418int msm_mpm_set_pin_type(unsigned int pin, unsigned int flow_type)
419{
420 uint32_t index = MSM_MPM_IRQ_INDEX(pin);
421 uint32_t mask = MSM_MPM_IRQ_MASK(pin);
422 unsigned long flags;
423
424 if (!msm_mpm_is_initialized())
425 return -EINVAL;
426
427 if (pin >= MSM_MPM_NR_MPM_IRQS)
428 return -EINVAL;
429
430 spin_lock_irqsave(&msm_mpm_lock, flags);
431
432 msm_mpm_set_detect_ctl(pin, flow_type);
433
434 if (flow_type & IRQ_TYPE_LEVEL_HIGH)
435 msm_mpm_polarity[index] |= mask;
436 else
437 msm_mpm_polarity[index] &= ~mask;
438
439 spin_unlock_irqrestore(&msm_mpm_lock, flags);
440 return 0;
441}
442
/*
 * Return true when every active interrupt in domain @d is detectable by
 * the MPM, i.e. the domain's "unlisted" bitmap is empty.  A false
 * return means low-power modes must be vetoed, and the offending hwirqs
 * are logged when the matching debug bit is set.
 *
 * @from_idle: check the enabled set (idle path); otherwise the wakeup
 *             set (suspend path).
 */
static bool msm_mpm_interrupts_detectable(int d, bool from_idle)
{
	unsigned long *irq_bitmap;
	bool debug_mask, ret = false;
	struct mpm_irqs *unlisted = &unlisted_irqs[d];

	if (!msm_mpm_is_initialized())
		return false;

	if (from_idle) {
		irq_bitmap = unlisted->enabled_irqs;
		debug_mask = msm_mpm_debug_mask &
				MSM_MPM_DEBUG_NON_DETECTABLE_IRQ_IDLE;
	} else {
		irq_bitmap = unlisted->wakeup_irqs;
		debug_mask = msm_mpm_debug_mask &
				MSM_MPM_DEBUG_NON_DETECTABLE_IRQ;
	}

	/* Empty bitmap => nothing undetectable => sleep is allowed. */
	ret = (bool) __bitmap_empty(irq_bitmap, unlisted->size);

	if (debug_mask && !ret) {
		/* Log every hwirq that is blocking sleep. */
		int i = 0;
		i = find_first_bit(irq_bitmap, unlisted->size);
		pr_info("%s(): %s preventing system sleep modes during %s\n",
				__func__, unlisted->domain_name,
				from_idle ? "idle" : "suspend");

		while (i < unlisted->size) {
			pr_info("\thwirq: %d\n", i);
			i = find_next_bit(irq_bitmap, unlisted->size, i + 1);
		}
	}

	return ret;
}
479
/* True when all active GPIO interrupts are MPM-detectable. */
bool msm_mpm_gpio_irqs_detectable(bool from_idle)
{
	return msm_mpm_interrupts_detectable(MSM_MPM_GPIO_IRQ_DOMAIN,
			from_idle);
}

/* True when all active GIC interrupts are MPM-detectable. */
bool msm_mpm_irqs_detectable(bool from_idle)
{
	return msm_mpm_interrupts_detectable(MSM_MPM_GIC_IRQ_DOMAIN,
			from_idle);
}
/*
 * Program the MPM just before entering a low-power mode.
 *
 * @sclk_count: relative wakeup time in 32.768 kHz sleep-clock ticks;
 *              0 means no timed wakeup (program the far-future value).
 * @from_idle:  idle path uses the enabled set, suspend the wakeup set.
 *
 * The sleep-clock delta is converted to absolute 19.2 MHz arch-timer
 * ticks: wakeup = now + sclk_count * ARCH_TIMER_HZ / SCLK_HZ.
 */
void msm_mpm_enter_sleep(uint32_t sclk_count, bool from_idle)
{
	cycle_t wakeup = (u64)sclk_count * ARCH_TIMER_HZ;

	if (!msm_mpm_is_initialized()) {
		pr_err("%s(): MPM not initialized\n", __func__);
		return;
	}

	if (sclk_count) {
		do_div(wakeup, SCLK_HZ);
		wakeup += arch_counter_get_cntpct();
	} else {
		/* No timed wakeup requested. */
		wakeup = (~0ULL);
	}

	msm_mpm_set(wakeup, !from_idle);
}
508
/*
 * Replay interrupts latched by the MPM while the CPU slept.
 *
 * Reads the STATUS words, maps each pending MPM pin back to its Linux
 * irq, and for edge-triggered irqs marks them pending (level irqs will
 * simply re-assert at the GIC, so they need no help).  On the idle path
 * the irq is also resent immediately.
 */
void msm_mpm_exit_sleep(bool from_idle)
{
	unsigned long pending;
	int i;
	int k;

	if (!msm_mpm_is_initialized()) {
		pr_err("%s(): MPM not initialized\n", __func__);
		return;
	}

	for (i = 0; i < MSM_MPM_REG_WIDTH; i++) {
		pending = msm_mpm_read(MSM_MPM_REG_STATUS, i);

		if (MSM_MPM_DEBUG_PENDING_IRQ & msm_mpm_debug_mask)
			pr_info("%s: pending.%d: 0x%08lx", __func__,
					i, pending);

		/* Walk every set bit in this 32-pin status word. */
		k = find_first_bit(&pending, 32);
		while (k < 32) {
			unsigned int mpm_irq = 32 * i + k;
			unsigned int apps_irq = msm_mpm_get_irq_m2a(mpm_irq);
			struct irq_desc *desc = apps_irq ?
				irq_to_desc(apps_irq) : NULL;

			/* Only edge irqs need software replay. */
			if (desc && !irqd_is_level_type(&desc->irq_data)) {
				irq_set_pending(apps_irq);
				if (from_idle) {
					raw_spin_lock(&desc->lock);
					check_irq_resend(desc, apps_irq);
					raw_spin_unlock(&desc->lock);
				}
			}

			k = find_next_bit(&pending, 32, k + 1);
		}
	}
}
547
548static int __devinit msm_mpm_dev_probe(struct platform_device *pdev)
549{
550 struct resource *res = NULL;
551 int offset, ret;
552 struct msm_mpm_device_data *dev = &msm_mpm_dev_data;
553
554 if (msm_mpm_initialized & MSM_MPM_DEVICE_PROBED) {
555 pr_warn("MPM device probed multiple times\n");
556 return 0;
557 }
558
559 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vmpm");
560 if (!res) {
561 pr_err("%s(): Missing RPM memory resource\n", __func__);
562 goto fail;
563 }
564
565 dev->mpm_request_reg_base = devm_request_and_ioremap(&pdev->dev, res);
566
567 if (!dev->mpm_request_reg_base) {
568 pr_err("%s(): Unable to iomap\n", __func__);
569 goto fail;
570 }
571
572 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ipc");
573 if (!res) {
574 pr_err("%s(): Missing GCC memory resource\n", __func__);
575 goto failed_irq_get;
576 }
577
578 dev->mpm_apps_ipc_reg = devm_ioremap(&pdev->dev, res->start,
579 resource_size(res));
580
581 if (of_property_read_u32(pdev->dev.of_node,
582 "qcom,ipc-bit-offset", &offset)) {
583 pr_info("%s(): Cannot read ipc bit offset\n", __func__);
584 goto failed_free_irq;
585 }
586
587 dev->mpm_apps_ipc_val = (1 << offset);
588
589 if (!dev->mpm_apps_ipc_reg)
590 goto failed_irq_get;
591
592 dev->mpm_ipc_irq = platform_get_irq(pdev, 0);
593
594 if (dev->mpm_ipc_irq == -ENXIO) {
595 pr_info("%s(): Cannot find IRQ resource\n", __func__);
596 goto failed_irq_get;
597 }
598 ret = request_irq(dev->mpm_ipc_irq, msm_mpm_irq,
599 IRQF_TRIGGER_RISING, pdev->name, msm_mpm_irq);
600
601 if (ret) {
602 pr_info("%s(): request_irq failed errno: %d\n", __func__, ret);
603 goto failed_irq_get;
604 }
Mahesh Sivasubramanian62360c62012-07-26 15:27:16 -0600605 ret = irq_set_irq_wake(dev->mpm_ipc_irq, 1);
606
607 if (ret) {
608 pr_err("%s: failed to set wakeup irq %u: %d\n",
609 __func__, dev->mpm_ipc_irq, ret);
610 goto failed_irq_get;
611
612 }
Mahesh Sivasubramanianb9498582012-07-25 11:22:56 -0600613 msm_mpm_initialized |= MSM_MPM_DEVICE_PROBED;
Mahesh Sivasubramanian102e5962012-06-20 13:12:11 -0600614
615 return 0;
616
617failed_free_irq:
618 free_irq(dev->mpm_ipc_irq, msm_mpm_irq);
619failed_irq_get:
620 if (dev->mpm_apps_ipc_reg)
621 devm_iounmap(&pdev->dev, dev->mpm_apps_ipc_reg);
622 if (dev->mpm_request_reg_base)
623 devm_iounmap(&pdev->dev, dev->mpm_request_reg_base);
624fail:
625 return -EINVAL;
626}
627
/* Number of hwirqs in a linear-revmap irq domain (GIC). */
static inline int __init mpm_irq_domain_linear_size(struct irq_domain *d)
{
	return d->revmap_data.linear.size;
}

/* Number of hwirqs in a legacy-revmap irq domain (GPIO). */
static inline int __init mpm_irq_domain_legacy_size(struct irq_domain *d)
{
	return d->revmap_data.legacy.size;
}
637
638void __init of_mpm_init(struct device_node *node)
639{
640 const __be32 *list;
641
642 struct mpm_of {
643 char *pkey;
644 char *map;
Mahesh Sivasubramanianf1ddf042013-01-08 14:03:32 -0700645 char name[MAX_DOMAIN_NAME];
Mahesh Sivasubramanian102e5962012-06-20 13:12:11 -0600646 struct irq_chip *chip;
647 int (*get_max_irqs)(struct irq_domain *d);
648 };
649 int i;
650
651 struct mpm_of mpm_of_map[MSM_MPM_NR_IRQ_DOMAINS] = {
652 {
653 "qcom,gic-parent",
654 "qcom,gic-map",
Mahesh Sivasubramanianf1ddf042013-01-08 14:03:32 -0700655 "gic",
Mahesh Sivasubramanian102e5962012-06-20 13:12:11 -0600656 &gic_arch_extn,
657 mpm_irq_domain_linear_size,
658 },
659 {
660 "qcom,gpio-parent",
661 "qcom,gpio-map",
Mahesh Sivasubramanianf1ddf042013-01-08 14:03:32 -0700662 "gpio",
Mahesh Sivasubramanian102e5962012-06-20 13:12:11 -0600663 &msm_gpio_irq_extn,
664 mpm_irq_domain_legacy_size,
665 },
666 };
667
668 if (msm_mpm_initialized & MSM_MPM_IRQ_MAPPING_DONE) {
669 pr_warn("%s(): MPM driver mapping exists\n", __func__);
670 return;
671 }
672
673 for (i = 0; i < MSM_MPM_NR_MPM_IRQS; i++)
674 INIT_HLIST_HEAD(&irq_hash[i]);
675
676 for (i = 0; i < MSM_MPM_NR_IRQ_DOMAINS; i++) {
677 struct device_node *parent = NULL;
678 struct mpm_irqs_a2m *mpm_node = NULL;
679 struct irq_domain *domain = NULL;
680 int size;
681
682 parent = of_parse_phandle(node, mpm_of_map[i].pkey, 0);
683
684 if (!parent) {
685 pr_warn("%s(): %s Not found\n", __func__,
686 mpm_of_map[i].pkey);
687 continue;
688 }
689
690 domain = irq_find_host(parent);
691
692 if (!domain) {
693 pr_warn("%s(): Cannot find irq controller for %s\n",
694 __func__, mpm_of_map[i].pkey);
695 continue;
696 }
697
698 size = mpm_of_map[i].get_max_irqs(domain);
Mahesh Sivasubramanianf1ddf042013-01-08 14:03:32 -0700699 unlisted_irqs[i].size = size;
700 memcpy(unlisted_irqs[i].domain_name, mpm_of_map[i].name,
701 MAX_DOMAIN_NAME);
Mahesh Sivasubramanian102e5962012-06-20 13:12:11 -0600702
703 unlisted_irqs[i].enabled_irqs =
704 kzalloc(BITS_TO_LONGS(size) * sizeof(unsigned long),
705 GFP_KERNEL);
706
707 if (!unlisted_irqs[i].enabled_irqs)
708 goto failed_malloc;
709
710 unlisted_irqs[i].wakeup_irqs =
711 kzalloc(BITS_TO_LONGS(size) * sizeof(unsigned long),
712 GFP_KERNEL);
713
714 if (!unlisted_irqs[i].wakeup_irqs)
715 goto failed_malloc;
716
717 unlisted_irqs[i].domain = domain;
718
719 list = of_get_property(node, mpm_of_map[i].map, &size);
720
721 if (!list || !size) {
722 __WARN();
723 continue;
724 }
725
726 /*
727 * Size is in bytes. Convert to size of uint32_t
728 */
729 size /= sizeof(*list);
730
731 /*
732 * The data is represented by a tuple mapping hwirq to a MPM
733 * pin. The number of mappings in the device tree would be
734 * size/2
735 */
736 mpm_node = kzalloc(sizeof(struct mpm_irqs_a2m) * size / 2,
737 GFP_KERNEL);
738 if (!mpm_node)
739 goto failed_malloc;
740
741 while (size) {
742 unsigned long pin = be32_to_cpup(list++);
743 irq_hw_number_t hwirq = be32_to_cpup(list++);
744
745 mpm_node->pin = pin;
746 mpm_node->hwirq = hwirq;
747 mpm_node->parent = parent;
748 mpm_node->domain = domain;
749 INIT_HLIST_NODE(&mpm_node->node);
750
751 hlist_add_head(&mpm_node->node,
752 &irq_hash[hashfn(mpm_node->hwirq)]);
753 size -= 2;
754 mpm_node++;
755 }
756
757 if (mpm_of_map[i].chip) {
758 mpm_of_map[i].chip->irq_mask = msm_mpm_disable_irq;
759 mpm_of_map[i].chip->irq_unmask = msm_mpm_enable_irq;
760 mpm_of_map[i].chip->irq_disable = msm_mpm_disable_irq;
761 mpm_of_map[i].chip->irq_set_type = msm_mpm_set_irq_type;
762 mpm_of_map[i].chip->irq_set_wake = msm_mpm_set_irq_wake;
763 }
764
765 }
Mahesh Sivasubramanianb9498582012-07-25 11:22:56 -0600766 msm_mpm_initialized |= MSM_MPM_IRQ_MAPPING_DONE;
Mahesh Sivasubramanian102e5962012-06-20 13:12:11 -0600767
768 return;
Mahesh Sivasubramanianb9498582012-07-25 11:22:56 -0600769
Mahesh Sivasubramanian102e5962012-06-20 13:12:11 -0600770failed_malloc:
771 for (i = 0; i < MSM_MPM_NR_MPM_IRQS; i++) {
772 mpm_of_map[i].chip->irq_mask = NULL;
773 mpm_of_map[i].chip->irq_unmask = NULL;
774 mpm_of_map[i].chip->irq_disable = NULL;
775 mpm_of_map[i].chip->irq_set_type = NULL;
776 mpm_of_map[i].chip->irq_set_wake = NULL;
777
778 kfree(unlisted_irqs[i].enabled_irqs);
779 kfree(unlisted_irqs[i].wakeup_irqs);
780
781 }
782}
783
/* DT match table for the vMPM block. */
static struct of_device_id msm_mpm_match_table[] = {
	{.compatible = "qcom,mpm-v2"},
	{},
};

static struct platform_driver msm_mpm_dev_driver = {
	.probe = msm_mpm_dev_probe,
	.driver = {
		.name = "mpm-v2",
		.owner = THIS_MODULE,
		.of_match_table = msm_mpm_match_table,
	},
};

/* Registered at arch_initcall time so the MPM is up before late irq users. */
int __init msm_mpm_device_init(void)
{
	return platform_driver_register(&msm_mpm_dev_driver);
}
arch_initcall(msm_mpm_device_init);