/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/list.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <asm/hardware/gic.h>
#include <asm/arch_timer.h>
#include <mach/gpio.h>
#include <mach/mpm.h>

enum {
	MSM_MPM_GIC_IRQ_DOMAIN,
	MSM_MPM_GPIO_IRQ_DOMAIN,
	MSM_MPM_NR_IRQ_DOMAINS,
};

enum {
	MSM_MPM_SET_ENABLED,
	MSM_MPM_SET_WAKEUP,
	MSM_NR_IRQS_SET,
};

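/*
 * Apps-to-MPM mapping entry: ties a hwirq within an interrupt controller
 * domain to its MPM pin. Entries are built from the device tree by
 * of_mpm_init() and kept in irq_hash, keyed by hashfn(hwirq).
 */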
struct mpm_irqs_a2m {
	struct irq_domain *domain;
	struct device_node *parent;
	irq_hw_number_t hwirq;
	unsigned long pin;
	struct hlist_node node;
};

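/*
 * Per-domain bookkeeping for interrupts that have no MPM pin ("unlisted").
 * The bitmaps track which of those interrupts are currently enabled or
 * configured as wakeup sources.
 */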
struct mpm_irqs {
	struct irq_domain *domain;
	unsigned long *enabled_irqs;
	unsigned long *wakeup_irqs;
};

static struct mpm_irqs unlisted_irqs[MSM_MPM_NR_IRQ_DOMAINS];

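/* Hash of mpm_irqs_a2m nodes plus the reverse MPM-pin-to-Linux-irq table. */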
static struct hlist_head irq_hash[MSM_MPM_NR_MPM_IRQS];
static unsigned int msm_mpm_irqs_m2a[MSM_MPM_NR_MPM_IRQS];
#define MSM_MPM_REG_WIDTH DIV_ROUND_UP(MSM_MPM_NR_MPM_IRQS, 32)

#define MSM_MPM_IRQ_INDEX(irq) ((irq) / 32)
#define MSM_MPM_IRQ_MASK(irq) BIT((irq) % 32)

#define MSM_MPM_DETECT_CTL_INDEX(irq) ((irq) / 16)
#define MSM_MPM_DETECT_CTL_SHIFT(irq) (((irq) % 16) * 2)

#define hashfn(val) ((val) % MSM_MPM_NR_MPM_IRQS)
#define SCLK_HZ (32768)
#define ARCH_TIMER_HZ (19200000)
static struct msm_mpm_device_data msm_mpm_dev_data;

enum mpm_reg_offsets {
	MSM_MPM_REG_WAKEUP,
	MSM_MPM_REG_ENABLE,
	MSM_MPM_REG_DETECT_CTL,
	MSM_MPM_REG_DETECT_CTL1,
	MSM_MPM_REG_POLARITY,
	MSM_MPM_REG_STATUS,
};

static DEFINE_SPINLOCK(msm_mpm_lock);

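/*
 * Shadow copies of the MPM request registers, updated under msm_mpm_lock
 * and written out to the hardware by msm_mpm_set().
 */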
static uint32_t msm_mpm_enabled_irq[MSM_MPM_REG_WIDTH];
static uint32_t msm_mpm_wake_irq[MSM_MPM_REG_WIDTH];
static uint32_t msm_mpm_detect_ctl[MSM_MPM_REG_WIDTH * 2];
static uint32_t msm_mpm_polarity[MSM_MPM_REG_WIDTH];

enum {
	MSM_MPM_DEBUG_NON_DETECTABLE_IRQ = BIT(0),
	MSM_MPM_DEBUG_PENDING_IRQ = BIT(1),
	MSM_MPM_DEBUG_WRITE = BIT(2),
	MSM_MPM_DEBUG_NON_DETECTABLE_IRQ_IDLE = BIT(3),
};

static int msm_mpm_debug_mask = 1;
module_param_named(
	debug_mask, msm_mpm_debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP
);

enum mpm_state {
	MSM_MPM_IRQ_MAPPING_DONE = BIT(0),
	MSM_MPM_DEVICE_PROBED = BIT(1),
};

static enum mpm_state msm_mpm_initialized;

static inline bool msm_mpm_is_initialized(void)
{
	return msm_mpm_initialized &
		(MSM_MPM_IRQ_MAPPING_DONE | MSM_MPM_DEVICE_PROBED);
}

static inline uint32_t msm_mpm_read(
	unsigned int reg, unsigned int subreg_index)
{
	unsigned int offset = reg * MSM_MPM_REG_WIDTH + subreg_index;
	return __raw_readl(msm_mpm_dev_data.mpm_request_reg_base + offset * 4);
}

static inline void msm_mpm_write(
	unsigned int reg, unsigned int subreg_index, uint32_t value)
{
	unsigned int offset = reg * MSM_MPM_REG_WIDTH + subreg_index;

	__raw_writel(value, msm_mpm_dev_data.mpm_request_reg_base + offset * 4);
	if (MSM_MPM_DEBUG_WRITE & msm_mpm_debug_mask)
		pr_info("%s: reg %u.%u: 0x%08x\n",
				__func__, reg, subreg_index, value);
}

static inline void msm_mpm_send_interrupt(void)
{
	__raw_writel(msm_mpm_dev_data.mpm_apps_ipc_val,
			msm_mpm_dev_data.mpm_apps_ipc_reg);
	/* Ensure the write is complete before returning. */
	wmb();
}

static irqreturn_t msm_mpm_irq(int irq, void *dev_id)
{
	/*
	 * When the system resumes from deep sleep mode, the RPM hardware wakes
	 * up the Apps processor by triggering this interrupt. This interrupt
	 * has to be enabled and set as wake for the irq to get SPM out of
	 * sleep. Handle the interrupt here to make sure that it gets cleared.
	 */
	return IRQ_HANDLED;
}

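/*
 * Program the MPM request registers: the 64-bit wakeup time split into
 * 32-bit words, the enable or wakeup mask, detect control, polarity, and a
 * cleared status register, then notify the RPM through the Apps IPC register.
 */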
static void msm_mpm_set(cycle_t wakeup, bool wakeset)
{
	uint32_t *irqs;
	unsigned int reg;
	int i;
	uint32_t *expiry_timer;

	expiry_timer = (uint32_t *)&wakeup;

	irqs = wakeset ? msm_mpm_wake_irq : msm_mpm_enabled_irq;
	for (i = 0; i < MSM_MPM_REG_WIDTH; i++) {
		reg = MSM_MPM_REG_WAKEUP;
		msm_mpm_write(reg, i, expiry_timer[i]);

		reg = MSM_MPM_REG_ENABLE;
		msm_mpm_write(reg, i, irqs[i]);

		reg = MSM_MPM_REG_DETECT_CTL;
		msm_mpm_write(reg, i, msm_mpm_detect_ctl[i]);

		reg = MSM_MPM_REG_DETECT_CTL1;
		msm_mpm_write(reg, i, msm_mpm_detect_ctl[2+i]);

		reg = MSM_MPM_REG_POLARITY;
		msm_mpm_write(reg, i, msm_mpm_polarity[i]);

		reg = MSM_MPM_REG_STATUS;
		msm_mpm_write(reg, i, 0);
	}

	/*
	 * Ensure that the set operation is complete before sending the
	 * interrupt
	 */
	wmb();
	msm_mpm_send_interrupt();
}

static inline unsigned int msm_mpm_get_irq_m2a(unsigned int pin)
{
	return msm_mpm_irqs_m2a[pin];
}

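/*
 * Look up the MPM pin for an Apps interrupt (domain + hwirq) and refresh the
 * pin-to-Linux-irq table on a hit. Returns 0 when the interrupt is not
 * listed in the MPM map.
 */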
static inline uint16_t msm_mpm_get_irq_a2m(struct irq_data *d)
{
	struct hlist_node *elem;
	struct mpm_irqs_a2m *node = NULL;
	uint16_t pin = 0;

	hlist_for_each_entry(node, elem, &irq_hash[hashfn(d->hwirq)], node) {
		if ((node->hwirq == d->hwirq)
				&& (d->domain == node->domain)) {
			/* Update the linux irq mapping */
			msm_mpm_irqs_m2a[node->pin] = d->irq;
			pin = node->pin;
			break;
		}
	}
	return pin;
}

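/*
 * Record an interrupt in either the enable or the wakeup shadow mask when it
 * has an MPM pin; otherwise track it in the per-domain "unlisted" bitmaps.
 * Called with msm_mpm_lock held.
 */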
static int msm_mpm_enable_irq_exclusive(
	struct irq_data *d, bool enable, bool wakeset)
{
	uint16_t mpm_pin;

	WARN_ON(!d);
	if (!d)
		return 0;

	mpm_pin = msm_mpm_get_irq_a2m(d);

	if (mpm_pin == 0xff)
		return 0;

	if (mpm_pin) {
		uint32_t *mpm_irq_masks = wakeset ?
				msm_mpm_wake_irq : msm_mpm_enabled_irq;
		uint32_t index = MSM_MPM_IRQ_INDEX(mpm_pin);
		uint32_t mask = MSM_MPM_IRQ_MASK(mpm_pin);

		if (enable)
			mpm_irq_masks[index] |= mask;
		else
			mpm_irq_masks[index] &= ~mask;
	} else {
		int i;
		unsigned long *irq_apps;

		for (i = 0; i < MSM_MPM_NR_IRQ_DOMAINS; i++) {
			if (d->domain == unlisted_irqs[i].domain)
				break;
		}

		if (i == MSM_MPM_NR_IRQ_DOMAINS)
			return 0;

		irq_apps = wakeset ? unlisted_irqs[i].wakeup_irqs :
					unlisted_irqs[i].enabled_irqs;

		if (enable)
			__set_bit(d->hwirq, irq_apps);
		else
			__clear_bit(d->hwirq, irq_apps);
	}

	return 0;
}

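/*
 * Each MPM pin has a two-bit detect-control field (16 pins per register):
 * bit 1 enables rising-edge detection and bit 0 falling-edge detection.
 * msm_mpm_set_irq_type_exclusive() programs no edge bits for level-triggered
 * pins and sets their polarity separately.
 */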
static void msm_mpm_set_detect_ctl(int pin, unsigned int flow_type)
{
	uint32_t index;
	uint32_t val = 0;
	uint32_t shift;

	index = MSM_MPM_DETECT_CTL_INDEX(pin);
	shift = MSM_MPM_DETECT_CTL_SHIFT(pin);

	if (flow_type & IRQ_TYPE_EDGE_RISING)
		val |= 0x02;

	if (flow_type & IRQ_TYPE_EDGE_FALLING)
		val |= 0x01;

	msm_mpm_detect_ctl[index] &= ~(0x3 << shift);
	msm_mpm_detect_ctl[index] |= (val & 0x03) << shift;
}

static int msm_mpm_set_irq_type_exclusive(
	struct irq_data *d, unsigned int flow_type)
{
	uint32_t mpm_irq;

	mpm_irq = msm_mpm_get_irq_a2m(d);

	if (mpm_irq == 0xff)
		return 0;

	if (mpm_irq) {
		uint32_t index = MSM_MPM_IRQ_INDEX(mpm_irq);
		uint32_t mask = MSM_MPM_IRQ_MASK(mpm_irq);

		if (index >= MSM_MPM_REG_WIDTH)
			return -EFAULT;

		msm_mpm_set_detect_ctl(mpm_irq, flow_type);

		if (flow_type & IRQ_TYPE_LEVEL_HIGH)
			msm_mpm_polarity[index] |= mask;
		else
			msm_mpm_polarity[index] &= ~mask;
	}
	return 0;
}

static int __msm_mpm_enable_irq(struct irq_data *d, bool enable)
{
	unsigned long flags;
	int rc;

	if (!msm_mpm_is_initialized())
		return -EINVAL;

	spin_lock_irqsave(&msm_mpm_lock, flags);

	rc = msm_mpm_enable_irq_exclusive(d, enable, false);
	spin_unlock_irqrestore(&msm_mpm_lock, flags);

	return rc;
}

static void msm_mpm_enable_irq(struct irq_data *d)
{
	__msm_mpm_enable_irq(d, true);
}

static void msm_mpm_disable_irq(struct irq_data *d)
{
	__msm_mpm_enable_irq(d, false);
}

static int msm_mpm_set_irq_wake(struct irq_data *d, unsigned int on)
{
	unsigned long flags;
	int rc;

	if (!msm_mpm_is_initialized())
		return -EINVAL;

	spin_lock_irqsave(&msm_mpm_lock, flags);
	rc = msm_mpm_enable_irq_exclusive(d, (bool)on, true);
	spin_unlock_irqrestore(&msm_mpm_lock, flags);

	return rc;
}

static int msm_mpm_set_irq_type(struct irq_data *d, unsigned int flow_type)
{
	unsigned long flags;
	int rc;

	if (!msm_mpm_is_initialized())
		return -EINVAL;

	spin_lock_irqsave(&msm_mpm_lock, flags);
	rc = msm_mpm_set_irq_type_exclusive(d, flow_type);
	spin_unlock_irqrestore(&msm_mpm_lock, flags);

	return rc;
}

/******************************************************************************
 * Public functions
 *****************************************************************************/
int msm_mpm_enable_pin(unsigned int pin, unsigned int enable)
{
	uint32_t index = MSM_MPM_IRQ_INDEX(pin);
	uint32_t mask = MSM_MPM_IRQ_MASK(pin);
	unsigned long flags;

	if (!msm_mpm_is_initialized())
		return -EINVAL;

	if (pin >= MSM_MPM_NR_MPM_IRQS)
		return -EINVAL;

	spin_lock_irqsave(&msm_mpm_lock, flags);

	if (enable)
		msm_mpm_enabled_irq[index] |= mask;
	else
		msm_mpm_enabled_irq[index] &= ~mask;

	spin_unlock_irqrestore(&msm_mpm_lock, flags);
	return 0;
}

int msm_mpm_set_pin_wake(unsigned int pin, unsigned int on)
{
	uint32_t index = MSM_MPM_IRQ_INDEX(pin);
	uint32_t mask = MSM_MPM_IRQ_MASK(pin);
	unsigned long flags;

	if (!msm_mpm_is_initialized())
		return -EINVAL;

	if (pin >= MSM_MPM_NR_MPM_IRQS)
		return -EINVAL;

	spin_lock_irqsave(&msm_mpm_lock, flags);

	if (on)
		msm_mpm_wake_irq[index] |= mask;
	else
		msm_mpm_wake_irq[index] &= ~mask;

	spin_unlock_irqrestore(&msm_mpm_lock, flags);
	return 0;
}

int msm_mpm_set_pin_type(unsigned int pin, unsigned int flow_type)
{
	uint32_t index = MSM_MPM_IRQ_INDEX(pin);
	uint32_t mask = MSM_MPM_IRQ_MASK(pin);
	unsigned long flags;

	if (!msm_mpm_is_initialized())
		return -EINVAL;

	if (pin >= MSM_MPM_NR_MPM_IRQS)
		return -EINVAL;

	spin_lock_irqsave(&msm_mpm_lock, flags);

	msm_mpm_set_detect_ctl(pin, flow_type);

	if (flow_type & IRQ_TYPE_LEVEL_HIGH)
		msm_mpm_polarity[index] |= mask;
	else
		msm_mpm_polarity[index] &= ~mask;

	spin_unlock_irqrestore(&msm_mpm_lock, flags);
	return 0;
}

bool msm_mpm_irqs_detectable(bool from_idle)
{
	/* TODO:
	 * Return true if unlisted irqs is empty
	 */

	if (!msm_mpm_is_initialized())
		return false;

	return true;
}

bool msm_mpm_gpio_irqs_detectable(bool from_idle)
{
	/* TODO:
	 * Return true if unlisted irqs is empty
	 */
	if (!msm_mpm_is_initialized())
		return false;
	return true;
}

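/*
 * Arm the MPM before power collapse. sclk_count is the sleep duration in
 * 32.768 kHz sleep-clock ticks; it is scaled to 19.2 MHz arch-timer ticks
 * and added to the current counter value, or set to all ones (no timed
 * wakeup) when zero. Suspend uses the wakeup mask, idle the enable mask.
 */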
void msm_mpm_enter_sleep(uint32_t sclk_count, bool from_idle)
{
	cycle_t wakeup = (u64)sclk_count * ARCH_TIMER_HZ;

	if (!msm_mpm_is_initialized()) {
		pr_err("%s(): MPM not initialized\n", __func__);
		return;
	}

	if (sclk_count) {
		do_div(wakeup, SCLK_HZ);
		wakeup += arch_counter_get_cntpct();
	} else {
		wakeup = (~0ULL);
	}

	msm_mpm_set(wakeup, !from_idle);
}

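/*
 * After resume, read the MPM status registers and replay any latched
 * edge-triggered interrupts that map to Linux IRQs so they are not lost.
 */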
void msm_mpm_exit_sleep(bool from_idle)
{
	unsigned long pending;
	int i;
	int k;

	if (!msm_mpm_is_initialized()) {
		pr_err("%s(): MPM not initialized\n", __func__);
		return;
	}

	for (i = 0; i < MSM_MPM_REG_WIDTH; i++) {
		pending = msm_mpm_read(MSM_MPM_REG_STATUS, i);

		if (MSM_MPM_DEBUG_PENDING_IRQ & msm_mpm_debug_mask)
			pr_info("%s: pending.%d: 0x%08lx\n", __func__,
					i, pending);

		k = find_first_bit(&pending, 32);
		while (k < 32) {
			unsigned int mpm_irq = 32 * i + k;
			unsigned int apps_irq = msm_mpm_get_irq_m2a(mpm_irq);
			struct irq_desc *desc = apps_irq ?
				irq_to_desc(apps_irq) : NULL;

			if (desc && !irqd_is_level_type(&desc->irq_data)) {
				irq_set_pending(apps_irq);
				if (from_idle) {
					raw_spin_lock(&desc->lock);
					check_irq_resend(desc, apps_irq);
					raw_spin_unlock(&desc->lock);
				}
			}

			k = find_next_bit(&pending, 32, k + 1);
		}
	}
}

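/*
 * Probe: map the "vmpm" request register region and the "ipc" register used
 * to signal the RPM, read the IPC bit offset from the device tree, and
 * register the RPM-to-Apps wakeup interrupt as a wake source.
 */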
static int __devinit msm_mpm_dev_probe(struct platform_device *pdev)
{
	struct resource *res = NULL;
	int offset, ret;
	struct msm_mpm_device_data *dev = &msm_mpm_dev_data;

	if (msm_mpm_initialized & MSM_MPM_DEVICE_PROBED) {
		pr_warn("MPM device probed multiple times\n");
		return 0;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vmpm");
	if (!res) {
		pr_err("%s(): Missing RPM memory resource\n", __func__);
		goto fail;
	}

	dev->mpm_request_reg_base = devm_request_and_ioremap(&pdev->dev, res);

	if (!dev->mpm_request_reg_base) {
		pr_err("%s(): Unable to iomap\n", __func__);
		goto fail;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ipc");
	if (!res) {
		pr_err("%s(): Missing GCC memory resource\n", __func__);
		goto failed_irq_get;
	}

	dev->mpm_apps_ipc_reg = devm_ioremap(&pdev->dev, res->start,
			resource_size(res));

	if (of_property_read_u32(pdev->dev.of_node,
				"qcom,ipc-bit-offset", &offset)) {
		pr_err("%s(): Cannot read ipc bit offset\n", __func__);
		/* No IRQ has been requested yet, so skip free_irq(). */
		goto failed_irq_get;
	}

	dev->mpm_apps_ipc_val = (1 << offset);

	if (!dev->mpm_apps_ipc_reg)
		goto failed_irq_get;

	dev->mpm_ipc_irq = platform_get_irq(pdev, 0);

	if (dev->mpm_ipc_irq == -ENXIO) {
		pr_err("%s(): Cannot find IRQ resource\n", __func__);
		goto failed_irq_get;
	}

	ret = request_irq(dev->mpm_ipc_irq, msm_mpm_irq,
			IRQF_TRIGGER_RISING, pdev->name, msm_mpm_irq);

	if (ret) {
		pr_err("%s(): request_irq failed errno: %d\n", __func__, ret);
		goto failed_irq_get;
	}

	ret = irq_set_irq_wake(dev->mpm_ipc_irq, 1);

	if (ret) {
		pr_err("%s: failed to set wakeup irq %u: %d\n",
			__func__, dev->mpm_ipc_irq, ret);
		goto failed_free_irq;
	}

	msm_mpm_initialized |= MSM_MPM_DEVICE_PROBED;

	return 0;

failed_free_irq:
	free_irq(dev->mpm_ipc_irq, msm_mpm_irq);
failed_irq_get:
	if (dev->mpm_apps_ipc_reg)
		devm_iounmap(&pdev->dev, dev->mpm_apps_ipc_reg);
	if (dev->mpm_request_reg_base)
		devm_iounmap(&pdev->dev, dev->mpm_request_reg_base);
fail:
	return -EINVAL;
}

static inline int __init mpm_irq_domain_linear_size(struct irq_domain *d)
{
	return d->revmap_data.linear.size;
}

static inline int __init mpm_irq_domain_legacy_size(struct irq_domain *d)
{
	return d->revmap_data.legacy.size;
}

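/*
 * Parse the "qcom,gic-map"/"qcom,gpio-map" <pin hwirq> tuples from the
 * device tree, build the hwirq-to-MPM-pin hash, allocate the "unlisted"
 * bitmaps, and hook this driver into the gic_arch_extn and
 * msm_gpio_irq_extn irq_chip callbacks.
 */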
void __init of_mpm_init(struct device_node *node)
{
	const __be32 *list;

	struct mpm_of {
		char *pkey;
		char *map;
		struct irq_chip *chip;
		int (*get_max_irqs)(struct irq_domain *d);
	};
	int i;

	struct mpm_of mpm_of_map[MSM_MPM_NR_IRQ_DOMAINS] = {
		{
			"qcom,gic-parent",
			"qcom,gic-map",
			&gic_arch_extn,
			mpm_irq_domain_linear_size,
		},
		{
			"qcom,gpio-parent",
			"qcom,gpio-map",
			&msm_gpio_irq_extn,
			mpm_irq_domain_legacy_size,
		},
	};

	if (msm_mpm_initialized & MSM_MPM_IRQ_MAPPING_DONE) {
		pr_warn("%s(): MPM driver mapping exists\n", __func__);
		return;
	}

	for (i = 0; i < MSM_MPM_NR_MPM_IRQS; i++)
		INIT_HLIST_HEAD(&irq_hash[i]);

	for (i = 0; i < MSM_MPM_NR_IRQ_DOMAINS; i++) {
		struct device_node *parent = NULL;
		struct mpm_irqs_a2m *mpm_node = NULL;
		struct irq_domain *domain = NULL;
		int size;

		parent = of_parse_phandle(node, mpm_of_map[i].pkey, 0);

		if (!parent) {
			pr_warn("%s(): %s Not found\n", __func__,
					mpm_of_map[i].pkey);
			continue;
		}

		domain = irq_find_host(parent);

		if (!domain) {
			pr_warn("%s(): Cannot find irq controller for %s\n",
					__func__, mpm_of_map[i].pkey);
			continue;
		}

		size = mpm_of_map[i].get_max_irqs(domain);

		unlisted_irqs[i].enabled_irqs =
			kzalloc(BITS_TO_LONGS(size) * sizeof(unsigned long),
					GFP_KERNEL);

		if (!unlisted_irqs[i].enabled_irqs)
			goto failed_malloc;

		unlisted_irqs[i].wakeup_irqs =
			kzalloc(BITS_TO_LONGS(size) * sizeof(unsigned long),
					GFP_KERNEL);

		if (!unlisted_irqs[i].wakeup_irqs)
			goto failed_malloc;

		unlisted_irqs[i].domain = domain;

		list = of_get_property(node, mpm_of_map[i].map, &size);

		if (!list || !size) {
			__WARN();
			continue;
		}

		/*
		 * Size is in bytes. Convert to size of uint32_t
		 */
		size /= sizeof(*list);

		/*
		 * The data is represented by a tuple mapping hwirq to a MPM
		 * pin. The number of mappings in the device tree would be
		 * size/2
		 */
		mpm_node = kzalloc(sizeof(struct mpm_irqs_a2m) * size / 2,
				GFP_KERNEL);
		if (!mpm_node)
			goto failed_malloc;

		while (size) {
			unsigned long pin = be32_to_cpup(list++);
			irq_hw_number_t hwirq = be32_to_cpup(list++);

			mpm_node->pin = pin;
			mpm_node->hwirq = hwirq;
			mpm_node->parent = parent;
			mpm_node->domain = domain;
			INIT_HLIST_NODE(&mpm_node->node);

			hlist_add_head(&mpm_node->node,
				&irq_hash[hashfn(mpm_node->hwirq)]);
			size -= 2;
			mpm_node++;
		}

		if (mpm_of_map[i].chip) {
			mpm_of_map[i].chip->irq_mask = msm_mpm_disable_irq;
			mpm_of_map[i].chip->irq_unmask = msm_mpm_enable_irq;
			mpm_of_map[i].chip->irq_disable = msm_mpm_disable_irq;
			mpm_of_map[i].chip->irq_set_type = msm_mpm_set_irq_type;
			mpm_of_map[i].chip->irq_set_wake = msm_mpm_set_irq_wake;
		}
	}

	msm_mpm_initialized |= MSM_MPM_IRQ_MAPPING_DONE;

	return;

failed_malloc:
	for (i = 0; i < MSM_MPM_NR_IRQ_DOMAINS; i++) {
		mpm_of_map[i].chip->irq_mask = NULL;
		mpm_of_map[i].chip->irq_unmask = NULL;
		mpm_of_map[i].chip->irq_disable = NULL;
		mpm_of_map[i].chip->irq_set_type = NULL;
		mpm_of_map[i].chip->irq_set_wake = NULL;

		kfree(unlisted_irqs[i].enabled_irqs);
		kfree(unlisted_irqs[i].wakeup_irqs);
	}
}

static struct of_device_id msm_mpm_match_table[] = {
	{.compatible = "qcom,mpm-v2"},
	{},
};

static struct platform_driver msm_mpm_dev_driver = {
	.probe = msm_mpm_dev_probe,
	.driver = {
		.name = "mpm-v2",
		.owner = THIS_MODULE,
		.of_match_table = msm_mpm_match_table,
	},
};

int __init msm_mpm_device_init(void)
{
	return platform_driver_register(&msm_mpm_dev_driver);
}
arch_initcall(msm_mpm_device_init);