blob: 5c654b02df973608789ffba3dfa5201c0df4c6df [file] [log] [blame]
Mahesh Sivasubramanianf1ddf042013-01-08 14:03:32 -07001/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
Mahesh Sivasubramanian102e5962012-06-20 13:12:11 -06002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13
14#include <linux/module.h>
15#include <linux/kernel.h>
16#include <linux/types.h>
17#include <linux/init.h>
18#include <linux/bitmap.h>
19#include <linux/bitops.h>
20#include <linux/interrupt.h>
21#include <linux/io.h>
22#include <linux/irq.h>
23#include <linux/irqdomain.h>
24#include <linux/list.h>
25#include <linux/platform_device.h>
26#include <linux/of.h>
27#include <linux/of_address.h>
28#include <linux/slab.h>
29#include <linux/spinlock.h>
30#include <asm/hardware/gic.h>
Mahesh Sivasubramanian2efbc352012-07-18 14:15:44 -060031#include <asm/arch_timer.h>
Mahesh Sivasubramanian102e5962012-06-20 13:12:11 -060032#include <mach/gpio.h>
33#include <mach/mpm.h>
34
/*
 * Indices into unlisted_irqs[] -- one tracking slot per parent
 * interrupt-controller domain whose interrupts may need MPM monitoring.
 */
enum {
	MSM_MPM_GIC_IRQ_DOMAIN,
	MSM_MPM_GPIO_IRQ_DOMAIN,
	MSM_MPM_NR_IRQ_DOMAINS,
};

/* Which interrupt set an operation targets: enabled (idle) or wakeup. */
enum {
	MSM_MPM_SET_ENABLED,
	MSM_MPM_SET_WAKEUP,
	MSM_NR_IRQS_SET,
};

/*
 * One apps-to-MPM (a2m) hash entry: maps a (domain, hwirq) pair taken
 * from the device tree to the MPM pin that monitors it during sleep.
 */
struct mpm_irqs_a2m {
	struct irq_domain *domain;	/* parent irq controller domain */
	struct device_node *parent;	/* DT node of the parent controller */
	irq_hw_number_t hwirq;		/* hw irq number within the domain */
	unsigned long pin;		/* corresponding MPM pin */
	struct hlist_node node;		/* linkage into irq_hash[] bucket */
};
/* Max length (incl. NUL) of mpm_irqs.domain_name, e.g. "gic"/"gpio". */
#define MAX_DOMAIN_NAME 5
Mahesh Sivasubramanian102e5962012-06-20 13:12:11 -060055
/*
 * Per-domain bookkeeping for interrupts with no MPM pin mapping
 * ("unlisted"): if any such interrupt is enabled (idle) or armed for
 * wakeup (suspend), low-power modes where only the MPM is awake must
 * be vetoed -- see msm_mpm_interrupts_detectable().
 */
struct mpm_irqs {
	struct irq_domain *domain;	/* domain the bitmaps refer to */
	unsigned long *enabled_irqs;	/* unlisted irqs currently enabled */
	unsigned long *wakeup_irqs;	/* unlisted irqs armed as wake srcs */
	unsigned long size;		/* number of bits in each bitmap */
	char domain_name[MAX_DOMAIN_NAME];
};

static struct mpm_irqs unlisted_irqs[MSM_MPM_NR_IRQ_DOMAINS];

/* Hash of (domain, hwirq) -> MPM pin; populated from the device tree. */
static struct hlist_head irq_hash[MSM_MPM_NR_MPM_IRQS];
/* Reverse map: MPM pin -> Linux irq number (0 until first lookup). */
static unsigned int msm_mpm_irqs_m2a[MSM_MPM_NR_MPM_IRQS];
/* Number of 32-bit words needed to cover every MPM pin. */
#define MSM_MPM_REG_WIDTH DIV_ROUND_UP(MSM_MPM_NR_MPM_IRQS, 32)

#define MSM_MPM_IRQ_INDEX(irq) (irq / 32)
#define MSM_MPM_IRQ_MASK(irq) BIT(irq % 32)

#define hashfn(val) (val % MSM_MPM_NR_MPM_IRQS)
#define SCLK_HZ (32768)			/* sleep clock frequency */
#define ARCH_TIMER_HZ (19200000)	/* arch timer frequency */
static struct msm_mpm_device_data msm_mpm_dev_data;
77
/*
 * Order of the MPM request registers in the shared region; each logical
 * register is MSM_MPM_REG_WIDTH consecutive 32-bit words wide.
 */
enum mpm_reg_offsets {
	MSM_MPM_REG_WAKEUP,
	MSM_MPM_REG_ENABLE,
	MSM_MPM_REG_FALLING_EDGE,
	MSM_MPM_REG_RISING_EDGE,
	MSM_MPM_REG_POLARITY,
	MSM_MPM_REG_STATUS,
};

/* Protects the shadow arrays below and the m2a reverse map. */
static DEFINE_SPINLOCK(msm_mpm_lock);

/*
 * Shadow copies of the MPM configuration; flushed to the hardware by
 * msm_mpm_set() just before entering sleep.
 */
static uint32_t msm_mpm_enabled_irq[MSM_MPM_REG_WIDTH];
static uint32_t msm_mpm_wake_irq[MSM_MPM_REG_WIDTH];
static uint32_t msm_mpm_falling_edge[MSM_MPM_REG_WIDTH];
static uint32_t msm_mpm_rising_edge[MSM_MPM_REG_WIDTH];
static uint32_t msm_mpm_polarity[MSM_MPM_REG_WIDTH];
94
/* Debug flags for msm_mpm_debug_mask (runtime-writable module param). */
enum {
	MSM_MPM_DEBUG_NON_DETECTABLE_IRQ = BIT(0),
	MSM_MPM_DEBUG_PENDING_IRQ = BIT(1),
	MSM_MPM_DEBUG_WRITE = BIT(2),
	MSM_MPM_DEBUG_NON_DETECTABLE_IRQ_IDLE = BIT(3),
};

static int msm_mpm_debug_mask = 1;
module_param_named(
	debug_mask, msm_mpm_debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP
);

/* Initialization progress flags accumulated in msm_mpm_initialized. */
enum mpm_state {
	MSM_MPM_IRQ_MAPPING_DONE = BIT(0),	/* of_mpm_init() completed */
	MSM_MPM_DEVICE_PROBED = BIT(1),		/* platform probe completed */
};

static enum mpm_state msm_mpm_initialized;
113
/*
 * Whether the driver is far enough through init to be used.
 *
 * NOTE(review): the bitwise AND against the OR of both flags evaluates
 * true when EITHER flag is set, not when both are.  If the intent is to
 * require the DT mapping AND a successful device probe, this should be
 * an equality comparison against the combined mask -- confirm intent
 * before changing, as irq_chip callbacks run between the two phases.
 */
static inline bool msm_mpm_is_initialized(void)
{
	return msm_mpm_initialized &
		(MSM_MPM_IRQ_MAPPING_DONE | MSM_MPM_DEVICE_PROBED);

}
120
121static inline uint32_t msm_mpm_read(
122 unsigned int reg, unsigned int subreg_index)
123{
124 unsigned int offset = reg * MSM_MPM_REG_WIDTH + subreg_index;
125 return __raw_readl(msm_mpm_dev_data.mpm_request_reg_base + offset * 4);
126}
127
128static inline void msm_mpm_write(
129 unsigned int reg, unsigned int subreg_index, uint32_t value)
130{
131 unsigned int offset = reg * MSM_MPM_REG_WIDTH + subreg_index;
132
133 __raw_writel(value, msm_mpm_dev_data.mpm_request_reg_base + offset * 4);
134 if (MSM_MPM_DEBUG_WRITE & msm_mpm_debug_mask)
135 pr_info("%s: reg %u.%u: 0x%08x\n",
136 __func__, reg, subreg_index, value);
137}
138
/*
 * Notify the RPM that the MPM request registers have been updated by
 * raising the apps->RPM IPC interrupt.
 */
static inline void msm_mpm_send_interrupt(void)
{
	__raw_writel(msm_mpm_dev_data.mpm_apps_ipc_val,
			msm_mpm_dev_data.mpm_apps_ipc_reg);
	/* Ensure the write is complete before returning. */
	wmb();
}
146
/*
 * ISR for the RPM-to-apps IPC interrupt.
 *
 * When the system resumes from deep sleep mode, the RPM hardware wakes
 * up the Apps processor by triggering this interrupt. This interrupt
 * has to be enabled and set as wake for the irq to get SPM out of
 * sleep. Handle the interrupt here to make sure that it gets cleared.
 * No further action is required.
 */
static irqreturn_t msm_mpm_irq(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}
157
/*
 * Flush the shadow MPM configuration and the wakeup deadline to the
 * MPM request registers, then signal the RPM.
 *
 * @wakeup:  absolute wakeup time in arch-timer ticks.
 * @wakeset: true to program the wakeup-interrupt set (suspend path),
 *           false to program the enabled-interrupt set (idle path).
 *
 * NOTE(review): the 64-bit @wakeup is written one 32-bit word per loop
 * iteration through expiry_timer[i]; this matches the register layout
 * only when MSM_MPM_REG_WIDTH == 2 (i.e. 64 MPM pins) -- confirm for
 * targets with a different pin count.
 */
static void msm_mpm_set(cycle_t wakeup, bool wakeset)
{
	uint32_t *irqs;
	unsigned int reg;
	int i;
	uint32_t *expiry_timer;

	/* View the 64-bit deadline as two 32-bit register words. */
	expiry_timer = (uint32_t *)&wakeup;

	irqs = wakeset ? msm_mpm_wake_irq : msm_mpm_enabled_irq;
	for (i = 0; i < MSM_MPM_REG_WIDTH; i++) {
		reg = MSM_MPM_REG_WAKEUP;
		msm_mpm_write(reg, i, expiry_timer[i]);

		reg = MSM_MPM_REG_ENABLE;
		msm_mpm_write(reg, i, irqs[i]);

		reg = MSM_MPM_REG_FALLING_EDGE;
		msm_mpm_write(reg, i, msm_mpm_falling_edge[i]);

		reg = MSM_MPM_REG_RISING_EDGE;
		msm_mpm_write(reg, i, msm_mpm_rising_edge[i]);

		reg = MSM_MPM_REG_POLARITY;
		msm_mpm_write(reg, i, msm_mpm_polarity[i]);

		/* Clear stale pending status before going to sleep. */
		reg = MSM_MPM_REG_STATUS;
		msm_mpm_write(reg, i, 0);
	}

	/*
	 * Ensure that the set operation is complete before sending the
	 * interrupt
	 */
	wmb();
	msm_mpm_send_interrupt();
}
195
/* Linux irq previously recorded for an MPM pin (0 if none). */
static inline unsigned int msm_mpm_get_irq_m2a(unsigned int pin)
{
	return msm_mpm_irqs_m2a[pin];
}
200
201static inline uint16_t msm_mpm_get_irq_a2m(struct irq_data *d)
202{
203 struct hlist_node *elem;
204 struct mpm_irqs_a2m *node = NULL;
205
206 hlist_for_each_entry(node, elem, &irq_hash[hashfn(d->hwirq)], node) {
207 if ((node->hwirq == d->hwirq)
208 && (d->domain == node->domain)) {
209 /* Update the linux irq mapping */
210 msm_mpm_irqs_m2a[node->pin] = d->irq;
211 break;
212 }
213 }
214 return node ? node->pin : 0;
215}
216
/*
 * Set or clear an interrupt's bit in either the enabled or wakeup set.
 * Caller must hold msm_mpm_lock.
 *
 * @d:       irq being (un)masked or (dis)armed for wakeup.
 * @enable:  true to set the bit, false to clear it.
 * @wakeset: true to modify the wakeup set, false the enabled set.
 *
 * An interrupt with an MPM pin updates the hardware shadow masks; an
 * interrupt without one is tracked per-domain so that sleep entry can
 * be vetoed (see msm_mpm_interrupts_detectable()).  Always returns 0.
 *
 * NOTE(review): unlike msm_mpm_set_irq_type_exclusive(), the computed
 * index is not bounds-checked against MSM_MPM_REG_WIDTH -- confirm all
 * DT-provided pins are < MSM_MPM_NR_MPM_IRQS.
 */
static int msm_mpm_enable_irq_exclusive(
	struct irq_data *d, bool enable, bool wakeset)
{
	uint16_t mpm_pin;

	WARN_ON(!d);
	if (!d)
		return 0;

	mpm_pin = msm_mpm_get_irq_a2m(d);

	/* 0xff presumably marks a pin listed as not MPM-routable -- verify. */
	if (mpm_pin == 0xff)
		return 0;

	if (mpm_pin) {
		uint32_t *mpm_irq_masks = wakeset ?
				msm_mpm_wake_irq : msm_mpm_enabled_irq;
		uint32_t index = MSM_MPM_IRQ_INDEX(mpm_pin);
		uint32_t mask = MSM_MPM_IRQ_MASK(mpm_pin);

		if (enable)
			mpm_irq_masks[index] |= mask;
		else
			mpm_irq_masks[index] &= ~mask;
	} else {
		int i;
		unsigned long *irq_apps;

		/* Unlisted interrupt: locate its domain's tracking bitmaps. */
		for (i = 0; i < MSM_MPM_NR_IRQ_DOMAINS; i++) {
			if (d->domain == unlisted_irqs[i].domain)
				break;
		}

		if (i == MSM_MPM_NR_IRQ_DOMAINS)
			return 0;
		irq_apps = wakeset ? unlisted_irqs[i].wakeup_irqs :
				unlisted_irqs[i].enabled_irqs;

		if (enable)
			__set_bit(d->hwirq, irq_apps);
		else
			__clear_bit(d->hwirq, irq_apps);

	}

	return 0;
}
264
Mahesh Sivasubramanian12191752013-04-04 10:56:02 -0600265static void msm_mpm_set_edge_ctl(int pin, unsigned int flow_type)
Mahesh Sivasubramanian102e5962012-06-20 13:12:11 -0600266{
267 uint32_t index;
Mahesh Sivasubramanian12191752013-04-04 10:56:02 -0600268 uint32_t mask;
Mahesh Sivasubramanian102e5962012-06-20 13:12:11 -0600269
Mahesh Sivasubramanian12191752013-04-04 10:56:02 -0600270 index = MSM_MPM_IRQ_INDEX(pin);
271 mask = MSM_MPM_IRQ_MASK(pin);
Mahesh Sivasubramanian102e5962012-06-20 13:12:11 -0600272
273 if (flow_type & IRQ_TYPE_EDGE_FALLING)
Mahesh Sivasubramanian12191752013-04-04 10:56:02 -0600274 msm_mpm_falling_edge[index] |= mask;
275 else
276 msm_mpm_falling_edge[index] &= ~mask;
Mahesh Sivasubramanian102e5962012-06-20 13:12:11 -0600277
Mahesh Sivasubramanian12191752013-04-04 10:56:02 -0600278 if (flow_type & IRQ_TYPE_EDGE_RISING)
279 msm_mpm_rising_edge[index] |= mask;
280 else
281 msm_mpm_rising_edge[index] &= ~mask;
282
Mahesh Sivasubramanian102e5962012-06-20 13:12:11 -0600283}
284
285static int msm_mpm_set_irq_type_exclusive(
286 struct irq_data *d, unsigned int flow_type)
287{
288 uint32_t mpm_irq;
289
290 mpm_irq = msm_mpm_get_irq_a2m(d);
291
292 if (mpm_irq == 0xff)
293 return 0;
294
295 if (mpm_irq) {
296 uint32_t index = MSM_MPM_IRQ_INDEX(mpm_irq);
297 uint32_t mask = MSM_MPM_IRQ_MASK(mpm_irq);
298
299 if (index >= MSM_MPM_REG_WIDTH)
300 return -EFAULT;
301
Mahesh Sivasubramanian12191752013-04-04 10:56:02 -0600302 msm_mpm_set_edge_ctl(mpm_irq, flow_type);
Mahesh Sivasubramanian102e5962012-06-20 13:12:11 -0600303
304 if (flow_type & IRQ_TYPE_LEVEL_HIGH)
305 msm_mpm_polarity[index] |= mask;
306 else
307 msm_mpm_polarity[index] &= ~mask;
308 }
309 return 0;
310}
311
312static int __msm_mpm_enable_irq(struct irq_data *d, bool enable)
313{
314 unsigned long flags;
315 int rc;
316
317 if (!msm_mpm_is_initialized())
318 return -EINVAL;
319
320 spin_lock_irqsave(&msm_mpm_lock, flags);
321
322 rc = msm_mpm_enable_irq_exclusive(d, enable, false);
323 spin_unlock_irqrestore(&msm_mpm_lock, flags);
324
325 return rc;
326}
327
/* irq_chip unmask hook: track the irq as enabled for MPM monitoring. */
static void msm_mpm_enable_irq(struct irq_data *d)
{
	__msm_mpm_enable_irq(d, true);
}
332
/* irq_chip mask/disable hook: drop the irq from the enabled set. */
static void msm_mpm_disable_irq(struct irq_data *d)
{
	__msm_mpm_enable_irq(d, false);
}
337
338static int msm_mpm_set_irq_wake(struct irq_data *d, unsigned int on)
339{
340 unsigned long flags;
341 int rc;
342
343 if (!msm_mpm_is_initialized())
344 return -EINVAL;
345
346 spin_lock_irqsave(&msm_mpm_lock, flags);
347 rc = msm_mpm_enable_irq_exclusive(d, (bool)on, true);
348 spin_unlock_irqrestore(&msm_mpm_lock, flags);
349
350 return rc;
351}
352
353static int msm_mpm_set_irq_type(struct irq_data *d, unsigned int flow_type)
354{
355 unsigned long flags;
356 int rc;
357
358 if (!msm_mpm_is_initialized())
359 return -EINVAL;
360
361 spin_lock_irqsave(&msm_mpm_lock, flags);
362 rc = msm_mpm_set_irq_type_exclusive(d, flow_type);
363 spin_unlock_irqrestore(&msm_mpm_lock, flags);
364
365 return rc;
366}
367
368/******************************************************************************
369 * Public functions
370 *****************************************************************************/
371int msm_mpm_enable_pin(unsigned int pin, unsigned int enable)
372{
373 uint32_t index = MSM_MPM_IRQ_INDEX(pin);
374 uint32_t mask = MSM_MPM_IRQ_MASK(pin);
375 unsigned long flags;
376
377 if (!msm_mpm_is_initialized())
378 return -EINVAL;
379
Mahesh Sivasubramanian01d7f4a2013-04-16 15:22:34 -0600380 if (pin >= MSM_MPM_NR_MPM_IRQS)
Mahesh Sivasubramanian102e5962012-06-20 13:12:11 -0600381 return -EINVAL;
382
383 spin_lock_irqsave(&msm_mpm_lock, flags);
384
385 if (enable)
386 msm_mpm_enabled_irq[index] |= mask;
387 else
388 msm_mpm_enabled_irq[index] &= ~mask;
389
390 spin_unlock_irqrestore(&msm_mpm_lock, flags);
391 return 0;
392}
393
394int msm_mpm_set_pin_wake(unsigned int pin, unsigned int on)
395{
396 uint32_t index = MSM_MPM_IRQ_INDEX(pin);
397 uint32_t mask = MSM_MPM_IRQ_MASK(pin);
398 unsigned long flags;
399
400 if (!msm_mpm_is_initialized())
401 return -EINVAL;
402
403 if (pin >= MSM_MPM_NR_MPM_IRQS)
404 return -EINVAL;
405
406 spin_lock_irqsave(&msm_mpm_lock, flags);
407
408 if (on)
409 msm_mpm_wake_irq[index] |= mask;
410 else
411 msm_mpm_wake_irq[index] &= ~mask;
412
413 spin_unlock_irqrestore(&msm_mpm_lock, flags);
414 return 0;
415}
416
417int msm_mpm_set_pin_type(unsigned int pin, unsigned int flow_type)
418{
419 uint32_t index = MSM_MPM_IRQ_INDEX(pin);
420 uint32_t mask = MSM_MPM_IRQ_MASK(pin);
421 unsigned long flags;
422
423 if (!msm_mpm_is_initialized())
424 return -EINVAL;
425
426 if (pin >= MSM_MPM_NR_MPM_IRQS)
427 return -EINVAL;
428
429 spin_lock_irqsave(&msm_mpm_lock, flags);
430
Mahesh Sivasubramanian12191752013-04-04 10:56:02 -0600431 msm_mpm_set_edge_ctl(pin, flow_type);
Mahesh Sivasubramanian102e5962012-06-20 13:12:11 -0600432
433 if (flow_type & IRQ_TYPE_LEVEL_HIGH)
434 msm_mpm_polarity[index] |= mask;
435 else
436 msm_mpm_polarity[index] &= ~mask;
437
438 spin_unlock_irqrestore(&msm_mpm_lock, flags);
439 return 0;
440}
441
/*
 * Check whether every relevant interrupt in the given domain can be
 * detected while the system sleeps (i.e. the domain has no active
 * unlisted interrupts).
 *
 * @d:         index into unlisted_irqs[] (MSM_MPM_*_IRQ_DOMAIN).
 * @from_idle: true on the idle path (inspect enabled unlisted irqs),
 *             false on the suspend path (inspect wakeup unlisted irqs).
 *
 * Returns true when the relevant unlisted-irq bitmap is empty; when the
 * matching debug flag is set, logs each offending hwirq.
 */
static bool msm_mpm_interrupts_detectable(int d, bool from_idle)
{
	unsigned long *irq_bitmap;
	bool debug_mask, ret = false;
	struct mpm_irqs *unlisted = &unlisted_irqs[d];

	if (!msm_mpm_is_initialized())
		return false;

	if (from_idle) {
		irq_bitmap = unlisted->enabled_irqs;
		debug_mask = msm_mpm_debug_mask &
				MSM_MPM_DEBUG_NON_DETECTABLE_IRQ_IDLE;
	} else {
		irq_bitmap = unlisted->wakeup_irqs;
		debug_mask = msm_mpm_debug_mask &
				MSM_MPM_DEBUG_NON_DETECTABLE_IRQ;
	}

	ret = (bool) __bitmap_empty(irq_bitmap, unlisted->size);

	if (debug_mask && !ret) {
		int i = 0;
		/* Name every unlisted irq that blocks the sleep mode. */
		i = find_first_bit(irq_bitmap, unlisted->size);
		pr_info("%s(): %s preventing system sleep modes during %s\n",
				__func__, unlisted->domain_name,
				from_idle ? "idle" : "suspend");

		while (i < unlisted->size) {
			pr_info("\thwirq: %d\n", i);
			i = find_next_bit(irq_bitmap, unlisted->size, i + 1);
		}
	}

	return ret;
}
478
/* True when no unlisted GPIO interrupt would go undetected in sleep. */
bool msm_mpm_gpio_irqs_detectable(bool from_idle)
{
	return msm_mpm_interrupts_detectable(MSM_MPM_GPIO_IRQ_DOMAIN,
			from_idle);
}
/* True when no unlisted GIC interrupt would go undetected in sleep. */
bool msm_mpm_irqs_detectable(bool from_idle)
{
	return msm_mpm_interrupts_detectable(MSM_MPM_GIC_IRQ_DOMAIN,
			from_idle);
}
/*
 * Program the MPM just before entering a low-power mode.
 *
 * @sclk_count: relative wakeup time in 32.768 kHz sleep-clock ticks;
 *              0 means no timed wakeup (program an "infinite" deadline).
 * @from_idle:  true on the idle path -> send the enabled set; false on
 *              the suspend path -> send the wakeup set.
 */
void msm_mpm_enter_sleep(uint32_t sclk_count, bool from_idle)
{
	/* Scale sleep-clock ticks to arch-timer units (divided below). */
	cycle_t wakeup = (u64)sclk_count * ARCH_TIMER_HZ;

	if (!msm_mpm_is_initialized()) {
		pr_err("%s(): MPM not initialized\n", __func__);
		return;
	}

	if (sclk_count) {
		/* wakeup = now + sclk_count * (ARCH_TIMER_HZ / SCLK_HZ) */
		do_div(wakeup, SCLK_HZ);
		wakeup += arch_counter_get_cntpct();
	} else {
		wakeup = (~0ULL);	/* no timed wakeup */
	}

	msm_mpm_set(wakeup, !from_idle);
}
507
/*
 * On wakeup, scan the MPM status registers and re-inject any pending
 * edge-triggered interrupts that fired while the system was asleep and
 * that the interrupt controller may therefore have missed.
 */
void msm_mpm_exit_sleep(bool from_idle)
{
	unsigned long pending;
	int i;
	int k;

	if (!msm_mpm_is_initialized()) {
		pr_err("%s(): MPM not initialized\n", __func__);
		return;
	}

	for (i = 0; i < MSM_MPM_REG_WIDTH; i++) {
		pending = msm_mpm_read(MSM_MPM_REG_STATUS, i);

		if (MSM_MPM_DEBUG_PENDING_IRQ & msm_mpm_debug_mask)
			pr_info("%s: pending.%d: 0x%08lx", __func__,
					i, pending);

		/* Walk every pending pin in this 32-bit status word. */
		k = find_first_bit(&pending, 32);
		while (k < 32) {
			unsigned int mpm_irq = 32 * i + k;
			unsigned int apps_irq = msm_mpm_get_irq_m2a(mpm_irq);
			struct irq_desc *desc = apps_irq ?
						irq_to_desc(apps_irq) : NULL;

			/*
			 * Level interrupts re-assert on their own; only
			 * edge interrupts need a software resend.
			 */
			if (desc && !irqd_is_level_type(&desc->irq_data)) {
				irq_set_pending(apps_irq);
				if (from_idle) {
					raw_spin_lock(&desc->lock);
					check_irq_resend(desc, apps_irq);
					raw_spin_unlock(&desc->lock);
				}
			}

			k = find_next_bit(&pending, 32, k + 1);
		}
	}
}
546
547static int __devinit msm_mpm_dev_probe(struct platform_device *pdev)
548{
549 struct resource *res = NULL;
550 int offset, ret;
551 struct msm_mpm_device_data *dev = &msm_mpm_dev_data;
552
553 if (msm_mpm_initialized & MSM_MPM_DEVICE_PROBED) {
554 pr_warn("MPM device probed multiple times\n");
555 return 0;
556 }
557
558 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vmpm");
559 if (!res) {
560 pr_err("%s(): Missing RPM memory resource\n", __func__);
561 goto fail;
562 }
563
564 dev->mpm_request_reg_base = devm_request_and_ioremap(&pdev->dev, res);
565
566 if (!dev->mpm_request_reg_base) {
567 pr_err("%s(): Unable to iomap\n", __func__);
568 goto fail;
569 }
570
571 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ipc");
572 if (!res) {
573 pr_err("%s(): Missing GCC memory resource\n", __func__);
574 goto failed_irq_get;
575 }
576
577 dev->mpm_apps_ipc_reg = devm_ioremap(&pdev->dev, res->start,
578 resource_size(res));
579
580 if (of_property_read_u32(pdev->dev.of_node,
581 "qcom,ipc-bit-offset", &offset)) {
582 pr_info("%s(): Cannot read ipc bit offset\n", __func__);
583 goto failed_free_irq;
584 }
585
586 dev->mpm_apps_ipc_val = (1 << offset);
587
588 if (!dev->mpm_apps_ipc_reg)
589 goto failed_irq_get;
590
591 dev->mpm_ipc_irq = platform_get_irq(pdev, 0);
592
593 if (dev->mpm_ipc_irq == -ENXIO) {
594 pr_info("%s(): Cannot find IRQ resource\n", __func__);
595 goto failed_irq_get;
596 }
597 ret = request_irq(dev->mpm_ipc_irq, msm_mpm_irq,
598 IRQF_TRIGGER_RISING, pdev->name, msm_mpm_irq);
599
600 if (ret) {
601 pr_info("%s(): request_irq failed errno: %d\n", __func__, ret);
602 goto failed_irq_get;
603 }
Mahesh Sivasubramanian62360c62012-07-26 15:27:16 -0600604 ret = irq_set_irq_wake(dev->mpm_ipc_irq, 1);
605
606 if (ret) {
607 pr_err("%s: failed to set wakeup irq %u: %d\n",
608 __func__, dev->mpm_ipc_irq, ret);
609 goto failed_irq_get;
610
611 }
Mahesh Sivasubramanianb9498582012-07-25 11:22:56 -0600612 msm_mpm_initialized |= MSM_MPM_DEVICE_PROBED;
Mahesh Sivasubramanian102e5962012-06-20 13:12:11 -0600613
614 return 0;
615
616failed_free_irq:
617 free_irq(dev->mpm_ipc_irq, msm_mpm_irq);
618failed_irq_get:
619 if (dev->mpm_apps_ipc_reg)
620 devm_iounmap(&pdev->dev, dev->mpm_apps_ipc_reg);
621 if (dev->mpm_request_reg_base)
622 devm_iounmap(&pdev->dev, dev->mpm_request_reg_base);
623fail:
624 return -EINVAL;
625}
626
/* Number of hwirqs covered by a linear-revmap irq domain. */
static inline int __init mpm_irq_domain_linear_size(struct irq_domain *d)
{
	return d->revmap_data.linear.size;
}
631
/* Number of hwirqs covered by a legacy-revmap irq domain. */
static inline int __init mpm_irq_domain_legacy_size(struct irq_domain *d)
{
	return d->revmap_data.legacy.size;
}
636
/*
 * Parse the MPM pin mappings from the device tree and hook this
 * driver's mask/unmask/set-type/set-wake callbacks into the GIC and
 * GPIO irq_chip extension structures.
 *
 * For each supported parent domain the DT supplies a "*-map" property
 * of (MPM pin, hwirq) pairs; these populate the a2m hash used by
 * msm_mpm_get_irq_a2m().  Interrupts without a pair are tracked in
 * unlisted_irqs[] so sleep entry can be vetoed when they are active.
 */
void __init of_mpm_init(struct device_node *node)
{
	const __be32 *list;

	struct mpm_of {
		char *pkey;		/* phandle property of parent ctrl */
		char *map;		/* pin<->hwirq map property */
		char name[MAX_DOMAIN_NAME];
		struct irq_chip *chip;	/* extension chip to hook into */
		int (*get_max_irqs)(struct irq_domain *d);
	};
	int i;

	struct mpm_of mpm_of_map[MSM_MPM_NR_IRQ_DOMAINS] = {
		{
			"qcom,gic-parent",
			"qcom,gic-map",
			"gic",
			&gic_arch_extn,
			mpm_irq_domain_linear_size,
		},
		{
			"qcom,gpio-parent",
			"qcom,gpio-map",
			"gpio",
			&msm_gpio_irq_extn,
			mpm_irq_domain_legacy_size,
		},
	};

	if (msm_mpm_initialized & MSM_MPM_IRQ_MAPPING_DONE) {
		pr_warn("%s(): MPM driver mapping exists\n", __func__);
		return;
	}

	for (i = 0; i < MSM_MPM_NR_MPM_IRQS; i++)
		INIT_HLIST_HEAD(&irq_hash[i]);

	for (i = 0; i < MSM_MPM_NR_IRQ_DOMAINS; i++) {
		struct device_node *parent = NULL;
		struct mpm_irqs_a2m *mpm_node = NULL;
		struct irq_domain *domain = NULL;
		int size;

		parent = of_parse_phandle(node, mpm_of_map[i].pkey, 0);

		if (!parent) {
			pr_warn("%s(): %s Not found\n", __func__,
					mpm_of_map[i].pkey);
			continue;
		}

		domain = irq_find_host(parent);

		if (!domain) {
			pr_warn("%s(): Cannot find irq controller for %s\n",
					__func__, mpm_of_map[i].pkey);
			continue;
		}

		/* Size the unlisted-irq bitmaps to the domain's irq count. */
		size = mpm_of_map[i].get_max_irqs(domain);
		unlisted_irqs[i].size = size;
		memcpy(unlisted_irqs[i].domain_name, mpm_of_map[i].name,
				MAX_DOMAIN_NAME);

		unlisted_irqs[i].enabled_irqs =
			kzalloc(BITS_TO_LONGS(size) * sizeof(unsigned long),
					GFP_KERNEL);

		if (!unlisted_irqs[i].enabled_irqs)
			goto failed_malloc;

		unlisted_irqs[i].wakeup_irqs =
			kzalloc(BITS_TO_LONGS(size) * sizeof(unsigned long),
					GFP_KERNEL);

		if (!unlisted_irqs[i].wakeup_irqs)
			goto failed_malloc;

		unlisted_irqs[i].domain = domain;

		list = of_get_property(node, mpm_of_map[i].map, &size);

		if (!list || !size) {
			__WARN();
			continue;
		}

		/*
		 * Size is in bytes. Convert to size of uint32_t
		 */
		size /= sizeof(*list);

		/*
		 * The data is represented by a tuple mapping hwirq to a MPM
		 * pin. The number of mappings in the device tree would be
		 * size/2
		 */
		mpm_node = kzalloc(sizeof(struct mpm_irqs_a2m) * size / 2,
				GFP_KERNEL);
		if (!mpm_node)
			goto failed_malloc;

		/* Hash each (pin, hwirq) pair by hwirq for a2m lookups. */
		while (size) {
			unsigned long pin = be32_to_cpup(list++);
			irq_hw_number_t hwirq = be32_to_cpup(list++);

			mpm_node->pin = pin;
			mpm_node->hwirq = hwirq;
			mpm_node->parent = parent;
			mpm_node->domain = domain;
			INIT_HLIST_NODE(&mpm_node->node);

			hlist_add_head(&mpm_node->node,
					&irq_hash[hashfn(mpm_node->hwirq)]);
			size -= 2;
			mpm_node++;
		}

		/* Route the chip's PM-relevant callbacks through the MPM. */
		if (mpm_of_map[i].chip) {
			mpm_of_map[i].chip->irq_mask = msm_mpm_disable_irq;
			mpm_of_map[i].chip->irq_unmask = msm_mpm_enable_irq;
			mpm_of_map[i].chip->irq_disable = msm_mpm_disable_irq;
			mpm_of_map[i].chip->irq_set_type = msm_mpm_set_irq_type;
			mpm_of_map[i].chip->irq_set_wake = msm_mpm_set_irq_wake;
		}

	}
	msm_mpm_initialized |= MSM_MPM_IRQ_MAPPING_DONE;

	return;

	/*
	 * NOTE(review): this unwind does not free mpm_node arrays already
	 * linked into irq_hash, and dereferences each chip pointer without
	 * a NULL check (both table entries are non-NULL today).  Init-time
	 * failure path -- confirm before relying on it.
	 */
failed_malloc:
	for (i = 0; i < MSM_MPM_NR_IRQ_DOMAINS; i++) {
		mpm_of_map[i].chip->irq_mask = NULL;
		mpm_of_map[i].chip->irq_unmask = NULL;
		mpm_of_map[i].chip->irq_disable = NULL;
		mpm_of_map[i].chip->irq_set_type = NULL;
		mpm_of_map[i].chip->irq_set_wake = NULL;

		kfree(unlisted_irqs[i].enabled_irqs);
		kfree(unlisted_irqs[i].wakeup_irqs);

	}
}
782
/* OF match table: binds this driver to "qcom,mpm-v2" DT nodes. */
static struct of_device_id msm_mpm_match_table[] = {
	{.compatible = "qcom,mpm-v2"},
	{},
};

static struct platform_driver msm_mpm_dev_driver = {
	.probe = msm_mpm_dev_probe,
	.driver = {
		.name = "mpm-v2",
		.owner = THIS_MODULE,
		.of_match_table = msm_mpm_match_table,
	},
};
796
/* Register the platform driver early in boot (arch_initcall). */
int __init msm_mpm_device_init(void)
{
	return platform_driver_register(&msm_mpm_dev_driver);
}
arch_initcall(msm_mpm_device_init);