blob: a0746f9916eef0f60878393d676b9a1dc5a0e3cb [file] [log] [blame]
Mahesh Sivasubramanianf1ddf042013-01-08 14:03:32 -07001/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
Mahesh Sivasubramanian102e5962012-06-20 13:12:11 -06002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13
14#include <linux/module.h>
15#include <linux/kernel.h>
16#include <linux/types.h>
17#include <linux/init.h>
18#include <linux/bitmap.h>
19#include <linux/bitops.h>
20#include <linux/interrupt.h>
21#include <linux/io.h>
22#include <linux/irq.h>
23#include <linux/irqdomain.h>
24#include <linux/list.h>
25#include <linux/platform_device.h>
26#include <linux/of.h>
27#include <linux/of_address.h>
28#include <linux/slab.h>
29#include <linux/spinlock.h>
Mahesh Sivasubramanian3855da12013-04-26 11:40:27 -060030#include <linux/clk.h>
31#include <linux/err.h>
32#include <linux/power_supply.h>
33#include <linux/regulator/consumer.h>
34#include <linux/workqueue.h>
Mahesh Sivasubramanian102e5962012-06-20 13:12:11 -060035#include <asm/hardware/gic.h>
Mahesh Sivasubramanian2efbc352012-07-18 14:15:44 -060036#include <asm/arch_timer.h>
Mahesh Sivasubramanian102e5962012-06-20 13:12:11 -060037#include <mach/gpio.h>
38#include <mach/mpm.h>
Mahesh Sivasubramanian3855da12013-04-26 11:40:27 -060039#include <mach/clk.h>
40#include <mach/rpm-regulator-smd.h>
Mahesh Sivasubramanian102e5962012-06-20 13:12:11 -060041
/* Index of each apps irq domain the MPM bridges. */
enum {
	MSM_MPM_GIC_IRQ_DOMAIN,
	MSM_MPM_GPIO_IRQ_DOMAIN,
	MSM_MPM_NR_IRQ_DOMAINS,
};

/* Which shadow interrupt set an operation targets. */
enum {
	MSM_MPM_SET_ENABLED,
	MSM_MPM_SET_WAKEUP,
	MSM_NR_IRQS_SET,
};

/* One apps-hwirq -> MPM-pin mapping; hashed by hwirq into irq_hash. */
struct mpm_irqs_a2m {
	struct irq_domain *domain;
	struct device_node *parent;
	irq_hw_number_t hwirq;
	unsigned long pin;
	struct hlist_node node;
};
#define MAX_DOMAIN_NAME 5

/* Per-domain bitmaps of interrupts that have no MPM pin ("unlisted"). */
struct mpm_irqs {
	struct irq_domain *domain;
	unsigned long *enabled_irqs;	/* set while enabled (idle checks) */
	unsigned long *wakeup_irqs;	/* set while armed for wake (suspend) */
	unsigned long size;		/* number of bits in each bitmap */
	char domain_name[MAX_DOMAIN_NAME];
};
70
/* Per-domain bookkeeping for interrupts invisible to the MPM. */
static struct mpm_irqs unlisted_irqs[MSM_MPM_NR_IRQ_DOMAINS];

/* hwirq -> MPM pin hash (a2m) and its flat reverse map (m2a). */
static struct hlist_head irq_hash[MSM_MPM_NR_MPM_IRQS];
static unsigned int msm_mpm_irqs_m2a[MSM_MPM_NR_MPM_IRQS];
#define MSM_MPM_REG_WIDTH DIV_ROUND_UP(MSM_MPM_NR_MPM_IRQS, 32)

/* Locate an MPM pin inside the 32-bit-wide shadow register arrays. */
#define MSM_MPM_IRQ_INDEX(irq) (irq / 32)
#define MSM_MPM_IRQ_MASK(irq) BIT(irq % 32)

#define hashfn(val) (val % MSM_MPM_NR_MPM_IRQS)
#define SCLK_HZ (32768)
#define ARCH_TIMER_HZ (19200000)
static struct msm_mpm_device_data msm_mpm_dev_data;

/* XO clock vote: held while a non-monitorable interrupt is enabled. */
static struct clk *xo_clk;
static bool xo_enabled;
static struct workqueue_struct *msm_mpm_wq;
static struct work_struct msm_mpm_work;
static struct completion wake_wq;

/* Register banks in the vMPM request area, in hardware layout order. */
enum mpm_reg_offsets {
	MSM_MPM_REG_WAKEUP,
	MSM_MPM_REG_ENABLE,
	MSM_MPM_REG_FALLING_EDGE,
	MSM_MPM_REG_RISING_EDGE,
	MSM_MPM_REG_POLARITY,
	MSM_MPM_REG_STATUS,
};

/* Protects all of the shadow state below plus the unlisted bitmaps. */
static DEFINE_SPINLOCK(msm_mpm_lock);

/*
 * Shadow copies of the MPM configuration; flushed to hardware by
 * msm_mpm_set() just before entering a low power mode.
 */
static uint32_t msm_mpm_enabled_irq[MSM_MPM_REG_WIDTH];
static uint32_t msm_mpm_wake_irq[MSM_MPM_REG_WIDTH];
static uint32_t msm_mpm_falling_edge[MSM_MPM_REG_WIDTH];
static uint32_t msm_mpm_rising_edge[MSM_MPM_REG_WIDTH];
static uint32_t msm_mpm_polarity[MSM_MPM_REG_WIDTH];

/* Debug log selection bits for msm_mpm_debug_mask (module param). */
enum {
	MSM_MPM_DEBUG_NON_DETECTABLE_IRQ = BIT(0),
	MSM_MPM_DEBUG_PENDING_IRQ = BIT(1),
	MSM_MPM_DEBUG_WRITE = BIT(2),
	MSM_MPM_DEBUG_NON_DETECTABLE_IRQ_IDLE = BIT(3),
};

static int msm_mpm_debug_mask = 1;
module_param_named(
	debug_mask, msm_mpm_debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP
);

/* Driver init progress flags. */
enum mpm_state {
	MSM_MPM_IRQ_MAPPING_DONE = BIT(0),
	MSM_MPM_DEVICE_PROBED = BIT(1),
};

static enum mpm_state msm_mpm_initialized;
126
/*
 * Report whether the driver is ready for use.
 *
 * NOTE(review): this ANDs against the union of both state bits, so it
 * returns true when EITHER the DT irq mapping or the device probe has
 * completed. If both stages are required before the hardware can be
 * touched, the check should be (flags & mask) == mask — confirm the
 * intended semantics before relying on it.
 */
static inline bool msm_mpm_is_initialized(void)
{
	return msm_mpm_initialized &
		(MSM_MPM_IRQ_MAPPING_DONE | MSM_MPM_DEVICE_PROBED);

}
133
134static inline uint32_t msm_mpm_read(
135 unsigned int reg, unsigned int subreg_index)
136{
137 unsigned int offset = reg * MSM_MPM_REG_WIDTH + subreg_index;
138 return __raw_readl(msm_mpm_dev_data.mpm_request_reg_base + offset * 4);
139}
140
141static inline void msm_mpm_write(
142 unsigned int reg, unsigned int subreg_index, uint32_t value)
143{
144 unsigned int offset = reg * MSM_MPM_REG_WIDTH + subreg_index;
145
146 __raw_writel(value, msm_mpm_dev_data.mpm_request_reg_base + offset * 4);
147 if (MSM_MPM_DEBUG_WRITE & msm_mpm_debug_mask)
148 pr_info("%s: reg %u.%u: 0x%08x\n",
149 __func__, reg, subreg_index, value);
150}
151
/* Ring the apps->RPM IPC doorbell so the RPM latches the new MPM config. */
static inline void msm_mpm_send_interrupt(void)
{
	__raw_writel(msm_mpm_dev_data.mpm_apps_ipc_val,
			msm_mpm_dev_data.mpm_apps_ipc_reg);
	/* Ensure the write is complete before returning. */
	wmb();
}
159
/*
 * IPC interrupt handler.
 *
 * When the system resumes from deep sleep mode, the RPM hardware wakes
 * up the Apps processor by triggering this interrupt. This interrupt
 * has to be enabled and set as wake for the irq to get SPM out of
 * sleep. Handle the interrupt here to make sure that it gets cleared.
 */
static irqreturn_t msm_mpm_irq(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}
170
/*
 * Flush the shadow MPM configuration to hardware ahead of sleep:
 * absolute wakeup time, the active interrupt set (wake set for suspend,
 * enabled set for idle), edge/polarity configuration, and a clear of any
 * stale status bits. Finishes with a barrier and the IPC doorbell so the
 * RPM consumes a fully written request.
 *
 * NOTE(review): the 64-bit @wakeup is emitted one 32-bit word at a time
 * through a uint32_t* alias — this assumes MSM_MPM_REG_WIDTH == 2 and a
 * little-endian word order expected by the RPM; confirm for the target.
 */
static void msm_mpm_set(cycle_t wakeup, bool wakeset)
{
	uint32_t *irqs;
	unsigned int reg;
	int i;
	uint32_t *expiry_timer;

	expiry_timer = (uint32_t *)&wakeup;

	irqs = wakeset ? msm_mpm_wake_irq : msm_mpm_enabled_irq;
	for (i = 0; i < MSM_MPM_REG_WIDTH; i++) {
		reg = MSM_MPM_REG_WAKEUP;
		msm_mpm_write(reg, i, expiry_timer[i]);

		reg = MSM_MPM_REG_ENABLE;
		msm_mpm_write(reg, i, irqs[i]);

		reg = MSM_MPM_REG_FALLING_EDGE;
		msm_mpm_write(reg, i, msm_mpm_falling_edge[i]);

		reg = MSM_MPM_REG_RISING_EDGE;
		msm_mpm_write(reg, i, msm_mpm_rising_edge[i]);

		reg = MSM_MPM_REG_POLARITY;
		msm_mpm_write(reg, i, msm_mpm_polarity[i]);

		/* Clear latched status so stale wakeups are not replayed. */
		reg = MSM_MPM_REG_STATUS;
		msm_mpm_write(reg, i, 0);
	}

	/*
	 * Ensure that the set operation is complete before sending the
	 * interrupt
	 */
	wmb();
	msm_mpm_send_interrupt();
}
208
/* Reverse lookup: linux irq last recorded for MPM @pin (0 if unmapped). */
static inline unsigned int msm_mpm_get_irq_m2a(unsigned int pin)
{
	return msm_mpm_irqs_m2a[pin];
}
213
214static inline uint16_t msm_mpm_get_irq_a2m(struct irq_data *d)
215{
216 struct hlist_node *elem;
217 struct mpm_irqs_a2m *node = NULL;
218
219 hlist_for_each_entry(node, elem, &irq_hash[hashfn(d->hwirq)], node) {
220 if ((node->hwirq == d->hwirq)
221 && (d->domain == node->domain)) {
Akhila Musunuri9c47f6a2013-09-02 23:26:06 -0700222 /* Update the linux irq mapping */
223 msm_mpm_irqs_m2a[node->pin] = d->irq;
Mahesh Sivasubramanian102e5962012-06-20 13:12:11 -0600224 break;
225 }
226 }
Akhila Musunuri9c47f6a2013-09-02 23:26:06 -0700227 return node ? node->pin : 0;
Mahesh Sivasubramanian102e5962012-06-20 13:12:11 -0600228}
229
/*
 * Core bookkeeping for enabling/disabling one interrupt.
 *
 * If @d maps to an MPM pin, toggle its bit in the relevant 32-bit shadow
 * mask (wake set when @wakeset, otherwise enabled set). Pin 0xff is a
 * "deliberately unmonitored" sentinel and is skipped. Interrupts with no
 * MPM pin are tracked per-domain in the unlisted bitmaps; enabling one
 * kicks the worker via wake_wq so the XO vote can be re-evaluated.
 *
 * Caller must hold msm_mpm_lock. Always returns 0.
 */
static int msm_mpm_enable_irq_exclusive(
	struct irq_data *d, bool enable, bool wakeset)
{
	uint16_t mpm_pin;

	WARN_ON(!d);
	if (!d)
		return 0;

	mpm_pin = msm_mpm_get_irq_a2m(d);

	/* 0xff: interrupt intentionally excluded from MPM monitoring. */
	if (mpm_pin == 0xff)
		return 0;

	if (mpm_pin) {
		uint32_t *mpm_irq_masks = wakeset ?
				msm_mpm_wake_irq : msm_mpm_enabled_irq;
		uint32_t index = MSM_MPM_IRQ_INDEX(mpm_pin);
		uint32_t mask = MSM_MPM_IRQ_MASK(mpm_pin);

		if (enable)
			mpm_irq_masks[index] |= mask;
		else
			mpm_irq_masks[index] &= ~mask;
	} else {
		int i;
		unsigned long *irq_apps;

		/* Find which domain's unlisted bitmap this irq belongs to. */
		for (i = 0; i < MSM_MPM_NR_IRQ_DOMAINS; i++) {
			if (d->domain == unlisted_irqs[i].domain)
				break;
		}

		if (i == MSM_MPM_NR_IRQ_DOMAINS)
			return 0;
		irq_apps = wakeset ? unlisted_irqs[i].wakeup_irqs :
				unlisted_irqs[i].enabled_irqs;

		if (enable)
			__set_bit(d->hwirq, irq_apps);
		else
			__clear_bit(d->hwirq, irq_apps);

		/* Enabled unlisted set changed: let the worker re-vote XO. */
		if (!wakeset && (msm_mpm_initialized & MSM_MPM_DEVICE_PROBED))
			complete(&wake_wq);
	}

	return 0;
}
279
Mahesh Sivasubramanian12191752013-04-04 10:56:02 -0600280static void msm_mpm_set_edge_ctl(int pin, unsigned int flow_type)
Mahesh Sivasubramanian102e5962012-06-20 13:12:11 -0600281{
282 uint32_t index;
Mahesh Sivasubramanian12191752013-04-04 10:56:02 -0600283 uint32_t mask;
Mahesh Sivasubramanian102e5962012-06-20 13:12:11 -0600284
Mahesh Sivasubramanian12191752013-04-04 10:56:02 -0600285 index = MSM_MPM_IRQ_INDEX(pin);
286 mask = MSM_MPM_IRQ_MASK(pin);
Mahesh Sivasubramanian102e5962012-06-20 13:12:11 -0600287
288 if (flow_type & IRQ_TYPE_EDGE_FALLING)
Mahesh Sivasubramanian12191752013-04-04 10:56:02 -0600289 msm_mpm_falling_edge[index] |= mask;
290 else
291 msm_mpm_falling_edge[index] &= ~mask;
Mahesh Sivasubramanian102e5962012-06-20 13:12:11 -0600292
Mahesh Sivasubramanian12191752013-04-04 10:56:02 -0600293 if (flow_type & IRQ_TYPE_EDGE_RISING)
294 msm_mpm_rising_edge[index] |= mask;
295 else
296 msm_mpm_rising_edge[index] &= ~mask;
297
Mahesh Sivasubramanian102e5962012-06-20 13:12:11 -0600298}
299
/*
 * Record the trigger type of @d in the MPM shadow state: edge bits via
 * msm_mpm_set_edge_ctl() and level polarity in msm_mpm_polarity.
 * Interrupts without an MPM pin (or carrying the 0xff sentinel) are
 * silently ignored. Caller must hold msm_mpm_lock.
 *
 * Returns 0, or -EFAULT if the pin indexes past the register width.
 */
static int msm_mpm_set_irq_type_exclusive(
	struct irq_data *d, unsigned int flow_type)
{
	uint32_t mpm_irq;

	mpm_irq = msm_mpm_get_irq_a2m(d);

	if (mpm_irq == 0xff)
		return 0;

	if (mpm_irq) {
		uint32_t index = MSM_MPM_IRQ_INDEX(mpm_irq);
		uint32_t mask = MSM_MPM_IRQ_MASK(mpm_irq);

		if (index >= MSM_MPM_REG_WIDTH)
			return -EFAULT;

		msm_mpm_set_edge_ctl(mpm_irq, flow_type);

		/* Polarity bit set means active/trigger high. */
		if (flow_type & IRQ_TYPE_LEVEL_HIGH)
			msm_mpm_polarity[index] |= mask;
		else
			msm_mpm_polarity[index] &= ~mask;
	}
	return 0;
}
326
327static int __msm_mpm_enable_irq(struct irq_data *d, bool enable)
328{
329 unsigned long flags;
330 int rc;
331
332 if (!msm_mpm_is_initialized())
333 return -EINVAL;
334
335 spin_lock_irqsave(&msm_mpm_lock, flags);
336
337 rc = msm_mpm_enable_irq_exclusive(d, enable, false);
338 spin_unlock_irqrestore(&msm_mpm_lock, flags);
339
340 return rc;
341}
342
/* irq_chip unmask hook: mark @d enabled in the MPM shadow state. */
static void msm_mpm_enable_irq(struct irq_data *d)
{
	__msm_mpm_enable_irq(d, true);
}

/* irq_chip mask/disable hook: clear @d from the MPM shadow state. */
static void msm_mpm_disable_irq(struct irq_data *d)
{
	__msm_mpm_enable_irq(d, false);
}
352
353static int msm_mpm_set_irq_wake(struct irq_data *d, unsigned int on)
354{
355 unsigned long flags;
356 int rc;
357
358 if (!msm_mpm_is_initialized())
359 return -EINVAL;
360
361 spin_lock_irqsave(&msm_mpm_lock, flags);
362 rc = msm_mpm_enable_irq_exclusive(d, (bool)on, true);
363 spin_unlock_irqrestore(&msm_mpm_lock, flags);
364
365 return rc;
366}
367
368static int msm_mpm_set_irq_type(struct irq_data *d, unsigned int flow_type)
369{
370 unsigned long flags;
371 int rc;
372
373 if (!msm_mpm_is_initialized())
374 return -EINVAL;
375
376 spin_lock_irqsave(&msm_mpm_lock, flags);
377 rc = msm_mpm_set_irq_type_exclusive(d, flow_type);
378 spin_unlock_irqrestore(&msm_mpm_lock, flags);
379
380 return rc;
381}
382
383/******************************************************************************
384 * Public functions
385 *****************************************************************************/
386int msm_mpm_enable_pin(unsigned int pin, unsigned int enable)
387{
388 uint32_t index = MSM_MPM_IRQ_INDEX(pin);
389 uint32_t mask = MSM_MPM_IRQ_MASK(pin);
390 unsigned long flags;
391
392 if (!msm_mpm_is_initialized())
393 return -EINVAL;
394
Mahesh Sivasubramanian01d7f4a2013-04-16 15:22:34 -0600395 if (pin >= MSM_MPM_NR_MPM_IRQS)
Mahesh Sivasubramanian102e5962012-06-20 13:12:11 -0600396 return -EINVAL;
397
398 spin_lock_irqsave(&msm_mpm_lock, flags);
399
400 if (enable)
401 msm_mpm_enabled_irq[index] |= mask;
402 else
403 msm_mpm_enabled_irq[index] &= ~mask;
404
405 spin_unlock_irqrestore(&msm_mpm_lock, flags);
406 return 0;
407}
408
409int msm_mpm_set_pin_wake(unsigned int pin, unsigned int on)
410{
411 uint32_t index = MSM_MPM_IRQ_INDEX(pin);
412 uint32_t mask = MSM_MPM_IRQ_MASK(pin);
413 unsigned long flags;
414
415 if (!msm_mpm_is_initialized())
416 return -EINVAL;
417
418 if (pin >= MSM_MPM_NR_MPM_IRQS)
419 return -EINVAL;
420
421 spin_lock_irqsave(&msm_mpm_lock, flags);
422
423 if (on)
424 msm_mpm_wake_irq[index] |= mask;
425 else
426 msm_mpm_wake_irq[index] &= ~mask;
427
428 spin_unlock_irqrestore(&msm_mpm_lock, flags);
429 return 0;
430}
431
432int msm_mpm_set_pin_type(unsigned int pin, unsigned int flow_type)
433{
434 uint32_t index = MSM_MPM_IRQ_INDEX(pin);
435 uint32_t mask = MSM_MPM_IRQ_MASK(pin);
436 unsigned long flags;
437
438 if (!msm_mpm_is_initialized())
439 return -EINVAL;
440
441 if (pin >= MSM_MPM_NR_MPM_IRQS)
442 return -EINVAL;
443
444 spin_lock_irqsave(&msm_mpm_lock, flags);
445
Mahesh Sivasubramanian12191752013-04-04 10:56:02 -0600446 msm_mpm_set_edge_ctl(pin, flow_type);
Mahesh Sivasubramanian102e5962012-06-20 13:12:11 -0600447
448 if (flow_type & IRQ_TYPE_LEVEL_HIGH)
449 msm_mpm_polarity[index] |= mask;
450 else
451 msm_mpm_polarity[index] &= ~mask;
452
453 spin_unlock_irqrestore(&msm_mpm_lock, flags);
454 return 0;
455}
456
/*
 * Return true when every relevant interrupt in domain @d is monitorable
 * by the MPM, i.e. the domain's "unlisted" bitmap is empty. @from_idle
 * selects the enabled set (idle) vs the wakeup set (suspend). When the
 * corresponding debug bit is set, log each blocking hwirq.
 */
static bool msm_mpm_interrupts_detectable(int d, bool from_idle)
{
	unsigned long *irq_bitmap;
	bool debug_mask, ret = false;
	struct mpm_irqs *unlisted = &unlisted_irqs[d];

	if (!msm_mpm_is_initialized())
		return false;

	if (from_idle) {
		irq_bitmap = unlisted->enabled_irqs;
		debug_mask = msm_mpm_debug_mask &
				MSM_MPM_DEBUG_NON_DETECTABLE_IRQ_IDLE;
	} else {
		irq_bitmap = unlisted->wakeup_irqs;
		debug_mask = msm_mpm_debug_mask &
				MSM_MPM_DEBUG_NON_DETECTABLE_IRQ;
	}

	/* Detectable iff no non-MPM-visible interrupt is currently armed. */
	ret = (bool) __bitmap_empty(irq_bitmap, unlisted->size);

	if (debug_mask && !ret) {
		int i = 0;
		i = find_first_bit(irq_bitmap, unlisted->size);
		pr_info("%s(): %s preventing system sleep modes during %s\n",
				__func__, unlisted->domain_name,
				from_idle ? "idle" : "suspend");

		while (i < unlisted->size) {
			pr_info("\thwirq: %d\n", i);
			i = find_next_bit(irq_bitmap, unlisted->size, i + 1);
		}
	}

	return ret;
}
493
/* True when all armed GPIO-domain interrupts are MPM-monitorable. */
bool msm_mpm_gpio_irqs_detectable(bool from_idle)
{
	return msm_mpm_interrupts_detectable(MSM_MPM_GPIO_IRQ_DOMAIN,
			from_idle);
}
/* True when all armed GIC-domain interrupts are MPM-monitorable. */
bool msm_mpm_irqs_detectable(bool from_idle)
{
	return msm_mpm_interrupts_detectable(MSM_MPM_GIC_IRQ_DOMAIN,
			from_idle);
}
/*
 * Program the MPM just before entering a low power mode.
 *
 * @sclk_count: wakeup deadline in 32.768 kHz sleep-clock ticks from now;
 *              0 means no timed wakeup (timer programmed to ~0).
 * @from_idle:  idle entry uses the enabled set; suspend uses the wake set.
 */
void msm_mpm_enter_sleep(uint32_t sclk_count, bool from_idle)
{
	/* Scale sclk ticks to 19.2 MHz arch-timer ticks... */
	cycle_t wakeup = (u64)sclk_count * ARCH_TIMER_HZ;

	if (!msm_mpm_is_initialized()) {
		pr_err("%s(): MPM not initialized\n", __func__);
		return;
	}

	if (sclk_count) {
		do_div(wakeup, SCLK_HZ);
		/* ...and anchor the deadline to the current counter value. */
		wakeup += arch_counter_get_cntpct();
	} else {
		wakeup = (~0ULL);
	}

	msm_mpm_set(wakeup, !from_idle);
}
522
/*
 * On wakeup, scan the MPM status registers and software-resend any
 * edge-triggered apps interrupt that fired while asleep. Level-triggered
 * interrupts are still asserted in hardware and need no replay.
 */
void msm_mpm_exit_sleep(bool from_idle)
{
	unsigned long pending;	/* one 32-bit status word at a time */
	int i;
	int k;

	if (!msm_mpm_is_initialized()) {
		pr_err("%s(): MPM not initialized\n", __func__);
		return;
	}

	for (i = 0; i < MSM_MPM_REG_WIDTH; i++) {
		pending = msm_mpm_read(MSM_MPM_REG_STATUS, i);

		if (MSM_MPM_DEBUG_PENDING_IRQ & msm_mpm_debug_mask)
			pr_info("%s: pending.%d: 0x%08lx", __func__,
					i, pending);

		/* Walk every pending pin in this status word. */
		k = find_first_bit(&pending, 32);
		while (k < 32) {
			unsigned int mpm_irq = 32 * i + k;
			unsigned int apps_irq = msm_mpm_get_irq_m2a(mpm_irq);
			struct irq_desc *desc = apps_irq ?
						irq_to_desc(apps_irq) : NULL;

			if (desc && !irqd_is_level_type(&desc->irq_data)) {
				irq_set_pending(apps_irq);
				if (from_idle) {
					raw_spin_lock(&desc->lock);
					check_irq_resend(desc, apps_irq);
					raw_spin_unlock(&desc->lock);
				}
			}

			k = find_next_bit(&pending, 32, k + 1);
		}
	}
}
Mahesh Sivasubramanian3855da12013-04-26 11:40:27 -0600561static void msm_mpm_sys_low_power_modes(bool allow)
562{
563 if (allow) {
564 if (xo_enabled) {
565 clk_disable_unprepare(xo_clk);
566 xo_enabled = false;
567 }
568 } else {
569 if (!xo_enabled) {
570 /* If we cannot enable XO clock then we want to flag it,
571 * than having to deal with not being able to wakeup
572 * from a non-monitorable interrupt
573 */
574 BUG_ON(clk_prepare_enable(xo_clk));
575 xo_enabled = true;
576 }
577 }
578}
579
580void msm_mpm_suspend_prepare(void)
581{
582 bool allow = msm_mpm_irqs_detectable(false) &&
583 msm_mpm_gpio_irqs_detectable(false);
584 msm_mpm_sys_low_power_modes(allow);
585}
586EXPORT_SYMBOL(msm_mpm_suspend_prepare);
587
588void msm_mpm_suspend_wake(void)
589{
590 bool allow = msm_mpm_irqs_detectable(true) &&
591 msm_mpm_gpio_irqs_detectable(true);
592 msm_mpm_sys_low_power_modes(allow);
593}
594EXPORT_SYMBOL(msm_mpm_suspend_wake);
595
/*
 * Worker: each time the enabled unlisted-interrupt set changes
 * (signalled through wake_wq), recompute whether deep sleep is allowed
 * and re-vote the XO clock. Loops forever on the dedicated "mpm" queue.
 */
static void msm_mpm_work_fn(struct work_struct *work)
{
	unsigned long flags;
	while (1) {
		bool allow;
		wait_for_completion(&wake_wq);
		spin_lock_irqsave(&msm_mpm_lock, flags);
		allow = msm_mpm_irqs_detectable(true) &&
				msm_mpm_gpio_irqs_detectable(true);
		spin_unlock_irqrestore(&msm_mpm_lock, flags);
		/* XO vote is taken outside the spinlock (clk calls may sleep) */
		msm_mpm_sys_low_power_modes(allow);
	}
}
Mahesh Sivasubramanian102e5962012-06-20 13:12:11 -0600609
610static int __devinit msm_mpm_dev_probe(struct platform_device *pdev)
611{
612 struct resource *res = NULL;
613 int offset, ret;
614 struct msm_mpm_device_data *dev = &msm_mpm_dev_data;
615
616 if (msm_mpm_initialized & MSM_MPM_DEVICE_PROBED) {
617 pr_warn("MPM device probed multiple times\n");
618 return 0;
619 }
620
Mahesh Sivasubramanian3855da12013-04-26 11:40:27 -0600621 xo_clk = devm_clk_get(&pdev->dev, "xo");
622
623 if (IS_ERR(xo_clk)) {
624 pr_err("%s(): Cannot get clk resource for XO\n", __func__);
625 return PTR_ERR(xo_clk);
626 }
627
Mahesh Sivasubramanian102e5962012-06-20 13:12:11 -0600628 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vmpm");
629 if (!res) {
630 pr_err("%s(): Missing RPM memory resource\n", __func__);
Mahesh Sivasubramaniane1ce0c82013-04-29 11:48:27 -0600631 return -EINVAL;
Mahesh Sivasubramanian102e5962012-06-20 13:12:11 -0600632 }
633
634 dev->mpm_request_reg_base = devm_request_and_ioremap(&pdev->dev, res);
635
636 if (!dev->mpm_request_reg_base) {
637 pr_err("%s(): Unable to iomap\n", __func__);
Mahesh Sivasubramaniane1ce0c82013-04-29 11:48:27 -0600638 return -EADDRNOTAVAIL;
Mahesh Sivasubramanian102e5962012-06-20 13:12:11 -0600639 }
640
641 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ipc");
642 if (!res) {
643 pr_err("%s(): Missing GCC memory resource\n", __func__);
Mahesh Sivasubramaniane1ce0c82013-04-29 11:48:27 -0600644 return -EINVAL;
Mahesh Sivasubramanian102e5962012-06-20 13:12:11 -0600645 }
646
647 dev->mpm_apps_ipc_reg = devm_ioremap(&pdev->dev, res->start,
648 resource_size(res));
Mahesh Sivasubramaniane1ce0c82013-04-29 11:48:27 -0600649 if (!dev->mpm_apps_ipc_reg) {
650 pr_err("%s(): Unable to iomap IPC register\n", __func__);
651 return -EADDRNOTAVAIL;
652 }
Mahesh Sivasubramanian102e5962012-06-20 13:12:11 -0600653
654 if (of_property_read_u32(pdev->dev.of_node,
655 "qcom,ipc-bit-offset", &offset)) {
656 pr_info("%s(): Cannot read ipc bit offset\n", __func__);
Mahesh Sivasubramaniane1ce0c82013-04-29 11:48:27 -0600657 return -EINVAL ;
Mahesh Sivasubramanian102e5962012-06-20 13:12:11 -0600658 }
659
660 dev->mpm_apps_ipc_val = (1 << offset);
661
Mahesh Sivasubramanian102e5962012-06-20 13:12:11 -0600662 dev->mpm_ipc_irq = platform_get_irq(pdev, 0);
663
664 if (dev->mpm_ipc_irq == -ENXIO) {
665 pr_info("%s(): Cannot find IRQ resource\n", __func__);
Mahesh Sivasubramaniane1ce0c82013-04-29 11:48:27 -0600666 return -ENXIO;
Mahesh Sivasubramanian102e5962012-06-20 13:12:11 -0600667 }
Mahesh Sivasubramaniane1ce0c82013-04-29 11:48:27 -0600668 ret = devm_request_irq(&pdev->dev, dev->mpm_ipc_irq, msm_mpm_irq,
Mahesh Sivasubramanian102e5962012-06-20 13:12:11 -0600669 IRQF_TRIGGER_RISING, pdev->name, msm_mpm_irq);
670
671 if (ret) {
672 pr_info("%s(): request_irq failed errno: %d\n", __func__, ret);
Mahesh Sivasubramaniane1ce0c82013-04-29 11:48:27 -0600673 return ret;
Mahesh Sivasubramanian102e5962012-06-20 13:12:11 -0600674 }
Mahesh Sivasubramanian62360c62012-07-26 15:27:16 -0600675 ret = irq_set_irq_wake(dev->mpm_ipc_irq, 1);
676
677 if (ret) {
678 pr_err("%s: failed to set wakeup irq %u: %d\n",
679 __func__, dev->mpm_ipc_irq, ret);
Mahesh Sivasubramaniane1ce0c82013-04-29 11:48:27 -0600680 return ret;
Mahesh Sivasubramanian62360c62012-07-26 15:27:16 -0600681
682 }
Mahesh Sivasubramanian3855da12013-04-26 11:40:27 -0600683
684 init_completion(&wake_wq);
685
686 INIT_WORK(&msm_mpm_work, msm_mpm_work_fn);
687 msm_mpm_wq = create_singlethread_workqueue("mpm");
688
689 if (msm_mpm_wq)
690 queue_work(msm_mpm_wq, &msm_mpm_work);
691 else {
692 pr_warn("%s(): Failed to create wq. So voting against XO off",
693 __func__);
694 /* Throw a BUG. Otherwise, its possible that system allows
695 * XO shutdown when there are non-monitored interrupts are
696 * pending and cause errors at a later point in time.
697 */
698 BUG_ON(clk_prepare_enable(xo_clk));
699 xo_enabled = true;
700 }
701
Mahesh Sivasubramanianb9498582012-07-25 11:22:56 -0600702 msm_mpm_initialized |= MSM_MPM_DEVICE_PROBED;
Mahesh Sivasubramanian102e5962012-06-20 13:12:11 -0600703 return 0;
Mahesh Sivasubramanian102e5962012-06-20 13:12:11 -0600704}
705
/* Number of interrupts managed by a linear-revmap irq domain. */
static inline int __init mpm_irq_domain_linear_size(struct irq_domain *d)
{
	return d->revmap_data.linear.size;
}

/* Number of interrupts managed by a legacy-revmap irq domain. */
static inline int __init mpm_irq_domain_legacy_size(struct irq_domain *d)
{
	return d->revmap_data.legacy.size;
}
715
/*
 * Build the apps-irq <-> MPM-pin mapping from the device tree.
 *
 * For each supported domain (GIC, GPIO): resolve the parent controller,
 * allocate the per-domain "unlisted" bitmaps, parse the <pin hwirq>
 * tuple list into hash-table nodes, and finally hook this driver's
 * callbacks into the domain's irq_chip extension so every mask/unmask/
 * set_type/set_wake flows through the MPM bookkeeping.
 *
 * NOTE(review): the failed_malloc path unconditionally dereferences
 * mpm_of_map[i].chip (the success path guards it with a NULL check) and
 * does not free a partially-filled mpm_node array or unhash its entries;
 * both entries currently have non-NULL chips, but confirm before reuse.
 */
void __init of_mpm_init(struct device_node *node)
{
	const __be32 *list;

	/* Per-domain DT keys, display name, chip hooks and size callback. */
	struct mpm_of {
		char *pkey;
		char *map;
		char name[MAX_DOMAIN_NAME];
		struct irq_chip *chip;
		int (*get_max_irqs)(struct irq_domain *d);
	};
	int i;

	struct mpm_of mpm_of_map[MSM_MPM_NR_IRQ_DOMAINS] = {
		{
			"qcom,gic-parent",
			"qcom,gic-map",
			"gic",
			&gic_arch_extn,
			mpm_irq_domain_linear_size,
		},
		{
			"qcom,gpio-parent",
			"qcom,gpio-map",
			"gpio",
			&msm_gpio_irq_extn,
			mpm_irq_domain_legacy_size,
		},
	};

	if (msm_mpm_initialized & MSM_MPM_IRQ_MAPPING_DONE) {
		pr_warn("%s(): MPM driver mapping exists\n", __func__);
		return;
	}

	for (i = 0; i < MSM_MPM_NR_MPM_IRQS; i++)
		INIT_HLIST_HEAD(&irq_hash[i]);

	for (i = 0; i < MSM_MPM_NR_IRQ_DOMAINS; i++) {
		struct device_node *parent = NULL;
		struct mpm_irqs_a2m *mpm_node = NULL;
		struct irq_domain *domain = NULL;
		int size;

		parent = of_parse_phandle(node, mpm_of_map[i].pkey, 0);

		if (!parent) {
			pr_warn("%s(): %s Not found\n", __func__,
					mpm_of_map[i].pkey);
			continue;
		}

		domain = irq_find_host(parent);

		if (!domain) {
			pr_warn("%s(): Cannot find irq controller for %s\n",
					__func__, mpm_of_map[i].pkey);
			continue;
		}

		/* Size the unlisted bitmaps to the domain's irq count. */
		size = mpm_of_map[i].get_max_irqs(domain);
		unlisted_irqs[i].size = size;
		memcpy(unlisted_irqs[i].domain_name, mpm_of_map[i].name,
				MAX_DOMAIN_NAME);

		unlisted_irqs[i].enabled_irqs =
			kzalloc(BITS_TO_LONGS(size) * sizeof(unsigned long),
					GFP_KERNEL);

		if (!unlisted_irqs[i].enabled_irqs)
			goto failed_malloc;

		unlisted_irqs[i].wakeup_irqs =
			kzalloc(BITS_TO_LONGS(size) * sizeof(unsigned long),
					GFP_KERNEL);

		if (!unlisted_irqs[i].wakeup_irqs)
			goto failed_malloc;

		unlisted_irqs[i].domain = domain;

		list = of_get_property(node, mpm_of_map[i].map, &size);

		if (!list || !size) {
			__WARN();
			continue;
		}

		/*
		 * Size is in bytes. Convert to size of uint32_t
		 */
		size /= sizeof(*list);

		/*
		 * The data is represented by a tuple mapping hwirq to a MPM
		 * pin. The number of mappings in the device tree would be
		 * size/2
		 */
		mpm_node = kzalloc(sizeof(struct mpm_irqs_a2m) * size / 2,
				GFP_KERNEL);
		if (!mpm_node)
			goto failed_malloc;

		/* Consume <pin hwirq> pairs and hash each node by hwirq. */
		while (size) {
			unsigned long pin = be32_to_cpup(list++);
			irq_hw_number_t hwirq = be32_to_cpup(list++);

			mpm_node->pin = pin;
			mpm_node->hwirq = hwirq;
			mpm_node->parent = parent;
			mpm_node->domain = domain;
			INIT_HLIST_NODE(&mpm_node->node);

			hlist_add_head(&mpm_node->node,
					&irq_hash[hashfn(mpm_node->hwirq)]);
			size -= 2;
			mpm_node++;
		}

		/* Route this domain's irq_chip operations through the MPM. */
		if (mpm_of_map[i].chip) {
			mpm_of_map[i].chip->irq_mask = msm_mpm_disable_irq;
			mpm_of_map[i].chip->irq_unmask = msm_mpm_enable_irq;
			mpm_of_map[i].chip->irq_disable = msm_mpm_disable_irq;
			mpm_of_map[i].chip->irq_set_type = msm_mpm_set_irq_type;
			mpm_of_map[i].chip->irq_set_wake = msm_mpm_set_irq_wake;
		}

	}
	msm_mpm_initialized |= MSM_MPM_IRQ_MAPPING_DONE;

	return;

failed_malloc:
	/* Undo the chip hooks and bitmap allocations for all domains. */
	for (i = 0; i < MSM_MPM_NR_IRQ_DOMAINS; i++) {
		mpm_of_map[i].chip->irq_mask = NULL;
		mpm_of_map[i].chip->irq_unmask = NULL;
		mpm_of_map[i].chip->irq_disable = NULL;
		mpm_of_map[i].chip->irq_set_type = NULL;
		mpm_of_map[i].chip->irq_set_wake = NULL;

		kfree(unlisted_irqs[i].enabled_irqs);
		kfree(unlisted_irqs[i].wakeup_irqs);

	}
}
861
/* DT match: bound to "qcom,mpm-v2" nodes. */
static struct of_device_id msm_mpm_match_table[] = {
	{.compatible = "qcom,mpm-v2"},
	{},
};

static struct platform_driver msm_mpm_dev_driver = {
	.probe = msm_mpm_dev_probe,
	.driver = {
		.name = "mpm-v2",
		.owner = THIS_MODULE,
		.of_match_table = msm_mpm_match_table,
	},
};

/* Register the MPM platform driver early in boot (arch_initcall). */
int __init msm_mpm_device_init(void)
{
	return platform_driver_register(&msm_mpm_dev_driver);
}
arch_initcall(msm_mpm_device_init);