blob: f1bf64fd83d61db6c07a4ec20b0317cef661979c [file] [log] [blame]
Mahesh Sivasubramanian35b2f982014-01-20 22:13:58 -07001/* Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
Mahesh Sivasubramanian102e5962012-06-20 13:12:11 -06002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13
14#include <linux/module.h>
15#include <linux/kernel.h>
16#include <linux/types.h>
17#include <linux/init.h>
18#include <linux/bitmap.h>
19#include <linux/bitops.h>
20#include <linux/interrupt.h>
21#include <linux/io.h>
22#include <linux/irq.h>
23#include <linux/irqdomain.h>
24#include <linux/list.h>
25#include <linux/platform_device.h>
26#include <linux/of.h>
27#include <linux/of_address.h>
28#include <linux/slab.h>
29#include <linux/spinlock.h>
Mahesh Sivasubramanian3855da12013-04-26 11:40:27 -060030#include <linux/clk.h>
31#include <linux/err.h>
32#include <linux/power_supply.h>
33#include <linux/regulator/consumer.h>
34#include <linux/workqueue.h>
Mahesh Sivasubramanian35b2f982014-01-20 22:13:58 -070035#include <linux/mutex.h>
Mahesh Sivasubramanian102e5962012-06-20 13:12:11 -060036#include <asm/hardware/gic.h>
Mahesh Sivasubramanian2efbc352012-07-18 14:15:44 -060037#include <asm/arch_timer.h>
Mahesh Sivasubramanian102e5962012-06-20 13:12:11 -060038#include <mach/gpio.h>
39#include <mach/mpm.h>
Mahesh Sivasubramanian3855da12013-04-26 11:40:27 -060040#include <mach/clk.h>
41#include <mach/rpm-regulator-smd.h>
Mahesh Sivasubramanian102e5962012-06-20 13:12:11 -060042
43enum {
44 MSM_MPM_GIC_IRQ_DOMAIN,
45 MSM_MPM_GPIO_IRQ_DOMAIN,
46 MSM_MPM_NR_IRQ_DOMAINS,
47};
48
49enum {
50 MSM_MPM_SET_ENABLED,
51 MSM_MPM_SET_WAKEUP,
52 MSM_NR_IRQS_SET,
53};
54
55struct mpm_irqs_a2m {
56 struct irq_domain *domain;
57 struct device_node *parent;
58 irq_hw_number_t hwirq;
59 unsigned long pin;
60 struct hlist_node node;
61};
Mahesh Sivasubramanianf1ddf042013-01-08 14:03:32 -070062#define MAX_DOMAIN_NAME 5
Mahesh Sivasubramanian102e5962012-06-20 13:12:11 -060063
/*
 * Per irq-domain bookkeeping for interrupts that have no MPM pin
 * ("unlisted"): while any such interrupt is enabled/wake-enabled the
 * MPM cannot monitor it, so deep sleep must be vetoed.
 */
struct mpm_irqs {
	struct irq_domain *domain;
	unsigned long *enabled_irqs;	/* bitmap of enabled unlisted hwirqs */
	unsigned long *wakeup_irqs;	/* bitmap of wake-enabled unlisted hwirqs */
	unsigned long size;		/* number of bits in the bitmaps above */
	char domain_name[MAX_DOMAIN_NAME];
};
71
72static struct mpm_irqs unlisted_irqs[MSM_MPM_NR_IRQ_DOMAINS];
73
74static struct hlist_head irq_hash[MSM_MPM_NR_MPM_IRQS];
75static unsigned int msm_mpm_irqs_m2a[MSM_MPM_NR_MPM_IRQS];
76#define MSM_MPM_REG_WIDTH DIV_ROUND_UP(MSM_MPM_NR_MPM_IRQS, 32)
77
78#define MSM_MPM_IRQ_INDEX(irq) (irq / 32)
79#define MSM_MPM_IRQ_MASK(irq) BIT(irq % 32)
80
Mahesh Sivasubramanian102e5962012-06-20 13:12:11 -060081#define hashfn(val) (val % MSM_MPM_NR_MPM_IRQS)
Mahesh Sivasubramanian2efbc352012-07-18 14:15:44 -060082#define SCLK_HZ (32768)
83#define ARCH_TIMER_HZ (19200000)
Mahesh Sivasubramanian102e5962012-06-20 13:12:11 -060084static struct msm_mpm_device_data msm_mpm_dev_data;
85
Mahesh Sivasubramanian3855da12013-04-26 11:40:27 -060086static struct clk *xo_clk;
87static bool xo_enabled;
88static struct workqueue_struct *msm_mpm_wq;
89static struct work_struct msm_mpm_work;
90static struct completion wake_wq;
91
Mahesh Sivasubramanian102e5962012-06-20 13:12:11 -060092enum mpm_reg_offsets {
93 MSM_MPM_REG_WAKEUP,
94 MSM_MPM_REG_ENABLE,
Mahesh Sivasubramanian12191752013-04-04 10:56:02 -060095 MSM_MPM_REG_FALLING_EDGE,
96 MSM_MPM_REG_RISING_EDGE,
Mahesh Sivasubramanian102e5962012-06-20 13:12:11 -060097 MSM_MPM_REG_POLARITY,
98 MSM_MPM_REG_STATUS,
99};
100
101static DEFINE_SPINLOCK(msm_mpm_lock);
102
103static uint32_t msm_mpm_enabled_irq[MSM_MPM_REG_WIDTH];
104static uint32_t msm_mpm_wake_irq[MSM_MPM_REG_WIDTH];
Mahesh Sivasubramanian12191752013-04-04 10:56:02 -0600105static uint32_t msm_mpm_falling_edge[MSM_MPM_REG_WIDTH];
106static uint32_t msm_mpm_rising_edge[MSM_MPM_REG_WIDTH];
Mahesh Sivasubramanian102e5962012-06-20 13:12:11 -0600107static uint32_t msm_mpm_polarity[MSM_MPM_REG_WIDTH];
108
109enum {
110 MSM_MPM_DEBUG_NON_DETECTABLE_IRQ = BIT(0),
111 MSM_MPM_DEBUG_PENDING_IRQ = BIT(1),
112 MSM_MPM_DEBUG_WRITE = BIT(2),
113 MSM_MPM_DEBUG_NON_DETECTABLE_IRQ_IDLE = BIT(3),
114};
115
116static int msm_mpm_debug_mask = 1;
117module_param_named(
118 debug_mask, msm_mpm_debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP
119);
120
121enum mpm_state {
122 MSM_MPM_IRQ_MAPPING_DONE = BIT(0),
123 MSM_MPM_DEVICE_PROBED = BIT(1),
124};
125
126static enum mpm_state msm_mpm_initialized;
127
/*
 * Whether the driver is ready for use.
 *
 * NOTE(review): this evaluates (state & (A | B)), which is non-zero as
 * soon as *either* the DT irq mapping or the platform-device probe has
 * completed, not both.  Paths that touch the vMPM registers additionally
 * rely on the probe having run -- confirm the "either" semantic is
 * intentional before changing it.
 */
static inline bool msm_mpm_is_initialized(void)
{
	return msm_mpm_initialized &
		(MSM_MPM_IRQ_MAPPING_DONE | MSM_MPM_DEVICE_PROBED);

}
134
/*
 * Read one 32-bit sub-word of a vMPM register bank from the shared
 * request area.  Banks are MSM_MPM_REG_WIDTH words wide, so the byte
 * offset is (reg * width + subreg_index) * 4.
 */
static inline uint32_t msm_mpm_read(
	unsigned int reg, unsigned int subreg_index)
{
	unsigned int offset = reg * MSM_MPM_REG_WIDTH + subreg_index;
	return __raw_readl(msm_mpm_dev_data.mpm_request_reg_base + offset * 4);
}
141
/*
 * Write one 32-bit sub-word of a vMPM register bank (see msm_mpm_read()
 * for the layout).  Optionally traces the write when the
 * MSM_MPM_DEBUG_WRITE bit is set in msm_mpm_debug_mask.
 */
static inline void msm_mpm_write(
	unsigned int reg, unsigned int subreg_index, uint32_t value)
{
	unsigned int offset = reg * MSM_MPM_REG_WIDTH + subreg_index;

	__raw_writel(value, msm_mpm_dev_data.mpm_request_reg_base + offset * 4);
	if (MSM_MPM_DEBUG_WRITE & msm_mpm_debug_mask)
		pr_info("%s: reg %u.%u: 0x%08x\n",
			__func__, reg, subreg_index, value);
}
152
/*
 * Ring the Apps->RPM IPC doorbell so the RPM picks up the vMPM register
 * contents written by msm_mpm_set().
 */
static inline void msm_mpm_send_interrupt(void)
{
	__raw_writel(msm_mpm_dev_data.mpm_apps_ipc_val,
			msm_mpm_dev_data.mpm_apps_ipc_reg);
	/* Ensure the write is complete before returning. */
	wmb();
}
160
/*
 * Handler for the RPM->Apps IPC interrupt; intentionally a no-op beyond
 * acknowledging the irq (see comment below).
 */
static irqreturn_t msm_mpm_irq(int irq, void *dev_id)
{
	/*
	 * When the system resumes from deep sleep mode, the RPM hardware wakes
	 * up the Apps processor by triggering this interrupt. This interrupt
	 * has to be enabled and set as wake for the irq to get SPM out of
	 * sleep. Handle the interrupt here to make sure that it gets cleared.
	 */
	return IRQ_HANDLED;
}
171
/*
 * Program the full vMPM register set -- wakeup time, interrupt enable
 * mask, edge-detect and polarity configuration -- then clear latched
 * status and notify the RPM via IPC.
 *
 * @wakeup:  absolute wakeup time in arch-timer ticks (~0ULL = no timer)
 * @wakeset: program the suspend (wake) mask instead of the idle mask
 */
static void msm_mpm_set(cycle_t wakeup, bool wakeset)
{
	uint32_t *irqs;
	unsigned int reg;
	int i;
	uint32_t *expiry_timer;

	/* View the 64-bit tick count as 32-bit words for the WAKEUP bank;
	 * assumes MSM_MPM_REG_WIDTH == 2 -- TODO confirm for this target. */
	expiry_timer = (uint32_t *)&wakeup;

	irqs = wakeset ? msm_mpm_wake_irq : msm_mpm_enabled_irq;
	for (i = 0; i < MSM_MPM_REG_WIDTH; i++) {
		reg = MSM_MPM_REG_WAKEUP;
		msm_mpm_write(reg, i, expiry_timer[i]);

		reg = MSM_MPM_REG_ENABLE;
		msm_mpm_write(reg, i, irqs[i]);

		reg = MSM_MPM_REG_FALLING_EDGE;
		msm_mpm_write(reg, i, msm_mpm_falling_edge[i]);

		reg = MSM_MPM_REG_RISING_EDGE;
		msm_mpm_write(reg, i, msm_mpm_rising_edge[i]);

		reg = MSM_MPM_REG_POLARITY;
		msm_mpm_write(reg, i, msm_mpm_polarity[i]);

		/* Clear latched status so stale pins don't wake us up. */
		reg = MSM_MPM_REG_STATUS;
		msm_mpm_write(reg, i, 0);
	}

	/*
	 * Ensure that the set operation is complete before sending the
	 * interrupt
	 */
	wmb();
	msm_mpm_send_interrupt();
}
209
/*
 * MPM pin -> Linux irq number (0 if the pin was never mapped).
 * No bounds check: callers guarantee pin < MSM_MPM_NR_MPM_IRQS.
 */
static inline unsigned int msm_mpm_get_irq_m2a(unsigned int pin)
{
	return msm_mpm_irqs_m2a[pin];
}
214
/*
 * Look up the MPM pin for an Apps interrupt (domain + hwirq) in
 * irq_hash[].  Returns the pin (0xff for bypass entries) or 0 when the
 * interrupt is not listed.  As a side effect, refreshes the reverse
 * pin -> Linux-irq map for non-bypass entries.
 *
 * Uses the pre-3.9 three-argument hlist_for_each_entry(); "elem" is
 * non-NULL on exit only when a match was found.
 */
static inline uint16_t msm_mpm_get_irq_a2m(struct irq_data *d)
{
	struct hlist_node *elem;
	struct mpm_irqs_a2m *node = NULL;

	hlist_for_each_entry(node, elem, &irq_hash[hashfn(d->hwirq)], node) {
		if ((node->hwirq == d->hwirq)
				&& (d->domain == node->domain)) {
			/*
			 * Update the linux irq mapping. No update required for
			 * bypass interrupts
			 */
			if (node->pin != 0xff)
				msm_mpm_irqs_m2a[node->pin] = d->irq;
			break;
		}
	}
	return elem ? node->pin : 0;
}
234
/*
 * Core enable/wake bookkeeping for one interrupt; caller must hold
 * msm_mpm_lock (hence "_exclusive").
 *
 * Listed (pin != 0) interrupts toggle a bit in the MPM shadow masks;
 * bypass pins (0xff) need no tracking; unlisted interrupts toggle a bit
 * in the per-domain bitmap that vetoes deep sleep, and kick the XO-vote
 * worker when the enabled set changes after probe.
 */
static int msm_mpm_enable_irq_exclusive(
	struct irq_data *d, bool enable, bool wakeset)
{
	uint16_t mpm_pin;

	WARN_ON(!d);
	if (!d)
		return 0;

	mpm_pin = msm_mpm_get_irq_a2m(d);

	/* Bypass pin: monitored by hardware, nothing to track. */
	if (mpm_pin == 0xff)
		return 0;

	if (mpm_pin) {
		uint32_t *mpm_irq_masks = wakeset ?
				msm_mpm_wake_irq : msm_mpm_enabled_irq;
		uint32_t index = MSM_MPM_IRQ_INDEX(mpm_pin);
		uint32_t mask = MSM_MPM_IRQ_MASK(mpm_pin);

		if (enable)
			mpm_irq_masks[index] |= mask;
		else
			mpm_irq_masks[index] &= ~mask;
	} else {
		int i;
		unsigned long *irq_apps;

		/* Find which tracked domain (GIC/GPIO) this irq belongs to. */
		for (i = 0; i < MSM_MPM_NR_IRQ_DOMAINS; i++) {
			if (d->domain == unlisted_irqs[i].domain)
				break;
		}

		if (i == MSM_MPM_NR_IRQ_DOMAINS)
			return 0;
		irq_apps = wakeset ? unlisted_irqs[i].wakeup_irqs :
					unlisted_irqs[i].enabled_irqs;

		if (enable)
			__set_bit(d->hwirq, irq_apps);
		else
			__clear_bit(d->hwirq, irq_apps);

		/* Re-evaluate the XO clock vote in process context. */
		if (!wakeset && (msm_mpm_initialized & MSM_MPM_DEVICE_PROBED))
			complete(&wake_wq);
	}

	return 0;
}
284
Mahesh Sivasubramanian12191752013-04-04 10:56:02 -0600285static void msm_mpm_set_edge_ctl(int pin, unsigned int flow_type)
Mahesh Sivasubramanian102e5962012-06-20 13:12:11 -0600286{
287 uint32_t index;
Mahesh Sivasubramanian12191752013-04-04 10:56:02 -0600288 uint32_t mask;
Mahesh Sivasubramanian102e5962012-06-20 13:12:11 -0600289
Mahesh Sivasubramanian12191752013-04-04 10:56:02 -0600290 index = MSM_MPM_IRQ_INDEX(pin);
291 mask = MSM_MPM_IRQ_MASK(pin);
Mahesh Sivasubramanian102e5962012-06-20 13:12:11 -0600292
293 if (flow_type & IRQ_TYPE_EDGE_FALLING)
Mahesh Sivasubramanian12191752013-04-04 10:56:02 -0600294 msm_mpm_falling_edge[index] |= mask;
295 else
296 msm_mpm_falling_edge[index] &= ~mask;
Mahesh Sivasubramanian102e5962012-06-20 13:12:11 -0600297
Mahesh Sivasubramanian12191752013-04-04 10:56:02 -0600298 if (flow_type & IRQ_TYPE_EDGE_RISING)
299 msm_mpm_rising_edge[index] |= mask;
300 else
301 msm_mpm_rising_edge[index] &= ~mask;
302
Mahesh Sivasubramanian102e5962012-06-20 13:12:11 -0600303}
304
/*
 * Record trigger type (edge bits and level polarity) for a listed MPM
 * interrupt; caller must hold msm_mpm_lock.  Bypass (0xff) and unlisted
 * (0) interrupts are ignored.
 */
static int msm_mpm_set_irq_type_exclusive(
	struct irq_data *d, unsigned int flow_type)
{
	uint32_t mpm_irq;

	mpm_irq = msm_mpm_get_irq_a2m(d);

	if (mpm_irq == 0xff)
		return 0;

	if (mpm_irq) {
		uint32_t index = MSM_MPM_IRQ_INDEX(mpm_irq);
		uint32_t mask = MSM_MPM_IRQ_MASK(mpm_irq);

		if (index >= MSM_MPM_REG_WIDTH)
			return -EFAULT;

		msm_mpm_set_edge_ctl(mpm_irq, flow_type);

		/* Polarity bit set = active-high / level-high. */
		if (flow_type & IRQ_TYPE_LEVEL_HIGH)
			msm_mpm_polarity[index] |= mask;
		else
			msm_mpm_polarity[index] &= ~mask;
	}
	return 0;
}
331
332static int __msm_mpm_enable_irq(struct irq_data *d, bool enable)
333{
334 unsigned long flags;
335 int rc;
336
337 if (!msm_mpm_is_initialized())
338 return -EINVAL;
339
340 spin_lock_irqsave(&msm_mpm_lock, flags);
341
342 rc = msm_mpm_enable_irq_exclusive(d, enable, false);
343 spin_unlock_irqrestore(&msm_mpm_lock, flags);
344
345 return rc;
346}
347
/* irq_chip .irq_unmask hook: track the interrupt as enabled. */
static void msm_mpm_enable_irq(struct irq_data *d)
{
	__msm_mpm_enable_irq(d, true);
}
352
/* irq_chip .irq_mask/.irq_disable hook: track the interrupt as disabled. */
static void msm_mpm_disable_irq(struct irq_data *d)
{
	__msm_mpm_enable_irq(d, false);
}
357
358static int msm_mpm_set_irq_wake(struct irq_data *d, unsigned int on)
359{
360 unsigned long flags;
361 int rc;
362
363 if (!msm_mpm_is_initialized())
364 return -EINVAL;
365
366 spin_lock_irqsave(&msm_mpm_lock, flags);
367 rc = msm_mpm_enable_irq_exclusive(d, (bool)on, true);
368 spin_unlock_irqrestore(&msm_mpm_lock, flags);
369
370 return rc;
371}
372
373static int msm_mpm_set_irq_type(struct irq_data *d, unsigned int flow_type)
374{
375 unsigned long flags;
376 int rc;
377
378 if (!msm_mpm_is_initialized())
379 return -EINVAL;
380
381 spin_lock_irqsave(&msm_mpm_lock, flags);
382 rc = msm_mpm_set_irq_type_exclusive(d, flow_type);
383 spin_unlock_irqrestore(&msm_mpm_lock, flags);
384
385 return rc;
386}
387
388/******************************************************************************
389 * Public functions
390 *****************************************************************************/
391int msm_mpm_enable_pin(unsigned int pin, unsigned int enable)
392{
393 uint32_t index = MSM_MPM_IRQ_INDEX(pin);
394 uint32_t mask = MSM_MPM_IRQ_MASK(pin);
395 unsigned long flags;
396
397 if (!msm_mpm_is_initialized())
398 return -EINVAL;
399
Mahesh Sivasubramanian01d7f4a2013-04-16 15:22:34 -0600400 if (pin >= MSM_MPM_NR_MPM_IRQS)
Mahesh Sivasubramanian102e5962012-06-20 13:12:11 -0600401 return -EINVAL;
402
403 spin_lock_irqsave(&msm_mpm_lock, flags);
404
405 if (enable)
406 msm_mpm_enabled_irq[index] |= mask;
407 else
408 msm_mpm_enabled_irq[index] &= ~mask;
409
410 spin_unlock_irqrestore(&msm_mpm_lock, flags);
411 return 0;
412}
413
414int msm_mpm_set_pin_wake(unsigned int pin, unsigned int on)
415{
416 uint32_t index = MSM_MPM_IRQ_INDEX(pin);
417 uint32_t mask = MSM_MPM_IRQ_MASK(pin);
418 unsigned long flags;
419
420 if (!msm_mpm_is_initialized())
421 return -EINVAL;
422
423 if (pin >= MSM_MPM_NR_MPM_IRQS)
424 return -EINVAL;
425
426 spin_lock_irqsave(&msm_mpm_lock, flags);
427
428 if (on)
429 msm_mpm_wake_irq[index] |= mask;
430 else
431 msm_mpm_wake_irq[index] &= ~mask;
432
433 spin_unlock_irqrestore(&msm_mpm_lock, flags);
434 return 0;
435}
436
437int msm_mpm_set_pin_type(unsigned int pin, unsigned int flow_type)
438{
439 uint32_t index = MSM_MPM_IRQ_INDEX(pin);
440 uint32_t mask = MSM_MPM_IRQ_MASK(pin);
441 unsigned long flags;
442
443 if (!msm_mpm_is_initialized())
444 return -EINVAL;
445
446 if (pin >= MSM_MPM_NR_MPM_IRQS)
447 return -EINVAL;
448
449 spin_lock_irqsave(&msm_mpm_lock, flags);
450
Mahesh Sivasubramanian12191752013-04-04 10:56:02 -0600451 msm_mpm_set_edge_ctl(pin, flow_type);
Mahesh Sivasubramanian102e5962012-06-20 13:12:11 -0600452
453 if (flow_type & IRQ_TYPE_LEVEL_HIGH)
454 msm_mpm_polarity[index] |= mask;
455 else
456 msm_mpm_polarity[index] &= ~mask;
457
458 spin_unlock_irqrestore(&msm_mpm_lock, flags);
459 return 0;
460}
461
/*
 * True when every relevant interrupt of domain index @d can be detected
 * by the MPM, i.e. the domain's "unlisted" bitmap is empty.  For idle
 * the enabled set is checked, for suspend the wakeup set.  Offending
 * hwirqs are logged when the matching debug bit is set.
 *
 * NOTE(review): the loop index "i" is a signed int compared against the
 * unsigned long bitmap size -- fine for realistic sizes, but worth
 * confirming size stays within int range.
 */
static bool msm_mpm_interrupts_detectable(int d, bool from_idle)
{
	unsigned long *irq_bitmap;
	bool debug_mask, ret = false;
	struct mpm_irqs *unlisted = &unlisted_irqs[d];

	if (!msm_mpm_is_initialized())
		return false;

	if (from_idle) {
		irq_bitmap = unlisted->enabled_irqs;
		debug_mask = msm_mpm_debug_mask &
				MSM_MPM_DEBUG_NON_DETECTABLE_IRQ_IDLE;
	} else {
		irq_bitmap = unlisted->wakeup_irqs;
		debug_mask = msm_mpm_debug_mask &
				MSM_MPM_DEBUG_NON_DETECTABLE_IRQ;
	}

	ret = (bool) __bitmap_empty(irq_bitmap, unlisted->size);

	if (debug_mask && !ret) {
		int i = 0;
		i = find_first_bit(irq_bitmap, unlisted->size);
		pr_info("%s(): %s preventing system sleep modes during %s\n",
				__func__, unlisted->domain_name,
				from_idle ? "idle" : "suspend");

		while (i < unlisted->size) {
			pr_info("\thwirq: %d\n", i);
			i = find_next_bit(irq_bitmap, unlisted->size, i + 1);
		}
	}

	return ret;
}
498
/* Can the MPM detect all relevant GPIO interrupts for this sleep mode? */
bool msm_mpm_gpio_irqs_detectable(bool from_idle)
{
	return msm_mpm_interrupts_detectable(MSM_MPM_GPIO_IRQ_DOMAIN,
			from_idle);
}
/* Can the MPM detect all relevant GIC interrupts for this sleep mode? */
bool msm_mpm_irqs_detectable(bool from_idle)
{
	return msm_mpm_interrupts_detectable(MSM_MPM_GIC_IRQ_DOMAIN,
			from_idle);
}
Mahesh Sivasubramanianfc2eb5b2013-11-21 18:30:06 -0700509
/*
 * Sleep-entry hook: convert the sclk (32768 Hz) wakeup count to
 * arch-timer (19.2 MHz) ticks, program the vMPM registers, and steer the
 * RPM IPC interrupt to a CPU that stays online.  sclk_count == 0 means
 * "no timed wakeup" (~0ULL).
 */
void msm_mpm_enter_sleep(uint32_t sclk_count, bool from_idle,
		const struct cpumask *cpumask)
{
	/* ticks = sclk_count * ARCH_TIMER_HZ / SCLK_HZ, then made absolute */
	cycle_t wakeup = (u64)sclk_count * ARCH_TIMER_HZ;

	if (!msm_mpm_is_initialized()) {
		pr_err("%s(): MPM not initialized\n", __func__);
		return;
	}

	if (sclk_count) {
		do_div(wakeup, SCLK_HZ);
		wakeup += arch_counter_get_cntpct();
	} else {
		wakeup = (~0ULL);
	}

	msm_mpm_set(wakeup, !from_idle);
	irq_set_affinity(msm_mpm_dev_data.mpm_ipc_irq, cpumask);
}
530
/*
 * Resume hook: read the latched MPM status, mask it with the interrupt
 * set that was armed for this sleep mode, and software-resend the
 * corresponding Apps edge interrupts that the GIC may have missed while
 * asleep.  Level interrupts re-assert on their own and are skipped.
 */
void msm_mpm_exit_sleep(bool from_idle)
{
	unsigned long pending;
	uint32_t *enabled_intr;
	int i;
	int k;

	if (!msm_mpm_is_initialized()) {
		pr_err("%s(): MPM not initialized\n", __func__);
		return;
	}

	/* Mirror the mask chosen in msm_mpm_set(): idle uses the enabled
	 * set, suspend used the wake set. */
	enabled_intr = from_idle ? msm_mpm_enabled_irq :
						msm_mpm_wake_irq;

	for (i = 0; i < MSM_MPM_REG_WIDTH; i++) {
		pending = msm_mpm_read(MSM_MPM_REG_STATUS, i);
		pending &= enabled_intr[i];

		if (MSM_MPM_DEBUG_PENDING_IRQ & msm_mpm_debug_mask)
			pr_info("%s: enabled_intr pending.%d: 0x%08x 0x%08lx\n",
				__func__, i, enabled_intr[i], pending);

		k = find_first_bit(&pending, 32);
		while (k < 32) {
			unsigned int mpm_irq = 32 * i + k;
			unsigned int apps_irq = msm_mpm_get_irq_m2a(mpm_irq);
			struct irq_desc *desc = apps_irq ?
				irq_to_desc(apps_irq) : NULL;

			if (desc && !irqd_is_level_type(&desc->irq_data)) {
				irq_set_pending(apps_irq);
				if (from_idle) {
					raw_spin_lock(&desc->lock);
					check_irq_resend(desc, apps_irq);
					raw_spin_unlock(&desc->lock);
				}
			}

			k = find_next_bit(&pending, 32, k + 1);
		}
	}
}
/*
 * Vote the XO clock according to whether system low-power modes are
 * allowed: when sleep is allowed the XO vote is dropped; when an
 * MPM-undetectable interrupt is enabled the XO is kept on so the
 * interrupt stays deliverable.  Serialized with a local mutex since it
 * is reached from both the suspend path and the worker thread.
 */
static void msm_mpm_sys_low_power_modes(bool allow)
{
	static DEFINE_MUTEX(enable_xo_mutex);

	mutex_lock(&enable_xo_mutex);
	if (allow) {
		if (xo_enabled) {
			clk_disable_unprepare(xo_clk);
			xo_enabled = false;
		}
	} else {
		if (!xo_enabled) {
			/* If we cannot enable XO clock then we want to flag it,
			 * than having to deal with not being able to wakeup
			 * from a non-monitorable interrupt
			 */
			BUG_ON(clk_prepare_enable(xo_clk));
			xo_enabled = true;
		}
	}
	mutex_unlock(&enable_xo_mutex);
}
596
/*
 * Suspend-prepare hook: allow XO shutdown only if every wake-enabled
 * interrupt (GIC and GPIO) is MPM-detectable during suspend.
 */
void msm_mpm_suspend_prepare(void)
{
	bool allow = msm_mpm_irqs_detectable(false) &&
		msm_mpm_gpio_irqs_detectable(false);
	msm_mpm_sys_low_power_modes(allow);
}
EXPORT_SYMBOL(msm_mpm_suspend_prepare);
604
/*
 * Resume hook: re-evaluate the XO vote against the idle (enabled)
 * interrupt sets now that the system is running again.
 */
void msm_mpm_suspend_wake(void)
{
	bool allow = msm_mpm_irqs_detectable(true) &&
		msm_mpm_gpio_irqs_detectable(true);
	msm_mpm_sys_low_power_modes(allow);
}
EXPORT_SYMBOL(msm_mpm_suspend_wake);
612
/*
 * Worker that lives for the lifetime of the system: each time
 * msm_mpm_enable_irq_exclusive() signals wake_wq, recompute whether all
 * enabled interrupts are MPM-detectable and update the XO vote in
 * process context (clk operations may sleep, so this cannot be done
 * under msm_mpm_lock).
 */
static void msm_mpm_work_fn(struct work_struct *work)
{
	unsigned long flags;
	while (1) {
		bool allow;
		wait_for_completion(&wake_wq);
		spin_lock_irqsave(&msm_mpm_lock, flags);
		allow = msm_mpm_irqs_detectable(true) &&
			msm_mpm_gpio_irqs_detectable(true);
		spin_unlock_irqrestore(&msm_mpm_lock, flags);
		msm_mpm_sys_low_power_modes(allow);
	}
}
Mahesh Sivasubramanian102e5962012-06-20 13:12:11 -0600626
627static int __devinit msm_mpm_dev_probe(struct platform_device *pdev)
628{
629 struct resource *res = NULL;
630 int offset, ret;
631 struct msm_mpm_device_data *dev = &msm_mpm_dev_data;
632
633 if (msm_mpm_initialized & MSM_MPM_DEVICE_PROBED) {
634 pr_warn("MPM device probed multiple times\n");
635 return 0;
636 }
637
Mahesh Sivasubramanian3855da12013-04-26 11:40:27 -0600638 xo_clk = devm_clk_get(&pdev->dev, "xo");
639
640 if (IS_ERR(xo_clk)) {
641 pr_err("%s(): Cannot get clk resource for XO\n", __func__);
642 return PTR_ERR(xo_clk);
643 }
644
Mahesh Sivasubramanian102e5962012-06-20 13:12:11 -0600645 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vmpm");
646 if (!res) {
647 pr_err("%s(): Missing RPM memory resource\n", __func__);
Mahesh Sivasubramaniane1ce0c82013-04-29 11:48:27 -0600648 return -EINVAL;
Mahesh Sivasubramanian102e5962012-06-20 13:12:11 -0600649 }
650
651 dev->mpm_request_reg_base = devm_request_and_ioremap(&pdev->dev, res);
652
653 if (!dev->mpm_request_reg_base) {
654 pr_err("%s(): Unable to iomap\n", __func__);
Mahesh Sivasubramaniane1ce0c82013-04-29 11:48:27 -0600655 return -EADDRNOTAVAIL;
Mahesh Sivasubramanian102e5962012-06-20 13:12:11 -0600656 }
657
658 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ipc");
659 if (!res) {
660 pr_err("%s(): Missing GCC memory resource\n", __func__);
Mahesh Sivasubramaniane1ce0c82013-04-29 11:48:27 -0600661 return -EINVAL;
Mahesh Sivasubramanian102e5962012-06-20 13:12:11 -0600662 }
663
664 dev->mpm_apps_ipc_reg = devm_ioremap(&pdev->dev, res->start,
665 resource_size(res));
Mahesh Sivasubramaniane1ce0c82013-04-29 11:48:27 -0600666 if (!dev->mpm_apps_ipc_reg) {
667 pr_err("%s(): Unable to iomap IPC register\n", __func__);
668 return -EADDRNOTAVAIL;
669 }
Mahesh Sivasubramanian102e5962012-06-20 13:12:11 -0600670
671 if (of_property_read_u32(pdev->dev.of_node,
672 "qcom,ipc-bit-offset", &offset)) {
673 pr_info("%s(): Cannot read ipc bit offset\n", __func__);
Mahesh Sivasubramaniane1ce0c82013-04-29 11:48:27 -0600674 return -EINVAL ;
Mahesh Sivasubramanian102e5962012-06-20 13:12:11 -0600675 }
676
677 dev->mpm_apps_ipc_val = (1 << offset);
678
Mahesh Sivasubramanian102e5962012-06-20 13:12:11 -0600679 dev->mpm_ipc_irq = platform_get_irq(pdev, 0);
680
681 if (dev->mpm_ipc_irq == -ENXIO) {
682 pr_info("%s(): Cannot find IRQ resource\n", __func__);
Mahesh Sivasubramaniane1ce0c82013-04-29 11:48:27 -0600683 return -ENXIO;
Mahesh Sivasubramanian102e5962012-06-20 13:12:11 -0600684 }
Mahesh Sivasubramaniane1ce0c82013-04-29 11:48:27 -0600685 ret = devm_request_irq(&pdev->dev, dev->mpm_ipc_irq, msm_mpm_irq,
Mahesh Sivasubramanianed763b32014-03-05 15:22:03 -0700686 IRQF_TRIGGER_RISING | IRQF_NO_SUSPEND, pdev->name,
687 msm_mpm_irq);
Mahesh Sivasubramanian102e5962012-06-20 13:12:11 -0600688
689 if (ret) {
690 pr_info("%s(): request_irq failed errno: %d\n", __func__, ret);
Mahesh Sivasubramaniane1ce0c82013-04-29 11:48:27 -0600691 return ret;
Mahesh Sivasubramanian102e5962012-06-20 13:12:11 -0600692 }
Mahesh Sivasubramanian62360c62012-07-26 15:27:16 -0600693 ret = irq_set_irq_wake(dev->mpm_ipc_irq, 1);
694
695 if (ret) {
696 pr_err("%s: failed to set wakeup irq %u: %d\n",
697 __func__, dev->mpm_ipc_irq, ret);
Mahesh Sivasubramaniane1ce0c82013-04-29 11:48:27 -0600698 return ret;
Mahesh Sivasubramanian62360c62012-07-26 15:27:16 -0600699
700 }
Mahesh Sivasubramanian3855da12013-04-26 11:40:27 -0600701
702 init_completion(&wake_wq);
703
704 INIT_WORK(&msm_mpm_work, msm_mpm_work_fn);
705 msm_mpm_wq = create_singlethread_workqueue("mpm");
706
707 if (msm_mpm_wq)
708 queue_work(msm_mpm_wq, &msm_mpm_work);
709 else {
710 pr_warn("%s(): Failed to create wq. So voting against XO off",
711 __func__);
712 /* Throw a BUG. Otherwise, its possible that system allows
713 * XO shutdown when there are non-monitored interrupts are
714 * pending and cause errors at a later point in time.
715 */
716 BUG_ON(clk_prepare_enable(xo_clk));
717 xo_enabled = true;
718 }
719
Mahesh Sivasubramanianb9498582012-07-25 11:22:56 -0600720 msm_mpm_initialized |= MSM_MPM_DEVICE_PROBED;
Mahesh Sivasubramanian102e5962012-06-20 13:12:11 -0600721 return 0;
Mahesh Sivasubramanian102e5962012-06-20 13:12:11 -0600722}
723
/* Number of hwirqs in a linear-revmap irq domain (GIC). */
static inline int __init mpm_irq_domain_linear_size(struct irq_domain *d)
{
	return d->revmap_data.linear.size;
}
728
/* Number of hwirqs in a legacy-revmap irq domain (GPIO). */
static inline int __init mpm_irq_domain_legacy_size(struct irq_domain *d)
{
	return d->revmap_data.legacy.size;
}
733
/*
 * Early (pre-probe) initialization from the device tree: for each
 * supported irq domain (GIC, GPIO) build the hwirq -> MPM-pin hash table
 * from the "qcom,*-map" tuples, allocate the unlisted-irq bitmaps, and
 * install the MPM hooks into the domain's irq_chip extension so enable/
 * wake/type changes are tracked from then on.
 *
 * NOTE(review): the failed_malloc path frees the bitmaps but not any
 * mpm_node arrays already hashed, and unconditionally dereferences
 * mpm_of_map[i].chip -- both entries currently have a non-NULL chip, so
 * the latter is safe today.
 */
void __init of_mpm_init(struct device_node *node)
{
	const __be32 *list;

	/* Per-domain DT keys, display name, irq_chip extension and size
	 * accessor. */
	struct mpm_of {
		char *pkey;
		char *map;
		char name[MAX_DOMAIN_NAME];
		struct irq_chip *chip;
		int (*get_max_irqs)(struct irq_domain *d);
	};
	int i;

	struct mpm_of mpm_of_map[MSM_MPM_NR_IRQ_DOMAINS] = {
		{
			"qcom,gic-parent",
			"qcom,gic-map",
			"gic",
			&gic_arch_extn,
			mpm_irq_domain_linear_size,
		},
		{
			"qcom,gpio-parent",
			"qcom,gpio-map",
			"gpio",
			&msm_gpio_irq_extn,
			mpm_irq_domain_legacy_size,
		},
	};

	if (msm_mpm_initialized & MSM_MPM_IRQ_MAPPING_DONE) {
		pr_warn("%s(): MPM driver mapping exists\n", __func__);
		return;
	}

	for (i = 0; i < MSM_MPM_NR_MPM_IRQS; i++)
		INIT_HLIST_HEAD(&irq_hash[i]);

	for (i = 0; i < MSM_MPM_NR_IRQ_DOMAINS; i++) {
		struct device_node *parent = NULL;
		struct mpm_irqs_a2m *mpm_node = NULL;
		struct irq_domain *domain = NULL;
		int size;

		parent = of_parse_phandle(node, mpm_of_map[i].pkey, 0);

		if (!parent) {
			pr_warn("%s(): %s Not found\n", __func__,
					mpm_of_map[i].pkey);
			continue;
		}

		domain = irq_find_host(parent);

		if (!domain) {
			pr_warn("%s(): Cannot find irq controller for %s\n",
					__func__, mpm_of_map[i].pkey);
			continue;
		}

		size = mpm_of_map[i].get_max_irqs(domain);
		unlisted_irqs[i].size = size;
		memcpy(unlisted_irqs[i].domain_name, mpm_of_map[i].name,
				MAX_DOMAIN_NAME);

		unlisted_irqs[i].enabled_irqs =
			kzalloc(BITS_TO_LONGS(size) * sizeof(unsigned long),
					GFP_KERNEL);

		if (!unlisted_irqs[i].enabled_irqs)
			goto failed_malloc;

		unlisted_irqs[i].wakeup_irqs =
			kzalloc(BITS_TO_LONGS(size) * sizeof(unsigned long),
					GFP_KERNEL);

		if (!unlisted_irqs[i].wakeup_irqs)
			goto failed_malloc;

		unlisted_irqs[i].domain = domain;

		list = of_get_property(node, mpm_of_map[i].map, &size);

		if (!list || !size) {
			__WARN();
			continue;
		}

		/*
		 * Size is in bytes. Convert to size of uint32_t
		 */
		size /= sizeof(*list);

		/*
		 * The data is represented by a tuple mapping hwirq to a MPM
		 * pin. The number of mappings in the device tree would be
		 * size/2
		 */
		mpm_node = kzalloc(sizeof(struct mpm_irqs_a2m) * size / 2,
				GFP_KERNEL);
		if (!mpm_node)
			goto failed_malloc;

		/* Consume <pin hwirq> pairs and hash each node by hwirq. */
		while (size) {
			unsigned long pin = be32_to_cpup(list++);
			irq_hw_number_t hwirq = be32_to_cpup(list++);

			mpm_node->pin = pin;
			mpm_node->hwirq = hwirq;
			mpm_node->parent = parent;
			mpm_node->domain = domain;
			INIT_HLIST_NODE(&mpm_node->node);

			hlist_add_head(&mpm_node->node,
					&irq_hash[hashfn(mpm_node->hwirq)]);
			size -= 2;
			mpm_node++;
		}

		/* Route the domain's irq_chip extension hooks through MPM. */
		if (mpm_of_map[i].chip) {
			mpm_of_map[i].chip->irq_mask = msm_mpm_disable_irq;
			mpm_of_map[i].chip->irq_unmask = msm_mpm_enable_irq;
			mpm_of_map[i].chip->irq_disable = msm_mpm_disable_irq;
			mpm_of_map[i].chip->irq_set_type = msm_mpm_set_irq_type;
			mpm_of_map[i].chip->irq_set_wake = msm_mpm_set_irq_wake;
		}

	}
	msm_mpm_initialized |= MSM_MPM_IRQ_MAPPING_DONE;

	return;

failed_malloc:
	/* Undo the chip hooks for all domains and free the bitmaps. */
	for (i = 0; i < MSM_MPM_NR_IRQ_DOMAINS; i++) {
		mpm_of_map[i].chip->irq_mask = NULL;
		mpm_of_map[i].chip->irq_unmask = NULL;
		mpm_of_map[i].chip->irq_disable = NULL;
		mpm_of_map[i].chip->irq_set_type = NULL;
		mpm_of_map[i].chip->irq_set_wake = NULL;

		kfree(unlisted_irqs[i].enabled_irqs);
		kfree(unlisted_irqs[i].wakeup_irqs);

	}
}
879
880static struct of_device_id msm_mpm_match_table[] = {
881 {.compatible = "qcom,mpm-v2"},
882 {},
883};
884
885static struct platform_driver msm_mpm_dev_driver = {
886 .probe = msm_mpm_dev_probe,
887 .driver = {
888 .name = "mpm-v2",
889 .owner = THIS_MODULE,
890 .of_match_table = msm_mpm_match_table,
891 },
892};
893
894int __init msm_mpm_device_init(void)
895{
896 return platform_driver_register(&msm_mpm_dev_driver);
897}
898arch_initcall(msm_mpm_device_init);