/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/cpuidle.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ktime.h>
#include <linux/pm.h>
#include <linux/pm_qos_params.h>
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/suspend.h>
#include <linux/tick.h>
#include <linux/uaccess.h>
#include <linux/wakelock.h>
#include <linux/delay.h>
#include <mach/msm_iomap.h>
#include <mach/system.h>
#include <asm/cacheflush.h>
#include <asm/hardware/gic.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/hardware/cache-l2x0.h>
#ifdef CONFIG_VFP
#include <asm/vfp.h>
#endif

#include "acpuclock.h"
#include "clock.h"
#include "avs.h"
#include <mach/cpuidle.h>
#include "idle.h"
#include "pm.h"
#include "rpm_resources.h"
#include "scm-boot.h"
#include "spm.h"
#include "timer.h"
#include "qdss.h"
#include "pm-boot.h"

/******************************************************************************
 * Debug Definitions
 *****************************************************************************/


enum {
	MSM_PM_DEBUG_SUSPEND = BIT(0),
	MSM_PM_DEBUG_POWER_COLLAPSE = BIT(1),
	MSM_PM_DEBUG_SUSPEND_LIMITS = BIT(2),
	MSM_PM_DEBUG_CLOCK = BIT(3),
	MSM_PM_DEBUG_RESET_VECTOR = BIT(4),
	MSM_PM_DEBUG_IDLE_CLK = BIT(5),
	MSM_PM_DEBUG_IDLE = BIT(6),
	MSM_PM_DEBUG_IDLE_LIMITS = BIT(7),
	MSM_PM_DEBUG_HOTPLUG = BIT(8),
};

static int msm_pm_debug_mask = 1;
module_param_named(
	debug_mask, msm_pm_debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP
);


/******************************************************************************
 * Sleep Modes and Parameters
 *****************************************************************************/

static struct msm_pm_platform_data *msm_pm_modes;
static int rpm_cpu0_wakeup_irq;

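/*
 * msm_pm_modes holds one msm_pm_platform_data entry per (cpu, sleep mode)
 * pair, indexed through MSM_PM_MODE(cpu, mode) (presumably defined in pm.h);
 * the BUG_ON below only checks that the board file passed in at least that
 * many entries.
 */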
void __init msm_pm_set_platform_data(
	struct msm_pm_platform_data *data, int count)
{
	BUG_ON(MSM_PM_SLEEP_MODE_NR * num_possible_cpus() > count);
	msm_pm_modes = data;
}

void __init msm_pm_set_rpm_wakeup_irq(unsigned int irq)
{
	rpm_cpu0_wakeup_irq = irq;
}

enum {
	MSM_PM_MODE_ATTR_SUSPEND,
	MSM_PM_MODE_ATTR_IDLE,
	MSM_PM_MODE_ATTR_NR,
};

static char *msm_pm_mode_attr_labels[MSM_PM_MODE_ATTR_NR] = {
	[MSM_PM_MODE_ATTR_SUSPEND] = "suspend_enabled",
	[MSM_PM_MODE_ATTR_IDLE] = "idle_enabled",
};

struct msm_pm_kobj_attribute {
	unsigned int cpu;
	struct kobj_attribute ka;
};

#define GET_CPU_OF_ATTR(attr) \
	(container_of(attr, struct msm_pm_kobj_attribute, ka)->cpu)

struct msm_pm_sysfs_sleep_mode {
	struct kobject *kobj;
	struct attribute_group attr_group;
	struct attribute *attrs[MSM_PM_MODE_ATTR_NR + 1];
	struct msm_pm_kobj_attribute kas[MSM_PM_MODE_ATTR_NR];
};

static char *msm_pm_sleep_mode_labels[MSM_PM_SLEEP_MODE_NR] = {
	[MSM_PM_SLEEP_MODE_POWER_COLLAPSE] = "power_collapse",
	[MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT] = "wfi",
	[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE] =
		"standalone_power_collapse",
};

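/*
 * The attributes below show up as
 * /sys/module/<KBUILD_MODNAME>/modes/cpu<N>/<mode label>/{suspend,idle}_enabled,
 * created per possible CPU by msm_pm_mode_sysfs_add() further down.
 */
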
/*
 * Write out the attribute.
 */
static ssize_t msm_pm_mode_attr_show(
	struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	int ret = -EINVAL;
	int i;

	for (i = 0; i < MSM_PM_SLEEP_MODE_NR; i++) {
		struct kernel_param kp;
		unsigned int cpu;
		struct msm_pm_platform_data *mode;

		if (msm_pm_sleep_mode_labels[i] == NULL)
			continue;

		if (strcmp(kobj->name, msm_pm_sleep_mode_labels[i]))
			continue;

		cpu = GET_CPU_OF_ATTR(attr);
		mode = &msm_pm_modes[MSM_PM_MODE(cpu, i)];

		if (!strcmp(attr->attr.name,
			msm_pm_mode_attr_labels[MSM_PM_MODE_ATTR_SUSPEND])) {
			u32 arg = mode->suspend_enabled;
			kp.arg = &arg;
			ret = param_get_ulong(buf, &kp);
		} else if (!strcmp(attr->attr.name,
			msm_pm_mode_attr_labels[MSM_PM_MODE_ATTR_IDLE])) {
			u32 arg = mode->idle_enabled;
			kp.arg = &arg;
			ret = param_get_ulong(buf, &kp);
		}

		break;
	}

	if (ret > 0) {
		strlcat(buf, "\n", PAGE_SIZE);
		ret++;
	}

	return ret;
}

/*
 * Read in the new attribute value.
 */
static ssize_t msm_pm_mode_attr_store(struct kobject *kobj,
	struct kobj_attribute *attr, const char *buf, size_t count)
{
	int ret = -EINVAL;
	int i;

	for (i = 0; i < MSM_PM_SLEEP_MODE_NR; i++) {
		struct kernel_param kp;
		unsigned int cpu;
		struct msm_pm_platform_data *mode;

		if (msm_pm_sleep_mode_labels[i] == NULL)
			continue;

		if (strcmp(kobj->name, msm_pm_sleep_mode_labels[i]))
			continue;

		cpu = GET_CPU_OF_ATTR(attr);
		mode = &msm_pm_modes[MSM_PM_MODE(cpu, i)];

		if (!strcmp(attr->attr.name,
			msm_pm_mode_attr_labels[MSM_PM_MODE_ATTR_SUSPEND])) {
			kp.arg = &mode->suspend_enabled;
			ret = param_set_byte(buf, &kp);
		} else if (!strcmp(attr->attr.name,
			msm_pm_mode_attr_labels[MSM_PM_MODE_ATTR_IDLE])) {
			kp.arg = &mode->idle_enabled;
			ret = param_set_byte(buf, &kp);
		}

		break;
	}

	return ret ? ret : count;
}

/*
 * Add sysfs entries for one cpu.
 */
static int __init msm_pm_mode_sysfs_add_cpu(
	unsigned int cpu, struct kobject *modes_kobj)
{
	char cpu_name[8];
	struct kobject *cpu_kobj;
	struct msm_pm_sysfs_sleep_mode *mode = NULL;
	int i, j, k;
	int ret;

	snprintf(cpu_name, sizeof(cpu_name), "cpu%u", cpu);
	cpu_kobj = kobject_create_and_add(cpu_name, modes_kobj);
	if (!cpu_kobj) {
		pr_err("%s: cannot create %s kobject\n", __func__, cpu_name);
		ret = -ENOMEM;
		goto mode_sysfs_add_cpu_exit;
	}

	for (i = 0; i < MSM_PM_SLEEP_MODE_NR; i++) {
		int idx = MSM_PM_MODE(cpu, i);

		if ((!msm_pm_modes[idx].suspend_supported)
			&& (!msm_pm_modes[idx].idle_supported))
			continue;

		mode = kzalloc(sizeof(*mode), GFP_KERNEL);
		if (!mode) {
			pr_err("%s: cannot allocate memory for attributes\n",
				__func__);
			ret = -ENOMEM;
			goto mode_sysfs_add_cpu_exit;
		}

		mode->kobj = kobject_create_and_add(
			msm_pm_sleep_mode_labels[i], cpu_kobj);
		if (!mode->kobj) {
			pr_err("%s: cannot create kobject\n", __func__);
			ret = -ENOMEM;
			goto mode_sysfs_add_cpu_exit;
		}

		for (k = 0, j = 0; k < MSM_PM_MODE_ATTR_NR; k++) {
			if ((k == MSM_PM_MODE_ATTR_IDLE) &&
				!msm_pm_modes[idx].idle_supported)
				continue;
			if ((k == MSM_PM_MODE_ATTR_SUSPEND) &&
				!msm_pm_modes[idx].suspend_supported)
				continue;
			mode->kas[j].cpu = cpu;
			mode->kas[j].ka.attr.mode = 0644;
			mode->kas[j].ka.show = msm_pm_mode_attr_show;
			mode->kas[j].ka.store = msm_pm_mode_attr_store;
			mode->kas[j].ka.attr.name = msm_pm_mode_attr_labels[k];
			mode->attrs[j] = &mode->kas[j].ka.attr;
			j++;
		}
		mode->attrs[j] = NULL;

		mode->attr_group.attrs = mode->attrs;
		ret = sysfs_create_group(mode->kobj, &mode->attr_group);
		if (ret) {
			pr_err("%s: cannot create kobject attribute group\n",
				__func__);
			goto mode_sysfs_add_cpu_exit;
		}
	}

	ret = 0;

mode_sysfs_add_cpu_exit:
	if (ret) {
		if (mode && mode->kobj)
			kobject_del(mode->kobj);
		kfree(mode);
	}

	return ret;
}

/*
 * Add sysfs entries for the sleep modes.
 */
static int __init msm_pm_mode_sysfs_add(void)
{
	struct kobject *module_kobj;
	struct kobject *modes_kobj;
	unsigned int cpu;
	int ret;

	module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
	if (!module_kobj) {
		pr_err("%s: cannot find kobject for module %s\n",
			__func__, KBUILD_MODNAME);
		ret = -ENOENT;
		goto mode_sysfs_add_exit;
	}

	modes_kobj = kobject_create_and_add("modes", module_kobj);
	if (!modes_kobj) {
		pr_err("%s: cannot create modes kobject\n", __func__);
		ret = -ENOMEM;
		goto mode_sysfs_add_exit;
	}

	for_each_possible_cpu(cpu) {
		ret = msm_pm_mode_sysfs_add_cpu(cpu, modes_kobj);
		if (ret)
			goto mode_sysfs_add_exit;
	}

	ret = 0;

mode_sysfs_add_exit:
	return ret;
}

/******************************************************************************
 * CONFIG_MSM_IDLE_STATS
 *****************************************************************************/

#ifdef CONFIG_MSM_IDLE_STATS
enum msm_pm_time_stats_id {
	MSM_PM_STAT_REQUESTED_IDLE,
	MSM_PM_STAT_IDLE_WFI,
	MSM_PM_STAT_IDLE_STANDALONE_POWER_COLLAPSE,
	MSM_PM_STAT_IDLE_POWER_COLLAPSE,
	MSM_PM_STAT_SUSPEND,
	MSM_PM_STAT_COUNT
};

struct msm_pm_time_stats {
	const char *name;
	int64_t first_bucket_time;
	int bucket[CONFIG_MSM_IDLE_STATS_BUCKET_COUNT];
	int64_t min_time[CONFIG_MSM_IDLE_STATS_BUCKET_COUNT];
	int64_t max_time[CONFIG_MSM_IDLE_STATS_BUCKET_COUNT];
	int count;
	int64_t total_time;
};

struct msm_pm_cpu_time_stats {
	struct msm_pm_time_stats stats[MSM_PM_STAT_COUNT];
};

static DEFINE_SPINLOCK(msm_pm_stats_lock);
static DEFINE_PER_CPU_SHARED_ALIGNED(
	struct msm_pm_cpu_time_stats, msm_pm_stats);

/*
 * Add the given time data to the statistics collection.
 */
static void msm_pm_add_stat(enum msm_pm_time_stats_id id, int64_t t)
{
	unsigned long flags;
	struct msm_pm_time_stats *stats;
	int64_t bt;
	int i;

	spin_lock_irqsave(&msm_pm_stats_lock, flags);
	stats = __get_cpu_var(msm_pm_stats).stats;

	stats[id].total_time += t;
	stats[id].count++;

	bt = t;
	do_div(bt, stats[id].first_bucket_time);

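	/*
	 * Pick a histogram bucket: bt is the time in units of
	 * first_bucket_time, and each successive bucket covers a range
	 * 2^CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT times wider than the one
	 * before it. For example, with a shift of 2, bt == 9 gives
	 * fls(9) == 4 and DIV_ROUND_UP(4, 2) == 2, i.e. the third bucket.
	 */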
	if (bt < 1ULL << (CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT *
			(CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1)))
		i = DIV_ROUND_UP(fls((uint32_t)bt),
			CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT);
	else
		i = CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1;

	if (i >= CONFIG_MSM_IDLE_STATS_BUCKET_COUNT)
		i = CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1;

	stats[id].bucket[i]++;

	if (t < stats[id].min_time[i] || !stats[id].max_time[i])
		stats[id].min_time[i] = t;
	if (t > stats[id].max_time[i])
		stats[id].max_time[i] = t;

	spin_unlock_irqrestore(&msm_pm_stats_lock, flags);
}

/*
 * Helper function of snprintf where buf is auto-incremented, size is auto-
 * decremented, and there is no return value.
 *
 * NOTE: buf and size must be l-values (e.g. variables)
 */
#define SNPRINTF(buf, size, format, ...) \
	do { \
		if (size > 0) { \
			int ret; \
			ret = snprintf(buf, size, format, ## __VA_ARGS__); \
			if (ret > size) { \
				buf += size; \
				size = 0; \
			} else { \
				buf += ret; \
				size -= ret; \
			} \
		} \
	} while (0)

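/*
 * Legacy procfs read handler: each call formats the statistics for one
 * (cpu, stat id) pair, with the file offset treated as a record index
 * rather than a byte offset (hence *start being set to 1 below).
 */
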
/*
 * Write out the power management statistics.
 */
static int msm_pm_read_proc
	(char *page, char **start, off_t off, int count, int *eof, void *data)
{
	unsigned int cpu = off / MSM_PM_STAT_COUNT;
	int id = off % MSM_PM_STAT_COUNT;
	char *p = page;

	if (count < 1024) {
		*start = (char *) 0;
		*eof = 0;
		return 0;
	}

	if (cpu < num_possible_cpus()) {
		unsigned long flags;
		struct msm_pm_time_stats *stats;
		int i;
		int64_t bucket_time;
		int64_t s;
		uint32_t ns;

		spin_lock_irqsave(&msm_pm_stats_lock, flags);
		stats = per_cpu(msm_pm_stats, cpu).stats;

		s = stats[id].total_time;
		ns = do_div(s, NSEC_PER_SEC);
		SNPRINTF(p, count,
			"[cpu %u] %s:\n"
			"  count: %7d\n"
			"  total_time: %lld.%09u\n",
			cpu, stats[id].name,
			stats[id].count,
			s, ns);

		bucket_time = stats[id].first_bucket_time;
		for (i = 0; i < CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1; i++) {
			s = bucket_time;
			ns = do_div(s, NSEC_PER_SEC);
			SNPRINTF(p, count,
				"   <%6lld.%09u: %7d (%lld-%lld)\n",
				s, ns, stats[id].bucket[i],
				stats[id].min_time[i],
				stats[id].max_time[i]);

			bucket_time <<= CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT;
		}

		SNPRINTF(p, count, "  >=%6lld.%09u: %7d (%lld-%lld)\n",
			s, ns, stats[id].bucket[i],
			stats[id].min_time[i],
			stats[id].max_time[i]);

		*start = (char *) 1;
		*eof = (off + 1 >= MSM_PM_STAT_COUNT * num_possible_cpus());

		spin_unlock_irqrestore(&msm_pm_stats_lock, flags);
	}

	return p - page;
}
#undef SNPRINTF

#define MSM_PM_STATS_RESET "reset"

/*
 * Reset the power management statistics values.
 */
static int msm_pm_write_proc(struct file *file, const char __user *buffer,
	unsigned long count, void *data)
{
	char buf[sizeof(MSM_PM_STATS_RESET)];
	int ret;
	unsigned long flags;
	unsigned int cpu;

	if (count < strlen(MSM_PM_STATS_RESET)) {
		ret = -EINVAL;
		goto write_proc_failed;
	}

	if (copy_from_user(buf, buffer, strlen(MSM_PM_STATS_RESET))) {
		ret = -EFAULT;
		goto write_proc_failed;
	}

	if (memcmp(buf, MSM_PM_STATS_RESET, strlen(MSM_PM_STATS_RESET))) {
		ret = -EINVAL;
		goto write_proc_failed;
	}

	spin_lock_irqsave(&msm_pm_stats_lock, flags);
	for_each_possible_cpu(cpu) {
		struct msm_pm_time_stats *stats;
		int i;

		stats = per_cpu(msm_pm_stats, cpu).stats;
		for (i = 0; i < MSM_PM_STAT_COUNT; i++) {
			memset(stats[i].bucket,
				0, sizeof(stats[i].bucket));
			memset(stats[i].min_time,
				0, sizeof(stats[i].min_time));
			memset(stats[i].max_time,
				0, sizeof(stats[i].max_time));
			stats[i].count = 0;
			stats[i].total_time = 0;
		}
	}

	spin_unlock_irqrestore(&msm_pm_stats_lock, flags);
	return count;

write_proc_failed:
	return ret;
}
#undef MSM_PM_STATS_RESET
#endif /* CONFIG_MSM_IDLE_STATS */


/******************************************************************************
 * Configure Hardware before/after Low Power Mode
 *****************************************************************************/

/*
 * Configure hardware registers in preparation for Apps power down.
 */
static void msm_pm_config_hw_before_power_down(void)
{
	return;
}

/*
 * Clear hardware registers after Apps powers up.
 */
static void msm_pm_config_hw_after_power_up(void)
{
	return;
}

/*
 * Configure hardware registers in preparation for SWFI.
 */
static void msm_pm_config_hw_before_swfi(void)
{
	return;
}


/******************************************************************************
 * Suspend Max Sleep Time
 *****************************************************************************/

#ifdef CONFIG_MSM_SLEEP_TIME_OVERRIDE
static int msm_pm_sleep_time_override;
module_param_named(sleep_time_override,
	msm_pm_sleep_time_override, int, S_IRUGO | S_IWUSR | S_IWGRP);
#endif

#define SCLK_HZ (32768)
#define MSM_PM_SLEEP_TICK_LIMIT (0x6DDD000)
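
/*
 * With SCLK_HZ = 32768, NSEC_PER_SEC / SCLK_HZ works out to 30517 ns per
 * slow clock tick (integer division), and MSM_PM_SLEEP_TICK_LIMIT
 * (0x6DDD000 = 115200000 ticks) caps a requested sleep at roughly
 * 3515 seconds.
 */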

static uint32_t msm_pm_max_sleep_time;

/*
 * Convert time from nanoseconds to slow clock ticks, then cap it to the
 * specified limit
 */
static int64_t msm_pm_convert_and_cap_time(int64_t time_ns, int64_t limit)
{
	do_div(time_ns, NSEC_PER_SEC / SCLK_HZ);
	return (time_ns > limit) ? limit : time_ns;
}

/*
 * Set the sleep time for suspend. 0 means infinite sleep time.
 */
void msm_pm_set_max_sleep_time(int64_t max_sleep_time_ns)
{
	if (max_sleep_time_ns == 0) {
		msm_pm_max_sleep_time = 0;
	} else {
		msm_pm_max_sleep_time = (uint32_t)msm_pm_convert_and_cap_time(
			max_sleep_time_ns, MSM_PM_SLEEP_TICK_LIMIT);

		if (msm_pm_max_sleep_time == 0)
			msm_pm_max_sleep_time = 1;
	}

	if (msm_pm_debug_mask & MSM_PM_DEBUG_SUSPEND)
		pr_info("%s: Requested %lld ns Giving %u sclk ticks\n",
			__func__, max_sleep_time_ns, msm_pm_max_sleep_time);
}
EXPORT_SYMBOL(msm_pm_set_max_sleep_time);


/******************************************************************************
 *
 *****************************************************************************/

static struct msm_rpmrs_limits *msm_pm_idle_rs_limits;

static void msm_pm_swfi(void)
{
	msm_pm_config_hw_before_swfi();
	msm_arch_idle();
}

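/*
 * The L2 cache controller loses its register state across a power collapse,
 * so save it before msm_pm_collapse() and hand the "collapsed" result to
 * l2x0_resume(), presumably so the restore can be skipped when the power
 * never actually went away.
 */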
#ifdef CONFIG_CACHE_L2X0
static inline bool msm_pm_l2x0_power_collapse(void)
{
	bool collapsed = 0;

	l2x0_suspend();
	collapsed = msm_pm_collapse();
	l2x0_resume(collapsed);

	return collapsed;
}
#else
static inline bool msm_pm_l2x0_power_collapse(void)
{
	return msm_pm_collapse();
}
#endif

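/*
 * Core power-collapse sequence: program the SPM for power collapse, point
 * the warm-boot vector at msm_pm_collapse_exit (or msm_secondary_startup
 * for a hotplugged secondary core), save the VFP context, collapse, and on
 * the way back reinitialise the CPU, GIC CPU interface and VFP before
 * handing the SPM back to clock gating.
 */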
static bool msm_pm_spm_power_collapse(
	unsigned int cpu, bool from_idle, bool notify_rpm)
{
	void *entry;
	bool collapsed = 0;
	int ret;

	if (MSM_PM_DEBUG_POWER_COLLAPSE & msm_pm_debug_mask)
		pr_info("CPU%u: %s: notify_rpm %d\n",
			cpu, __func__, (int) notify_rpm);

	ret = msm_spm_set_low_power_mode(
		MSM_SPM_MODE_POWER_COLLAPSE, notify_rpm);
	WARN_ON(ret);

	entry = (!cpu || from_idle) ?
		msm_pm_collapse_exit : msm_secondary_startup;
	msm_pm_boot_config_before_pc(cpu, virt_to_phys(entry));

	if (MSM_PM_DEBUG_RESET_VECTOR & msm_pm_debug_mask)
		pr_info("CPU%u: %s: program vector to %p\n",
			cpu, __func__, entry);

#ifdef CONFIG_VFP
	vfp_flush_context();
#endif

	collapsed = msm_pm_l2x0_power_collapse();

	msm_pm_boot_config_after_pc(cpu);

	if (collapsed) {
#ifdef CONFIG_VFP
		vfp_reinit();
#endif
		cpu_init();
		writel(0xF0, MSM_QGIC_CPU_BASE + GIC_CPU_PRIMASK);
		writel(1, MSM_QGIC_CPU_BASE + GIC_CPU_CTRL);
		local_fiq_enable();
	}

	if (MSM_PM_DEBUG_POWER_COLLAPSE & msm_pm_debug_mask)
		pr_info("CPU%u: %s: msm_pm_collapse returned, collapsed %d\n",
			cpu, __func__, collapsed);

	ret = msm_spm_set_low_power_mode(MSM_SPM_MODE_CLOCK_GATING, false);
	WARN_ON(ret);
	return collapsed;
}

static bool msm_pm_power_collapse_standalone(bool from_idle)
{
	unsigned int cpu = smp_processor_id();
	unsigned int avsdscr_setting;
	bool collapsed;

	avsdscr_setting = avs_get_avsdscr();
	avs_disable();
	collapsed = msm_pm_spm_power_collapse(cpu, from_idle, false);
	avs_reset_delays(avsdscr_setting);
	return collapsed;
}

static bool msm_pm_power_collapse(bool from_idle)
{
	unsigned int cpu = smp_processor_id();
	unsigned long saved_acpuclk_rate;
	unsigned int avsdscr_setting;
	bool collapsed;

	if (MSM_PM_DEBUG_POWER_COLLAPSE & msm_pm_debug_mask)
		pr_info("CPU%u: %s: idle %d\n",
			cpu, __func__, (int)from_idle);

	msm_pm_config_hw_before_power_down();
	if (MSM_PM_DEBUG_POWER_COLLAPSE & msm_pm_debug_mask)
		pr_info("CPU%u: %s: pre power down\n", cpu, __func__);

	avsdscr_setting = avs_get_avsdscr();
	avs_disable();

	if (cpu_online(cpu))
		saved_acpuclk_rate = acpuclk_power_collapse();
	else
		saved_acpuclk_rate = 0;

	if (MSM_PM_DEBUG_CLOCK & msm_pm_debug_mask)
		pr_info("CPU%u: %s: change clock rate (old rate = %lu)\n",
			cpu, __func__, saved_acpuclk_rate);

	collapsed = msm_pm_spm_power_collapse(cpu, from_idle, true);

	if (MSM_PM_DEBUG_CLOCK & msm_pm_debug_mask)
		pr_info("CPU%u: %s: restore clock rate to %lu\n",
			cpu, __func__, saved_acpuclk_rate);
	if (acpuclk_set_rate(cpu, saved_acpuclk_rate, SETRATE_PC) < 0)
		pr_err("CPU%u: %s: failed to restore clock rate(%lu)\n",
			cpu, __func__, saved_acpuclk_rate);

	avs_reset_delays(avsdscr_setting);
	msm_pm_config_hw_after_power_up();
	if (MSM_PM_DEBUG_POWER_COLLAPSE & msm_pm_debug_mask)
		pr_info("CPU%u: %s: post power up\n", cpu, __func__);

	if (MSM_PM_DEBUG_POWER_COLLAPSE & msm_pm_debug_mask)
		pr_info("CPU%u: %s: return\n", cpu, __func__);
	return collapsed;
}

static irqreturn_t msm_pm_rpm_wakeup_interrupt(int irq, void *dev_id)
{
	if (dev_id != &msm_pm_rpm_wakeup_interrupt)
		return IRQ_NONE;

	return IRQ_HANDLED;
}


/******************************************************************************
 * External Idle/Suspend Functions
 *****************************************************************************/

void arch_idle(void)
{
	return;
}

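/*
 * Presumably called from the MSM cpuidle driver before a state is chosen:
 * walk the cpuidle states, decide which sleep modes are currently usable
 * (CPU count, idle wakelocks, outstanding RPM requests, and whether the RPM
 * resource layer can satisfy the latency/sleep-length constraints), and
 * mark the rest with CPUIDLE_FLAG_IGNORE so the governor skips them.
 */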
int msm_pm_idle_prepare(struct cpuidle_device *dev)
{
	uint32_t latency_us;
	uint32_t sleep_us;
	int i;

	latency_us = (uint32_t) pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
	sleep_us = (uint32_t) ktime_to_ns(tick_nohz_get_sleep_length());
	sleep_us = DIV_ROUND_UP(sleep_us, 1000);

	for (i = 0; i < dev->state_count; i++) {
		struct cpuidle_state *state = &dev->states[i];
		enum msm_pm_sleep_mode mode;
		bool allow;
		struct msm_rpmrs_limits *rs_limits = NULL;
		int idx;

		mode = (enum msm_pm_sleep_mode) state->driver_data;
		idx = MSM_PM_MODE(dev->cpu, mode);

		allow = msm_pm_modes[idx].idle_enabled &&
			msm_pm_modes[idx].idle_supported;

		switch (mode) {
		case MSM_PM_SLEEP_MODE_POWER_COLLAPSE:
			if (!allow)
				break;

			if (num_online_cpus() > 1) {
				allow = false;
				break;
			}
#ifdef CONFIG_HAS_WAKELOCK
			if (has_wake_lock(WAKE_LOCK_IDLE)) {
				allow = false;
				break;
			}
#endif
			/* fall through */

		case MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE:
			if (!allow)
				break;

			if (!dev->cpu &&
				msm_rpm_local_request_is_outstanding()) {
				allow = false;
				break;
			}
			/* fall through */

		case MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT:
			if (!allow)
				break;

			rs_limits = msm_rpmrs_lowest_limits(true,
					mode, latency_us, sleep_us);

			if (MSM_PM_DEBUG_IDLE & msm_pm_debug_mask)
				pr_info("CPU%u: %s: %s, latency %uus, "
					"sleep %uus, limit %p\n",
					dev->cpu, __func__, state->desc,
					latency_us, sleep_us, rs_limits);

			if ((MSM_PM_DEBUG_IDLE_LIMITS & msm_pm_debug_mask) &&
					rs_limits)
				pr_info("CPU%u: %s: limit %p: "
					"pxo %d, l2_cache %d, "
					"vdd_mem %d, vdd_dig %d\n",
					dev->cpu, __func__, rs_limits,
					rs_limits->pxo,
					rs_limits->l2_cache,
					rs_limits->vdd_mem,
					rs_limits->vdd_dig);

			if (!rs_limits)
				allow = false;
			break;

		default:
			allow = false;
			break;
		}

		if (MSM_PM_DEBUG_IDLE & msm_pm_debug_mask)
			pr_info("CPU%u: %s: allow %s: %d\n",
				dev->cpu, __func__, state->desc, (int)allow);

		if (allow) {
			state->flags &= ~CPUIDLE_FLAG_IGNORE;
			state->target_residency = 0;
			state->exit_latency = 0;
			state->power_usage = rs_limits->power[dev->cpu];

			if (MSM_PM_SLEEP_MODE_POWER_COLLAPSE == mode)
				msm_pm_idle_rs_limits = rs_limits;
		} else {
			state->flags |= CPUIDLE_FLAG_IGNORE;
		}
	}

	return 0;
}

int msm_pm_idle_enter(enum msm_pm_sleep_mode sleep_mode)
{
	int64_t time;
#ifdef CONFIG_MSM_IDLE_STATS
	int exit_stat;
#endif

	if (MSM_PM_DEBUG_IDLE & msm_pm_debug_mask)
		pr_info("CPU%u: %s: mode %d\n",
			smp_processor_id(), __func__, sleep_mode);

	time = ktime_to_ns(ktime_get());

	switch (sleep_mode) {
	case MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT:
		msm_pm_swfi();
#ifdef CONFIG_MSM_IDLE_STATS
		exit_stat = MSM_PM_STAT_IDLE_WFI;
#endif
		break;

	case MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE:
		msm_pm_power_collapse_standalone(true);
#ifdef CONFIG_MSM_IDLE_STATS
		exit_stat = MSM_PM_STAT_IDLE_STANDALONE_POWER_COLLAPSE;
#endif
		break;

	case MSM_PM_SLEEP_MODE_POWER_COLLAPSE: {
		int64_t timer_expiration = msm_timer_enter_idle();
		bool timer_halted = false;
		uint32_t sleep_delay;
		int ret;
		int notify_rpm =
			(sleep_mode == MSM_PM_SLEEP_MODE_POWER_COLLAPSE);
		int collapsed;

		sleep_delay = (uint32_t) msm_pm_convert_and_cap_time(
			timer_expiration, MSM_PM_SLEEP_TICK_LIMIT);
		if (sleep_delay == 0) /* 0 would mean infinite time */
			sleep_delay = 1;

		if (MSM_PM_DEBUG_IDLE_CLK & msm_pm_debug_mask)
			clock_debug_print_enabled();

		ret = msm_rpmrs_enter_sleep(
			sleep_delay, msm_pm_idle_rs_limits, true, notify_rpm);
		if (!ret) {
			collapsed = msm_pm_power_collapse(true);
			timer_halted = true;

			msm_rpmrs_exit_sleep(msm_pm_idle_rs_limits, true,
				notify_rpm, collapsed);
		}

		msm_timer_exit_idle((int) timer_halted);
#ifdef CONFIG_MSM_IDLE_STATS
		exit_stat = MSM_PM_STAT_IDLE_POWER_COLLAPSE;
#endif
		break;
	}

	default:
		__WARN();
		goto cpuidle_enter_bail;
	}

	time = ktime_to_ns(ktime_get()) - time;
#ifdef CONFIG_MSM_IDLE_STATS
	msm_pm_add_stat(exit_stat, time);
#endif

	do_div(time, 1000);
	return (int) time;

cpuidle_enter_bail:
	return 0;
}

static struct msm_pm_sleep_status_data *msm_pm_slp_sts;

static DEFINE_PER_CPU_SHARED_ALIGNED(enum msm_pm_sleep_mode,
		msm_pm_last_slp_mode);

bool msm_pm_verify_cpu_pc(unsigned int cpu)
{
	enum msm_pm_sleep_mode mode = per_cpu(msm_pm_last_slp_mode, cpu);

	if (msm_pm_slp_sts)
		if ((mode == MSM_PM_SLEEP_MODE_POWER_COLLAPSE) ||
			(mode == MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE))
			return true;

	return false;
}

void msm_pm_cpu_enter_lowpower(unsigned int cpu)
{
	int i;
	bool allow[MSM_PM_SLEEP_MODE_NR];

	for (i = 0; i < MSM_PM_SLEEP_MODE_NR; i++) {
		struct msm_pm_platform_data *mode;

		mode = &msm_pm_modes[MSM_PM_MODE(cpu, i)];
		allow[i] = mode->suspend_supported && mode->suspend_enabled;
	}

	if (MSM_PM_DEBUG_HOTPLUG & msm_pm_debug_mask)
		pr_notice("CPU%u: %s: shutting down cpu\n", cpu, __func__);

	if (allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE]) {
		per_cpu(msm_pm_last_slp_mode, cpu)
			= MSM_PM_SLEEP_MODE_POWER_COLLAPSE;
		msm_pm_power_collapse(false);
	} else if (allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE]) {
		per_cpu(msm_pm_last_slp_mode, cpu)
			= MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE;
		msm_pm_power_collapse_standalone(false);
	} else if (allow[MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT]) {
		per_cpu(msm_pm_last_slp_mode, cpu)
			= MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT;
		msm_pm_swfi();
	} else
		per_cpu(msm_pm_last_slp_mode, cpu) = MSM_PM_SLEEP_MODE_NR;
}

int msm_pm_wait_cpu_shutdown(unsigned int cpu)
{
	int timeout = 10;

	if (!msm_pm_slp_sts)
		return 0;

	while (timeout--) {
		/*
		 * Check for the SPM of the core being hotplugged to set
		 * its sleep state. The SPM sleep state indicates that the
		 * core has been power collapsed.
		 */
		int acc_sts = __raw_readl(msm_pm_slp_sts->base_addr
					+ cpu * msm_pm_slp_sts->cpu_offset);
		mb();

		if (acc_sts & msm_pm_slp_sts->mask)
			return 0;

		usleep(100);
	}
	pr_warn("%s(): Timed out waiting for CPU %u SPM to enter sleep state\n",
		__func__, cpu);
	return -EBUSY;
}

static int msm_pm_enter(suspend_state_t state)
{
	bool allow[MSM_PM_SLEEP_MODE_NR];
	int i;

#ifdef CONFIG_MSM_IDLE_STATS
	int64_t period = 0;
	int64_t time = msm_timer_get_sclk_time(&period);
#endif

	if (MSM_PM_DEBUG_SUSPEND & msm_pm_debug_mask)
		pr_info("%s\n", __func__);

	if (smp_processor_id()) {
		__WARN();
		goto enter_exit;
	}


	for (i = 0; i < MSM_PM_SLEEP_MODE_NR; i++) {
		struct msm_pm_platform_data *mode;

		mode = &msm_pm_modes[MSM_PM_MODE(0, i)];
		allow[i] = mode->suspend_supported && mode->suspend_enabled;
	}

	if (allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE]) {
		struct msm_rpmrs_limits *rs_limits;
		int ret;

		if (MSM_PM_DEBUG_SUSPEND & msm_pm_debug_mask)
			pr_info("%s: power collapse\n", __func__);

		clock_debug_print_enabled();

#ifdef CONFIG_MSM_SLEEP_TIME_OVERRIDE
		if (msm_pm_sleep_time_override > 0) {
			int64_t ns = NSEC_PER_SEC *
				(int64_t) msm_pm_sleep_time_override;
			msm_pm_set_max_sleep_time(ns);
			msm_pm_sleep_time_override = 0;
		}
#endif /* CONFIG_MSM_SLEEP_TIME_OVERRIDE */

		if (MSM_PM_DEBUG_SUSPEND_LIMITS & msm_pm_debug_mask)
			msm_rpmrs_show_resources();

		rs_limits = msm_rpmrs_lowest_limits(false,
			MSM_PM_SLEEP_MODE_POWER_COLLAPSE, -1, -1);

		if ((MSM_PM_DEBUG_SUSPEND_LIMITS & msm_pm_debug_mask) &&
				rs_limits)
			pr_info("%s: limit %p: pxo %d, l2_cache %d, "
				"vdd_mem %d, vdd_dig %d\n",
				__func__, rs_limits,
				rs_limits->pxo, rs_limits->l2_cache,
				rs_limits->vdd_mem, rs_limits->vdd_dig);

		if (rs_limits) {
			ret = msm_rpmrs_enter_sleep(
				msm_pm_max_sleep_time, rs_limits, false, true);
			if (!ret) {
				int collapsed = msm_pm_power_collapse(false);
				msm_rpmrs_exit_sleep(rs_limits, false, true,
						collapsed);
			}
		} else {
			pr_err("%s: cannot find the lowest power limit\n",
				__func__);
		}

#ifdef CONFIG_MSM_IDLE_STATS
		if (time != 0) {
			int64_t end_time = msm_timer_get_sclk_time(NULL);
			if (end_time != 0) {
				time = end_time - time;
				if (time < 0)
					time += period;
			} else
				time = 0;
		}

		msm_pm_add_stat(MSM_PM_STAT_SUSPEND, time);
#endif /* CONFIG_MSM_IDLE_STATS */
	} else if (allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE]) {
		if (MSM_PM_DEBUG_SUSPEND & msm_pm_debug_mask)
			pr_info("%s: standalone power collapse\n", __func__);
		msm_pm_power_collapse_standalone(false);
	} else if (allow[MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT]) {
		if (MSM_PM_DEBUG_SUSPEND & msm_pm_debug_mask)
			pr_info("%s: swfi\n", __func__);
		msm_pm_swfi();
	}


enter_exit:
	if (MSM_PM_DEBUG_SUSPEND & msm_pm_debug_mask)
		pr_info("%s: return\n", __func__);

	return 0;
}

static struct platform_suspend_ops msm_pm_ops = {
	.enter = msm_pm_enter,
	.valid = suspend_valid_only_mem,
};

/******************************************************************************
 * Initialization routine
 *****************************************************************************/
void __init msm_pm_init_sleep_status_data(
	struct msm_pm_sleep_status_data *data)
{
	msm_pm_slp_sts = data;
}

static int __init msm_pm_init(void)
{
	pgd_t *pc_pgd;
	pmd_t *pmd;
	unsigned long pmdval;
	unsigned int cpu;
#ifdef CONFIG_MSM_IDLE_STATS
	struct proc_dir_entry *d_entry;
#endif
	int ret;

	/* Page table for cores to come back up safely. */
	pc_pgd = pgd_alloc(&init_mm);
	if (!pc_pgd)
		return -ENOMEM;

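	/*
	 * msm_pm_collapse_exit() runs with the MMU off until it installs
	 * this page table, so give it an identity section mapping of its
	 * own physical address. Each pmd entry here presumably covers a
	 * 1MB section (half of the 2MB PGDIR), hence the
	 * pmdval + (n << (PGDIR_SHIFT - 1)) step for the adjacent sections.
	 */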
	pmd = pmd_offset(pc_pgd +
		pgd_index(virt_to_phys(msm_pm_collapse_exit)),
		virt_to_phys(msm_pm_collapse_exit));
	pmdval = (virt_to_phys(msm_pm_collapse_exit) & PGDIR_MASK) |
		PMD_TYPE_SECT | PMD_SECT_AP_WRITE;
	pmd[0] = __pmd(pmdval);
	pmd[1] = __pmd(pmdval + (1 << (PGDIR_SHIFT - 1)));

	/* It is remotely possible that the code in msm_pm_collapse_exit()
	 * which turns on the MMU with this mapping is in the
	 * next even-numbered megabyte beyond the
	 * start of msm_pm_collapse_exit().
	 * Map this megabyte in as well.
	 */
	pmd[2] = __pmd(pmdval + (2 << (PGDIR_SHIFT - 1)));
	flush_pmd_entry(pmd);
	msm_pm_pc_pgd = virt_to_phys(pc_pgd);

	ret = request_irq(rpm_cpu0_wakeup_irq,
		msm_pm_rpm_wakeup_interrupt, IRQF_TRIGGER_RISING,
		"pm_drv", msm_pm_rpm_wakeup_interrupt);
	if (ret) {
		pr_err("%s: failed to request irq %u: %d\n",
			__func__, rpm_cpu0_wakeup_irq, ret);
		return ret;
	}

	ret = irq_set_irq_wake(rpm_cpu0_wakeup_irq, 1);
	if (ret) {
		pr_err("%s: failed to set wakeup irq %u: %d\n",
			__func__, rpm_cpu0_wakeup_irq, ret);
		return ret;
	}

#ifdef CONFIG_MSM_IDLE_STATS
	for_each_possible_cpu(cpu) {
		struct msm_pm_time_stats *stats =
			per_cpu(msm_pm_stats, cpu).stats;

		stats[MSM_PM_STAT_REQUESTED_IDLE].name = "idle-request";
		stats[MSM_PM_STAT_REQUESTED_IDLE].first_bucket_time =
			CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;

		stats[MSM_PM_STAT_IDLE_WFI].name = "idle-wfi";
		stats[MSM_PM_STAT_IDLE_WFI].first_bucket_time =
			CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;

		stats[MSM_PM_STAT_IDLE_STANDALONE_POWER_COLLAPSE].name =
			"idle-standalone-power-collapse";
		stats[MSM_PM_STAT_IDLE_STANDALONE_POWER_COLLAPSE].
			first_bucket_time = CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;

		stats[MSM_PM_STAT_IDLE_POWER_COLLAPSE].name =
			"idle-power-collapse";
		stats[MSM_PM_STAT_IDLE_POWER_COLLAPSE].first_bucket_time =
			CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;

		stats[MSM_PM_STAT_SUSPEND].name = "suspend";
		stats[MSM_PM_STAT_SUSPEND].first_bucket_time =
			CONFIG_MSM_SUSPEND_STATS_FIRST_BUCKET;
	}

	d_entry = create_proc_entry("msm_pm_stats",
		S_IRUGO | S_IWUSR | S_IWGRP, NULL);
	if (d_entry) {
		d_entry->read_proc = msm_pm_read_proc;
		d_entry->write_proc = msm_pm_write_proc;
		d_entry->data = NULL;
	}
#endif /* CONFIG_MSM_IDLE_STATS */

	msm_pm_mode_sysfs_add();
	msm_spm_allow_x_cpu_set_vdd(false);

	suspend_set_ops(&msm_pm_ops);
	msm_cpuidle_init();

	return 0;
}

late_initcall(msm_pm_init);