/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/cpuidle.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ktime.h>
#include <linux/pm.h>
#include <linux/pm_qos_params.h>
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/suspend.h>
#include <linux/tick.h>
#include <linux/uaccess.h>
#include <linux/wakelock.h>
#include <mach/msm_iomap.h>
#include <mach/system.h>
#include <asm/cacheflush.h>
#include <asm/hardware/gic.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/hardware/cache-l2x0.h>
#ifdef CONFIG_VFP
#include <asm/vfp.h>
#endif

#include "acpuclock.h"
#include "clock.h"
#include "avs.h"
#include "cpuidle.h"
#include "idle.h"
#include "pm.h"
#include "rpm_resources.h"
#include "scm-boot.h"
#include "spm.h"
#include "timer.h"
#include "qdss.h"
#include "pm-boot.h"

/******************************************************************************
 * Debug Definitions
 *****************************************************************************/

enum {
	MSM_PM_DEBUG_SUSPEND = BIT(0),
	MSM_PM_DEBUG_POWER_COLLAPSE = BIT(1),
	MSM_PM_DEBUG_SUSPEND_LIMITS = BIT(2),
	MSM_PM_DEBUG_CLOCK = BIT(3),
	MSM_PM_DEBUG_RESET_VECTOR = BIT(4),
	MSM_PM_DEBUG_IDLE = BIT(6),
	MSM_PM_DEBUG_IDLE_LIMITS = BIT(7),
	MSM_PM_DEBUG_HOTPLUG = BIT(8),
};

static int msm_pm_debug_mask = 1;
module_param_named(
	debug_mask, msm_pm_debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP
);
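
/*
 * Note: with the S_IWUSR/S_IWGRP permissions above, the mask can usually be
 * changed at runtime through the module parameter in sysfs, e.g.
 *   echo 4 > /sys/module/<module name>/parameters/debug_mask
 * to enable MSM_PM_DEBUG_SUSPEND_LIMITS logging (exact path depends on
 * KBUILD_MODNAME).
 */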


/******************************************************************************
 * Sleep Modes and Parameters
 *****************************************************************************/

static struct msm_pm_platform_data *msm_pm_modes;
static int rpm_cpu0_wakeup_irq;

void __init msm_pm_set_platform_data(
	struct msm_pm_platform_data *data, int count)
{
	BUG_ON(MSM_PM_SLEEP_MODE_NR * num_possible_cpus() > count);
	msm_pm_modes = data;
}

void __init msm_pm_set_rpm_wakeup_irq(unsigned int irq)
{
	rpm_cpu0_wakeup_irq = irq;
}

enum {
	MSM_PM_MODE_ATTR_SUSPEND,
	MSM_PM_MODE_ATTR_IDLE,
	MSM_PM_MODE_ATTR_NR,
};

static char *msm_pm_mode_attr_labels[MSM_PM_MODE_ATTR_NR] = {
	[MSM_PM_MODE_ATTR_SUSPEND] = "suspend_enabled",
	[MSM_PM_MODE_ATTR_IDLE] = "idle_enabled",
};

struct msm_pm_kobj_attribute {
	unsigned int cpu;
	struct kobj_attribute ka;
};

#define GET_CPU_OF_ATTR(attr) \
	(container_of(attr, struct msm_pm_kobj_attribute, ka)->cpu)

struct msm_pm_sysfs_sleep_mode {
	struct kobject *kobj;
	struct attribute_group attr_group;
	struct attribute *attrs[MSM_PM_MODE_ATTR_NR + 1];
	struct msm_pm_kobj_attribute kas[MSM_PM_MODE_ATTR_NR];
};

static char *msm_pm_sleep_mode_labels[MSM_PM_SLEEP_MODE_NR] = {
	[MSM_PM_SLEEP_MODE_POWER_COLLAPSE] = "power_collapse",
	[MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT] = "wfi",
	[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE] =
		"standalone_power_collapse",
};
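
/*
 * Each label above becomes a per-CPU sysfs directory created by
 * msm_pm_mode_sysfs_add_cpu(), e.g. .../modes/cpu0/power_collapse/ holding
 * the suspend_enabled and idle_enabled attribute files (the parent path
 * depends on KBUILD_MODNAME).
 */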

/*
 * Write out the attribute.
 */
static ssize_t msm_pm_mode_attr_show(
	struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	int ret = -EINVAL;
	int i;

	for (i = 0; i < MSM_PM_SLEEP_MODE_NR; i++) {
		struct kernel_param kp;
		unsigned int cpu;
		struct msm_pm_platform_data *mode;

		if (msm_pm_sleep_mode_labels[i] == NULL)
			continue;

		if (strcmp(kobj->name, msm_pm_sleep_mode_labels[i]))
			continue;

		cpu = GET_CPU_OF_ATTR(attr);
		mode = &msm_pm_modes[MSM_PM_MODE(cpu, i)];

		if (!strcmp(attr->attr.name,
			msm_pm_mode_attr_labels[MSM_PM_MODE_ATTR_SUSPEND])) {
			u32 arg = mode->suspend_enabled;
			kp.arg = &arg;
			ret = param_get_ulong(buf, &kp);
		} else if (!strcmp(attr->attr.name,
			msm_pm_mode_attr_labels[MSM_PM_MODE_ATTR_IDLE])) {
			u32 arg = mode->idle_enabled;
			kp.arg = &arg;
			ret = param_get_ulong(buf, &kp);
		}

		break;
	}

	if (ret > 0) {
		strlcat(buf, "\n", PAGE_SIZE);
		ret++;
	}

	return ret;
}

/*
 * Read in the new attribute value.
 */
static ssize_t msm_pm_mode_attr_store(struct kobject *kobj,
	struct kobj_attribute *attr, const char *buf, size_t count)
{
	int ret = -EINVAL;
	int i;

	for (i = 0; i < MSM_PM_SLEEP_MODE_NR; i++) {
		struct kernel_param kp;
		unsigned int cpu;
		struct msm_pm_platform_data *mode;

		if (msm_pm_sleep_mode_labels[i] == NULL)
			continue;

		if (strcmp(kobj->name, msm_pm_sleep_mode_labels[i]))
			continue;

		cpu = GET_CPU_OF_ATTR(attr);
		mode = &msm_pm_modes[MSM_PM_MODE(cpu, i)];

		if (!strcmp(attr->attr.name,
			msm_pm_mode_attr_labels[MSM_PM_MODE_ATTR_SUSPEND])) {
			kp.arg = &mode->suspend_enabled;
			ret = param_set_byte(buf, &kp);
		} else if (!strcmp(attr->attr.name,
			msm_pm_mode_attr_labels[MSM_PM_MODE_ATTR_IDLE])) {
			kp.arg = &mode->idle_enabled;
			ret = param_set_byte(buf, &kp);
		}

		break;
	}

	return ret ? ret : count;
}

/*
 * Add sysfs entries for one cpu.
 */
static int __init msm_pm_mode_sysfs_add_cpu(
	unsigned int cpu, struct kobject *modes_kobj)
{
	char cpu_name[8];
	struct kobject *cpu_kobj;
	struct msm_pm_sysfs_sleep_mode *mode = NULL;
	int i, j, k;
	int ret;

	snprintf(cpu_name, sizeof(cpu_name), "cpu%u", cpu);
	cpu_kobj = kobject_create_and_add(cpu_name, modes_kobj);
	if (!cpu_kobj) {
		pr_err("%s: cannot create %s kobject\n", __func__, cpu_name);
		ret = -ENOMEM;
		goto mode_sysfs_add_cpu_exit;
	}

	for (i = 0; i < MSM_PM_SLEEP_MODE_NR; i++) {
		int idx = MSM_PM_MODE(cpu, i);

		if ((!msm_pm_modes[idx].suspend_supported)
			&& (!msm_pm_modes[idx].idle_supported))
			continue;

		mode = kzalloc(sizeof(*mode), GFP_KERNEL);
		if (!mode) {
			pr_err("%s: cannot allocate memory for attributes\n",
				__func__);
			ret = -ENOMEM;
			goto mode_sysfs_add_cpu_exit;
		}

		mode->kobj = kobject_create_and_add(
				msm_pm_sleep_mode_labels[i], cpu_kobj);
		if (!mode->kobj) {
			pr_err("%s: cannot create kobject\n", __func__);
			ret = -ENOMEM;
			goto mode_sysfs_add_cpu_exit;
		}

		for (k = 0, j = 0; k < MSM_PM_MODE_ATTR_NR; k++) {
			if ((k == MSM_PM_MODE_ATTR_IDLE) &&
				!msm_pm_modes[idx].idle_supported)
				continue;
			if ((k == MSM_PM_MODE_ATTR_SUSPEND) &&
				!msm_pm_modes[idx].suspend_supported)
				continue;
			mode->kas[j].cpu = cpu;
			mode->kas[j].ka.attr.mode = 0644;
			mode->kas[j].ka.show = msm_pm_mode_attr_show;
			mode->kas[j].ka.store = msm_pm_mode_attr_store;
			mode->kas[j].ka.attr.name = msm_pm_mode_attr_labels[k];
			mode->attrs[j] = &mode->kas[j].ka.attr;
			j++;
		}
		mode->attrs[j] = NULL;

		mode->attr_group.attrs = mode->attrs;
		ret = sysfs_create_group(mode->kobj, &mode->attr_group);
		if (ret) {
			pr_err("%s: cannot create kobject attribute group\n",
				__func__);
			goto mode_sysfs_add_cpu_exit;
		}
	}

	ret = 0;

mode_sysfs_add_cpu_exit:
	if (ret) {
		if (mode && mode->kobj)
			kobject_del(mode->kobj);
		kfree(mode);
	}

	return ret;
}

/*
 * Add sysfs entries for the sleep modes.
 */
static int __init msm_pm_mode_sysfs_add(void)
{
	struct kobject *module_kobj;
	struct kobject *modes_kobj;
	unsigned int cpu;
	int ret;

	module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
	if (!module_kobj) {
		pr_err("%s: cannot find kobject for module %s\n",
			__func__, KBUILD_MODNAME);
		ret = -ENOENT;
		goto mode_sysfs_add_exit;
	}

	modes_kobj = kobject_create_and_add("modes", module_kobj);
	if (!modes_kobj) {
		pr_err("%s: cannot create modes kobject\n", __func__);
		ret = -ENOMEM;
		goto mode_sysfs_add_exit;
	}

	for_each_possible_cpu(cpu) {
		ret = msm_pm_mode_sysfs_add_cpu(cpu, modes_kobj);
		if (ret)
			goto mode_sysfs_add_exit;
	}

	ret = 0;

mode_sysfs_add_exit:
	return ret;
}

/******************************************************************************
 * CONFIG_MSM_IDLE_STATS
 *****************************************************************************/

#ifdef CONFIG_MSM_IDLE_STATS
enum msm_pm_time_stats_id {
	MSM_PM_STAT_REQUESTED_IDLE,
	MSM_PM_STAT_IDLE_WFI,
	MSM_PM_STAT_IDLE_STANDALONE_POWER_COLLAPSE,
	MSM_PM_STAT_IDLE_POWER_COLLAPSE,
	MSM_PM_STAT_SUSPEND,
	MSM_PM_STAT_COUNT
};

struct msm_pm_time_stats {
	const char *name;
	int64_t first_bucket_time;
	int bucket[CONFIG_MSM_IDLE_STATS_BUCKET_COUNT];
	int64_t min_time[CONFIG_MSM_IDLE_STATS_BUCKET_COUNT];
	int64_t max_time[CONFIG_MSM_IDLE_STATS_BUCKET_COUNT];
	int count;
	int64_t total_time;
};

struct msm_pm_cpu_time_stats {
	struct msm_pm_time_stats stats[MSM_PM_STAT_COUNT];
};

static DEFINE_SPINLOCK(msm_pm_stats_lock);
static DEFINE_PER_CPU_SHARED_ALIGNED(
	struct msm_pm_cpu_time_stats, msm_pm_stats);

/*
 * Add the given time data to the statistics collection.
 */
static void msm_pm_add_stat(enum msm_pm_time_stats_id id, int64_t t)
{
	unsigned long flags;
	struct msm_pm_time_stats *stats;
	int64_t bt;
	int i;

	spin_lock_irqsave(&msm_pm_stats_lock, flags);
	stats = __get_cpu_var(msm_pm_stats).stats;

	stats[id].total_time += t;
	stats[id].count++;

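	/*
	 * Spread the sample over exponentially sized buckets: bucket i holds
	 * times below first_bucket_time << (i * CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT),
	 * and the last bucket catches everything larger.
	 */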
	bt = t;
	do_div(bt, stats[id].first_bucket_time);

	if (bt < 1ULL << (CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT *
		(CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1)))
		i = DIV_ROUND_UP(fls((uint32_t)bt),
			CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT);
	else
		i = CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1;

	if (i >= CONFIG_MSM_IDLE_STATS_BUCKET_COUNT)
		i = CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1;

	stats[id].bucket[i]++;

	if (t < stats[id].min_time[i] || !stats[id].max_time[i])
		stats[id].min_time[i] = t;
	if (t > stats[id].max_time[i])
		stats[id].max_time[i] = t;

	spin_unlock_irqrestore(&msm_pm_stats_lock, flags);
}

/*
 * Helper wrapper around snprintf() where buf is auto-incremented, size is
 * auto-decremented, and there is no return value.
 *
 * NOTE: buf and size must be l-values (e.g. variables)
 */
#define SNPRINTF(buf, size, format, ...) \
	do { \
		if (size > 0) { \
			int ret; \
			ret = snprintf(buf, size, format, ## __VA_ARGS__); \
			if (ret > size) { \
				buf += size; \
				size = 0; \
			} else { \
				buf += ret; \
				size -= ret; \
			} \
		} \
	} while (0)

/*
 * Write out the power management statistics.
 */
static int msm_pm_read_proc
	(char *page, char **start, off_t off, int count, int *eof, void *data)
{
	unsigned int cpu = off / MSM_PM_STAT_COUNT;
	int id = off % MSM_PM_STAT_COUNT;
	char *p = page;

	if (count < 1024) {
		*start = (char *) 0;
		*eof = 0;
		return 0;
	}

	if (cpu < num_possible_cpus()) {
		unsigned long flags;
		struct msm_pm_time_stats *stats;
		int i;
		int64_t bucket_time;
		int64_t s;
		uint32_t ns;

		spin_lock_irqsave(&msm_pm_stats_lock, flags);
		stats = per_cpu(msm_pm_stats, cpu).stats;

		s = stats[id].total_time;
		ns = do_div(s, NSEC_PER_SEC);
		SNPRINTF(p, count,
			"[cpu %u] %s:\n"
			" count: %7d\n"
			" total_time: %lld.%09u\n",
			cpu, stats[id].name,
			stats[id].count,
			s, ns);

		bucket_time = stats[id].first_bucket_time;
		for (i = 0; i < CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1; i++) {
			s = bucket_time;
			ns = do_div(s, NSEC_PER_SEC);
			SNPRINTF(p, count,
				" <%6lld.%09u: %7d (%lld-%lld)\n",
				s, ns, stats[id].bucket[i],
				stats[id].min_time[i],
				stats[id].max_time[i]);

			bucket_time <<= CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT;
		}

		SNPRINTF(p, count, " >=%6lld.%09u: %7d (%lld-%lld)\n",
			s, ns, stats[id].bucket[i],
			stats[id].min_time[i],
			stats[id].max_time[i]);

		*start = (char *) 1;
		*eof = (off + 1 >= MSM_PM_STAT_COUNT * num_possible_cpus());

		spin_unlock_irqrestore(&msm_pm_stats_lock, flags);
	}

	return p - page;
}
#undef SNPRINTF

#define MSM_PM_STATS_RESET "reset"
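
/*
 * Writing the literal string "reset" to /proc/msm_pm_stats (created in
 * msm_pm_init()) clears all of the per-CPU counters via the handler below.
 */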

/*
 * Reset the power management statistics values.
 */
static int msm_pm_write_proc(struct file *file, const char __user *buffer,
	unsigned long count, void *data)
{
	char buf[sizeof(MSM_PM_STATS_RESET)];
	int ret;
	unsigned long flags;
	unsigned int cpu;

	if (count < strlen(MSM_PM_STATS_RESET)) {
		ret = -EINVAL;
		goto write_proc_failed;
	}

	if (copy_from_user(buf, buffer, strlen(MSM_PM_STATS_RESET))) {
		ret = -EFAULT;
		goto write_proc_failed;
	}

	if (memcmp(buf, MSM_PM_STATS_RESET, strlen(MSM_PM_STATS_RESET))) {
		ret = -EINVAL;
		goto write_proc_failed;
	}

	spin_lock_irqsave(&msm_pm_stats_lock, flags);
	for_each_possible_cpu(cpu) {
		struct msm_pm_time_stats *stats;
		int i;

		stats = per_cpu(msm_pm_stats, cpu).stats;
		for (i = 0; i < MSM_PM_STAT_COUNT; i++) {
			memset(stats[i].bucket,
				0, sizeof(stats[i].bucket));
			memset(stats[i].min_time,
				0, sizeof(stats[i].min_time));
			memset(stats[i].max_time,
				0, sizeof(stats[i].max_time));
			stats[i].count = 0;
			stats[i].total_time = 0;
		}
	}

	spin_unlock_irqrestore(&msm_pm_stats_lock, flags);
	return count;

write_proc_failed:
	return ret;
}
#undef MSM_PM_STATS_RESET
#endif /* CONFIG_MSM_IDLE_STATS */


/******************************************************************************
 * Configure Hardware before/after Low Power Mode
 *****************************************************************************/

/*
 * Configure hardware registers in preparation for Apps power down.
 */
static void msm_pm_config_hw_before_power_down(void)
{
	return;
}

/*
 * Clear hardware registers after Apps powers up.
 */
static void msm_pm_config_hw_after_power_up(void)
{
	return;
}

/*
 * Configure hardware registers in preparation for SWFI.
 */
static void msm_pm_config_hw_before_swfi(void)
{
	return;
}


/******************************************************************************
 * Suspend Max Sleep Time
 *****************************************************************************/

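/*
 * When CONFIG_MSM_SLEEP_TIME_OVERRIDE is set, writing a value (in seconds)
 * to the sleep_time_override module parameter makes the next suspend program
 * a finite wakeup of that length; msm_pm_enter() consumes the value and
 * resets it to zero.
 */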
#ifdef CONFIG_MSM_SLEEP_TIME_OVERRIDE
static int msm_pm_sleep_time_override;
module_param_named(sleep_time_override,
	msm_pm_sleep_time_override, int, S_IRUGO | S_IWUSR | S_IWGRP);
#endif

#define SCLK_HZ (32768)
#define MSM_PM_SLEEP_TICK_LIMIT (0x6DDD000)
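/*
 * 0x6DDD000 ticks of the 32768 Hz sleep clock is roughly 3516 seconds (just
 * under an hour); msm_pm_convert_and_cap_time() caps requested sleep times
 * to this limit.
 */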

static uint32_t msm_pm_max_sleep_time;

/*
 * Convert time from nanoseconds to slow clock ticks, then cap it to the
 * specified limit
 */
static int64_t msm_pm_convert_and_cap_time(int64_t time_ns, int64_t limit)
{
	do_div(time_ns, NSEC_PER_SEC / SCLK_HZ);
	return (time_ns > limit) ? limit : time_ns;
}

/*
 * Set the sleep time for suspend. 0 means infinite sleep time.
 */
void msm_pm_set_max_sleep_time(int64_t max_sleep_time_ns)
{
	if (max_sleep_time_ns == 0) {
		msm_pm_max_sleep_time = 0;
	} else {
		msm_pm_max_sleep_time = (uint32_t)msm_pm_convert_and_cap_time(
			max_sleep_time_ns, MSM_PM_SLEEP_TICK_LIMIT);

		if (msm_pm_max_sleep_time == 0)
			msm_pm_max_sleep_time = 1;
	}

	if (msm_pm_debug_mask & MSM_PM_DEBUG_SUSPEND)
		pr_info("%s: Requested %lld ns Giving %u sclk ticks\n",
			__func__, max_sleep_time_ns, msm_pm_max_sleep_time);
}
EXPORT_SYMBOL(msm_pm_set_max_sleep_time);


/******************************************************************************
 *
 *****************************************************************************/

static struct msm_rpmrs_limits *msm_pm_idle_rs_limits;

static void msm_pm_swfi(void)
{
	msm_pm_config_hw_before_swfi();
	msm_arch_idle();
}

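/*
 * If the shared L2 cache controller (L2X0/PL310) can lose state across a
 * power collapse, it is suspended before and resumed after the collapse;
 * the wrappers below do that only when CONFIG_CACHE_L2X0 is enabled,
 * otherwise the CPU collapses directly.
 */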
#ifdef CONFIG_CACHE_L2X0
static inline bool msm_pm_l2x0_power_collapse(void)
{
	bool collapsed = 0;

	l2x0_suspend();
	collapsed = msm_pm_collapse();
	l2x0_resume(collapsed);

	return collapsed;
}
#else
static inline bool msm_pm_l2x0_power_collapse(void)
{
	return msm_pm_collapse();
}
#endif

static bool msm_pm_spm_power_collapse(
	unsigned int cpu, bool from_idle, bool notify_rpm)
{
	void *entry;
	bool collapsed = 0;
	int ret;

	if (MSM_PM_DEBUG_POWER_COLLAPSE & msm_pm_debug_mask)
		pr_info("CPU%u: %s: notify_rpm %d\n",
			cpu, __func__, (int) notify_rpm);

	ret = msm_spm_set_low_power_mode(
		MSM_SPM_MODE_POWER_COLLAPSE, notify_rpm);
	WARN_ON(ret);

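	/*
	 * CPU0, and any CPU that collapsed from cpuidle, warm-boots straight
	 * into msm_pm_collapse_exit; a CPU that was hot-unplugged comes back
	 * through the normal secondary startup path instead.
	 */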
	entry = (!cpu || from_idle) ?
		msm_pm_collapse_exit : msm_secondary_startup;
	msm_pm_boot_config_before_pc(cpu, virt_to_phys(entry));

	if (MSM_PM_DEBUG_RESET_VECTOR & msm_pm_debug_mask)
		pr_info("CPU%u: %s: program vector to %p\n",
			cpu, __func__, entry);

#ifdef CONFIG_VFP
	vfp_flush_context();
#endif

	collapsed = msm_pm_l2x0_power_collapse();

	msm_pm_boot_config_after_pc(cpu);

	if (collapsed) {
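		/*
		 * Power was actually lost: restore the per-CPU state that
		 * went with it -- VFP context, banked CPU registers via
		 * cpu_init(), the GIC CPU interface priority mask and enable
		 * bit, and FIQs.
		 */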
#ifdef CONFIG_VFP
		vfp_reinit();
#endif
		cpu_init();
		writel(0xF0, MSM_QGIC_CPU_BASE + GIC_CPU_PRIMASK);
		writel(1, MSM_QGIC_CPU_BASE + GIC_CPU_CTRL);
		local_fiq_enable();
	}

	if (MSM_PM_DEBUG_POWER_COLLAPSE & msm_pm_debug_mask)
		pr_info("CPU%u: %s: msm_pm_collapse returned, collapsed %d\n",
			cpu, __func__, collapsed);

	ret = msm_spm_set_low_power_mode(MSM_SPM_MODE_CLOCK_GATING, false);
	WARN_ON(ret);
	return collapsed;
}

static bool msm_pm_power_collapse_standalone(bool from_idle)
{
	unsigned int cpu = smp_processor_id();
	unsigned int avsdscr_setting;
	bool collapsed;

	avsdscr_setting = avs_get_avsdscr();
	avs_disable();
	collapsed = msm_pm_spm_power_collapse(cpu, from_idle, false);
	avs_reset_delays(avsdscr_setting);
	return collapsed;
}

static bool msm_pm_power_collapse(bool from_idle)
{
	unsigned int cpu = smp_processor_id();
	unsigned long saved_acpuclk_rate;
	unsigned int avsdscr_setting;
	bool collapsed;

	if (MSM_PM_DEBUG_POWER_COLLAPSE & msm_pm_debug_mask)
		pr_info("CPU%u: %s: idle %d\n",
			cpu, __func__, (int)from_idle);

	msm_pm_config_hw_before_power_down();
	if (MSM_PM_DEBUG_POWER_COLLAPSE & msm_pm_debug_mask)
		pr_info("CPU%u: %s: pre power down\n", cpu, __func__);

	avsdscr_setting = avs_get_avsdscr();
	avs_disable();

	if (cpu_online(cpu))
		saved_acpuclk_rate = acpuclk_power_collapse();
	else
		saved_acpuclk_rate = 0;

	if (MSM_PM_DEBUG_CLOCK & msm_pm_debug_mask)
		pr_info("CPU%u: %s: change clock rate (old rate = %lu)\n",
			cpu, __func__, saved_acpuclk_rate);

	collapsed = msm_pm_spm_power_collapse(cpu, from_idle, true);

	if (MSM_PM_DEBUG_CLOCK & msm_pm_debug_mask)
		pr_info("CPU%u: %s: restore clock rate to %lu\n",
			cpu, __func__, saved_acpuclk_rate);
	if (acpuclk_set_rate(cpu, saved_acpuclk_rate, SETRATE_PC) < 0)
		pr_err("CPU%u: %s: failed to restore clock rate(%lu)\n",
			cpu, __func__, saved_acpuclk_rate);

	avs_reset_delays(avsdscr_setting);
	msm_pm_config_hw_after_power_up();
	if (MSM_PM_DEBUG_POWER_COLLAPSE & msm_pm_debug_mask)
		pr_info("CPU%u: %s: post power up\n", cpu, __func__);

	if (MSM_PM_DEBUG_POWER_COLLAPSE & msm_pm_debug_mask)
		pr_info("CPU%u: %s: return\n", cpu, __func__);
	return collapsed;
}

static irqreturn_t msm_pm_rpm_wakeup_interrupt(int irq, void *dev_id)
{
	if (dev_id != &msm_pm_rpm_wakeup_interrupt)
		return IRQ_NONE;

	return IRQ_HANDLED;
}


/******************************************************************************
 * External Idle/Suspend Functions
 *****************************************************************************/

void arch_idle(void)
{
	return;
}

int msm_pm_idle_prepare(struct cpuidle_device *dev)
{
	uint32_t latency_us;
	uint32_t sleep_us;
	int i;

	latency_us = (uint32_t) pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
	sleep_us = (uint32_t) ktime_to_ns(tick_nohz_get_sleep_length());
	sleep_us = DIV_ROUND_UP(sleep_us, 1000);

	for (i = 0; i < dev->state_count; i++) {
		struct cpuidle_state *state = &dev->states[i];
		enum msm_pm_sleep_mode mode;
		bool allow;
		struct msm_rpmrs_limits *rs_limits = NULL;
		int idx;

		mode = (enum msm_pm_sleep_mode) state->driver_data;
		idx = MSM_PM_MODE(dev->cpu, mode);

		allow = msm_pm_modes[idx].idle_enabled &&
			msm_pm_modes[idx].idle_supported;

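		/*
		 * The cases below intentionally fall through: full power
		 * collapse must also pass the online-CPU and idle-wakelock
		 * checks, standalone power collapse the outstanding RPM
		 * request check on CPU0, and every mode needs an RPM resource
		 * limit from msm_rpmrs_lowest_limits() to be allowed.
		 */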
		switch (mode) {
		case MSM_PM_SLEEP_MODE_POWER_COLLAPSE:
			if (!allow)
				break;

			if (num_online_cpus() > 1) {
				allow = false;
				break;
			}
#ifdef CONFIG_HAS_WAKELOCK
			if (has_wake_lock(WAKE_LOCK_IDLE)) {
				allow = false;
				break;
			}
#endif
			/* fall through */

		case MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE:
			if (!allow)
				break;

			if (!dev->cpu &&
				msm_rpm_local_request_is_outstanding()) {
				allow = false;
				break;
			}
			/* fall through */

		case MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT:
			if (!allow)
				break;

			rs_limits = msm_rpmrs_lowest_limits(true,
					mode, latency_us, sleep_us);

			if (MSM_PM_DEBUG_IDLE & msm_pm_debug_mask)
				pr_info("CPU%u: %s: %s, latency %uus, "
					"sleep %uus, limit %p\n",
					dev->cpu, __func__, state->desc,
					latency_us, sleep_us, rs_limits);

			if ((MSM_PM_DEBUG_IDLE_LIMITS & msm_pm_debug_mask) &&
					rs_limits)
				pr_info("CPU%u: %s: limit %p: "
					"pxo %d, l2_cache %d, "
					"vdd_mem %d, vdd_dig %d\n",
					dev->cpu, __func__, rs_limits,
					rs_limits->pxo,
					rs_limits->l2_cache,
					rs_limits->vdd_mem,
					rs_limits->vdd_dig);

			if (!rs_limits)
				allow = false;
			break;

		default:
			allow = false;
			break;
		}

		if (MSM_PM_DEBUG_IDLE & msm_pm_debug_mask)
			pr_info("CPU%u: %s: allow %s: %d\n",
				dev->cpu, __func__, state->desc, (int)allow);

		if (allow) {
			state->flags &= ~CPUIDLE_FLAG_IGNORE;
			state->target_residency = 0;
			state->exit_latency = 0;
			state->power_usage = rs_limits->power[dev->cpu];

			if (MSM_PM_SLEEP_MODE_POWER_COLLAPSE == mode)
				msm_pm_idle_rs_limits = rs_limits;
		} else {
			state->flags |= CPUIDLE_FLAG_IGNORE;
		}
	}

	return 0;
}

int msm_pm_idle_enter(enum msm_pm_sleep_mode sleep_mode)
{
	int64_t time;
#ifdef CONFIG_MSM_IDLE_STATS
	int exit_stat;
#endif

	if (MSM_PM_DEBUG_IDLE & msm_pm_debug_mask)
		pr_info("CPU%u: %s: mode %d\n",
			smp_processor_id(), __func__, sleep_mode);

	time = ktime_to_ns(ktime_get());

	switch (sleep_mode) {
	case MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT:
		msm_pm_swfi();
#ifdef CONFIG_MSM_IDLE_STATS
		exit_stat = MSM_PM_STAT_IDLE_WFI;
#endif
		break;

	case MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE:
		msm_pm_power_collapse_standalone(true);
#ifdef CONFIG_MSM_IDLE_STATS
		exit_stat = MSM_PM_STAT_IDLE_STANDALONE_POWER_COLLAPSE;
#endif
		break;

	case MSM_PM_SLEEP_MODE_POWER_COLLAPSE: {
		int64_t timer_expiration = msm_timer_enter_idle();
		bool timer_halted = false;
		uint32_t sleep_delay;
		int ret;
		int notify_rpm =
			(sleep_mode == MSM_PM_SLEEP_MODE_POWER_COLLAPSE);
		int collapsed;

		sleep_delay = (uint32_t) msm_pm_convert_and_cap_time(
			timer_expiration, MSM_PM_SLEEP_TICK_LIMIT);
		if (sleep_delay == 0) /* 0 would mean infinite time */
			sleep_delay = 1;

		ret = msm_rpmrs_enter_sleep(
			sleep_delay, msm_pm_idle_rs_limits, true, notify_rpm);
		if (!ret) {
			collapsed = msm_pm_power_collapse(true);
			timer_halted = true;

			msm_rpmrs_exit_sleep(msm_pm_idle_rs_limits, true,
					notify_rpm, collapsed);
		}

		msm_timer_exit_idle((int) timer_halted);
#ifdef CONFIG_MSM_IDLE_STATS
		exit_stat = MSM_PM_STAT_IDLE_POWER_COLLAPSE;
#endif
		break;
	}

	default:
		__WARN();
		goto cpuidle_enter_bail;
	}

	time = ktime_to_ns(ktime_get()) - time;
#ifdef CONFIG_MSM_IDLE_STATS
	msm_pm_add_stat(exit_stat, time);
#endif

	do_div(time, 1000);
	return (int) time;

cpuidle_enter_bail:
	return 0;
}

void msm_pm_cpu_enter_lowpower(unsigned int cpu)
{
	int i;
	bool allow[MSM_PM_SLEEP_MODE_NR];

	for (i = 0; i < MSM_PM_SLEEP_MODE_NR; i++) {
		struct msm_pm_platform_data *mode;

		mode = &msm_pm_modes[MSM_PM_MODE(cpu, i)];
		allow[i] = mode->suspend_supported && mode->suspend_enabled;
	}

	if (MSM_PM_DEBUG_HOTPLUG & msm_pm_debug_mask)
		pr_notice("CPU%u: %s: shutting down cpu\n", cpu, __func__);

	if (allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE])
		msm_pm_power_collapse(false);
	else if (allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE])
		msm_pm_power_collapse_standalone(false);
	else if (allow[MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT])
		msm_pm_swfi();
}

static int msm_pm_enter(suspend_state_t state)
{
	bool allow[MSM_PM_SLEEP_MODE_NR];
	int i;

#ifdef CONFIG_MSM_IDLE_STATS
	int64_t period = 0;
	int64_t time = msm_timer_get_sclk_time(&period);
#endif

	if (MSM_PM_DEBUG_SUSPEND & msm_pm_debug_mask)
		pr_info("%s\n", __func__);

	if (smp_processor_id()) {
		__WARN();
		goto enter_exit;
	}

	for (i = 0; i < MSM_PM_SLEEP_MODE_NR; i++) {
		struct msm_pm_platform_data *mode;

		mode = &msm_pm_modes[MSM_PM_MODE(0, i)];
		allow[i] = mode->suspend_supported && mode->suspend_enabled;
	}

	if (allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE]) {
		struct msm_rpmrs_limits *rs_limits;
		int ret;

		if (MSM_PM_DEBUG_SUSPEND & msm_pm_debug_mask)
			pr_info("%s: power collapse\n", __func__);

		clock_debug_print_enabled();

#ifdef CONFIG_MSM_SLEEP_TIME_OVERRIDE
		if (msm_pm_sleep_time_override > 0) {
			int64_t ns = NSEC_PER_SEC *
				(int64_t) msm_pm_sleep_time_override;
			msm_pm_set_max_sleep_time(ns);
			msm_pm_sleep_time_override = 0;
		}
#endif /* CONFIG_MSM_SLEEP_TIME_OVERRIDE */

		if (MSM_PM_DEBUG_SUSPEND_LIMITS & msm_pm_debug_mask)
			msm_rpmrs_show_resources();

		rs_limits = msm_rpmrs_lowest_limits(false,
				MSM_PM_SLEEP_MODE_POWER_COLLAPSE, -1, -1);

		if ((MSM_PM_DEBUG_SUSPEND_LIMITS & msm_pm_debug_mask) &&
				rs_limits)
			pr_info("%s: limit %p: pxo %d, l2_cache %d, "
				"vdd_mem %d, vdd_dig %d\n",
				__func__, rs_limits,
				rs_limits->pxo, rs_limits->l2_cache,
				rs_limits->vdd_mem, rs_limits->vdd_dig);

		if (rs_limits) {
			ret = msm_rpmrs_enter_sleep(
				msm_pm_max_sleep_time, rs_limits, false, true);
			if (!ret) {
				int collapsed = msm_pm_power_collapse(false);
				msm_rpmrs_exit_sleep(rs_limits, false, true,
						collapsed);
			}
		} else {
			pr_err("%s: cannot find the lowest power limit\n",
				__func__);
		}

#ifdef CONFIG_MSM_IDLE_STATS
		if (time != 0) {
			int64_t end_time = msm_timer_get_sclk_time(NULL);
			if (end_time != 0) {
				time = end_time - time;
				if (time < 0)
					time += period;
			} else
				time = 0;
		}

		msm_pm_add_stat(MSM_PM_STAT_SUSPEND, time);
#endif /* CONFIG_MSM_IDLE_STATS */
	} else if (allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE]) {
		if (MSM_PM_DEBUG_SUSPEND & msm_pm_debug_mask)
			pr_info("%s: standalone power collapse\n", __func__);
		msm_pm_power_collapse_standalone(false);
	} else if (allow[MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT]) {
		if (MSM_PM_DEBUG_SUSPEND & msm_pm_debug_mask)
			pr_info("%s: swfi\n", __func__);
		msm_pm_swfi();
	}

enter_exit:
	if (MSM_PM_DEBUG_SUSPEND & msm_pm_debug_mask)
		pr_info("%s: return\n", __func__);

	return 0;
}

static struct platform_suspend_ops msm_pm_ops = {
	.enter = msm_pm_enter,
	.valid = suspend_valid_only_mem,
};


/******************************************************************************
 * Initialization routine
 *****************************************************************************/

static int __init msm_pm_init(void)
{
	pgd_t *pc_pgd;
	pmd_t *pmd;
	unsigned long pmdval;
	unsigned int cpu;
#ifdef CONFIG_MSM_IDLE_STATS
	struct proc_dir_entry *d_entry;
#endif
	int ret;

	/* Page table for cores to come back up safely. */
	pc_pgd = pgd_alloc(&init_mm);
	if (!pc_pgd)
		return -ENOMEM;

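	/*
	 * Create a 1:1 (identity) section mapping for the code that contains
	 * msm_pm_collapse_exit(), so the warm-boot path can turn the MMU on
	 * while still executing from physical addresses.
	 */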
	pmd = pmd_offset(pc_pgd +
		pgd_index(virt_to_phys(msm_pm_collapse_exit)),
		virt_to_phys(msm_pm_collapse_exit));
	pmdval = (virt_to_phys(msm_pm_collapse_exit) & PGDIR_MASK) |
		PMD_TYPE_SECT | PMD_SECT_AP_WRITE;
	pmd[0] = __pmd(pmdval);
	pmd[1] = __pmd(pmdval + (1 << (PGDIR_SHIFT - 1)));

	/* It is remotely possible that the code in msm_pm_collapse_exit()
	 * which turns on the MMU with this mapping is in the
	 * next even-numbered megabyte beyond the
	 * start of msm_pm_collapse_exit().
	 * Map this megabyte in as well.
	 */
	pmd[2] = __pmd(pmdval + (2 << (PGDIR_SHIFT - 1)));
	flush_pmd_entry(pmd);
	msm_pm_pc_pgd = virt_to_phys(pc_pgd);

	ret = request_irq(rpm_cpu0_wakeup_irq,
		msm_pm_rpm_wakeup_interrupt, IRQF_TRIGGER_RISING,
		"pm_drv", msm_pm_rpm_wakeup_interrupt);
	if (ret) {
		pr_err("%s: failed to request irq %u: %d\n",
			__func__, rpm_cpu0_wakeup_irq, ret);
		return ret;
	}

	ret = irq_set_irq_wake(rpm_cpu0_wakeup_irq, 1);
	if (ret) {
		pr_err("%s: failed to set wakeup irq %u: %d\n",
			__func__, rpm_cpu0_wakeup_irq, ret);
		return ret;
	}

#ifdef CONFIG_MSM_IDLE_STATS
	for_each_possible_cpu(cpu) {
		struct msm_pm_time_stats *stats =
			per_cpu(msm_pm_stats, cpu).stats;

		stats[MSM_PM_STAT_REQUESTED_IDLE].name = "idle-request";
		stats[MSM_PM_STAT_REQUESTED_IDLE].first_bucket_time =
			CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;

		stats[MSM_PM_STAT_IDLE_WFI].name = "idle-wfi";
		stats[MSM_PM_STAT_IDLE_WFI].first_bucket_time =
			CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;

		stats[MSM_PM_STAT_IDLE_STANDALONE_POWER_COLLAPSE].name =
			"idle-standalone-power-collapse";
		stats[MSM_PM_STAT_IDLE_STANDALONE_POWER_COLLAPSE].
			first_bucket_time = CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;

		stats[MSM_PM_STAT_IDLE_POWER_COLLAPSE].name =
			"idle-power-collapse";
		stats[MSM_PM_STAT_IDLE_POWER_COLLAPSE].first_bucket_time =
			CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;

		stats[MSM_PM_STAT_SUSPEND].name = "suspend";
		stats[MSM_PM_STAT_SUSPEND].first_bucket_time =
			CONFIG_MSM_SUSPEND_STATS_FIRST_BUCKET;
	}

	d_entry = create_proc_entry("msm_pm_stats",
		S_IRUGO | S_IWUSR | S_IWGRP, NULL);
	if (d_entry) {
		d_entry->read_proc = msm_pm_read_proc;
		d_entry->write_proc = msm_pm_write_proc;
		d_entry->data = NULL;
	}
#endif /* CONFIG_MSM_IDLE_STATS */

	msm_pm_mode_sysfs_add();
	msm_spm_allow_x_cpu_set_vdd(false);

	suspend_set_ops(&msm_pm_ops);
	msm_cpuidle_init();

	return 0;
}

late_initcall(msm_pm_init);