/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/cpuidle.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ktime.h>
#include <linux/pm.h>
#include <linux/pm_qos_params.h>
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/suspend.h>
#include <linux/tick.h>
#include <linux/uaccess.h>
#include <linux/wakelock.h>
#include <linux/delay.h>
#include <mach/msm_iomap.h>
#include <mach/system.h>
#include <asm/cacheflush.h>
#include <asm/hardware/gic.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/hardware/cache-l2x0.h>
#ifdef CONFIG_VFP
#include <asm/vfp.h>
#endif

#include "acpuclock.h"
#include "clock.h"
#include "avs.h"
#include <mach/cpuidle.h>
#include "idle.h"
#include "pm.h"
#include "rpm_resources.h"
#include "scm-boot.h"
#include "spm.h"
#include "timer.h"
#include "pm-boot.h"

/******************************************************************************
 * Debug Definitions
 *****************************************************************************/


enum {
	MSM_PM_DEBUG_SUSPEND = BIT(0),
	MSM_PM_DEBUG_POWER_COLLAPSE = BIT(1),
	MSM_PM_DEBUG_SUSPEND_LIMITS = BIT(2),
	MSM_PM_DEBUG_CLOCK = BIT(3),
	MSM_PM_DEBUG_RESET_VECTOR = BIT(4),
	MSM_PM_DEBUG_IDLE_CLK = BIT(5),
	MSM_PM_DEBUG_IDLE = BIT(6),
	MSM_PM_DEBUG_IDLE_LIMITS = BIT(7),
	MSM_PM_DEBUG_HOTPLUG = BIT(8),
};

static int msm_pm_debug_mask = 1;
module_param_named(
	debug_mask, msm_pm_debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP
);
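
/*
 * Illustrative note (not part of the original file): since debug_mask is a
 * writable module parameter, the verbose logging below can typically be
 * toggled at run time, e.g.
 *
 *	echo 0x3 > /sys/module/<this module>/parameters/debug_mask
 *
 * to enable MSM_PM_DEBUG_SUSPEND | MSM_PM_DEBUG_POWER_COLLAPSE. The exact
 * sysfs path depends on the name this file is built under.
 */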


/******************************************************************************
 * Sleep Modes and Parameters
 *****************************************************************************/

static struct msm_pm_platform_data *msm_pm_modes;
static int rpm_cpu0_wakeup_irq;

void __init msm_pm_set_platform_data(
	struct msm_pm_platform_data *data, int count)
{
	BUG_ON(MSM_PM_SLEEP_MODE_NR * num_possible_cpus() > count);
	msm_pm_modes = data;
}

void __init msm_pm_set_rpm_wakeup_irq(unsigned int irq)
{
	rpm_cpu0_wakeup_irq = irq;
}

enum {
	MSM_PM_MODE_ATTR_SUSPEND,
	MSM_PM_MODE_ATTR_IDLE,
	MSM_PM_MODE_ATTR_NR,
};

static char *msm_pm_mode_attr_labels[MSM_PM_MODE_ATTR_NR] = {
	[MSM_PM_MODE_ATTR_SUSPEND] = "suspend_enabled",
	[MSM_PM_MODE_ATTR_IDLE] = "idle_enabled",
};

struct msm_pm_kobj_attribute {
	unsigned int cpu;
	struct kobj_attribute ka;
};

#define GET_CPU_OF_ATTR(attr) \
	(container_of(attr, struct msm_pm_kobj_attribute, ka)->cpu)

struct msm_pm_sysfs_sleep_mode {
	struct kobject *kobj;
	struct attribute_group attr_group;
	struct attribute *attrs[MSM_PM_MODE_ATTR_NR + 1];
	struct msm_pm_kobj_attribute kas[MSM_PM_MODE_ATTR_NR];
};

static char *msm_pm_sleep_mode_labels[MSM_PM_SLEEP_MODE_NR] = {
	[MSM_PM_SLEEP_MODE_POWER_COLLAPSE] = "power_collapse",
	[MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT] = "wfi",
	[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE] =
		"standalone_power_collapse",
};

/*
 * Write out the attribute.
 */
static ssize_t msm_pm_mode_attr_show(
	struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	int ret = -EINVAL;
	int i;

	for (i = 0; i < MSM_PM_SLEEP_MODE_NR; i++) {
		struct kernel_param kp;
		unsigned int cpu;
		struct msm_pm_platform_data *mode;

		if (msm_pm_sleep_mode_labels[i] == NULL)
			continue;

		if (strcmp(kobj->name, msm_pm_sleep_mode_labels[i]))
			continue;

		cpu = GET_CPU_OF_ATTR(attr);
		mode = &msm_pm_modes[MSM_PM_MODE(cpu, i)];

		if (!strcmp(attr->attr.name,
			msm_pm_mode_attr_labels[MSM_PM_MODE_ATTR_SUSPEND])) {
			u32 arg = mode->suspend_enabled;
			kp.arg = &arg;
			ret = param_get_ulong(buf, &kp);
		} else if (!strcmp(attr->attr.name,
			msm_pm_mode_attr_labels[MSM_PM_MODE_ATTR_IDLE])) {
			u32 arg = mode->idle_enabled;
			kp.arg = &arg;
			ret = param_get_ulong(buf, &kp);
		}

		break;
	}

	if (ret > 0) {
		strlcat(buf, "\n", PAGE_SIZE);
		ret++;
	}

	return ret;
}

/*
 * Read in the new attribute value.
 */
static ssize_t msm_pm_mode_attr_store(struct kobject *kobj,
	struct kobj_attribute *attr, const char *buf, size_t count)
{
	int ret = -EINVAL;
	int i;

	for (i = 0; i < MSM_PM_SLEEP_MODE_NR; i++) {
		struct kernel_param kp;
		unsigned int cpu;
		struct msm_pm_platform_data *mode;

		if (msm_pm_sleep_mode_labels[i] == NULL)
			continue;

		if (strcmp(kobj->name, msm_pm_sleep_mode_labels[i]))
			continue;

		cpu = GET_CPU_OF_ATTR(attr);
		mode = &msm_pm_modes[MSM_PM_MODE(cpu, i)];

		if (!strcmp(attr->attr.name,
			msm_pm_mode_attr_labels[MSM_PM_MODE_ATTR_SUSPEND])) {
			kp.arg = &mode->suspend_enabled;
			ret = param_set_byte(buf, &kp);
		} else if (!strcmp(attr->attr.name,
			msm_pm_mode_attr_labels[MSM_PM_MODE_ATTR_IDLE])) {
			kp.arg = &mode->idle_enabled;
			ret = param_set_byte(buf, &kp);
		}

		break;
	}

	return ret ? ret : count;
}

/*
 * Add sysfs entries for one cpu.
 */
static int __init msm_pm_mode_sysfs_add_cpu(
	unsigned int cpu, struct kobject *modes_kobj)
{
	char cpu_name[8];
	struct kobject *cpu_kobj;
	struct msm_pm_sysfs_sleep_mode *mode = NULL;
	int i, j, k;
	int ret;

	snprintf(cpu_name, sizeof(cpu_name), "cpu%u", cpu);
	cpu_kobj = kobject_create_and_add(cpu_name, modes_kobj);
	if (!cpu_kobj) {
		pr_err("%s: cannot create %s kobject\n", __func__, cpu_name);
		ret = -ENOMEM;
		goto mode_sysfs_add_cpu_exit;
	}

	for (i = 0; i < MSM_PM_SLEEP_MODE_NR; i++) {
		int idx = MSM_PM_MODE(cpu, i);

		if ((!msm_pm_modes[idx].suspend_supported)
			&& (!msm_pm_modes[idx].idle_supported))
			continue;

		mode = kzalloc(sizeof(*mode), GFP_KERNEL);
		if (!mode) {
			pr_err("%s: cannot allocate memory for attributes\n",
				__func__);
			ret = -ENOMEM;
			goto mode_sysfs_add_cpu_exit;
		}

		mode->kobj = kobject_create_and_add(
			msm_pm_sleep_mode_labels[i], cpu_kobj);
		if (!mode->kobj) {
			pr_err("%s: cannot create kobject\n", __func__);
			ret = -ENOMEM;
			goto mode_sysfs_add_cpu_exit;
		}

		for (k = 0, j = 0; k < MSM_PM_MODE_ATTR_NR; k++) {
			if ((k == MSM_PM_MODE_ATTR_IDLE) &&
				!msm_pm_modes[idx].idle_supported)
				continue;
			if ((k == MSM_PM_MODE_ATTR_SUSPEND) &&
				!msm_pm_modes[idx].suspend_supported)
				continue;
			mode->kas[j].cpu = cpu;
			mode->kas[j].ka.attr.mode = 0644;
			mode->kas[j].ka.show = msm_pm_mode_attr_show;
			mode->kas[j].ka.store = msm_pm_mode_attr_store;
			mode->kas[j].ka.attr.name = msm_pm_mode_attr_labels[k];
			mode->attrs[j] = &mode->kas[j].ka.attr;
			j++;
		}
		mode->attrs[j] = NULL;

		mode->attr_group.attrs = mode->attrs;
		ret = sysfs_create_group(mode->kobj, &mode->attr_group);
		if (ret) {
			pr_err("%s: cannot create kobject attribute group\n",
				__func__);
			goto mode_sysfs_add_cpu_exit;
		}
	}

	ret = 0;

mode_sysfs_add_cpu_exit:
	if (ret) {
		if (mode && mode->kobj)
			kobject_del(mode->kobj);
		kfree(mode);
	}

	return ret;
}

/*
 * Add sysfs entries for the sleep modes.
 */
static int __init msm_pm_mode_sysfs_add(void)
{
	struct kobject *module_kobj;
	struct kobject *modes_kobj;
	unsigned int cpu;
	int ret;

	module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
	if (!module_kobj) {
		pr_err("%s: cannot find kobject for module %s\n",
			__func__, KBUILD_MODNAME);
		ret = -ENOENT;
		goto mode_sysfs_add_exit;
	}

	modes_kobj = kobject_create_and_add("modes", module_kobj);
	if (!modes_kobj) {
		pr_err("%s: cannot create modes kobject\n", __func__);
		ret = -ENOMEM;
		goto mode_sysfs_add_exit;
	}

	for_each_possible_cpu(cpu) {
		ret = msm_pm_mode_sysfs_add_cpu(cpu, modes_kobj);
		if (ret)
			goto mode_sysfs_add_exit;
	}

	ret = 0;

mode_sysfs_add_exit:
	return ret;
}
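
/*
 * Illustrative note: the two functions above build a sysfs hierarchy of the
 * form modes/cpu<N>/<sleep mode>/{idle_enabled,suspend_enabled} under this
 * module's kobject. For example, writing "1" to
 * .../modes/cpu0/power_collapse/idle_enabled lets cpuidle use full power
 * collapse on CPU0; msm_pm_mode_attr_store() parses the value with
 * param_set_byte().
 */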

/******************************************************************************
 * CONFIG_MSM_IDLE_STATS
 *****************************************************************************/

#ifdef CONFIG_MSM_IDLE_STATS
enum msm_pm_time_stats_id {
	MSM_PM_STAT_REQUESTED_IDLE,
	MSM_PM_STAT_IDLE_WFI,
	MSM_PM_STAT_IDLE_STANDALONE_POWER_COLLAPSE,
	MSM_PM_STAT_IDLE_POWER_COLLAPSE,
	MSM_PM_STAT_SUSPEND,
	MSM_PM_STAT_COUNT
};

struct msm_pm_time_stats {
	const char *name;
	int64_t first_bucket_time;
	int bucket[CONFIG_MSM_IDLE_STATS_BUCKET_COUNT];
	int64_t min_time[CONFIG_MSM_IDLE_STATS_BUCKET_COUNT];
	int64_t max_time[CONFIG_MSM_IDLE_STATS_BUCKET_COUNT];
	int count;
	int64_t total_time;
};

struct msm_pm_cpu_time_stats {
	struct msm_pm_time_stats stats[MSM_PM_STAT_COUNT];
};

static DEFINE_SPINLOCK(msm_pm_stats_lock);
static DEFINE_PER_CPU_SHARED_ALIGNED(
	struct msm_pm_cpu_time_stats, msm_pm_stats);

/*
 * Add the given time data to the statistics collection.
 */
static void msm_pm_add_stat(enum msm_pm_time_stats_id id, int64_t t)
{
	unsigned long flags;
	struct msm_pm_time_stats *stats;
	int64_t bt;
	int i;

	spin_lock_irqsave(&msm_pm_stats_lock, flags);
	stats = __get_cpu_var(msm_pm_stats).stats;

	stats[id].total_time += t;
	stats[id].count++;

	bt = t;
	do_div(bt, stats[id].first_bucket_time);

	if (bt < 1ULL << (CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT *
		(CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1)))
		i = DIV_ROUND_UP(fls((uint32_t)bt),
			CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT);
	else
		i = CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1;

	if (i >= CONFIG_MSM_IDLE_STATS_BUCKET_COUNT)
		i = CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1;

	stats[id].bucket[i]++;

	if (t < stats[id].min_time[i] || !stats[id].max_time[i])
		stats[id].min_time[i] = t;
	if (t > stats[id].max_time[i])
		stats[id].max_time[i] = t;

	spin_unlock_irqrestore(&msm_pm_stats_lock, flags);
}
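
/*
 * Illustrative note: the bucket boundaries above grow geometrically. A
 * sample of length t lands in bucket i, where i is roughly
 * fls(t / first_bucket_time) / CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT, so each
 * successive bucket spans a range 2^CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT
 * times wider than the previous one, with the last bucket catching
 * everything beyond the largest boundary.
 */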

/*
 * Helper function of snprintf where buf is auto-incremented, size is auto-
 * decremented, and there is no return value.
 *
 * NOTE: buf and size must be l-values (e.g. variables)
 */
#define SNPRINTF(buf, size, format, ...) \
	do { \
		if (size > 0) { \
			int ret; \
			ret = snprintf(buf, size, format, ## __VA_ARGS__); \
			if (ret > size) { \
				buf += size; \
				size = 0; \
			} else { \
				buf += ret; \
				size -= ret; \
			} \
		} \
	} while (0)
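
/*
 * Illustrative usage sketch (not from the original file; the local names
 * below are hypothetical). SNPRINTF() is meant to be called repeatedly
 * while filling one page:
 *
 *	char *p = page;
 *	int remaining = count;
 *	SNPRINTF(p, remaining, "count: %d\n", stats->count);
 *	SNPRINTF(p, remaining, "total: %lld\n", stats->total_time);
 *	return p - page;
 *
 * After each call, p points past the text just written and remaining holds
 * the space left, clamped to zero on truncation.
 */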

/*
 * Write out the power management statistics.
 */
static int msm_pm_read_proc
	(char *page, char **start, off_t off, int count, int *eof, void *data)
{
	unsigned int cpu = off / MSM_PM_STAT_COUNT;
	int id = off % MSM_PM_STAT_COUNT;
	char *p = page;

	if (count < 1024) {
		*start = (char *) 0;
		*eof = 0;
		return 0;
	}

	if (cpu < num_possible_cpus()) {
		unsigned long flags;
		struct msm_pm_time_stats *stats;
		int i;
		int64_t bucket_time;
		int64_t s;
		uint32_t ns;

		spin_lock_irqsave(&msm_pm_stats_lock, flags);
		stats = per_cpu(msm_pm_stats, cpu).stats;

		s = stats[id].total_time;
		ns = do_div(s, NSEC_PER_SEC);
		SNPRINTF(p, count,
			"[cpu %u] %s:\n"
			" count: %7d\n"
			" total_time: %lld.%09u\n",
			cpu, stats[id].name,
			stats[id].count,
			s, ns);

		bucket_time = stats[id].first_bucket_time;
		for (i = 0; i < CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1; i++) {
			s = bucket_time;
			ns = do_div(s, NSEC_PER_SEC);
			SNPRINTF(p, count,
				" <%6lld.%09u: %7d (%lld-%lld)\n",
				s, ns, stats[id].bucket[i],
				stats[id].min_time[i],
				stats[id].max_time[i]);

			bucket_time <<= CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT;
		}

		SNPRINTF(p, count, " >=%6lld.%09u: %7d (%lld-%lld)\n",
			s, ns, stats[id].bucket[i],
			stats[id].min_time[i],
			stats[id].max_time[i]);

		*start = (char *) 1;
		*eof = (off + 1 >= MSM_PM_STAT_COUNT * num_possible_cpus());

		spin_unlock_irqrestore(&msm_pm_stats_lock, flags);
	}

	return p - page;
}
#undef SNPRINTF

#define MSM_PM_STATS_RESET "reset"

/*
 * Reset the power management statistics values.
 */
static int msm_pm_write_proc(struct file *file, const char __user *buffer,
	unsigned long count, void *data)
{
	char buf[sizeof(MSM_PM_STATS_RESET)];
	int ret;
	unsigned long flags;
	unsigned int cpu;

	if (count < strlen(MSM_PM_STATS_RESET)) {
		ret = -EINVAL;
		goto write_proc_failed;
	}

	if (copy_from_user(buf, buffer, strlen(MSM_PM_STATS_RESET))) {
		ret = -EFAULT;
		goto write_proc_failed;
	}

	if (memcmp(buf, MSM_PM_STATS_RESET, strlen(MSM_PM_STATS_RESET))) {
		ret = -EINVAL;
		goto write_proc_failed;
	}

	spin_lock_irqsave(&msm_pm_stats_lock, flags);
	for_each_possible_cpu(cpu) {
		struct msm_pm_time_stats *stats;
		int i;

		stats = per_cpu(msm_pm_stats, cpu).stats;
		for (i = 0; i < MSM_PM_STAT_COUNT; i++) {
			memset(stats[i].bucket,
				0, sizeof(stats[i].bucket));
			memset(stats[i].min_time,
				0, sizeof(stats[i].min_time));
			memset(stats[i].max_time,
				0, sizeof(stats[i].max_time));
			stats[i].count = 0;
			stats[i].total_time = 0;
		}
	}

	spin_unlock_irqrestore(&msm_pm_stats_lock, flags);
	return count;

write_proc_failed:
	return ret;
}
#undef MSM_PM_STATS_RESET
#endif /* CONFIG_MSM_IDLE_STATS */


/******************************************************************************
 * Configure Hardware before/after Low Power Mode
 *****************************************************************************/

/*
 * Configure hardware registers in preparation for Apps power down.
 */
static void msm_pm_config_hw_before_power_down(void)
{
	return;
}

/*
 * Clear hardware registers after Apps powers up.
 */
static void msm_pm_config_hw_after_power_up(void)
{
	return;
}

/*
 * Configure hardware registers in preparation for SWFI.
 */
static void msm_pm_config_hw_before_swfi(void)
{
	return;
}


/******************************************************************************
 * Suspend Max Sleep Time
 *****************************************************************************/

#ifdef CONFIG_MSM_SLEEP_TIME_OVERRIDE
static int msm_pm_sleep_time_override;
module_param_named(sleep_time_override,
	msm_pm_sleep_time_override, int, S_IRUGO | S_IWUSR | S_IWGRP);
#endif

#define SCLK_HZ (32768)
#define MSM_PM_SLEEP_TICK_LIMIT (0x6DDD000)

static uint32_t msm_pm_max_sleep_time;

/*
 * Convert time from nanoseconds to slow clock ticks, then cap it to the
 * specified limit
 */
static int64_t msm_pm_convert_and_cap_time(int64_t time_ns, int64_t limit)
{
	do_div(time_ns, NSEC_PER_SEC / SCLK_HZ);
	return (time_ns > limit) ? limit : time_ns;
}
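
/*
 * Illustrative note: with SCLK_HZ = 32768, one slow-clock tick is
 * NSEC_PER_SEC / SCLK_HZ = 30517 ns (integer division), and the cap of
 * MSM_PM_SLEEP_TICK_LIMIT (0x6DDD000 = 115200000 ticks) corresponds to
 * roughly 3515 seconds, i.e. just under an hour of scheduled sleep.
 */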

/*
 * Set the sleep time for suspend. 0 means infinite sleep time.
 */
void msm_pm_set_max_sleep_time(int64_t max_sleep_time_ns)
{
	if (max_sleep_time_ns == 0) {
		msm_pm_max_sleep_time = 0;
	} else {
		msm_pm_max_sleep_time = (uint32_t)msm_pm_convert_and_cap_time(
			max_sleep_time_ns, MSM_PM_SLEEP_TICK_LIMIT);

		if (msm_pm_max_sleep_time == 0)
			msm_pm_max_sleep_time = 1;
	}

	if (msm_pm_debug_mask & MSM_PM_DEBUG_SUSPEND)
		pr_info("%s: Requested %lld ns Giving %u sclk ticks\n",
			__func__, max_sleep_time_ns, msm_pm_max_sleep_time);
}
EXPORT_SYMBOL(msm_pm_set_max_sleep_time);


/******************************************************************************
 *
 *****************************************************************************/

static struct msm_rpmrs_limits *msm_pm_idle_rs_limits;

static void msm_pm_swfi(void)
{
	msm_pm_config_hw_before_swfi();
	msm_arch_idle();
}

#ifdef CONFIG_CACHE_L2X0
static inline bool msm_pm_l2x0_power_collapse(void)
{
	bool collapsed = 0;

	l2x0_suspend();
	collapsed = msm_pm_collapse();
	l2x0_resume(collapsed);

	return collapsed;
}
#else
static inline bool msm_pm_l2x0_power_collapse(void)
{
	return msm_pm_collapse();
}
#endif
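
/*
 * Illustrative note: when the shared L2X0 cache controller is present, its
 * state is saved before msm_pm_collapse() and restored afterwards;
 * l2x0_resume() is handed the collapse result, presumably so it can skip
 * re-initialisation when the core never actually lost power.
 */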

static bool msm_pm_spm_power_collapse(
	unsigned int cpu, bool from_idle, bool notify_rpm)
{
	void *entry;
	bool collapsed = 0;
	int ret;
	unsigned int saved_gic_cpu_ctrl;

	saved_gic_cpu_ctrl = readl_relaxed(MSM_QGIC_CPU_BASE + GIC_CPU_CTRL);
	mb();

	if (MSM_PM_DEBUG_POWER_COLLAPSE & msm_pm_debug_mask)
		pr_info("CPU%u: %s: notify_rpm %d\n",
			cpu, __func__, (int) notify_rpm);

	ret = msm_spm_set_low_power_mode(
		MSM_SPM_MODE_POWER_COLLAPSE, notify_rpm);
	WARN_ON(ret);

	entry = (!cpu || from_idle) ?
		msm_pm_collapse_exit : msm_secondary_startup;
	msm_pm_boot_config_before_pc(cpu, virt_to_phys(entry));

	if (MSM_PM_DEBUG_RESET_VECTOR & msm_pm_debug_mask)
		pr_info("CPU%u: %s: program vector to %p\n",
			cpu, __func__, entry);

#ifdef CONFIG_VFP
	vfp_flush_context();
#endif

	collapsed = msm_pm_l2x0_power_collapse();

	msm_pm_boot_config_after_pc(cpu);

	if (collapsed) {
#ifdef CONFIG_VFP
		vfp_reinit();
#endif
		cpu_init();
		writel(0xF0, MSM_QGIC_CPU_BASE + GIC_CPU_PRIMASK);
		writel_relaxed(saved_gic_cpu_ctrl,
			MSM_QGIC_CPU_BASE + GIC_CPU_CTRL);
		mb();
		local_fiq_enable();
	}

	if (MSM_PM_DEBUG_POWER_COLLAPSE & msm_pm_debug_mask)
		pr_info("CPU%u: %s: msm_pm_collapse returned, collapsed %d\n",
			cpu, __func__, collapsed);

	ret = msm_spm_set_low_power_mode(MSM_SPM_MODE_CLOCK_GATING, false);
	WARN_ON(ret);
	return collapsed;
}
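
/*
 * Illustrative summary of the sequence above: the SPM is armed for power
 * collapse, the warm-boot entry point (msm_pm_collapse_exit, or
 * msm_secondary_startup for a hotplugged secondary core) is programmed,
 * VFP state is flushed, and the core drops into msm_pm_collapse(). Only if
 * the collapse really happened (collapsed is true) are the CPU, the GIC
 * priority mask and the saved GIC CPU control register reinitialised
 * before FIQs are re-enabled and the SPM is returned to clock gating.
 */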

static bool msm_pm_power_collapse_standalone(bool from_idle)
{
	unsigned int cpu = smp_processor_id();
	unsigned int avsdscr_setting;
	bool collapsed;

	avsdscr_setting = avs_get_avsdscr();
	avs_disable();
	collapsed = msm_pm_spm_power_collapse(cpu, from_idle, false);
	avs_reset_delays(avsdscr_setting);
	return collapsed;
}

static bool msm_pm_power_collapse(bool from_idle)
{
	unsigned int cpu = smp_processor_id();
	unsigned long saved_acpuclk_rate;
	unsigned int avsdscr_setting;
	bool collapsed;

	if (MSM_PM_DEBUG_POWER_COLLAPSE & msm_pm_debug_mask)
		pr_info("CPU%u: %s: idle %d\n",
			cpu, __func__, (int)from_idle);

	msm_pm_config_hw_before_power_down();
	if (MSM_PM_DEBUG_POWER_COLLAPSE & msm_pm_debug_mask)
		pr_info("CPU%u: %s: pre power down\n", cpu, __func__);

	avsdscr_setting = avs_get_avsdscr();
	avs_disable();

	if (cpu_online(cpu))
		saved_acpuclk_rate = acpuclk_power_collapse();
	else
		saved_acpuclk_rate = 0;

	if (MSM_PM_DEBUG_CLOCK & msm_pm_debug_mask)
		pr_info("CPU%u: %s: change clock rate (old rate = %lu)\n",
			cpu, __func__, saved_acpuclk_rate);

	collapsed = msm_pm_spm_power_collapse(cpu, from_idle, true);

	if (MSM_PM_DEBUG_CLOCK & msm_pm_debug_mask)
		pr_info("CPU%u: %s: restore clock rate to %lu\n",
			cpu, __func__, saved_acpuclk_rate);
	if (acpuclk_set_rate(cpu, saved_acpuclk_rate, SETRATE_PC) < 0)
		pr_err("CPU%u: %s: failed to restore clock rate(%lu)\n",
			cpu, __func__, saved_acpuclk_rate);

	avs_reset_delays(avsdscr_setting);
	msm_pm_config_hw_after_power_up();
	if (MSM_PM_DEBUG_POWER_COLLAPSE & msm_pm_debug_mask)
		pr_info("CPU%u: %s: post power up\n", cpu, __func__);

	if (MSM_PM_DEBUG_POWER_COLLAPSE & msm_pm_debug_mask)
		pr_info("CPU%u: %s: return\n", cpu, __func__);
	return collapsed;
}

static irqreturn_t msm_pm_rpm_wakeup_interrupt(int irq, void *dev_id)
{
	if (dev_id != &msm_pm_rpm_wakeup_interrupt)
		return IRQ_NONE;

	return IRQ_HANDLED;
}


/******************************************************************************
 * External Idle/Suspend Functions
 *****************************************************************************/

void arch_idle(void)
{
	return;
}

int msm_pm_idle_prepare(struct cpuidle_device *dev)
{
	uint32_t latency_us;
	uint32_t sleep_us;
	int i;

	latency_us = (uint32_t) pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
	sleep_us = (uint32_t) ktime_to_ns(tick_nohz_get_sleep_length());
	sleep_us = DIV_ROUND_UP(sleep_us, 1000);

	for (i = 0; i < dev->state_count; i++) {
		struct cpuidle_state *state = &dev->states[i];
		enum msm_pm_sleep_mode mode;
		bool allow;
		struct msm_rpmrs_limits *rs_limits = NULL;
		int idx;

		mode = (enum msm_pm_sleep_mode) state->driver_data;
		idx = MSM_PM_MODE(dev->cpu, mode);

		allow = msm_pm_modes[idx].idle_enabled &&
			msm_pm_modes[idx].idle_supported;

		switch (mode) {
		case MSM_PM_SLEEP_MODE_POWER_COLLAPSE:
			if (!allow)
				break;

			if (num_online_cpus() > 1) {
				allow = false;
				break;
			}
#ifdef CONFIG_HAS_WAKELOCK
			if (has_wake_lock(WAKE_LOCK_IDLE)) {
				allow = false;
				break;
			}
#endif
			/* fall through */

		case MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE:
			if (!allow)
				break;

			if (!dev->cpu &&
				msm_rpm_local_request_is_outstanding()) {
				allow = false;
				break;
			}
			/* fall through */

		case MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT:
			if (!allow)
				break;

			rs_limits = msm_rpmrs_lowest_limits(true,
				mode, latency_us, sleep_us);

			if (MSM_PM_DEBUG_IDLE & msm_pm_debug_mask)
				pr_info("CPU%u: %s: %s, latency %uus, "
					"sleep %uus, limit %p\n",
					dev->cpu, __func__, state->desc,
					latency_us, sleep_us, rs_limits);

			if ((MSM_PM_DEBUG_IDLE_LIMITS & msm_pm_debug_mask) &&
					rs_limits)
				pr_info("CPU%u: %s: limit %p: "
					"pxo %d, l2_cache %d, "
					"vdd_mem %d, vdd_dig %d\n",
					dev->cpu, __func__, rs_limits,
					rs_limits->pxo,
					rs_limits->l2_cache,
					rs_limits->vdd_mem,
					rs_limits->vdd_dig);

			if (!rs_limits)
				allow = false;
			break;

		default:
			allow = false;
			break;
		}

		if (MSM_PM_DEBUG_IDLE & msm_pm_debug_mask)
			pr_info("CPU%u: %s: allow %s: %d\n",
				dev->cpu, __func__, state->desc, (int)allow);

		if (allow) {
			state->flags &= ~CPUIDLE_FLAG_IGNORE;
			state->target_residency = 0;
			state->exit_latency = 0;
			state->power_usage = rs_limits->power[dev->cpu];

			if (MSM_PM_SLEEP_MODE_POWER_COLLAPSE == mode)
				msm_pm_idle_rs_limits = rs_limits;
		} else {
			state->flags |= CPUIDLE_FLAG_IGNORE;
		}
	}

	return 0;
}

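/*
 * Illustrative note: msm_pm_idle_enter() carries out the low power mode
 * chosen by cpuidle after msm_pm_idle_prepare() has masked out the states
 * that are not currently allowed (via CPUIDLE_FLAG_IGNORE). It returns the
 * time actually spent in the state, in microseconds (measured with ktime
 * and divided by 1000 below).
 */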
int msm_pm_idle_enter(enum msm_pm_sleep_mode sleep_mode)
{
	int64_t time;
#ifdef CONFIG_MSM_IDLE_STATS
	int exit_stat;
#endif

	if (MSM_PM_DEBUG_IDLE & msm_pm_debug_mask)
		pr_info("CPU%u: %s: mode %d\n",
			smp_processor_id(), __func__, sleep_mode);

	time = ktime_to_ns(ktime_get());

	switch (sleep_mode) {
	case MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT:
		msm_pm_swfi();
#ifdef CONFIG_MSM_IDLE_STATS
		exit_stat = MSM_PM_STAT_IDLE_WFI;
#endif
		break;

	case MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE:
		msm_pm_power_collapse_standalone(true);
#ifdef CONFIG_MSM_IDLE_STATS
		exit_stat = MSM_PM_STAT_IDLE_STANDALONE_POWER_COLLAPSE;
#endif
		break;

	case MSM_PM_SLEEP_MODE_POWER_COLLAPSE: {
		int64_t timer_expiration = msm_timer_enter_idle();
		bool timer_halted = false;
		uint32_t sleep_delay;
		int ret;
		int notify_rpm =
			(sleep_mode == MSM_PM_SLEEP_MODE_POWER_COLLAPSE);
		int collapsed;

		sleep_delay = (uint32_t) msm_pm_convert_and_cap_time(
			timer_expiration, MSM_PM_SLEEP_TICK_LIMIT);
		if (sleep_delay == 0) /* 0 would mean infinite time */
			sleep_delay = 1;

		if (MSM_PM_DEBUG_IDLE_CLK & msm_pm_debug_mask)
			clock_debug_print_enabled();

		ret = msm_rpmrs_enter_sleep(
			sleep_delay, msm_pm_idle_rs_limits, true, notify_rpm);
		if (!ret) {
			collapsed = msm_pm_power_collapse(true);
			timer_halted = true;

			msm_rpmrs_exit_sleep(msm_pm_idle_rs_limits, true,
				notify_rpm, collapsed);
		}

		msm_timer_exit_idle((int) timer_halted);
#ifdef CONFIG_MSM_IDLE_STATS
		exit_stat = MSM_PM_STAT_IDLE_POWER_COLLAPSE;
#endif
		break;
	}

	default:
		__WARN();
		goto cpuidle_enter_bail;
	}

	time = ktime_to_ns(ktime_get()) - time;
#ifdef CONFIG_MSM_IDLE_STATS
	msm_pm_add_stat(exit_stat, time);
#endif

	do_div(time, 1000);
	return (int) time;

cpuidle_enter_bail:
	return 0;
}

static struct msm_pm_sleep_status_data *msm_pm_slp_sts;

static DEFINE_PER_CPU_SHARED_ALIGNED(enum msm_pm_sleep_mode,
	msm_pm_last_slp_mode);

bool msm_pm_verify_cpu_pc(unsigned int cpu)
{
	enum msm_pm_sleep_mode mode = per_cpu(msm_pm_last_slp_mode, cpu);

	if (msm_pm_slp_sts)
		if ((mode == MSM_PM_SLEEP_MODE_POWER_COLLAPSE) ||
			(mode == MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE))
			return true;

	return false;
}

void msm_pm_cpu_enter_lowpower(unsigned int cpu)
{
	int i;
	bool allow[MSM_PM_SLEEP_MODE_NR];

	for (i = 0; i < MSM_PM_SLEEP_MODE_NR; i++) {
		struct msm_pm_platform_data *mode;

		mode = &msm_pm_modes[MSM_PM_MODE(cpu, i)];
		allow[i] = mode->suspend_supported && mode->suspend_enabled;
	}

	if (MSM_PM_DEBUG_HOTPLUG & msm_pm_debug_mask)
		pr_notice("CPU%u: %s: shutting down cpu\n", cpu, __func__);

	if (allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE]) {
		per_cpu(msm_pm_last_slp_mode, cpu)
			= MSM_PM_SLEEP_MODE_POWER_COLLAPSE;
		msm_pm_power_collapse(false);
	} else if (allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE]) {
		per_cpu(msm_pm_last_slp_mode, cpu)
			= MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE;
		msm_pm_power_collapse_standalone(false);
	} else if (allow[MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT]) {
		per_cpu(msm_pm_last_slp_mode, cpu)
			= MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE;
		msm_pm_swfi();
	} else
		per_cpu(msm_pm_last_slp_mode, cpu) = MSM_PM_SLEEP_MODE_NR;
}

int msm_pm_wait_cpu_shutdown(unsigned int cpu)
{

	int timeout = 10;

	if (!msm_pm_slp_sts)
		return 0;

	while (timeout--) {

		/*
		 * Check for the SPM of the core being hotplugged to set
		 * its sleep state. The SPM sleep state indicates that the
		 * core has been power collapsed.
		 */

		int acc_sts = __raw_readl(msm_pm_slp_sts->base_addr
			+ cpu * msm_pm_slp_sts->cpu_offset);
		mb();

		if (acc_sts & msm_pm_slp_sts->mask)
			return 0;

		usleep(100);
	}
	pr_warn("%s(): Timed out waiting for CPU %u SPM to enter sleep state",
		__func__, cpu);
	return -EBUSY;
}
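
/*
 * Illustrative note: the poll above retries at most 10 times with a ~100 us
 * sleep between reads, so a core that has not reached its SPM sleep state
 * within roughly a millisecond (plus scheduling overhead) is reported to
 * the hotplug path as -EBUSY.
 */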

static int msm_pm_enter(suspend_state_t state)
{
	bool allow[MSM_PM_SLEEP_MODE_NR];
	int i;

#ifdef CONFIG_MSM_IDLE_STATS
	int64_t period = 0;
	int64_t time = msm_timer_get_sclk_time(&period);
#endif

	if (MSM_PM_DEBUG_SUSPEND & msm_pm_debug_mask)
		pr_info("%s\n", __func__);

	if (smp_processor_id()) {
		__WARN();
		goto enter_exit;
	}


	for (i = 0; i < MSM_PM_SLEEP_MODE_NR; i++) {
		struct msm_pm_platform_data *mode;

		mode = &msm_pm_modes[MSM_PM_MODE(0, i)];
		allow[i] = mode->suspend_supported && mode->suspend_enabled;
	}

	if (allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE]) {
		struct msm_rpmrs_limits *rs_limits;
		int ret;

		if (MSM_PM_DEBUG_SUSPEND & msm_pm_debug_mask)
			pr_info("%s: power collapse\n", __func__);

		clock_debug_print_enabled();

#ifdef CONFIG_MSM_SLEEP_TIME_OVERRIDE
		if (msm_pm_sleep_time_override > 0) {
			int64_t ns = NSEC_PER_SEC *
				(int64_t) msm_pm_sleep_time_override;
			msm_pm_set_max_sleep_time(ns);
			msm_pm_sleep_time_override = 0;
		}
#endif /* CONFIG_MSM_SLEEP_TIME_OVERRIDE */

		if (MSM_PM_DEBUG_SUSPEND_LIMITS & msm_pm_debug_mask)
			msm_rpmrs_show_resources();

		rs_limits = msm_rpmrs_lowest_limits(false,
			MSM_PM_SLEEP_MODE_POWER_COLLAPSE, -1, -1);

		if ((MSM_PM_DEBUG_SUSPEND_LIMITS & msm_pm_debug_mask) &&
				rs_limits)
			pr_info("%s: limit %p: pxo %d, l2_cache %d, "
				"vdd_mem %d, vdd_dig %d\n",
				__func__, rs_limits,
				rs_limits->pxo, rs_limits->l2_cache,
				rs_limits->vdd_mem, rs_limits->vdd_dig);

		if (rs_limits) {
			ret = msm_rpmrs_enter_sleep(
				msm_pm_max_sleep_time, rs_limits, false, true);
			if (!ret) {
				int collapsed = msm_pm_power_collapse(false);
				msm_rpmrs_exit_sleep(rs_limits, false, true,
					collapsed);
			}
		} else {
			pr_err("%s: cannot find the lowest power limit\n",
				__func__);
		}

#ifdef CONFIG_MSM_IDLE_STATS
		if (time != 0) {
			int64_t end_time = msm_timer_get_sclk_time(NULL);
			if (end_time != 0) {
				time = end_time - time;
				if (time < 0)
					time += period;
			} else
				time = 0;
		}

		msm_pm_add_stat(MSM_PM_STAT_SUSPEND, time);
#endif /* CONFIG_MSM_IDLE_STATS */
	} else if (allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE]) {
		if (MSM_PM_DEBUG_SUSPEND & msm_pm_debug_mask)
			pr_info("%s: standalone power collapse\n", __func__);
		msm_pm_power_collapse_standalone(false);
	} else if (allow[MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT]) {
		if (MSM_PM_DEBUG_SUSPEND & msm_pm_debug_mask)
			pr_info("%s: swfi\n", __func__);
		msm_pm_swfi();
	}


enter_exit:
	if (MSM_PM_DEBUG_SUSPEND & msm_pm_debug_mask)
		pr_info("%s: return\n", __func__);

	return 0;
}

static struct platform_suspend_ops msm_pm_ops = {
	.enter = msm_pm_enter,
	.valid = suspend_valid_only_mem,
};

/******************************************************************************
 * Initialization routine
 *****************************************************************************/
void __init msm_pm_init_sleep_status_data(
	struct msm_pm_sleep_status_data *data)
{
	msm_pm_slp_sts = data;
}

static int __init msm_pm_init(void)
{
	pgd_t *pc_pgd;
	pmd_t *pmd;
	unsigned long pmdval;
	unsigned int cpu;
#ifdef CONFIG_MSM_IDLE_STATS
	struct proc_dir_entry *d_entry;
#endif
	int ret;

	/* Page table for cores to come back up safely. */
	pc_pgd = pgd_alloc(&init_mm);
	if (!pc_pgd)
		return -ENOMEM;

	pmd = pmd_offset(pc_pgd +
		pgd_index(virt_to_phys(msm_pm_collapse_exit)),
		virt_to_phys(msm_pm_collapse_exit));
	pmdval = (virt_to_phys(msm_pm_collapse_exit) & PGDIR_MASK) |
		PMD_TYPE_SECT | PMD_SECT_AP_WRITE;
	pmd[0] = __pmd(pmdval);
	pmd[1] = __pmd(pmdval + (1 << (PGDIR_SHIFT - 1)));
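	/*
	 * Illustrative note: pmd[0]/pmd[1] (and pmd[2] further below) create
	 * 1 MB section mappings at the *physical* address of
	 * msm_pm_collapse_exit, i.e. an identity mapping that lets the warm
	 * boot code turn the MMU back on while still running from physical
	 * addresses.
	 */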

	msm_saved_state_phys =
		allocate_contiguous_ebi_nomap(CPU_SAVED_STATE_SIZE *
			num_possible_cpus(), 4);
	if (!msm_saved_state_phys)
		return -ENOMEM;
	msm_saved_state = ioremap_nocache(msm_saved_state_phys,
		CPU_SAVED_STATE_SIZE *
		num_possible_cpus());
	if (!msm_saved_state)
		return -ENOMEM;

	/* It is remotely possible that the code in msm_pm_collapse_exit()
	 * which turns on the MMU with this mapping is in the
	 * next even-numbered megabyte beyond the
	 * start of msm_pm_collapse_exit().
	 * Map this megabyte in as well.
	 */
	pmd[2] = __pmd(pmdval + (2 << (PGDIR_SHIFT - 1)));
	flush_pmd_entry(pmd);
	msm_pm_pc_pgd = virt_to_phys(pc_pgd);

	ret = request_irq(rpm_cpu0_wakeup_irq,
		msm_pm_rpm_wakeup_interrupt, IRQF_TRIGGER_RISING,
		"pm_drv", msm_pm_rpm_wakeup_interrupt);
	if (ret) {
		pr_err("%s: failed to request irq %u: %d\n",
			__func__, rpm_cpu0_wakeup_irq, ret);
		return ret;
	}

	ret = irq_set_irq_wake(rpm_cpu0_wakeup_irq, 1);
	if (ret) {
		pr_err("%s: failed to set wakeup irq %u: %d\n",
			__func__, rpm_cpu0_wakeup_irq, ret);
		return ret;
	}

#ifdef CONFIG_MSM_IDLE_STATS
	for_each_possible_cpu(cpu) {
		struct msm_pm_time_stats *stats =
			per_cpu(msm_pm_stats, cpu).stats;

		stats[MSM_PM_STAT_REQUESTED_IDLE].name = "idle-request";
		stats[MSM_PM_STAT_REQUESTED_IDLE].first_bucket_time =
			CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;

		stats[MSM_PM_STAT_IDLE_WFI].name = "idle-wfi";
		stats[MSM_PM_STAT_IDLE_WFI].first_bucket_time =
			CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;

		stats[MSM_PM_STAT_IDLE_STANDALONE_POWER_COLLAPSE].name =
			"idle-standalone-power-collapse";
		stats[MSM_PM_STAT_IDLE_STANDALONE_POWER_COLLAPSE].
			first_bucket_time = CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;

		stats[MSM_PM_STAT_IDLE_POWER_COLLAPSE].name =
			"idle-power-collapse";
		stats[MSM_PM_STAT_IDLE_POWER_COLLAPSE].first_bucket_time =
			CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;

		stats[MSM_PM_STAT_SUSPEND].name = "suspend";
		stats[MSM_PM_STAT_SUSPEND].first_bucket_time =
			CONFIG_MSM_SUSPEND_STATS_FIRST_BUCKET;
	}

	d_entry = create_proc_entry("msm_pm_stats",
		S_IRUGO | S_IWUSR | S_IWGRP, NULL);
	if (d_entry) {
		d_entry->read_proc = msm_pm_read_proc;
		d_entry->write_proc = msm_pm_write_proc;
		d_entry->data = NULL;
	}
#endif /* CONFIG_MSM_IDLE_STATS */

	msm_pm_mode_sysfs_add();
	msm_spm_allow_x_cpu_set_vdd(false);

	suspend_set_ops(&msm_pm_ops);
	msm_cpuidle_init();

	return 0;
}

late_initcall(msm_pm_init);