blob: 9a445a534b1da090021d579217f42689461a9cbe [file] [log] [blame]
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13
14#include <linux/module.h>
15#include <linux/kernel.h>
16#include <linux/init.h>
17#include <linux/completion.h>
18#include <linux/cpuidle.h>
19#include <linux/interrupt.h>
20#include <linux/io.h>
21#include <linux/ktime.h>
22#include <linux/pm.h>
23#include <linux/pm_qos_params.h>
24#include <linux/proc_fs.h>
25#include <linux/smp.h>
26#include <linux/suspend.h>
27#include <linux/tick.h>
28#include <linux/uaccess.h>
29#include <linux/wakelock.h>
30#include <mach/msm_iomap.h>
31#include <mach/system.h>
32#include <asm/cacheflush.h>
33#include <asm/hardware/gic.h>
34#include <asm/pgtable.h>
35#include <asm/pgalloc.h>
36#ifdef CONFIG_VFP
37#include <asm/vfp.h>
38#endif
39
40#include "acpuclock.h"
41#include "clock.h"
42#include "avs.h"
43#include "cpuidle.h"
44#include "idle.h"
45#include "pm.h"
46#include "rpm_resources.h"
47#include "scm-boot.h"
48#include "spm.h"
49#include "timer.h"
50
51/******************************************************************************
52 * Debug Definitions
53 *****************************************************************************/
54
/* Bit flags for msm_pm_debug_mask (writable module param "debug_mask"). */
enum {
	MSM_PM_DEBUG_SUSPEND = BIT(0),
	MSM_PM_DEBUG_POWER_COLLAPSE = BIT(1),
	MSM_PM_DEBUG_SUSPEND_LIMITS = BIT(2),
	MSM_PM_DEBUG_CLOCK = BIT(3),
	MSM_PM_DEBUG_RESET_VECTOR = BIT(4),
	/* NOTE(review): BIT(5) is skipped -- possibly reserved for a
	 * historical flag; confirm before reusing it. */
	MSM_PM_DEBUG_IDLE = BIT(6),
	MSM_PM_DEBUG_IDLE_LIMITS = BIT(7),
	MSM_PM_DEBUG_HOTPLUG = BIT(8),
};
65
66static int msm_pm_debug_mask = 1;
67module_param_named(
68 debug_mask, msm_pm_debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP
69);
70
71
72/******************************************************************************
73 * Sleep Modes and Parameters
74 *****************************************************************************/
75
76static struct msm_pm_platform_data *msm_pm_modes;
77static int rpm_cpu0_wakeup_irq;
78
/*
 * Register the per-(cpu, sleep mode) platform table.  @count must cover
 * MSM_PM_SLEEP_MODE_NR entries for every possible CPU; entries are
 * looked up later via MSM_PM_MODE(cpu, mode).
 */
void __init msm_pm_set_platform_data(
	struct msm_pm_platform_data *data, int count)
{
	BUG_ON(MSM_PM_SLEEP_MODE_NR * num_possible_cpus() > count);
	msm_pm_modes = data;
}
85
/* Record the IRQ the RPM uses to wake CPU0; it is claimed in msm_pm_init(). */
void __init msm_pm_set_rpm_wakeup_irq(unsigned int irq)
{
	rpm_cpu0_wakeup_irq = irq;
}
90
91enum {
92 MSM_PM_MODE_ATTR_SUSPEND,
93 MSM_PM_MODE_ATTR_IDLE,
94 MSM_PM_MODE_ATTR_NR,
95};
96
97static char *msm_pm_mode_attr_labels[MSM_PM_MODE_ATTR_NR] = {
98 [MSM_PM_MODE_ATTR_SUSPEND] = "suspend_enabled",
99 [MSM_PM_MODE_ATTR_IDLE] = "idle_enabled",
100};
101
102struct msm_pm_kobj_attribute {
103 unsigned int cpu;
104 struct kobj_attribute ka;
105};
106
107#define GET_CPU_OF_ATTR(attr) \
108 (container_of(attr, struct msm_pm_kobj_attribute, ka)->cpu)
109
110struct msm_pm_sysfs_sleep_mode {
111 struct kobject *kobj;
112 struct attribute_group attr_group;
113 struct attribute *attrs[MSM_PM_MODE_ATTR_NR + 1];
114 struct msm_pm_kobj_attribute kas[MSM_PM_MODE_ATTR_NR];
115};
116
/*
 * kobject names for the per-CPU sleep mode directories in sysfs; a NULL
 * entry means that mode gets no sysfs node (checked in show/store).
 */
static char *msm_pm_sleep_mode_labels[MSM_PM_SLEEP_MODE_NR] = {
	[MSM_PM_SLEEP_MODE_POWER_COLLAPSE] = "power_collapse",
	[MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT] = "wfi",
	[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE] =
		"standalone_power_collapse",
};
123
124/*
125 * Write out the attribute.
126 */
127static ssize_t msm_pm_mode_attr_show(
128 struct kobject *kobj, struct kobj_attribute *attr, char *buf)
129{
130 int ret = -EINVAL;
131 int i;
132
133 for (i = 0; i < MSM_PM_SLEEP_MODE_NR; i++) {
134 struct kernel_param kp;
135 unsigned int cpu;
136 struct msm_pm_platform_data *mode;
137
138 if (msm_pm_sleep_mode_labels[i] == NULL)
139 continue;
140
141 if (strcmp(kobj->name, msm_pm_sleep_mode_labels[i]))
142 continue;
143
144 cpu = GET_CPU_OF_ATTR(attr);
145 mode = &msm_pm_modes[MSM_PM_MODE(cpu, i)];
146
147 if (!strcmp(attr->attr.name,
148 msm_pm_mode_attr_labels[MSM_PM_MODE_ATTR_SUSPEND])) {
149 u32 arg = mode->suspend_enabled;
150 kp.arg = &arg;
151 ret = param_get_ulong(buf, &kp);
152 } else if (!strcmp(attr->attr.name,
153 msm_pm_mode_attr_labels[MSM_PM_MODE_ATTR_IDLE])) {
154 u32 arg = mode->idle_enabled;
155 kp.arg = &arg;
156 ret = param_get_ulong(buf, &kp);
157 }
158
159 break;
160 }
161
162 if (ret > 0) {
163 strcat(buf, "\n");
164 ret++;
165 }
166
167 return ret;
168}
169
170/*
171 * Read in the new attribute value.
172 */
/*
 * Read in the new attribute value.
 *
 * Parses a byte value from @buf into the matching sleep mode's
 * suspend_enabled or idle_enabled flag; the mode is selected by the
 * kobject name and the CPU by the containing msm_pm_kobj_attribute.
 *
 * Returns @count on success, or a negative errno (-EINVAL when nothing
 * matched or the value failed to parse).
 */
static ssize_t msm_pm_mode_attr_store(struct kobject *kobj,
	struct kobj_attribute *attr, const char *buf, size_t count)
{
	int ret = -EINVAL;
	int i;

	for (i = 0; i < MSM_PM_SLEEP_MODE_NR; i++) {
		struct kernel_param kp;
		unsigned int cpu;
		struct msm_pm_platform_data *mode;

		if (msm_pm_sleep_mode_labels[i] == NULL)
			continue;

		if (strcmp(kobj->name, msm_pm_sleep_mode_labels[i]))
			continue;

		cpu = GET_CPU_OF_ATTR(attr);
		mode = &msm_pm_modes[MSM_PM_MODE(cpu, i)];

		/*
		 * NOTE(review): param_set_byte() writes exactly one byte
		 * through kp.arg -- assumes suspend_enabled/idle_enabled are
		 * byte-wide fields; confirm against msm_pm_platform_data.
		 */
		if (!strcmp(attr->attr.name,
			msm_pm_mode_attr_labels[MSM_PM_MODE_ATTR_SUSPEND])) {
			kp.arg = &mode->suspend_enabled;
			ret = param_set_byte(buf, &kp);
		} else if (!strcmp(attr->attr.name,
			msm_pm_mode_attr_labels[MSM_PM_MODE_ATTR_IDLE])) {
			kp.arg = &mode->idle_enabled;
			ret = param_set_byte(buf, &kp);
		}

		break;
	}

	return ret ? ret : count;
}
208
209/*
210 * Add sysfs entries for one cpu.
211 */
/*
 * Add sysfs entries for one cpu: creates .../modes/cpu<N>/<mode-label>/
 * with "suspend_enabled" and/or "idle_enabled" files for every sleep
 * mode this CPU supports.
 *
 * Returns 0 on success or a negative errno.  Partially created kobjects
 * and allocations are NOT unwound on failure -- this is a one-shot
 * __init path where a failure leaves the system booting without PM
 * sysfs controls.
 */
static int __init msm_pm_mode_sysfs_add_cpu(
	unsigned int cpu, struct kobject *modes_kobj)
{
	char cpu_name[8];
	struct kobject *cpu_kobj;
	struct msm_pm_sysfs_sleep_mode *mode;
	int i, j, k;
	int ret;

	snprintf(cpu_name, sizeof(cpu_name), "cpu%u", cpu);
	cpu_kobj = kobject_create_and_add(cpu_name, modes_kobj);
	if (!cpu_kobj) {
		pr_err("%s: cannot create %s kobject\n", __func__, cpu_name);
		ret = -ENOMEM;
		goto mode_sysfs_add_cpu_exit;
	}

	for (i = 0; i < MSM_PM_SLEEP_MODE_NR; i++) {
		int idx = MSM_PM_MODE(cpu, i);

		/* Skip modes this CPU can use neither for suspend nor idle. */
		if ((!msm_pm_modes[idx].suspend_supported)
			&& (!msm_pm_modes[idx].idle_supported))
			continue;

		mode = kzalloc(sizeof(*mode), GFP_KERNEL);
		if (!mode) {
			pr_err("%s: cannot allocate memory for attributes\n",
				__func__);
			ret = -ENOMEM;
			goto mode_sysfs_add_cpu_exit;
		}

		mode->kobj = kobject_create_and_add(
			msm_pm_sleep_mode_labels[i], cpu_kobj);
		if (!mode->kobj) {
			pr_err("%s: cannot create kobject\n", __func__);
			ret = -ENOMEM;
			goto mode_sysfs_add_cpu_exit;
		}

		/* j indexes only the attributes actually published. */
		for (k = 0, j = 0; k < MSM_PM_MODE_ATTR_NR; k++) {
			if ((k == MSM_PM_MODE_ATTR_IDLE) &&
				!msm_pm_modes[idx].idle_supported)
				continue;
			if ((k == MSM_PM_MODE_ATTR_SUSPEND) &&
				!msm_pm_modes[idx].suspend_supported)
				continue;
			mode->kas[j].cpu = cpu;
			mode->kas[j].ka.attr.mode = 0644;
			mode->kas[j].ka.show = msm_pm_mode_attr_show;
			mode->kas[j].ka.store = msm_pm_mode_attr_store;
			mode->kas[j].ka.attr.name = msm_pm_mode_attr_labels[k];
			mode->attrs[j] = &mode->kas[j].ka.attr;
			j++;
		}
		mode->attrs[j] = NULL;

		mode->attr_group.attrs = mode->attrs;
		ret = sysfs_create_group(mode->kobj, &mode->attr_group);
		if (ret) {
			pr_err("%s: cannot create kobject attribute group\n",
				__func__);
			goto mode_sysfs_add_cpu_exit;
		}
	}

	ret = 0;

mode_sysfs_add_cpu_exit:
	return ret;
}
283
284/*
285 * Add sysfs entries for the sleep modes.
286 */
287static int __init msm_pm_mode_sysfs_add(void)
288{
289 struct kobject *module_kobj;
290 struct kobject *modes_kobj;
291 unsigned int cpu;
292 int ret;
293
294 module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
295 if (!module_kobj) {
296 pr_err("%s: cannot find kobject for module %s\n",
297 __func__, KBUILD_MODNAME);
298 ret = -ENOENT;
299 goto mode_sysfs_add_exit;
300 }
301
302 modes_kobj = kobject_create_and_add("modes", module_kobj);
303 if (!modes_kobj) {
304 pr_err("%s: cannot create modes kobject\n", __func__);
305 ret = -ENOMEM;
306 goto mode_sysfs_add_exit;
307 }
308
309 for_each_possible_cpu(cpu) {
310 ret = msm_pm_mode_sysfs_add_cpu(cpu, modes_kobj);
311 if (ret)
312 goto mode_sysfs_add_exit;
313 }
314
315 ret = 0;
316
317mode_sysfs_add_exit:
318 return ret;
319}
320
321/******************************************************************************
322 * CONFIG_MSM_IDLE_STATS
323 *****************************************************************************/
324
325#ifdef CONFIG_MSM_IDLE_STATS
326enum msm_pm_time_stats_id {
327 MSM_PM_STAT_REQUESTED_IDLE,
328 MSM_PM_STAT_IDLE_WFI,
329 MSM_PM_STAT_IDLE_STANDALONE_POWER_COLLAPSE,
330 MSM_PM_STAT_IDLE_POWER_COLLAPSE,
331 MSM_PM_STAT_SUSPEND,
332 MSM_PM_STAT_COUNT
333};
334
335struct msm_pm_time_stats {
336 const char *name;
337 int64_t first_bucket_time;
338 int bucket[CONFIG_MSM_IDLE_STATS_BUCKET_COUNT];
339 int64_t min_time[CONFIG_MSM_IDLE_STATS_BUCKET_COUNT];
340 int64_t max_time[CONFIG_MSM_IDLE_STATS_BUCKET_COUNT];
341 int count;
342 int64_t total_time;
343};
344
345struct msm_pm_cpu_time_stats {
346 struct msm_pm_time_stats stats[MSM_PM_STAT_COUNT];
347};
348
349static DEFINE_SPINLOCK(msm_pm_stats_lock);
350static DEFINE_PER_CPU_SHARED_ALIGNED(
351 struct msm_pm_cpu_time_stats, msm_pm_stats);
352
353/*
354 * Add the given time data to the statistics collection.
355 */
/*
 * Add the given time data to the statistics collection.
 *
 * @id: which per-CPU statistic to update.
 * @t:  duration in nanoseconds.
 *
 * The duration lands in one of the exponentially sized histogram
 * buckets: the index advances once per CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT
 * doublings of first_bucket_time, and anything beyond the range is
 * lumped into the last bucket.
 */
static void msm_pm_add_stat(enum msm_pm_time_stats_id id, int64_t t)
{
	unsigned long flags;
	struct msm_pm_time_stats *stats;
	int64_t bt;
	int i;

	spin_lock_irqsave(&msm_pm_stats_lock, flags);
	stats = __get_cpu_var(msm_pm_stats).stats;

	stats[id].total_time += t;
	stats[id].count++;

	/* bt = t / first_bucket_time (do_div divides in place). */
	bt = t;
	do_div(bt, stats[id].first_bucket_time);

	if (bt < 1ULL << (CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT *
		(CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1)))
		i = DIV_ROUND_UP(fls((uint32_t)bt),
			CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT);
	else
		i = CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1;

	stats[id].bucket[i]++;

	/*
	 * max_time[i] == 0 doubles as a "no sample yet" marker, so the
	 * first sample in a bucket seeds min_time as well.
	 */
	if (t < stats[id].min_time[i] || !stats[id].max_time[i])
		stats[id].min_time[i] = t;
	if (t > stats[id].max_time[i])
		stats[id].max_time[i] = t;

	spin_unlock_irqrestore(&msm_pm_stats_lock, flags);
}
388
389/*
390 * Helper function of snprintf where buf is auto-incremented, size is auto-
391 * decremented, and there is no return value.
392 *
393 * NOTE: buf and size must be l-values (e.g. variables)
394 */
/*
 * Helper function of snprintf where buf is auto-incremented, size is auto-
 * decremented, and there is no return value.
 *
 * NOTE: buf and size must be l-values (e.g. variables)
 *
 * On truncation (snprintf needed >= size bytes) only size - 1 characters
 * plus the terminating NUL were actually written, so advance buf by
 * size - 1 -- advancing by size (as the old "ret > size" branch did, and
 * as the else branch did when ret == size) counted the NUL, or one byte
 * beyond it, as output.
 */
#define SNPRINTF(buf, size, format, ...) \
	do { \
		if (size > 0) { \
			int ret; \
			ret = snprintf(buf, size, format, ## __VA_ARGS__); \
			if (ret >= size) { \
				buf += size - 1; \
				size = 0; \
			} else { \
				buf += ret; \
				size -= ret; \
			} \
		} \
	} while (0)
409
410/*
411 * Write out the power management statistics.
412 */
/*
 * Write out the power management statistics.
 *
 * Legacy procfs read_proc protocol: @off is (ab)used as an index, not a
 * byte offset -- it encodes (cpu, stat) as off = cpu * MSM_PM_STAT_COUNT
 * + id, and *start is set to (char *)1 so the procfs core advances off
 * by the return value's worth of entries each call.  Callers must supply
 * a buffer of at least 1KB or nothing is emitted.
 */
static int msm_pm_read_proc
	(char *page, char **start, off_t off, int count, int *eof, void *data)
{
	unsigned int cpu = off / MSM_PM_STAT_COUNT;
	int id = off % MSM_PM_STAT_COUNT;
	char *p = page;

	if (count < 1024) {
		*start = (char *) 0;
		*eof = 0;
		return 0;
	}

	if (cpu < num_possible_cpus()) {
		unsigned long flags;
		struct msm_pm_time_stats *stats;
		int i;
		int64_t bucket_time;
		int64_t s;
		uint32_t ns;

		spin_lock_irqsave(&msm_pm_stats_lock, flags);
		stats = per_cpu(msm_pm_stats, cpu).stats;

		/* do_div leaves the quotient in s and returns the remainder. */
		s = stats[id].total_time;
		ns = do_div(s, NSEC_PER_SEC);
		SNPRINTF(p, count,
			"[cpu %u] %s:\n"
			"  count: %7d\n"
			"  total_time: %lld.%09u\n",
			cpu, stats[id].name,
			stats[id].count,
			s, ns);

		bucket_time = stats[id].first_bucket_time;
		for (i = 0; i < CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1; i++) {
			s = bucket_time;
			ns = do_div(s, NSEC_PER_SEC);
			SNPRINTF(p, count,
				"   <%6lld.%09u: %7d (%lld-%lld)\n",
				s, ns, stats[id].bucket[i],
				stats[id].min_time[i],
				stats[id].max_time[i]);

			bucket_time <<= CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT;
		}

		/*
		 * s/ns still hold the last "<" threshold, which is exactly
		 * the lower bound of the final catch-all bucket (i is now
		 * BUCKET_COUNT - 1).
		 */
		SNPRINTF(p, count, "  >=%6lld.%09u: %7d (%lld-%lld)\n",
			s, ns, stats[id].bucket[i],
			stats[id].min_time[i],
			stats[id].max_time[i]);

		*start = (char *) 1;
		*eof = (off + 1 >= MSM_PM_STAT_COUNT * num_possible_cpus());

		spin_unlock_irqrestore(&msm_pm_stats_lock, flags);
	}

	return p - page;
}
473#undef SNPRINTF
474
475#define MSM_PM_STATS_RESET "reset"
476
477/*
478 * Reset the power management statistics values.
479 */
/*
 * Reset the power management statistics values.
 *
 * Accepts only the literal command "reset"; clears histogram buckets,
 * min/max, count and total for every statistic on every CPU.  Returns
 * @count on success or a negative errno.
 */
static int msm_pm_write_proc(struct file *file, const char __user *buffer,
	unsigned long count, void *data)
{
	char buf[sizeof(MSM_PM_STATS_RESET)];
	int ret;
	unsigned long flags;
	unsigned int cpu;

	if (count < strlen(MSM_PM_STATS_RESET)) {
		ret = -EINVAL;
		goto write_proc_failed;
	}

	/* Only the command's own length is copied; buf stays unterminated
	 * but is compared with memcmp below, never as a C string. */
	if (copy_from_user(buf, buffer, strlen(MSM_PM_STATS_RESET))) {
		ret = -EFAULT;
		goto write_proc_failed;
	}

	if (memcmp(buf, MSM_PM_STATS_RESET, strlen(MSM_PM_STATS_RESET))) {
		ret = -EINVAL;
		goto write_proc_failed;
	}

	spin_lock_irqsave(&msm_pm_stats_lock, flags);
	for_each_possible_cpu(cpu) {
		struct msm_pm_time_stats *stats;
		int i;

		stats = per_cpu(msm_pm_stats, cpu).stats;
		for (i = 0; i < MSM_PM_STAT_COUNT; i++) {
			memset(stats[i].bucket,
				0, sizeof(stats[i].bucket));
			memset(stats[i].min_time,
				0, sizeof(stats[i].min_time));
			memset(stats[i].max_time,
				0, sizeof(stats[i].max_time));
			stats[i].count = 0;
			stats[i].total_time = 0;
		}
	}

	spin_unlock_irqrestore(&msm_pm_stats_lock, flags);
	return count;

write_proc_failed:
	return ret;
}
527#undef MSM_PM_STATS_RESET
528#endif /* CONFIG_MSM_IDLE_STATS */
529
530
531/******************************************************************************
532 * Configure Hardware before/after Low Power Mode
533 *****************************************************************************/
534
535/*
536 * Configure hardware registers in preparation for Apps power down.
537 */
/*
 * Configure hardware registers in preparation for Apps power down.
 * Intentionally empty on this target.
 */
static void msm_pm_config_hw_before_power_down(void)
{
}
542
543/*
544 * Clear hardware registers after Apps powers up.
545 */
/*
 * Clear hardware registers after Apps powers up.
 * Intentionally empty on this target.
 */
static void msm_pm_config_hw_after_power_up(void)
{
}
550
551/*
552 * Configure hardware registers in preparation for SWFI.
553 */
/*
 * Configure hardware registers in preparation for SWFI.
 * Intentionally empty on this target.
 */
static void msm_pm_config_hw_before_swfi(void)
{
}
558
559
560/******************************************************************************
561 * Suspend Max Sleep Time
562 *****************************************************************************/
563
564#ifdef CONFIG_MSM_SLEEP_TIME_OVERRIDE
565static int msm_pm_sleep_time_override;
566module_param_named(sleep_time_override,
567 msm_pm_sleep_time_override, int, S_IRUGO | S_IWUSR | S_IWGRP);
568#endif
569
570#define SCLK_HZ (32768)
571#define MSM_PM_SLEEP_TICK_LIMIT (0x6DDD000)
572
573static uint32_t msm_pm_max_sleep_time;
574
575/*
576 * Convert time from nanoseconds to slow clock ticks, then cap it to the
577 * specified limit
578 */
579static int64_t msm_pm_convert_and_cap_time(int64_t time_ns, int64_t limit)
580{
581 do_div(time_ns, NSEC_PER_SEC / SCLK_HZ);
582 return (time_ns > limit) ? limit : time_ns;
583}
584
585/*
586 * Set the sleep time for suspend. 0 means infinite sleep time.
587 */
588void msm_pm_set_max_sleep_time(int64_t max_sleep_time_ns)
589{
590 if (max_sleep_time_ns == 0) {
591 msm_pm_max_sleep_time = 0;
592 } else {
593 msm_pm_max_sleep_time = (uint32_t)msm_pm_convert_and_cap_time(
594 max_sleep_time_ns, MSM_PM_SLEEP_TICK_LIMIT);
595
596 if (msm_pm_max_sleep_time == 0)
597 msm_pm_max_sleep_time = 1;
598 }
599
600 if (msm_pm_debug_mask & MSM_PM_DEBUG_SUSPEND)
601 pr_info("%s: Requested %lld ns Giving %u sclk ticks\n",
602 __func__, max_sleep_time_ns, msm_pm_max_sleep_time);
603}
604EXPORT_SYMBOL(msm_pm_set_max_sleep_time);
605
606
607/******************************************************************************
608 *
609 *****************************************************************************/
610
611struct msm_pm_device {
612 unsigned int cpu;
613#ifdef CONFIG_HOTPLUG_CPU
614 struct completion cpu_killed;
615 unsigned int warm_boot;
616#endif
617};
618
619static DEFINE_PER_CPU_SHARED_ALIGNED(struct msm_pm_device, msm_pm_devices);
620static struct msm_rpmrs_limits *msm_pm_idle_rs_limits;
621
/*
 * Shallowest sleep mode: set up hardware for SWFI, then execute the
 * wait-for-interrupt idle instruction.
 */
static void msm_pm_swfi(void)
{
	msm_pm_config_hw_before_swfi();
	msm_arch_idle();
}
627
/*
 * Power-collapse this CPU via the SPM and resume through the warm boot
 * vector.
 *
 * @dev:        per-cpu PM bookkeeping for the collapsing CPU.
 * @from_idle:  collapse initiated from cpuidle rather than suspend or
 *              hotplug; selects the resume entry point.
 * @notify_rpm: have the SPM inform the RPM about this collapse.
 *
 * Returns true if the CPU actually lost power (msm_pm_collapse() came
 * back through the reset path), false if it fell straight through.
 */
static bool msm_pm_spm_power_collapse(
	struct msm_pm_device *dev, bool from_idle, bool notify_rpm)
{
	void *entry;
	bool collapsed = 0;
	int ret;

	if (MSM_PM_DEBUG_POWER_COLLAPSE & msm_pm_debug_mask)
		pr_info("CPU%u: %s: notify_rpm %d\n",
			dev->cpu, __func__, (int) notify_rpm);

	ret = msm_spm_set_low_power_mode(
		MSM_SPM_MODE_POWER_COLLAPSE, notify_rpm);
	WARN_ON(ret);

	/*
	 * CPU0, or any CPU collapsing from idle, resumes at
	 * msm_pm_collapse_exit; a secondary CPU brought back for hotplug
	 * re-enters through msm_secondary_startup.
	 */
	entry = (!dev->cpu || from_idle) ?
		msm_pm_collapse_exit : msm_secondary_startup;
	msm_pm_write_boot_vector(dev->cpu, virt_to_phys(entry));

	if (MSM_PM_DEBUG_RESET_VECTOR & msm_pm_debug_mask)
		pr_info("CPU%u: %s: program vector to %p\n",
			dev->cpu, __func__, entry);

#ifdef CONFIG_VFP
	vfp_flush_context();
#endif

	collapsed = msm_pm_collapse();

	if (collapsed) {
		/* Power really dropped: rebuild per-CPU state, reopen the
		 * GIC CPU interface (priority mask + enable), re-arm FIQs. */
#ifdef CONFIG_VFP
		vfp_reinit();
#endif
		cpu_init();
		writel(0xF0, MSM_QGIC_CPU_BASE + GIC_CPU_PRIMASK);
		writel(1, MSM_QGIC_CPU_BASE + GIC_CPU_CTRL);
		local_fiq_enable();
	}

	if (MSM_PM_DEBUG_POWER_COLLAPSE & msm_pm_debug_mask)
		pr_info("CPU%u: %s: msm_pm_collapse returned, collapsed %d\n",
			dev->cpu, __func__, collapsed);

	ret = msm_spm_set_low_power_mode(MSM_SPM_MODE_CLOCK_GATING, false);
	WARN_ON(ret);
	return collapsed;
}
675
/*
 * Standalone power collapse: collapse only this CPU, without notifying
 * the RPM.  AVS is disabled across the collapse and its delay settings
 * restored afterwards.  Returns whether power was really lost.
 */
static bool msm_pm_power_collapse_standalone(bool from_idle)
{
	struct msm_pm_device *dev = &__get_cpu_var(msm_pm_devices);
	unsigned int avsdscr_setting;
	bool collapsed;

	avsdscr_setting = avs_get_avsdscr();
	avs_disable();
	collapsed = msm_pm_spm_power_collapse(dev, from_idle, false);
	avs_reset_delays(avsdscr_setting);
	return collapsed;
}
688
/*
 * Full power collapse of this CPU with RPM notification: drop the CPU
 * clock and AVS, collapse via the SPM, then restore clock rate and
 * hardware state on resume.  Returns whether power was really lost.
 */
static bool msm_pm_power_collapse(bool from_idle)
{
	struct msm_pm_device *dev = &__get_cpu_var(msm_pm_devices);
	unsigned long saved_acpuclk_rate;
	unsigned int avsdscr_setting;
	bool collapsed;

	if (MSM_PM_DEBUG_POWER_COLLAPSE & msm_pm_debug_mask)
		pr_info("CPU%u: %s: idle %d\n",
			dev->cpu, __func__, (int)from_idle);

	msm_pm_config_hw_before_power_down();
	if (MSM_PM_DEBUG_POWER_COLLAPSE & msm_pm_debug_mask)
		pr_info("CPU%u: %s: pre power down\n", dev->cpu, __func__);

	avsdscr_setting = avs_get_avsdscr();
	avs_disable();

	/* A CPU going down for hotplug is already offline and has no
	 * clock rate worth saving. */
	if (cpu_online(dev->cpu))
		saved_acpuclk_rate = acpuclk_power_collapse();
	else
		saved_acpuclk_rate = 0;

	if (MSM_PM_DEBUG_CLOCK & msm_pm_debug_mask)
		pr_info("CPU%u: %s: change clock rate (old rate = %lu)\n",
			dev->cpu, __func__, saved_acpuclk_rate);

	collapsed = msm_pm_spm_power_collapse(dev, from_idle, true);

	if (MSM_PM_DEBUG_CLOCK & msm_pm_debug_mask)
		pr_info("CPU%u: %s: restore clock rate to %lu\n",
			dev->cpu, __func__, saved_acpuclk_rate);
	if (acpuclk_set_rate(dev->cpu, saved_acpuclk_rate, SETRATE_PC) < 0)
		pr_err("CPU%u: %s: failed to restore clock rate(%lu)\n",
			dev->cpu, __func__, saved_acpuclk_rate);

	avs_reset_delays(avsdscr_setting);
	msm_pm_config_hw_after_power_up();
	if (MSM_PM_DEBUG_POWER_COLLAPSE & msm_pm_debug_mask)
		pr_info("CPU%u: %s: post power up\n", dev->cpu, __func__);

	if (MSM_PM_DEBUG_POWER_COLLAPSE & msm_pm_debug_mask)
		pr_info("CPU%u: %s: return\n", dev->cpu, __func__);
	return collapsed;
}
734
735static irqreturn_t msm_pm_rpm_wakeup_interrupt(int irq, void *dev_id)
736{
737 if (dev_id != &msm_pm_rpm_wakeup_interrupt)
738 return IRQ_NONE;
739
740 return IRQ_HANDLED;
741}
742
743
744/******************************************************************************
745 * External Idle/Suspend Functions
746 *****************************************************************************/
747
/*
 * ARM arch idle hook; intentionally empty here -- low power modes are
 * entered through msm_pm_idle_enter() instead.
 */
void arch_idle(void)
{
}
752
/*
 * cpuidle "prepare" hook: decide, for each of @dev's states, whether it
 * may be used for the coming idle period, based on the per-mode enable
 * flags, online CPU count, idle wakelocks, outstanding RPM requests and
 * the lowest RPM resource limits meeting the current latency and
 * sleep-length constraints.  Disallowed states are flagged
 * CPUIDLE_FLAG_IGNORE.  Always returns 0.
 */
int msm_pm_idle_prepare(struct cpuidle_device *dev)
{
	uint32_t latency_us;
	uint32_t sleep_us;
	int i;

	latency_us = (uint32_t) pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
	sleep_us = (uint32_t) ktime_to_ns(tick_nohz_get_sleep_length());
	sleep_us = DIV_ROUND_UP(sleep_us, 1000);

	for (i = 0; i < dev->state_count; i++) {
		struct cpuidle_state *state = &dev->states[i];
		enum msm_pm_sleep_mode mode;
		bool allow;
		struct msm_rpmrs_limits *rs_limits = NULL;
		int idx;

		mode = (enum msm_pm_sleep_mode) state->driver_data;
		idx = MSM_PM_MODE(dev->cpu, mode);

		allow = msm_pm_modes[idx].idle_enabled &&
			msm_pm_modes[idx].idle_supported;

		/*
		 * The cases below deliberately cascade: full power collapse
		 * must also pass the standalone-PC and WFI checks.
		 */
		switch (mode) {
		case MSM_PM_SLEEP_MODE_POWER_COLLAPSE:
			if (!allow)
				break;

			/* Full power collapse only with a single core up. */
			if (num_online_cpus() > 1) {
				allow = false;
				break;
			}
#ifdef CONFIG_HAS_WAKELOCK
			if (has_wake_lock(WAKE_LOCK_IDLE)) {
				allow = false;
				break;
			}
#endif
			/* fall through */

		case MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE:
			if (!allow)
				break;

			/* CPU0 must not collapse with RPM work in flight. */
			if (!dev->cpu &&
				msm_rpm_local_request_is_outstanding()) {
				allow = false;
				break;
			}
			/* fall through */

		case MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT:
			if (!allow)
				break;

			rs_limits = msm_rpmrs_lowest_limits(true,
					mode, latency_us, sleep_us);

			if (MSM_PM_DEBUG_IDLE & msm_pm_debug_mask)
				pr_info("CPU%u: %s: %s, latency %uus, "
					"sleep %uus, limit %p\n",
					dev->cpu, __func__, state->desc,
					latency_us, sleep_us, rs_limits);

			if ((MSM_PM_DEBUG_IDLE_LIMITS & msm_pm_debug_mask) &&
					rs_limits)
				pr_info("CPU%u: %s: limit %p: "
					"pxo %d, l2_cache %d, "
					"vdd_mem %d, vdd_dig %d\n",
					dev->cpu, __func__, rs_limits,
					rs_limits->pxo,
					rs_limits->l2_cache,
					rs_limits->vdd_mem,
					rs_limits->vdd_dig);

			if (!rs_limits)
				allow = false;
			break;

		default:
			allow = false;
			break;
		}

		if (MSM_PM_DEBUG_IDLE & msm_pm_debug_mask)
			pr_info("CPU%u: %s: allow %s: %d\n",
				dev->cpu, __func__, state->desc, (int)allow);

		if (allow) {
			/* allow implies rs_limits != NULL (WFI case above). */
			state->flags &= ~CPUIDLE_FLAG_IGNORE;
			state->target_residency = 0;
			state->exit_latency = 0;
			state->power_usage = rs_limits->power[dev->cpu];

			/* Remember limits for msm_pm_idle_enter()'s PC path. */
			if (MSM_PM_SLEEP_MODE_POWER_COLLAPSE == mode)
				msm_pm_idle_rs_limits = rs_limits;
		} else {
			state->flags |= CPUIDLE_FLAG_IGNORE;
		}
	}

	return 0;
}
856
/*
 * cpuidle "enter" hook: execute the chosen sleep mode on this CPU.
 *
 * Returns the elapsed wall time in microseconds, or 0 if @sleep_mode was
 * not recognized.
 */
int msm_pm_idle_enter(enum msm_pm_sleep_mode sleep_mode)
{
	int64_t time;
#ifdef CONFIG_MSM_IDLE_STATS
	int exit_stat;
#endif

	if (MSM_PM_DEBUG_IDLE & msm_pm_debug_mask)
		pr_info("CPU%u: %s: mode %d\n",
			smp_processor_id(), __func__, sleep_mode);

	time = ktime_to_ns(ktime_get());

	switch (sleep_mode) {
	case MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT:
		msm_pm_swfi();
#ifdef CONFIG_MSM_IDLE_STATS
		exit_stat = MSM_PM_STAT_IDLE_WFI;
#endif
		break;

	case MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE:
		msm_pm_power_collapse_standalone(true);
#ifdef CONFIG_MSM_IDLE_STATS
		exit_stat = MSM_PM_STAT_IDLE_STANDALONE_POWER_COLLAPSE;
#endif
		break;

	case MSM_PM_SLEEP_MODE_POWER_COLLAPSE: {
		int64_t timer_expiration = msm_timer_enter_idle();
		bool timer_halted = false;
		uint32_t sleep_delay;
		int ret;
		int notify_rpm =
			(sleep_mode == MSM_PM_SLEEP_MODE_POWER_COLLAPSE);
		int collapsed;

		/* Bound the RPM sleep by the next timer expiration. */
		sleep_delay = (uint32_t) msm_pm_convert_and_cap_time(
			timer_expiration, MSM_PM_SLEEP_TICK_LIMIT);
		if (sleep_delay == 0) /* 0 would mean infinite time */
			sleep_delay = 1;

		/* Uses the limits cached by msm_pm_idle_prepare(). */
		ret = msm_rpmrs_enter_sleep(
			sleep_delay, msm_pm_idle_rs_limits, true, notify_rpm);
		if (!ret) {
			collapsed = msm_pm_power_collapse(true);
			timer_halted = true;

			msm_rpmrs_exit_sleep(msm_pm_idle_rs_limits, true,
					notify_rpm, collapsed);
		}

		msm_timer_exit_idle((int) timer_halted);
#ifdef CONFIG_MSM_IDLE_STATS
		exit_stat = MSM_PM_STAT_IDLE_POWER_COLLAPSE;
#endif
		break;
	}

	default:
		__WARN();
		goto cpuidle_enter_bail;
	}

	time = ktime_to_ns(ktime_get()) - time;
#ifdef CONFIG_MSM_IDLE_STATS
	msm_pm_add_stat(exit_stat, time);
#endif

	/* Convert ns to us for cpuidle accounting. */
	do_div(time, 1000);
	return (int) time;

cpuidle_enter_bail:
	return 0;
}
932
/*
 * platform_suspend_ops.enter: put the system to sleep using the deepest
 * suspend mode enabled for CPU0 (power collapse, then standalone power
 * collapse, then SWFI).  Must run on CPU0.  Always returns 0.
 */
static int msm_pm_enter(suspend_state_t state)
{
	bool allow[MSM_PM_SLEEP_MODE_NR];
	int i;

#ifdef CONFIG_MSM_IDLE_STATS
	int64_t period = 0;
	int64_t time = msm_timer_get_sclk_time(&period);
#endif

	if (MSM_PM_DEBUG_SUSPEND & msm_pm_debug_mask)
		pr_info("%s\n", __func__);

	if (smp_processor_id()) {
		__WARN();
		goto enter_exit;
	}


	for (i = 0; i < MSM_PM_SLEEP_MODE_NR; i++) {
		struct msm_pm_platform_data *mode;

		mode = &msm_pm_modes[MSM_PM_MODE(0, i)];
		allow[i] = mode->suspend_supported && mode->suspend_enabled;
	}

	if (allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE]) {
		struct msm_rpmrs_limits *rs_limits;
		int ret;

		if (MSM_PM_DEBUG_SUSPEND & msm_pm_debug_mask)
			pr_info("%s: power collapse\n", __func__);

		clock_debug_print_enabled();

#ifdef CONFIG_MSM_SLEEP_TIME_OVERRIDE
		/* One-shot debug override of the maximum sleep time. */
		if (msm_pm_sleep_time_override > 0) {
			int64_t ns = NSEC_PER_SEC *
				(int64_t) msm_pm_sleep_time_override;
			msm_pm_set_max_sleep_time(ns);
			msm_pm_sleep_time_override = 0;
		}
#endif /* CONFIG_MSM_SLEEP_TIME_OVERRIDE */

		if (MSM_PM_DEBUG_SUSPEND_LIMITS & msm_pm_debug_mask)
			msm_rpmrs_show_resources();

		/* -1/-1: no latency or sleep-length constraint in suspend. */
		rs_limits = msm_rpmrs_lowest_limits(false,
				MSM_PM_SLEEP_MODE_POWER_COLLAPSE, -1, -1);

		if ((MSM_PM_DEBUG_SUSPEND_LIMITS & msm_pm_debug_mask) &&
				rs_limits)
			pr_info("%s: limit %p: pxo %d, l2_cache %d, "
				"vdd_mem %d, vdd_dig %d\n",
				__func__, rs_limits,
				rs_limits->pxo, rs_limits->l2_cache,
				rs_limits->vdd_mem, rs_limits->vdd_dig);

		if (rs_limits) {
			ret = msm_rpmrs_enter_sleep(
				msm_pm_max_sleep_time, rs_limits, false, true);
			if (!ret) {
				int collapsed = msm_pm_power_collapse(false);
				msm_rpmrs_exit_sleep(rs_limits, false, true,
						collapsed);
			}
		} else {
			pr_err("%s: cannot find the lowest power limit\n",
				__func__);
		}

#ifdef CONFIG_MSM_IDLE_STATS
		/* Measure suspend time on the sclk, which keeps counting
		 * across the power collapse; handle counter wrap. */
		if (time != 0) {
			int64_t end_time = msm_timer_get_sclk_time(NULL);
			if (end_time != 0) {
				time = end_time - time;
				if (time < 0)
					time += period;
			} else
				time = 0;
		}

		msm_pm_add_stat(MSM_PM_STAT_SUSPEND, time);
#endif /* CONFIG_MSM_IDLE_STATS */
	} else if (allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE]) {
		if (MSM_PM_DEBUG_SUSPEND & msm_pm_debug_mask)
			pr_info("%s: standalone power collapse\n", __func__);
		msm_pm_power_collapse_standalone(false);
	} else if (allow[MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT]) {
		if (MSM_PM_DEBUG_SUSPEND & msm_pm_debug_mask)
			pr_info("%s: swfi\n", __func__);
		msm_pm_swfi();
	}


enter_exit:
	if (MSM_PM_DEBUG_SUSPEND & msm_pm_debug_mask)
		pr_info("%s: return\n", __func__);

	return 0;
}
1034
1035static struct platform_suspend_ops msm_pm_ops = {
1036 .enter = msm_pm_enter,
1037 .valid = suspend_valid_only_mem,
1038};
1039
1040#ifdef CONFIG_HOTPLUG_CPU
/*
 * CPU hotplug veto: CPU0 may never be taken offline; any other core may.
 * Returns 0 to allow the unplug, -EPERM to refuse it.
 */
int platform_cpu_disable(unsigned int cpu)
{
	if (cpu == 0)
		return -EPERM;

	return 0;
}
1045
/*
 * Wait (up to 5 seconds) for the dying CPU to signal cpu_killed from
 * platform_cpu_die().  Returns nonzero (remaining jiffies) on success,
 * 0 on timeout.
 */
int platform_cpu_kill(unsigned int cpu)
{
	struct completion *killed = &per_cpu(msm_pm_devices, cpu).cpu_killed;
	return wait_for_completion_timeout(killed, HZ * 5);
}
1051
/*
 * Final resting place of a hot-unplugged CPU: signal cpu_killed, then
 * loop in the deepest permitted low power mode until the boot CPU writes
 * this CPU's number into pen_release, which is the wake-up handshake.
 */
void platform_cpu_die(unsigned int cpu)
{
	bool allow[MSM_PM_SLEEP_MODE_NR];
	int i;

	if (unlikely(cpu != smp_processor_id())) {
		pr_crit("%s: running on %u, should be %u\n",
			__func__, smp_processor_id(), cpu);
		BUG();
	}

	/* Hotplug reuses the per-mode *suspend* enable flags. */
	for (i = 0; i < MSM_PM_SLEEP_MODE_NR; i++) {
		struct msm_pm_platform_data *mode;

		mode = &msm_pm_modes[MSM_PM_MODE(cpu, i)];
		allow[i] = mode->suspend_supported && mode->suspend_enabled;
	}

	if (MSM_PM_DEBUG_HOTPLUG & msm_pm_debug_mask)
		pr_notice("CPU%u: %s: shutting down cpu\n", cpu, __func__);
	complete(&__get_cpu_var(msm_pm_devices).cpu_killed);

	flush_cache_all();

	for (;;) {
		if (allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE])
			msm_pm_power_collapse(false);
		else if (allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE])
			msm_pm_power_collapse_standalone(false);
		else if (allow[MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT])
			msm_pm_swfi();

		if (pen_release == cpu) {
			/* OK, proper wakeup, we're done */
			break;
		}
	}

	pen_release = -1;
	pr_notice("CPU%u: %s: normal wakeup\n", cpu, __func__);
}
1093
/*
 * Called on a secondary CPU as it comes online.  The very first (cold)
 * boot needs no fix-up; on later warm boots after a power collapse,
 * reinitialize VFP state and return the SPM to clock gating.
 * Returns 0 on success.
 */
int msm_pm_platform_secondary_init(unsigned int cpu)
{
	int ret;
	struct msm_pm_device *dev = &__get_cpu_var(msm_pm_devices);

	if (!dev->warm_boot) {
		dev->warm_boot = 1;
		return 0;
	}
#ifdef CONFIG_VFP
	vfp_reinit();
#endif
	ret = msm_spm_set_low_power_mode(MSM_SPM_MODE_CLOCK_GATING, false);

	return ret;
}
1110#endif /* CONFIG_HOTPLUG_CPU */
1111
1112/******************************************************************************
1113 * Initialization routine
1114 *****************************************************************************/
1115
/*
 * One-time driver initialization:
 *  - build the section mappings needed to turn the MMU back on in
 *    msm_pm_collapse_exit(),
 *  - claim the RPM wakeup IRQ for CPU0 and mark it wake-capable,
 *  - register the warm boot entry with secure world (SCM),
 *  - set up idle statistics (procfs), sysfs mode controls, suspend ops
 *    and cpuidle.
 */
static int __init msm_pm_init(void)
{
	pgd_t *pc_pgd;
	pmd_t *pmd;
	unsigned long pmdval;
	unsigned int cpu;
#ifdef CONFIG_MSM_IDLE_STATS
	struct proc_dir_entry *d_entry;
#endif
	int ret;

	/* Page table for cores to come back up safely. */
	pc_pgd = pgd_alloc(&init_mm);
	if (!pc_pgd)
		return -ENOMEM;

	pmd = pmd_offset(pc_pgd +
		pgd_index(virt_to_phys(msm_pm_collapse_exit)),
		virt_to_phys(msm_pm_collapse_exit));
	pmdval = (virt_to_phys(msm_pm_collapse_exit) & PGDIR_MASK) |
		PMD_TYPE_SECT | PMD_SECT_AP_WRITE;
	pmd[0] = __pmd(pmdval);
	pmd[1] = __pmd(pmdval + (1 << (PGDIR_SHIFT - 1)));

	/* It is remotely possible that the code in msm_pm_collapse_exit()
	 * which turns on the MMU with this mapping is in the
	 * next even-numbered megabyte beyond the
	 * start of msm_pm_collapse_exit().
	 * Map this megabyte in as well.
	 */
	pmd[2] = __pmd(pmdval + (2 << (PGDIR_SHIFT - 1)));
	flush_pmd_entry(pmd);
	msm_pm_pc_pgd = virt_to_phys(pc_pgd);

	/* The handler's own address doubles as the dev_id cookie. */
	ret = request_irq(rpm_cpu0_wakeup_irq,
		msm_pm_rpm_wakeup_interrupt, IRQF_TRIGGER_RISING,
		"pm_drv", msm_pm_rpm_wakeup_interrupt);
	if (ret) {
		pr_err("%s: failed to request irq %u: %d\n",
			__func__, rpm_cpu0_wakeup_irq, ret);
		return ret;
	}

	ret = irq_set_irq_wake(rpm_cpu0_wakeup_irq, 1);
	if (ret) {
		pr_err("%s: failed to set wakeup irq %u: %d\n",
			__func__, rpm_cpu0_wakeup_irq, ret);
		return ret;
	}

	for_each_possible_cpu(cpu) {
		struct msm_pm_device *dev = &per_cpu(msm_pm_devices, cpu);

		dev->cpu = cpu;
#ifdef CONFIG_HOTPLUG_CPU
		init_completion(&dev->cpu_killed);
#endif
	}

	/* Tell secure world where warm boots should land. */
	ret = scm_set_boot_addr((void *)virt_to_phys(msm_pm_boot_entry),
		SCM_FLAG_WARMBOOT_CPU0 | SCM_FLAG_WARMBOOT_CPU1);
	if (ret) {
		pr_err("%s: failed to set up scm boot addr: %d\n",
			__func__, ret);
		return ret;
	}

#ifdef CONFIG_MSM_IDLE_STATS
	for_each_possible_cpu(cpu) {
		struct msm_pm_time_stats *stats =
			per_cpu(msm_pm_stats, cpu).stats;

		stats[MSM_PM_STAT_REQUESTED_IDLE].name = "idle-request";
		stats[MSM_PM_STAT_REQUESTED_IDLE].first_bucket_time =
			CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;

		stats[MSM_PM_STAT_IDLE_WFI].name = "idle-wfi";
		stats[MSM_PM_STAT_IDLE_WFI].first_bucket_time =
			CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;

		stats[MSM_PM_STAT_IDLE_STANDALONE_POWER_COLLAPSE].name =
			"idle-standalone-power-collapse";
		stats[MSM_PM_STAT_IDLE_STANDALONE_POWER_COLLAPSE].
			first_bucket_time = CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;

		stats[MSM_PM_STAT_IDLE_POWER_COLLAPSE].name =
			"idle-power-collapse";
		stats[MSM_PM_STAT_IDLE_POWER_COLLAPSE].first_bucket_time =
			CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;

		stats[MSM_PM_STAT_SUSPEND].name = "suspend";
		stats[MSM_PM_STAT_SUSPEND].first_bucket_time =
			CONFIG_MSM_SUSPEND_STATS_FIRST_BUCKET;
	}

	d_entry = create_proc_entry("msm_pm_stats",
		S_IRUGO | S_IWUSR | S_IWGRP, NULL);
	if (d_entry) {
		d_entry->read_proc = msm_pm_read_proc;
		d_entry->write_proc = msm_pm_write_proc;
		d_entry->data = NULL;
	}
#endif /* CONFIG_MSM_IDLE_STATS */

	msm_pm_mode_sysfs_add();
	msm_spm_allow_x_cpu_set_vdd(false);

	suspend_set_ops(&msm_pm_ops);
	msm_cpuidle_init();

	return 0;
}
1228
1229late_initcall(msm_pm_init);