/* arch/arm/mach-msm/pm2.c
 *
 * MSM Power Management Routines
 *
 * Copyright (C) 2007 Google, Inc.
 * Copyright (c) 2008-2012 The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pm_qos.h>
#include <linux/suspend.h>
#include <linux/io.h>
#include <linux/tick.h>
#include <linux/memory.h>
#include <mach/msm_iomap.h>
#include <mach/system.h>
#ifdef CONFIG_CPU_V7
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#endif
#ifdef CONFIG_CACHE_L2X0
#include <asm/hardware/cache-l2x0.h>
#endif
#ifdef CONFIG_VFP
#include <asm/vfp.h>
#endif

#ifdef CONFIG_MSM_MEMORY_LOW_POWER_MODE_SUSPEND_DEEP_POWER_DOWN
#include <mach/msm_migrate_pages.h>
#endif
#include <mach/socinfo.h>
#include <mach/proc_comm.h>
#include <asm/smp_scu.h>

#include "smd_private.h"
#include "smd_rpcrouter.h"
#include "acpuclock.h"
#include "clock.h"
#include "idle.h"
#include "irq.h"
#include "gpio.h"
#include "timer.h"
#include "pm.h"
#include "spm.h"
#include "sirc.h"
#include "pm-boot.h"
#include "devices-msm7x2xa.h"

/******************************************************************************
 * Debug Definitions
 *****************************************************************************/

enum {
        MSM_PM_DEBUG_SUSPEND = BIT(0),
        MSM_PM_DEBUG_POWER_COLLAPSE = BIT(1),
        MSM_PM_DEBUG_STATE = BIT(2),
        MSM_PM_DEBUG_CLOCK = BIT(3),
        MSM_PM_DEBUG_RESET_VECTOR = BIT(4),
        MSM_PM_DEBUG_SMSM_STATE = BIT(5),
        MSM_PM_DEBUG_IDLE = BIT(6),
        MSM_PM_DEBUG_HOTPLUG = BIT(7),
};

DEFINE_PER_CPU(int, power_collapsed);

static int msm_pm_debug_mask;
module_param_named(
        debug_mask, msm_pm_debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP
);
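
/*
 * Usage sketch: the debug output below is gated by this debug_mask
 * parameter; e.g. writing 3 (BIT(0) | BIT(1)) enables the suspend and
 * power-collapse messages.  Assuming this file builds as pm2.o, the
 * knob should appear as /sys/module/pm2/parameters/debug_mask.
 */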

#define MSM_PM_DPRINTK(mask, level, message, ...) \
        do { \
                if ((mask) & msm_pm_debug_mask) \
                        printk(level message, ## __VA_ARGS__); \
        } while (0)

#define MSM_PM_DEBUG_PRINT_STATE(tag) \
        do { \
                MSM_PM_DPRINTK(MSM_PM_DEBUG_STATE, \
                        KERN_INFO, "%s: " \
                        "APPS_CLK_SLEEP_EN %x, APPS_PWRDOWN %x, " \
                        "SMSM_POWER_MASTER_DEM %x, SMSM_MODEM_STATE %x, " \
                        "SMSM_APPS_DEM %x\n", \
                        tag, \
                        __raw_readl(APPS_CLK_SLEEP_EN), \
                        __raw_readl(APPS_PWRDOWN), \
                        smsm_get_state(SMSM_POWER_MASTER_DEM), \
                        smsm_get_state(SMSM_MODEM_STATE), \
                        smsm_get_state(SMSM_APPS_DEM)); \
        } while (0)

#define MSM_PM_DEBUG_PRINT_SLEEP_INFO() \
        do { \
                if (msm_pm_debug_mask & MSM_PM_DEBUG_SMSM_STATE) \
                        smsm_print_sleep_info(msm_pm_smem_data->sleep_time, \
                                msm_pm_smem_data->resources_used, \
                                msm_pm_smem_data->irq_mask, \
                                msm_pm_smem_data->wakeup_reason, \
                                msm_pm_smem_data->pending_irqs); \
        } while (0)


/******************************************************************************
 * Sleep Modes and Parameters
 *****************************************************************************/

Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700121static int msm_pm_idle_sleep_min_time = CONFIG_MSM7X00A_IDLE_SLEEP_MIN_TIME;
122module_param_named(
123 idle_sleep_min_time, msm_pm_idle_sleep_min_time,
124 int, S_IRUGO | S_IWUSR | S_IWGRP
125);
126
127enum {
128 MSM_PM_MODE_ATTR_SUSPEND,
129 MSM_PM_MODE_ATTR_IDLE,
130 MSM_PM_MODE_ATTR_LATENCY,
131 MSM_PM_MODE_ATTR_RESIDENCY,
132 MSM_PM_MODE_ATTR_NR,
133};
134
135static char *msm_pm_mode_attr_labels[MSM_PM_MODE_ATTR_NR] = {
136 [MSM_PM_MODE_ATTR_SUSPEND] = "suspend_enabled",
137 [MSM_PM_MODE_ATTR_IDLE] = "idle_enabled",
138 [MSM_PM_MODE_ATTR_LATENCY] = "latency",
139 [MSM_PM_MODE_ATTR_RESIDENCY] = "residency",
140};
141
142static char *msm_pm_sleep_mode_labels[MSM_PM_SLEEP_MODE_NR] = {
143 [MSM_PM_SLEEP_MODE_POWER_COLLAPSE_SUSPEND] = " ",
144 [MSM_PM_SLEEP_MODE_POWER_COLLAPSE] = "power_collapse",
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700145 [MSM_PM_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT] =
146 "ramp_down_and_wfi",
147 [MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT] = "wfi",
148 [MSM_PM_SLEEP_MODE_POWER_COLLAPSE_NO_XO_SHUTDOWN] =
149 "power_collapse_no_xo_shutdown",
150 [MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE] =
151 "standalone_power_collapse",
152};
153
154static struct msm_pm_platform_data *msm_pm_modes;
Murali Nalajala2a0bbda2012-03-28 12:12:54 +0530155static struct msm_pm_irq_calls *msm_pm_irq_extns;
Murali Nalajalaff723ec2012-07-13 16:54:40 +0530156static struct msm_pm_cpr_ops *msm_cpr_ops;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700157
Murali Nalajala0df9fee2012-01-12 15:26:09 +0530158struct msm_pm_kobj_attribute {
159 unsigned int cpu;
160 struct kobj_attribute ka;
161};
162
163#define GET_CPU_OF_ATTR(attr) \
164 (container_of(attr, struct msm_pm_kobj_attribute, ka)->cpu)
165
166struct msm_pm_sysfs_sleep_mode {
167 struct kobject *kobj;
168 struct attribute_group attr_group;
169 struct attribute *attrs[MSM_PM_MODE_ATTR_NR + 1];
170 struct msm_pm_kobj_attribute kas[MSM_PM_MODE_ATTR_NR];
171};
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700172
173/*
174 * Write out the attribute.
175 */
176static ssize_t msm_pm_mode_attr_show(
177 struct kobject *kobj, struct kobj_attribute *attr, char *buf)
178{
179 int ret = -EINVAL;
180 int i;
181
182 for (i = 0; i < MSM_PM_SLEEP_MODE_NR; i++) {
183 struct kernel_param kp;
Murali Nalajala0df9fee2012-01-12 15:26:09 +0530184 unsigned int cpu;
185 struct msm_pm_platform_data *mode;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700186
187 if (msm_pm_sleep_mode_labels[i] == NULL)
188 continue;
189
190 if (strcmp(kobj->name, msm_pm_sleep_mode_labels[i]))
191 continue;
192
Murali Nalajala0df9fee2012-01-12 15:26:09 +0530193 cpu = GET_CPU_OF_ATTR(attr);
194 mode = &msm_pm_modes[MSM_PM_MODE(cpu, i)];
195
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700196 if (!strcmp(attr->attr.name,
197 msm_pm_mode_attr_labels[MSM_PM_MODE_ATTR_SUSPEND])) {
Murali Nalajala0df9fee2012-01-12 15:26:09 +0530198 u32 arg = mode->suspend_enabled;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700199 kp.arg = &arg;
200 ret = param_get_ulong(buf, &kp);
201 } else if (!strcmp(attr->attr.name,
202 msm_pm_mode_attr_labels[MSM_PM_MODE_ATTR_IDLE])) {
Murali Nalajala0df9fee2012-01-12 15:26:09 +0530203 u32 arg = mode->idle_enabled;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700204 kp.arg = &arg;
205 ret = param_get_ulong(buf, &kp);
206 } else if (!strcmp(attr->attr.name,
207 msm_pm_mode_attr_labels[MSM_PM_MODE_ATTR_LATENCY])) {
Murali Nalajala0df9fee2012-01-12 15:26:09 +0530208 u32 arg = mode->latency;
209 kp.arg = &arg;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700210 ret = param_get_ulong(buf, &kp);
211 } else if (!strcmp(attr->attr.name,
212 msm_pm_mode_attr_labels[MSM_PM_MODE_ATTR_RESIDENCY])) {
Murali Nalajala0df9fee2012-01-12 15:26:09 +0530213 u32 arg = mode->residency;
214 kp.arg = &arg;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700215 ret = param_get_ulong(buf, &kp);
216 }
217
218 break;
219 }
220
221 if (ret > 0) {
Murali Nalajala0df9fee2012-01-12 15:26:09 +0530222 strlcat(buf, "\n", PAGE_SIZE);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700223 ret++;
224 }
225
226 return ret;
227}
228
229/*
230 * Read in the new attribute value.
231 */
232static ssize_t msm_pm_mode_attr_store(struct kobject *kobj,
233 struct kobj_attribute *attr, const char *buf, size_t count)
234{
235 int ret = -EINVAL;
236 int i;
237
238 for (i = 0; i < MSM_PM_SLEEP_MODE_NR; i++) {
239 struct kernel_param kp;
Murali Nalajala0df9fee2012-01-12 15:26:09 +0530240 unsigned int cpu;
241 struct msm_pm_platform_data *mode;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700242
243 if (msm_pm_sleep_mode_labels[i] == NULL)
244 continue;
245
246 if (strcmp(kobj->name, msm_pm_sleep_mode_labels[i]))
247 continue;
248
Murali Nalajala0df9fee2012-01-12 15:26:09 +0530249 cpu = GET_CPU_OF_ATTR(attr);
250 mode = &msm_pm_modes[MSM_PM_MODE(cpu, i)];
251
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700252 if (!strcmp(attr->attr.name,
253 msm_pm_mode_attr_labels[MSM_PM_MODE_ATTR_SUSPEND])) {
Murali Nalajala0df9fee2012-01-12 15:26:09 +0530254 kp.arg = &mode->suspend_enabled;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700255 ret = param_set_byte(buf, &kp);
256 } else if (!strcmp(attr->attr.name,
257 msm_pm_mode_attr_labels[MSM_PM_MODE_ATTR_IDLE])) {
Murali Nalajala0df9fee2012-01-12 15:26:09 +0530258 kp.arg = &mode->idle_enabled;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700259 ret = param_set_byte(buf, &kp);
260 } else if (!strcmp(attr->attr.name,
261 msm_pm_mode_attr_labels[MSM_PM_MODE_ATTR_LATENCY])) {
Murali Nalajala0df9fee2012-01-12 15:26:09 +0530262 kp.arg = &mode->latency;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700263 ret = param_set_ulong(buf, &kp);
264 } else if (!strcmp(attr->attr.name,
265 msm_pm_mode_attr_labels[MSM_PM_MODE_ATTR_RESIDENCY])) {
Murali Nalajala0df9fee2012-01-12 15:26:09 +0530266 kp.arg = &mode->residency;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700267 ret = param_set_ulong(buf, &kp);
268 }
269
270 break;
271 }
272
273 return ret ? ret : count;
274}
275
Murali Nalajala0df9fee2012-01-12 15:26:09 +0530276 /* Add sysfs entries for one cpu. */
277static int __init msm_pm_mode_sysfs_add_cpu(
278 unsigned int cpu, struct kobject *modes_kobj)
279{
280 char cpu_name[8];
281 struct kobject *cpu_kobj;
282 struct msm_pm_sysfs_sleep_mode *mode = NULL;
283 int i, j, k;
284 int ret;
285
286 snprintf(cpu_name, sizeof(cpu_name), "cpu%u", cpu);
287 cpu_kobj = kobject_create_and_add(cpu_name, modes_kobj);
288 if (!cpu_kobj) {
289 pr_err("%s: cannot create %s kobject\n", __func__, cpu_name);
290 ret = -ENOMEM;
291 goto mode_sysfs_add_cpu_exit;
292 }
293
294 for (i = 0; i < MSM_PM_SLEEP_MODE_NR; i++) {
295 int idx = MSM_PM_MODE(cpu, i);
296
297 if ((!msm_pm_modes[idx].suspend_supported) &&
298 (!msm_pm_modes[idx].idle_supported))
299 continue;
300
301 mode = kzalloc(sizeof(*mode), GFP_KERNEL);
302 if (!mode) {
303 pr_err("%s: cannot allocate memory for attributes\n",
304 __func__);
305 ret = -ENOMEM;
306 goto mode_sysfs_add_cpu_exit;
307 }
308
309 mode->kobj = kobject_create_and_add(
310 msm_pm_sleep_mode_labels[i], cpu_kobj);
311 if (!mode->kobj) {
312 pr_err("%s: cannot create kobject\n", __func__);
313 ret = -ENOMEM;
314 goto mode_sysfs_add_cpu_exit;
315 }
316
317 for (k = 0, j = 0; k < MSM_PM_MODE_ATTR_NR; k++) {
318 if ((k == MSM_PM_MODE_ATTR_IDLE) &&
319 !msm_pm_modes[idx].idle_supported)
320 continue;
321 if ((k == MSM_PM_MODE_ATTR_SUSPEND) &&
322 !msm_pm_modes[idx].suspend_supported)
323 continue;
324 mode->kas[j].cpu = cpu;
325 mode->kas[j].ka.attr.mode = 0644;
326 mode->kas[j].ka.show = msm_pm_mode_attr_show;
327 mode->kas[j].ka.store = msm_pm_mode_attr_store;
328 mode->kas[j].ka.attr.name = msm_pm_mode_attr_labels[k];
329 mode->attrs[j] = &mode->kas[j].ka.attr;
330 j++;
331 }
332 mode->attrs[j] = NULL;
333
334 mode->attr_group.attrs = mode->attrs;
335 ret = sysfs_create_group(mode->kobj, &mode->attr_group);
336 if (ret) {
337 printk(KERN_ERR
338 "%s: cannot create kobject attribute group\n",
339 __func__);
340 goto mode_sysfs_add_cpu_exit;
341 }
342 }
343
344 ret = 0;
345
346mode_sysfs_add_cpu_exit:
347 if (ret) {
348 if (mode && mode->kobj)
349 kobject_del(mode->kobj);
350 kfree(mode);
351 }
352
353 return ret;
354}
355
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700356/*
357 * Add sysfs entries for the sleep modes.
358 */
359static int __init msm_pm_mode_sysfs_add(void)
360{
361 struct kobject *module_kobj = NULL;
362 struct kobject *modes_kobj = NULL;
Murali Nalajala0df9fee2012-01-12 15:26:09 +0530363 unsigned int cpu;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700364 int ret;
365
366 module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
367 if (!module_kobj) {
368 printk(KERN_ERR "%s: cannot find kobject for module %s\n",
369 __func__, KBUILD_MODNAME);
370 ret = -ENOENT;
Murali Nalajala0df9fee2012-01-12 15:26:09 +0530371 goto mode_sysfs_add_exit;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700372 }
373
374 modes_kobj = kobject_create_and_add("modes", module_kobj);
375 if (!modes_kobj) {
376 printk(KERN_ERR "%s: cannot create modes kobject\n", __func__);
377 ret = -ENOMEM;
Murali Nalajala0df9fee2012-01-12 15:26:09 +0530378 goto mode_sysfs_add_exit;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700379 }
380
Murali Nalajala0df9fee2012-01-12 15:26:09 +0530381 for_each_possible_cpu(cpu) {
382 ret = msm_pm_mode_sysfs_add_cpu(cpu, modes_kobj);
383 if (ret)
384 goto mode_sysfs_add_exit;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700385 }
386
Murali Nalajala0df9fee2012-01-12 15:26:09 +0530387 ret = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700388
Murali Nalajala0df9fee2012-01-12 15:26:09 +0530389mode_sysfs_add_exit:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700390 return ret;
391}
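
/*
 * Resulting sysfs layout, roughly (assuming KBUILD_MODNAME is "pm2" and
 * the sleep-mode labels defined above):
 *
 *   /sys/module/pm2/modes/cpu0/power_collapse/suspend_enabled
 *   /sys/module/pm2/modes/cpu0/power_collapse/idle_enabled
 *   /sys/module/pm2/modes/cpu0/power_collapse/latency
 *   /sys/module/pm2/modes/cpu0/power_collapse/residency
 *
 * e.g. "echo 0 > .../cpu0/power_collapse/idle_enabled" keeps that cpu
 * from choosing power collapse in idle.
 */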
392
Stephen Boyd3f4bac22012-05-30 10:03:13 -0700393s32 msm_cpuidle_get_deep_idle_latency(void)
394{
395 int i = MSM_PM_MODE(0, MSM_PM_SLEEP_MODE_POWER_COLLAPSE_NO_XO_SHUTDOWN);
396 return msm_pm_modes[i].latency - 1;
397}
398
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700399void __init msm_pm_set_platform_data(
400 struct msm_pm_platform_data *data, int count)
401{
Murali Nalajala0df9fee2012-01-12 15:26:09 +0530402 BUG_ON(MSM_PM_SLEEP_MODE_NR * num_possible_cpus() > count);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700403 msm_pm_modes = data;
404}
405
Murali Nalajala2a0bbda2012-03-28 12:12:54 +0530406void __init msm_pm_set_irq_extns(struct msm_pm_irq_calls *irq_calls)
407{
408 /* sanity check */
409 BUG_ON(irq_calls == NULL || irq_calls->irq_pending == NULL ||
410 irq_calls->idle_sleep_allowed == NULL ||
411 irq_calls->enter_sleep1 == NULL ||
412 irq_calls->enter_sleep2 == NULL ||
413 irq_calls->exit_sleep1 == NULL ||
414 irq_calls->exit_sleep2 == NULL ||
415 irq_calls->exit_sleep3 == NULL);
416
417 msm_pm_irq_extns = irq_calls;
418}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700419
Murali Nalajalaff723ec2012-07-13 16:54:40 +0530420void __init msm_pm_set_cpr_ops(struct msm_pm_cpr_ops *ops)
421{
422 msm_cpr_ops = ops;
423}
424
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700425/******************************************************************************
426 * Sleep Limitations
427 *****************************************************************************/
428enum {
429 SLEEP_LIMIT_NONE = 0,
430 SLEEP_LIMIT_NO_TCXO_SHUTDOWN = 2,
431 SLEEP_LIMIT_MASK = 0x03,
432};
433
Praveen Chidambaram3895bde2012-05-14 19:42:40 +0530434static uint32_t msm_pm_sleep_limit = SLEEP_LIMIT_NONE;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700435#ifdef CONFIG_MSM_MEMORY_LOW_POWER_MODE
436enum {
437 SLEEP_RESOURCE_MEMORY_BIT0 = 0x0200,
438 SLEEP_RESOURCE_MEMORY_BIT1 = 0x0010,
439};
440#endif
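
/*
 * A note on the encoding used below: the low bits of sleep_limit carry
 * the TCXO shutdown restriction (SLEEP_LIMIT_MASK), while the
 * SLEEP_RESOURCE_MEMORY bits are ORed in as hints about the memory
 * low-power mode; the combined value is handed to the modem via
 * msm_pm_smem_data->resources_used.
 */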
441
442
443/******************************************************************************
444 * Configure Hardware for Power Down/Up
445 *****************************************************************************/
446
447#if defined(CONFIG_ARCH_MSM7X30)
Taniya Das298de8c2012-02-16 11:45:31 +0530448#define APPS_CLK_SLEEP_EN (MSM_APCS_GCC_BASE + 0x020)
449#define APPS_PWRDOWN (MSM_ACC0_BASE + 0x01c)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700450#define APPS_SECOP (MSM_TCSR_BASE + 0x038)
Murali Nalajala1bc1d7c2012-02-09 11:18:42 +0530451#define APPS_STANDBY_CTL NULL
452#else
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700453#define APPS_CLK_SLEEP_EN (MSM_CSR_BASE + 0x11c)
454#define APPS_PWRDOWN (MSM_CSR_BASE + 0x440)
455#define APPS_STANDBY_CTL (MSM_CSR_BASE + 0x108)
Murali Nalajala1bc1d7c2012-02-09 11:18:42 +0530456#define APPS_SECOP NULL
457#endif
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700458
459/*
460 * Configure hardware registers in preparation for Apps power down.
461 */
462static void msm_pm_config_hw_before_power_down(void)
463{
Murali Nalajala1bc1d7c2012-02-09 11:18:42 +0530464 if (cpu_is_msm7x30() || cpu_is_msm8x55()) {
Murali Nalajala1bc1d7c2012-02-09 11:18:42 +0530465 __raw_writel(4, APPS_SECOP);
Murali Nalajala1bc1d7c2012-02-09 11:18:42 +0530466 } else if (cpu_is_msm7x27()) {
467 __raw_writel(0x1f, APPS_CLK_SLEEP_EN);
Murali Nalajala1bc1d7c2012-02-09 11:18:42 +0530468 } else if (cpu_is_msm7x27a() || cpu_is_msm7x27aa() ||
Pankaj Kumarfee56a82012-04-17 14:26:49 +0530469 cpu_is_msm7x25a() || cpu_is_msm7x25aa() ||
470 cpu_is_msm7x25ab()) {
Murali Nalajala1bc1d7c2012-02-09 11:18:42 +0530471 __raw_writel(0x7, APPS_CLK_SLEEP_EN);
Murali Nalajalab87e88c2012-05-18 15:12:13 +0530472 } else if (cpu_is_qsd8x50()) {
Murali Nalajala1bc1d7c2012-02-09 11:18:42 +0530473 __raw_writel(0x1f, APPS_CLK_SLEEP_EN);
474 mb();
Murali Nalajala1bc1d7c2012-02-09 11:18:42 +0530475 __raw_writel(0, APPS_STANDBY_CTL);
Murali Nalajala1bc1d7c2012-02-09 11:18:42 +0530476 }
Murali Nalajalab87e88c2012-05-18 15:12:13 +0530477 mb();
478 __raw_writel(1, APPS_PWRDOWN);
479 mb();
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700480}
481
/*
 * Program the top CSR from the core0 context to put the secondary
 * core(s) into GDFS, as they are not running yet.
 */
static void msm_pm_configure_top_csr(void)
Anji jonnala1f2377c2012-03-27 14:35:55 +0530487{
Murali Nalajala1989f822012-10-19 19:50:03 +0530488 /*
489 * Enable TCSR for core
490 * Set reset bit for SPM
491 * Set CLK_OFF bit
492 * Set clamps bit
493 * Set power_up bit
494 * Disable TSCR for core
495 */
496 uint32_t bit_pos[][6] = {
497 /* c2 */
498 {17, 15, 13, 16, 14, 17},
499 /* c1 & c3*/
500 {22, 20, 18, 21, 19, 22},
501 };
502 uint32_t mpa5_cfg_ctl[2] = {0x30, 0x48};
Anji jonnala1f2377c2012-03-27 14:35:55 +0530503 void __iomem *base_ptr;
504 unsigned int value = 0;
Murali Nalajala1989f822012-10-19 19:50:03 +0530505 unsigned int cpu;
506 int i;
Anji jonnala1f2377c2012-03-27 14:35:55 +0530507
Murali Nalajala1989f822012-10-19 19:50:03 +0530508 /* Initialize all the SPM registers */
Anji jonnala1f2377c2012-03-27 14:35:55 +0530509 msm_spm_reinit();
510
Murali Nalajala1989f822012-10-19 19:50:03 +0530511 for_each_possible_cpu(cpu) {
512 /* skip for C0 */
513 if (!cpu)
514 continue;
Anji jonnala1f2377c2012-03-27 14:35:55 +0530515
Murali Nalajala1989f822012-10-19 19:50:03 +0530516 base_ptr = core_reset_base(cpu);
517 if (!base_ptr)
518 return;
Anji jonnala1f2377c2012-03-27 14:35:55 +0530519
Murali Nalajala1989f822012-10-19 19:50:03 +0530520 /* bring the core out of reset */
521 __raw_writel(0x3, base_ptr);
522 mb();
Anji jonnala1f2377c2012-03-27 14:35:55 +0530523
Murali Nalajala1989f822012-10-19 19:50:03 +0530524 /*
525 * i == 0, Enable TCSR for core
526 * i == 1, Set reset bit for SPM
527 * i == 2, Set CLK_OFF bit
528 * i == 3, Set clamps bit
529 * i == 4, Set power_up bit
530 */
531 for (i = 0; i < 5; i++) {
532 value = __raw_readl(MSM_CFG_CTL_BASE +
533 mpa5_cfg_ctl[cpu/2]);
534 value |= BIT(bit_pos[cpu%2][i]);
535 __raw_writel(value, MSM_CFG_CTL_BASE +
536 mpa5_cfg_ctl[cpu/2]);
537 mb();
538 }
Anji jonnala1f2377c2012-03-27 14:35:55 +0530539
Murali Nalajala1989f822012-10-19 19:50:03 +0530540 /* i == 5, Disable TCSR for core */
541 value = __raw_readl(MSM_CFG_CTL_BASE +
542 mpa5_cfg_ctl[cpu/2]);
543 value &= ~BIT(bit_pos[cpu%2][i]);
544 __raw_writel(value, MSM_CFG_CTL_BASE +
545 mpa5_cfg_ctl[cpu/2]);
546 mb();
Anji jonnala1f2377c2012-03-27 14:35:55 +0530547
Murali Nalajala1989f822012-10-19 19:50:03 +0530548 __raw_writel(0x0, base_ptr);
549 mb();
550 }
Anji jonnala1f2377c2012-03-27 14:35:55 +0530551}
552
553/*
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700554 * Clear hardware registers after Apps powers up.
555 */
556static void msm_pm_config_hw_after_power_up(void)
557{
Anji jonnala1f2377c2012-03-27 14:35:55 +0530558
Murali Nalajala1bc1d7c2012-02-09 11:18:42 +0530559 if (cpu_is_msm7x30() || cpu_is_msm8x55()) {
560 __raw_writel(0, APPS_SECOP);
561 mb();
562 __raw_writel(0, APPS_PWRDOWN);
563 mb();
564 msm_spm_reinit();
Utsab Bose4ed4ba12012-11-08 18:52:38 +0530565 } else if (cpu_is_msm8625() || cpu_is_msm8625q()) {
Murali Nalajala1bc1d7c2012-02-09 11:18:42 +0530566 __raw_writel(0, APPS_PWRDOWN);
567 mb();
Anji jonnala1f2377c2012-03-27 14:35:55 +0530568
Murali Nalajalad1def3a2012-11-19 17:11:22 +0530569 if (per_cpu(power_collapsed, 1)) {
Anji jonnala1f2377c2012-03-27 14:35:55 +0530570 /*
571 * enable the SCU while coming out of power
572 * collapse.
573 */
574 scu_enable(MSM_SCU_BASE);
575 /*
576 * Program the top csr to put the core1 into GDFS.
577 */
Murali Nalajala1989f822012-10-19 19:50:03 +0530578 msm_pm_configure_top_csr();
Anji jonnala1f2377c2012-03-27 14:35:55 +0530579 }
Murali Nalajalab87e88c2012-05-18 15:12:13 +0530580 } else {
581 __raw_writel(0, APPS_PWRDOWN);
582 mb();
583 __raw_writel(0, APPS_CLK_SLEEP_EN);
584 mb();
Murali Nalajala1bc1d7c2012-02-09 11:18:42 +0530585 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700586}
587
588/*
589 * Configure hardware registers in preparation for SWFI.
590 */
591static void msm_pm_config_hw_before_swfi(void)
592{
Murali Nalajala1bc1d7c2012-02-09 11:18:42 +0530593 if (cpu_is_qsd8x50()) {
594 __raw_writel(0x1f, APPS_CLK_SLEEP_EN);
595 mb();
596 } else if (cpu_is_msm7x27()) {
597 __raw_writel(0x0f, APPS_CLK_SLEEP_EN);
598 mb();
599 } else if (cpu_is_msm7x27a() || cpu_is_msm7x27aa() ||
Pankaj Kumarfee56a82012-04-17 14:26:49 +0530600 cpu_is_msm7x25a() || cpu_is_msm7x25aa() ||
601 cpu_is_msm7x25ab()) {
Murali Nalajala1bc1d7c2012-02-09 11:18:42 +0530602 __raw_writel(0x7, APPS_CLK_SLEEP_EN);
603 mb();
604 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700605}
606
607/*
608 * Respond to timing out waiting for Modem
609 *
610 * NOTE: The function never returns.
611 */
612static void msm_pm_timeout(void)
613{
614#if defined(CONFIG_MSM_PM_TIMEOUT_RESET_CHIP)
615 printk(KERN_EMERG "%s(): resetting chip\n", __func__);
616 msm_proc_comm(PCOM_RESET_CHIP_IMM, NULL, NULL);
617#elif defined(CONFIG_MSM_PM_TIMEOUT_RESET_MODEM)
618 printk(KERN_EMERG "%s(): resetting modem\n", __func__);
619 msm_proc_comm_reset_modem_now();
620#elif defined(CONFIG_MSM_PM_TIMEOUT_HALT)
621 printk(KERN_EMERG "%s(): halting\n", __func__);
622#endif
623 for (;;)
624 ;
625}
626
627
628/******************************************************************************
629 * State Polling Definitions
630 *****************************************************************************/
631
632struct msm_pm_polled_group {
633 uint32_t group_id;
634
635 uint32_t bits_all_set;
636 uint32_t bits_all_clear;
637 uint32_t bits_any_set;
638 uint32_t bits_any_clear;
639
640 uint32_t value_read;
641};
642
643/*
644 * Return true if all bits indicated by flag are set in source.
645 */
646static inline bool msm_pm_all_set(uint32_t source, uint32_t flag)
647{
648 return (source & flag) == flag;
649}
650
651/*
 * Return true if any bit indicated by flag is set in source.
653 */
654static inline bool msm_pm_any_set(uint32_t source, uint32_t flag)
655{
656 return !flag || (source & flag);
657}
658
659/*
660 * Return true if all bits indicated by flag are cleared in source.
661 */
662static inline bool msm_pm_all_clear(uint32_t source, uint32_t flag)
663{
664 return (~source & flag) == flag;
665}
666
667/*
 * Return true if any bit indicated by flag is cleared in source.
669 */
670static inline bool msm_pm_any_clear(uint32_t source, uint32_t flag)
671{
672 return !flag || (~source & flag);
673}
674
675/*
676 * Poll the shared memory states as indicated by the poll groups.
677 *
678 * nr_grps: number of groups in the array
679 * grps: array of groups
680 *
681 * The function returns when conditions specified by any of the poll
682 * groups become true. The conditions specified by a poll group are
683 * deemed true when 1) at least one bit from bits_any_set is set OR one
684 * bit from bits_any_clear is cleared; and 2) all bits in bits_all_set
685 * are set; and 3) all bits in bits_all_clear are cleared.
686 *
687 * Return value:
688 * >=0: index of the poll group whose conditions have become true
689 * -ETIMEDOUT: timed out
690 */
691static int msm_pm_poll_state(int nr_grps, struct msm_pm_polled_group *grps)
692{
693 int i, k;
694
695 for (i = 0; i < 50000; i++) {
696 for (k = 0; k < nr_grps; k++) {
697 bool all_set, all_clear;
698 bool any_set, any_clear;
699
700 grps[k].value_read = smsm_get_state(grps[k].group_id);
701
702 all_set = msm_pm_all_set(grps[k].value_read,
703 grps[k].bits_all_set);
704 all_clear = msm_pm_all_clear(grps[k].value_read,
705 grps[k].bits_all_clear);
706 any_set = msm_pm_any_set(grps[k].value_read,
707 grps[k].bits_any_set);
708 any_clear = msm_pm_any_clear(grps[k].value_read,
709 grps[k].bits_any_clear);
710
711 if (all_set && all_clear && (any_set || any_clear))
712 return k;
713 }
714 udelay(50);
715 }
716
717 printk(KERN_ERR "%s failed:\n", __func__);
718 for (k = 0; k < nr_grps; k++)
719 printk(KERN_ERR "(%x, %x, %x, %x) %x\n",
720 grps[k].bits_all_set, grps[k].bits_all_clear,
721 grps[k].bits_any_set, grps[k].bits_any_clear,
722 grps[k].value_read);
723
724 return -ETIMEDOUT;
725}
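
/*
 * Typical usage (see msm_pm_power_collapse() below): group 0 waits for
 * the power master (modem) to reach a DEM state such as RSA, group 1
 * watches SMSM_MODEM_STATE for SMSM_RESET, and the returned index tells
 * the caller whether the handshake advanced or the modem reset.
 */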
726
727
728/******************************************************************************
729 * Suspend Max Sleep Time
730 *****************************************************************************/
731
732#define SCLK_HZ (32768)
733#define MSM_PM_SLEEP_TICK_LIMIT (0x6DDD000)
734
735#ifdef CONFIG_MSM_SLEEP_TIME_OVERRIDE
736static int msm_pm_sleep_time_override;
737module_param_named(sleep_time_override,
738 msm_pm_sleep_time_override, int, S_IRUGO | S_IWUSR | S_IWGRP);
739#endif
740
741static uint32_t msm_pm_max_sleep_time;
742
743/*
744 * Convert time from nanoseconds to slow clock ticks, then cap it to the
745 * specified limit
746 */
747static int64_t msm_pm_convert_and_cap_time(int64_t time_ns, int64_t limit)
748{
749 do_div(time_ns, NSEC_PER_SEC / SCLK_HZ);
750 return (time_ns > limit) ? limit : time_ns;
751}
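
/*
 * Worked example: a 1 s (1,000,000,000 ns) request is divided by
 * NSEC_PER_SEC / SCLK_HZ (30517 with integer math), giving roughly
 * 32768 slow-clock ticks, which is then capped at 'limit'.
 */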
752
753/*
754 * Set the sleep time for suspend. 0 means infinite sleep time.
755 */
756void msm_pm_set_max_sleep_time(int64_t max_sleep_time_ns)
757{
758 unsigned long flags;
759
760 local_irq_save(flags);
761 if (max_sleep_time_ns == 0) {
762 msm_pm_max_sleep_time = 0;
763 } else {
764 msm_pm_max_sleep_time = (uint32_t)msm_pm_convert_and_cap_time(
765 max_sleep_time_ns, MSM_PM_SLEEP_TICK_LIMIT);
766
767 if (msm_pm_max_sleep_time == 0)
768 msm_pm_max_sleep_time = 1;
769 }
770
771 MSM_PM_DPRINTK(MSM_PM_DEBUG_SUSPEND, KERN_INFO,
772 "%s(): Requested %lld ns Giving %u sclk ticks\n", __func__,
773 max_sleep_time_ns, msm_pm_max_sleep_time);
774 local_irq_restore(flags);
775}
776EXPORT_SYMBOL(msm_pm_set_max_sleep_time);
777
778
779/******************************************************************************
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700780 * Shared Memory Bits
781 *****************************************************************************/
782
783#define DEM_MASTER_BITS_PER_CPU 6
784
785/* Power Master State Bits - Per CPU */
786#define DEM_MASTER_SMSM_RUN \
787 (0x01UL << (DEM_MASTER_BITS_PER_CPU * SMSM_APPS_STATE))
788#define DEM_MASTER_SMSM_RSA \
789 (0x02UL << (DEM_MASTER_BITS_PER_CPU * SMSM_APPS_STATE))
790#define DEM_MASTER_SMSM_PWRC_EARLY_EXIT \
791 (0x04UL << (DEM_MASTER_BITS_PER_CPU * SMSM_APPS_STATE))
792#define DEM_MASTER_SMSM_SLEEP_EXIT \
793 (0x08UL << (DEM_MASTER_BITS_PER_CPU * SMSM_APPS_STATE))
794#define DEM_MASTER_SMSM_READY \
795 (0x10UL << (DEM_MASTER_BITS_PER_CPU * SMSM_APPS_STATE))
796#define DEM_MASTER_SMSM_SLEEP \
797 (0x20UL << (DEM_MASTER_BITS_PER_CPU * SMSM_APPS_STATE))
798
799/* Power Slave State Bits */
800#define DEM_SLAVE_SMSM_RUN (0x0001)
801#define DEM_SLAVE_SMSM_PWRC (0x0002)
802#define DEM_SLAVE_SMSM_PWRC_DELAY (0x0004)
803#define DEM_SLAVE_SMSM_PWRC_EARLY_EXIT (0x0008)
804#define DEM_SLAVE_SMSM_WFPI (0x0010)
805#define DEM_SLAVE_SMSM_SLEEP (0x0020)
806#define DEM_SLAVE_SMSM_SLEEP_EXIT (0x0040)
807#define DEM_SLAVE_SMSM_MSGS_REDUCED (0x0080)
808#define DEM_SLAVE_SMSM_RESET (0x0100)
809#define DEM_SLAVE_SMSM_PWRC_SUSPEND (0x0200)
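
/*
 * A rough sketch of the Apps (slave) / Modem (master) handshake driven
 * by these bits in msm_pm_power_collapse():
 *
 *   RUN -> PWRC (| PWRC_SUSPEND)   wait for master RSA
 *       -> power collapse          on wakeup, wait for RSA or EARLY_EXIT
 *       -> WFPI                    wait for master RUN
 *       -> RUN
 *
 * Early exits fall back through PWRC_EARLY_EXIT before returning to RUN.
 */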
810
811
812/******************************************************************************
813 * Shared Memory Data
814 *****************************************************************************/
815
816#define DEM_MAX_PORT_NAME_LEN (20)
817
818struct msm_pm_smem_t {
819 uint32_t sleep_time;
820 uint32_t irq_mask;
821 uint32_t resources_used;
822 uint32_t reserved1;
823
824 uint32_t wakeup_reason;
825 uint32_t pending_irqs;
826 uint32_t rpc_prog;
827 uint32_t rpc_proc;
828 char smd_port_name[DEM_MAX_PORT_NAME_LEN];
829 uint32_t reserved2;
830};
831
832
833/******************************************************************************
834 *
835 *****************************************************************************/
836static struct msm_pm_smem_t *msm_pm_smem_data;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700837static atomic_t msm_pm_init_done = ATOMIC_INIT(0);
838
839static int msm_pm_modem_busy(void)
840{
841 if (!(smsm_get_state(SMSM_POWER_MASTER_DEM) & DEM_MASTER_SMSM_READY)) {
842 MSM_PM_DPRINTK(MSM_PM_DEBUG_POWER_COLLAPSE,
843 KERN_INFO, "%s(): master not ready\n", __func__);
844 return -EBUSY;
845 }
846
847 return 0;
848}
849
850/*
851 * Power collapse the Apps processor. This function executes the handshake
852 * protocol with Modem.
853 *
854 * Return value:
855 * -EAGAIN: modem reset occurred or early exit from power collapse
856 * -EBUSY: modem not ready for our power collapse -- no power loss
857 * -ETIMEDOUT: timed out waiting for modem's handshake -- no power loss
858 * 0: success
859 */
860static int msm_pm_power_collapse
861 (bool from_idle, uint32_t sleep_delay, uint32_t sleep_limit)
862{
863 struct msm_pm_polled_group state_grps[2];
864 unsigned long saved_acpuclk_rate;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700865 int collapsed = 0;
866 int ret;
Murali Nalajala07b04022012-04-10 16:00:49 +0530867 int val;
868 int modem_early_exit = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700869
870 MSM_PM_DPRINTK(MSM_PM_DEBUG_SUSPEND|MSM_PM_DEBUG_POWER_COLLAPSE,
871 KERN_INFO, "%s(): idle %d, delay %u, limit %u\n", __func__,
872 (int)from_idle, sleep_delay, sleep_limit);
873
874 if (!(smsm_get_state(SMSM_POWER_MASTER_DEM) & DEM_MASTER_SMSM_READY)) {
875 MSM_PM_DPRINTK(
876 MSM_PM_DEBUG_SUSPEND | MSM_PM_DEBUG_POWER_COLLAPSE,
877 KERN_INFO, "%s(): master not ready\n", __func__);
878 ret = -EBUSY;
879 goto power_collapse_bail;
880 }
881
882 memset(msm_pm_smem_data, 0, sizeof(*msm_pm_smem_data));
883
Utsab Bose4ed4ba12012-11-08 18:52:38 +0530884 if (cpu_is_msm8625() || cpu_is_msm8625q()) {
Murali Nalajala41786ab2012-03-06 10:47:32 +0530885 /* Program the SPM */
886 ret = msm_spm_set_low_power_mode(MSM_SPM_MODE_POWER_COLLAPSE,
887 false);
888 WARN_ON(ret);
889 }
890
Murali Nalajalaff723ec2012-07-13 16:54:40 +0530891 /* Call CPR suspend only for "idlePC" case */
892 if (msm_cpr_ops && from_idle)
893 msm_cpr_ops->cpr_suspend();
894
Murali Nalajala2a0bbda2012-03-28 12:12:54 +0530895 msm_pm_irq_extns->enter_sleep1(true, from_idle,
896 &msm_pm_smem_data->irq_mask);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700897
898 msm_pm_smem_data->sleep_time = sleep_delay;
899 msm_pm_smem_data->resources_used = sleep_limit;
900
Murali Nalajalaf3f79a42012-11-21 14:39:46 +0530901 saved_acpuclk_rate = acpuclk_power_collapse();
902 MSM_PM_DPRINTK(MSM_PM_DEBUG_CLOCK, KERN_INFO,
903 "%s(): change clock rate (old rate = %lu)\n", __func__,
904 saved_acpuclk_rate);
905
906 if (saved_acpuclk_rate == 0) {
907 ret = -EAGAIN;
908 goto acpu_set_clock_fail;
909 }
910
911 msm_sirc_enter_sleep();
912 msm_gpio_enter_sleep(from_idle);
913
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700914 /* Enter PWRC/PWRC_SUSPEND */
915
916 if (from_idle)
917 smsm_change_state(SMSM_APPS_DEM, DEM_SLAVE_SMSM_RUN,
918 DEM_SLAVE_SMSM_PWRC);
919 else
920 smsm_change_state(SMSM_APPS_DEM, DEM_SLAVE_SMSM_RUN,
921 DEM_SLAVE_SMSM_PWRC | DEM_SLAVE_SMSM_PWRC_SUSPEND);
922
923 MSM_PM_DEBUG_PRINT_STATE("msm_pm_power_collapse(): PWRC");
924 MSM_PM_DEBUG_PRINT_SLEEP_INFO();
925
926 memset(state_grps, 0, sizeof(state_grps));
927 state_grps[0].group_id = SMSM_POWER_MASTER_DEM;
928 state_grps[0].bits_all_set = DEM_MASTER_SMSM_RSA;
929 state_grps[1].group_id = SMSM_MODEM_STATE;
930 state_grps[1].bits_all_set = SMSM_RESET;
931
932 ret = msm_pm_poll_state(ARRAY_SIZE(state_grps), state_grps);
933
934 if (ret < 0) {
935 printk(KERN_EMERG "%s(): power collapse entry "
936 "timed out waiting for Modem's response\n", __func__);
937 msm_pm_timeout();
938 }
939
940 if (ret == 1) {
941 MSM_PM_DPRINTK(
942 MSM_PM_DEBUG_SUSPEND|MSM_PM_DEBUG_POWER_COLLAPSE,
943 KERN_INFO,
944 "%s(): msm_pm_poll_state detected Modem reset\n",
945 __func__);
946 goto power_collapse_early_exit;
947 }
948
949 /* DEM Master in RSA */
950
951 MSM_PM_DEBUG_PRINT_STATE("msm_pm_power_collapse(): PWRC RSA");
952
Murali Nalajala2a0bbda2012-03-28 12:12:54 +0530953 ret = msm_pm_irq_extns->enter_sleep2(true, from_idle);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700954 if (ret < 0) {
955 MSM_PM_DPRINTK(
956 MSM_PM_DEBUG_SUSPEND|MSM_PM_DEBUG_POWER_COLLAPSE,
957 KERN_INFO,
958 "%s(): msm_irq_enter_sleep2 aborted, %d\n", __func__,
959 ret);
960 goto power_collapse_early_exit;
961 }
962
963 msm_pm_config_hw_before_power_down();
964 MSM_PM_DEBUG_PRINT_STATE("msm_pm_power_collapse(): pre power down");
965
Maheshkumar Sivasubramanian8ccc16e2011-10-25 15:59:57 -0600966 msm_pm_boot_config_before_pc(smp_processor_id(),
967 virt_to_phys(msm_pm_collapse_exit));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700968
969#ifdef CONFIG_VFP
970 if (from_idle)
Steve Mucklef132c6c2012-06-06 18:30:57 -0700971 vfp_pm_suspend();
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700972#endif
973
974#ifdef CONFIG_CACHE_L2X0
Utsab Bose4ed4ba12012-11-08 18:52:38 +0530975 if (!cpu_is_msm8625() && !cpu_is_msm8625q())
Murali Nalajala73c13332012-05-15 11:30:59 +0530976 l2cc_suspend();
977 else
978 apps_power_collapse = 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700979#endif
980
981 collapsed = msm_pm_collapse();
Murali Nalajala07b04022012-04-10 16:00:49 +0530982
983 /*
984 * TBD: Currently recognise the MODEM early exit
985 * path by reading the MPA5_GDFS_CNT_VAL register.
986 */
Utsab Bose4ed4ba12012-11-08 18:52:38 +0530987 if (cpu_is_msm8625() || cpu_is_msm8625q()) {
Murali Nalajalad1def3a2012-11-19 17:11:22 +0530988 int cpu;
		/*
		 * On system reset the default value of MPA5_GDFS_CNT_VAL
		 * is 0x0; the modem later reprograms it to 0x00030004
		 * (0x000F0004 on 8x25Q).  After APPS goes through a power
		 * collapse and comes back out, the expected value of this
		 * register is always 0x00030004 (0x000F0004 on 8x25Q).
		 * If APPS instead sees 0x00030002 (0x000F0002 on 8x25Q),
		 * treat it as a modem early exit.
		 */
998 val = __raw_readl(MSM_CFG_CTL_BASE + 0x38);
Murali Nalajalaaa7310b2012-10-19 18:47:27 +0530999
1000 /* 8x25Q */
Utsab Bose4ed4ba12012-11-08 18:52:38 +05301001 if (cpu_is_msm8625q()) {
Murali Nalajala1989f822012-10-19 19:50:03 +05301002 if (val != 0x000F0002) {
Murali Nalajalad1def3a2012-11-19 17:11:22 +05301003 for_each_possible_cpu(cpu) {
1004 if (!cpu)
1005 continue;
1006 per_cpu(power_collapsed, cpu) = 1;
1007 }
Murali Nalajala1989f822012-10-19 19:50:03 +05301008 /*
1009 * override DBGNOPOWERDN and program the GDFS
1010 * count val
1011 */
1012 __raw_writel(0x000F0002,
1013 (MSM_CFG_CTL_BASE + 0x38));
1014 } else
Murali Nalajalaaa7310b2012-10-19 18:47:27 +05301015 modem_early_exit = 1;
1016 } else {
Murali Nalajala1989f822012-10-19 19:50:03 +05301017 if (val != 0x00030002) {
Murali Nalajalad1def3a2012-11-19 17:11:22 +05301018 for_each_possible_cpu(cpu) {
1019 if (!cpu)
1020 continue;
1021 per_cpu(power_collapsed, cpu) = 1;
1022 }
Murali Nalajala1989f822012-10-19 19:50:03 +05301023 /*
1024 * override DBGNOPOWERDN and program the GDFS
1025 * count val
1026 */
1027 __raw_writel(0x00030002,
1028 (MSM_CFG_CTL_BASE + 0x38));
1029 } else
Murali Nalajalaaa7310b2012-10-19 18:47:27 +05301030 modem_early_exit = 1;
1031 }
Murali Nalajala07b04022012-04-10 16:00:49 +05301032 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001033
1034#ifdef CONFIG_CACHE_L2X0
Utsab Bose4ed4ba12012-11-08 18:52:38 +05301035 if (!cpu_is_msm8625() && !cpu_is_msm8625q())
Murali Nalajala73c13332012-05-15 11:30:59 +05301036 l2cc_resume();
1037 else
1038 apps_power_collapse = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001039#endif
1040
Maheshkumar Sivasubramanian8ccc16e2011-10-25 15:59:57 -06001041 msm_pm_boot_config_after_pc(smp_processor_id());
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001042
1043 if (collapsed) {
1044#ifdef CONFIG_VFP
1045 if (from_idle)
Steve Mucklef132c6c2012-06-06 18:30:57 -07001046 vfp_pm_resume();
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001047#endif
1048 cpu_init();
1049 local_fiq_enable();
1050 }
1051
1052 MSM_PM_DPRINTK(MSM_PM_DEBUG_SUSPEND | MSM_PM_DEBUG_POWER_COLLAPSE,
1053 KERN_INFO,
1054 "%s(): msm_pm_collapse returned %d\n", __func__, collapsed);
1055
Murali Nalajala2a0bbda2012-03-28 12:12:54 +05301056 msm_pm_irq_extns->exit_sleep1(msm_pm_smem_data->irq_mask,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001057 msm_pm_smem_data->wakeup_reason,
1058 msm_pm_smem_data->pending_irqs);
1059
1060 msm_pm_config_hw_after_power_up();
1061 MSM_PM_DEBUG_PRINT_STATE("msm_pm_power_collapse(): post power up");
1062
1063 memset(state_grps, 0, sizeof(state_grps));
1064 state_grps[0].group_id = SMSM_POWER_MASTER_DEM;
1065 state_grps[0].bits_any_set =
1066 DEM_MASTER_SMSM_RSA | DEM_MASTER_SMSM_PWRC_EARLY_EXIT;
1067 state_grps[1].group_id = SMSM_MODEM_STATE;
1068 state_grps[1].bits_all_set = SMSM_RESET;
1069
1070 ret = msm_pm_poll_state(ARRAY_SIZE(state_grps), state_grps);
1071
1072 if (ret < 0) {
1073 printk(KERN_EMERG "%s(): power collapse exit "
1074 "timed out waiting for Modem's response\n", __func__);
1075 msm_pm_timeout();
1076 }
1077
1078 if (ret == 1) {
1079 MSM_PM_DPRINTK(
1080 MSM_PM_DEBUG_SUSPEND|MSM_PM_DEBUG_POWER_COLLAPSE,
1081 KERN_INFO,
1082 "%s(): msm_pm_poll_state detected Modem reset\n",
1083 __func__);
1084 goto power_collapse_early_exit;
1085 }
1086
1087 /* Sanity check */
Murali Nalajala07b04022012-04-10 16:00:49 +05301088 if (collapsed && !modem_early_exit) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001089 BUG_ON(!(state_grps[0].value_read & DEM_MASTER_SMSM_RSA));
1090 } else {
1091 BUG_ON(!(state_grps[0].value_read &
1092 DEM_MASTER_SMSM_PWRC_EARLY_EXIT));
1093 goto power_collapse_early_exit;
1094 }
1095
1096 /* Enter WFPI */
1097
1098 smsm_change_state(SMSM_APPS_DEM,
1099 DEM_SLAVE_SMSM_PWRC | DEM_SLAVE_SMSM_PWRC_SUSPEND,
1100 DEM_SLAVE_SMSM_WFPI);
1101
1102 MSM_PM_DEBUG_PRINT_STATE("msm_pm_power_collapse(): WFPI");
1103
1104 memset(state_grps, 0, sizeof(state_grps));
1105 state_grps[0].group_id = SMSM_POWER_MASTER_DEM;
1106 state_grps[0].bits_all_set = DEM_MASTER_SMSM_RUN;
1107 state_grps[1].group_id = SMSM_MODEM_STATE;
1108 state_grps[1].bits_all_set = SMSM_RESET;
1109
1110 ret = msm_pm_poll_state(ARRAY_SIZE(state_grps), state_grps);
1111
1112 if (ret < 0) {
1113 printk(KERN_EMERG "%s(): power collapse WFPI "
1114 "timed out waiting for Modem's response\n", __func__);
1115 msm_pm_timeout();
1116 }
1117
1118 if (ret == 1) {
1119 MSM_PM_DPRINTK(
1120 MSM_PM_DEBUG_SUSPEND|MSM_PM_DEBUG_POWER_COLLAPSE,
1121 KERN_INFO,
1122 "%s(): msm_pm_poll_state detected Modem reset\n",
1123 __func__);
1124 ret = -EAGAIN;
1125 goto power_collapse_restore_gpio_bail;
1126 }
1127
Murali Nalajalaf3f79a42012-11-21 14:39:46 +05301128 MSM_PM_DPRINTK(MSM_PM_DEBUG_CLOCK, KERN_INFO,
1129 "%s(): restore clock rate to %lu\n", __func__,
1130 saved_acpuclk_rate);
1131 if (acpuclk_set_rate(smp_processor_id(), saved_acpuclk_rate,
1132 SETRATE_PC) < 0)
1133 pr_err("%s(): failed to restore clock rate(%lu)\n",
1134 __func__, saved_acpuclk_rate);
1135
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001136 /* DEM Master == RUN */
1137
1138 MSM_PM_DEBUG_PRINT_STATE("msm_pm_power_collapse(): WFPI RUN");
1139 MSM_PM_DEBUG_PRINT_SLEEP_INFO();
1140
Murali Nalajala2a0bbda2012-03-28 12:12:54 +05301141 msm_pm_irq_extns->exit_sleep2(msm_pm_smem_data->irq_mask,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001142 msm_pm_smem_data->wakeup_reason,
1143 msm_pm_smem_data->pending_irqs);
Murali Nalajala2a0bbda2012-03-28 12:12:54 +05301144 msm_pm_irq_extns->exit_sleep3(msm_pm_smem_data->irq_mask,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001145 msm_pm_smem_data->wakeup_reason,
1146 msm_pm_smem_data->pending_irqs);
1147 msm_gpio_exit_sleep();
1148 msm_sirc_exit_sleep();
1149
1150 smsm_change_state(SMSM_APPS_DEM,
1151 DEM_SLAVE_SMSM_WFPI, DEM_SLAVE_SMSM_RUN);
1152
1153 MSM_PM_DEBUG_PRINT_STATE("msm_pm_power_collapse(): RUN");
1154
1155 smd_sleep_exit();
Murali Nalajala41786ab2012-03-06 10:47:32 +05301156
Utsab Bose4ed4ba12012-11-08 18:52:38 +05301157 if (cpu_is_msm8625() || cpu_is_msm8625q()) {
Murali Nalajala41786ab2012-03-06 10:47:32 +05301158 ret = msm_spm_set_low_power_mode(MSM_SPM_MODE_CLOCK_GATING,
1159 false);
1160 WARN_ON(ret);
1161 }
1162
Murali Nalajalaff723ec2012-07-13 16:54:40 +05301163 /* Call CPR resume only for "idlePC" case */
1164 if (msm_cpr_ops && from_idle)
1165 msm_cpr_ops->cpr_resume();
1166
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001167 return 0;
1168
1169power_collapse_early_exit:
1170 /* Enter PWRC_EARLY_EXIT */
1171
1172 smsm_change_state(SMSM_APPS_DEM,
1173 DEM_SLAVE_SMSM_PWRC | DEM_SLAVE_SMSM_PWRC_SUSPEND,
1174 DEM_SLAVE_SMSM_PWRC_EARLY_EXIT);
1175
1176 MSM_PM_DEBUG_PRINT_STATE("msm_pm_power_collapse(): EARLY_EXIT");
1177
1178 memset(state_grps, 0, sizeof(state_grps));
1179 state_grps[0].group_id = SMSM_POWER_MASTER_DEM;
1180 state_grps[0].bits_all_set = DEM_MASTER_SMSM_PWRC_EARLY_EXIT;
1181 state_grps[1].group_id = SMSM_MODEM_STATE;
1182 state_grps[1].bits_all_set = SMSM_RESET;
1183
1184 ret = msm_pm_poll_state(ARRAY_SIZE(state_grps), state_grps);
1185 MSM_PM_DEBUG_PRINT_STATE("msm_pm_power_collapse(): EARLY_EXIT EE");
1186
1187 if (ret < 0) {
1188 printk(KERN_EMERG "%s(): power collapse EARLY_EXIT "
1189 "timed out waiting for Modem's response\n", __func__);
1190 msm_pm_timeout();
1191 }
1192
1193 if (ret == 1) {
1194 MSM_PM_DPRINTK(
1195 MSM_PM_DEBUG_SUSPEND|MSM_PM_DEBUG_POWER_COLLAPSE,
1196 KERN_INFO,
1197 "%s(): msm_pm_poll_state detected Modem reset\n",
1198 __func__);
1199 }
1200
1201 /* DEM Master == RESET or PWRC_EARLY_EXIT */
1202
1203 ret = -EAGAIN;
1204
1205power_collapse_restore_gpio_bail:
1206 msm_gpio_exit_sleep();
1207 msm_sirc_exit_sleep();
1208
1209 /* Enter RUN */
1210 smsm_change_state(SMSM_APPS_DEM,
1211 DEM_SLAVE_SMSM_PWRC | DEM_SLAVE_SMSM_PWRC_SUSPEND |
1212 DEM_SLAVE_SMSM_PWRC_EARLY_EXIT, DEM_SLAVE_SMSM_RUN);
1213
1214 MSM_PM_DEBUG_PRINT_STATE("msm_pm_power_collapse(): RUN");
1215
Murali Nalajalaf3f79a42012-11-21 14:39:46 +05301216 MSM_PM_DPRINTK(MSM_PM_DEBUG_CLOCK, KERN_INFO,
1217 "%s(): restore clock rate to %lu\n", __func__,
1218 saved_acpuclk_rate);
1219 if (acpuclk_set_rate(smp_processor_id(), saved_acpuclk_rate,
1220 SETRATE_PC) < 0)
1221 pr_err("%s(): failed to restore clock rate(%lu)\n",
1222 __func__, saved_acpuclk_rate);
1223
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001224 if (collapsed)
1225 smd_sleep_exit();
1226
Murali Nalajalaf3f79a42012-11-21 14:39:46 +05301227acpu_set_clock_fail:
Murali Nalajalaff723ec2012-07-13 16:54:40 +05301228 if (msm_cpr_ops && from_idle)
1229 msm_cpr_ops->cpr_resume();
1230
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001231power_collapse_bail:
Utsab Bose4ed4ba12012-11-08 18:52:38 +05301232 if (cpu_is_msm8625() || cpu_is_msm8625q()) {
Murali Nalajala41786ab2012-03-06 10:47:32 +05301233 ret = msm_spm_set_low_power_mode(MSM_SPM_MODE_CLOCK_GATING,
1234 false);
1235 WARN_ON(ret);
1236 }
1237
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001238 return ret;
1239}
1240
1241/*
1242 * Power collapse the Apps processor without involving Modem.
1243 *
1244 * Return value:
1245 * 0: success
1246 */
Stephen Boydb29750d2012-02-21 01:21:32 -08001247static int __ref msm_pm_power_collapse_standalone(bool from_idle)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001248{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001249 int collapsed = 0;
1250 int ret;
Murali Nalajala41786ab2012-03-06 10:47:32 +05301251 void *entry;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001252
1253 MSM_PM_DPRINTK(MSM_PM_DEBUG_SUSPEND|MSM_PM_DEBUG_POWER_COLLAPSE,
1254 KERN_INFO, "%s()\n", __func__);
1255
1256 ret = msm_spm_set_low_power_mode(MSM_SPM_MODE_POWER_COLLAPSE, false);
1257 WARN_ON(ret);
1258
Murali Nalajala41786ab2012-03-06 10:47:32 +05301259 entry = (!smp_processor_id() || from_idle) ?
1260 msm_pm_collapse_exit : msm_secondary_startup;
1261
Maheshkumar Sivasubramanian8ccc16e2011-10-25 15:59:57 -06001262 msm_pm_boot_config_before_pc(smp_processor_id(),
Murali Nalajala41786ab2012-03-06 10:47:32 +05301263 virt_to_phys(entry));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001264
1265#ifdef CONFIG_VFP
Steve Mucklef132c6c2012-06-06 18:30:57 -07001266 vfp_pm_suspend();
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001267#endif
1268
1269#ifdef CONFIG_CACHE_L2X0
Utsab Bose4ed4ba12012-11-08 18:52:38 +05301270 if (!cpu_is_msm8625() && !cpu_is_msm8625q())
Taniya Das38a8c6e2012-05-09 20:34:39 +05301271 l2cc_suspend();
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001272#endif
1273
1274 collapsed = msm_pm_collapse();
1275
1276#ifdef CONFIG_CACHE_L2X0
Utsab Bose4ed4ba12012-11-08 18:52:38 +05301277 if (!cpu_is_msm8625() && !cpu_is_msm8625q())
Taniya Das38a8c6e2012-05-09 20:34:39 +05301278 l2cc_resume();
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001279#endif
1280
Maheshkumar Sivasubramanian8ccc16e2011-10-25 15:59:57 -06001281 msm_pm_boot_config_after_pc(smp_processor_id());
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001282
1283 if (collapsed) {
1284#ifdef CONFIG_VFP
Steve Mucklef132c6c2012-06-06 18:30:57 -07001285 vfp_pm_resume();
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001286#endif
1287 cpu_init();
1288 local_fiq_enable();
1289 }
1290
1291 MSM_PM_DPRINTK(MSM_PM_DEBUG_SUSPEND | MSM_PM_DEBUG_POWER_COLLAPSE,
1292 KERN_INFO,
1293 "%s(): msm_pm_collapse returned %d\n", __func__, collapsed);
1294
1295 ret = msm_spm_set_low_power_mode(MSM_SPM_MODE_CLOCK_GATING, false);
1296 WARN_ON(ret);
1297
Anji jonnalac6816222012-03-31 10:55:14 +05301298 return !collapsed;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001299}
1300
1301/*
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001302 * Bring the Apps processor to SWFI.
1303 *
1304 * Return value:
1305 * -EIO: could not ramp Apps processor clock
1306 * 0: success
1307 */
1308static int msm_pm_swfi(bool ramp_acpu)
1309{
1310 unsigned long saved_acpuclk_rate = 0;
1311
1312 if (ramp_acpu) {
1313 saved_acpuclk_rate = acpuclk_wait_for_irq();
1314 MSM_PM_DPRINTK(MSM_PM_DEBUG_CLOCK, KERN_INFO,
1315 "%s(): change clock rate (old rate = %lu)\n", __func__,
1316 saved_acpuclk_rate);
1317
1318 if (!saved_acpuclk_rate)
1319 return -EIO;
1320 }
1321
Utsab Bose4ed4ba12012-11-08 18:52:38 +05301322 if (!cpu_is_msm8625() && !cpu_is_msm8625q())
Murali Nalajala41786ab2012-03-06 10:47:32 +05301323 msm_pm_config_hw_before_swfi();
1324
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001325 msm_arch_idle();
1326
1327 if (ramp_acpu) {
1328 MSM_PM_DPRINTK(MSM_PM_DEBUG_CLOCK, KERN_INFO,
1329 "%s(): restore clock rate to %lu\n", __func__,
1330 saved_acpuclk_rate);
1331 if (acpuclk_set_rate(smp_processor_id(), saved_acpuclk_rate,
1332 SETRATE_SWFI) < 0)
1333 printk(KERN_ERR
1334 "%s(): failed to restore clock rate(%lu)\n",
1335 __func__, saved_acpuclk_rate);
1336 }
1337
1338 return 0;
1339}
1340
Praveen Chidambaram3895bde2012-05-14 19:42:40 +05301341static int64_t msm_pm_timer_enter_suspend(int64_t *period)
1342{
Anji Jonnalac02367a2012-07-01 02:56:11 +05301343 int64_t time = 0;
Praveen Chidambaram3895bde2012-05-14 19:42:40 +05301344
1345 time = msm_timer_get_sclk_time(period);
1346 if (!time)
1347 pr_err("%s: Unable to read sclk.\n", __func__);
1348 return time;
1349}
1350
1351static int64_t msm_pm_timer_exit_suspend(int64_t time, int64_t period)
1352{
1353
1354 if (time != 0) {
1355 int64_t end_time = msm_timer_get_sclk_time(NULL);
1356 if (end_time != 0) {
1357 time = end_time - time;
1358 if (time < 0)
1359 time += period;
1360 } else
1361 time = 0;
1362 }
1363 return time;
1364}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001365
1366/******************************************************************************
1367 * External Idle/Suspend Functions
1368 *****************************************************************************/
1369
1370/*
1371 * Put CPU in low power mode.
1372 */
1373void arch_idle(void)
1374{
1375 bool allow[MSM_PM_SLEEP_MODE_NR];
1376 uint32_t sleep_limit = SLEEP_LIMIT_NONE;
1377
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001378 int64_t timer_expiration;
Murali Nalajala8fda4492012-03-19 18:22:59 +05301379 int latency_qos;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001380 int ret;
1381 int i;
Murali Nalajala0df9fee2012-01-12 15:26:09 +05301382 unsigned int cpu;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001383 int64_t t1;
Murali Nalajalab86f3702012-03-30 17:54:57 +05301384 static DEFINE_PER_CPU(int64_t, t2);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001385 int exit_stat;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001386
1387 if (!atomic_read(&msm_pm_init_done))
1388 return;
1389
Murali Nalajala0df9fee2012-01-12 15:26:09 +05301390 cpu = smp_processor_id();
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001391 latency_qos = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
Murali Nalajala8fda4492012-03-19 18:22:59 +05301392 /* get the next timer expiration */
1393 timer_expiration = ktime_to_ns(tick_nohz_get_sleep_length());
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001394
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001395 t1 = ktime_to_ns(ktime_get());
Murali Nalajalab86f3702012-03-30 17:54:57 +05301396 msm_pm_add_stat(MSM_PM_STAT_NOT_IDLE, t1 - __get_cpu_var(t2));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001397 msm_pm_add_stat(MSM_PM_STAT_REQUESTED_IDLE, timer_expiration);
Murali Nalajala7744d162012-01-13 13:06:03 +05301398 exit_stat = MSM_PM_STAT_IDLE_SPIN;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001399
1400 for (i = 0; i < ARRAY_SIZE(allow); i++)
1401 allow[i] = true;
1402
Murali Nalajala41786ab2012-03-06 10:47:32 +05301403 if (num_online_cpus() > 1 ||
1404 (timer_expiration < msm_pm_idle_sleep_min_time) ||
Murali Nalajala2a0bbda2012-03-28 12:12:54 +05301405 !msm_pm_irq_extns->idle_sleep_allowed()) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001406 allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE] = false;
1407 allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_NO_XO_SHUTDOWN] = false;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001408 }
1409
1410 for (i = 0; i < ARRAY_SIZE(allow); i++) {
Murali Nalajala0df9fee2012-01-12 15:26:09 +05301411 struct msm_pm_platform_data *mode =
1412 &msm_pm_modes[MSM_PM_MODE(cpu, i)];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001413 if (!mode->idle_supported || !mode->idle_enabled ||
1414 mode->latency >= latency_qos ||
1415 mode->residency * 1000ULL >= timer_expiration)
1416 allow[i] = false;
1417 }
1418
1419 if (allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE] ||
1420 allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_NO_XO_SHUTDOWN]) {
1421 uint32_t wait_us = CONFIG_MSM_IDLE_WAIT_ON_MODEM;
1422 while (msm_pm_modem_busy() && wait_us) {
1423 if (wait_us > 100) {
1424 udelay(100);
1425 wait_us -= 100;
1426 } else {
1427 udelay(wait_us);
1428 wait_us = 0;
1429 }
1430 }
1431
1432 if (msm_pm_modem_busy()) {
1433 allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE] = false;
1434 allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_NO_XO_SHUTDOWN]
1435 = false;
1436 }
1437 }
1438
1439 MSM_PM_DPRINTK(MSM_PM_DEBUG_IDLE, KERN_INFO,
1440 "%s(): latency qos %d, next timer %lld, sleep limit %u\n",
1441 __func__, latency_qos, timer_expiration, sleep_limit);
1442
1443 for (i = 0; i < ARRAY_SIZE(allow); i++)
1444 MSM_PM_DPRINTK(MSM_PM_DEBUG_IDLE, KERN_INFO,
1445 "%s(): allow %s: %d\n", __func__,
1446 msm_pm_sleep_mode_labels[i], (int)allow[i]);
1447
1448 if (allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE] ||
1449 allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_NO_XO_SHUTDOWN]) {
		/* Sync the timer with SCLK; this is needed only for the
		 * modem-assisted power collapse case.
		 */
1453 int64_t next_timer_exp = msm_timer_enter_idle();
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001454 uint32_t sleep_delay;
Murali Nalajala8fda4492012-03-19 18:22:59 +05301455 bool low_power = false;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001456
1457 sleep_delay = (uint32_t) msm_pm_convert_and_cap_time(
Murali Nalajala8fda4492012-03-19 18:22:59 +05301458 next_timer_exp, MSM_PM_SLEEP_TICK_LIMIT);
1459
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001460 if (sleep_delay == 0) /* 0 would mean infinite time */
1461 sleep_delay = 1;
1462
1463 if (!allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE])
1464 sleep_limit = SLEEP_LIMIT_NO_TCXO_SHUTDOWN;
1465
1466#if defined(CONFIG_MSM_MEMORY_LOW_POWER_MODE_IDLE_ACTIVE)
1467 sleep_limit |= SLEEP_RESOURCE_MEMORY_BIT1;
1468#elif defined(CONFIG_MSM_MEMORY_LOW_POWER_MODE_IDLE_RETENTION)
1469 sleep_limit |= SLEEP_RESOURCE_MEMORY_BIT0;
1470#endif
1471
1472 ret = msm_pm_power_collapse(true, sleep_delay, sleep_limit);
1473 low_power = (ret != -EBUSY && ret != -ETIMEDOUT);
Murali Nalajala8fda4492012-03-19 18:22:59 +05301474 msm_timer_exit_idle(low_power);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001475
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001476 if (ret)
1477 exit_stat = MSM_PM_STAT_IDLE_FAILED_POWER_COLLAPSE;
1478 else {
1479 exit_stat = MSM_PM_STAT_IDLE_POWER_COLLAPSE;
1480 msm_pm_sleep_limit = sleep_limit;
1481 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001482 } else if (allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE]) {
Murali Nalajala41786ab2012-03-06 10:47:32 +05301483 ret = msm_pm_power_collapse_standalone(true);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001484 exit_stat = ret ?
1485 MSM_PM_STAT_IDLE_FAILED_STANDALONE_POWER_COLLAPSE :
1486 MSM_PM_STAT_IDLE_STANDALONE_POWER_COLLAPSE;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001487 } else if (allow[MSM_PM_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT]) {
1488 ret = msm_pm_swfi(true);
1489 if (ret)
Murali Nalajala2a0bbda2012-03-28 12:12:54 +05301490 while (!msm_pm_irq_extns->irq_pending())
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001491 udelay(1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001492 exit_stat = ret ? MSM_PM_STAT_IDLE_SPIN : MSM_PM_STAT_IDLE_WFI;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001493 } else if (allow[MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT]) {
1494 msm_pm_swfi(false);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001495 exit_stat = MSM_PM_STAT_IDLE_WFI;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001496 } else {
Murali Nalajala2a0bbda2012-03-28 12:12:54 +05301497 while (!msm_pm_irq_extns->irq_pending())
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001498 udelay(1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001499 exit_stat = MSM_PM_STAT_IDLE_SPIN;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001500 }
1501
Murali Nalajalab86f3702012-03-30 17:54:57 +05301502 __get_cpu_var(t2) = ktime_to_ns(ktime_get());
1503 msm_pm_add_stat(exit_stat, __get_cpu_var(t2) - t1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001504}
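
/*
 * A rough summary of the selection above: modem-assisted power collapse
 * (with or without XO shutdown) is preferred, then standalone power
 * collapse, then ramp-down + WFI, then plain WFI, and finally a
 * busy-wait until an interrupt is pending.
 */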
1505
1506/*
1507 * Suspend the Apps processor.
1508 *
1509 * Return value:
 * -EPERM: suspend attempted from a core other than core 0
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001511 * -EAGAIN: modem reset occurred or early exit from suspend
1512 * -EBUSY: modem not ready for our suspend
1513 * -EINVAL: invalid sleep mode
1514 * -EIO: could not ramp Apps processor clock
1515 * -ETIMEDOUT: timed out waiting for modem's handshake
1516 * 0: success
1517 */
1518static int msm_pm_enter(suspend_state_t state)
1519{
1520 bool allow[MSM_PM_SLEEP_MODE_NR];
1521 uint32_t sleep_limit = SLEEP_LIMIT_NONE;
Murali Nalajala41786ab2012-03-06 10:47:32 +05301522 int ret = -EPERM;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001523 int i;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001524 int64_t period = 0;
1525 int64_t time = 0;
1526
Murali Nalajala41786ab2012-03-06 10:47:32 +05301527	/* Must be executed by CORE0 */
1528 if (smp_processor_id()) {
1529 __WARN();
1530 goto suspend_exit;
1531 }
1532
Praveen Chidambaram3895bde2012-05-14 19:42:40 +05301533 time = msm_pm_timer_enter_suspend(&period);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001534
1535 MSM_PM_DPRINTK(MSM_PM_DEBUG_SUSPEND, KERN_INFO,
1536 "%s(): sleep limit %u\n", __func__, sleep_limit);
1537
1538 for (i = 0; i < ARRAY_SIZE(allow); i++)
1539 allow[i] = true;
1540
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001541 for (i = 0; i < ARRAY_SIZE(allow); i++) {
Murali Nalajala0df9fee2012-01-12 15:26:09 +05301542 struct msm_pm_platform_data *mode;
1543 mode = &msm_pm_modes[MSM_PM_MODE(0, i)];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001544 if (!mode->suspend_supported || !mode->suspend_enabled)
1545 allow[i] = false;
1546 }
1547
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001548 if (allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE] ||
1549 allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_NO_XO_SHUTDOWN]) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001550 enum msm_pm_time_stats_id id;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001551
1552 clock_debug_print_enabled();
1553
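		/*
		 * One-shot override of the maximum sleep time (only when
		 * CONFIG_MSM_SLEEP_TIME_OVERRIDE is enabled): apply it, then
		 * clear it so it affects just this suspend cycle.
		 */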
1554#ifdef CONFIG_MSM_SLEEP_TIME_OVERRIDE
1555 if (msm_pm_sleep_time_override > 0) {
1556 int64_t ns;
1557 ns = NSEC_PER_SEC * (int64_t)msm_pm_sleep_time_override;
1558 msm_pm_set_max_sleep_time(ns);
1559 msm_pm_sleep_time_override = 0;
1560 }
1561#endif
1562 if (!allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE])
1563 sleep_limit = SLEEP_LIMIT_NO_TCXO_SHUTDOWN;
1564
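		/*
		 * Fold the configured suspend-time memory low-power mode into
		 * the sleep limit; for deep power down, fall back to the
		 * retention bit unless the migratable pages are already
		 * offline.
		 */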
1565#if defined(CONFIG_MSM_MEMORY_LOW_POWER_MODE_SUSPEND_ACTIVE)
1566 sleep_limit |= SLEEP_RESOURCE_MEMORY_BIT1;
1567#elif defined(CONFIG_MSM_MEMORY_LOW_POWER_MODE_SUSPEND_RETENTION)
1568 sleep_limit |= SLEEP_RESOURCE_MEMORY_BIT0;
1569#elif defined(CONFIG_MSM_MEMORY_LOW_POWER_MODE_SUSPEND_DEEP_POWER_DOWN)
1570 if (get_msm_migrate_pages_status() != MEM_OFFLINE)
1571 sleep_limit |= SLEEP_RESOURCE_MEMORY_BIT0;
1572#endif
1573
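		/*
		 * Give the modem up to 30 * 500us (~15 ms) to become ready
		 * before starting the power-collapse handshake.
		 */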
1574 for (i = 0; i < 30 && msm_pm_modem_busy(); i++)
1575 udelay(500);
1576
1577 ret = msm_pm_power_collapse(
1578 false, msm_pm_max_sleep_time, sleep_limit);
1579
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001580 if (ret)
1581 id = MSM_PM_STAT_FAILED_SUSPEND;
1582 else {
1583 id = MSM_PM_STAT_SUSPEND;
1584 msm_pm_sleep_limit = sleep_limit;
1585 }
1586
Praveen Chidambaram3895bde2012-05-14 19:42:40 +05301587 time = msm_pm_timer_exit_suspend(time, period);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001588 msm_pm_add_stat(id, time);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001589 } else if (allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE]) {
Murali Nalajala41786ab2012-03-06 10:47:32 +05301590 ret = msm_pm_power_collapse_standalone(false);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001591 } else if (allow[MSM_PM_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT]) {
1592 ret = msm_pm_swfi(true);
1593 if (ret)
Murali Nalajala2a0bbda2012-03-28 12:12:54 +05301594 while (!msm_pm_irq_extns->irq_pending())
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001595 udelay(1);
1596 } else if (allow[MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT]) {
1597 msm_pm_swfi(false);
1598 }
1599
Murali Nalajala41786ab2012-03-06 10:47:32 +05301600suspend_exit:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001601 MSM_PM_DPRINTK(MSM_PM_DEBUG_SUSPEND, KERN_INFO,
1602 "%s(): return %d\n", __func__, ret);
1603
1604 return ret;
1605}
1606
1607static struct platform_suspend_ops msm_pm_ops = {
1608 .enter = msm_pm_enter,
1609 .valid = suspend_valid_only_mem,
1610};
1611
Murali Nalajalac89f2f32012-02-07 19:23:52 +05301612/* Hotplug the non-boot CPUs and put
	1613 * the cores into low-power mode.
	1614 */
1615void msm_pm_cpu_enter_lowpower(unsigned int cpu)
1616{
Murali Nalajalaa7efba12012-02-23 18:13:52 +05301617 bool allow[MSM_PM_SLEEP_MODE_NR];
1618 int i;
1619
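	/*
	 * Reuse this CPU's suspend-mode configuration to decide how deep
	 * the hot-unplugged core is allowed to go.
	 */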
1620 for (i = 0; i < MSM_PM_SLEEP_MODE_NR; i++) {
1621 struct msm_pm_platform_data *mode;
1622
1623 mode = &msm_pm_modes[MSM_PM_MODE(cpu, i)];
1624 allow[i] = mode->suspend_supported && mode->suspend_enabled;
1625 }
1626
1627 MSM_PM_DPRINTK(MSM_PM_DEBUG_HOTPLUG, KERN_INFO,
1628 "CPU%u: %s: shutting down cpu\n", cpu, __func__);
1629
1630 if (allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE]) {
1631 msm_pm_power_collapse_standalone(false);
1632 } else if (allow[MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT]) {
1633 msm_pm_swfi(false);
1634 } else {
1635 MSM_PM_DPRINTK(MSM_PM_DEBUG_HOTPLUG, KERN_INFO,
1636 "CPU%u: %s: shutting down failed!!!\n", cpu, __func__);
1637 }
Murali Nalajalac89f2f32012-02-07 19:23:52 +05301638}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001639
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001640/*
1641 * Initialize the power management subsystem.
1642 *
1643 * Return value:
1644 * -ENODEV: initialization failed
1645 * 0: success
1646 */
1647static int __init msm_pm_init(void)
1648{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001649 int ret;
Murali Nalajala93f29992012-03-21 15:59:27 +05301650 int val;
Praveen Chidambaram3895bde2012-05-14 19:42:40 +05301651 enum msm_pm_time_stats_id enable_stats[] = {
1652 MSM_PM_STAT_REQUESTED_IDLE,
1653 MSM_PM_STAT_IDLE_SPIN,
1654 MSM_PM_STAT_IDLE_WFI,
1655 MSM_PM_STAT_IDLE_STANDALONE_POWER_COLLAPSE,
1656 MSM_PM_STAT_IDLE_FAILED_STANDALONE_POWER_COLLAPSE,
1657 MSM_PM_STAT_IDLE_POWER_COLLAPSE,
1658 MSM_PM_STAT_IDLE_FAILED_POWER_COLLAPSE,
1659 MSM_PM_STAT_SUSPEND,
1660 MSM_PM_STAT_FAILED_SUSPEND,
1661 MSM_PM_STAT_NOT_IDLE,
1662 };
1663
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001664#ifdef CONFIG_CPU_V7
1665 pgd_t *pc_pgd;
1666 pmd_t *pmd;
1667 unsigned long pmdval;
Steve Mucklef132c6c2012-06-06 18:30:57 -07001668 unsigned long exit_phys;
1669
1670 exit_phys = virt_to_phys(msm_pm_collapse_exit);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001671
1672 /* Page table for cores to come back up safely. */
1673 pc_pgd = pgd_alloc(&init_mm);
1674 if (!pc_pgd)
1675 return -ENOMEM;
Steve Mucklef132c6c2012-06-06 18:30:57 -07001676 pmd = pmd_offset(pud_offset(pc_pgd + pgd_index(exit_phys), exit_phys),
1677 exit_phys);
1678 pmdval = (exit_phys & PGDIR_MASK) |
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001679 PMD_TYPE_SECT | PMD_SECT_AP_WRITE;
1680 pmd[0] = __pmd(pmdval);
1681 pmd[1] = __pmd(pmdval + (1 << (PGDIR_SHIFT - 1)));
1682
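	/*
	 * Carve out uncached, physically contiguous EBI memory to hold the
	 * saved state of every possible CPU across power collapse.
	 */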
Steve Mucklefcece052012-02-18 20:09:58 -08001683 msm_saved_state_phys =
1684 allocate_contiguous_ebi_nomap(CPU_SAVED_STATE_SIZE *
1685 num_possible_cpus(), 4);
1686 if (!msm_saved_state_phys)
1687 return -ENOMEM;
1688 msm_saved_state = ioremap_nocache(msm_saved_state_phys,
1689 CPU_SAVED_STATE_SIZE *
1690 num_possible_cpus());
1691 if (!msm_saved_state)
1692 return -ENOMEM;
1693
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001694 /* It is remotely possible that the code in msm_pm_collapse_exit()
1695 * which turns on the MMU with this mapping is in the
1696 * next even-numbered megabyte beyond the
1697 * start of msm_pm_collapse_exit().
1698 * Map this megabyte in as well.
1699 */
1700 pmd[2] = __pmd(pmdval + (2 << (PGDIR_SHIFT - 1)));
1701 flush_pmd_entry(pmd);
1702 msm_pm_pc_pgd = virt_to_phys(pc_pgd);
Steve Muckle730ad7a2012-02-21 15:26:37 -08001703 clean_caches((unsigned long)&msm_pm_pc_pgd, sizeof(msm_pm_pc_pgd),
1704 virt_to_phys(&msm_pm_pc_pgd));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001705#endif
1706
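	/*
	 * Shared-memory block used for the Apps/modem sleep handshake
	 * (the APPS DEM slave data area).
	 */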
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001707 msm_pm_smem_data = smem_alloc(SMEM_APPS_DEM_SLAVE_DATA,
1708 sizeof(*msm_pm_smem_data));
1709 if (msm_pm_smem_data == NULL) {
1710 printk(KERN_ERR "%s: failed to get smsm_data\n", __func__);
1711 return -ENODEV;
1712 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001713
1714 ret = msm_timer_init_time_sync(msm_pm_timeout);
1715 if (ret)
1716 return ret;
1717
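	/*
	 * Clear the SMSM interrupt mask for the power-master DEM entry
	 * (all mask bits cleared, none set).
	 */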
1718 ret = smsm_change_intr_mask(SMSM_POWER_MASTER_DEM, 0xFFFFFFFF, 0);
1719 if (ret) {
1720 printk(KERN_ERR "%s: failed to clear interrupt mask, %d\n",
1721 __func__, ret);
1722 return ret;
1723 }
1724
Utsab Bose4ed4ba12012-11-08 18:52:38 +05301725 if (cpu_is_msm8625() || cpu_is_msm8625q()) {
Murali Nalajala93f29992012-03-21 15:59:27 +05301726 target_type = TARGET_IS_8625;
1727 clean_caches((unsigned long)&target_type, sizeof(target_type),
1728 virt_to_phys(&target_type));
1729
Anji jonnalae644f8e2012-05-09 19:52:18 +05301730 /*
1731 * Configure the MPA5_GDFS_CNT_VAL register for
Murali Nalajalaaa7310b2012-10-19 18:47:27 +05301732 * DBGPWRUPEREQ_OVERRIDE[19:16] = Override the
Anji jonnalae644f8e2012-05-09 19:52:18 +05301733 * DBGNOPOWERDN for each cpu.
1734 * MPA5_GDFS_CNT_VAL[9:0] = Delay counter for
1735 * GDFS control.
Murali Nalajala93f29992012-03-21 15:59:27 +05301736 */
Utsab Bose4ed4ba12012-11-08 18:52:38 +05301737 if (cpu_is_msm8625q())
Murali Nalajalaaa7310b2012-10-19 18:47:27 +05301738 val = 0x000F0002;
1739 else
1740 val = 0x00030002;
1741
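		/*
		 * Presumably one override bit per core in [19:16]: 0xF for the
		 * quad-core 8625Q, 0x3 for the dual-core parts; [9:0] sets the
		 * GDFS delay counter to 2 in both cases.
		 */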
Murali Nalajala93f29992012-03-21 15:59:27 +05301742 __raw_writel(val, (MSM_CFG_CTL_BASE + 0x38));
Murali Nalajala73c13332012-05-15 11:30:59 +05301743
1744 l2x0_base_addr = MSM_L2CC_BASE;
Murali Nalajala93f29992012-03-21 15:59:27 +05301745 }
1746
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001747#ifdef CONFIG_MSM_MEMORY_LOW_POWER_MODE
	1748	/* The wakeup_reason field is overloaded during initialization
	1749	   to signal the Modem that Apps will control the low-power modes
	1750	   of the memory.
	1751	 */
1752 msm_pm_smem_data->wakeup_reason = 1;
1753 smsm_change_state(SMSM_APPS_DEM, 0, DEM_SLAVE_SMSM_RUN);
1754#endif
1755
1756 BUG_ON(msm_pm_modes == NULL);
1757
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001758 suspend_set_ops(&msm_pm_ops);
1759
1760 msm_pm_mode_sysfs_add();
Praveen Chidambaram3895bde2012-05-14 19:42:40 +05301761 msm_pm_add_stats(enable_stats, ARRAY_SIZE(enable_stats));
Murali Nalajala558c0ce2012-03-29 19:42:08 +05301762
1763 atomic_set(&msm_pm_init_done, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001764 return 0;
1765}
1766
1767late_initcall_sync(msm_pm_init);