Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001/* arch/arm/mach-msm/pm2.c
2 *
3 * MSM Power Management Routines
4 *
5 * Copyright (C) 2007 Google, Inc.
Murali Nalajala0df9fee2012-01-12 15:26:09 +05306 * Copyright (c) 2008-2012 Code Aurora Forum. All rights reserved.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007 *
8 * This software is licensed under the terms of the GNU General Public
9 * License version 2, as published by the Free Software Foundation, and
10 * may be copied, distributed, and modified under those terms.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 */
18
19#include <linux/module.h>
20#include <linux/kernel.h>
21#include <linux/clk.h>
22#include <linux/delay.h>
23#include <linux/init.h>
Steve Mucklef132c6c2012-06-06 18:30:57 -070024#include <linux/pm_qos.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070025#include <linux/suspend.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070026#include <linux/io.h>
Murali Nalajala8fda4492012-03-19 18:22:59 +053027#include <linux/tick.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070028#include <linux/memory.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070029#include <mach/msm_iomap.h>
30#include <mach/system.h>
31#ifdef CONFIG_CPU_V7
32#include <asm/pgtable.h>
33#include <asm/pgalloc.h>
34#endif
35#ifdef CONFIG_CACHE_L2X0
36#include <asm/hardware/cache-l2x0.h>
37#endif
38#ifdef CONFIG_VFP
39#include <asm/vfp.h>
40#endif
41
42#ifdef CONFIG_MSM_MEMORY_LOW_POWER_MODE_SUSPEND_DEEP_POWER_DOWN
43#include <mach/msm_migrate_pages.h>
44#endif
Murali Nalajala41786ab2012-03-06 10:47:32 +053045#include <mach/socinfo.h>
Steve Mucklef132c6c2012-06-06 18:30:57 -070046#include <mach/proc_comm.h>
Anji jonnala1f2377c2012-03-27 14:35:55 +053047#include <asm/smp_scu.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070048
49#include "smd_private.h"
50#include "smd_rpcrouter.h"
51#include "acpuclock.h"
52#include "clock.h"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070053#include "idle.h"
54#include "irq.h"
55#include "gpio.h"
56#include "timer.h"
Matt Wagantall7cca4642012-02-01 16:43:24 -080057#include "pm.h"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070058#include "spm.h"
59#include "sirc.h"
Maheshkumar Sivasubramanian8ccc16e2011-10-25 15:59:57 -060060#include "pm-boot.h"
Murali Nalajala19d33a22012-05-18 14:11:19 +053061#include "devices-msm7x2xa.h"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070062
63/******************************************************************************
64 * Debug Definitions
65 *****************************************************************************/
66
67enum {
Murali Nalajalaa7efba12012-02-23 18:13:52 +053068 MSM_PM_DEBUG_SUSPEND = BIT(0),
69 MSM_PM_DEBUG_POWER_COLLAPSE = BIT(1),
70 MSM_PM_DEBUG_STATE = BIT(2),
71 MSM_PM_DEBUG_CLOCK = BIT(3),
72 MSM_PM_DEBUG_RESET_VECTOR = BIT(4),
73 MSM_PM_DEBUG_SMSM_STATE = BIT(5),
74 MSM_PM_DEBUG_IDLE = BIT(6),
75 MSM_PM_DEBUG_HOTPLUG = BIT(7),
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070076};
77
78static int msm_pm_debug_mask;
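/*
 * Non-zero on MSM8625 once the Apps cores have actually power collapsed;
 * msm_pm_config_hw_after_power_up() checks this to re-enable the SCU and
 * reprogram the top CSR for core1.
 */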
Taniya Dase30a6b22012-03-20 11:37:45 +053079int power_collapsed;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070080module_param_named(
81 debug_mask, msm_pm_debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP
82);
83
84#define MSM_PM_DPRINTK(mask, level, message, ...) \
85 do { \
86 if ((mask) & msm_pm_debug_mask) \
87 printk(level message, ## __VA_ARGS__); \
88 } while (0)
89
90#define MSM_PM_DEBUG_PRINT_STATE(tag) \
91 do { \
92 MSM_PM_DPRINTK(MSM_PM_DEBUG_STATE, \
93 KERN_INFO, "%s: " \
94 "APPS_CLK_SLEEP_EN %x, APPS_PWRDOWN %x, " \
95 "SMSM_POWER_MASTER_DEM %x, SMSM_MODEM_STATE %x, " \
96 "SMSM_APPS_DEM %x\n", \
97 tag, \
98 __raw_readl(APPS_CLK_SLEEP_EN), \
99 __raw_readl(APPS_PWRDOWN), \
100 smsm_get_state(SMSM_POWER_MASTER_DEM), \
101 smsm_get_state(SMSM_MODEM_STATE), \
102 smsm_get_state(SMSM_APPS_DEM)); \
103 } while (0)
104
105#define MSM_PM_DEBUG_PRINT_SLEEP_INFO() \
106 do { \
107 if (msm_pm_debug_mask & MSM_PM_DEBUG_SMSM_STATE) \
108 smsm_print_sleep_info(msm_pm_smem_data->sleep_time, \
109 msm_pm_smem_data->resources_used, \
110 msm_pm_smem_data->irq_mask, \
111 msm_pm_smem_data->wakeup_reason, \
112 msm_pm_smem_data->pending_irqs); \
113 } while (0)
114
115
116/******************************************************************************
117 * Sleep Modes and Parameters
118 *****************************************************************************/
119
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700120static int msm_pm_idle_sleep_min_time = CONFIG_MSM7X00A_IDLE_SLEEP_MIN_TIME;
121module_param_named(
122 idle_sleep_min_time, msm_pm_idle_sleep_min_time,
123 int, S_IRUGO | S_IWUSR | S_IWGRP
124);
125
126enum {
127 MSM_PM_MODE_ATTR_SUSPEND,
128 MSM_PM_MODE_ATTR_IDLE,
129 MSM_PM_MODE_ATTR_LATENCY,
130 MSM_PM_MODE_ATTR_RESIDENCY,
131 MSM_PM_MODE_ATTR_NR,
132};
133
134static char *msm_pm_mode_attr_labels[MSM_PM_MODE_ATTR_NR] = {
135 [MSM_PM_MODE_ATTR_SUSPEND] = "suspend_enabled",
136 [MSM_PM_MODE_ATTR_IDLE] = "idle_enabled",
137 [MSM_PM_MODE_ATTR_LATENCY] = "latency",
138 [MSM_PM_MODE_ATTR_RESIDENCY] = "residency",
139};
140
141static char *msm_pm_sleep_mode_labels[MSM_PM_SLEEP_MODE_NR] = {
142 [MSM_PM_SLEEP_MODE_POWER_COLLAPSE_SUSPEND] = " ",
143 [MSM_PM_SLEEP_MODE_POWER_COLLAPSE] = "power_collapse",
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700144 [MSM_PM_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT] =
145 "ramp_down_and_wfi",
146 [MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT] = "wfi",
147 [MSM_PM_SLEEP_MODE_POWER_COLLAPSE_NO_XO_SHUTDOWN] =
148 "power_collapse_no_xo_shutdown",
149 [MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE] =
150 "standalone_power_collapse",
151};
152
153static struct msm_pm_platform_data *msm_pm_modes;
Murali Nalajala2a0bbda2012-03-28 12:12:54 +0530154static struct msm_pm_irq_calls *msm_pm_irq_extns;
Murali Nalajalaff723ec2012-07-13 16:54:40 +0530155static struct msm_pm_cpr_ops *msm_cpr_ops;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700156
Murali Nalajala0df9fee2012-01-12 15:26:09 +0530157struct msm_pm_kobj_attribute {
158 unsigned int cpu;
159 struct kobj_attribute ka;
160};
161
162#define GET_CPU_OF_ATTR(attr) \
163 (container_of(attr, struct msm_pm_kobj_attribute, ka)->cpu)
164
165struct msm_pm_sysfs_sleep_mode {
166 struct kobject *kobj;
167 struct attribute_group attr_group;
168 struct attribute *attrs[MSM_PM_MODE_ATTR_NR + 1];
169 struct msm_pm_kobj_attribute kas[MSM_PM_MODE_ATTR_NR];
170};
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700171
172/*
173 * Write out the attribute.
174 */
175static ssize_t msm_pm_mode_attr_show(
176 struct kobject *kobj, struct kobj_attribute *attr, char *buf)
177{
178 int ret = -EINVAL;
179 int i;
180
181 for (i = 0; i < MSM_PM_SLEEP_MODE_NR; i++) {
182 struct kernel_param kp;
Murali Nalajala0df9fee2012-01-12 15:26:09 +0530183 unsigned int cpu;
184 struct msm_pm_platform_data *mode;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700185
186 if (msm_pm_sleep_mode_labels[i] == NULL)
187 continue;
188
189 if (strcmp(kobj->name, msm_pm_sleep_mode_labels[i]))
190 continue;
191
Murali Nalajala0df9fee2012-01-12 15:26:09 +0530192 cpu = GET_CPU_OF_ATTR(attr);
193 mode = &msm_pm_modes[MSM_PM_MODE(cpu, i)];
194
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700195 if (!strcmp(attr->attr.name,
196 msm_pm_mode_attr_labels[MSM_PM_MODE_ATTR_SUSPEND])) {
Murali Nalajala0df9fee2012-01-12 15:26:09 +0530197 u32 arg = mode->suspend_enabled;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700198 kp.arg = &arg;
199 ret = param_get_ulong(buf, &kp);
200 } else if (!strcmp(attr->attr.name,
201 msm_pm_mode_attr_labels[MSM_PM_MODE_ATTR_IDLE])) {
Murali Nalajala0df9fee2012-01-12 15:26:09 +0530202 u32 arg = mode->idle_enabled;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700203 kp.arg = &arg;
204 ret = param_get_ulong(buf, &kp);
205 } else if (!strcmp(attr->attr.name,
206 msm_pm_mode_attr_labels[MSM_PM_MODE_ATTR_LATENCY])) {
Murali Nalajala0df9fee2012-01-12 15:26:09 +0530207 u32 arg = mode->latency;
208 kp.arg = &arg;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700209 ret = param_get_ulong(buf, &kp);
210 } else if (!strcmp(attr->attr.name,
211 msm_pm_mode_attr_labels[MSM_PM_MODE_ATTR_RESIDENCY])) {
Murali Nalajala0df9fee2012-01-12 15:26:09 +0530212 u32 arg = mode->residency;
213 kp.arg = &arg;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700214 ret = param_get_ulong(buf, &kp);
215 }
216
217 break;
218 }
219
220 if (ret > 0) {
Murali Nalajala0df9fee2012-01-12 15:26:09 +0530221 strlcat(buf, "\n", PAGE_SIZE);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700222 ret++;
223 }
224
225 return ret;
226}
227
228/*
229 * Read in the new attribute value.
230 */
231static ssize_t msm_pm_mode_attr_store(struct kobject *kobj,
232 struct kobj_attribute *attr, const char *buf, size_t count)
233{
234 int ret = -EINVAL;
235 int i;
236
237 for (i = 0; i < MSM_PM_SLEEP_MODE_NR; i++) {
238 struct kernel_param kp;
Murali Nalajala0df9fee2012-01-12 15:26:09 +0530239 unsigned int cpu;
240 struct msm_pm_platform_data *mode;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700241
242 if (msm_pm_sleep_mode_labels[i] == NULL)
243 continue;
244
245 if (strcmp(kobj->name, msm_pm_sleep_mode_labels[i]))
246 continue;
247
Murali Nalajala0df9fee2012-01-12 15:26:09 +0530248 cpu = GET_CPU_OF_ATTR(attr);
249 mode = &msm_pm_modes[MSM_PM_MODE(cpu, i)];
250
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700251 if (!strcmp(attr->attr.name,
252 msm_pm_mode_attr_labels[MSM_PM_MODE_ATTR_SUSPEND])) {
Murali Nalajala0df9fee2012-01-12 15:26:09 +0530253 kp.arg = &mode->suspend_enabled;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700254 ret = param_set_byte(buf, &kp);
255 } else if (!strcmp(attr->attr.name,
256 msm_pm_mode_attr_labels[MSM_PM_MODE_ATTR_IDLE])) {
Murali Nalajala0df9fee2012-01-12 15:26:09 +0530257 kp.arg = &mode->idle_enabled;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700258 ret = param_set_byte(buf, &kp);
259 } else if (!strcmp(attr->attr.name,
260 msm_pm_mode_attr_labels[MSM_PM_MODE_ATTR_LATENCY])) {
Murali Nalajala0df9fee2012-01-12 15:26:09 +0530261 kp.arg = &mode->latency;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700262 ret = param_set_ulong(buf, &kp);
263 } else if (!strcmp(attr->attr.name,
264 msm_pm_mode_attr_labels[MSM_PM_MODE_ATTR_RESIDENCY])) {
Murali Nalajala0df9fee2012-01-12 15:26:09 +0530265 kp.arg = &mode->residency;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700266 ret = param_set_ulong(buf, &kp);
267 }
268
269 break;
270 }
271
272 return ret ? ret : count;
273}
274
Murali Nalajala0df9fee2012-01-12 15:26:09 +0530275 /* Add sysfs entries for one cpu. */
276static int __init msm_pm_mode_sysfs_add_cpu(
277 unsigned int cpu, struct kobject *modes_kobj)
278{
279 char cpu_name[8];
280 struct kobject *cpu_kobj;
281 struct msm_pm_sysfs_sleep_mode *mode = NULL;
282 int i, j, k;
283 int ret;
284
285 snprintf(cpu_name, sizeof(cpu_name), "cpu%u", cpu);
286 cpu_kobj = kobject_create_and_add(cpu_name, modes_kobj);
287 if (!cpu_kobj) {
288 pr_err("%s: cannot create %s kobject\n", __func__, cpu_name);
289 ret = -ENOMEM;
290 goto mode_sysfs_add_cpu_exit;
291 }
292
293 for (i = 0; i < MSM_PM_SLEEP_MODE_NR; i++) {
294 int idx = MSM_PM_MODE(cpu, i);
295
296 if ((!msm_pm_modes[idx].suspend_supported) &&
297 (!msm_pm_modes[idx].idle_supported))
298 continue;
299
300 mode = kzalloc(sizeof(*mode), GFP_KERNEL);
301 if (!mode) {
302 pr_err("%s: cannot allocate memory for attributes\n",
303 __func__);
304 ret = -ENOMEM;
305 goto mode_sysfs_add_cpu_exit;
306 }
307
308 mode->kobj = kobject_create_and_add(
309 msm_pm_sleep_mode_labels[i], cpu_kobj);
310 if (!mode->kobj) {
311 pr_err("%s: cannot create kobject\n", __func__);
312 ret = -ENOMEM;
313 goto mode_sysfs_add_cpu_exit;
314 }
315
316 for (k = 0, j = 0; k < MSM_PM_MODE_ATTR_NR; k++) {
317 if ((k == MSM_PM_MODE_ATTR_IDLE) &&
318 !msm_pm_modes[idx].idle_supported)
319 continue;
320 if ((k == MSM_PM_MODE_ATTR_SUSPEND) &&
321 !msm_pm_modes[idx].suspend_supported)
322 continue;
323 mode->kas[j].cpu = cpu;
324 mode->kas[j].ka.attr.mode = 0644;
325 mode->kas[j].ka.show = msm_pm_mode_attr_show;
326 mode->kas[j].ka.store = msm_pm_mode_attr_store;
327 mode->kas[j].ka.attr.name = msm_pm_mode_attr_labels[k];
328 mode->attrs[j] = &mode->kas[j].ka.attr;
329 j++;
330 }
331 mode->attrs[j] = NULL;
332
333 mode->attr_group.attrs = mode->attrs;
334 ret = sysfs_create_group(mode->kobj, &mode->attr_group);
335 if (ret) {
336 printk(KERN_ERR
337 "%s: cannot create kobject attribute group\n",
338 __func__);
339 goto mode_sysfs_add_cpu_exit;
340 }
341 }
342
343 ret = 0;
344
345mode_sysfs_add_cpu_exit:
346 if (ret) {
347 if (mode && mode->kobj)
348 kobject_del(mode->kobj);
349 kfree(mode);
350 }
351
352 return ret;
353}
354
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700355/*
356 * Add sysfs entries for the sleep modes.
357 */
358static int __init msm_pm_mode_sysfs_add(void)
359{
360 struct kobject *module_kobj = NULL;
361 struct kobject *modes_kobj = NULL;
Murali Nalajala0df9fee2012-01-12 15:26:09 +0530362 unsigned int cpu;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700363 int ret;
364
365 module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
366 if (!module_kobj) {
367 printk(KERN_ERR "%s: cannot find kobject for module %s\n",
368 __func__, KBUILD_MODNAME);
369 ret = -ENOENT;
Murali Nalajala0df9fee2012-01-12 15:26:09 +0530370 goto mode_sysfs_add_exit;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700371 }
372
373 modes_kobj = kobject_create_and_add("modes", module_kobj);
374 if (!modes_kobj) {
375 printk(KERN_ERR "%s: cannot create modes kobject\n", __func__);
376 ret = -ENOMEM;
Murali Nalajala0df9fee2012-01-12 15:26:09 +0530377 goto mode_sysfs_add_exit;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700378 }
379
Murali Nalajala0df9fee2012-01-12 15:26:09 +0530380 for_each_possible_cpu(cpu) {
381 ret = msm_pm_mode_sysfs_add_cpu(cpu, modes_kobj);
382 if (ret)
383 goto mode_sysfs_add_exit;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700384 }
385
Murali Nalajala0df9fee2012-01-12 15:26:09 +0530386 ret = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700387
Murali Nalajala0df9fee2012-01-12 15:26:09 +0530388mode_sysfs_add_exit:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700389 return ret;
390}
391
Stephen Boyd3f4bac22012-05-30 10:03:13 -0700392s32 msm_cpuidle_get_deep_idle_latency(void)
393{
394 int i = MSM_PM_MODE(0, MSM_PM_SLEEP_MODE_POWER_COLLAPSE_NO_XO_SHUTDOWN);
395 return msm_pm_modes[i].latency - 1;
396}
397
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700398void __init msm_pm_set_platform_data(
399 struct msm_pm_platform_data *data, int count)
400{
Murali Nalajala0df9fee2012-01-12 15:26:09 +0530401 BUG_ON(MSM_PM_SLEEP_MODE_NR * num_possible_cpus() > count);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700402 msm_pm_modes = data;
403}
404
Murali Nalajala2a0bbda2012-03-28 12:12:54 +0530405void __init msm_pm_set_irq_extns(struct msm_pm_irq_calls *irq_calls)
406{
407 /* sanity check */
408 BUG_ON(irq_calls == NULL || irq_calls->irq_pending == NULL ||
409 irq_calls->idle_sleep_allowed == NULL ||
410 irq_calls->enter_sleep1 == NULL ||
411 irq_calls->enter_sleep2 == NULL ||
412 irq_calls->exit_sleep1 == NULL ||
413 irq_calls->exit_sleep2 == NULL ||
414 irq_calls->exit_sleep3 == NULL);
415
416 msm_pm_irq_extns = irq_calls;
417}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700418
Murali Nalajalaff723ec2012-07-13 16:54:40 +0530419void __init msm_pm_set_cpr_ops(struct msm_pm_cpr_ops *ops)
420{
421 msm_cpr_ops = ops;
422}
423
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700424/******************************************************************************
425 * Sleep Limitations
426 *****************************************************************************/
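/*
 * The sleep limit is handed to the modem through the shared sleep-info
 * block (resources_used field) when entering power collapse.
 * SLEEP_LIMIT_NO_TCXO_SHUTDOWN is used when only the no-XO-shutdown
 * power collapse mode is allowed; the SLEEP_RESOURCE_MEMORY_* bits below
 * encode the configured memory low-power mode.
 */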
427enum {
428 SLEEP_LIMIT_NONE = 0,
429 SLEEP_LIMIT_NO_TCXO_SHUTDOWN = 2,
430 SLEEP_LIMIT_MASK = 0x03,
431};
432
Praveen Chidambaram3895bde2012-05-14 19:42:40 +0530433static uint32_t msm_pm_sleep_limit = SLEEP_LIMIT_NONE;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700434#ifdef CONFIG_MSM_MEMORY_LOW_POWER_MODE
435enum {
436 SLEEP_RESOURCE_MEMORY_BIT0 = 0x0200,
437 SLEEP_RESOURCE_MEMORY_BIT1 = 0x0010,
438};
439#endif
440
441
442/******************************************************************************
443 * Configure Hardware for Power Down/Up
444 *****************************************************************************/
445
446#if defined(CONFIG_ARCH_MSM7X30)
Taniya Das298de8c2012-02-16 11:45:31 +0530447#define APPS_CLK_SLEEP_EN (MSM_APCS_GCC_BASE + 0x020)
448#define APPS_PWRDOWN (MSM_ACC0_BASE + 0x01c)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700449#define APPS_SECOP (MSM_TCSR_BASE + 0x038)
Murali Nalajala1bc1d7c2012-02-09 11:18:42 +0530450#define APPS_STANDBY_CTL NULL
451#else
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700452#define APPS_CLK_SLEEP_EN (MSM_CSR_BASE + 0x11c)
453#define APPS_PWRDOWN (MSM_CSR_BASE + 0x440)
454#define APPS_STANDBY_CTL (MSM_CSR_BASE + 0x108)
Murali Nalajala1bc1d7c2012-02-09 11:18:42 +0530455#define APPS_SECOP NULL
456#endif
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700457
458/*
459 * Configure hardware registers in preparation for Apps power down.
460 */
461static void msm_pm_config_hw_before_power_down(void)
462{
Murali Nalajala1bc1d7c2012-02-09 11:18:42 +0530463 if (cpu_is_msm7x30() || cpu_is_msm8x55()) {
Murali Nalajala1bc1d7c2012-02-09 11:18:42 +0530464 __raw_writel(4, APPS_SECOP);
Murali Nalajala1bc1d7c2012-02-09 11:18:42 +0530465 } else if (cpu_is_msm7x27()) {
466 __raw_writel(0x1f, APPS_CLK_SLEEP_EN);
Murali Nalajala1bc1d7c2012-02-09 11:18:42 +0530467 } else if (cpu_is_msm7x27a() || cpu_is_msm7x27aa() ||
Pankaj Kumarfee56a82012-04-17 14:26:49 +0530468 cpu_is_msm7x25a() || cpu_is_msm7x25aa() ||
469 cpu_is_msm7x25ab()) {
Murali Nalajala1bc1d7c2012-02-09 11:18:42 +0530470 __raw_writel(0x7, APPS_CLK_SLEEP_EN);
Murali Nalajalab87e88c2012-05-18 15:12:13 +0530471 } else if (cpu_is_qsd8x50()) {
Murali Nalajala1bc1d7c2012-02-09 11:18:42 +0530472 __raw_writel(0x1f, APPS_CLK_SLEEP_EN);
473 mb();
Murali Nalajala1bc1d7c2012-02-09 11:18:42 +0530474 __raw_writel(0, APPS_STANDBY_CTL);
Murali Nalajala1bc1d7c2012-02-09 11:18:42 +0530475 }
Murali Nalajalab87e88c2012-05-18 15:12:13 +0530476 mb();
477 __raw_writel(1, APPS_PWRDOWN);
478 mb();
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700479}
480
481/*
Anji jonnala1f2377c2012-03-27 14:35:55 +0530482 * Program the top CSR from the core0 context to put
 483 * core1 into GDFS, as core1 is not running yet.
484 */
485static void configure_top_csr(void)
486{
487 void __iomem *base_ptr;
488 unsigned int value = 0;
489
Murali Nalajala19d33a22012-05-18 14:11:19 +0530490 base_ptr = core1_reset_base();
Anji jonnala1f2377c2012-03-27 14:35:55 +0530491 if (!base_ptr)
492 return;
493
494 /* bring the core1 out of reset */
495 __raw_writel(0x3, base_ptr);
496 mb();
497 /*
498 * override DBGNOPOWERDN and program the GDFS
499 * count val
500 */
501
502 __raw_writel(0x00030002, (MSM_CFG_CTL_BASE + 0x38));
503 mb();
504
505 /* Initialize the SPM0 and SPM1 registers */
506 msm_spm_reinit();
507
508 /* enable TCSR for core1 */
509 value = __raw_readl((MSM_CFG_CTL_BASE + MPA5_CFG_CTL_REG));
510 value |= BIT(22);
511 __raw_writel(value, MSM_CFG_CTL_BASE + MPA5_CFG_CTL_REG);
512 mb();
513
514 /* set reset bit for SPM1 */
515 value = __raw_readl((MSM_CFG_CTL_BASE + MPA5_CFG_CTL_REG));
516 value |= BIT(20);
517 __raw_writel(value, MSM_CFG_CTL_BASE + MPA5_CFG_CTL_REG);
518 mb();
519
520 /* set CLK_OFF bit */
521 value = __raw_readl((MSM_CFG_CTL_BASE + MPA5_CFG_CTL_REG));
522 value |= BIT(18);
523 __raw_writel(value, MSM_CFG_CTL_BASE + MPA5_CFG_CTL_REG);
524 mb();
525
526 /* set clamps bit */
527 value = __raw_readl((MSM_CFG_CTL_BASE + MPA5_CFG_CTL_REG));
528 value |= BIT(21);
529 __raw_writel(value, MSM_CFG_CTL_BASE + MPA5_CFG_CTL_REG);
530 mb();
531
532 /* set power_up bit */
533 value = __raw_readl((MSM_CFG_CTL_BASE + MPA5_CFG_CTL_REG));
534 value |= BIT(19);
535 __raw_writel(value, MSM_CFG_CTL_BASE + MPA5_CFG_CTL_REG);
536 mb();
537
 538 /* Disable TCSR for core0 */
539 value = __raw_readl((MSM_CFG_CTL_BASE + MPA5_CFG_CTL_REG));
540 value &= ~BIT(22);
541 __raw_writel(value, MSM_CFG_CTL_BASE + MPA5_CFG_CTL_REG);
542 mb();
543 __raw_writel(0x0, base_ptr);
544 mb();
Anji jonnala1f2377c2012-03-27 14:35:55 +0530545}
546
547/*
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700548 * Clear hardware registers after Apps powers up.
549 */
550static void msm_pm_config_hw_after_power_up(void)
551{
Anji jonnala1f2377c2012-03-27 14:35:55 +0530552
Murali Nalajala1bc1d7c2012-02-09 11:18:42 +0530553 if (cpu_is_msm7x30() || cpu_is_msm8x55()) {
554 __raw_writel(0, APPS_SECOP);
555 mb();
556 __raw_writel(0, APPS_PWRDOWN);
557 mb();
558 msm_spm_reinit();
Murali Nalajalab87e88c2012-05-18 15:12:13 +0530559 } else if (cpu_is_msm8625()) {
Murali Nalajala1bc1d7c2012-02-09 11:18:42 +0530560 __raw_writel(0, APPS_PWRDOWN);
561 mb();
Anji jonnala1f2377c2012-03-27 14:35:55 +0530562
Murali Nalajalab87e88c2012-05-18 15:12:13 +0530563 if (power_collapsed) {
Anji jonnala1f2377c2012-03-27 14:35:55 +0530564 /*
565 * enable the SCU while coming out of power
566 * collapse.
567 */
568 scu_enable(MSM_SCU_BASE);
569 /*
570 * Program the top csr to put the core1 into GDFS.
571 */
572 configure_top_csr();
573 }
Murali Nalajalab87e88c2012-05-18 15:12:13 +0530574 } else {
575 __raw_writel(0, APPS_PWRDOWN);
576 mb();
577 __raw_writel(0, APPS_CLK_SLEEP_EN);
578 mb();
Murali Nalajala1bc1d7c2012-02-09 11:18:42 +0530579 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700580}
581
582/*
583 * Configure hardware registers in preparation for SWFI.
584 */
585static void msm_pm_config_hw_before_swfi(void)
586{
Murali Nalajala1bc1d7c2012-02-09 11:18:42 +0530587 if (cpu_is_qsd8x50()) {
588 __raw_writel(0x1f, APPS_CLK_SLEEP_EN);
589 mb();
590 } else if (cpu_is_msm7x27()) {
591 __raw_writel(0x0f, APPS_CLK_SLEEP_EN);
592 mb();
593 } else if (cpu_is_msm7x27a() || cpu_is_msm7x27aa() ||
Pankaj Kumarfee56a82012-04-17 14:26:49 +0530594 cpu_is_msm7x25a() || cpu_is_msm7x25aa() ||
595 cpu_is_msm7x25ab()) {
Murali Nalajala1bc1d7c2012-02-09 11:18:42 +0530596 __raw_writel(0x7, APPS_CLK_SLEEP_EN);
597 mb();
598 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700599}
600
601/*
602 * Respond to timing out waiting for Modem
603 *
604 * NOTE: The function never returns.
605 */
606static void msm_pm_timeout(void)
607{
608#if defined(CONFIG_MSM_PM_TIMEOUT_RESET_CHIP)
609 printk(KERN_EMERG "%s(): resetting chip\n", __func__);
610 msm_proc_comm(PCOM_RESET_CHIP_IMM, NULL, NULL);
611#elif defined(CONFIG_MSM_PM_TIMEOUT_RESET_MODEM)
612 printk(KERN_EMERG "%s(): resetting modem\n", __func__);
613 msm_proc_comm_reset_modem_now();
614#elif defined(CONFIG_MSM_PM_TIMEOUT_HALT)
615 printk(KERN_EMERG "%s(): halting\n", __func__);
616#endif
617 for (;;)
618 ;
619}
620
621
622/******************************************************************************
623 * State Polling Definitions
624 *****************************************************************************/
625
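/*
 * One SMSM state word to poll: group_id selects the word, the bits_*
 * fields give the conditions to wait for, and value_read records the
 * last value sampled by msm_pm_poll_state().
 */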
626struct msm_pm_polled_group {
627 uint32_t group_id;
628
629 uint32_t bits_all_set;
630 uint32_t bits_all_clear;
631 uint32_t bits_any_set;
632 uint32_t bits_any_clear;
633
634 uint32_t value_read;
635};
636
637/*
638 * Return true if all bits indicated by flag are set in source.
639 */
640static inline bool msm_pm_all_set(uint32_t source, uint32_t flag)
641{
642 return (source & flag) == flag;
643}
644
645/*
 646 * Return true if any bit indicated by flag is set in source.
647 */
648static inline bool msm_pm_any_set(uint32_t source, uint32_t flag)
649{
650 return !flag || (source & flag);
651}
652
653/*
654 * Return true if all bits indicated by flag are cleared in source.
655 */
656static inline bool msm_pm_all_clear(uint32_t source, uint32_t flag)
657{
658 return (~source & flag) == flag;
659}
660
661/*
 662 * Return true if any bit indicated by flag is cleared in source.
663 */
664static inline bool msm_pm_any_clear(uint32_t source, uint32_t flag)
665{
666 return !flag || (~source & flag);
667}
668
669/*
670 * Poll the shared memory states as indicated by the poll groups.
671 *
672 * nr_grps: number of groups in the array
673 * grps: array of groups
674 *
675 * The function returns when conditions specified by any of the poll
676 * groups become true. The conditions specified by a poll group are
677 * deemed true when 1) at least one bit from bits_any_set is set OR one
678 * bit from bits_any_clear is cleared; and 2) all bits in bits_all_set
679 * are set; and 3) all bits in bits_all_clear are cleared.
680 *
681 * Return value:
682 * >=0: index of the poll group whose conditions have become true
683 * -ETIMEDOUT: timed out
684 */
685static int msm_pm_poll_state(int nr_grps, struct msm_pm_polled_group *grps)
686{
687 int i, k;
688
689 for (i = 0; i < 50000; i++) {
690 for (k = 0; k < nr_grps; k++) {
691 bool all_set, all_clear;
692 bool any_set, any_clear;
693
694 grps[k].value_read = smsm_get_state(grps[k].group_id);
695
696 all_set = msm_pm_all_set(grps[k].value_read,
697 grps[k].bits_all_set);
698 all_clear = msm_pm_all_clear(grps[k].value_read,
699 grps[k].bits_all_clear);
700 any_set = msm_pm_any_set(grps[k].value_read,
701 grps[k].bits_any_set);
702 any_clear = msm_pm_any_clear(grps[k].value_read,
703 grps[k].bits_any_clear);
704
705 if (all_set && all_clear && (any_set || any_clear))
706 return k;
707 }
708 udelay(50);
709 }
710
711 printk(KERN_ERR "%s failed:\n", __func__);
712 for (k = 0; k < nr_grps; k++)
713 printk(KERN_ERR "(%x, %x, %x, %x) %x\n",
714 grps[k].bits_all_set, grps[k].bits_all_clear,
715 grps[k].bits_any_set, grps[k].bits_any_clear,
716 grps[k].value_read);
717
718 return -ETIMEDOUT;
719}
720
721
722/******************************************************************************
723 * Suspend Max Sleep Time
724 *****************************************************************************/
725
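/*
 * Sleep times are expressed in ticks of the 32.768 kHz slow clock (SCLK);
 * msm_pm_convert_and_cap_time() converts a nanosecond duration to SCLK
 * ticks and caps it at the given limit (MSM_PM_SLEEP_TICK_LIMIT here).
 */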
726#define SCLK_HZ (32768)
727#define MSM_PM_SLEEP_TICK_LIMIT (0x6DDD000)
728
729#ifdef CONFIG_MSM_SLEEP_TIME_OVERRIDE
730static int msm_pm_sleep_time_override;
731module_param_named(sleep_time_override,
732 msm_pm_sleep_time_override, int, S_IRUGO | S_IWUSR | S_IWGRP);
733#endif
734
735static uint32_t msm_pm_max_sleep_time;
736
737/*
738 * Convert time from nanoseconds to slow clock ticks, then cap it to the
739 * specified limit
740 */
741static int64_t msm_pm_convert_and_cap_time(int64_t time_ns, int64_t limit)
742{
743 do_div(time_ns, NSEC_PER_SEC / SCLK_HZ);
744 return (time_ns > limit) ? limit : time_ns;
745}
746
747/*
748 * Set the sleep time for suspend. 0 means infinite sleep time.
749 */
750void msm_pm_set_max_sleep_time(int64_t max_sleep_time_ns)
751{
752 unsigned long flags;
753
754 local_irq_save(flags);
755 if (max_sleep_time_ns == 0) {
756 msm_pm_max_sleep_time = 0;
757 } else {
758 msm_pm_max_sleep_time = (uint32_t)msm_pm_convert_and_cap_time(
759 max_sleep_time_ns, MSM_PM_SLEEP_TICK_LIMIT);
760
761 if (msm_pm_max_sleep_time == 0)
762 msm_pm_max_sleep_time = 1;
763 }
764
765 MSM_PM_DPRINTK(MSM_PM_DEBUG_SUSPEND, KERN_INFO,
766 "%s(): Requested %lld ns Giving %u sclk ticks\n", __func__,
767 max_sleep_time_ns, msm_pm_max_sleep_time);
768 local_irq_restore(flags);
769}
770EXPORT_SYMBOL(msm_pm_set_max_sleep_time);
771
772
773/******************************************************************************
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700774 * Shared Memory Bits
775 *****************************************************************************/
776
777#define DEM_MASTER_BITS_PER_CPU 6
778
779/* Power Master State Bits - Per CPU */
780#define DEM_MASTER_SMSM_RUN \
781 (0x01UL << (DEM_MASTER_BITS_PER_CPU * SMSM_APPS_STATE))
782#define DEM_MASTER_SMSM_RSA \
783 (0x02UL << (DEM_MASTER_BITS_PER_CPU * SMSM_APPS_STATE))
784#define DEM_MASTER_SMSM_PWRC_EARLY_EXIT \
785 (0x04UL << (DEM_MASTER_BITS_PER_CPU * SMSM_APPS_STATE))
786#define DEM_MASTER_SMSM_SLEEP_EXIT \
787 (0x08UL << (DEM_MASTER_BITS_PER_CPU * SMSM_APPS_STATE))
788#define DEM_MASTER_SMSM_READY \
789 (0x10UL << (DEM_MASTER_BITS_PER_CPU * SMSM_APPS_STATE))
790#define DEM_MASTER_SMSM_SLEEP \
791 (0x20UL << (DEM_MASTER_BITS_PER_CPU * SMSM_APPS_STATE))
792
793/* Power Slave State Bits */
794#define DEM_SLAVE_SMSM_RUN (0x0001)
795#define DEM_SLAVE_SMSM_PWRC (0x0002)
796#define DEM_SLAVE_SMSM_PWRC_DELAY (0x0004)
797#define DEM_SLAVE_SMSM_PWRC_EARLY_EXIT (0x0008)
798#define DEM_SLAVE_SMSM_WFPI (0x0010)
799#define DEM_SLAVE_SMSM_SLEEP (0x0020)
800#define DEM_SLAVE_SMSM_SLEEP_EXIT (0x0040)
801#define DEM_SLAVE_SMSM_MSGS_REDUCED (0x0080)
802#define DEM_SLAVE_SMSM_RESET (0x0100)
803#define DEM_SLAVE_SMSM_PWRC_SUSPEND (0x0200)
804
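/*
 * Apps-side handshake sequence (see msm_pm_power_collapse()):
 * RUN -> PWRC (plus PWRC_SUSPEND when not from idle) -> WFPI -> RUN,
 * with PWRC_EARLY_EXIT used when entry into power collapse is aborted.
 */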
805
806/******************************************************************************
807 * Shared Memory Data
808 *****************************************************************************/
809
810#define DEM_MAX_PORT_NAME_LEN (20)
811
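/*
 * Sleep-info block shared with the modem: Apps fills in sleep_time,
 * irq_mask and resources_used before power collapse; wakeup_reason and
 * pending_irqs are read back after waking up (see msm_pm_power_collapse()).
 */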
812struct msm_pm_smem_t {
813 uint32_t sleep_time;
814 uint32_t irq_mask;
815 uint32_t resources_used;
816 uint32_t reserved1;
817
818 uint32_t wakeup_reason;
819 uint32_t pending_irqs;
820 uint32_t rpc_prog;
821 uint32_t rpc_proc;
822 char smd_port_name[DEM_MAX_PORT_NAME_LEN];
823 uint32_t reserved2;
824};
825
826
827/******************************************************************************
828 *
829 *****************************************************************************/
830static struct msm_pm_smem_t *msm_pm_smem_data;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700831static atomic_t msm_pm_init_done = ATOMIC_INIT(0);
832
833static int msm_pm_modem_busy(void)
834{
835 if (!(smsm_get_state(SMSM_POWER_MASTER_DEM) & DEM_MASTER_SMSM_READY)) {
836 MSM_PM_DPRINTK(MSM_PM_DEBUG_POWER_COLLAPSE,
837 KERN_INFO, "%s(): master not ready\n", __func__);
838 return -EBUSY;
839 }
840
841 return 0;
842}
843
844/*
845 * Power collapse the Apps processor. This function executes the handshake
846 * protocol with Modem.
847 *
848 * Return value:
849 * -EAGAIN: modem reset occurred or early exit from power collapse
850 * -EBUSY: modem not ready for our power collapse -- no power loss
851 * -ETIMEDOUT: timed out waiting for modem's handshake -- no power loss
852 * 0: success
853 */
854static int msm_pm_power_collapse
855 (bool from_idle, uint32_t sleep_delay, uint32_t sleep_limit)
856{
857 struct msm_pm_polled_group state_grps[2];
858 unsigned long saved_acpuclk_rate;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700859 int collapsed = 0;
860 int ret;
Murali Nalajala07b04022012-04-10 16:00:49 +0530861 int val;
862 int modem_early_exit = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700863
864 MSM_PM_DPRINTK(MSM_PM_DEBUG_SUSPEND|MSM_PM_DEBUG_POWER_COLLAPSE,
865 KERN_INFO, "%s(): idle %d, delay %u, limit %u\n", __func__,
866 (int)from_idle, sleep_delay, sleep_limit);
867
868 if (!(smsm_get_state(SMSM_POWER_MASTER_DEM) & DEM_MASTER_SMSM_READY)) {
869 MSM_PM_DPRINTK(
870 MSM_PM_DEBUG_SUSPEND | MSM_PM_DEBUG_POWER_COLLAPSE,
871 KERN_INFO, "%s(): master not ready\n", __func__);
872 ret = -EBUSY;
873 goto power_collapse_bail;
874 }
875
876 memset(msm_pm_smem_data, 0, sizeof(*msm_pm_smem_data));
877
Murali Nalajala41786ab2012-03-06 10:47:32 +0530878 if (cpu_is_msm8625()) {
879 /* Program the SPM */
880 ret = msm_spm_set_low_power_mode(MSM_SPM_MODE_POWER_COLLAPSE,
881 false);
882 WARN_ON(ret);
883 }
884
Murali Nalajalaff723ec2012-07-13 16:54:40 +0530885 /* Call CPR suspend only for "idlePC" case */
886 if (msm_cpr_ops && from_idle)
887 msm_cpr_ops->cpr_suspend();
888
Murali Nalajala2a0bbda2012-03-28 12:12:54 +0530889 msm_pm_irq_extns->enter_sleep1(true, from_idle,
890 &msm_pm_smem_data->irq_mask);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700891 msm_sirc_enter_sleep();
892 msm_gpio_enter_sleep(from_idle);
893
894 msm_pm_smem_data->sleep_time = sleep_delay;
895 msm_pm_smem_data->resources_used = sleep_limit;
896
897 /* Enter PWRC/PWRC_SUSPEND */
898
899 if (from_idle)
900 smsm_change_state(SMSM_APPS_DEM, DEM_SLAVE_SMSM_RUN,
901 DEM_SLAVE_SMSM_PWRC);
902 else
903 smsm_change_state(SMSM_APPS_DEM, DEM_SLAVE_SMSM_RUN,
904 DEM_SLAVE_SMSM_PWRC | DEM_SLAVE_SMSM_PWRC_SUSPEND);
905
906 MSM_PM_DEBUG_PRINT_STATE("msm_pm_power_collapse(): PWRC");
907 MSM_PM_DEBUG_PRINT_SLEEP_INFO();
908
909 memset(state_grps, 0, sizeof(state_grps));
910 state_grps[0].group_id = SMSM_POWER_MASTER_DEM;
911 state_grps[0].bits_all_set = DEM_MASTER_SMSM_RSA;
912 state_grps[1].group_id = SMSM_MODEM_STATE;
913 state_grps[1].bits_all_set = SMSM_RESET;
914
915 ret = msm_pm_poll_state(ARRAY_SIZE(state_grps), state_grps);
916
917 if (ret < 0) {
918 printk(KERN_EMERG "%s(): power collapse entry "
919 "timed out waiting for Modem's response\n", __func__);
920 msm_pm_timeout();
921 }
922
923 if (ret == 1) {
924 MSM_PM_DPRINTK(
925 MSM_PM_DEBUG_SUSPEND|MSM_PM_DEBUG_POWER_COLLAPSE,
926 KERN_INFO,
927 "%s(): msm_pm_poll_state detected Modem reset\n",
928 __func__);
929 goto power_collapse_early_exit;
930 }
931
932 /* DEM Master in RSA */
933
934 MSM_PM_DEBUG_PRINT_STATE("msm_pm_power_collapse(): PWRC RSA");
935
Murali Nalajala2a0bbda2012-03-28 12:12:54 +0530936 ret = msm_pm_irq_extns->enter_sleep2(true, from_idle);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700937 if (ret < 0) {
938 MSM_PM_DPRINTK(
939 MSM_PM_DEBUG_SUSPEND|MSM_PM_DEBUG_POWER_COLLAPSE,
940 KERN_INFO,
941 "%s(): msm_irq_enter_sleep2 aborted, %d\n", __func__,
942 ret);
943 goto power_collapse_early_exit;
944 }
945
946 msm_pm_config_hw_before_power_down();
947 MSM_PM_DEBUG_PRINT_STATE("msm_pm_power_collapse(): pre power down");
948
949 saved_acpuclk_rate = acpuclk_power_collapse();
950 MSM_PM_DPRINTK(MSM_PM_DEBUG_CLOCK, KERN_INFO,
951 "%s(): change clock rate (old rate = %lu)\n", __func__,
952 saved_acpuclk_rate);
953
954 if (saved_acpuclk_rate == 0) {
955 msm_pm_config_hw_after_power_up();
956 goto power_collapse_early_exit;
957 }
958
Maheshkumar Sivasubramanian8ccc16e2011-10-25 15:59:57 -0600959 msm_pm_boot_config_before_pc(smp_processor_id(),
960 virt_to_phys(msm_pm_collapse_exit));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700961
962#ifdef CONFIG_VFP
963 if (from_idle)
Steve Mucklef132c6c2012-06-06 18:30:57 -0700964 vfp_pm_suspend();
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700965#endif
966
967#ifdef CONFIG_CACHE_L2X0
Murali Nalajala73c13332012-05-15 11:30:59 +0530968 if (!cpu_is_msm8625())
969 l2cc_suspend();
970 else
971 apps_power_collapse = 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700972#endif
973
974 collapsed = msm_pm_collapse();
Murali Nalajala07b04022012-04-10 16:00:49 +0530975
976 /*
977 * TBD: Currently recognise the MODEM early exit
978 * path by reading the MPA5_GDFS_CNT_VAL register.
979 */
980 if (cpu_is_msm8625()) {
981 /*
Murali Nalajala93e6ed02012-05-13 12:57:22 +0530982 * On system reset, the default value of MPA5_GDFS_CNT_VAL
 983 * is 0x0; the modem later reprograms it to
 984 * 0x00030004. After APPS has power collapsed and is
 985 * coming out of it, the expected value of this register
 986 * is always 0x00030004. If APPS instead sees the value
 987 * 0x00030002, treat this case as a modem early
 988 * exit.
Murali Nalajala07b04022012-04-10 16:00:49 +0530989 */
990 val = __raw_readl(MSM_CFG_CTL_BASE + 0x38);
Murali Nalajala93e6ed02012-05-13 12:57:22 +0530991 if (val != 0x00030002)
Murali Nalajala07b04022012-04-10 16:00:49 +0530992 power_collapsed = 1;
Murali Nalajala93e6ed02012-05-13 12:57:22 +0530993 else
994 modem_early_exit = 1;
Murali Nalajala07b04022012-04-10 16:00:49 +0530995 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700996
997#ifdef CONFIG_CACHE_L2X0
Murali Nalajala73c13332012-05-15 11:30:59 +0530998 if (!cpu_is_msm8625())
999 l2cc_resume();
1000 else
1001 apps_power_collapse = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001002#endif
1003
Maheshkumar Sivasubramanian8ccc16e2011-10-25 15:59:57 -06001004 msm_pm_boot_config_after_pc(smp_processor_id());
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001005
1006 if (collapsed) {
1007#ifdef CONFIG_VFP
1008 if (from_idle)
Steve Mucklef132c6c2012-06-06 18:30:57 -07001009 vfp_pm_resume();
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001010#endif
1011 cpu_init();
1012 local_fiq_enable();
1013 }
1014
1015 MSM_PM_DPRINTK(MSM_PM_DEBUG_SUSPEND | MSM_PM_DEBUG_POWER_COLLAPSE,
1016 KERN_INFO,
1017 "%s(): msm_pm_collapse returned %d\n", __func__, collapsed);
1018
1019 MSM_PM_DPRINTK(MSM_PM_DEBUG_CLOCK, KERN_INFO,
1020 "%s(): restore clock rate to %lu\n", __func__,
1021 saved_acpuclk_rate);
1022 if (acpuclk_set_rate(smp_processor_id(), saved_acpuclk_rate,
1023 SETRATE_PC) < 0)
1024 printk(KERN_ERR "%s(): failed to restore clock rate(%lu)\n",
1025 __func__, saved_acpuclk_rate);
1026
Murali Nalajala2a0bbda2012-03-28 12:12:54 +05301027 msm_pm_irq_extns->exit_sleep1(msm_pm_smem_data->irq_mask,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001028 msm_pm_smem_data->wakeup_reason,
1029 msm_pm_smem_data->pending_irqs);
1030
1031 msm_pm_config_hw_after_power_up();
1032 MSM_PM_DEBUG_PRINT_STATE("msm_pm_power_collapse(): post power up");
1033
1034 memset(state_grps, 0, sizeof(state_grps));
1035 state_grps[0].group_id = SMSM_POWER_MASTER_DEM;
1036 state_grps[0].bits_any_set =
1037 DEM_MASTER_SMSM_RSA | DEM_MASTER_SMSM_PWRC_EARLY_EXIT;
1038 state_grps[1].group_id = SMSM_MODEM_STATE;
1039 state_grps[1].bits_all_set = SMSM_RESET;
1040
1041 ret = msm_pm_poll_state(ARRAY_SIZE(state_grps), state_grps);
1042
1043 if (ret < 0) {
1044 printk(KERN_EMERG "%s(): power collapse exit "
1045 "timed out waiting for Modem's response\n", __func__);
1046 msm_pm_timeout();
1047 }
1048
1049 if (ret == 1) {
1050 MSM_PM_DPRINTK(
1051 MSM_PM_DEBUG_SUSPEND|MSM_PM_DEBUG_POWER_COLLAPSE,
1052 KERN_INFO,
1053 "%s(): msm_pm_poll_state detected Modem reset\n",
1054 __func__);
1055 goto power_collapse_early_exit;
1056 }
1057
1058 /* Sanity check */
Murali Nalajala07b04022012-04-10 16:00:49 +05301059 if (collapsed && !modem_early_exit) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001060 BUG_ON(!(state_grps[0].value_read & DEM_MASTER_SMSM_RSA));
1061 } else {
1062 BUG_ON(!(state_grps[0].value_read &
1063 DEM_MASTER_SMSM_PWRC_EARLY_EXIT));
1064 goto power_collapse_early_exit;
1065 }
1066
1067 /* Enter WFPI */
1068
1069 smsm_change_state(SMSM_APPS_DEM,
1070 DEM_SLAVE_SMSM_PWRC | DEM_SLAVE_SMSM_PWRC_SUSPEND,
1071 DEM_SLAVE_SMSM_WFPI);
1072
1073 MSM_PM_DEBUG_PRINT_STATE("msm_pm_power_collapse(): WFPI");
1074
1075 memset(state_grps, 0, sizeof(state_grps));
1076 state_grps[0].group_id = SMSM_POWER_MASTER_DEM;
1077 state_grps[0].bits_all_set = DEM_MASTER_SMSM_RUN;
1078 state_grps[1].group_id = SMSM_MODEM_STATE;
1079 state_grps[1].bits_all_set = SMSM_RESET;
1080
1081 ret = msm_pm_poll_state(ARRAY_SIZE(state_grps), state_grps);
1082
1083 if (ret < 0) {
1084 printk(KERN_EMERG "%s(): power collapse WFPI "
1085 "timed out waiting for Modem's response\n", __func__);
1086 msm_pm_timeout();
1087 }
1088
1089 if (ret == 1) {
1090 MSM_PM_DPRINTK(
1091 MSM_PM_DEBUG_SUSPEND|MSM_PM_DEBUG_POWER_COLLAPSE,
1092 KERN_INFO,
1093 "%s(): msm_pm_poll_state detected Modem reset\n",
1094 __func__);
1095 ret = -EAGAIN;
1096 goto power_collapse_restore_gpio_bail;
1097 }
1098
1099 /* DEM Master == RUN */
1100
1101 MSM_PM_DEBUG_PRINT_STATE("msm_pm_power_collapse(): WFPI RUN");
1102 MSM_PM_DEBUG_PRINT_SLEEP_INFO();
1103
Murali Nalajala2a0bbda2012-03-28 12:12:54 +05301104 msm_pm_irq_extns->exit_sleep2(msm_pm_smem_data->irq_mask,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001105 msm_pm_smem_data->wakeup_reason,
1106 msm_pm_smem_data->pending_irqs);
Murali Nalajala2a0bbda2012-03-28 12:12:54 +05301107 msm_pm_irq_extns->exit_sleep3(msm_pm_smem_data->irq_mask,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001108 msm_pm_smem_data->wakeup_reason,
1109 msm_pm_smem_data->pending_irqs);
1110 msm_gpio_exit_sleep();
1111 msm_sirc_exit_sleep();
1112
1113 smsm_change_state(SMSM_APPS_DEM,
1114 DEM_SLAVE_SMSM_WFPI, DEM_SLAVE_SMSM_RUN);
1115
1116 MSM_PM_DEBUG_PRINT_STATE("msm_pm_power_collapse(): RUN");
1117
1118 smd_sleep_exit();
Murali Nalajala41786ab2012-03-06 10:47:32 +05301119
1120 if (cpu_is_msm8625()) {
1121 ret = msm_spm_set_low_power_mode(MSM_SPM_MODE_CLOCK_GATING,
1122 false);
1123 WARN_ON(ret);
1124 }
1125
Murali Nalajalaff723ec2012-07-13 16:54:40 +05301126 /* Call CPR resume only for "idlePC" case */
1127 if (msm_cpr_ops && from_idle)
1128 msm_cpr_ops->cpr_resume();
1129
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001130 return 0;
1131
1132power_collapse_early_exit:
1133 /* Enter PWRC_EARLY_EXIT */
1134
1135 smsm_change_state(SMSM_APPS_DEM,
1136 DEM_SLAVE_SMSM_PWRC | DEM_SLAVE_SMSM_PWRC_SUSPEND,
1137 DEM_SLAVE_SMSM_PWRC_EARLY_EXIT);
1138
1139 MSM_PM_DEBUG_PRINT_STATE("msm_pm_power_collapse(): EARLY_EXIT");
1140
1141 memset(state_grps, 0, sizeof(state_grps));
1142 state_grps[0].group_id = SMSM_POWER_MASTER_DEM;
1143 state_grps[0].bits_all_set = DEM_MASTER_SMSM_PWRC_EARLY_EXIT;
1144 state_grps[1].group_id = SMSM_MODEM_STATE;
1145 state_grps[1].bits_all_set = SMSM_RESET;
1146
1147 ret = msm_pm_poll_state(ARRAY_SIZE(state_grps), state_grps);
1148 MSM_PM_DEBUG_PRINT_STATE("msm_pm_power_collapse(): EARLY_EXIT EE");
1149
1150 if (ret < 0) {
1151 printk(KERN_EMERG "%s(): power collapse EARLY_EXIT "
1152 "timed out waiting for Modem's response\n", __func__);
1153 msm_pm_timeout();
1154 }
1155
1156 if (ret == 1) {
1157 MSM_PM_DPRINTK(
1158 MSM_PM_DEBUG_SUSPEND|MSM_PM_DEBUG_POWER_COLLAPSE,
1159 KERN_INFO,
1160 "%s(): msm_pm_poll_state detected Modem reset\n",
1161 __func__);
1162 }
1163
1164 /* DEM Master == RESET or PWRC_EARLY_EXIT */
1165
1166 ret = -EAGAIN;
1167
1168power_collapse_restore_gpio_bail:
1169 msm_gpio_exit_sleep();
1170 msm_sirc_exit_sleep();
1171
1172 /* Enter RUN */
1173 smsm_change_state(SMSM_APPS_DEM,
1174 DEM_SLAVE_SMSM_PWRC | DEM_SLAVE_SMSM_PWRC_SUSPEND |
1175 DEM_SLAVE_SMSM_PWRC_EARLY_EXIT, DEM_SLAVE_SMSM_RUN);
1176
1177 MSM_PM_DEBUG_PRINT_STATE("msm_pm_power_collapse(): RUN");
1178
1179 if (collapsed)
1180 smd_sleep_exit();
1181
Murali Nalajalaff723ec2012-07-13 16:54:40 +05301182 /* Call CPR resume only for "idlePC" case */
1183 if (msm_cpr_ops && from_idle)
1184 msm_cpr_ops->cpr_resume();
1185
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001186power_collapse_bail:
Murali Nalajala41786ab2012-03-06 10:47:32 +05301187 if (cpu_is_msm8625()) {
1188 ret = msm_spm_set_low_power_mode(MSM_SPM_MODE_CLOCK_GATING,
1189 false);
1190 WARN_ON(ret);
1191 }
1192
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001193 return ret;
1194}
1195
1196/*
1197 * Power collapse the Apps processor without involving Modem.
1198 *
1199 * Return value:
1200 * 0: success
1201 */
Stephen Boydb29750d2012-02-21 01:21:32 -08001202static int __ref msm_pm_power_collapse_standalone(bool from_idle)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001203{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001204 int collapsed = 0;
1205 int ret;
Murali Nalajala41786ab2012-03-06 10:47:32 +05301206 void *entry;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001207
1208 MSM_PM_DPRINTK(MSM_PM_DEBUG_SUSPEND|MSM_PM_DEBUG_POWER_COLLAPSE,
1209 KERN_INFO, "%s()\n", __func__);
1210
1211 ret = msm_spm_set_low_power_mode(MSM_SPM_MODE_POWER_COLLAPSE, false);
1212 WARN_ON(ret);
1213
Murali Nalajala41786ab2012-03-06 10:47:32 +05301214 entry = (!smp_processor_id() || from_idle) ?
1215 msm_pm_collapse_exit : msm_secondary_startup;
1216
Maheshkumar Sivasubramanian8ccc16e2011-10-25 15:59:57 -06001217 msm_pm_boot_config_before_pc(smp_processor_id(),
Murali Nalajala41786ab2012-03-06 10:47:32 +05301218 virt_to_phys(entry));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001219
1220#ifdef CONFIG_VFP
Steve Mucklef132c6c2012-06-06 18:30:57 -07001221 vfp_pm_suspend();
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001222#endif
1223
1224#ifdef CONFIG_CACHE_L2X0
Murali Nalajala41786ab2012-03-06 10:47:32 +05301225 if (!cpu_is_msm8625())
Taniya Das38a8c6e2012-05-09 20:34:39 +05301226 l2cc_suspend();
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001227#endif
1228
1229 collapsed = msm_pm_collapse();
1230
1231#ifdef CONFIG_CACHE_L2X0
Murali Nalajala41786ab2012-03-06 10:47:32 +05301232 if (!cpu_is_msm8625())
Taniya Das38a8c6e2012-05-09 20:34:39 +05301233 l2cc_resume();
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001234#endif
1235
Maheshkumar Sivasubramanian8ccc16e2011-10-25 15:59:57 -06001236 msm_pm_boot_config_after_pc(smp_processor_id());
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001237
1238 if (collapsed) {
1239#ifdef CONFIG_VFP
Steve Mucklef132c6c2012-06-06 18:30:57 -07001240 vfp_pm_resume();
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001241#endif
1242 cpu_init();
1243 local_fiq_enable();
1244 }
1245
1246 MSM_PM_DPRINTK(MSM_PM_DEBUG_SUSPEND | MSM_PM_DEBUG_POWER_COLLAPSE,
1247 KERN_INFO,
1248 "%s(): msm_pm_collapse returned %d\n", __func__, collapsed);
1249
1250 ret = msm_spm_set_low_power_mode(MSM_SPM_MODE_CLOCK_GATING, false);
1251 WARN_ON(ret);
1252
Anji jonnalac6816222012-03-31 10:55:14 +05301253 return !collapsed;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001254}
1255
1256/*
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001257 * Bring the Apps processor to SWFI.
1258 *
1259 * Return value:
1260 * -EIO: could not ramp Apps processor clock
1261 * 0: success
1262 */
1263static int msm_pm_swfi(bool ramp_acpu)
1264{
1265 unsigned long saved_acpuclk_rate = 0;
1266
1267 if (ramp_acpu) {
1268 saved_acpuclk_rate = acpuclk_wait_for_irq();
1269 MSM_PM_DPRINTK(MSM_PM_DEBUG_CLOCK, KERN_INFO,
1270 "%s(): change clock rate (old rate = %lu)\n", __func__,
1271 saved_acpuclk_rate);
1272
1273 if (!saved_acpuclk_rate)
1274 return -EIO;
1275 }
1276
Murali Nalajala41786ab2012-03-06 10:47:32 +05301277 if (!cpu_is_msm8625())
1278 msm_pm_config_hw_before_swfi();
1279
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001280 msm_arch_idle();
1281
1282 if (ramp_acpu) {
1283 MSM_PM_DPRINTK(MSM_PM_DEBUG_CLOCK, KERN_INFO,
1284 "%s(): restore clock rate to %lu\n", __func__,
1285 saved_acpuclk_rate);
1286 if (acpuclk_set_rate(smp_processor_id(), saved_acpuclk_rate,
1287 SETRATE_SWFI) < 0)
1288 printk(KERN_ERR
1289 "%s(): failed to restore clock rate(%lu)\n",
1290 __func__, saved_acpuclk_rate);
1291 }
1292
1293 return 0;
1294}
1295
Praveen Chidambaram3895bde2012-05-14 19:42:40 +05301296static int64_t msm_pm_timer_enter_suspend(int64_t *period)
1297{
Anji Jonnalac02367a2012-07-01 02:56:11 +05301298 int64_t time = 0;
Praveen Chidambaram3895bde2012-05-14 19:42:40 +05301299
1300 time = msm_timer_get_sclk_time(period);
1301 if (!time)
1302 pr_err("%s: Unable to read sclk.\n", __func__);
1303 return time;
1304}
1305
1306static int64_t msm_pm_timer_exit_suspend(int64_t time, int64_t period)
1307{
1308
1309 if (time != 0) {
1310 int64_t end_time = msm_timer_get_sclk_time(NULL);
1311 if (end_time != 0) {
1312 time = end_time - time;
1313 if (time < 0)
1314 time += period;
1315 } else
1316 time = 0;
1317 }
1318 return time;
1319}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001320
1321/******************************************************************************
1322 * External Idle/Suspend Functions
1323 *****************************************************************************/
1324
1325/*
1326 * Put CPU in low power mode.
1327 */
1328void arch_idle(void)
1329{
1330 bool allow[MSM_PM_SLEEP_MODE_NR];
1331 uint32_t sleep_limit = SLEEP_LIMIT_NONE;
1332
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001333 int64_t timer_expiration;
Murali Nalajala8fda4492012-03-19 18:22:59 +05301334 int latency_qos;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001335 int ret;
1336 int i;
Murali Nalajala0df9fee2012-01-12 15:26:09 +05301337 unsigned int cpu;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001338 int64_t t1;
Murali Nalajalab86f3702012-03-30 17:54:57 +05301339 static DEFINE_PER_CPU(int64_t, t2);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001340 int exit_stat;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001341
1342 if (!atomic_read(&msm_pm_init_done))
1343 return;
1344
Murali Nalajala0df9fee2012-01-12 15:26:09 +05301345 cpu = smp_processor_id();
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001346 latency_qos = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
Murali Nalajala8fda4492012-03-19 18:22:59 +05301347 /* get the next timer expiration */
1348 timer_expiration = ktime_to_ns(tick_nohz_get_sleep_length());
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001349
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001350 t1 = ktime_to_ns(ktime_get());
Murali Nalajalab86f3702012-03-30 17:54:57 +05301351 msm_pm_add_stat(MSM_PM_STAT_NOT_IDLE, t1 - __get_cpu_var(t2));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001352 msm_pm_add_stat(MSM_PM_STAT_REQUESTED_IDLE, timer_expiration);
Murali Nalajala7744d162012-01-13 13:06:03 +05301353 exit_stat = MSM_PM_STAT_IDLE_SPIN;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001354
1355 for (i = 0; i < ARRAY_SIZE(allow); i++)
1356 allow[i] = true;
1357
Murali Nalajala41786ab2012-03-06 10:47:32 +05301358 if (num_online_cpus() > 1 ||
1359 (timer_expiration < msm_pm_idle_sleep_min_time) ||
Murali Nalajala2a0bbda2012-03-28 12:12:54 +05301360 !msm_pm_irq_extns->idle_sleep_allowed()) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001361 allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE] = false;
1362 allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_NO_XO_SHUTDOWN] = false;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001363 }
1364
1365 for (i = 0; i < ARRAY_SIZE(allow); i++) {
Murali Nalajala0df9fee2012-01-12 15:26:09 +05301366 struct msm_pm_platform_data *mode =
1367 &msm_pm_modes[MSM_PM_MODE(cpu, i)];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001368 if (!mode->idle_supported || !mode->idle_enabled ||
1369 mode->latency >= latency_qos ||
1370 mode->residency * 1000ULL >= timer_expiration)
1371 allow[i] = false;
1372 }
1373
1374 if (allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE] ||
1375 allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_NO_XO_SHUTDOWN]) {
1376 uint32_t wait_us = CONFIG_MSM_IDLE_WAIT_ON_MODEM;
1377 while (msm_pm_modem_busy() && wait_us) {
1378 if (wait_us > 100) {
1379 udelay(100);
1380 wait_us -= 100;
1381 } else {
1382 udelay(wait_us);
1383 wait_us = 0;
1384 }
1385 }
1386
1387 if (msm_pm_modem_busy()) {
1388 allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE] = false;
1389 allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_NO_XO_SHUTDOWN]
1390 = false;
1391 }
1392 }
1393
1394 MSM_PM_DPRINTK(MSM_PM_DEBUG_IDLE, KERN_INFO,
1395 "%s(): latency qos %d, next timer %lld, sleep limit %u\n",
1396 __func__, latency_qos, timer_expiration, sleep_limit);
1397
1398 for (i = 0; i < ARRAY_SIZE(allow); i++)
1399 MSM_PM_DPRINTK(MSM_PM_DEBUG_IDLE, KERN_INFO,
1400 "%s(): allow %s: %d\n", __func__,
1401 msm_pm_sleep_mode_labels[i], (int)allow[i]);
1402
1403 if (allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE] ||
1404 allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_NO_XO_SHUTDOWN]) {
Murali Nalajala8fda4492012-03-19 18:22:59 +05301405 /* Sync the timer with SCLK; this is needed only for the
 1406 * modem-assisted power collapse case.
1407 */
1408 int64_t next_timer_exp = msm_timer_enter_idle();
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001409 uint32_t sleep_delay;
Murali Nalajala8fda4492012-03-19 18:22:59 +05301410 bool low_power = false;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001411
1412 sleep_delay = (uint32_t) msm_pm_convert_and_cap_time(
Murali Nalajala8fda4492012-03-19 18:22:59 +05301413 next_timer_exp, MSM_PM_SLEEP_TICK_LIMIT);
1414
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001415 if (sleep_delay == 0) /* 0 would mean infinite time */
1416 sleep_delay = 1;
1417
1418 if (!allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE])
1419 sleep_limit = SLEEP_LIMIT_NO_TCXO_SHUTDOWN;
1420
1421#if defined(CONFIG_MSM_MEMORY_LOW_POWER_MODE_IDLE_ACTIVE)
1422 sleep_limit |= SLEEP_RESOURCE_MEMORY_BIT1;
1423#elif defined(CONFIG_MSM_MEMORY_LOW_POWER_MODE_IDLE_RETENTION)
1424 sleep_limit |= SLEEP_RESOURCE_MEMORY_BIT0;
1425#endif
1426
1427 ret = msm_pm_power_collapse(true, sleep_delay, sleep_limit);
1428 low_power = (ret != -EBUSY && ret != -ETIMEDOUT);
Murali Nalajala8fda4492012-03-19 18:22:59 +05301429 msm_timer_exit_idle(low_power);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001430
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001431 if (ret)
1432 exit_stat = MSM_PM_STAT_IDLE_FAILED_POWER_COLLAPSE;
1433 else {
1434 exit_stat = MSM_PM_STAT_IDLE_POWER_COLLAPSE;
1435 msm_pm_sleep_limit = sleep_limit;
1436 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001437 } else if (allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE]) {
Murali Nalajala41786ab2012-03-06 10:47:32 +05301438 ret = msm_pm_power_collapse_standalone(true);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001439 exit_stat = ret ?
1440 MSM_PM_STAT_IDLE_FAILED_STANDALONE_POWER_COLLAPSE :
1441 MSM_PM_STAT_IDLE_STANDALONE_POWER_COLLAPSE;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001442 } else if (allow[MSM_PM_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT]) {
1443 ret = msm_pm_swfi(true);
1444 if (ret)
Murali Nalajala2a0bbda2012-03-28 12:12:54 +05301445 while (!msm_pm_irq_extns->irq_pending())
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001446 udelay(1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001447 exit_stat = ret ? MSM_PM_STAT_IDLE_SPIN : MSM_PM_STAT_IDLE_WFI;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001448 } else if (allow[MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT]) {
1449 msm_pm_swfi(false);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001450 exit_stat = MSM_PM_STAT_IDLE_WFI;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001451 } else {
Murali Nalajala2a0bbda2012-03-28 12:12:54 +05301452 while (!msm_pm_irq_extns->irq_pending())
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001453 udelay(1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001454 exit_stat = MSM_PM_STAT_IDLE_SPIN;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001455 }
1456
Murali Nalajalab86f3702012-03-30 17:54:57 +05301457 __get_cpu_var(t2) = ktime_to_ns(ktime_get());
1458 msm_pm_add_stat(exit_stat, __get_cpu_var(t2) - t1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001459}
1460
1461/*
1462 * Suspend the Apps processor.
1463 *
1464 * Return value:
Murali Nalajala41786ab2012-03-06 10:47:32 +05301465 * -EPERM: suspend attempted from a core other than CORE0
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001466 * -EAGAIN: modem reset occurred or early exit from suspend
1467 * -EBUSY: modem not ready for our suspend
1468 * -EINVAL: invalid sleep mode
1469 * -EIO: could not ramp Apps processor clock
1470 * -ETIMEDOUT: timed out waiting for modem's handshake
1471 * 0: success
1472 */
1473static int msm_pm_enter(suspend_state_t state)
1474{
1475 bool allow[MSM_PM_SLEEP_MODE_NR];
1476 uint32_t sleep_limit = SLEEP_LIMIT_NONE;
Murali Nalajala41786ab2012-03-06 10:47:32 +05301477 int ret = -EPERM;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001478 int i;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001479 int64_t period = 0;
1480 int64_t time = 0;
1481
Murali Nalajala41786ab2012-03-06 10:47:32 +05301482 /* Must be executed by CORE0 */
1483 if (smp_processor_id()) {
1484 __WARN();
1485 goto suspend_exit;
1486 }
1487
Praveen Chidambaram3895bde2012-05-14 19:42:40 +05301488 time = msm_pm_timer_enter_suspend(&period);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001489
1490 MSM_PM_DPRINTK(MSM_PM_DEBUG_SUSPEND, KERN_INFO,
1491 "%s(): sleep limit %u\n", __func__, sleep_limit);
1492
1493 for (i = 0; i < ARRAY_SIZE(allow); i++)
1494 allow[i] = true;
1495
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001496 for (i = 0; i < ARRAY_SIZE(allow); i++) {
Murali Nalajala0df9fee2012-01-12 15:26:09 +05301497 struct msm_pm_platform_data *mode;
1498 mode = &msm_pm_modes[MSM_PM_MODE(0, i)];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001499 if (!mode->suspend_supported || !mode->suspend_enabled)
1500 allow[i] = false;
1501 }
1502
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001503 if (allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE] ||
1504 allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_NO_XO_SHUTDOWN]) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001505 enum msm_pm_time_stats_id id;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001506
1507 clock_debug_print_enabled();
1508
1509#ifdef CONFIG_MSM_SLEEP_TIME_OVERRIDE
1510 if (msm_pm_sleep_time_override > 0) {
1511 int64_t ns;
1512 ns = NSEC_PER_SEC * (int64_t)msm_pm_sleep_time_override;
1513 msm_pm_set_max_sleep_time(ns);
1514 msm_pm_sleep_time_override = 0;
1515 }
1516#endif
1517 if (!allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE])
1518 sleep_limit = SLEEP_LIMIT_NO_TCXO_SHUTDOWN;
1519
1520#if defined(CONFIG_MSM_MEMORY_LOW_POWER_MODE_SUSPEND_ACTIVE)
1521 sleep_limit |= SLEEP_RESOURCE_MEMORY_BIT1;
1522#elif defined(CONFIG_MSM_MEMORY_LOW_POWER_MODE_SUSPEND_RETENTION)
1523 sleep_limit |= SLEEP_RESOURCE_MEMORY_BIT0;
1524#elif defined(CONFIG_MSM_MEMORY_LOW_POWER_MODE_SUSPEND_DEEP_POWER_DOWN)
1525 if (get_msm_migrate_pages_status() != MEM_OFFLINE)
1526 sleep_limit |= SLEEP_RESOURCE_MEMORY_BIT0;
1527#endif
1528
1529 for (i = 0; i < 30 && msm_pm_modem_busy(); i++)
1530 udelay(500);
1531
1532 ret = msm_pm_power_collapse(
1533 false, msm_pm_max_sleep_time, sleep_limit);
1534
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001535 if (ret)
1536 id = MSM_PM_STAT_FAILED_SUSPEND;
1537 else {
1538 id = MSM_PM_STAT_SUSPEND;
1539 msm_pm_sleep_limit = sleep_limit;
1540 }
1541
Praveen Chidambaram3895bde2012-05-14 19:42:40 +05301542 time = msm_pm_timer_exit_suspend(time, period);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001543 msm_pm_add_stat(id, time);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001544 } else if (allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE]) {
Murali Nalajala41786ab2012-03-06 10:47:32 +05301545 ret = msm_pm_power_collapse_standalone(false);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001546 } else if (allow[MSM_PM_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT]) {
1547 ret = msm_pm_swfi(true);
1548 if (ret)
Murali Nalajala2a0bbda2012-03-28 12:12:54 +05301549 while (!msm_pm_irq_extns->irq_pending())
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001550 udelay(1);
1551 } else if (allow[MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT]) {
1552 msm_pm_swfi(false);
1553 }
1554
Murali Nalajala41786ab2012-03-06 10:47:32 +05301555suspend_exit:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001556 MSM_PM_DPRINTK(MSM_PM_DEBUG_SUSPEND, KERN_INFO,
1557 "%s(): return %d\n", __func__, ret);
1558
1559 return ret;
1560}
1561
1562static struct platform_suspend_ops msm_pm_ops = {
1563 .enter = msm_pm_enter,
1564 .valid = suspend_valid_only_mem,
1565};
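/*
 * Usage note (added): msm_pm_ops is registered with the generic suspend core
 * via suspend_set_ops(&msm_pm_ops) in msm_pm_init() below.  A later system
 * suspend request -- e.g. "echo mem > /sys/power/state", which maps to
 * pm_suspend(PM_SUSPEND_MEM) -- has the suspend core call .valid() and then
 * .enter(), i.e. msm_pm_enter() above.
 */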
1566
Murali Nalajalac89f2f32012-02-07 19:23:52 +05301567/* Hotplug the "non boot" CPUs and put
1568 * the cores into low power mode
1569 */
1570void msm_pm_cpu_enter_lowpower(unsigned int cpu)
1571{
Murali Nalajalaa7efba12012-02-23 18:13:52 +05301572 bool allow[MSM_PM_SLEEP_MODE_NR];
1573 int i;
1574
1575 for (i = 0; i < MSM_PM_SLEEP_MODE_NR; i++) {
1576 struct msm_pm_platform_data *mode;
1577
1578 mode = &msm_pm_modes[MSM_PM_MODE(cpu, i)];
1579 allow[i] = mode->suspend_supported && mode->suspend_enabled;
1580 }
1581
1582 MSM_PM_DPRINTK(MSM_PM_DEBUG_HOTPLUG, KERN_INFO,
1583 "CPU%u: %s: shutting down cpu\n", cpu, __func__);
1584
1585 if (allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE]) {
1586 msm_pm_power_collapse_standalone(false);
1587 } else if (allow[MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT]) {
1588 msm_pm_swfi(false);
1589 } else {
1590 MSM_PM_DPRINTK(MSM_PM_DEBUG_HOTPLUG, KERN_INFO,
1591 "CPU%u: %s: shutting down failed!!!\n", cpu, __func__);
1592 }
Murali Nalajalac89f2f32012-02-07 19:23:52 +05301593}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001594
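/*
 * Sketch (added; the actual caller is not in this file): on arch/arm of this
 * generation the CPU hotplug "die" hook for a secondary core is the expected
 * path into msm_pm_cpu_enter_lowpower(), roughly:
 */
#if 0
void platform_cpu_die(unsigned int cpu)
{
	/* Park the dying secondary CPU in its deepest supported mode. */
	msm_pm_cpu_enter_lowpower(cpu);
}
#endif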
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001595/*
1596 * Initialize the power management subsystem.
1597 *
1598 * Return value:
1599 * negative error code (-ENODEV, -ENOMEM, ...): initialization failed
1600 * 0: success
1601 */
1602static int __init msm_pm_init(void)
1603{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001604 int ret;
Murali Nalajala93f29992012-03-21 15:59:27 +05301605 int val;
Praveen Chidambaram3895bde2012-05-14 19:42:40 +05301606 enum msm_pm_time_stats_id enable_stats[] = {
1607 MSM_PM_STAT_REQUESTED_IDLE,
1608 MSM_PM_STAT_IDLE_SPIN,
1609 MSM_PM_STAT_IDLE_WFI,
1610 MSM_PM_STAT_IDLE_STANDALONE_POWER_COLLAPSE,
1611 MSM_PM_STAT_IDLE_FAILED_STANDALONE_POWER_COLLAPSE,
1612 MSM_PM_STAT_IDLE_POWER_COLLAPSE,
1613 MSM_PM_STAT_IDLE_FAILED_POWER_COLLAPSE,
1614 MSM_PM_STAT_SUSPEND,
1615 MSM_PM_STAT_FAILED_SUSPEND,
1616 MSM_PM_STAT_NOT_IDLE,
1617 };
1618
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001619#ifdef CONFIG_CPU_V7
1620 pgd_t *pc_pgd;
1621 pmd_t *pmd;
1622 unsigned long pmdval;
Steve Mucklef132c6c2012-06-06 18:30:57 -07001623 unsigned long exit_phys;
1624
1625 exit_phys = virt_to_phys(msm_pm_collapse_exit);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001626
1627 /* Page table for cores to come back up safely. */
1628 pc_pgd = pgd_alloc(&init_mm);
1629 if (!pc_pgd)
1630 return -ENOMEM;
Steve Mucklef132c6c2012-06-06 18:30:57 -07001631 pmd = pmd_offset(pud_offset(pc_pgd + pgd_index(exit_phys), exit_phys),
1632 exit_phys);
1633 pmdval = (exit_phys & PGDIR_MASK) |
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001634 PMD_TYPE_SECT | PMD_SECT_AP_WRITE;
1635 pmd[0] = __pmd(pmdval);
1636 pmd[1] = __pmd(pmdval + (1 << (PGDIR_SHIFT - 1)));
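	/*
	 * Descriptive note (added): with the classic ARM 2-level page table
	 * layout each pgd entry spans 2MB and each section descriptor maps
	 * 1MB (1 << (PGDIR_SHIFT - 1)), so pmd[0] and pmd[1] identity-map
	 * the 2MB region containing msm_pm_collapse_exit for the warm-boot
	 * path.
	 */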
1637
Steve Mucklefcece052012-02-18 20:09:58 -08001638 msm_saved_state_phys =
1639 allocate_contiguous_ebi_nomap(CPU_SAVED_STATE_SIZE *
1640 num_possible_cpus(), 4);
1641 if (!msm_saved_state_phys)
1642 return -ENOMEM;
1643 msm_saved_state = ioremap_nocache(msm_saved_state_phys,
1644 CPU_SAVED_STATE_SIZE *
1645 num_possible_cpus());
1646 if (!msm_saved_state)
1647 return -ENOMEM;
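	/*
	 * Descriptive note (added): this reserves CPU_SAVED_STATE_SIZE bytes
	 * per possible CPU from contiguous EBI memory and maps it uncached,
	 * presumably so the power-collapse entry/exit code can save and
	 * restore per-cpu state at a known physical address.
	 */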
1648
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001649 /* It is remotely possible that the code in msm_pm_collapse_exit()
1650 * which turns on the MMU with this mapping is in the
1651 * next even-numbered megabyte beyond the
1652 * start of msm_pm_collapse_exit().
1653 * Map this megabyte in as well.
1654 */
1655 pmd[2] = __pmd(pmdval + (2 << (PGDIR_SHIFT - 1)));
1656 flush_pmd_entry(pmd);
1657 msm_pm_pc_pgd = virt_to_phys(pc_pgd);
Steve Muckle730ad7a2012-02-21 15:26:37 -08001658 clean_caches((unsigned long)&msm_pm_pc_pgd, sizeof(msm_pm_pc_pgd),
1659 virt_to_phys(&msm_pm_pc_pgd));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001660#endif
1661
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001662 msm_pm_smem_data = smem_alloc(SMEM_APPS_DEM_SLAVE_DATA,
1663 sizeof(*msm_pm_smem_data));
1664 if (msm_pm_smem_data == NULL) {
1665 printk(KERN_ERR "%s: failed to get smsm_data\n", __func__);
1666 return -ENODEV;
1667 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001668
1669 ret = msm_timer_init_time_sync(msm_pm_timeout);
1670 if (ret)
1671 return ret;
1672
1673 ret = smsm_change_intr_mask(SMSM_POWER_MASTER_DEM, 0xFFFFFFFF, 0);
1674 if (ret) {
1675 printk(KERN_ERR "%s: failed to clear interrupt mask, %d\n",
1676 __func__, ret);
1677 return ret;
1678 }
1679
Murali Nalajala93f29992012-03-21 15:59:27 +05301680 if (cpu_is_msm8625()) {
1681 target_type = TARGET_IS_8625;
1682 clean_caches((unsigned long)&target_type, sizeof(target_type),
1683 virt_to_phys(&target_type));
1684
Anji jonnalae644f8e2012-05-09 19:52:18 +05301685 /*
1686 * Configure the MPA5_GDFS_CNT_VAL register for
1687 * DBGPWRUPEREQ_OVERRIDE[17:16] = Override the
1688 * DBGNOPOWERDN for each cpu.
1689 * MPA5_GDFS_CNT_VAL[9:0] = Delay counter for
1690 * GDFS control.
Murali Nalajala93f29992012-03-21 15:59:27 +05301691 */
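		/*
		 * Breakdown of the value written below (added): 0x00030002
		 * sets DBGPWRUPEREQ_OVERRIDE[17:16] = 0b11 (one bit per cpu,
		 * per the comment above) and the GDFS delay counter
		 * [9:0] = 2.
		 */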
Anji jonnalae644f8e2012-05-09 19:52:18 +05301692 val = 0x00030002;
Murali Nalajala93f29992012-03-21 15:59:27 +05301693 __raw_writel(val, (MSM_CFG_CTL_BASE + 0x38));
Murali Nalajala73c13332012-05-15 11:30:59 +05301694
1695 l2x0_base_addr = MSM_L2CC_BASE;
Murali Nalajala93f29992012-03-21 15:59:27 +05301696 }
1697
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001698#ifdef CONFIG_MSM_MEMORY_LOW_POWER_MODE
1699 /* The wakeup_reason field is overloaded during initialization
1700 to signal the Modem that Apps will control the low power modes of
1701 the memory.
1702 */
1703 msm_pm_smem_data->wakeup_reason = 1;
1704 smsm_change_state(SMSM_APPS_DEM, 0, DEM_SLAVE_SMSM_RUN);
1705#endif
1706
1707 BUG_ON(msm_pm_modes == NULL);
1708
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001709 suspend_set_ops(&msm_pm_ops);
1710
1711 msm_pm_mode_sysfs_add();
Praveen Chidambaram3895bde2012-05-14 19:42:40 +05301712 msm_pm_add_stats(enable_stats, ARRAY_SIZE(enable_stats));
Murali Nalajala558c0ce2012-03-29 19:42:08 +05301713
1714 atomic_set(&msm_pm_init_done, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001715 return 0;
1716}
1717
1718late_initcall_sync(msm_pm_init);
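/*
 * Descriptive note (added): late_initcall_sync() runs msm_pm_init() after all
 * regular late_initcalls, presumably so that the SMEM/SMSM and timer services
 * it uses are already available.
 */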