/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/cpuidle.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ktime.h>
#include <linux/pm.h>
#include <linux/pm_qos.h>
#include <linux/smp.h>
#include <linux/suspend.h>
#include <linux/tick.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/of_platform.h>
#include <linux/regulator/krait-regulator.h>
#include <mach/msm_iomap.h>
#include <mach/socinfo.h>
#include <mach/system.h>
#include <mach/scm.h>
#include <mach/msm-krait-l2-accessors.h>
#include <asm/cacheflush.h>
#include <asm/hardware/gic.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/outercache.h>
#ifdef CONFIG_VFP
#include <asm/vfp.h>
#endif
#include "acpuclock.h"
#include "clock.h"
#include "avs.h"
#include <mach/cpuidle.h>
#include "idle.h"
#include "pm.h"
#include "scm-boot.h"
#include "spm.h"
#include "timer.h"
#include "pm-boot.h"
#include <mach/event_timer.h>
#define CREATE_TRACE_POINTS
#include "trace_msm_low_power.h"

#define SCM_L2_RETENTION	(0x2)
#define SCM_CMD_TERMINATE_PC	(0x2)

#define GET_CPU_OF_ATTR(attr) \
	(container_of(attr, struct msm_pm_kobj_attribute, ka)->cpu)

#define SCLK_HZ (32768)
#define MSM_PM_SLEEP_TICK_LIMIT (0x6DDD000)
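
/*
 * Note on the constants above: sleep times handed to the RPM are expressed
 * in 32.768 kHz slow-clock (sclk) ticks, so one tick is roughly 30.5 us.
 * MSM_PM_SLEEP_TICK_LIMIT (0x6DDD000 = 115200000 ticks) therefore caps a
 * single sleep request at about 3515 seconds (~58.6 minutes).
 */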

#define NUM_OF_COUNTERS 3
#define MAX_BUF_SIZE 512

static int msm_pm_debug_mask = 1;
module_param_named(
	debug_mask, msm_pm_debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP
);

static int msm_pm_sleep_time_override;
module_param_named(sleep_time_override,
	msm_pm_sleep_time_override, int, S_IRUGO | S_IWUSR | S_IWGRP);

enum {
	MSM_PM_DEBUG_SUSPEND = BIT(0),
	MSM_PM_DEBUG_POWER_COLLAPSE = BIT(1),
	MSM_PM_DEBUG_SUSPEND_LIMITS = BIT(2),
	MSM_PM_DEBUG_CLOCK = BIT(3),
	MSM_PM_DEBUG_RESET_VECTOR = BIT(4),
	MSM_PM_DEBUG_IDLE_CLK = BIT(5),
	MSM_PM_DEBUG_IDLE = BIT(6),
	MSM_PM_DEBUG_IDLE_LIMITS = BIT(7),
	MSM_PM_DEBUG_HOTPLUG = BIT(8),
};

enum {
	MSM_PM_MODE_ATTR_SUSPEND,
	MSM_PM_MODE_ATTR_IDLE,
	MSM_PM_MODE_ATTR_NR,
};

static char *msm_pm_mode_attr_labels[MSM_PM_MODE_ATTR_NR] = {
	[MSM_PM_MODE_ATTR_SUSPEND] = "suspend_enabled",
	[MSM_PM_MODE_ATTR_IDLE] = "idle_enabled",
};

struct msm_pm_kobj_attribute {
	unsigned int cpu;
	struct kobj_attribute ka;
};

struct msm_pm_sysfs_sleep_mode {
	struct kobject *kobj;
	struct attribute_group attr_group;
	struct attribute *attrs[MSM_PM_MODE_ATTR_NR + 1];
	struct msm_pm_kobj_attribute kas[MSM_PM_MODE_ATTR_NR];
};

static char *msm_pm_sleep_mode_labels[MSM_PM_SLEEP_MODE_NR] = {
	[MSM_PM_SLEEP_MODE_POWER_COLLAPSE] = "power_collapse",
	[MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT] = "wfi",
	[MSM_PM_SLEEP_MODE_RETENTION] = "retention",
	[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE] =
		"standalone_power_collapse",
};

static struct hrtimer pm_hrtimer;
static struct msm_pm_sleep_ops pm_sleep_ops;
static bool msm_pm_ldo_retention_enabled = true;
static bool msm_pm_use_sync_timer;
static struct msm_pm_cp15_save_data cp15_data;
static bool msm_pm_retention_calls_tz;
static uint32_t msm_pm_max_sleep_time;
static bool msm_no_ramp_down_pc;
static struct msm_pm_sleep_status_data *msm_pm_slp_sts;

/*
 * Write out the attribute.
 */
static ssize_t msm_pm_mode_attr_show(
	struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	int ret = -EINVAL;
	int i;

	for (i = 0; i < MSM_PM_SLEEP_MODE_NR; i++) {
		struct kernel_param kp;
		unsigned int cpu;
		struct msm_pm_platform_data *mode;

		if (msm_pm_sleep_mode_labels[i] == NULL)
			continue;

		if (strcmp(kobj->name, msm_pm_sleep_mode_labels[i]))
			continue;

		cpu = GET_CPU_OF_ATTR(attr);
		mode = &msm_pm_sleep_modes[MSM_PM_MODE(cpu, i)];

		if (!strcmp(attr->attr.name,
			msm_pm_mode_attr_labels[MSM_PM_MODE_ATTR_SUSPEND])) {
			u32 arg = mode->suspend_enabled;
			kp.arg = &arg;
			ret = param_get_ulong(buf, &kp);
		} else if (!strcmp(attr->attr.name,
			msm_pm_mode_attr_labels[MSM_PM_MODE_ATTR_IDLE])) {
			u32 arg = mode->idle_enabled;
			kp.arg = &arg;
			ret = param_get_ulong(buf, &kp);
		}

		break;
	}

	if (ret > 0) {
		strlcat(buf, "\n", PAGE_SIZE);
		ret++;
	}

	return ret;
}

static ssize_t msm_pm_mode_attr_store(struct kobject *kobj,
	struct kobj_attribute *attr, const char *buf, size_t count)
{
	int ret = -EINVAL;
	int i;

	for (i = 0; i < MSM_PM_SLEEP_MODE_NR; i++) {
		struct kernel_param kp;
		unsigned int cpu;
		struct msm_pm_platform_data *mode;

		if (msm_pm_sleep_mode_labels[i] == NULL)
			continue;

		if (strcmp(kobj->name, msm_pm_sleep_mode_labels[i]))
			continue;

		cpu = GET_CPU_OF_ATTR(attr);
		mode = &msm_pm_sleep_modes[MSM_PM_MODE(cpu, i)];

		if (!strcmp(attr->attr.name,
			msm_pm_mode_attr_labels[MSM_PM_MODE_ATTR_SUSPEND])) {
			kp.arg = &mode->suspend_enabled;
			ret = param_set_byte(buf, &kp);
		} else if (!strcmp(attr->attr.name,
			msm_pm_mode_attr_labels[MSM_PM_MODE_ATTR_IDLE])) {
			kp.arg = &mode->idle_enabled;
			ret = param_set_byte(buf, &kp);
		}

		break;
	}

	return ret ? ret : count;
}

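/*
 * Per-CPU sysfs layout created below (the module name comes from
 * KBUILD_MODNAME, so the exact path depends on how this file is built):
 *
 *   /sys/module/<modname>/modes/cpuN/<sleep mode>/suspend_enabled
 *   /sys/module/<modname>/modes/cpuN/<sleep mode>/idle_enabled
 *
 * e.g. echo 1 > /sys/module/<modname>/modes/cpu0/retention/idle_enabled
 * would enable the retention low power mode in CPU0's idle path.
 */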
static int __devinit msm_pm_mode_sysfs_add_cpu(
	unsigned int cpu, struct kobject *modes_kobj)
{
	char cpu_name[8];
	struct kobject *cpu_kobj;
	struct msm_pm_sysfs_sleep_mode *mode = NULL;
	int i, j, k;
	int ret;

	snprintf(cpu_name, sizeof(cpu_name), "cpu%u", cpu);
	cpu_kobj = kobject_create_and_add(cpu_name, modes_kobj);
	if (!cpu_kobj) {
		pr_err("%s: cannot create %s kobject\n", __func__, cpu_name);
		ret = -ENOMEM;
		goto mode_sysfs_add_cpu_exit;
	}

	for (i = 0; i < MSM_PM_SLEEP_MODE_NR; i++) {
		int idx = MSM_PM_MODE(cpu, i);

		if ((!msm_pm_sleep_modes[idx].suspend_supported)
			&& (!msm_pm_sleep_modes[idx].idle_supported))
			continue;

		if (!msm_pm_sleep_mode_labels[i] ||
				!msm_pm_sleep_mode_labels[i][0])
			continue;

		mode = kzalloc(sizeof(*mode), GFP_KERNEL);
		if (!mode) {
			pr_err("%s: cannot allocate memory for attributes\n",
				__func__);
			ret = -ENOMEM;
			goto mode_sysfs_add_cpu_exit;
		}

		mode->kobj = kobject_create_and_add(
				msm_pm_sleep_mode_labels[i], cpu_kobj);
		if (!mode->kobj) {
			pr_err("%s: cannot create kobject\n", __func__);
			ret = -ENOMEM;
			goto mode_sysfs_add_cpu_exit;
		}

		for (k = 0, j = 0; k < MSM_PM_MODE_ATTR_NR; k++) {
			if ((k == MSM_PM_MODE_ATTR_IDLE) &&
				!msm_pm_sleep_modes[idx].idle_supported)
				continue;
			if ((k == MSM_PM_MODE_ATTR_SUSPEND) &&
				!msm_pm_sleep_modes[idx].suspend_supported)
				continue;
			sysfs_attr_init(&mode->kas[j].ka.attr);
			mode->kas[j].cpu = cpu;
			mode->kas[j].ka.attr.mode = 0644;
			mode->kas[j].ka.show = msm_pm_mode_attr_show;
			mode->kas[j].ka.store = msm_pm_mode_attr_store;
			mode->kas[j].ka.attr.name = msm_pm_mode_attr_labels[k];
			mode->attrs[j] = &mode->kas[j].ka.attr;
			j++;
		}
		mode->attrs[j] = NULL;

		mode->attr_group.attrs = mode->attrs;
		ret = sysfs_create_group(mode->kobj, &mode->attr_group);
		if (ret) {
			pr_err("%s: cannot create kobject attribute group\n",
				__func__);
			goto mode_sysfs_add_cpu_exit;
		}
	}

	ret = 0;

mode_sysfs_add_cpu_exit:
	if (ret) {
		if (mode && mode->kobj)
			kobject_del(mode->kobj);
		kfree(mode);
	}

	return ret;
}

int __devinit msm_pm_mode_sysfs_add(void)
{
	struct kobject *module_kobj;
	struct kobject *modes_kobj;
	unsigned int cpu;
	int ret;

	module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
	if (!module_kobj) {
		pr_err("%s: cannot find kobject for module %s\n",
			__func__, KBUILD_MODNAME);
		ret = -ENOENT;
		goto mode_sysfs_add_exit;
	}

	modes_kobj = kobject_create_and_add("modes", module_kobj);
	if (!modes_kobj) {
		pr_err("%s: cannot create modes kobject\n", __func__);
		ret = -ENOMEM;
		goto mode_sysfs_add_exit;
	}

	for_each_possible_cpu(cpu) {
		ret = msm_pm_mode_sysfs_add_cpu(cpu, modes_kobj);
		if (ret)
			goto mode_sysfs_add_exit;
	}

	ret = 0;

mode_sysfs_add_exit:
	return ret;
}

/*
 * Configure hardware registers in preparation for Apps power down.
 */
static void msm_pm_config_hw_before_power_down(void)
{
	return;
}

/*
 * Clear hardware registers after Apps powers up.
 */
static void msm_pm_config_hw_after_power_up(void)
{
}

/*
 * Configure hardware registers in preparation for SWFI.
 */
static void msm_pm_config_hw_before_swfi(void)
{
	return;
}

/*
 * Configure/Restore hardware registers in preparation for Retention.
 */

static void msm_pm_config_hw_after_retention(void)
{
	int ret;

	ret = msm_spm_set_low_power_mode(MSM_SPM_MODE_CLOCK_GATING, false);
	WARN_ON(ret);
	krait_power_mdd_enable(smp_processor_id(), false);
}

static void msm_pm_config_hw_before_retention(void)
{
	krait_power_mdd_enable(smp_processor_id(), true);
	return;
}

/*
 * Convert time from nanoseconds to slow clock ticks, then cap it to the
 * specified limit
 */
static int64_t msm_pm_convert_and_cap_time(int64_t time_ns, int64_t limit)
{
	do_div(time_ns, NSEC_PER_SEC / SCLK_HZ);
	return (time_ns > limit) ? limit : time_ns;
}
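
/*
 * Example of the conversion above: NSEC_PER_SEC / SCLK_HZ is
 * 1000000000 / 32768 = 30517 ns per slow-clock tick (integer division),
 * so a 100 ms request (100000000 ns) becomes 100000000 / 30517 = 3276
 * ticks, which is then capped at 'limit'.
 */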

/*
 * Set the sleep time for suspend. 0 means infinite sleep time.
 */
void msm_pm_set_max_sleep_time(int64_t max_sleep_time_ns)
{
	if (max_sleep_time_ns == 0) {
		msm_pm_max_sleep_time = 0;
	} else {
		msm_pm_max_sleep_time =
			(uint32_t)msm_pm_convert_and_cap_time(
			max_sleep_time_ns, MSM_PM_SLEEP_TICK_LIMIT);

		if (msm_pm_max_sleep_time == 0)
			msm_pm_max_sleep_time = 1;
	}

	if (MSM_PM_DEBUG_SUSPEND & msm_pm_debug_mask)
		pr_info("%s: Requested %lld ns Giving %u sclk ticks\n",
			__func__, max_sleep_time_ns,
			msm_pm_max_sleep_time);
}
EXPORT_SYMBOL(msm_pm_set_max_sleep_time);

static void msm_pm_save_cpu_reg(void)
{
	int i;

	/* Only on core0 */
	if (smp_processor_id())
		return;

	/*
	 * On some targets, powering off L2 during power collapse may reset
	 * the core's clock mux configuration, and the reset default may not
	 * keep the core happy when it resumes.
	 * Save the active vdd, and set the core vdd to QSB max vdd, so that
	 * when the core resumes, it is capable of supporting the current QSB
	 * rate. Then restore the active vdd before switching the acpuclk rate.
	 */
	if (msm_pm_get_l2_flush_flag() == 1) {
		cp15_data.active_vdd = msm_spm_get_vdd(0);
		for (i = 0; i < cp15_data.reg_saved_state_size; i++)
			cp15_data.reg_val[i] =
				get_l2_indirect_reg(
					cp15_data.reg_data[i]);
		msm_spm_set_vdd(0, cp15_data.qsb_pc_vdd);
	}
}

static void msm_pm_restore_cpu_reg(void)
{
	int i;

	/* Only on core0 */
	if (smp_processor_id())
		return;

	if (msm_pm_get_l2_flush_flag() == 1) {
		for (i = 0; i < cp15_data.reg_saved_state_size; i++)
			set_l2_indirect_reg(
				cp15_data.reg_data[i],
				cp15_data.reg_val[i]);
		msm_spm_set_vdd(0, cp15_data.active_vdd);
	}
}

static void msm_pm_swfi(void)
{
	msm_pm_config_hw_before_swfi();
	msm_arch_idle();
}

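/*
 * Enter the retention low power mode: the core stays powered at a reduced
 * retention voltage while the SPM runs the sequence. Depending on the
 * target, entry is either a plain WFI or an SCM call into TZ
 * (msm_pm_retention_calls_tz) so that L2 can be put into retention as well.
 */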
static void msm_pm_retention(void)
{
	int ret = 0;

	msm_pm_config_hw_before_retention();
	ret = msm_spm_set_low_power_mode(MSM_SPM_MODE_POWER_RETENTION, false);
	WARN_ON(ret);

	if (msm_pm_retention_calls_tz)
		scm_call_atomic1(SCM_SVC_BOOT, SCM_CMD_TERMINATE_PC,
				SCM_L2_RETENTION);
	else
		msm_arch_idle();

	msm_pm_config_hw_after_retention();
}

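/*
 * Core power-collapse helper: program the SPM for power collapse, point the
 * warm-boot vector at msm_pm_collapse_exit (or msm_secondary_startup for a
 * hotplugged secondary core), then call msm_pm_collapse(). When the core
 * actually collapsed and resumed through that vector, cpu_init() and FIQs
 * are re-initialised here before returning.
 */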
static bool __ref msm_pm_spm_power_collapse(
	unsigned int cpu, bool from_idle, bool notify_rpm)
{
	void *entry;
	bool collapsed = 0;
	int ret;

	if (MSM_PM_DEBUG_POWER_COLLAPSE & msm_pm_debug_mask)
		pr_info("CPU%u: %s: notify_rpm %d\n",
			cpu, __func__, (int) notify_rpm);

	ret = msm_spm_set_low_power_mode(
		MSM_SPM_MODE_POWER_COLLAPSE, notify_rpm);
	WARN_ON(ret);

	entry = (!cpu || from_idle) ?
		msm_pm_collapse_exit : msm_secondary_startup;
	msm_pm_boot_config_before_pc(cpu, virt_to_phys(entry));

	if (MSM_PM_DEBUG_RESET_VECTOR & msm_pm_debug_mask)
		pr_info("CPU%u: %s: program vector to %p\n",
			cpu, __func__, entry);

	collapsed = msm_pm_collapse();

	msm_pm_boot_config_after_pc(cpu);

	if (collapsed) {
		cpu_init();
		local_fiq_enable();
	}

	if (MSM_PM_DEBUG_POWER_COLLAPSE & msm_pm_debug_mask)
		pr_info("CPU%u: %s: msm_pm_collapse returned, collapsed %d\n",
			cpu, __func__, collapsed);

	ret = msm_spm_set_low_power_mode(MSM_SPM_MODE_CLOCK_GATING, false);
	WARN_ON(ret);
	return collapsed;
}

static bool msm_pm_power_collapse_standalone(bool from_idle)
{
	unsigned int cpu = smp_processor_id();
	unsigned int avsdscr;
	unsigned int avscsr;
	bool collapsed;

	avsdscr = avs_get_avsdscr();
	avscsr = avs_get_avscsr();
	avs_set_avscsr(0); /* Disable AVS */

	collapsed = msm_pm_spm_power_collapse(cpu, from_idle, false);

	avs_set_avsdscr(avsdscr);
	avs_set_avscsr(avscsr);
	return collapsed;
}

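/*
 * Full (RPM-notified) power collapse: AVS is disabled across the collapse,
 * the CPU clock is ramped down via acpuclk_power_collapse() unless the SAW
 * turns off the PLL itself (msm_no_ramp_down_pc), and any L2/CP15 indirect
 * registers listed in cp15_data are saved and restored around the collapse.
 */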
static bool msm_pm_power_collapse(bool from_idle)
{
	unsigned int cpu = smp_processor_id();
	unsigned long saved_acpuclk_rate = 0;
	unsigned int avsdscr;
	unsigned int avscsr;
	bool collapsed;

	if (MSM_PM_DEBUG_POWER_COLLAPSE & msm_pm_debug_mask)
		pr_info("CPU%u: %s: idle %d\n",
			cpu, __func__, (int)from_idle);

	msm_pm_config_hw_before_power_down();
	if (MSM_PM_DEBUG_POWER_COLLAPSE & msm_pm_debug_mask)
		pr_info("CPU%u: %s: pre power down\n", cpu, __func__);

	avsdscr = avs_get_avsdscr();
	avscsr = avs_get_avscsr();
	avs_set_avscsr(0); /* Disable AVS */

	if (cpu_online(cpu) && !msm_no_ramp_down_pc)
		saved_acpuclk_rate = acpuclk_power_collapse();

	if (MSM_PM_DEBUG_CLOCK & msm_pm_debug_mask)
		pr_info("CPU%u: %s: change clock rate (old rate = %lu)\n",
			cpu, __func__, saved_acpuclk_rate);

	if (cp15_data.save_cp15)
		msm_pm_save_cpu_reg();

	collapsed = msm_pm_spm_power_collapse(cpu, from_idle, true);

	if (cp15_data.save_cp15)
		msm_pm_restore_cpu_reg();

	if (cpu_online(cpu)) {
		if (MSM_PM_DEBUG_CLOCK & msm_pm_debug_mask)
			pr_info("CPU%u: %s: restore clock rate to %lu\n",
				cpu, __func__, saved_acpuclk_rate);
		if (!msm_no_ramp_down_pc &&
			acpuclk_set_rate(cpu, saved_acpuclk_rate, SETRATE_PC)
				< 0)
			pr_err("CPU%u: %s: failed to restore clock rate(%lu)\n",
				cpu, __func__, saved_acpuclk_rate);
	} else {
		unsigned int gic_dist_enabled;
		unsigned int gic_dist_pending;
		gic_dist_enabled = readl_relaxed(
				MSM_QGIC_DIST_BASE + GIC_DIST_ENABLE_CLEAR);
		gic_dist_pending = readl_relaxed(
				MSM_QGIC_DIST_BASE + GIC_DIST_PENDING_SET);
		mb();
		gic_dist_pending &= gic_dist_enabled;

		if (gic_dist_pending)
			pr_err("CPU %d interrupted during hotplug. Pending int 0x%x\n",
				cpu, gic_dist_pending);
	}

	avs_set_avsdscr(avsdscr);
	avs_set_avscsr(avscsr);
	msm_pm_config_hw_after_power_up();
	if (MSM_PM_DEBUG_POWER_COLLAPSE & msm_pm_debug_mask)
		pr_info("CPU%u: %s: post power up\n", cpu, __func__);

	if (MSM_PM_DEBUG_POWER_COLLAPSE & msm_pm_debug_mask)
		pr_info("CPU%u: %s: return\n", cpu, __func__);
	return collapsed;
}

static int64_t msm_pm_timer_enter_idle(void)
{
	if (msm_pm_use_sync_timer)
		return ktime_to_ns(tick_nohz_get_sleep_length());

	return msm_timer_enter_idle();
}

static void msm_pm_timer_exit_idle(bool timer_halted)
{
	if (msm_pm_use_sync_timer)
		return;

	msm_timer_exit_idle((int) timer_halted);
}

static int64_t msm_pm_timer_enter_suspend(int64_t *period)
{
	int64_t time = 0;

	if (msm_pm_use_sync_timer)
		return ktime_to_ns(ktime_get());

	time = msm_timer_get_sclk_time(period);
	if (!time)
		pr_err("%s: Unable to read sclk.\n", __func__);

	return time;
}

static int64_t msm_pm_timer_exit_suspend(int64_t time, int64_t period)
{
	if (msm_pm_use_sync_timer)
		return ktime_to_ns(ktime_get()) - time;

	if (time != 0) {
		int64_t end_time = msm_timer_get_sclk_time(NULL);
		if (end_time != 0) {
			time = end_time - time;
			if (time < 0)
				time += period;
		} else
			time = 0;
	}

	return time;
}

/**
 * pm_hrtimer_cb() : Callback function for hrtimer created if the
 * core needs to be awake to handle an event.
 * @hrtimer : Pointer to hrtimer
 */
static enum hrtimer_restart pm_hrtimer_cb(struct hrtimer *hrtimer)
{
	return HRTIMER_NORESTART;
}

/**
 * msm_pm_set_timer() : Set an hrtimer to wake up the core in time
 * to handle an event.
 */
static void msm_pm_set_timer(uint32_t modified_time_us)
{
	u64 modified_time_ns = modified_time_us * NSEC_PER_USEC;
	ktime_t modified_ktime = ns_to_ktime(modified_time_ns);
	pm_hrtimer.function = pm_hrtimer_cb;
	hrtimer_start(&pm_hrtimer, modified_ktime, HRTIMER_MODE_ABS);
}

/******************************************************************************
 * External Idle/Suspend Functions
 *****************************************************************************/

void arch_idle(void)
{
	return;
}

static inline void msm_pm_ftrace_lpm_enter(unsigned int cpu,
		uint32_t latency, uint32_t sleep_us,
		uint32_t wake_up,
		enum msm_pm_sleep_mode mode)
{
	switch (mode) {
	case MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT:
		trace_msm_pm_enter_wfi(cpu, latency, sleep_us, wake_up);
		break;
	case MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE:
		trace_msm_pm_enter_spc(cpu, latency, sleep_us, wake_up);
		break;
	case MSM_PM_SLEEP_MODE_POWER_COLLAPSE:
		trace_msm_pm_enter_pc(cpu, latency, sleep_us, wake_up);
		break;
	case MSM_PM_SLEEP_MODE_RETENTION:
		trace_msm_pm_enter_ret(cpu, latency, sleep_us, wake_up);
		break;
	default:
		break;
	}
}

static inline void msm_pm_ftrace_lpm_exit(unsigned int cpu,
		enum msm_pm_sleep_mode mode,
		bool success)
{
	switch (mode) {
	case MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT:
		trace_msm_pm_exit_wfi(cpu, success);
		break;
	case MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE:
		trace_msm_pm_exit_spc(cpu, success);
		break;
	case MSM_PM_SLEEP_MODE_POWER_COLLAPSE:
		trace_msm_pm_exit_pc(cpu, success);
		break;
	case MSM_PM_SLEEP_MODE_RETENTION:
		trace_msm_pm_exit_ret(cpu, success);
		break;
	default:
		break;
	}
}

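/*
 * Pick the deepest allowed idle state for this CPU: build the time budget
 * from the PM QoS CPU_DMA_LATENCY request and the nohz sleep length, filter
 * out modes that are disabled or unsafe right now (e.g. power collapse with
 * more than one CPU online), then ask pm_sleep_ops.lowest_limits() for each
 * remaining mode and keep the one with the lowest reported power.
 */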
static int msm_pm_idle_prepare(struct cpuidle_device *dev,
		struct cpuidle_driver *drv, int index,
		void **msm_pm_idle_rs_limits)
{
	int i;
	unsigned int power_usage = -1;
	int ret = MSM_PM_SLEEP_MODE_NOT_SELECTED;
	uint32_t modified_time_us = 0;
	struct msm_pm_time_params time_param;

	time_param.latency_us =
		(uint32_t) pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
	time_param.sleep_us =
		(uint32_t) (ktime_to_us(tick_nohz_get_sleep_length())
								& UINT_MAX);
	time_param.modified_time_us = 0;

	if (!dev->cpu)
		time_param.next_event_us =
			(uint32_t) (ktime_to_us(get_next_event_time())
								& UINT_MAX);
	else
		time_param.next_event_us = 0;

	for (i = 0; i < dev->state_count; i++) {
		struct cpuidle_state *state = &drv->states[i];
		struct cpuidle_state_usage *st_usage = &dev->states_usage[i];
		enum msm_pm_sleep_mode mode;
		bool allow;
		uint32_t power;
		int idx;
		void *rs_limits = NULL;

		mode = (enum msm_pm_sleep_mode) cpuidle_get_statedata(st_usage);
		idx = MSM_PM_MODE(dev->cpu, mode);

		allow = msm_pm_sleep_modes[idx].idle_enabled &&
				msm_pm_sleep_modes[idx].idle_supported;

		switch (mode) {
		case MSM_PM_SLEEP_MODE_POWER_COLLAPSE:
			if (num_online_cpus() > 1)
				allow = false;
			break;
		case MSM_PM_SLEEP_MODE_RETENTION:
			/*
			 * The Krait BHS regulator doesn't have enough
			 * headroom to drive the retention voltage on the
			 * LDO, so retention has been disabled.
			 */
			if (!msm_pm_ldo_retention_enabled)
				allow = false;

			if (msm_pm_retention_calls_tz && num_online_cpus() > 1)
				allow = false;
			break;
		case MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE:
		case MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT:
			break;
		default:
			allow = false;
			break;
		}

		if (!allow)
			continue;

		if (pm_sleep_ops.lowest_limits)
			rs_limits = pm_sleep_ops.lowest_limits(true,
					mode, &time_param, &power);

		if (MSM_PM_DEBUG_IDLE & msm_pm_debug_mask)
			pr_info("CPU%u:%s:%s, latency %uus, slp %uus, lim %p\n",
				dev->cpu, __func__, state->desc,
				time_param.latency_us,
				time_param.sleep_us, rs_limits);
		if (!rs_limits)
			continue;

		if (power < power_usage) {
			power_usage = power;
			modified_time_us = time_param.modified_time_us;
			ret = mode;
			*msm_pm_idle_rs_limits = rs_limits;
		}
	}

	if (modified_time_us && !dev->cpu)
		msm_pm_set_timer(modified_time_us);

	msm_pm_ftrace_lpm_enter(dev->cpu, time_param.latency_us,
			time_param.sleep_us, time_param.next_event_us,
			ret);

	return ret;
}

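/*
 * cpuidle entry point: select a sleep mode via msm_pm_idle_prepare(), hand
 * the resource limits to the sleep ops, execute the chosen low power mode,
 * and report the measured residency back through dev->last_residency.
 */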
enum msm_pm_sleep_mode msm_pm_idle_enter(struct cpuidle_device *dev,
		struct cpuidle_driver *drv, int index)
{
	int64_t time;
	bool collapsed = 1;
	int exit_stat = -1;
	enum msm_pm_sleep_mode sleep_mode;
	void *msm_pm_idle_rs_limits = NULL;
	int sleep_delay = 1;
	int ret = -ENODEV;
	int64_t timer_expiration = 0;
	int notify_rpm = false;
	bool timer_halted = false;

	sleep_mode = msm_pm_idle_prepare(dev, drv, index,
		&msm_pm_idle_rs_limits);

	if (!msm_pm_idle_rs_limits) {
		sleep_mode = MSM_PM_SLEEP_MODE_NOT_SELECTED;
		goto cpuidle_enter_bail;
	}

	if (MSM_PM_DEBUG_IDLE & msm_pm_debug_mask)
		pr_info("CPU%u: %s: mode %d\n",
			smp_processor_id(), __func__, sleep_mode);

	time = ktime_to_ns(ktime_get());

	if (sleep_mode == MSM_PM_SLEEP_MODE_POWER_COLLAPSE) {
		notify_rpm = true;
		timer_expiration = msm_pm_timer_enter_idle();

		sleep_delay = (uint32_t) msm_pm_convert_and_cap_time(
			timer_expiration, MSM_PM_SLEEP_TICK_LIMIT);
		if (sleep_delay == 0) /* 0 would mean infinite time */
			sleep_delay = 1;
	}

	if (pm_sleep_ops.enter_sleep)
		ret = pm_sleep_ops.enter_sleep(sleep_delay,
			msm_pm_idle_rs_limits, true, notify_rpm);
	if (ret)
		goto cpuidle_enter_bail;

	switch (sleep_mode) {
	case MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT:
		msm_pm_swfi();
		exit_stat = MSM_PM_STAT_IDLE_WFI;
		break;

	case MSM_PM_SLEEP_MODE_RETENTION:
		msm_pm_retention();
		exit_stat = MSM_PM_STAT_RETENTION;
		break;

	case MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE:
		collapsed = msm_pm_power_collapse_standalone(true);
		exit_stat = MSM_PM_STAT_IDLE_STANDALONE_POWER_COLLAPSE;
		break;

	case MSM_PM_SLEEP_MODE_POWER_COLLAPSE:
		if (MSM_PM_DEBUG_IDLE_CLK & msm_pm_debug_mask)
			clock_debug_print_enabled();

		collapsed = msm_pm_power_collapse(true);
		timer_halted = true;

		exit_stat = MSM_PM_STAT_IDLE_POWER_COLLAPSE;
		msm_pm_timer_exit_idle(timer_halted);
		break;

	case MSM_PM_SLEEP_MODE_NOT_SELECTED:
		goto cpuidle_enter_bail;
		break;

	default:
		__WARN();
		goto cpuidle_enter_bail;
		break;
	}

	if (pm_sleep_ops.exit_sleep)
		pm_sleep_ops.exit_sleep(msm_pm_idle_rs_limits, true,
				notify_rpm, collapsed);

	time = ktime_to_ns(ktime_get()) - time;
	msm_pm_ftrace_lpm_exit(smp_processor_id(), sleep_mode, collapsed);
	if (exit_stat >= 0)
		msm_pm_add_stat(exit_stat, time);
	do_div(time, 1000);
	dev->last_residency = (int) time;
	return sleep_mode;

cpuidle_enter_bail:
	dev->last_residency = 0;
	if (sleep_mode == MSM_PM_SLEEP_MODE_POWER_COLLAPSE)
		msm_pm_timer_exit_idle(timer_halted);
	sleep_mode = MSM_PM_SLEEP_MODE_NOT_SELECTED;
	return sleep_mode;
}

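/*
 * Typically used on the CPU hotplug path: after a core is taken offline,
 * poll its SPM sleep-status register (set up by the cpu_slp_status driver
 * below) until the hardware reports that the core has actually power
 * collapsed.
 */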
int msm_pm_wait_cpu_shutdown(unsigned int cpu)
{
	int timeout = 10;

	if (!msm_pm_slp_sts)
		return 0;
	if (!msm_pm_slp_sts[cpu].base_addr)
		return 0;
	while (timeout--) {
		/*
		 * Check for the SPM of the core being hotplugged to set
		 * its sleep state. The SPM sleep state indicates that the
		 * core has been power collapsed.
		 */
		int acc_sts = __raw_readl(msm_pm_slp_sts[cpu].base_addr);

		if (acc_sts & msm_pm_slp_sts[cpu].mask)
			return 0;
		usleep(100);
	}

	pr_info("%s(): Timed out waiting for CPU %u SPM to enter sleep state\n",
		__func__, cpu);
	return -EBUSY;
}

void msm_pm_cpu_enter_lowpower(unsigned int cpu)
{
	int i;
	bool allow[MSM_PM_SLEEP_MODE_NR];

	for (i = 0; i < MSM_PM_SLEEP_MODE_NR; i++) {
		struct msm_pm_platform_data *mode;

		mode = &msm_pm_sleep_modes[MSM_PM_MODE(cpu, i)];
		allow[i] = mode->suspend_supported && mode->suspend_enabled;
	}

	if (MSM_PM_DEBUG_HOTPLUG & msm_pm_debug_mask)
		pr_notice("CPU%u: %s: shutting down cpu\n", cpu, __func__);

	if (allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE])
		msm_pm_power_collapse(false);
	else if (allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE])
		msm_pm_power_collapse_standalone(false);
	else if (allow[MSM_PM_SLEEP_MODE_RETENTION])
		msm_pm_retention();
	else
		msm_pm_swfi();
}

static void msm_pm_ack_retention_disable(void *data)
{
	/*
	 * This is a NULL function to ensure that the core has woken up
	 * and is safe to disable retention.
	 */
}

/**
 * msm_pm_enable_retention() - Disable/Enable retention on all cores
 * @enable: Enable/Disable retention
 *
 */
void msm_pm_enable_retention(bool enable)
{
	msm_pm_ldo_retention_enabled = enable;
	/*
	 * If retention is being disabled, wake up all online cores to ensure
	 * that they aren't executing retention. Offlined cores need not be
	 * woken up as they enter the deepest sleep mode, namely RPM assisted
	 * power collapse.
	 */
	if (!enable)
		smp_call_function_many(cpu_online_mask,
				msm_pm_ack_retention_disable,
				NULL, true);
}
EXPORT_SYMBOL(msm_pm_enable_retention);

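/*
 * System suspend entry point (platform_suspend_ops.enter): runs on core 0
 * only and picks the deepest sleep mode enabled for suspend, using
 * msm_pm_max_sleep_time (or the sleep_time_override module parameter) as
 * the sleep duration for a full, RPM-notified power collapse.
 */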
static int msm_pm_enter(suspend_state_t state)
{
	bool allow[MSM_PM_SLEEP_MODE_NR];
	int i;
	int64_t period = 0;
	int64_t time = msm_pm_timer_enter_suspend(&period);
	struct msm_pm_time_params time_param;

	time_param.latency_us = -1;
	time_param.sleep_us = -1;
	time_param.next_event_us = 0;

	if (MSM_PM_DEBUG_SUSPEND & msm_pm_debug_mask)
		pr_info("%s\n", __func__);

	if (smp_processor_id()) {
		__WARN();
		goto enter_exit;
	}

	for (i = 0; i < MSM_PM_SLEEP_MODE_NR; i++) {
		struct msm_pm_platform_data *mode;

		mode = &msm_pm_sleep_modes[MSM_PM_MODE(0, i)];
		allow[i] = mode->suspend_supported && mode->suspend_enabled;
	}

	if (allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE]) {
		void *rs_limits = NULL;
		int ret = -ENODEV;
		uint32_t power;

		if (MSM_PM_DEBUG_SUSPEND & msm_pm_debug_mask)
			pr_info("%s: power collapse\n", __func__);

		clock_debug_print_enabled();

		if (msm_pm_sleep_time_override > 0) {
			int64_t ns = NSEC_PER_SEC *
				(int64_t) msm_pm_sleep_time_override;
			msm_pm_set_max_sleep_time(ns);
			msm_pm_sleep_time_override = 0;
		}

		if (pm_sleep_ops.lowest_limits)
			rs_limits = pm_sleep_ops.lowest_limits(false,
			MSM_PM_SLEEP_MODE_POWER_COLLAPSE, &time_param, &power);

		if (rs_limits) {
			if (pm_sleep_ops.enter_sleep)
				ret = pm_sleep_ops.enter_sleep(
						msm_pm_max_sleep_time,
						rs_limits, false, true);
			if (!ret) {
				int collapsed = msm_pm_power_collapse(false);
				if (pm_sleep_ops.exit_sleep) {
					pm_sleep_ops.exit_sleep(rs_limits,
						false, true, collapsed);
				}
			}
		} else {
			pr_err("%s: cannot find the lowest power limit\n",
				__func__);
		}
		time = msm_pm_timer_exit_suspend(time, period);
		msm_pm_add_stat(MSM_PM_STAT_SUSPEND, time);
	} else if (allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE]) {
		if (MSM_PM_DEBUG_SUSPEND & msm_pm_debug_mask)
			pr_info("%s: standalone power collapse\n", __func__);
		msm_pm_power_collapse_standalone(false);
	} else if (allow[MSM_PM_SLEEP_MODE_RETENTION]) {
		if (MSM_PM_DEBUG_SUSPEND & msm_pm_debug_mask)
			pr_info("%s: retention\n", __func__);
		msm_pm_retention();
	} else if (allow[MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT]) {
		if (MSM_PM_DEBUG_SUSPEND & msm_pm_debug_mask)
			pr_info("%s: swfi\n", __func__);
		msm_pm_swfi();
	}

enter_exit:
	if (MSM_PM_DEBUG_SUSPEND & msm_pm_debug_mask)
		pr_info("%s: return\n", __func__);

	return 0;
}

void msm_pm_set_sleep_ops(struct msm_pm_sleep_ops *ops)
{
	if (ops)
		pm_sleep_ops = *ops;
}

static const struct platform_suspend_ops msm_pm_ops = {
	.enter = msm_pm_enter,
	.valid = suspend_valid_only_mem,
};
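
/*
 * The "cpu_slp_status" device provides per-CPU sleep status registers. The
 * probe below maps one register per possible CPU, either from the DT
 * properties qcom,cpu-alias-addr (per-CPU stride) and qcom,sleep-status-mask
 * or from legacy platform data, for msm_pm_wait_cpu_shutdown() to poll.
 */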
static int __devinit msm_cpu_status_probe(struct platform_device *pdev)
{
	struct msm_pm_sleep_status_data *pdata;
	char *key;
	u32 cpu;

	if (!pdev)
		return -EFAULT;

	msm_pm_slp_sts =
		kzalloc(sizeof(*msm_pm_slp_sts) * num_possible_cpus(),
				GFP_KERNEL);

	if (!msm_pm_slp_sts)
		return -ENOMEM;

	if (pdev->dev.of_node) {
		struct resource *res;
		u32 offset;
		int rc;
		u32 mask;

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (!res)
			goto fail_free_mem;

		key = "qcom,cpu-alias-addr";
		rc = of_property_read_u32(pdev->dev.of_node, key, &offset);

		if (rc)
			goto fail_free_mem;

		key = "qcom,sleep-status-mask";
		rc = of_property_read_u32(pdev->dev.of_node, key,
					&mask);
		if (rc)
			goto fail_free_mem;

		for_each_possible_cpu(cpu) {
			msm_pm_slp_sts[cpu].base_addr =
				ioremap(res->start + cpu * offset,
					resource_size(res));
			msm_pm_slp_sts[cpu].mask = mask;

			if (!msm_pm_slp_sts[cpu].base_addr)
				goto failed_of_node;
		}

	} else {
		pdata = pdev->dev.platform_data;
		if (!pdev->dev.platform_data)
			goto fail_free_mem;

		for_each_possible_cpu(cpu) {
			msm_pm_slp_sts[cpu].base_addr =
				pdata->base_addr + cpu * pdata->cpu_offset;
			msm_pm_slp_sts[cpu].mask = pdata->mask;
		}
	}

	return 0;

failed_of_node:
	pr_info("%s(): Failed to read key=%s\n", __func__, key);
	for_each_possible_cpu(cpu) {
		if (msm_pm_slp_sts[cpu].base_addr)
			iounmap(msm_pm_slp_sts[cpu].base_addr);
	}
fail_free_mem:
	kfree(msm_pm_slp_sts);
	return -EINVAL;
}

static struct of_device_id msm_slp_sts_match_tbl[] = {
	{.compatible = "qcom,cpu-sleep-status"},
	{},
};

static struct platform_driver msm_cpu_status_driver = {
	.probe = msm_cpu_status_probe,
	.driver = {
		.name = "cpu_slp_status",
		.owner = THIS_MODULE,
		.of_match_table = msm_slp_sts_match_tbl,
	},
};

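/*
 * One-time setup run from the probe path: build the page table and the
 * saved-state buffer that msm_pm_collapse_exit() needs to turn the MMU back
 * on after a warm boot, register the sysfs mode controls and sleep
 * statistics, hook up the platform suspend ops, and initialise the hrtimer
 * and cpuidle glue.
 */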
static int __devinit msm_pm_init(void)
{
	pgd_t *pc_pgd;
	pmd_t *pmd;
	unsigned long pmdval;
	enum msm_pm_time_stats_id enable_stats[] = {
		MSM_PM_STAT_IDLE_WFI,
		MSM_PM_STAT_RETENTION,
		MSM_PM_STAT_IDLE_STANDALONE_POWER_COLLAPSE,
		MSM_PM_STAT_IDLE_POWER_COLLAPSE,
		MSM_PM_STAT_SUSPEND,
	};
	unsigned long exit_phys;

	/* Page table for cores to come back up safely. */
	pc_pgd = pgd_alloc(&init_mm);
	if (!pc_pgd)
		return -ENOMEM;

	exit_phys = virt_to_phys(msm_pm_collapse_exit);

	pmd = pmd_offset(pud_offset(pc_pgd + pgd_index(exit_phys), exit_phys),
					exit_phys);
	pmdval = (exit_phys & PGDIR_MASK) |
		     PMD_TYPE_SECT | PMD_SECT_AP_WRITE;
	pmd[0] = __pmd(pmdval);
	pmd[1] = __pmd(pmdval + (1 << (PGDIR_SHIFT - 1)));

	msm_saved_state_phys =
		allocate_contiguous_ebi_nomap(CPU_SAVED_STATE_SIZE *
					      num_possible_cpus(), 4);
	if (!msm_saved_state_phys)
		return -ENOMEM;
	msm_saved_state = ioremap_nocache(msm_saved_state_phys,
					  CPU_SAVED_STATE_SIZE *
					  num_possible_cpus());
	if (!msm_saved_state)
		return -ENOMEM;

	/* It is remotely possible that the code in msm_pm_collapse_exit()
	 * which turns on the MMU with this mapping is in the
	 * next even-numbered megabyte beyond the
	 * start of msm_pm_collapse_exit().
	 * Map this megabyte in as well.
	 */
	pmd[2] = __pmd(pmdval + (2 << (PGDIR_SHIFT - 1)));
	flush_pmd_entry(pmd);
	msm_pm_pc_pgd = virt_to_phys(pc_pgd);
	clean_caches((unsigned long)&msm_pm_pc_pgd, sizeof(msm_pm_pc_pgd),
		     virt_to_phys(&msm_pm_pc_pgd));

	msm_pm_mode_sysfs_add();
	msm_pm_add_stats(enable_stats, ARRAY_SIZE(enable_stats));
	suspend_set_ops(&msm_pm_ops);
	hrtimer_init(&pm_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	msm_cpuidle_init();

	return 0;
}

static void __devinit msm_pm_set_flush_fn(uint32_t pc_mode)
{
	msm_pm_disable_l2_fn = NULL;
	msm_pm_enable_l2_fn = NULL;
	msm_pm_flush_l2_fn = outer_flush_all;

	if (pc_mode == MSM_PM_PC_NOTZ_L2_EXT) {
		msm_pm_disable_l2_fn = outer_disable;
		msm_pm_enable_l2_fn = outer_resume;
	}
}

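/*
 * debugfs interface for the power-collapse debug counters. The counter
 * block is laid out as four 32-bit words per CPU, of which the first
 * NUM_OF_COUNTERS are used: PC entry, warmboot entry and PC bailout counts.
 * Reading the pc_debug_counter file at the debugfs root (typically
 * /sys/kernel/debug) dumps the counters for every possible CPU.
 */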
struct msm_pc_debug_counters_buffer {
	void __iomem *reg;
	u32 len;
	char buf[MAX_BUF_SIZE];
};

static inline u32 msm_pc_debug_counters_read_register(
		void __iomem *reg, int index, int offset)
{
	return readl_relaxed(reg + (index * 4 + offset) * 4);
}

static char *counter_name[] = {
	"PC Entry Counter",
	"Warmboot Entry Counter",
	"PC Bailout Counter"
};

static int msm_pc_debug_counters_copy(
		struct msm_pc_debug_counters_buffer *data)
{
	int j;
	u32 stat;
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		data->len += scnprintf(data->buf + data->len,
				sizeof(data->buf) - data->len,
				"CPU%d\n", cpu);

		for (j = 0; j < NUM_OF_COUNTERS; j++) {
			stat = msm_pc_debug_counters_read_register(
					data->reg, cpu, j);
			data->len += scnprintf(data->buf + data->len,
					sizeof(data->buf) - data->len,
					"\t%s : %d\n", counter_name[j],
					stat);
		}
	}

	return data->len;
}

static int msm_pc_debug_counters_file_read(struct file *file,
		char __user *bufu, size_t count, loff_t *ppos)
{
	struct msm_pc_debug_counters_buffer *data;

	data = file->private_data;

	if (!data)
		return -EINVAL;

	if (!bufu)
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, bufu, count))
		return -EFAULT;

	if (*ppos >= data->len && data->len == 0)
		data->len = msm_pc_debug_counters_copy(data);

	return simple_read_from_buffer(bufu, count, ppos,
			data->buf, data->len);
}

static int msm_pc_debug_counters_file_open(struct inode *inode,
		struct file *file)
{
	struct msm_pc_debug_counters_buffer *buf;
	void __iomem *msm_pc_debug_counters_reg;

	msm_pc_debug_counters_reg = inode->i_private;

	if (!msm_pc_debug_counters_reg)
		return -EINVAL;

	file->private_data = kzalloc(
		sizeof(struct msm_pc_debug_counters_buffer), GFP_KERNEL);

	if (!file->private_data) {
		pr_err("%s: ERROR kzalloc failed to allocate %zu bytes\n",
			__func__, sizeof(struct msm_pc_debug_counters_buffer));

		return -ENOMEM;
	}

	buf = file->private_data;
	buf->reg = msm_pc_debug_counters_reg;

	return 0;
}

static int msm_pc_debug_counters_file_close(struct inode *inode,
		struct file *file)
{
	kfree(file->private_data);
	return 0;
}

static const struct file_operations msm_pc_debug_counters_fops = {
	.open = msm_pc_debug_counters_file_open,
	.read = msm_pc_debug_counters_file_read,
	.release = msm_pc_debug_counters_file_close,
	.llseek = no_llseek,
};

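/*
 * Platform probe for the "pm-8x60" device: maps and clears the PC debug
 * counter region (exposing it via debugfs when present), reads the
 * qcom,pc-mode, qcom,use-sync-timer and qcom,saw-turns-off-pll DT
 * properties (or legacy platform data), then finishes with msm_pm_init().
 */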
static int __devinit msm_pm_8x60_probe(struct platform_device *pdev)
{
	char *key = NULL;
	struct dentry *dent = NULL;
	uint32_t val = 0;
	struct resource *res = NULL;
	int i;
	struct msm_pm_init_data_type pdata_local;
	int ret = 0;

	memset(&pdata_local, 0, sizeof(struct msm_pm_init_data_type));

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res) {
		msm_pc_debug_counters_phys = res->start;
		WARN_ON(resource_size(res) < SZ_64);
		msm_pc_debug_counters = devm_ioremap(&pdev->dev, res->start,
					resource_size(res));
		if (msm_pc_debug_counters)
			for (i = 0; i < resource_size(res)/4; i++)
				__raw_writel(0, msm_pc_debug_counters + i * 4);
	}

	if (!msm_pc_debug_counters) {
		msm_pc_debug_counters = 0;
		msm_pc_debug_counters_phys = 0;
	} else {
		dent = debugfs_create_file("pc_debug_counter", S_IRUGO, NULL,
				msm_pc_debug_counters,
				&msm_pc_debug_counters_fops);
		if (!dent)
			pr_err("%s: ERROR debugfs_create_file failed\n",
					__func__);
	}

	if (!pdev->dev.of_node) {
		struct msm_pm_init_data_type *d = pdev->dev.platform_data;

		if (!d)
			goto pm_8x60_probe_done;

		memcpy(&pdata_local, d, sizeof(struct msm_pm_init_data_type));

	} else {
		key = "qcom,pc-mode";
		ret = of_property_read_u32(pdev->dev.of_node, key, &val);
		if (ret) {
			pr_debug("%s: Cannot read %s, defaulting to 0\n",
				__func__, key);
			val = MSM_PM_PC_TZ_L2_INT;
			ret = 0;
		}
		pdata_local.pc_mode = val;

		key = "qcom,use-sync-timer";
		pdata_local.use_sync_timer =
			of_property_read_bool(pdev->dev.of_node, key);

		key = "qcom,saw-turns-off-pll";
		msm_no_ramp_down_pc = of_property_read_bool(pdev->dev.of_node,
					key);
	}

	if (pdata_local.cp15_data.reg_data &&
		pdata_local.cp15_data.reg_saved_state_size > 0) {
		cp15_data.reg_data = kzalloc(sizeof(uint32_t) *
				pdata_local.cp15_data.reg_saved_state_size,
				GFP_KERNEL);
		if (!cp15_data.reg_data)
			return -ENOMEM;

		cp15_data.reg_val = kzalloc(sizeof(uint32_t) *
				pdata_local.cp15_data.reg_saved_state_size,
				GFP_KERNEL);
		if (!cp15_data.reg_val)
			return -ENOMEM;

		memcpy(cp15_data.reg_data, pdata_local.cp15_data.reg_data,
			pdata_local.cp15_data.reg_saved_state_size *
			sizeof(uint32_t));
	}

	msm_pm_set_flush_fn(pdata_local.pc_mode);
	msm_pm_use_sync_timer = pdata_local.use_sync_timer;
	msm_pm_retention_calls_tz = pdata_local.retention_calls_tz;

pm_8x60_probe_done:
	msm_pm_init();
	if (pdev->dev.of_node)
		of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);

	return ret;
}

static struct of_device_id msm_pm_8x60_table[] = {
	{.compatible = "qcom,pm-8x60"},
	{},
};

static struct platform_driver msm_pm_8x60_driver = {
	.probe = msm_pm_8x60_probe,
	.driver = {
		.name = "pm-8x60",
		.owner = THIS_MODULE,
		.of_match_table = msm_pm_8x60_table,
	},
};

static int __init msm_pm_8x60_init(void)
{
	int rc;

	rc = platform_driver_register(&msm_cpu_status_driver);

	if (rc) {
		pr_err("%s(): failed to register driver %s\n", __func__,
			msm_cpu_status_driver.driver.name);
		return rc;
	}

	return platform_driver_register(&msm_pm_8x60_driver);
}
device_initcall(msm_pm_8x60_init);