/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/bug.h>
#include <linux/mutex.h>
#include <linux/proc_fs.h>
#include <linux/spinlock.h>
#include <linux/cpu.h>
#include <linux/hrtimer.h>
#include <mach/rpm.h>
#include <mach/msm_iomap.h>
#include <asm/mach-types.h>
#include <linux/io.h>
#include <mach/socinfo.h>
#include <mach/mpm.h>
#include "rpm_resources.h"
#include "spm.h"
#include "idle.h"

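/*
 * RPM resource shadowing for MSM low-power modes: sleep-set requests are
 * buffered here, aggregated against the limits of the chosen low-power
 * level, and flushed to the RPM when entering power collapse.
 */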
/******************************************************************************
 * Debug Definitions
 *****************************************************************************/

enum {
	MSM_RPMRS_DEBUG_OUTPUT = BIT(0),
	MSM_RPMRS_DEBUG_BUFFER = BIT(1),
	MSM_RPMRS_DEBUG_EVENT_TIMER = BIT(2),
};

static int msm_rpmrs_debug_mask;
module_param_named(
	debug_mask, msm_rpmrs_debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP
);

static struct msm_rpmrs_level *msm_rpmrs_levels;
static int msm_rpmrs_level_count;

static bool msm_rpmrs_pxo_beyond_limits(struct msm_rpmrs_limits *limits);
static void msm_rpmrs_aggregate_pxo(struct msm_rpmrs_limits *limits);
static void msm_rpmrs_restore_pxo(void);
static bool msm_rpmrs_l2_cache_beyond_limits(struct msm_rpmrs_limits *limits);
static void msm_rpmrs_aggregate_l2_cache(struct msm_rpmrs_limits *limits);
static void msm_rpmrs_restore_l2_cache(void);
static bool msm_rpmrs_vdd_mem_beyond_limits(struct msm_rpmrs_limits *limits);
static void msm_rpmrs_aggregate_vdd_mem(struct msm_rpmrs_limits *limits);
static void msm_rpmrs_restore_vdd_mem(void);
static bool msm_rpmrs_vdd_dig_beyond_limits(struct msm_rpmrs_limits *limits);
static void msm_rpmrs_aggregate_vdd_dig(struct msm_rpmrs_limits *limits);
static void msm_rpmrs_restore_vdd_dig(void);

static ssize_t msm_rpmrs_resource_attr_show(
	struct kobject *kobj, struct kobj_attribute *attr, char *buf);
static ssize_t msm_rpmrs_resource_attr_store(struct kobject *kobj,
	struct kobj_attribute *attr, const char *buf, size_t count);

static int vdd_dig_vlevels[MSM_RPMRS_VDD_DIG_LAST];
static int vdd_mem_vlevels[MSM_RPMRS_VDD_MEM_LAST];
static int vdd_mask;

#define MSM_RPMRS_MAX_RS_REGISTER_COUNT 2

#define RPMRS_ATTR(_name) \
	__ATTR(_name, S_IRUGO|S_IWUSR, \
		msm_rpmrs_resource_attr_show, msm_rpmrs_resource_attr_store)

struct msm_rpmrs_resource {
	struct msm_rpm_iv_pair rs[MSM_RPMRS_MAX_RS_REGISTER_COUNT];
	uint32_t size;
	char *name;

	uint32_t enable_low_power;

	bool (*beyond_limits)(struct msm_rpmrs_limits *limits);
	void (*aggregate)(struct msm_rpmrs_limits *limits);
	void (*restore)(void);

	struct kobj_attribute ko_attr;
};

static struct msm_rpmrs_resource msm_rpmrs_pxo = {
	.size = 1,
	.name = "pxo",
	.beyond_limits = msm_rpmrs_pxo_beyond_limits,
	.aggregate = msm_rpmrs_aggregate_pxo,
	.restore = msm_rpmrs_restore_pxo,
	.ko_attr = RPMRS_ATTR(pxo),
};

static struct msm_rpmrs_resource msm_rpmrs_l2_cache = {
	.size = 1,
	.name = "L2_cache",
	.beyond_limits = msm_rpmrs_l2_cache_beyond_limits,
	.aggregate = msm_rpmrs_aggregate_l2_cache,
	.restore = msm_rpmrs_restore_l2_cache,
	.ko_attr = RPMRS_ATTR(L2_cache),
};

static struct msm_rpmrs_resource msm_rpmrs_vdd_mem = {
	.size = 2,
	.name = "vdd_mem",
	.beyond_limits = msm_rpmrs_vdd_mem_beyond_limits,
	.aggregate = msm_rpmrs_aggregate_vdd_mem,
	.restore = msm_rpmrs_restore_vdd_mem,
	.ko_attr = RPMRS_ATTR(vdd_mem),
};

static struct msm_rpmrs_resource msm_rpmrs_vdd_dig = {
	.size = 2,
	.name = "vdd_dig",
	.beyond_limits = msm_rpmrs_vdd_dig_beyond_limits,
	.aggregate = msm_rpmrs_aggregate_vdd_dig,
	.restore = msm_rpmrs_restore_vdd_dig,
	.ko_attr = RPMRS_ATTR(vdd_dig),
};

static struct msm_rpmrs_resource msm_rpmrs_rpm_ctl = {
	.size = 1,
	.name = "rpm_ctl",
	.beyond_limits = NULL,
	.aggregate = NULL,
	.restore = NULL,
	.ko_attr = RPMRS_ATTR(rpm_ctl),
};

static struct msm_rpmrs_resource *msm_rpmrs_resources[] = {
	&msm_rpmrs_pxo,
	&msm_rpmrs_l2_cache,
	&msm_rpmrs_vdd_mem,
	&msm_rpmrs_vdd_dig,
	&msm_rpmrs_rpm_ctl,
};

static uint32_t msm_rpmrs_buffer[MSM_RPM_ID_LAST];
static DECLARE_BITMAP(msm_rpmrs_buffered, MSM_RPM_ID_LAST);
static DECLARE_BITMAP(msm_rpmrs_listed, MSM_RPM_ID_LAST);
static DEFINE_SPINLOCK(msm_rpmrs_lock);

#define MSM_RPMRS_VDD(v) ((v) & (vdd_mask))

/******************************************************************************
 * Attribute Definitions
 *****************************************************************************/
static struct attribute *msm_rpmrs_attributes[] = {
	&msm_rpmrs_pxo.ko_attr.attr,
	&msm_rpmrs_l2_cache.ko_attr.attr,
	&msm_rpmrs_vdd_mem.ko_attr.attr,
	&msm_rpmrs_vdd_dig.ko_attr.attr,
	NULL,
};
static struct attribute *msm_rpmrs_mode_attributes[] = {
	&msm_rpmrs_rpm_ctl.ko_attr.attr,
	NULL,
};

static struct attribute_group msm_rpmrs_attribute_group = {
	.attrs = msm_rpmrs_attributes,
};

static struct attribute_group msm_rpmrs_mode_attribute_group = {
	.attrs = msm_rpmrs_mode_attributes,
};

#define GET_RS_FROM_ATTR(attr) \
	(container_of(attr, struct msm_rpmrs_resource, ko_attr))


/******************************************************************************
 * Resource Specific Functions
 *****************************************************************************/

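/*
 * Buffer the RPM timed-trigger registers: TRIGGER_TIMED_TO is cleared and
 * TRIGGER_TIMED_SCLK_COUNT is set to the expected sleep duration in
 * sleep-clock ticks; msm_rpmrs_restore_sclk() undoes both after wakeup.
 */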
static void msm_rpmrs_aggregate_sclk(uint32_t sclk_count)
{
	msm_rpmrs_buffer[MSM_RPM_ID_TRIGGER_TIMED_TO] = 0;
	set_bit(MSM_RPM_ID_TRIGGER_TIMED_TO, msm_rpmrs_buffered);
	msm_rpmrs_buffer[MSM_RPM_ID_TRIGGER_TIMED_SCLK_COUNT] = sclk_count;
	set_bit(MSM_RPM_ID_TRIGGER_TIMED_SCLK_COUNT, msm_rpmrs_buffered);
}

static void msm_rpmrs_restore_sclk(void)
{
	clear_bit(MSM_RPM_ID_TRIGGER_TIMED_SCLK_COUNT, msm_rpmrs_buffered);
	msm_rpmrs_buffer[MSM_RPM_ID_TRIGGER_TIMED_SCLK_COUNT] = 0;
	clear_bit(MSM_RPM_ID_TRIGGER_TIMED_TO, msm_rpmrs_buffered);
	msm_rpmrs_buffer[MSM_RPM_ID_TRIGGER_TIMED_TO] = 0;
}

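/*
 * Each buffered resource supplies three handlers: beyond_limits() checks
 * whether the buffered request exceeds what a given low-power level allows,
 * aggregate() folds the level's own requirement into the buffered value
 * (saving the original), and restore() puts the saved value back once the
 * buffer has been flushed.
 */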
static bool msm_rpmrs_pxo_beyond_limits(struct msm_rpmrs_limits *limits)
{
	struct msm_rpmrs_resource *rs = &msm_rpmrs_pxo;
	uint32_t pxo;

	if (rs->enable_low_power && test_bit(rs->rs[0].id, msm_rpmrs_buffered))
		pxo = msm_rpmrs_buffer[rs->rs[0].id];
	else
		pxo = MSM_RPMRS_PXO_ON;

	return pxo > limits->pxo;
}

static void msm_rpmrs_aggregate_pxo(struct msm_rpmrs_limits *limits)
{
	struct msm_rpmrs_resource *rs = &msm_rpmrs_pxo;
	uint32_t *buf = &msm_rpmrs_buffer[rs->rs[0].id];

	if (test_bit(rs->rs[0].id, msm_rpmrs_buffered)) {
		rs->rs[0].value = *buf;
		if (limits->pxo > *buf)
			*buf = limits->pxo;
		if (MSM_RPMRS_DEBUG_OUTPUT & msm_rpmrs_debug_mask)
			pr_info("%s: %d (0x%x)\n", __func__, *buf, *buf);
	}
}

static void msm_rpmrs_restore_pxo(void)
{
	struct msm_rpmrs_resource *rs = &msm_rpmrs_pxo;

	if (test_bit(rs->rs[0].id, msm_rpmrs_buffered))
		msm_rpmrs_buffer[rs->rs[0].id] = rs->rs[0].value;
}

static bool msm_rpmrs_l2_cache_beyond_limits(struct msm_rpmrs_limits *limits)
{
	struct msm_rpmrs_resource *rs = &msm_rpmrs_l2_cache;
	uint32_t l2_cache;

	if (rs->enable_low_power && test_bit(rs->rs[0].id, msm_rpmrs_buffered))
		l2_cache = msm_rpmrs_buffer[rs->rs[0].id];
	else
		l2_cache = MSM_RPMRS_L2_CACHE_ACTIVE;

	return l2_cache > limits->l2_cache;
}

static void msm_rpmrs_aggregate_l2_cache(struct msm_rpmrs_limits *limits)
{
	struct msm_rpmrs_resource *rs = &msm_rpmrs_l2_cache;
	uint32_t *buf = &msm_rpmrs_buffer[rs->rs[0].id];

	if (test_bit(rs->rs[0].id, msm_rpmrs_buffered)) {
		rs->rs[0].value = *buf;
		if (limits->l2_cache > *buf)
			*buf = limits->l2_cache;

		if (MSM_RPMRS_DEBUG_OUTPUT & msm_rpmrs_debug_mask)
			pr_info("%s: %d (0x%x)\n", __func__, *buf, *buf);
	}
}

static bool msm_spm_l2_cache_beyond_limits(struct msm_rpmrs_limits *limits)
{
	struct msm_rpmrs_resource *rs = &msm_rpmrs_l2_cache;
	uint32_t l2_cache = rs->rs[0].value;

	if (!rs->enable_low_power)
		l2_cache = MSM_RPMRS_L2_CACHE_ACTIVE;

	return l2_cache > limits->l2_cache;
}

static void msm_rpmrs_restore_l2_cache(void)
{
	struct msm_rpmrs_resource *rs = &msm_rpmrs_l2_cache;

	if (test_bit(rs->rs[0].id, msm_rpmrs_buffered))
		msm_rpmrs_buffer[rs->rs[0].id] = rs->rs[0].value;
}

static bool msm_rpmrs_vdd_mem_beyond_limits(struct msm_rpmrs_limits *limits)
{
	struct msm_rpmrs_resource *rs = &msm_rpmrs_vdd_mem;
	uint32_t vdd_mem;

	if (test_bit(rs->rs[0].id, msm_rpmrs_buffered)) {
		uint32_t buffered_value = msm_rpmrs_buffer[rs->rs[0].id];

		if (rs->enable_low_power == 0)
			vdd_mem = vdd_mem_vlevels[MSM_RPMRS_VDD_MEM_ACTIVE];
		else if (rs->enable_low_power == 1)
			vdd_mem = vdd_mem_vlevels[MSM_RPMRS_VDD_MEM_RET_HIGH];
		else
			vdd_mem = vdd_mem_vlevels[MSM_RPMRS_VDD_MEM_RET_LOW];

		if (MSM_RPMRS_VDD(buffered_value) > MSM_RPMRS_VDD(vdd_mem))
			vdd_mem = MSM_RPMRS_VDD(buffered_value);
	} else {
		vdd_mem = vdd_mem_vlevels[MSM_RPMRS_VDD_MEM_ACTIVE];
	}

	return vdd_mem > vdd_mem_vlevels[limits->vdd_mem_upper_bound];
}

static void msm_rpmrs_aggregate_vdd_mem(struct msm_rpmrs_limits *limits)
{
	struct msm_rpmrs_resource *rs = &msm_rpmrs_vdd_mem;
	uint32_t *buf = &msm_rpmrs_buffer[rs->rs[0].id];

	if (test_bit(rs->rs[0].id, msm_rpmrs_buffered)) {
		rs->rs[0].value = *buf;
		if (vdd_mem_vlevels[limits->vdd_mem] > MSM_RPMRS_VDD(*buf)) {
			*buf &= ~vdd_mask;
			*buf |= vdd_mem_vlevels[limits->vdd_mem];
		}

		if (MSM_RPMRS_DEBUG_OUTPUT & msm_rpmrs_debug_mask)
			pr_info("%s: vdd %d (0x%x)\n", __func__,
				MSM_RPMRS_VDD(*buf), MSM_RPMRS_VDD(*buf));
	}
}

static void msm_rpmrs_restore_vdd_mem(void)
{
	struct msm_rpmrs_resource *rs = &msm_rpmrs_vdd_mem;

	if (test_bit(rs->rs[0].id, msm_rpmrs_buffered))
		msm_rpmrs_buffer[rs->rs[0].id] = rs->rs[0].value;
}

static bool msm_rpmrs_vdd_dig_beyond_limits(struct msm_rpmrs_limits *limits)
{
	struct msm_rpmrs_resource *rs = &msm_rpmrs_vdd_dig;
	uint32_t vdd_dig;

	if (test_bit(rs->rs[0].id, msm_rpmrs_buffered)) {
		uint32_t buffered_value = msm_rpmrs_buffer[rs->rs[0].id];

		if (rs->enable_low_power == 0)
			vdd_dig = vdd_dig_vlevels[MSM_RPMRS_VDD_DIG_ACTIVE];
		else if (rs->enable_low_power == 1)
			vdd_dig = vdd_dig_vlevels[MSM_RPMRS_VDD_DIG_RET_HIGH];
		else
			vdd_dig = vdd_dig_vlevels[MSM_RPMRS_VDD_DIG_RET_LOW];

		if (MSM_RPMRS_VDD(buffered_value) > MSM_RPMRS_VDD(vdd_dig))
			vdd_dig = MSM_RPMRS_VDD(buffered_value);
	} else {
		vdd_dig = vdd_dig_vlevels[MSM_RPMRS_VDD_DIG_ACTIVE];
	}

	return vdd_dig > vdd_dig_vlevels[limits->vdd_dig_upper_bound];
}

static void msm_rpmrs_aggregate_vdd_dig(struct msm_rpmrs_limits *limits)
{
	struct msm_rpmrs_resource *rs = &msm_rpmrs_vdd_dig;
	uint32_t *buf = &msm_rpmrs_buffer[rs->rs[0].id];

	if (test_bit(rs->rs[0].id, msm_rpmrs_buffered)) {
		rs->rs[0].value = *buf;
		if (vdd_dig_vlevels[limits->vdd_dig] > MSM_RPMRS_VDD(*buf)) {
			*buf &= ~vdd_mask;
			*buf |= vdd_dig_vlevels[limits->vdd_dig];
		}

		if (MSM_RPMRS_DEBUG_OUTPUT & msm_rpmrs_debug_mask)
			pr_info("%s: vdd %d (0x%x)\n", __func__,
				MSM_RPMRS_VDD(*buf), MSM_RPMRS_VDD(*buf));
	}
}

static void msm_rpmrs_restore_vdd_dig(void)
{
	struct msm_rpmrs_resource *rs = &msm_rpmrs_vdd_dig;

	if (test_bit(rs->rs[0].id, msm_rpmrs_buffered))
		msm_rpmrs_buffer[rs->rs[0].id] = rs->rs[0].value;
}

/******************************************************************************
 * Buffering Functions
 *****************************************************************************/

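/*
 * Check whether wakeup interrupts stay detectable under the given limits:
 * with VDD_DIG at or below retention-high only MPM-monitorable IRQs can
 * wake us, and with PXO off only MPM-monitorable GPIO IRQs can.
 */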
static bool msm_rpmrs_irqs_detectable(struct msm_rpmrs_limits *limits,
		bool irqs_detect, bool gpio_detect)
{
	if (vdd_dig_vlevels[limits->vdd_dig_upper_bound] <=
			vdd_dig_vlevels[MSM_RPMRS_VDD_DIG_RET_HIGH])
		return irqs_detect;

	if (limits->pxo == MSM_RPMRS_PXO_OFF)
		return gpio_detect;

	return true;
}

static bool msm_rpmrs_use_mpm(struct msm_rpmrs_limits *limits)
{
	return (limits->pxo == MSM_RPMRS_PXO_OFF) ||
		(vdd_dig_vlevels[limits->vdd_dig] <=
		 vdd_dig_vlevels[MSM_RPMRS_VDD_DIG_RET_HIGH]);
}

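/*
 * Recompute which power-collapse levels are usable: a level becomes
 * unavailable as soon as any resource's buffered request is beyond that
 * level's limits.
 */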
static void msm_rpmrs_update_levels(void)
{
	int i, k;

	for (i = 0; i < msm_rpmrs_level_count; i++) {
		struct msm_rpmrs_level *level = &msm_rpmrs_levels[i];

		if (level->sleep_mode != MSM_PM_SLEEP_MODE_POWER_COLLAPSE)
			continue;

		level->available = true;

		for (k = 0; k < ARRAY_SIZE(msm_rpmrs_resources); k++) {
			struct msm_rpmrs_resource *rs = msm_rpmrs_resources[k];

			if (rs->beyond_limits &&
					rs->beyond_limits(&level->rs_limits)) {
				level->available = false;
				break;
			}
		}
	}
}

/*
 * Return value:
 *   0: no entries in <req> are on our resource list
 *   1: one or more entries in <req> are on our resource list
 *   -EINVAL: invalid id in <req> array
 */
static int msm_rpmrs_buffer_request(struct msm_rpm_iv_pair *req, int count)
{
	bool listed;
	int i;

	for (i = 0; i < count; i++)
		if (req[i].id >= MSM_RPM_ID_LAST)
			return -EINVAL;

	for (i = 0, listed = false; i < count; i++) {
		msm_rpmrs_buffer[req[i].id] = req[i].value;
		set_bit(req[i].id, msm_rpmrs_buffered);

		if (MSM_RPMRS_DEBUG_BUFFER & msm_rpmrs_debug_mask)
			pr_info("%s: reg %d: 0x%x\n",
				__func__, req[i].id, req[i].value);

		if (listed)
			continue;

		if (test_bit(req[i].id, msm_rpmrs_listed))
			listed = true;
	}

	return listed ? 1 : 0;
}

/*
 * Return value:
 *   0: no entries in <req> are on our resource list
 *   1: one or more entries in <req> are on our resource list
 *   -EINVAL: invalid id in <req> array
 */
static int msm_rpmrs_clear_buffer(struct msm_rpm_iv_pair *req, int count)
{
	bool listed;
	int i;

	for (i = 0; i < count; i++)
		if (req[i].id >= MSM_RPM_ID_LAST)
			return -EINVAL;

	for (i = 0, listed = false; i < count; i++) {
		msm_rpmrs_buffer[req[i].id] = 0;
		clear_bit(req[i].id, msm_rpmrs_buffered);

		if (MSM_RPMRS_DEBUG_BUFFER & msm_rpmrs_debug_mask)
			pr_info("%s: reg %d\n", __func__, req[i].id);

		if (listed)
			continue;

		if (test_bit(req[i].id, msm_rpmrs_listed))
			listed = true;
	}

	return listed ? 1 : 0;
}

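/*
 * Map the level's L2 cache limit onto an SPM low-power mode before power
 * collapse; HSFS-open additionally tells the PM core that L2 contents
 * will be lost and must be flushed.
 */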
#ifdef CONFIG_MSM_L2_SPM
static int msm_rpmrs_flush_L2(struct msm_rpmrs_limits *limits, int notify_rpm)
{
	int rc = 0;
	int lpm;

	switch (limits->l2_cache) {
	case MSM_RPMRS_L2_CACHE_HSFS_OPEN:
		lpm = MSM_SPM_L2_MODE_POWER_COLLAPSE;
		msm_pm_set_l2_flush_flag(1);
		break;
	case MSM_RPMRS_L2_CACHE_GDHS:
		lpm = MSM_SPM_L2_MODE_GDHS;
		break;
	case MSM_RPMRS_L2_CACHE_RETENTION:
		lpm = MSM_SPM_L2_MODE_RETENTION;
		break;
	default:
	case MSM_RPMRS_L2_CACHE_ACTIVE:
		lpm = MSM_SPM_L2_MODE_DISABLED;
		break;
	}

	rc = msm_spm_l2_set_low_power_mode(lpm, notify_rpm);
	if (MSM_RPMRS_DEBUG_BUFFER & msm_rpmrs_debug_mask)
		pr_info("%s: Requesting low power mode %d returned %d\n",
				__func__, lpm, rc);

	return rc;
}

static void msm_rpmrs_L2_restore(struct msm_rpmrs_limits *limits,
		bool notify_rpm, bool collapsed)
{
	msm_spm_l2_set_low_power_mode(MSM_SPM_MODE_DISABLED, notify_rpm);
	msm_pm_set_l2_flush_flag(0);
}
#else
static int msm_rpmrs_flush_L2(struct msm_rpmrs_limits *limits, int notify_rpm)
{
	return 0;
}

static void msm_rpmrs_L2_restore(struct msm_rpmrs_limits *limits,
		bool notify_rpm, bool collapsed)
{
}
#endif

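/*
 * Flush every buffered request into the RPM sleep set: fold in the wakeup
 * trigger and the per-resource limits, hand the buffered registers to
 * msm_rpm_set_noirq(), then restore the pre-aggregation values so the
 * buffer again reflects what drivers actually requested.
 */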
static int msm_rpmrs_flush_buffer(
	uint32_t sclk_count, struct msm_rpmrs_limits *limits, int from_idle)
{
	struct msm_rpm_iv_pair *req;
	int count;
	int rc;
	int i;

	msm_rpmrs_aggregate_sclk(sclk_count);
	for (i = 0; i < ARRAY_SIZE(msm_rpmrs_resources); i++) {
		if (msm_rpmrs_resources[i]->aggregate)
			msm_rpmrs_resources[i]->aggregate(limits);
	}

	count = bitmap_weight(msm_rpmrs_buffered, MSM_RPM_ID_LAST);

	req = kmalloc(sizeof(*req) * count, GFP_ATOMIC);
	if (!req) {
		rc = -ENOMEM;
		goto flush_buffer_restore;
	}

	count = 0;
	i = find_first_bit(msm_rpmrs_buffered, MSM_RPM_ID_LAST);

	while (i < MSM_RPM_ID_LAST) {
		if (MSM_RPMRS_DEBUG_OUTPUT & msm_rpmrs_debug_mask)
			pr_info("%s: reg %d: 0x%x\n",
				__func__, i, msm_rpmrs_buffer[i]);

		req[count].id = i;
		req[count].value = msm_rpmrs_buffer[i];
		count++;

		i = find_next_bit(msm_rpmrs_buffered, MSM_RPM_ID_LAST, i + 1);
	}

	rc = msm_rpm_set_noirq(MSM_RPM_CTX_SET_SLEEP, req, count);
	kfree(req);

	if (rc)
		goto flush_buffer_restore;

	bitmap_and(msm_rpmrs_buffered,
		msm_rpmrs_buffered, msm_rpmrs_listed, MSM_RPM_ID_LAST);

flush_buffer_restore:
	for (i = 0; i < ARRAY_SIZE(msm_rpmrs_resources); i++) {
		if (msm_rpmrs_resources[i]->restore)
			msm_rpmrs_resources[i]->restore();
	}
	msm_rpmrs_restore_sclk();

	if (rc)
		pr_err("%s: failed: %d\n", __func__, rc);
	return rc;
}

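/*
 * Sleep-set requests are only buffered locally (recomputing level
 * availability); requests for any other context pass straight through to
 * the RPM driver.
 */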
static int msm_rpmrs_set_common(
	int ctx, struct msm_rpm_iv_pair *req, int count, bool noirq)
{
	if (ctx == MSM_RPM_CTX_SET_SLEEP) {
		unsigned long flags;
		int rc;

		spin_lock_irqsave(&msm_rpmrs_lock, flags);
		rc = msm_rpmrs_buffer_request(req, count);
		if (rc > 0) {
			msm_rpmrs_update_levels();
			rc = 0;
		}
		spin_unlock_irqrestore(&msm_rpmrs_lock, flags);

		return rc;
	}

	if (noirq)
		return msm_rpm_set_noirq(ctx, req, count);
	else
		return msm_rpm_set(ctx, req, count);
}

static int msm_rpmrs_clear_common(
	int ctx, struct msm_rpm_iv_pair *req, int count, bool noirq)
{
	if (ctx == MSM_RPM_CTX_SET_SLEEP) {
		unsigned long flags;
		int rc;

		spin_lock_irqsave(&msm_rpmrs_lock, flags);
		rc = msm_rpmrs_clear_buffer(req, count);
		if (rc > 0) {
			msm_rpmrs_update_levels();
			rc = 0;
		}
		spin_unlock_irqrestore(&msm_rpmrs_lock, flags);

		if (rc < 0)
			return rc;
	}

	if (noirq)
		return msm_rpm_clear_noirq(ctx, req, count);
	else
		return msm_rpm_clear(ctx, req, count);
}

/******************************************************************************
 * Attribute Functions
 *****************************************************************************/

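/*
 * sysfs accessors for the per-resource enable_low_power knobs and the
 * rpm_ctl mode attribute; the groups are created under this module's
 * kobject by msm_rpmrs_resource_sysfs_add() below.
 */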
static ssize_t msm_rpmrs_resource_attr_show(
	struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct kernel_param kp;
	unsigned long flags;
	unsigned int temp;
	int rc;

	spin_lock_irqsave(&msm_rpmrs_lock, flags);
	/* special case active-set signal for MSM_RPMRS_ID_RPM_CTL */
	if (GET_RS_FROM_ATTR(attr)->rs[0].id ==
			msm_rpmrs_rpm_ctl.rs[0].id)
		temp = GET_RS_FROM_ATTR(attr)->rs[0].value;
	else
		temp = GET_RS_FROM_ATTR(attr)->enable_low_power;
	spin_unlock_irqrestore(&msm_rpmrs_lock, flags);

	kp.arg = &temp;
	rc = param_get_uint(buf, &kp);

	if (rc > 0) {
		strlcat(buf, "\n", PAGE_SIZE);
		rc++;
	}

	return rc;
}

static ssize_t msm_rpmrs_resource_attr_store(struct kobject *kobj,
	struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct kernel_param kp;
	unsigned long flags;
	unsigned int temp;
	int rc;

	kp.arg = &temp;
	rc = param_set_uint(buf, &kp);
	if (rc)
		return rc;

	spin_lock_irqsave(&msm_rpmrs_lock, flags);
	GET_RS_FROM_ATTR(attr)->enable_low_power = temp;

	/* special case active-set signal for MSM_RPMRS_ID_RPM_CTL */
	if (GET_RS_FROM_ATTR(attr)->rs[0].id ==
			msm_rpmrs_rpm_ctl.rs[0].id) {
		struct msm_rpm_iv_pair req;
		req.id = msm_rpmrs_rpm_ctl.rs[0].id;
		req.value = GET_RS_FROM_ATTR(attr)->enable_low_power;
		GET_RS_FROM_ATTR(attr)->rs[0].value = req.value;

		rc = msm_rpm_set_noirq(MSM_RPM_CTX_SET_0, &req, 1);
		if (rc) {
			pr_err("%s: failed to request RPM_CTL to %d: %d\n",
				__func__, req.value, rc);
		}
	}

	msm_rpmrs_update_levels();
	spin_unlock_irqrestore(&msm_rpmrs_lock, flags);

	return count;
}

static int __init msm_rpmrs_resource_sysfs_add(void)
{
	struct kobject *module_kobj = NULL;
	struct kobject *low_power_kobj = NULL;
	struct kobject *mode_kobj = NULL;
	int rc = 0;

	module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
	if (!module_kobj) {
		pr_err("%s: cannot find kobject for module %s\n",
			__func__, KBUILD_MODNAME);
		rc = -ENOENT;
		goto resource_sysfs_add_exit;
	}

	low_power_kobj = kobject_create_and_add(
				"enable_low_power", module_kobj);
	if (!low_power_kobj) {
		pr_err("%s: cannot create kobject\n", __func__);
		rc = -ENOMEM;
		goto resource_sysfs_add_exit;
	}

	mode_kobj = kobject_create_and_add(
				"mode", module_kobj);
	if (!mode_kobj) {
		pr_err("%s: cannot create kobject\n", __func__);
		rc = -ENOMEM;
		goto resource_sysfs_add_exit;
	}

	rc = sysfs_create_group(low_power_kobj, &msm_rpmrs_attribute_group);
	if (rc) {
		pr_err("%s: cannot create kobject attribute group\n", __func__);
		goto resource_sysfs_add_exit;
	}

	rc = sysfs_create_group(mode_kobj, &msm_rpmrs_mode_attribute_group);
	if (rc) {
		pr_err("%s: cannot create kobject attribute group\n", __func__);
		goto resource_sysfs_add_exit;
	}

	rc = 0;
resource_sysfs_add_exit:
	if (rc) {
		if (low_power_kobj)
			sysfs_remove_group(low_power_kobj,
				&msm_rpmrs_attribute_group);
		kobject_del(low_power_kobj);
		kobject_del(mode_kobj);
	}

	return rc;
}

/******************************************************************************
 * Public Functions
 *****************************************************************************/

int msm_rpmrs_set(int ctx, struct msm_rpm_iv_pair *req, int count)
{
	return msm_rpmrs_set_common(ctx, req, count, false);
}

int msm_rpmrs_set_noirq(int ctx, struct msm_rpm_iv_pair *req, int count)
{
	WARN(!irqs_disabled(), "msm_rpmrs_set_noirq can only be called "
		"safely when local irqs are disabled.  Consider using "
		"msm_rpmrs_set or msm_rpmrs_set_nosleep instead.");
	return msm_rpmrs_set_common(ctx, req, count, true);
}

/* Allow individual bits of an RPM resource to be set; currently used only
 * for the active-context resource, viz. RPM_CTL.  The API is generic enough
 * to possibly extend it to other resources as well in the future.
 */
int msm_rpmrs_set_bits_noirq(int ctx, struct msm_rpm_iv_pair *req, int count,
		int *mask)
{
	unsigned long flags;
	int i, j;
	int rc = -1;
	struct msm_rpmrs_resource *rs;

	if (ctx != MSM_RPM_CTX_SET_0)
		return -ENOSYS;

	spin_lock_irqsave(&msm_rpmrs_lock, flags);
	for (i = 0; i < ARRAY_SIZE(msm_rpmrs_resources); i++) {
		rs = msm_rpmrs_resources[i];
		if (rs->rs[0].id == req[0].id && rs->size == count) {
			for (j = 0; j < rs->size; j++) {
				rs->rs[j].value &= ~mask[j];
				rs->rs[j].value |= req[j].value & mask[j];
			}
			break;
		}
	}

	if (i != ARRAY_SIZE(msm_rpmrs_resources)) {
		rc = msm_rpm_set_noirq(MSM_RPM_CTX_SET_0, &rs->rs[0], rs->size);
		if (rc) {
			for (j = 0; j < rs->size; j++) {
				pr_err("%s: failed to request %d to %d: %d\n",
					__func__,
					rs->rs[j].id, rs->rs[j].value, rc);
			}
		}
	}
	spin_unlock_irqrestore(&msm_rpmrs_lock, flags);

	return rc;
}

int msm_rpmrs_clear(int ctx, struct msm_rpm_iv_pair *req, int count)
{
	return msm_rpmrs_clear_common(ctx, req, count, false);
}

int msm_rpmrs_clear_noirq(int ctx, struct msm_rpm_iv_pair *req, int count)
{
	WARN(!irqs_disabled(), "msm_rpmrs_clear_noirq can only be called "
		"safely when local irqs are disabled.  Consider using "
		"msm_rpmrs_clear or msm_rpmrs_clear_nosleep instead.");
	return msm_rpmrs_clear_common(ctx, req, count, true);
}

void msm_rpmrs_show_resources(void)
{
	struct msm_rpmrs_resource *rs;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&msm_rpmrs_lock, flags);
	for (i = 0; i < ARRAY_SIZE(msm_rpmrs_resources); i++) {
		rs = msm_rpmrs_resources[i];
		if (rs->rs[0].id < MSM_RPM_ID_LAST)
			pr_info("%s: resource %s: buffered %d, value 0x%x\n",
				__func__, rs->name,
				test_bit(rs->rs[0].id, msm_rpmrs_buffered),
				msm_rpmrs_buffer[rs->rs[0].id]);
		else
			pr_info("%s: resource %s: value %d\n",
				__func__, rs->name, rs->rs[0].value);
	}
	spin_unlock_irqrestore(&msm_rpmrs_lock, flags);
}

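/*
 * Return the smallest latency among the available power-collapse levels,
 * minus one (presumably so callers' comparisons against it still favor
 * entering power collapse).
 */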
s32 msm_cpuidle_get_deep_idle_latency(void)
{
	int i;
	struct msm_rpmrs_level *level = msm_rpmrs_levels, *best = level;

	if (!level)
		return 0;

	for (i = 0; i < msm_rpmrs_level_count; i++, level++) {
		if (!level->available)
			continue;
		if (level->sleep_mode != MSM_PM_SLEEP_MODE_POWER_COLLAPSE)
			continue;
		/* Pick the first power collapse mode by default */
		if (best->sleep_mode != MSM_PM_SLEEP_MODE_POWER_COLLAPSE)
			best = level;
		/* Find the lowest latency for power collapse */
		if (level->latency_us < best->latency_us)
			best = level;
	}
	return best->latency_us - 1;
}

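/*
 * Pick the lowest-power level that matches the requested sleep mode and
 * fits the caller's latency budget and expected sleep time.  For a sleep
 * of t us the average power of a level is estimated as
 *
 *	power(t) = steady_state_power
 *		 - (time_overhead_us * steady_state_power) / t
 *		 + energy_overhead / t
 *
 * with very short sleeps dominated by the entry/exit energy overhead.
 * When sleeping up to the next scheduled event is worthwhile, the event
 * timer is pulled in via time_param->modified_time_us so the CPU wakes
 * just early enough to cover the level's latency.
 */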
static void *msm_rpmrs_lowest_limits(bool from_idle,
		enum msm_pm_sleep_mode sleep_mode,
		struct msm_pm_time_params *time_param, uint32_t *power)
{
	unsigned int cpu = smp_processor_id();
	struct msm_rpmrs_level *best_level = NULL;
	bool irqs_detectable = false;
	bool gpio_detectable = false;
	int i;
	uint32_t pwr;
	uint32_t next_wakeup_us = time_param->sleep_us;
	bool modify_event_timer;

	if (sleep_mode == MSM_PM_SLEEP_MODE_POWER_COLLAPSE) {
		irqs_detectable = msm_mpm_irqs_detectable(from_idle);
		gpio_detectable = msm_mpm_gpio_irqs_detectable(from_idle);
	}

	for (i = 0; i < msm_rpmrs_level_count; i++) {
		struct msm_rpmrs_level *level = &msm_rpmrs_levels[i];

		modify_event_timer = false;

		if (!level->available)
			continue;

		if (sleep_mode != level->sleep_mode)
			continue;

		if (time_param->latency_us < level->latency_us)
			continue;

		if (time_param->next_event_us &&
				time_param->next_event_us < level->latency_us)
			continue;

		if (time_param->next_event_us) {
			if ((time_param->next_event_us < time_param->sleep_us)
				|| ((time_param->next_event_us -
					level->latency_us) <
					time_param->sleep_us)) {
				modify_event_timer = true;
				next_wakeup_us = time_param->next_event_us -
						level->latency_us;
			}
		}

		if (next_wakeup_us <= level->time_overhead_us)
			continue;

		if (!msm_rpmrs_irqs_detectable(&level->rs_limits,
				irqs_detectable, gpio_detectable))
			continue;

		if ((MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE == sleep_mode)
			|| (MSM_PM_SLEEP_MODE_POWER_COLLAPSE == sleep_mode))
			if (!cpu && msm_rpm_local_request_is_outstanding())
				break;

		if (next_wakeup_us <= 1) {
			pwr = level->energy_overhead;
		} else if (next_wakeup_us <= level->time_overhead_us) {
			pwr = level->energy_overhead / next_wakeup_us;
		} else if ((next_wakeup_us >> 10) > level->time_overhead_us) {
			pwr = level->steady_state_power;
		} else {
			pwr = level->steady_state_power;
			pwr -= (level->time_overhead_us *
				level->steady_state_power)/next_wakeup_us;
			pwr += level->energy_overhead / next_wakeup_us;
		}

		if (!best_level ||
				best_level->rs_limits.power[cpu] >= pwr) {
			level->rs_limits.latency_us[cpu] = level->latency_us;
			level->rs_limits.power[cpu] = pwr;
			best_level = level;
			if (power)
				*power = pwr;
			if (modify_event_timer && best_level->latency_us > 1)
				time_param->modified_time_us =
					time_param->next_event_us -
					best_level->latency_us;
			else
				time_param->modified_time_us = 0;
		}
	}

	return best_level ? &best_level->rs_limits : NULL;
}

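/*
 * msm_pm_sleep_ops entry points: on the RPM-notifying path, flush the
 * buffered sleep set and arm the MPM before letting the L2 drop to its
 * allowed mode.
 */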
static int msm_rpmrs_enter_sleep(uint32_t sclk_count, void *limits,
		bool from_idle, bool notify_rpm)
{
	int rc = 0;

	if (notify_rpm) {
		rc = msm_rpmrs_flush_buffer(sclk_count, limits, from_idle);
		if (rc)
			return rc;

		if (msm_rpmrs_use_mpm(limits))
			msm_mpm_enter_sleep(sclk_count, from_idle);
	}

	rc = msm_rpmrs_flush_L2(limits, notify_rpm);
	return rc;
}

static void msm_rpmrs_exit_sleep(void *limits, bool from_idle,
		bool notify_rpm, bool collapsed)
{
	/* Disable L2 for now; we don't want L2 to do retention by default */
	msm_rpmrs_L2_restore(limits, notify_rpm, collapsed);

	if (msm_rpmrs_use_mpm(limits))
		msm_mpm_exit_sleep(from_idle);
}

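/*
 * CPU hotplug notifier: HSFS-open (full L2 power collapse) is only
 * allowed with a single CPU online, so track hotplug events, cap the L2
 * sleep value accordingly, and re-derive the available levels.
 */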
static int rpmrs_cpu_callback(struct notifier_block *nfb,
		unsigned long action, void *hcpu)
{
	switch (action) {
	case CPU_ONLINE_FROZEN:
	case CPU_ONLINE:
		if (num_online_cpus() > 1)
			msm_rpmrs_l2_cache.rs[0].value =
				MSM_RPMRS_L2_CACHE_ACTIVE;
		break;
	case CPU_DEAD_FROZEN:
	case CPU_DEAD:
		if (num_online_cpus() == 1)
			msm_rpmrs_l2_cache.rs[0].value =
				MSM_RPMRS_L2_CACHE_HSFS_OPEN;
		break;
	}

	msm_rpmrs_update_levels();
	return NOTIFY_OK;
}

static struct notifier_block __refdata rpmrs_cpu_notifier = {
	.notifier_call = rpmrs_cpu_callback,
};

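/*
 * Copy the board-supplied sleep levels and voltage tables, and translate
 * the generic rpmrs resource IDs into this target's RPM register IDs so
 * the listed bitmap can be built.
 */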
int __init msm_rpmrs_levels_init(struct msm_rpmrs_platform_data *data)
{
	int i, k;
	struct msm_rpmrs_level *levels = data->levels;

	msm_rpmrs_level_count = data->num_levels;

	msm_rpmrs_levels = kzalloc(sizeof(struct msm_rpmrs_level) *
			msm_rpmrs_level_count, GFP_KERNEL);
	if (!msm_rpmrs_levels)
		return -ENOMEM;

	memcpy(msm_rpmrs_levels, levels,
		msm_rpmrs_level_count * sizeof(struct msm_rpmrs_level));

	memcpy(vdd_dig_vlevels, data->vdd_dig_levels,
		(MSM_RPMRS_VDD_DIG_MAX + 1) * sizeof(vdd_dig_vlevels[0]));

	memcpy(vdd_mem_vlevels, data->vdd_mem_levels,
		(MSM_RPMRS_VDD_MEM_MAX + 1) * sizeof(vdd_mem_vlevels[0]));
	vdd_mask = data->vdd_mask;

	msm_rpmrs_pxo.rs[0].id = data->rpmrs_target_id[MSM_RPMRS_ID_PXO_CLK];
	msm_rpmrs_l2_cache.rs[0].id =
		data->rpmrs_target_id[MSM_RPMRS_ID_L2_CACHE_CTL];
	msm_rpmrs_vdd_mem.rs[0].id =
		data->rpmrs_target_id[MSM_RPMRS_ID_VDD_MEM_0];
	msm_rpmrs_vdd_mem.rs[1].id =
		data->rpmrs_target_id[MSM_RPMRS_ID_VDD_MEM_1];
	msm_rpmrs_vdd_dig.rs[0].id =
		data->rpmrs_target_id[MSM_RPMRS_ID_VDD_DIG_0];
	msm_rpmrs_vdd_dig.rs[1].id =
		data->rpmrs_target_id[MSM_RPMRS_ID_VDD_DIG_1];
	msm_rpmrs_rpm_ctl.rs[0].id =
		data->rpmrs_target_id[MSM_RPMRS_ID_RPM_CTL];

	/* Initialize the listed bitmap for valid resource IDs */
	for (i = 0; i < ARRAY_SIZE(msm_rpmrs_resources); i++) {
		for (k = 0; k < msm_rpmrs_resources[i]->size; k++) {
			if (msm_rpmrs_resources[i]->rs[k].id >=
					MSM_RPM_ID_LAST)
				continue;
			set_bit(msm_rpmrs_resources[i]->rs[k].id,
				msm_rpmrs_listed);
		}
	}

	return 0;
}

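/*
 * On 8x60 the L2 cache control resource must be primed: keep L2 active in
 * the active set while allowing it to sleep in the sleep set, then expose
 * the sysfs knobs.
 */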
static int __init msm_rpmrs_init(void)
{
	struct msm_rpm_iv_pair req;
	int rc;

	BUG_ON(!msm_rpmrs_levels);

	if (cpu_is_msm8x60()) {
		req.id = msm_rpmrs_l2_cache.rs[0].id;
		req.value = 1;

		rc = msm_rpm_set(MSM_RPM_CTX_SET_0, &req, 1);
		if (rc) {
			pr_err("%s: failed to request L2 cache: %d\n",
				__func__, rc);
			goto init_exit;
		}

		req.id = msm_rpmrs_l2_cache.rs[0].id;
		req.value = 0;

		rc = msm_rpmrs_set(MSM_RPM_CTX_SET_SLEEP, &req, 1);
		if (rc) {
			pr_err("%s: failed to initialize L2 cache for sleep: "
				"%d\n", __func__, rc);
			goto init_exit;
		}
	}

	rc = msm_rpmrs_resource_sysfs_add();

init_exit:
	return rc;
}
device_initcall(msm_rpmrs_init);

static struct msm_pm_sleep_ops msm_rpmrs_ops = {
	.lowest_limits = msm_rpmrs_lowest_limits,
	.enter_sleep = msm_rpmrs_enter_sleep,
	.exit_sleep = msm_rpmrs_exit_sleep,
};

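/*
 * On 8960/8930/8064-class targets the L2 SPM, not the RPM buffer, drives
 * the cache: swap in the SPM beyond_limits handler and track hotplug; on
 * 9615 no rpmrs L2 handling is needed at all.  Either way, hook these
 * sleep ops into the PM core.
 */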
static int __init msm_rpmrs_l2_init(void)
{
	if (soc_class_is_msm8960() || soc_class_is_msm8930() ||
	    soc_class_is_apq8064()) {

		msm_pm_set_l2_flush_flag(0);

		msm_rpmrs_l2_cache.beyond_limits =
			msm_spm_l2_cache_beyond_limits;
		msm_rpmrs_l2_cache.aggregate = NULL;
		msm_rpmrs_l2_cache.restore = NULL;

		register_hotcpu_notifier(&rpmrs_cpu_notifier);

	} else if (cpu_is_msm9615()) {
		msm_rpmrs_l2_cache.beyond_limits = NULL;
		msm_rpmrs_l2_cache.aggregate = NULL;
		msm_rpmrs_l2_cache.restore = NULL;
	}

	msm_pm_set_sleep_ops(&msm_rpmrs_ops);

	return 0;
}
early_initcall(msm_rpmrs_l2_init);