blob: 5314cee6d16c6782c44fbf6b6c457f2f7cbda41a [file] [log] [blame]
Praveen Chidambaram78499012011-11-01 17:15:17 -06001/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13
14#include <linux/module.h>
15#include <linux/kernel.h>
16#include <linux/init.h>
17#include <linux/types.h>
18#include <linux/bug.h>
19#include <linux/mutex.h>
20#include <linux/proc_fs.h>
21#include <linux/spinlock.h>
22#include <linux/cpu.h>
23#include <mach/rpm.h>
24#include <mach/msm_iomap.h>
25#include <asm/mach-types.h>
26#include <linux/io.h>
Praveen Chidambaram841d46c2011-08-04 09:07:53 -060027#include <mach/socinfo.h>
Subhash Jadavani909e04f2012-04-12 10:52:50 +053028#include <mach/mpm.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070029#include "rpm_resources.h"
30#include "spm.h"
Maheshkumar Sivasubramaniana012e092011-08-18 10:13:03 -060031#include "idle.h"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070032
33/******************************************************************************
34 * Debug Definitions
35 *****************************************************************************/
36
/* Debug flags selectable at runtime through the debug_mask module param. */
enum {
	MSM_RPMRS_DEBUG_OUTPUT = BIT(0),	/* log aggregated resource values */
	MSM_RPMRS_DEBUG_BUFFER = BIT(1),	/* log buffer/flush register traffic */
};

static int msm_rpmrs_debug_mask;
module_param_named(
	debug_mask, msm_rpmrs_debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP
);

/*
 * Table of low-power levels and its length.  Not assigned anywhere in this
 * view -- presumably filled in by platform init code; TODO confirm.
 */
static struct msm_rpmrs_level *msm_rpmrs_levels;
static int msm_rpmrs_level_count;
49
/*
 * Per-resource policy callbacks (defined below): beyond_limits reports
 * whether the buffered request exceeds a level's limits, aggregate folds a
 * level's limit into the buffered value, restore undoes the aggregation.
 */
static bool msm_rpmrs_pxo_beyond_limits(struct msm_rpmrs_limits *limits);
static void msm_rpmrs_aggregate_pxo(struct msm_rpmrs_limits *limits);
static void msm_rpmrs_restore_pxo(void);
static bool msm_rpmrs_l2_cache_beyond_limits(struct msm_rpmrs_limits *limits);
static void msm_rpmrs_aggregate_l2_cache(struct msm_rpmrs_limits *limits);
static void msm_rpmrs_restore_l2_cache(void);
static bool msm_rpmrs_vdd_mem_beyond_limits(struct msm_rpmrs_limits *limits);
static void msm_rpmrs_aggregate_vdd_mem(struct msm_rpmrs_limits *limits);
static void msm_rpmrs_restore_vdd_mem(void);
static bool msm_rpmrs_vdd_dig_beyond_limits(struct msm_rpmrs_limits *limits);
static void msm_rpmrs_aggregate_vdd_dig(struct msm_rpmrs_limits *limits);
static void msm_rpmrs_restore_vdd_dig(void);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070062
Praveen Chidambaram66775c62011-08-04 16:59:24 -060063static ssize_t msm_rpmrs_resource_attr_show(
64 struct kobject *kobj, struct kobj_attribute *attr, char *buf);
65static ssize_t msm_rpmrs_resource_attr_store(struct kobject *kobj,
66 struct kobj_attribute *attr, const char *buf, size_t count);
67
Praveen Chidambaram78499012011-11-01 17:15:17 -060068static int vdd_dig_vlevels[MSM_RPMRS_VDD_DIG_LAST];
69static int vdd_mem_vlevels[MSM_RPMRS_VDD_MEM_LAST];
70static int vdd_mask;
71
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070072#define MSM_RPMRS_MAX_RS_REGISTER_COUNT 2
73
Praveen Chidambaram66775c62011-08-04 16:59:24 -060074#define RPMRS_ATTR(_name) \
75 __ATTR(_name, S_IRUGO|S_IWUSR, \
76 msm_rpmrs_resource_attr_show, msm_rpmrs_resource_attr_store)
77
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070078struct msm_rpmrs_resource {
79 struct msm_rpm_iv_pair rs[MSM_RPMRS_MAX_RS_REGISTER_COUNT];
80 uint32_t size;
81 char *name;
82
83 uint32_t enable_low_power;
84
85 bool (*beyond_limits)(struct msm_rpmrs_limits *limits);
86 void (*aggregate)(struct msm_rpmrs_limits *limits);
87 void (*restore)(void);
Praveen Chidambaram66775c62011-08-04 16:59:24 -060088
89 struct kobj_attribute ko_attr;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070090};
91
/*
 * Static descriptors for each RPM resource.  Note the rs[].id fields are
 * not initialized here -- presumably assigned by init code outside this
 * view; TODO confirm.
 */
static struct msm_rpmrs_resource msm_rpmrs_pxo = {
	.size = 1,
	.name = "pxo",
	.beyond_limits = msm_rpmrs_pxo_beyond_limits,
	.aggregate = msm_rpmrs_aggregate_pxo,
	.restore = msm_rpmrs_restore_pxo,
	.ko_attr = RPMRS_ATTR(pxo),
};

static struct msm_rpmrs_resource msm_rpmrs_l2_cache = {
	.size = 1,
	.name = "L2_cache",
	.beyond_limits = msm_rpmrs_l2_cache_beyond_limits,
	.aggregate = msm_rpmrs_aggregate_l2_cache,
	.restore = msm_rpmrs_restore_l2_cache,
	.ko_attr = RPMRS_ATTR(L2_cache),
};

static struct msm_rpmrs_resource msm_rpmrs_vdd_mem = {
	.size = 2,
	.name = "vdd_mem",
	.beyond_limits = msm_rpmrs_vdd_mem_beyond_limits,
	.aggregate = msm_rpmrs_aggregate_vdd_mem,
	.restore = msm_rpmrs_restore_vdd_mem,
	.ko_attr = RPMRS_ATTR(vdd_mem),
};

static struct msm_rpmrs_resource msm_rpmrs_vdd_dig = {
	.size = 2,
	.name = "vdd_dig",
	.beyond_limits = msm_rpmrs_vdd_dig_beyond_limits,
	.aggregate = msm_rpmrs_aggregate_vdd_dig,
	.restore = msm_rpmrs_restore_vdd_dig,
	.ko_attr = RPMRS_ATTR(vdd_dig),
};

/* rpm_ctl has no policy hooks: it is driven directly via the active set. */
static struct msm_rpmrs_resource msm_rpmrs_rpm_ctl = {
	.size = 1,
	.name = "rpm_ctl",
	.beyond_limits = NULL,
	.aggregate = NULL,
	.restore = NULL,
	.ko_attr = RPMRS_ATTR(rpm_ctl),
};

/* All resources iterated by the aggregation/flush/restore machinery. */
static struct msm_rpmrs_resource *msm_rpmrs_resources[] = {
	&msm_rpmrs_pxo,
	&msm_rpmrs_l2_cache,
	&msm_rpmrs_vdd_mem,
	&msm_rpmrs_vdd_dig,
	&msm_rpmrs_rpm_ctl,
};
144
Praveen Chidambaram78499012011-11-01 17:15:17 -0600145static uint32_t msm_rpmrs_buffer[MSM_RPM_ID_LAST];
146static DECLARE_BITMAP(msm_rpmrs_buffered, MSM_RPM_ID_LAST);
147static DECLARE_BITMAP(msm_rpmrs_listed, MSM_RPM_ID_LAST);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700148static DEFINE_SPINLOCK(msm_rpmrs_lock);
149
Praveen Chidambaram78499012011-11-01 17:15:17 -0600150#define MSM_RPMRS_VDD(v) ((v) & (vdd_mask))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700151
152/******************************************************************************
153 * Attribute Definitions
154 *****************************************************************************/
Praveen Chidambaram66775c62011-08-04 16:59:24 -0600155static struct attribute *msm_rpmrs_attributes[] = {
156 &msm_rpmrs_pxo.ko_attr.attr,
157 &msm_rpmrs_l2_cache.ko_attr.attr,
158 &msm_rpmrs_vdd_mem.ko_attr.attr,
159 &msm_rpmrs_vdd_dig.ko_attr.attr,
Praveen Chidambaram9dfa8712011-09-14 16:25:01 -0600160 NULL,
161};
162static struct attribute *msm_rpmrs_mode_attributes[] = {
163 &msm_rpmrs_rpm_ctl.ko_attr.attr,
Praveen Chidambaram66775c62011-08-04 16:59:24 -0600164 NULL,
165};
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700166
Praveen Chidambaram66775c62011-08-04 16:59:24 -0600167static struct attribute_group msm_rpmrs_attribute_group = {
168 .attrs = msm_rpmrs_attributes,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700169};
170
Praveen Chidambaram9dfa8712011-09-14 16:25:01 -0600171static struct attribute_group msm_rpmrs_mode_attribute_group = {
172 .attrs = msm_rpmrs_mode_attributes,
173};
174
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700175#define GET_RS_FROM_ATTR(attr) \
Praveen Chidambaram66775c62011-08-04 16:59:24 -0600176 (container_of(attr, struct msm_rpmrs_resource, ko_attr))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700177
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700178
179/******************************************************************************
180 * Resource Specific Functions
181 *****************************************************************************/
182
183static void msm_rpmrs_aggregate_sclk(uint32_t sclk_count)
184{
185 msm_rpmrs_buffer[MSM_RPM_ID_TRIGGER_TIMED_TO] = 0;
186 set_bit(MSM_RPM_ID_TRIGGER_TIMED_TO, msm_rpmrs_buffered);
187 msm_rpmrs_buffer[MSM_RPM_ID_TRIGGER_TIMED_SCLK_COUNT] = sclk_count;
188 set_bit(MSM_RPM_ID_TRIGGER_TIMED_SCLK_COUNT, msm_rpmrs_buffered);
189}
190
191static void msm_rpmrs_restore_sclk(void)
192{
193 clear_bit(MSM_RPM_ID_TRIGGER_TIMED_SCLK_COUNT, msm_rpmrs_buffered);
194 msm_rpmrs_buffer[MSM_RPM_ID_TRIGGER_TIMED_SCLK_COUNT] = 0;
195 clear_bit(MSM_RPM_ID_TRIGGER_TIMED_TO, msm_rpmrs_buffered);
196 msm_rpmrs_buffer[MSM_RPM_ID_TRIGGER_TIMED_TO] = 0;
197}
198
199static bool msm_rpmrs_pxo_beyond_limits(struct msm_rpmrs_limits *limits)
200{
201 struct msm_rpmrs_resource *rs = &msm_rpmrs_pxo;
202 uint32_t pxo;
203
204 if (rs->enable_low_power && test_bit(rs->rs[0].id, msm_rpmrs_buffered))
205 pxo = msm_rpmrs_buffer[rs->rs[0].id];
206 else
207 pxo = MSM_RPMRS_PXO_ON;
208
209 return pxo > limits->pxo;
210}
211
212static void msm_rpmrs_aggregate_pxo(struct msm_rpmrs_limits *limits)
213{
214 struct msm_rpmrs_resource *rs = &msm_rpmrs_pxo;
215 uint32_t *buf = &msm_rpmrs_buffer[rs->rs[0].id];
216
217 if (test_bit(rs->rs[0].id, msm_rpmrs_buffered)) {
218 rs->rs[0].value = *buf;
219 if (limits->pxo > *buf)
220 *buf = limits->pxo;
221 if (MSM_RPMRS_DEBUG_OUTPUT & msm_rpmrs_debug_mask)
222 pr_info("%s: %d (0x%x)\n", __func__, *buf, *buf);
223 }
224}
225
226static void msm_rpmrs_restore_pxo(void)
227{
228 struct msm_rpmrs_resource *rs = &msm_rpmrs_pxo;
229
230 if (test_bit(rs->rs[0].id, msm_rpmrs_buffered))
231 msm_rpmrs_buffer[rs->rs[0].id] = rs->rs[0].value;
232}
233
234static bool msm_rpmrs_l2_cache_beyond_limits(struct msm_rpmrs_limits *limits)
235{
236 struct msm_rpmrs_resource *rs = &msm_rpmrs_l2_cache;
237 uint32_t l2_cache;
238
239 if (rs->enable_low_power && test_bit(rs->rs[0].id, msm_rpmrs_buffered))
240 l2_cache = msm_rpmrs_buffer[rs->rs[0].id];
241 else
242 l2_cache = MSM_RPMRS_L2_CACHE_ACTIVE;
243
244 return l2_cache > limits->l2_cache;
245}
246
247static void msm_rpmrs_aggregate_l2_cache(struct msm_rpmrs_limits *limits)
248{
249 struct msm_rpmrs_resource *rs = &msm_rpmrs_l2_cache;
250 uint32_t *buf = &msm_rpmrs_buffer[rs->rs[0].id];
251
252 if (test_bit(rs->rs[0].id, msm_rpmrs_buffered)) {
253 rs->rs[0].value = *buf;
254 if (limits->l2_cache > *buf)
255 *buf = limits->l2_cache;
256
257 if (MSM_RPMRS_DEBUG_OUTPUT & msm_rpmrs_debug_mask)
258 pr_info("%s: %d (0x%x)\n", __func__, *buf, *buf);
259 }
260}
261
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700262static bool msm_spm_l2_cache_beyond_limits(struct msm_rpmrs_limits *limits)
263{
264 struct msm_rpmrs_resource *rs = &msm_rpmrs_l2_cache;
265 uint32_t l2_cache = rs->rs[0].value;
266
267 if (!rs->enable_low_power)
268 l2_cache = MSM_RPMRS_L2_CACHE_ACTIVE;
269
270 return l2_cache > limits->l2_cache;
271}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700272
273static void msm_rpmrs_restore_l2_cache(void)
274{
275 struct msm_rpmrs_resource *rs = &msm_rpmrs_l2_cache;
276
277 if (test_bit(rs->rs[0].id, msm_rpmrs_buffered))
278 msm_rpmrs_buffer[rs->rs[0].id] = rs->rs[0].value;
279}
280
281static bool msm_rpmrs_vdd_mem_beyond_limits(struct msm_rpmrs_limits *limits)
282{
283 struct msm_rpmrs_resource *rs = &msm_rpmrs_vdd_mem;
284 uint32_t vdd_mem;
285
286 if (test_bit(rs->rs[0].id, msm_rpmrs_buffered)) {
287 uint32_t buffered_value = msm_rpmrs_buffer[rs->rs[0].id];
288
289 if (rs->enable_low_power == 0)
Praveen Chidambaram78499012011-11-01 17:15:17 -0600290 vdd_mem = vdd_mem_vlevels[MSM_RPMRS_VDD_MEM_ACTIVE];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700291 else if (rs->enable_low_power == 1)
Praveen Chidambaram78499012011-11-01 17:15:17 -0600292 vdd_mem = vdd_mem_vlevels[MSM_RPMRS_VDD_MEM_RET_HIGH];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700293 else
Praveen Chidambaram78499012011-11-01 17:15:17 -0600294 vdd_mem = vdd_mem_vlevels[MSM_RPMRS_VDD_MEM_RET_LOW];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700295
296 if (MSM_RPMRS_VDD(buffered_value) > MSM_RPMRS_VDD(vdd_mem))
Praveen Chidambaram78499012011-11-01 17:15:17 -0600297 vdd_mem = MSM_RPMRS_VDD(buffered_value);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700298 } else {
Praveen Chidambaram78499012011-11-01 17:15:17 -0600299 vdd_mem = vdd_mem_vlevels[MSM_RPMRS_VDD_MEM_ACTIVE];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700300 }
301
Praveen Chidambaram78499012011-11-01 17:15:17 -0600302 return vdd_mem > vdd_mem_vlevels[limits->vdd_mem_upper_bound];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700303}
304
305static void msm_rpmrs_aggregate_vdd_mem(struct msm_rpmrs_limits *limits)
306{
307 struct msm_rpmrs_resource *rs = &msm_rpmrs_vdd_mem;
308 uint32_t *buf = &msm_rpmrs_buffer[rs->rs[0].id];
309
310 if (test_bit(rs->rs[0].id, msm_rpmrs_buffered)) {
311 rs->rs[0].value = *buf;
Praveen Chidambaram78499012011-11-01 17:15:17 -0600312 if (vdd_mem_vlevels[limits->vdd_mem] > MSM_RPMRS_VDD(*buf)) {
313 *buf &= ~vdd_mask;
314 *buf |= vdd_mem_vlevels[limits->vdd_mem];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700315 }
316
317 if (MSM_RPMRS_DEBUG_OUTPUT & msm_rpmrs_debug_mask)
318 pr_info("%s: vdd %d (0x%x)\n", __func__,
319 MSM_RPMRS_VDD(*buf), MSM_RPMRS_VDD(*buf));
320 }
321}
322
323static void msm_rpmrs_restore_vdd_mem(void)
324{
325 struct msm_rpmrs_resource *rs = &msm_rpmrs_vdd_mem;
326
327 if (test_bit(rs->rs[0].id, msm_rpmrs_buffered))
328 msm_rpmrs_buffer[rs->rs[0].id] = rs->rs[0].value;
329}
330
331static bool msm_rpmrs_vdd_dig_beyond_limits(struct msm_rpmrs_limits *limits)
332{
333 struct msm_rpmrs_resource *rs = &msm_rpmrs_vdd_dig;
334 uint32_t vdd_dig;
335
336 if (test_bit(rs->rs[0].id, msm_rpmrs_buffered)) {
337 uint32_t buffered_value = msm_rpmrs_buffer[rs->rs[0].id];
338
339 if (rs->enable_low_power == 0)
Praveen Chidambaram78499012011-11-01 17:15:17 -0600340 vdd_dig = vdd_dig_vlevels[MSM_RPMRS_VDD_DIG_ACTIVE];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700341 else if (rs->enable_low_power == 1)
Praveen Chidambaram78499012011-11-01 17:15:17 -0600342 vdd_dig = vdd_dig_vlevels[MSM_RPMRS_VDD_DIG_RET_HIGH];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700343 else
Praveen Chidambaram78499012011-11-01 17:15:17 -0600344 vdd_dig = vdd_dig_vlevels[MSM_RPMRS_VDD_DIG_RET_LOW];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700345
346 if (MSM_RPMRS_VDD(buffered_value) > MSM_RPMRS_VDD(vdd_dig))
Praveen Chidambaram78499012011-11-01 17:15:17 -0600347 vdd_dig = MSM_RPMRS_VDD(buffered_value);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700348 } else {
Praveen Chidambaram78499012011-11-01 17:15:17 -0600349 vdd_dig = vdd_dig_vlevels[MSM_RPMRS_VDD_DIG_ACTIVE];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700350 }
351
Praveen Chidambaram78499012011-11-01 17:15:17 -0600352 return vdd_dig > vdd_dig_vlevels[limits->vdd_dig_upper_bound];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700353}
354
355static void msm_rpmrs_aggregate_vdd_dig(struct msm_rpmrs_limits *limits)
356{
357 struct msm_rpmrs_resource *rs = &msm_rpmrs_vdd_dig;
358 uint32_t *buf = &msm_rpmrs_buffer[rs->rs[0].id];
359
360 if (test_bit(rs->rs[0].id, msm_rpmrs_buffered)) {
361 rs->rs[0].value = *buf;
Praveen Chidambaram78499012011-11-01 17:15:17 -0600362 if (vdd_dig_vlevels[limits->vdd_dig] > MSM_RPMRS_VDD(*buf)) {
363 *buf &= ~vdd_mask;
364 *buf |= vdd_dig_vlevels[limits->vdd_dig];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700365 }
366
367
368 if (MSM_RPMRS_DEBUG_OUTPUT & msm_rpmrs_debug_mask)
369 pr_info("%s: vdd %d (0x%x)\n", __func__,
370 MSM_RPMRS_VDD(*buf), MSM_RPMRS_VDD(*buf));
371 }
372}
373
374static void msm_rpmrs_restore_vdd_dig(void)
375{
376 struct msm_rpmrs_resource *rs = &msm_rpmrs_vdd_dig;
377
378 if (test_bit(rs->rs[0].id, msm_rpmrs_buffered))
379 msm_rpmrs_buffer[rs->rs[0].id] = rs->rs[0].value;
380}
381
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700382/******************************************************************************
383 * Buffering Functions
384 *****************************************************************************/
385
386static bool msm_rpmrs_irqs_detectable(struct msm_rpmrs_limits *limits,
387 bool irqs_detect, bool gpio_detect)
388{
389
Praveen Chidambaram78499012011-11-01 17:15:17 -0600390 if (vdd_dig_vlevels[limits->vdd_dig_upper_bound] <=
391 vdd_dig_vlevels[MSM_RPMRS_VDD_DIG_RET_HIGH])
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700392 return irqs_detect;
393
394 if (limits->pxo == MSM_RPMRS_PXO_OFF)
395 return gpio_detect;
396
397 return true;
398}
399
400static bool msm_rpmrs_use_mpm(struct msm_rpmrs_limits *limits)
401{
402 return (limits->pxo == MSM_RPMRS_PXO_OFF) ||
Praveen Chidambaram78499012011-11-01 17:15:17 -0600403 (vdd_dig_vlevels[limits->vdd_dig] <=
404 vdd_dig_vlevels[MSM_RPMRS_VDD_DIG_RET_HIGH]);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700405}
406
407static void msm_rpmrs_update_levels(void)
408{
409 int i, k;
410
411 for (i = 0; i < msm_rpmrs_level_count; i++) {
412 struct msm_rpmrs_level *level = &msm_rpmrs_levels[i];
413
414 if (level->sleep_mode != MSM_PM_SLEEP_MODE_POWER_COLLAPSE)
415 continue;
416
417 level->available = true;
418
419 for (k = 0; k < ARRAY_SIZE(msm_rpmrs_resources); k++) {
420 struct msm_rpmrs_resource *rs = msm_rpmrs_resources[k];
421
422 if (rs->beyond_limits &&
423 rs->beyond_limits(&level->rs_limits)) {
424 level->available = false;
425 break;
426 }
427 }
Praveen Chidambaram78499012011-11-01 17:15:17 -0600428
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700429 }
430}
431
432/*
433 * Return value:
434 * 0: no entries in <req> is on our resource list
435 * 1: one or more entries in <req> is on our resource list
436 * -EINVAL: invalid id in <req> array
437 */
438static int msm_rpmrs_buffer_request(struct msm_rpm_iv_pair *req, int count)
439{
440 bool listed;
441 int i;
442
443 for (i = 0; i < count; i++)
Praveen Chidambaram78499012011-11-01 17:15:17 -0600444 if (req[i].id >= MSM_RPM_ID_LAST)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700445 return -EINVAL;
446
447 for (i = 0, listed = false; i < count; i++) {
448 msm_rpmrs_buffer[req[i].id] = req[i].value;
449 set_bit(req[i].id, msm_rpmrs_buffered);
450
451 if (MSM_RPMRS_DEBUG_BUFFER & msm_rpmrs_debug_mask)
452 pr_info("%s: reg %d: 0x%x\n",
453 __func__, req[i].id, req[i].value);
454
455 if (listed)
456 continue;
457
458 if (test_bit(req[i].id, msm_rpmrs_listed))
459 listed = true;
460 }
461
462 return listed ? 1 : 0;
463}
464
465/*
466 * Return value:
467 * 0: no entries in <req> is on our resource list
468 * 1: one or more entries in <req> is on our resource list
469 * -EINVAL: invalid id in <req> array
470 */
471static int msm_rpmrs_clear_buffer(struct msm_rpm_iv_pair *req, int count)
472{
473 bool listed;
474 int i;
475
476 for (i = 0; i < count; i++)
Praveen Chidambaram78499012011-11-01 17:15:17 -0600477 if (req[i].id >= MSM_RPM_ID_LAST)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700478 return -EINVAL;
479
480 for (i = 0, listed = false; i < count; i++) {
481 msm_rpmrs_buffer[req[i].id] = 0;
482 clear_bit(req[i].id, msm_rpmrs_buffered);
483
484 if (MSM_RPMRS_DEBUG_BUFFER & msm_rpmrs_debug_mask)
485 pr_info("%s: reg %d\n", __func__, req[i].id);
486
487 if (listed)
488 continue;
489
490 if (test_bit(req[i].id, msm_rpmrs_listed))
491 listed = true;
492 }
493
494 return listed ? 1 : 0;
495}
496
#ifdef CONFIG_MSM_L2_SPM
/*
 * Map the level's L2 cache limit onto an SPM low-power mode and request
 * it.  HSFS_OPEN (full power collapse) additionally arms the L2 flush
 * flag so the cache is flushed on the way down.  Returns the SPM call's
 * result (0 on success).
 */
static int msm_rpmrs_flush_L2(struct msm_rpmrs_limits *limits, int notify_rpm)
{
	int rc = 0;
	int lpm;

	switch (limits->l2_cache) {
	case MSM_RPMRS_L2_CACHE_HSFS_OPEN:
		lpm = MSM_SPM_L2_MODE_POWER_COLLAPSE;
		msm_pm_set_l2_flush_flag(1);
		break;
	case MSM_RPMRS_L2_CACHE_GDHS:
		lpm = MSM_SPM_L2_MODE_GDHS;
		break;
	case MSM_RPMRS_L2_CACHE_RETENTION:
		lpm = MSM_SPM_L2_MODE_RETENTION;
		break;
	default:	/* unknown limit treated the same as ACTIVE */
	case MSM_RPMRS_L2_CACHE_ACTIVE:
		lpm = MSM_SPM_L2_MODE_DISABLED;
		break;
	}

	rc = msm_spm_l2_set_low_power_mode(lpm, notify_rpm);
	if (MSM_RPMRS_DEBUG_BUFFER & msm_rpmrs_debug_mask)
		pr_info("%s: Requesting low power mode %d returned %d\n",
				__func__, lpm, rc);

	return rc;
}
/*
 * Return the L2 SPM to normal operation after wakeup and drop the flush
 * flag.  limits/collapsed are unused here -- presumably kept for symmetry
 * with other restore callbacks; TODO confirm.
 */
static void msm_rpmrs_L2_restore(struct msm_rpmrs_limits *limits,
		bool notify_rpm, bool collapsed)
{
	msm_spm_l2_set_low_power_mode(MSM_SPM_MODE_DISABLED, notify_rpm);
	msm_pm_set_l2_flush_flag(0);
}
#else
/* Stubs when the L2 SPM is not configured: nothing to flush or restore. */
static int msm_rpmrs_flush_L2(struct msm_rpmrs_limits *limits, int notify_rpm)
{
	return 0;
}
static void msm_rpmrs_L2_restore(struct msm_rpmrs_limits *limits,
		bool notify_rpm, bool collapsed)
{
}
#endif
543
/*
 * Send the buffered sleep set to the RPM.  Steps: stage the sleep-clock
 * trigger, let every resource aggregate the level's limits into the
 * buffer, copy all buffered registers into a request array, and issue it
 * with msm_rpm_set_noirq().  On success, buffered bits for registers not
 * on our resource list are dropped; on any outcome the per-resource
 * aggregation and the sclk staging are rolled back so the buffer reflects
 * only caller requests again.  Returns 0 on success or a negative errno.
 * Runs with interrupts off (GFP_ATOMIC allocation); caller is expected to
 * hold msm_rpmrs_lock -- TODO confirm against callers outside this view.
 */
static int msm_rpmrs_flush_buffer(
	uint32_t sclk_count, struct msm_rpmrs_limits *limits, int from_idle)
{
	struct msm_rpm_iv_pair *req;
	int count;
	int rc;
	int i;

	msm_rpmrs_aggregate_sclk(sclk_count);
	for (i = 0; i < ARRAY_SIZE(msm_rpmrs_resources); i++) {
		if (msm_rpmrs_resources[i]->aggregate)
			msm_rpmrs_resources[i]->aggregate(limits);
	}

	count = bitmap_weight(msm_rpmrs_buffered, MSM_RPM_ID_LAST);

	req = kmalloc(sizeof(*req) * count, GFP_ATOMIC);
	if (!req) {
		rc = -ENOMEM;
		goto flush_buffer_restore;
	}

	/* Re-count while walking the bitmap to fill the request array. */
	count = 0;
	i = find_first_bit(msm_rpmrs_buffered, MSM_RPM_ID_LAST);

	while (i < MSM_RPM_ID_LAST) {
		if (MSM_RPMRS_DEBUG_OUTPUT & msm_rpmrs_debug_mask)
			pr_info("%s: reg %d: 0x%x\n",
				__func__, i, msm_rpmrs_buffer[i]);

		req[count].id = i;
		req[count].value = msm_rpmrs_buffer[i];
		count++;

		i = find_next_bit(msm_rpmrs_buffered, MSM_RPM_ID_LAST, i + 1);
	}

	rc = msm_rpm_set_noirq(MSM_RPM_CTX_SET_SLEEP, req, count);
	kfree(req);

	if (rc)
		goto flush_buffer_restore;

	/* Keep only registers our resources own buffered for next time. */
	bitmap_and(msm_rpmrs_buffered,
		msm_rpmrs_buffered, msm_rpmrs_listed, MSM_RPM_ID_LAST);

flush_buffer_restore:
	/* Undo aggregation regardless of success or failure. */
	for (i = 0; i < ARRAY_SIZE(msm_rpmrs_resources); i++) {
		if (msm_rpmrs_resources[i]->restore)
			msm_rpmrs_resources[i]->restore();
	}
	msm_rpmrs_restore_sclk();

	if (rc)
		pr_err("%s: failed: %d\n", __func__, rc);
	return rc;
}
601
602static int msm_rpmrs_set_common(
603 int ctx, struct msm_rpm_iv_pair *req, int count, bool noirq)
604{
605 if (ctx == MSM_RPM_CTX_SET_SLEEP) {
606 unsigned long flags;
607 int rc;
608
609 spin_lock_irqsave(&msm_rpmrs_lock, flags);
610 rc = msm_rpmrs_buffer_request(req, count);
611 if (rc > 0) {
612 msm_rpmrs_update_levels();
613 rc = 0;
614 }
615 spin_unlock_irqrestore(&msm_rpmrs_lock, flags);
616
617 return rc;
618 }
619
620 if (noirq)
621 return msm_rpm_set_noirq(ctx, req, count);
622 else
623 return msm_rpm_set(ctx, req, count);
624}
625
626static int msm_rpmrs_clear_common(
627 int ctx, struct msm_rpm_iv_pair *req, int count, bool noirq)
628{
629 if (ctx == MSM_RPM_CTX_SET_SLEEP) {
630 unsigned long flags;
631 int rc;
632
633 spin_lock_irqsave(&msm_rpmrs_lock, flags);
634 rc = msm_rpmrs_clear_buffer(req, count);
635 if (rc > 0) {
636 msm_rpmrs_update_levels();
637 rc = 0;
638 }
639 spin_unlock_irqrestore(&msm_rpmrs_lock, flags);
640
641 if (rc < 0)
642 return rc;
643 }
644
645 if (noirq)
646 return msm_rpm_clear_noirq(ctx, req, count);
647 else
648 return msm_rpm_clear(ctx, req, count);
649}
650
651/******************************************************************************
652 * Attribute Functions
653 *****************************************************************************/
654
/*
 * sysfs show handler shared by all resource attributes.  Reads the
 * resource's enable_low_power knob (or, for rpm_ctl, its active-set
 * register value) under the lock, then formats it via param_get_uint by
 * pointing a temporary kernel_param at the snapshot.  Appends a newline.
 * Returns the number of bytes written or a negative errno.
 */
static ssize_t msm_rpmrs_resource_attr_show(
	struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct kernel_param kp;	/* only .arg is used by param_get_uint */
	unsigned long flags;
	unsigned int temp;
	int rc;

	spin_lock_irqsave(&msm_rpmrs_lock, flags);
	/* special case active-set signal for MSM_RPMRS_ID_RPM_CTL */
	if (GET_RS_FROM_ATTR(attr)->rs[0].id ==
			msm_rpmrs_rpm_ctl.rs[0].id)
		temp = GET_RS_FROM_ATTR(attr)->rs[0].value;
	else
		temp = GET_RS_FROM_ATTR(attr)->enable_low_power;
	spin_unlock_irqrestore(&msm_rpmrs_lock, flags);

	kp.arg = &temp;
	rc = param_get_uint(buf, &kp);

	if (rc > 0) {
		strlcat(buf, "\n", PAGE_SIZE);
		rc++;	/* account for the appended newline */
	}

	return rc;
}
682
/*
 * sysfs store handler shared by all resource attributes.  Parses an
 * unsigned int, records it as the resource's enable_low_power policy, and
 * recomputes sleep-level availability.  For rpm_ctl the value is instead
 * pushed immediately to the RPM active set.  Returns count on acceptance
 * or a negative errno on parse failure.  NOTE(review): an RPM set failure
 * is logged but count is still returned -- presumably intentional
 * best-effort; confirm.
 */
static ssize_t msm_rpmrs_resource_attr_store(struct kobject *kobj,
	struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct kernel_param kp;	/* only .arg is used by param_set_uint */
	unsigned long flags;
	unsigned int temp;
	int rc;

	kp.arg = &temp;
	rc = param_set_uint(buf, &kp);
	if (rc)
		return rc;

	spin_lock_irqsave(&msm_rpmrs_lock, flags);
	GET_RS_FROM_ATTR(attr)->enable_low_power = temp;

	/* special case active-set signal for MSM_RPMRS_ID_RPM_CTL */
	if (GET_RS_FROM_ATTR(attr)->rs[0].id ==
			msm_rpmrs_rpm_ctl.rs[0].id) {
		struct msm_rpm_iv_pair req;
		req.id = msm_rpmrs_rpm_ctl.rs[0].id;
		req.value = GET_RS_FROM_ATTR(attr)->enable_low_power;
		GET_RS_FROM_ATTR(attr)->rs[0].value = req.value;

		rc = msm_rpm_set_noirq(MSM_RPM_CTX_SET_0, &req, 1);
		if (rc) {
			pr_err("%s: failed to request RPM_CTL to %d: %d\n",
				__func__, req.value, rc);
		}
	}

	msm_rpmrs_update_levels();
	spin_unlock_irqrestore(&msm_rpmrs_lock, flags);

	return count;
}
719
720static int __init msm_rpmrs_resource_sysfs_add(void)
721{
Praveen Chidambaram9dfa8712011-09-14 16:25:01 -0600722 struct kobject *module_kobj = NULL;
723 struct kobject *low_power_kobj = NULL;
724 struct kobject *mode_kobj = NULL;
725 int rc = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700726
727 module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
728 if (!module_kobj) {
729 pr_err("%s: cannot find kobject for module %s\n",
730 __func__, KBUILD_MODNAME);
731 rc = -ENOENT;
732 goto resource_sysfs_add_exit;
733 }
734
Praveen Chidambaram9dfa8712011-09-14 16:25:01 -0600735 low_power_kobj = kobject_create_and_add(
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700736 "enable_low_power", module_kobj);
Praveen Chidambaram9dfa8712011-09-14 16:25:01 -0600737 if (!low_power_kobj) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700738 pr_err("%s: cannot create kobject\n", __func__);
739 rc = -ENOMEM;
740 goto resource_sysfs_add_exit;
741 }
742
Praveen Chidambaram9dfa8712011-09-14 16:25:01 -0600743 mode_kobj = kobject_create_and_add(
744 "mode", module_kobj);
745 if (!mode_kobj) {
746 pr_err("%s: cannot create kobject\n", __func__);
747 rc = -ENOMEM;
748 goto resource_sysfs_add_exit;
749 }
750
751 rc = sysfs_create_group(low_power_kobj, &msm_rpmrs_attribute_group);
Praveen Chidambaram66775c62011-08-04 16:59:24 -0600752 if (rc) {
753 pr_err("%s: cannot create kobject attribute group\n", __func__);
754 goto resource_sysfs_add_exit;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700755 }
756
Praveen Chidambaram9dfa8712011-09-14 16:25:01 -0600757 rc = sysfs_create_group(mode_kobj, &msm_rpmrs_mode_attribute_group);
758 if (rc) {
759 pr_err("%s: cannot create kobject attribute group\n", __func__);
760 goto resource_sysfs_add_exit;
761 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700762
Maheshkumar Sivasubramaniana012e092011-08-18 10:13:03 -0600763 rc = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700764resource_sysfs_add_exit:
Praveen Chidambaram9dfa8712011-09-14 16:25:01 -0600765 if (rc) {
766 if (low_power_kobj)
767 sysfs_remove_group(low_power_kobj,
768 &msm_rpmrs_attribute_group);
769 kobject_del(low_power_kobj);
770 kobject_del(mode_kobj);
771 }
772
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700773 return rc;
774}
775
776/******************************************************************************
777 * Public Functions
778 *****************************************************************************/
779
/*
 * Set RPM resource values in context <ctx>.  Sleep-set requests are
 * buffered locally; other contexts are forwarded to the RPM driver (the
 * non-noirq path).  Returns 0 on success or a negative errno.
 */
int msm_rpmrs_set(int ctx, struct msm_rpm_iv_pair *req, int count)
{
	return msm_rpmrs_set_common(ctx, req, count, false);
}
784
/*
 * As msm_rpmrs_set(), but uses the noirq RPM path.  Must only be called
 * with local interrupts disabled (warns otherwise).
 */
int msm_rpmrs_set_noirq(int ctx, struct msm_rpm_iv_pair *req, int count)
{
	WARN(!irqs_disabled(), "msm_rpmrs_set_noirq can only be called "
		"safely when local irqs are disabled. Consider using "
		"msm_rpmrs_set or msm_rpmrs_set_nosleep instead.");
	return msm_rpmrs_set_common(ctx, req, count, true);
}
792
Praveen Chidambaram9dfa8712011-09-14 16:25:01 -0600793/* Allow individual bits of an rpm resource be set, currently used only for
794 * active context resource viz. RPM_CTL. The API is generic enough to possibly
795 * extend it to other resources as well in the future.
796 */
797int msm_rpmrs_set_bits_noirq(int ctx, struct msm_rpm_iv_pair *req, int count,
798 int *mask)
799{
800 unsigned long flags;
801 int i, j;
802 int rc = -1;
803 struct msm_rpmrs_resource *rs;
804
805 if (ctx != MSM_RPM_CTX_SET_0)
806 return -ENOSYS;
807
808 spin_lock_irqsave(&msm_rpmrs_lock, flags);
809 for (i = 0; i < ARRAY_SIZE(msm_rpmrs_resources); i++) {
810 rs = msm_rpmrs_resources[i];
811 if (rs->rs[0].id == req[0].id && rs->size == count) {
812 for (j = 0; j < rs->size; j++) {
813 rs->rs[j].value &= ~mask[j];
814 rs->rs[j].value |= req[j].value & mask[j];
815 }
816 break;
817 }
818 }
819
820 if (i != ARRAY_SIZE(msm_rpmrs_resources)) {
821 rc = msm_rpm_set_noirq(MSM_RPM_CTX_SET_0, &rs->rs[0], rs->size);
822 if (rc) {
823 for (j = 0; j < rs->size; j++) {
824 pr_err("%s: failed to request %d to %d: %d\n",
825 __func__,
826 rs->rs[j].id, rs->rs[j].value, rc);
827 }
828 }
829 }
830 spin_unlock_irqrestore(&msm_rpmrs_lock, flags);
831
832 return rc;
833
834}
835
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700836int msm_rpmrs_clear(int ctx, struct msm_rpm_iv_pair *req, int count)
837{
838 return msm_rpmrs_clear_common(ctx, req, count, false);
839}
840
/*
 * As msm_rpmrs_clear(), but uses the noirq RPM path.  Must only be called
 * with local interrupts disabled (warns otherwise).
 */
int msm_rpmrs_clear_noirq(int ctx, struct msm_rpm_iv_pair *req, int count)
{
	WARN(!irqs_disabled(), "msm_rpmrs_clear_noirq can only be called "
		"safely when local irqs are disabled. Consider using "
		"msm_rpmrs_clear or msm_rpmrs_clear_nosleep instead.");
	return msm_rpmrs_clear_common(ctx, req, count, true);
}
848
/*
 * Dump the state of every tracked resource to the kernel log: buffered
 * flag and buffered value for RPM-backed resources, or the locally cached
 * value for ids outside the RPM register range.
 */
void msm_rpmrs_show_resources(void)
{
	struct msm_rpmrs_resource *rs;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&msm_rpmrs_lock, flags);
	for (i = 0; i < ARRAY_SIZE(msm_rpmrs_resources); i++) {
		rs = msm_rpmrs_resources[i];
		if (rs->rs[0].id < MSM_RPM_ID_LAST)
			pr_info("%s: resource %s: buffered %d, value 0x%x\n",
				__func__, rs->name,
				test_bit(rs->rs[0].id, msm_rpmrs_buffered),
				msm_rpmrs_buffer[rs->rs[0].id]);
		else
			pr_info("%s: resource %s: value %d\n",
				__func__, rs->name, rs->rs[0].value);
	}
	spin_unlock_irqrestore(&msm_rpmrs_lock, flags);
}
869
/*
 * Select the lowest-power sleep level that satisfies the caller's
 * constraints and return a pointer to its resource limits.
 *
 * @from_idle:  true when called from cpuidle rather than suspend; forwarded
 *              to the MPM detectability checks.
 * @sleep_mode: requested low power mode; only levels of this mode qualify.
 * @latency_us: maximum wakeup latency the caller can tolerate.
 * @sleep_us:   expected sleep duration, used for the energy estimate.
 * @power:      optional out parameter; receives the estimated average power
 *              of the chosen level when non-NULL.
 *
 * Returns &best_level->rs_limits, or NULL when no level qualifies.
 */
static void *msm_rpmrs_lowest_limits(bool from_idle,
		enum msm_pm_sleep_mode sleep_mode, uint32_t latency_us,
		uint32_t sleep_us, uint32_t *power)
{
	unsigned int cpu = smp_processor_id();
	struct msm_rpmrs_level *best_level = NULL;
	bool irqs_detectable = false;
	bool gpio_detectable = false;
	int i;
	uint32_t pwr;

	/* Wakeup detectability only matters for full power collapse. */
	if (sleep_mode == MSM_PM_SLEEP_MODE_POWER_COLLAPSE) {
		irqs_detectable = msm_mpm_irqs_detectable(from_idle);
		gpio_detectable = msm_mpm_gpio_irqs_detectable(from_idle);
	}

	for (i = 0; i < msm_rpmrs_level_count; i++) {
		struct msm_rpmrs_level *level = &msm_rpmrs_levels[i];

		if (!level->available)
			continue;

		if (sleep_mode != level->sleep_mode)
			continue;

		/* Level's exit latency must fit the caller's budget. */
		if (latency_us < level->latency_us)
			continue;

		/* Sleep must be long enough to amortize entry/exit time. */
		if (sleep_us <= level->time_overhead_us)
			continue;

		if (!msm_rpmrs_irqs_detectable(&level->rs_limits,
					irqs_detectable, gpio_detectable))
			continue;

		/*
		 * Standalone PC on CPU0 with an outstanding local RPM
		 * request: abandon the whole search (break, not continue).
		 */
		if (MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE == sleep_mode)
			if (!cpu && msm_rpm_local_request_is_outstanding())
				break;


		/*
		 * Estimate average power over the sleep window.
		 * NOTE(review): the "sleep_us <= time_overhead_us" branch
		 * appears unreachable here — the same condition already
		 * caused a continue above; confirm before relying on it.
		 */
		if (sleep_us <= 1) {
			pwr = level->energy_overhead;
		} else if (sleep_us <= level->time_overhead_us) {
			pwr = level->energy_overhead / sleep_us;
		} else if ((sleep_us >> 10) > level->time_overhead_us) {
			/* Long sleep (>~1024x overhead): steady state only. */
			pwr = level->steady_state_power;
		} else {
			pwr = level->steady_state_power;
			pwr -= (level->time_overhead_us *
				level->steady_state_power)/sleep_us;
			pwr += level->energy_overhead / sleep_us;
		}

		/*
		 * Keep the cheapest level so far; ties (>=) prefer the
		 * later (presumably deeper) level.  The per-cpu latency
		 * and power are cached in the level's rs_limits.
		 */
		if (!best_level ||
				best_level->rs_limits.power[cpu] >= pwr) {
			level->rs_limits.latency_us[cpu] = level->latency_us;
			level->rs_limits.power[cpu] = pwr;
			best_level = level;
			if (power)
				*power = pwr;
		}
	}

	return best_level ? &best_level->rs_limits : NULL;
}
935
Mahesh Sivasubramanian6d06e3a2012-05-16 13:41:07 -0600936static int msm_rpmrs_enter_sleep(uint32_t sclk_count, void *limits,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700937 bool from_idle, bool notify_rpm)
938{
939 int rc = 0;
940
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700941 if (notify_rpm) {
942 rc = msm_rpmrs_flush_buffer(sclk_count, limits, from_idle);
943 if (rc)
944 return rc;
945
946 if (msm_rpmrs_use_mpm(limits))
947 msm_mpm_enter_sleep(from_idle);
948 }
949
Mahesh Sivasubramanian6e22a1e2011-12-01 10:20:34 -0700950 rc = msm_rpmrs_flush_L2(limits, notify_rpm);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700951 return rc;
952}
953
Mahesh Sivasubramanian6d06e3a2012-05-16 13:41:07 -0600954static void msm_rpmrs_exit_sleep(void *limits, bool from_idle,
Maheshkumar Sivasubramaniandd93ecf2011-09-15 19:39:14 -0600955 bool notify_rpm, bool collapsed)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700956{
957
958 /* Disable L2 for now, we dont want L2 to do retention by default */
Maheshkumar Sivasubramaniandd93ecf2011-09-15 19:39:14 -0600959 msm_rpmrs_L2_restore(limits, notify_rpm, collapsed);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700960
961 if (msm_rpmrs_use_mpm(limits))
962 msm_mpm_exit_sleep(from_idle);
963}
964
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700965static int rpmrs_cpu_callback(struct notifier_block *nfb,
966 unsigned long action, void *hcpu)
967{
968 switch (action) {
969 case CPU_ONLINE_FROZEN:
970 case CPU_ONLINE:
971 if (num_online_cpus() > 1)
972 msm_rpmrs_l2_cache.rs[0].value =
973 MSM_RPMRS_L2_CACHE_ACTIVE;
974 break;
975 case CPU_DEAD_FROZEN:
976 case CPU_DEAD:
977 if (num_online_cpus() == 1)
978 msm_rpmrs_l2_cache.rs[0].value =
979 MSM_RPMRS_L2_CACHE_HSFS_OPEN;
980 break;
981 }
982
983 msm_rpmrs_update_levels();
984 return NOTIFY_OK;
985}
986
/* Hotplug notifier that keeps the allowed L2 state in sync with the
 * number of online CPUs (see rpmrs_cpu_callback above). */
static struct notifier_block __refdata rpmrs_cpu_notifier = {
	.notifier_call = rpmrs_cpu_callback,
};
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700990
Praveen Chidambaram78499012011-11-01 17:15:17 -0600991int __init msm_rpmrs_levels_init(struct msm_rpmrs_platform_data *data)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700992{
Praveen Chidambaram78499012011-11-01 17:15:17 -0600993 int i, k;
994 struct msm_rpmrs_level *levels = data->levels;
995
996 msm_rpmrs_level_count = data->num_levels;
997
998 msm_rpmrs_levels = kzalloc(sizeof(struct msm_rpmrs_level) *
999 msm_rpmrs_level_count, GFP_KERNEL);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001000 if (!msm_rpmrs_levels)
1001 return -ENOMEM;
Praveen Chidambaram78499012011-11-01 17:15:17 -06001002
1003 memcpy(msm_rpmrs_levels, levels,
1004 msm_rpmrs_level_count * sizeof(struct msm_rpmrs_level));
1005
1006 memcpy(vdd_dig_vlevels, data->vdd_dig_levels,
1007 (MSM_RPMRS_VDD_DIG_MAX + 1) * sizeof(vdd_dig_vlevels[0]));
1008
1009 memcpy(vdd_mem_vlevels, data->vdd_mem_levels,
1010 (MSM_RPMRS_VDD_MEM_MAX + 1) * sizeof(vdd_mem_vlevels[0]));
1011 vdd_mask = data->vdd_mask;
1012
1013 msm_rpmrs_pxo.rs[0].id = data->rpmrs_target_id[MSM_RPMRS_ID_PXO_CLK];
1014 msm_rpmrs_l2_cache.rs[0].id =
1015 data->rpmrs_target_id[MSM_RPMRS_ID_L2_CACHE_CTL];
1016 msm_rpmrs_vdd_mem.rs[0].id =
1017 data->rpmrs_target_id[MSM_RPMRS_ID_VDD_MEM_0];
1018 msm_rpmrs_vdd_mem.rs[1].id =
1019 data->rpmrs_target_id[MSM_RPMRS_ID_VDD_MEM_1];
1020 msm_rpmrs_vdd_dig.rs[0].id =
1021 data->rpmrs_target_id[MSM_RPMRS_ID_VDD_DIG_0];
1022 msm_rpmrs_vdd_dig.rs[1].id =
1023 data->rpmrs_target_id[MSM_RPMRS_ID_VDD_DIG_1];
1024 msm_rpmrs_rpm_ctl.rs[0].id =
1025 data->rpmrs_target_id[MSM_RPMRS_ID_RPM_CTL];
1026
1027 /* Initialize listed bitmap for valid resource IDs */
1028 for (i = 0; i < ARRAY_SIZE(msm_rpmrs_resources); i++) {
Stephen Boydbf46af02012-02-06 15:48:34 -08001029 for (k = 0; k < msm_rpmrs_resources[i]->size; k++) {
Praveen Chidambaram78499012011-11-01 17:15:17 -06001030 if (msm_rpmrs_resources[i]->rs[k].id >=
1031 MSM_RPM_ID_LAST)
1032 continue;
1033 set_bit(msm_rpmrs_resources[i]->rs[k].id,
1034 msm_rpmrs_listed);
Stephen Boydbf46af02012-02-06 15:48:34 -08001035 }
Praveen Chidambaram78499012011-11-01 17:15:17 -06001036 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001037
1038 return 0;
1039}
1040
1041static int __init msm_rpmrs_init(void)
1042{
1043 struct msm_rpm_iv_pair req;
1044 int rc;
1045
1046 BUG_ON(!msm_rpmrs_levels);
1047
Praveen Chidambaram841d46c2011-08-04 09:07:53 -06001048 if (cpu_is_msm8x60()) {
Praveen Chidambaram78499012011-11-01 17:15:17 -06001049 req.id = msm_rpmrs_l2_cache.rs[0].id;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001050 req.value = 1;
1051
1052 rc = msm_rpm_set(MSM_RPM_CTX_SET_0, &req, 1);
1053 if (rc) {
1054 pr_err("%s: failed to request L2 cache: %d\n",
1055 __func__, rc);
1056 goto init_exit;
1057 }
1058
Praveen Chidambaram78499012011-11-01 17:15:17 -06001059 req.id = msm_rpmrs_l2_cache.rs[0].id;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001060 req.value = 0;
1061
1062 rc = msm_rpmrs_set(MSM_RPM_CTX_SET_SLEEP, &req, 1);
1063 if (rc) {
1064 pr_err("%s: failed to initialize L2 cache for sleep: "
1065 "%d\n", __func__, rc);
1066 goto init_exit;
1067 }
1068 }
1069
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001070 rc = msm_rpmrs_resource_sysfs_add();
1071
1072init_exit:
1073 return rc;
1074}
1075device_initcall(msm_rpmrs_init);
1076
/* Sleep callbacks handed to the MSM PM core via msm_pm_set_sleep_ops(). */
static struct msm_pm_sleep_ops msm_rpmrs_ops = {
	.lowest_limits = msm_rpmrs_lowest_limits,
	.enter_sleep = msm_rpmrs_enter_sleep,
	.exit_sleep = msm_rpmrs_exit_sleep,
};
1082
Maheshkumar Sivasubramanian16588412011-10-13 12:16:23 -06001083static int __init msm_rpmrs_l2_init(void)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001084{
Mahesh Sivasubramanianfc8d9f72012-01-23 14:31:59 -07001085 if (cpu_is_msm8960() || cpu_is_msm8930() || cpu_is_apq8064()) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001086
Maheshkumar Sivasubramaniana012e092011-08-18 10:13:03 -06001087 msm_pm_set_l2_flush_flag(0);
1088
Maheshkumar Sivasubramanian07c363f2011-10-18 09:52:33 -06001089 msm_rpmrs_l2_cache.beyond_limits =
1090 msm_spm_l2_cache_beyond_limits;
1091 msm_rpmrs_l2_cache.aggregate = NULL;
1092 msm_rpmrs_l2_cache.restore = NULL;
Maheshkumar Sivasubramanian16588412011-10-13 12:16:23 -06001093
Maheshkumar Sivasubramanian07c363f2011-10-18 09:52:33 -06001094 register_hotcpu_notifier(&rpmrs_cpu_notifier);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001095
Maheshkumar Sivasubramanian07c363f2011-10-18 09:52:33 -06001096 } else if (cpu_is_msm9615()) {
1097 msm_rpmrs_l2_cache.beyond_limits = NULL;
1098 msm_rpmrs_l2_cache.aggregate = NULL;
1099 msm_rpmrs_l2_cache.restore = NULL;
1100 }
Mahesh Sivasubramanian6d06e3a2012-05-16 13:41:07 -06001101
1102 msm_pm_set_sleep_ops(&msm_rpmrs_ops);
1103
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001104 return 0;
1105}
Maheshkumar Sivasubramanian16588412011-10-13 12:16:23 -06001106early_initcall(msm_rpmrs_l2_init);