/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/bug.h>
#include <linux/mutex.h>
#include <linux/proc_fs.h>
#include <linux/spinlock.h>
#include <linux/cpu.h>
#include <mach/rpm.h>
#include <mach/msm_iomap.h>
#include <asm/mach-types.h>
#include <linux/io.h>
#include <mach/socinfo.h>
#include "mpm.h"
#include "rpm_resources.h"
#include "spm.h"

/******************************************************************************
 * Debug Definitions
 *****************************************************************************/

enum {
	MSM_RPMRS_DEBUG_OUTPUT = BIT(0),
	MSM_RPMRS_DEBUG_BUFFER = BIT(1),
};

static int msm_rpmrs_debug_mask;
module_param_named(
	debug_mask, msm_rpmrs_debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP
);

static struct msm_rpmrs_level *msm_rpmrs_levels;
static int msm_rpmrs_level_count;

static bool msm_rpmrs_pxo_beyond_limits(struct msm_rpmrs_limits *limits);
static void msm_rpmrs_aggregate_pxo(struct msm_rpmrs_limits *limits);
static void msm_rpmrs_restore_pxo(void);
static bool msm_rpmrs_l2_cache_beyond_limits(struct msm_rpmrs_limits *limits);
static void msm_rpmrs_aggregate_l2_cache(struct msm_rpmrs_limits *limits);
static void msm_rpmrs_restore_l2_cache(void);
static bool msm_rpmrs_vdd_mem_beyond_limits(struct msm_rpmrs_limits *limits);
static void msm_rpmrs_aggregate_vdd_mem(struct msm_rpmrs_limits *limits);
static void msm_rpmrs_restore_vdd_mem(void);
static bool msm_rpmrs_vdd_dig_beyond_limits(struct msm_rpmrs_limits *limits);
static void msm_rpmrs_aggregate_vdd_dig(struct msm_rpmrs_limits *limits);
static void msm_rpmrs_restore_vdd_dig(void);

static ssize_t msm_rpmrs_resource_attr_show(
	struct kobject *kobj, struct kobj_attribute *attr, char *buf);
static ssize_t msm_rpmrs_resource_attr_store(struct kobject *kobj,
	struct kobj_attribute *attr, const char *buf, size_t count);

#ifdef CONFIG_MSM_L2_SPM
static void *msm_rpmrs_l2_counter_addr;
static int msm_rpmrs_l2_reset_count;
#define L2_PC_COUNTER_ADDR 0x660
#endif

#define MSM_RPMRS_MAX_RS_REGISTER_COUNT 2

#define RPMRS_ATTR(_name) \
	__ATTR(_name, S_IRUGO|S_IWUSR, \
		msm_rpmrs_resource_attr_show, msm_rpmrs_resource_attr_store)

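/*
 * Each low-power resource bundles its RPM register id(s), a sysfs-visible
 * enable_low_power flag, and the callbacks used to screen, aggregate and
 * restore buffered requests around a sleep-set flush.
 */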
struct msm_rpmrs_resource {
	struct msm_rpm_iv_pair rs[MSM_RPMRS_MAX_RS_REGISTER_COUNT];
	uint32_t size;
	char *name;

	uint32_t enable_low_power;

	bool (*beyond_limits)(struct msm_rpmrs_limits *limits);
	void (*aggregate)(struct msm_rpmrs_limits *limits);
	void (*restore)(void);

	struct kobj_attribute ko_attr;
};

static struct msm_rpmrs_resource msm_rpmrs_pxo = {
	.rs[0].id = MSM_RPMRS_ID_PXO_CLK,
	.size = 1,
	.name = "pxo",
	.beyond_limits = msm_rpmrs_pxo_beyond_limits,
	.aggregate = msm_rpmrs_aggregate_pxo,
	.restore = msm_rpmrs_restore_pxo,
	.ko_attr = RPMRS_ATTR(pxo),
};

static struct msm_rpmrs_resource msm_rpmrs_l2_cache = {
	.rs[0].id = MSM_RPMRS_ID_APPS_L2_CACHE_CTL,
	.size = 1,
	.name = "L2_cache",
	.beyond_limits = msm_rpmrs_l2_cache_beyond_limits,
	.aggregate = msm_rpmrs_aggregate_l2_cache,
	.restore = msm_rpmrs_restore_l2_cache,
	.ko_attr = RPMRS_ATTR(L2_cache),
};

static struct msm_rpmrs_resource msm_rpmrs_vdd_mem = {
	.rs[0].id = MSM_RPMRS_ID_VDD_MEM_0,
	.rs[1].id = MSM_RPMRS_ID_VDD_MEM_1,
	.size = 2,
	.name = "vdd_mem",
	.beyond_limits = msm_rpmrs_vdd_mem_beyond_limits,
	.aggregate = msm_rpmrs_aggregate_vdd_mem,
	.restore = msm_rpmrs_restore_vdd_mem,
	.ko_attr = RPMRS_ATTR(vdd_mem),
};

static struct msm_rpmrs_resource msm_rpmrs_vdd_dig = {
	.rs[0].id = MSM_RPMRS_ID_VDD_DIG_0,
	.rs[1].id = MSM_RPMRS_ID_VDD_DIG_1,
	.size = 2,
	.name = "vdd_dig",
	.beyond_limits = msm_rpmrs_vdd_dig_beyond_limits,
	.aggregate = msm_rpmrs_aggregate_vdd_dig,
	.restore = msm_rpmrs_restore_vdd_dig,
	.ko_attr = RPMRS_ATTR(vdd_dig),
};

static struct msm_rpmrs_resource msm_rpmrs_rpm_ctl = {
	.rs[0].id = MSM_RPMRS_ID_RPM_CTL,
	.size = 1,
	.name = "rpm_ctl",
	.beyond_limits = NULL,
	.aggregate = NULL,
	.restore = NULL,
	.ko_attr = RPMRS_ATTR(rpm_ctl),
};

static struct msm_rpmrs_resource *msm_rpmrs_resources[] = {
	&msm_rpmrs_pxo,
	&msm_rpmrs_l2_cache,
	&msm_rpmrs_vdd_mem,
	&msm_rpmrs_vdd_dig,
	&msm_rpmrs_rpm_ctl,
};

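/*
 * Sleep-set requests are buffered here and only flushed to the RPM when
 * entering a power-collapse sleep; "listed" marks the ids owned by the
 * resources above, whose buffered bits survive a flush.
 */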
static uint32_t msm_rpmrs_buffer[MSM_RPM_ID_LAST + 1];
static DECLARE_BITMAP(msm_rpmrs_buffered, MSM_RPM_ID_LAST + 1);
static DECLARE_BITMAP(msm_rpmrs_listed, MSM_RPM_ID_LAST + 1);
static DEFINE_SPINLOCK(msm_rpmrs_lock);

#define MSM_RPMRS_VDD(v) ((v) & (MSM_RPMRS_VDD_MASK))

/******************************************************************************
 * Attribute Definitions
 *****************************************************************************/
static struct attribute *msm_rpmrs_attributes[] = {
	&msm_rpmrs_pxo.ko_attr.attr,
	&msm_rpmrs_l2_cache.ko_attr.attr,
	&msm_rpmrs_vdd_mem.ko_attr.attr,
	&msm_rpmrs_vdd_dig.ko_attr.attr,
	NULL,
};
static struct attribute *msm_rpmrs_mode_attributes[] = {
	&msm_rpmrs_rpm_ctl.ko_attr.attr,
	NULL,
};

static struct attribute_group msm_rpmrs_attribute_group = {
	.attrs = msm_rpmrs_attributes,
};

static struct attribute_group msm_rpmrs_mode_attribute_group = {
	.attrs = msm_rpmrs_mode_attributes,
};

#define GET_RS_FROM_ATTR(attr) \
	(container_of(attr, struct msm_rpmrs_resource, ko_attr))


/******************************************************************************
 * Resource Specific Functions
 *****************************************************************************/

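/* Buffer the RPM timed-trigger parameters: a zero timeout plus the
 * sleep-clock count supplied by the platform sleep code.
 */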
static void msm_rpmrs_aggregate_sclk(uint32_t sclk_count)
{
	msm_rpmrs_buffer[MSM_RPM_ID_TRIGGER_TIMED_TO] = 0;
	set_bit(MSM_RPM_ID_TRIGGER_TIMED_TO, msm_rpmrs_buffered);
	msm_rpmrs_buffer[MSM_RPM_ID_TRIGGER_TIMED_SCLK_COUNT] = sclk_count;
	set_bit(MSM_RPM_ID_TRIGGER_TIMED_SCLK_COUNT, msm_rpmrs_buffered);
}

static void msm_rpmrs_restore_sclk(void)
{
	clear_bit(MSM_RPM_ID_TRIGGER_TIMED_SCLK_COUNT, msm_rpmrs_buffered);
	msm_rpmrs_buffer[MSM_RPM_ID_TRIGGER_TIMED_SCLK_COUNT] = 0;
	clear_bit(MSM_RPM_ID_TRIGGER_TIMED_TO, msm_rpmrs_buffered);
	msm_rpmrs_buffer[MSM_RPM_ID_TRIGGER_TIMED_TO] = 0;
}

static bool msm_rpmrs_pxo_beyond_limits(struct msm_rpmrs_limits *limits)
{
	struct msm_rpmrs_resource *rs = &msm_rpmrs_pxo;
	uint32_t pxo;

	if (rs->enable_low_power && test_bit(rs->rs[0].id, msm_rpmrs_buffered))
		pxo = msm_rpmrs_buffer[rs->rs[0].id];
	else
		pxo = MSM_RPMRS_PXO_ON;

	return pxo > limits->pxo;
}

static void msm_rpmrs_aggregate_pxo(struct msm_rpmrs_limits *limits)
{
	struct msm_rpmrs_resource *rs = &msm_rpmrs_pxo;
	uint32_t *buf = &msm_rpmrs_buffer[rs->rs[0].id];

	if (test_bit(rs->rs[0].id, msm_rpmrs_buffered)) {
		rs->rs[0].value = *buf;
		if (limits->pxo > *buf)
			*buf = limits->pxo;
		if (MSM_RPMRS_DEBUG_OUTPUT & msm_rpmrs_debug_mask)
			pr_info("%s: %d (0x%x)\n", __func__, *buf, *buf);
	}
}

static void msm_rpmrs_restore_pxo(void)
{
	struct msm_rpmrs_resource *rs = &msm_rpmrs_pxo;

	if (test_bit(rs->rs[0].id, msm_rpmrs_buffered))
		msm_rpmrs_buffer[rs->rs[0].id] = rs->rs[0].value;
}

static bool msm_rpmrs_l2_cache_beyond_limits(struct msm_rpmrs_limits *limits)
{
	struct msm_rpmrs_resource *rs = &msm_rpmrs_l2_cache;
	uint32_t l2_cache;

	if (rs->enable_low_power && test_bit(rs->rs[0].id, msm_rpmrs_buffered))
		l2_cache = msm_rpmrs_buffer[rs->rs[0].id];
	else
		l2_cache = MSM_RPMRS_L2_CACHE_ACTIVE;

	return l2_cache > limits->l2_cache;
}

static void msm_rpmrs_aggregate_l2_cache(struct msm_rpmrs_limits *limits)
{
	struct msm_rpmrs_resource *rs = &msm_rpmrs_l2_cache;
	uint32_t *buf = &msm_rpmrs_buffer[rs->rs[0].id];

	if (test_bit(rs->rs[0].id, msm_rpmrs_buffered)) {
		rs->rs[0].value = *buf;
		if (limits->l2_cache > *buf)
			*buf = limits->l2_cache;

		if (MSM_RPMRS_DEBUG_OUTPUT & msm_rpmrs_debug_mask)
			pr_info("%s: %d (0x%x)\n", __func__, *buf, *buf);
	}
}

#ifdef CONFIG_MSM_L2_SPM
static bool msm_spm_l2_cache_beyond_limits(struct msm_rpmrs_limits *limits)
{
	struct msm_rpmrs_resource *rs = &msm_rpmrs_l2_cache;
	uint32_t l2_cache = rs->rs[0].value;

	if (!rs->enable_low_power)
		l2_cache = MSM_RPMRS_L2_CACHE_ACTIVE;

	return l2_cache > limits->l2_cache;
}
#endif

static void msm_rpmrs_restore_l2_cache(void)
{
	struct msm_rpmrs_resource *rs = &msm_rpmrs_l2_cache;

	if (test_bit(rs->rs[0].id, msm_rpmrs_buffered))
		msm_rpmrs_buffer[rs->rs[0].id] = rs->rs[0].value;
}

static bool msm_rpmrs_vdd_mem_beyond_limits(struct msm_rpmrs_limits *limits)
{
	struct msm_rpmrs_resource *rs = &msm_rpmrs_vdd_mem;
	uint32_t vdd_mem;

	if (test_bit(rs->rs[0].id, msm_rpmrs_buffered)) {
		uint32_t buffered_value = msm_rpmrs_buffer[rs->rs[0].id];

		if (rs->enable_low_power == 0)
			vdd_mem = MSM_RPMRS_VDD_MEM_ACTIVE;
		else if (rs->enable_low_power == 1)
			vdd_mem = MSM_RPMRS_VDD_MEM_RET_HIGH;
		else
			vdd_mem = MSM_RPMRS_VDD_MEM_RET_LOW;

		if (MSM_RPMRS_VDD(buffered_value) > MSM_RPMRS_VDD(vdd_mem))
			vdd_mem = buffered_value;
	} else {
		vdd_mem = MSM_RPMRS_VDD_MEM_ACTIVE;
	}

	return MSM_RPMRS_VDD(vdd_mem) >=
		MSM_RPMRS_VDD(limits->vdd_mem_upper_bound);
}

static void msm_rpmrs_aggregate_vdd_mem(struct msm_rpmrs_limits *limits)
{
	struct msm_rpmrs_resource *rs = &msm_rpmrs_vdd_mem;
	uint32_t *buf = &msm_rpmrs_buffer[rs->rs[0].id];

	if (test_bit(rs->rs[0].id, msm_rpmrs_buffered)) {
		rs->rs[0].value = *buf;
		if (MSM_RPMRS_VDD(limits->vdd_mem) > MSM_RPMRS_VDD(*buf)) {
			*buf &= ~MSM_RPMRS_VDD_MASK;
			*buf |= MSM_RPMRS_VDD(limits->vdd_mem);
		}

		if (MSM_RPMRS_DEBUG_OUTPUT & msm_rpmrs_debug_mask)
			pr_info("%s: vdd %d (0x%x)\n", __func__,
				MSM_RPMRS_VDD(*buf), MSM_RPMRS_VDD(*buf));
	}
}

static void msm_rpmrs_restore_vdd_mem(void)
{
	struct msm_rpmrs_resource *rs = &msm_rpmrs_vdd_mem;

	if (test_bit(rs->rs[0].id, msm_rpmrs_buffered))
		msm_rpmrs_buffer[rs->rs[0].id] = rs->rs[0].value;
}

static bool msm_rpmrs_vdd_dig_beyond_limits(struct msm_rpmrs_limits *limits)
{
	struct msm_rpmrs_resource *rs = &msm_rpmrs_vdd_dig;
	uint32_t vdd_dig;

	if (test_bit(rs->rs[0].id, msm_rpmrs_buffered)) {
		uint32_t buffered_value = msm_rpmrs_buffer[rs->rs[0].id];

		if (rs->enable_low_power == 0)
			vdd_dig = MSM_RPMRS_VDD_DIG_ACTIVE;
		else if (rs->enable_low_power == 1)
			vdd_dig = MSM_RPMRS_VDD_DIG_RET_HIGH;
		else
			vdd_dig = MSM_RPMRS_VDD_DIG_RET_LOW;

		if (MSM_RPMRS_VDD(buffered_value) > MSM_RPMRS_VDD(vdd_dig))
			vdd_dig = buffered_value;
	} else {
		vdd_dig = MSM_RPMRS_VDD_DIG_ACTIVE;
	}

	return MSM_RPMRS_VDD(vdd_dig) >=
		MSM_RPMRS_VDD(limits->vdd_dig_upper_bound);
}

static void msm_rpmrs_aggregate_vdd_dig(struct msm_rpmrs_limits *limits)
{
	struct msm_rpmrs_resource *rs = &msm_rpmrs_vdd_dig;
	uint32_t *buf = &msm_rpmrs_buffer[rs->rs[0].id];

	if (test_bit(rs->rs[0].id, msm_rpmrs_buffered)) {
		rs->rs[0].value = *buf;
		if (MSM_RPMRS_VDD(limits->vdd_dig) > MSM_RPMRS_VDD(*buf)) {
			*buf &= ~MSM_RPMRS_VDD_MASK;
			*buf |= MSM_RPMRS_VDD(limits->vdd_dig);
		}

		if (MSM_RPMRS_DEBUG_OUTPUT & msm_rpmrs_debug_mask)
			pr_info("%s: vdd %d (0x%x)\n", __func__,
				MSM_RPMRS_VDD(*buf), MSM_RPMRS_VDD(*buf));
	}
}

static void msm_rpmrs_restore_vdd_dig(void)
{
	struct msm_rpmrs_resource *rs = &msm_rpmrs_vdd_dig;

	if (test_bit(rs->rs[0].id, msm_rpmrs_buffered))
		msm_rpmrs_buffer[rs->rs[0].id] = rs->rs[0].value;
}

/******************************************************************************
 * Buffering Functions
 *****************************************************************************/

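/* A sleep level is only usable if its wakeup interrupts stay detectable:
 * with vdd_dig at or below retention every wakeup interrupt must be
 * MPM-detectable, and with PXO off the wakeup GPIOs must be.
 */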
static bool msm_rpmrs_irqs_detectable(struct msm_rpmrs_limits *limits,
		bool irqs_detect, bool gpio_detect)
{
	if (limits->vdd_dig <= MSM_RPMRS_VDD_DIG_RET_HIGH)
		return irqs_detect;

	if (limits->pxo == MSM_RPMRS_PXO_OFF)
		return gpio_detect;

	return true;
}

static bool msm_rpmrs_use_mpm(struct msm_rpmrs_limits *limits)
{
	return (limits->pxo == MSM_RPMRS_PXO_OFF) ||
		(limits->vdd_dig <= MSM_RPMRS_VDD_DIG_RET_HIGH);
}

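/* Re-evaluate which power-collapse levels are available: a level is masked
 * out as soon as any resource's buffered request exceeds that level's limits.
 */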
static void msm_rpmrs_update_levels(void)
{
	int i, k;

	for (i = 0; i < msm_rpmrs_level_count; i++) {
		struct msm_rpmrs_level *level = &msm_rpmrs_levels[i];

		if (level->sleep_mode != MSM_PM_SLEEP_MODE_POWER_COLLAPSE)
			continue;

		level->available = true;

		for (k = 0; k < ARRAY_SIZE(msm_rpmrs_resources); k++) {
			struct msm_rpmrs_resource *rs = msm_rpmrs_resources[k];

			if (rs->beyond_limits &&
					rs->beyond_limits(&level->rs_limits)) {
				level->available = false;
				break;
			}
		}
	}
}

/*
 * Return value:
 *   0: none of the entries in <req> are on our resource list
 *   1: one or more entries in <req> are on our resource list
 * -EINVAL: invalid id in <req> array
 */
static int msm_rpmrs_buffer_request(struct msm_rpm_iv_pair *req, int count)
{
	bool listed;
	int i;

	for (i = 0; i < count; i++)
		if (req[i].id > MSM_RPM_ID_LAST)
			return -EINVAL;

	for (i = 0, listed = false; i < count; i++) {
		msm_rpmrs_buffer[req[i].id] = req[i].value;
		set_bit(req[i].id, msm_rpmrs_buffered);

		if (MSM_RPMRS_DEBUG_BUFFER & msm_rpmrs_debug_mask)
			pr_info("%s: reg %d: 0x%x\n",
				__func__, req[i].id, req[i].value);

		if (listed)
			continue;

		if (test_bit(req[i].id, msm_rpmrs_listed))
			listed = true;
	}

	return listed ? 1 : 0;
}

/*
 * Return value:
 *   0: none of the entries in <req> are on our resource list
 *   1: one or more entries in <req> are on our resource list
 * -EINVAL: invalid id in <req> array
 */
static int msm_rpmrs_clear_buffer(struct msm_rpm_iv_pair *req, int count)
{
	bool listed;
	int i;

	for (i = 0; i < count; i++)
		if (req[i].id > MSM_RPM_ID_LAST)
			return -EINVAL;

	for (i = 0, listed = false; i < count; i++) {
		msm_rpmrs_buffer[req[i].id] = 0;
		clear_bit(req[i].id, msm_rpmrs_buffered);

		if (MSM_RPMRS_DEBUG_BUFFER & msm_rpmrs_debug_mask)
			pr_info("%s: reg %d\n", __func__, req[i].id);

		if (listed)
			continue;

		if (test_bit(req[i].id, msm_rpmrs_listed))
			listed = true;
	}

	return listed ? 1 : 0;
}

#ifdef CONFIG_MSM_L2_SPM
static int msm_rpmrs_flush_L2(struct msm_rpmrs_limits *limits, int notify_rpm)
{
	int rc = 0;
	int lpm;

	switch (limits->l2_cache) {
	case MSM_RPMRS_L2_CACHE_HSFS_OPEN:
		lpm = MSM_SPM_L2_MODE_POWER_COLLAPSE;
		/* Increment the counter for TZ to init L2 on warmboot */
		/* Barrier in msm_spm_l2_set_low_power_mode */
		BUG_ON(!msm_rpmrs_l2_counter_addr);
		writel_relaxed(++msm_rpmrs_l2_reset_count,
				msm_rpmrs_l2_counter_addr);
		break;
	case MSM_RPMRS_L2_CACHE_GDHS:
		lpm = MSM_SPM_L2_MODE_GDHS;
		break;
	case MSM_RPMRS_L2_CACHE_RETENTION:
		lpm = MSM_SPM_L2_MODE_RETENTION;
		break;
	default:
	case MSM_RPMRS_L2_CACHE_ACTIVE:
		lpm = MSM_SPM_L2_MODE_DISABLED;
		break;
	}

	rc = msm_spm_l2_set_low_power_mode(lpm, notify_rpm);
	if (MSM_RPMRS_DEBUG_BUFFER & msm_rpmrs_debug_mask)
		pr_info("%s: Requesting low power mode %d returned %d\n",
			__func__, lpm, rc);

	return rc;
}
static void msm_rpmrs_L2_restore(struct msm_rpmrs_limits *limits,
		bool notify_rpm, bool collapsed)
{
	msm_spm_l2_set_low_power_mode(MSM_SPM_MODE_DISABLED, notify_rpm);
	if (!collapsed && (limits->l2_cache == MSM_RPMRS_L2_CACHE_HSFS_OPEN))
		writel_relaxed(--msm_rpmrs_l2_reset_count,
				msm_rpmrs_l2_counter_addr);
}
#else
static int msm_rpmrs_flush_L2(struct msm_rpmrs_limits *limits, int notify_rpm)
{
	return 0;
}
static void msm_rpmrs_L2_restore(struct msm_rpmrs_limits *limits,
		bool notify_rpm, bool collapsed)
{
}
#endif

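/* Aggregate every buffered request against the chosen level's limits, hand
 * the resulting sleep set to the RPM, then restore the buffer to the values
 * the drivers originally requested.
 */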
static int msm_rpmrs_flush_buffer(
	uint32_t sclk_count, struct msm_rpmrs_limits *limits, int from_idle)
{
	struct msm_rpm_iv_pair *req;
	int count;
	int rc;
	int i;

	msm_rpmrs_aggregate_sclk(sclk_count);
	for (i = 0; i < ARRAY_SIZE(msm_rpmrs_resources); i++) {
		if (msm_rpmrs_resources[i]->aggregate)
			msm_rpmrs_resources[i]->aggregate(limits);
	}

	count = bitmap_weight(msm_rpmrs_buffered, MSM_RPM_ID_LAST + 1);

	req = kmalloc(sizeof(*req) * count, GFP_ATOMIC);
	if (!req) {
		rc = -ENOMEM;
		goto flush_buffer_restore;
	}

	count = 0;
	i = find_first_bit(msm_rpmrs_buffered, MSM_RPM_ID_LAST + 1);

	while (i < MSM_RPM_ID_LAST + 1) {
		if (MSM_RPMRS_DEBUG_OUTPUT & msm_rpmrs_debug_mask)
			pr_info("%s: reg %d: 0x%x\n",
				__func__, i, msm_rpmrs_buffer[i]);

		req[count].id = i;
		req[count].value = msm_rpmrs_buffer[i];
		count++;

		i = find_next_bit(msm_rpmrs_buffered, MSM_RPM_ID_LAST+1, i+1);
	}

	rc = msm_rpm_set_noirq(MSM_RPM_CTX_SET_SLEEP, req, count);
	kfree(req);

	if (rc)
		goto flush_buffer_restore;

	bitmap_and(msm_rpmrs_buffered,
		msm_rpmrs_buffered, msm_rpmrs_listed, MSM_RPM_ID_LAST + 1);

flush_buffer_restore:
	for (i = 0; i < ARRAY_SIZE(msm_rpmrs_resources); i++) {
		if (msm_rpmrs_resources[i]->restore)
			msm_rpmrs_resources[i]->restore();
	}
	msm_rpmrs_restore_sclk();

	if (rc)
		pr_err("%s: failed: %d\n", __func__, rc);
	return rc;
}

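/* Sleep-set (SET_SLEEP) requests are only buffered locally here; active-set
 * requests are passed straight through to the RPM driver.
 */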
static int msm_rpmrs_set_common(
	int ctx, struct msm_rpm_iv_pair *req, int count, bool noirq)
{
	if (ctx == MSM_RPM_CTX_SET_SLEEP) {
		unsigned long flags;
		int rc;

		spin_lock_irqsave(&msm_rpmrs_lock, flags);
		rc = msm_rpmrs_buffer_request(req, count);
		if (rc > 0) {
			msm_rpmrs_update_levels();
			rc = 0;
		}
		spin_unlock_irqrestore(&msm_rpmrs_lock, flags);

		return rc;
	}

	if (noirq)
		return msm_rpm_set_noirq(ctx, req, count);
	else
		return msm_rpm_set(ctx, req, count);
}

static int msm_rpmrs_clear_common(
	int ctx, struct msm_rpm_iv_pair *req, int count, bool noirq)
{
	if (ctx == MSM_RPM_CTX_SET_SLEEP) {
		unsigned long flags;
		int rc;

		spin_lock_irqsave(&msm_rpmrs_lock, flags);
		rc = msm_rpmrs_clear_buffer(req, count);
		if (rc > 0) {
			msm_rpmrs_update_levels();
			rc = 0;
		}
		spin_unlock_irqrestore(&msm_rpmrs_lock, flags);

		if (rc < 0)
			return rc;
	}

	if (noirq)
		return msm_rpm_clear_noirq(ctx, req, count);
	else
		return msm_rpm_clear(ctx, req, count);
}

/******************************************************************************
 * Attribute Functions
 *****************************************************************************/

static ssize_t msm_rpmrs_resource_attr_show(
	struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct kernel_param kp;
	unsigned long flags;
	unsigned int temp;
	int rc;

	spin_lock_irqsave(&msm_rpmrs_lock, flags);
	/* special case active-set signal for MSM_RPMRS_ID_RPM_CTL */
	if (GET_RS_FROM_ATTR(attr)->rs[0].id == MSM_RPMRS_ID_RPM_CTL)
		temp = GET_RS_FROM_ATTR(attr)->rs[0].value;
	else
		temp = GET_RS_FROM_ATTR(attr)->enable_low_power;
	spin_unlock_irqrestore(&msm_rpmrs_lock, flags);

	kp.arg = &temp;
	rc = param_get_uint(buf, &kp);

	if (rc > 0) {
		strcat(buf, "\n");
		rc++;
	}

	return rc;
}

static ssize_t msm_rpmrs_resource_attr_store(struct kobject *kobj,
	struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct kernel_param kp;
	unsigned long flags;
	unsigned int temp;
	int rc;

	kp.arg = &temp;
	rc = param_set_uint(buf, &kp);
	if (rc)
		return rc;

	spin_lock_irqsave(&msm_rpmrs_lock, flags);
	GET_RS_FROM_ATTR(attr)->enable_low_power = temp;

	/* special case active-set signal for MSM_RPMRS_ID_RPM_CTL */
	if (GET_RS_FROM_ATTR(attr)->rs[0].id == MSM_RPMRS_ID_RPM_CTL) {
		struct msm_rpm_iv_pair req;
		req.id = MSM_RPMRS_ID_RPM_CTL;
		req.value = GET_RS_FROM_ATTR(attr)->enable_low_power;
		GET_RS_FROM_ATTR(attr)->rs[0].value = req.value;

		rc = msm_rpm_set_noirq(MSM_RPM_CTX_SET_0, &req, 1);
		if (rc) {
			pr_err("%s: failed to request RPM_CTL to %d: %d\n",
				__func__, req.value, rc);
		}
	}

	msm_rpmrs_update_levels();
	spin_unlock_irqrestore(&msm_rpmrs_lock, flags);

	return count;
}

static int __init msm_rpmrs_resource_sysfs_add(void)
{
	struct kobject *module_kobj = NULL;
	struct kobject *low_power_kobj = NULL;
	struct kobject *mode_kobj = NULL;
	int rc = 0;

	module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
	if (!module_kobj) {
		pr_err("%s: cannot find kobject for module %s\n",
			__func__, KBUILD_MODNAME);
		rc = -ENOENT;
		goto resource_sysfs_add_exit;
	}

	low_power_kobj = kobject_create_and_add(
				"enable_low_power", module_kobj);
	if (!low_power_kobj) {
		pr_err("%s: cannot create kobject\n", __func__);
		rc = -ENOMEM;
		goto resource_sysfs_add_exit;
	}

	mode_kobj = kobject_create_and_add(
				"mode", module_kobj);
	if (!mode_kobj) {
		pr_err("%s: cannot create kobject\n", __func__);
		rc = -ENOMEM;
		goto resource_sysfs_add_exit;
	}

	rc = sysfs_create_group(low_power_kobj, &msm_rpmrs_attribute_group);
	if (rc) {
		pr_err("%s: cannot create kobject attribute group\n", __func__);
		goto resource_sysfs_add_exit;
	}

	rc = sysfs_create_group(mode_kobj, &msm_rpmrs_mode_attribute_group);
	if (rc) {
		pr_err("%s: cannot create kobject attribute group\n", __func__);
		goto resource_sysfs_add_exit;
	}

resource_sysfs_add_exit:
	if (rc) {
		if (low_power_kobj)
			sysfs_remove_group(low_power_kobj,
					&msm_rpmrs_attribute_group);
		kobject_del(low_power_kobj);
		kobject_del(mode_kobj);
	}

	return rc;
}

/******************************************************************************
 * Public Functions
 *****************************************************************************/

int msm_rpmrs_set(int ctx, struct msm_rpm_iv_pair *req, int count)
{
	return msm_rpmrs_set_common(ctx, req, count, false);
}

int msm_rpmrs_set_noirq(int ctx, struct msm_rpm_iv_pair *req, int count)
{
	WARN(!irqs_disabled(), "msm_rpmrs_set_noirq can only be called "
		"safely when local irqs are disabled. Consider using "
		"msm_rpmrs_set or msm_rpmrs_set_nosleep instead.");
	return msm_rpmrs_set_common(ctx, req, count, true);
}

/* Allow individual bits of an RPM resource to be set; currently this is used
 * only for the active-context resource RPM_CTL. The API is generic enough
 * that it could be extended to other resources in the future.
 */
int msm_rpmrs_set_bits_noirq(int ctx, struct msm_rpm_iv_pair *req, int count,
		int *mask)
{
	unsigned long flags;
	int i, j;
	int rc = -1;
	struct msm_rpmrs_resource *rs;

	if (ctx != MSM_RPM_CTX_SET_0)
		return -ENOSYS;

	spin_lock_irqsave(&msm_rpmrs_lock, flags);
	for (i = 0; i < ARRAY_SIZE(msm_rpmrs_resources); i++) {
		rs = msm_rpmrs_resources[i];
		if (rs->rs[0].id == req[0].id && rs->size == count) {
			for (j = 0; j < rs->size; j++) {
				rs->rs[j].value &= ~mask[j];
				rs->rs[j].value |= req[j].value & mask[j];
			}
			break;
		}
	}

	if (i != ARRAY_SIZE(msm_rpmrs_resources)) {
		rc = msm_rpm_set_noirq(MSM_RPM_CTX_SET_0, &rs->rs[0], rs->size);
		if (rc) {
			for (j = 0; j < rs->size; j++) {
				pr_err("%s: failed to request %d to %d: %d\n",
					__func__,
					rs->rs[j].id, rs->rs[j].value, rc);
			}
		}
	}
	spin_unlock_irqrestore(&msm_rpmrs_lock, flags);

	return rc;
}

int msm_rpmrs_clear(int ctx, struct msm_rpm_iv_pair *req, int count)
{
	return msm_rpmrs_clear_common(ctx, req, count, false);
}

int msm_rpmrs_clear_noirq(int ctx, struct msm_rpm_iv_pair *req, int count)
{
	WARN(!irqs_disabled(), "msm_rpmrs_clear_noirq can only be called "
		"safely when local irqs are disabled. Consider using "
		"msm_rpmrs_clear or msm_rpmrs_clear_nosleep instead.");
	return msm_rpmrs_clear_common(ctx, req, count, true);
}

void msm_rpmrs_show_resources(void)
{
	struct msm_rpmrs_resource *rs;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&msm_rpmrs_lock, flags);
	for (i = 0; i < ARRAY_SIZE(msm_rpmrs_resources); i++) {
		rs = msm_rpmrs_resources[i];
		if (rs->rs[0].id < MSM_RPM_ID_LAST + 1)
			pr_info("%s: resource %s: buffered %d, value 0x%x\n",
				__func__, rs->name,
				test_bit(rs->rs[0].id, msm_rpmrs_buffered),
				msm_rpmrs_buffer[rs->rs[0].id]);
		else
			pr_info("%s: resource %s: value %d\n",
				__func__, rs->name, rs->rs[0].value);
	}
	spin_unlock_irqrestore(&msm_rpmrs_lock, flags);
}

struct msm_rpmrs_limits *msm_rpmrs_lowest_limits(
	bool from_idle, enum msm_pm_sleep_mode sleep_mode, uint32_t latency_us,
	uint32_t sleep_us)
{
	unsigned int cpu = smp_processor_id();
	struct msm_rpmrs_level *best_level = NULL;
	bool irqs_detectable = false;
	bool gpio_detectable = false;
	int i;

	if (sleep_mode == MSM_PM_SLEEP_MODE_POWER_COLLAPSE) {
		irqs_detectable = msm_mpm_irqs_detectable(from_idle);
		gpio_detectable = msm_mpm_gpio_irqs_detectable(from_idle);
	}

	for (i = 0; i < msm_rpmrs_level_count; i++) {
		struct msm_rpmrs_level *level = &msm_rpmrs_levels[i];
		uint32_t power;

		if (!level->available)
			continue;

		if (sleep_mode != level->sleep_mode)
			continue;

		if (latency_us < level->latency_us)
			continue;

		if (!msm_rpmrs_irqs_detectable(&level->rs_limits,
					irqs_detectable, gpio_detectable))
			continue;

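		/* Estimate average power for this level: amortize the fixed
		 * entry/exit energy overhead over the expected sleep time,
		 * approaching the steady-state power for long sleeps.
		 */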
		if (sleep_us <= 1) {
			power = level->energy_overhead;
		} else if (sleep_us <= level->time_overhead_us) {
			power = level->energy_overhead / sleep_us;
		} else if ((sleep_us >> 10) > level->time_overhead_us) {
			power = level->steady_state_power;
		} else {
			power = (sleep_us - level->time_overhead_us);
			power *= level->steady_state_power;
			power /= sleep_us;
			power += level->energy_overhead / sleep_us;
		}

		if (!best_level ||
				best_level->rs_limits.power[cpu] >= power) {
			level->rs_limits.latency_us[cpu] = level->latency_us;
			level->rs_limits.power[cpu] = power;
			best_level = level;
		}
	}

	return best_level ? &best_level->rs_limits : NULL;
}

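/* Program the sleep state: flush L2 via the SPM first, then, if the RPM is
 * to be notified, flush the buffered sleep set and arm the MPM for wakeup.
 */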
int msm_rpmrs_enter_sleep(uint32_t sclk_count, struct msm_rpmrs_limits *limits,
		bool from_idle, bool notify_rpm)
{
	int rc = 0;

	rc = msm_rpmrs_flush_L2(limits, notify_rpm);
	if (rc)
		return rc;

	if (notify_rpm) {
		rc = msm_rpmrs_flush_buffer(sclk_count, limits, from_idle);
		if (rc)
			return rc;

		if (msm_rpmrs_use_mpm(limits))
			msm_mpm_enter_sleep(from_idle);
	}

	return rc;
}

void msm_rpmrs_exit_sleep(struct msm_rpmrs_limits *limits, bool from_idle,
		bool notify_rpm, bool collapsed)
{
	/* Disable L2 for now; we don't want L2 to do retention by default */
	msm_rpmrs_L2_restore(limits, notify_rpm, collapsed);

	if (msm_rpmrs_use_mpm(limits))
		msm_mpm_exit_sleep(from_idle);
}

#ifdef CONFIG_MSM_L2_SPM
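/* Keep the L2 vote consistent with CPU hotplug state: with more than one
 * CPU online the L2 must stay active; once only one CPU remains it may be
 * fully power-collapsed (HSFS open).
 */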
static int rpmrs_cpu_callback(struct notifier_block *nfb,
		unsigned long action, void *hcpu)
{
	switch (action) {
	case CPU_ONLINE_FROZEN:
	case CPU_ONLINE:
		if (num_online_cpus() > 1)
			msm_rpmrs_l2_cache.rs[0].value =
				MSM_RPMRS_L2_CACHE_ACTIVE;
		break;
	case CPU_DEAD_FROZEN:
	case CPU_DEAD:
		if (num_online_cpus() == 1)
			msm_rpmrs_l2_cache.rs[0].value =
				MSM_RPMRS_L2_CACHE_HSFS_OPEN;
		break;
	}

	msm_rpmrs_update_levels();
	return NOTIFY_OK;
}

static struct notifier_block __refdata rpmrs_cpu_notifier = {
	.notifier_call = rpmrs_cpu_callback,
};
#endif

int __init msm_rpmrs_levels_init(struct msm_rpmrs_level *levels, int size)
{
	msm_rpmrs_levels = kzalloc(sizeof(struct msm_rpmrs_level) * size,
			GFP_KERNEL);
	if (!msm_rpmrs_levels)
		return -ENOMEM;
	msm_rpmrs_level_count = size;
	memcpy(msm_rpmrs_levels, levels, size * sizeof(struct msm_rpmrs_level));

	return 0;
}

static int __init msm_rpmrs_init(void)
{
	struct msm_rpm_iv_pair req;
	int rc;

	if (cpu_is_apq8064())
		return -ENODEV;

	BUG_ON(!msm_rpmrs_levels);

	if (cpu_is_msm8x60()) {
		req.id = MSM_RPMRS_ID_APPS_L2_CACHE_CTL;
		req.value = 1;

		rc = msm_rpm_set(MSM_RPM_CTX_SET_0, &req, 1);
		if (rc) {
			pr_err("%s: failed to request L2 cache: %d\n",
				__func__, rc);
			goto init_exit;
		}

		req.id = MSM_RPMRS_ID_APPS_L2_CACHE_CTL;
		req.value = 0;

		rc = msm_rpmrs_set(MSM_RPM_CTX_SET_SLEEP, &req, 1);
		if (rc) {
			pr_err("%s: failed to initialize L2 cache for sleep: "
				"%d\n", __func__, rc);
			goto init_exit;
		}
	}

	rc = msm_rpmrs_resource_sysfs_add();

init_exit:
	return rc;
}
device_initcall(msm_rpmrs_init);

static int __init msm_rpmrs_early_init(void)
{
	int i, k;

	/* Initialize listed bitmap for valid resource IDs */
	for (i = 0; i < ARRAY_SIZE(msm_rpmrs_resources); i++) {
		for (k = 0; k < msm_rpmrs_resources[i]->size; k++)
			set_bit(msm_rpmrs_resources[i]->rs[k].id,
				msm_rpmrs_listed);
	}

	return 0;
}
early_initcall(msm_rpmrs_early_init);

#ifdef CONFIG_MSM_L2_SPM
static int __init msm_rpmrs_l2_counter_init(void)
{
	msm_rpmrs_l2_counter_addr = MSM_IMEM_BASE + L2_PC_COUNTER_ADDR;
	writel_relaxed(msm_rpmrs_l2_reset_count, msm_rpmrs_l2_counter_addr);
	mb();

	msm_rpmrs_l2_cache.beyond_limits = msm_spm_l2_cache_beyond_limits;
	msm_rpmrs_l2_cache.aggregate = NULL;
	msm_rpmrs_l2_cache.restore = NULL;

	register_hotcpu_notifier(&rpmrs_cpu_notifier);

	return 0;
}
early_initcall(msm_rpmrs_l2_counter_init);
#endif