/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/bug.h>
#include <linux/mutex.h>
#include <linux/proc_fs.h>
#include <linux/spinlock.h>
#include <linux/cpu.h>
#include <mach/rpm.h>
#include <mach/msm_iomap.h>
#include <asm/mach-types.h>
#include <linux/io.h>
#include <mach/socinfo.h>
#include "mpm.h"
#include "rpm_resources.h"
#include "spm.h"

/******************************************************************************
 * Debug Definitions
 *****************************************************************************/

enum {
	MSM_RPMRS_DEBUG_OUTPUT = BIT(0),
	MSM_RPMRS_DEBUG_BUFFER = BIT(1),
};

static int msm_rpmrs_debug_mask;
module_param_named(
	debug_mask, msm_rpmrs_debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP
);
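
/*
 * debug_mask is writable at runtime.  Assuming this file is built in with
 * KBUILD_MODNAME "rpm_resources", bit 0 (output) and bit 1 (buffer) can
 * both be enabled with, e.g.:
 *
 *	echo 3 > /sys/module/rpm_resources/parameters/debug_mask
 */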

static struct msm_rpmrs_level *msm_rpmrs_levels;
static int msm_rpmrs_level_count;

static bool msm_rpmrs_pxo_beyond_limits(struct msm_rpmrs_limits *limits);
static void msm_rpmrs_aggregate_pxo(struct msm_rpmrs_limits *limits);
static void msm_rpmrs_restore_pxo(void);
static bool msm_rpmrs_l2_cache_beyond_limits(struct msm_rpmrs_limits *limits);
static void msm_rpmrs_aggregate_l2_cache(struct msm_rpmrs_limits *limits);
static void msm_rpmrs_restore_l2_cache(void);
static bool msm_rpmrs_vdd_mem_beyond_limits(struct msm_rpmrs_limits *limits);
static void msm_rpmrs_aggregate_vdd_mem(struct msm_rpmrs_limits *limits);
static void msm_rpmrs_restore_vdd_mem(void);
static bool msm_rpmrs_vdd_dig_beyond_limits(struct msm_rpmrs_limits *limits);
static void msm_rpmrs_aggregate_vdd_dig(struct msm_rpmrs_limits *limits);
static void msm_rpmrs_restore_vdd_dig(void);

#ifdef CONFIG_MSM_L2_SPM
static void *msm_rpmrs_l2_counter_addr;
static int msm_rpmrs_l2_reset_count;
#define L2_PC_COUNTER_ADDR 0x660
#endif

#define MSM_RPMRS_MAX_RS_REGISTER_COUNT 2

struct msm_rpmrs_resource {
	struct msm_rpm_iv_pair rs[MSM_RPMRS_MAX_RS_REGISTER_COUNT];
	uint32_t size;
	char *name;

	uint32_t enable_low_power;

	bool (*beyond_limits)(struct msm_rpmrs_limits *limits);
	void (*aggregate)(struct msm_rpmrs_limits *limits);
	void (*restore)(void);
};
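
/*
 * Each resource supplies up to three callbacks:
 *  - beyond_limits(): returns true if the resource's currently requested
 *    low-power state is deeper than a given sleep level allows, which
 *    makes that level unavailable;
 *  - aggregate(): clamps the buffered request to the level's limits right
 *    before the sleep set is flushed to the RPM;
 *  - restore(): undoes the clamping once the flush is done.
 */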

static struct msm_rpmrs_resource msm_rpmrs_pxo = {
	.rs[0].id = MSM_RPMRS_ID_PXO_CLK,
	.size = 1,
	.name = "pxo",
	.beyond_limits = msm_rpmrs_pxo_beyond_limits,
	.aggregate = msm_rpmrs_aggregate_pxo,
	.restore = msm_rpmrs_restore_pxo,
};

static struct msm_rpmrs_resource msm_rpmrs_l2_cache = {
	.rs[0].id = MSM_RPMRS_ID_APPS_L2_CACHE_CTL,
	.size = 1,
	.name = "L2_cache",
	.beyond_limits = msm_rpmrs_l2_cache_beyond_limits,
	.aggregate = msm_rpmrs_aggregate_l2_cache,
	.restore = msm_rpmrs_restore_l2_cache,
};

static struct msm_rpmrs_resource msm_rpmrs_vdd_mem = {
	.rs[0].id = MSM_RPMRS_ID_VDD_MEM_0,
	.rs[1].id = MSM_RPMRS_ID_VDD_MEM_1,
	.size = 2,
	.name = "vdd_mem",
	.beyond_limits = msm_rpmrs_vdd_mem_beyond_limits,
	.aggregate = msm_rpmrs_aggregate_vdd_mem,
	.restore = msm_rpmrs_restore_vdd_mem,
};

static struct msm_rpmrs_resource msm_rpmrs_vdd_dig = {
	.rs[0].id = MSM_RPMRS_ID_VDD_DIG_0,
	.rs[1].id = MSM_RPMRS_ID_VDD_DIG_1,
	.size = 2,
	.name = "vdd_dig",
	.beyond_limits = msm_rpmrs_vdd_dig_beyond_limits,
	.aggregate = msm_rpmrs_aggregate_vdd_dig,
	.restore = msm_rpmrs_restore_vdd_dig,
};

static struct msm_rpmrs_resource msm_rpmrs_rpm_cpu = {
	.rs[0].id = MSM_RPMRS_ID_RPM_CTL,
	.size = 1,
	.name = "rpm_cpu",
	.beyond_limits = NULL,
	.aggregate = NULL,
	.restore = NULL,
};

static struct msm_rpmrs_resource *msm_rpmrs_resources[] = {
	&msm_rpmrs_pxo,
	&msm_rpmrs_l2_cache,
	&msm_rpmrs_vdd_mem,
	&msm_rpmrs_vdd_dig,
	&msm_rpmrs_rpm_cpu,
};

static uint32_t msm_rpmrs_buffer[MSM_RPM_ID_LAST + 1];
static DECLARE_BITMAP(msm_rpmrs_buffered, MSM_RPM_ID_LAST + 1);
static DECLARE_BITMAP(msm_rpmrs_listed, MSM_RPM_ID_LAST + 1);
static DEFINE_SPINLOCK(msm_rpmrs_lock);

#define MSM_RPMRS_VDD_MASK  0xfff
#define MSM_RPMRS_VDD(v)  ((v) & (MSM_RPMRS_VDD_MASK))
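
/*
 * Voltage requests carry the level in the low 12 bits; MSM_RPMRS_VDD()
 * strips any upper control bits before the vdd_mem/vdd_dig comparisons
 * below.  For example, MSM_RPMRS_VDD(0x1abc) == 0xabc (value illustrative,
 * not a real vdd_mem/vdd_dig level).
 */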

/******************************************************************************
 * Attribute Definitions
 *****************************************************************************/

struct msm_rpmrs_kboj_attribute {
	struct msm_rpmrs_resource *rs;
	struct kobj_attribute ka;
};

#define GET_RS_FROM_ATTR(attr) \
	(container_of(attr, struct msm_rpmrs_kboj_attribute, ka)->rs)

struct msm_rpmrs_resource_sysfs {
	struct attribute_group attr_group;
	struct attribute *attrs[2];
	struct msm_rpmrs_kboj_attribute kas;
};
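
/*
 * GET_RS_FROM_ATTR() maps a kobj_attribute back to the resource that owns
 * it: the attribute is embedded in a msm_rpmrs_kboj_attribute, so
 * container_of() recovers the wrapper, and ->rs the resource.
 */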

/******************************************************************************
 * Resource Specific Functions
 *****************************************************************************/

static void msm_rpmrs_aggregate_sclk(uint32_t sclk_count)
{
	msm_rpmrs_buffer[MSM_RPM_ID_TRIGGER_TIMED_TO] = 0;
	set_bit(MSM_RPM_ID_TRIGGER_TIMED_TO, msm_rpmrs_buffered);
	msm_rpmrs_buffer[MSM_RPM_ID_TRIGGER_TIMED_SCLK_COUNT] = sclk_count;
	set_bit(MSM_RPM_ID_TRIGGER_TIMED_SCLK_COUNT, msm_rpmrs_buffered);
}

static void msm_rpmrs_restore_sclk(void)
{
	clear_bit(MSM_RPM_ID_TRIGGER_TIMED_SCLK_COUNT, msm_rpmrs_buffered);
	msm_rpmrs_buffer[MSM_RPM_ID_TRIGGER_TIMED_SCLK_COUNT] = 0;
	clear_bit(MSM_RPM_ID_TRIGGER_TIMED_TO, msm_rpmrs_buffered);
	msm_rpmrs_buffer[MSM_RPM_ID_TRIGGER_TIMED_TO] = 0;
}

static bool msm_rpmrs_pxo_beyond_limits(struct msm_rpmrs_limits *limits)
{
	struct msm_rpmrs_resource *rs = &msm_rpmrs_pxo;
	uint32_t pxo;

	if (rs->enable_low_power && test_bit(rs->rs[0].id, msm_rpmrs_buffered))
		pxo = msm_rpmrs_buffer[rs->rs[0].id];
	else
		pxo = MSM_RPMRS_PXO_ON;

	return pxo > limits->pxo;
}

static void msm_rpmrs_aggregate_pxo(struct msm_rpmrs_limits *limits)
{
	struct msm_rpmrs_resource *rs = &msm_rpmrs_pxo;
	uint32_t *buf = &msm_rpmrs_buffer[rs->rs[0].id];

	if (test_bit(rs->rs[0].id, msm_rpmrs_buffered)) {
		rs->rs[0].value = *buf;
		if (limits->pxo > *buf)
			*buf = limits->pxo;
		if (MSM_RPMRS_DEBUG_OUTPUT & msm_rpmrs_debug_mask)
			pr_info("%s: %d (0x%x)\n", __func__, *buf, *buf);
	}
}

static void msm_rpmrs_restore_pxo(void)
{
	struct msm_rpmrs_resource *rs = &msm_rpmrs_pxo;

	if (test_bit(rs->rs[0].id, msm_rpmrs_buffered))
		msm_rpmrs_buffer[rs->rs[0].id] = rs->rs[0].value;
}

static bool msm_rpmrs_l2_cache_beyond_limits(struct msm_rpmrs_limits *limits)
{
	struct msm_rpmrs_resource *rs = &msm_rpmrs_l2_cache;
	uint32_t l2_cache;

	if (rs->enable_low_power && test_bit(rs->rs[0].id, msm_rpmrs_buffered))
		l2_cache = msm_rpmrs_buffer[rs->rs[0].id];
	else
		l2_cache = MSM_RPMRS_L2_CACHE_ACTIVE;

	return l2_cache > limits->l2_cache;
}

static void msm_rpmrs_aggregate_l2_cache(struct msm_rpmrs_limits *limits)
{
	struct msm_rpmrs_resource *rs = &msm_rpmrs_l2_cache;
	uint32_t *buf = &msm_rpmrs_buffer[rs->rs[0].id];

	if (test_bit(rs->rs[0].id, msm_rpmrs_buffered)) {
		rs->rs[0].value = *buf;
		if (limits->l2_cache > *buf)
			*buf = limits->l2_cache;

		if (MSM_RPMRS_DEBUG_OUTPUT & msm_rpmrs_debug_mask)
			pr_info("%s: %d (0x%x)\n", __func__, *buf, *buf);
	}
}

#ifdef CONFIG_MSM_L2_SPM
static bool msm_spm_l2_cache_beyond_limits(struct msm_rpmrs_limits *limits)
{
	struct msm_rpmrs_resource *rs = &msm_rpmrs_l2_cache;
	uint32_t l2_cache = rs->rs[0].value;

	if (!rs->enable_low_power)
		l2_cache = MSM_RPMRS_L2_CACHE_ACTIVE;

	return l2_cache > limits->l2_cache;
}
#endif

static void msm_rpmrs_restore_l2_cache(void)
{
	struct msm_rpmrs_resource *rs = &msm_rpmrs_l2_cache;

	if (test_bit(rs->rs[0].id, msm_rpmrs_buffered))
		msm_rpmrs_buffer[rs->rs[0].id] = rs->rs[0].value;
}

static bool msm_rpmrs_vdd_mem_beyond_limits(struct msm_rpmrs_limits *limits)
{
	struct msm_rpmrs_resource *rs = &msm_rpmrs_vdd_mem;
	uint32_t vdd_mem;

	if (test_bit(rs->rs[0].id, msm_rpmrs_buffered)) {
		uint32_t buffered_value = msm_rpmrs_buffer[rs->rs[0].id];

		if (rs->enable_low_power == 0)
			vdd_mem = MSM_RPMRS_VDD_MEM_ACTIVE;
		else if (rs->enable_low_power == 1)
			vdd_mem = MSM_RPMRS_VDD_MEM_RET_HIGH;
		else
			vdd_mem = MSM_RPMRS_VDD_MEM_RET_LOW;

		if (MSM_RPMRS_VDD(buffered_value) > MSM_RPMRS_VDD(vdd_mem))
			vdd_mem = buffered_value;
	} else {
		vdd_mem = MSM_RPMRS_VDD_MEM_ACTIVE;
	}

	return MSM_RPMRS_VDD(vdd_mem) >=
		MSM_RPMRS_VDD(limits->vdd_mem_upper_bound);
}

static void msm_rpmrs_aggregate_vdd_mem(struct msm_rpmrs_limits *limits)
{
	struct msm_rpmrs_resource *rs = &msm_rpmrs_vdd_mem;
	uint32_t *buf = &msm_rpmrs_buffer[rs->rs[0].id];

	if (test_bit(rs->rs[0].id, msm_rpmrs_buffered)) {
		rs->rs[0].value = *buf;
		if (MSM_RPMRS_VDD(limits->vdd_mem) > MSM_RPMRS_VDD(*buf)) {
			*buf &= ~MSM_RPMRS_VDD_MASK;
			*buf |= MSM_RPMRS_VDD(limits->vdd_mem);
		}

		if (MSM_RPMRS_DEBUG_OUTPUT & msm_rpmrs_debug_mask)
			pr_info("%s: vdd %d (0x%x)\n", __func__,
				MSM_RPMRS_VDD(*buf), MSM_RPMRS_VDD(*buf));
	}
}

static void msm_rpmrs_restore_vdd_mem(void)
{
	struct msm_rpmrs_resource *rs = &msm_rpmrs_vdd_mem;

	if (test_bit(rs->rs[0].id, msm_rpmrs_buffered))
		msm_rpmrs_buffer[rs->rs[0].id] = rs->rs[0].value;
}

static bool msm_rpmrs_vdd_dig_beyond_limits(struct msm_rpmrs_limits *limits)
{
	struct msm_rpmrs_resource *rs = &msm_rpmrs_vdd_dig;
	uint32_t vdd_dig;

	if (test_bit(rs->rs[0].id, msm_rpmrs_buffered)) {
		uint32_t buffered_value = msm_rpmrs_buffer[rs->rs[0].id];

		if (rs->enable_low_power == 0)
			vdd_dig = MSM_RPMRS_VDD_DIG_ACTIVE;
		else if (rs->enable_low_power == 1)
			vdd_dig = MSM_RPMRS_VDD_DIG_RET_HIGH;
		else
			vdd_dig = MSM_RPMRS_VDD_DIG_RET_LOW;

		if (MSM_RPMRS_VDD(buffered_value) > MSM_RPMRS_VDD(vdd_dig))
			vdd_dig = buffered_value;
	} else {
		vdd_dig = MSM_RPMRS_VDD_DIG_ACTIVE;
	}

	return MSM_RPMRS_VDD(vdd_dig) >=
		MSM_RPMRS_VDD(limits->vdd_dig_upper_bound);
}

static void msm_rpmrs_aggregate_vdd_dig(struct msm_rpmrs_limits *limits)
{
	struct msm_rpmrs_resource *rs = &msm_rpmrs_vdd_dig;
	uint32_t *buf = &msm_rpmrs_buffer[rs->rs[0].id];

	if (test_bit(rs->rs[0].id, msm_rpmrs_buffered)) {
		rs->rs[0].value = *buf;
		if (MSM_RPMRS_VDD(limits->vdd_dig) > MSM_RPMRS_VDD(*buf)) {
			*buf &= ~MSM_RPMRS_VDD_MASK;
			*buf |= MSM_RPMRS_VDD(limits->vdd_dig);
		}

		if (MSM_RPMRS_DEBUG_OUTPUT & msm_rpmrs_debug_mask)
			pr_info("%s: vdd %d (0x%x)\n", __func__,
				MSM_RPMRS_VDD(*buf), MSM_RPMRS_VDD(*buf));
	}
}

static void msm_rpmrs_restore_vdd_dig(void)
{
	struct msm_rpmrs_resource *rs = &msm_rpmrs_vdd_dig;

	if (test_bit(rs->rs[0].id, msm_rpmrs_buffered))
		msm_rpmrs_buffer[rs->rs[0].id] = rs->rs[0].value;
}

/******************************************************************************
 * Buffering Functions
 *****************************************************************************/

static bool msm_rpmrs_irqs_detectable(struct msm_rpmrs_limits *limits,
		bool irqs_detect, bool gpio_detect)
{
	if (limits->vdd_dig <= MSM_RPMRS_VDD_DIG_RET_HIGH)
		return irqs_detect;

	if (limits->pxo == MSM_RPMRS_PXO_OFF)
		return gpio_detect;

	return true;
}

static bool msm_rpmrs_use_mpm(struct msm_rpmrs_limits *limits)
{
	return (limits->pxo == MSM_RPMRS_PXO_OFF) ||
		(limits->vdd_dig <= MSM_RPMRS_VDD_DIG_RET_HIGH);
}
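
/*
 * With PXO off or VDD_DIG at (or below) retention, the application
 * processor's usual interrupt path cannot detect wakeup events, so the
 * MPM block monitors the wakeup-capable interrupts on its behalf.
 */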

static void msm_rpmrs_update_levels(void)
{
	int i, k;

	for (i = 0; i < msm_rpmrs_level_count; i++) {
		struct msm_rpmrs_level *level = &msm_rpmrs_levels[i];

		if (level->sleep_mode != MSM_PM_SLEEP_MODE_POWER_COLLAPSE)
			continue;

		level->available = true;

		for (k = 0; k < ARRAY_SIZE(msm_rpmrs_resources); k++) {
			struct msm_rpmrs_resource *rs = msm_rpmrs_resources[k];

			if (rs->beyond_limits &&
					rs->beyond_limits(&level->rs_limits)) {
				level->available = false;
				break;
			}
		}
	}
}

/*
 * Return value:
 *   0: no entries in <req> are on our resource list
 *   1: one or more entries in <req> are on our resource list
 *   -EINVAL: invalid id in <req> array
 */
static int msm_rpmrs_buffer_request(struct msm_rpm_iv_pair *req, int count)
{
	bool listed;
	int i;

	for (i = 0; i < count; i++)
		if (req[i].id > MSM_RPM_ID_LAST)
			return -EINVAL;

	for (i = 0, listed = false; i < count; i++) {
		msm_rpmrs_buffer[req[i].id] = req[i].value;
		set_bit(req[i].id, msm_rpmrs_buffered);

		if (MSM_RPMRS_DEBUG_BUFFER & msm_rpmrs_debug_mask)
			pr_info("%s: reg %d: 0x%x\n",
				__func__, req[i].id, req[i].value);

		if (listed)
			continue;

		if (test_bit(req[i].id, msm_rpmrs_listed))
			listed = true;
	}

	return listed ? 1 : 0;
}

/*
 * Return value:
 *   0: no entries in <req> are on our resource list
 *   1: one or more entries in <req> are on our resource list
 *   -EINVAL: invalid id in <req> array
 */
static int msm_rpmrs_clear_buffer(struct msm_rpm_iv_pair *req, int count)
{
	bool listed;
	int i;

	for (i = 0; i < count; i++)
		if (req[i].id > MSM_RPM_ID_LAST)
			return -EINVAL;

	for (i = 0, listed = false; i < count; i++) {
		msm_rpmrs_buffer[req[i].id] = 0;
		clear_bit(req[i].id, msm_rpmrs_buffered);

		if (MSM_RPMRS_DEBUG_BUFFER & msm_rpmrs_debug_mask)
			pr_info("%s: reg %d\n", __func__, req[i].id);

		if (listed)
			continue;

		if (test_bit(req[i].id, msm_rpmrs_listed))
			listed = true;
	}

	return listed ? 1 : 0;
}

#ifdef CONFIG_MSM_L2_SPM
static int msm_rpmrs_flush_L2(struct msm_rpmrs_limits *limits, int notify_rpm)
{
	int rc = 0;
	int lpm;

	switch (limits->l2_cache) {
	case MSM_RPMRS_L2_CACHE_HSFS_OPEN:
		lpm = MSM_SPM_L2_MODE_POWER_COLLAPSE;
		/* Increment the counter for TZ to init L2 on warmboot */
		/* Barrier in msm_spm_l2_set_low_power_mode */
		BUG_ON(!msm_rpmrs_l2_counter_addr);
		writel_relaxed(++msm_rpmrs_l2_reset_count,
				msm_rpmrs_l2_counter_addr);
		break;
	case MSM_RPMRS_L2_CACHE_GDHS:
		lpm = MSM_SPM_L2_MODE_GDHS;
		break;
	case MSM_RPMRS_L2_CACHE_RETENTION:
		lpm = MSM_SPM_L2_MODE_RETENTION;
		break;
	default:
	case MSM_RPMRS_L2_CACHE_ACTIVE:
		lpm = MSM_SPM_L2_MODE_DISABLED;
		break;
	}

	rc = msm_spm_l2_set_low_power_mode(lpm, notify_rpm);
	if (MSM_RPMRS_DEBUG_BUFFER & msm_rpmrs_debug_mask)
		pr_info("%s: Requesting low power mode %d returned %d\n",
				__func__, lpm, rc);

	return rc;
}
#else
static int msm_rpmrs_flush_L2(struct msm_rpmrs_limits *limits, int notify_rpm)
{
	return 0;
}
#endif

static int msm_rpmrs_flush_buffer(
	uint32_t sclk_count, struct msm_rpmrs_limits *limits, int from_idle)
{
	struct msm_rpm_iv_pair *req;
	int count;
	int rc;
	int i;

	msm_rpmrs_aggregate_sclk(sclk_count);
	for (i = 0; i < ARRAY_SIZE(msm_rpmrs_resources); i++) {
		if (msm_rpmrs_resources[i]->aggregate)
			msm_rpmrs_resources[i]->aggregate(limits);
	}

	count = bitmap_weight(msm_rpmrs_buffered, MSM_RPM_ID_LAST + 1);

	req = kmalloc(sizeof(*req) * count, GFP_ATOMIC);
	if (!req) {
		rc = -ENOMEM;
		goto flush_buffer_restore;
	}

	count = 0;
	i = find_first_bit(msm_rpmrs_buffered, MSM_RPM_ID_LAST + 1);

	while (i < MSM_RPM_ID_LAST + 1) {
		if (MSM_RPMRS_DEBUG_OUTPUT & msm_rpmrs_debug_mask)
			pr_info("%s: reg %d: 0x%x\n",
				__func__, i, msm_rpmrs_buffer[i]);

		req[count].id = i;
		req[count].value = msm_rpmrs_buffer[i];
		count++;

		i = find_next_bit(msm_rpmrs_buffered, MSM_RPM_ID_LAST+1, i+1);
	}

	rc = msm_rpm_set_noirq(MSM_RPM_CTX_SET_SLEEP, req, count);
	kfree(req);

	if (rc)
		goto flush_buffer_restore;

	bitmap_and(msm_rpmrs_buffered,
		msm_rpmrs_buffered, msm_rpmrs_listed, MSM_RPM_ID_LAST + 1);

flush_buffer_restore:
	for (i = 0; i < ARRAY_SIZE(msm_rpmrs_resources); i++) {
		if (msm_rpmrs_resources[i]->restore)
			msm_rpmrs_resources[i]->restore();
	}
	msm_rpmrs_restore_sclk();

	if (rc)
		pr_err("%s: failed: %d\n", __func__, rc);
	return rc;
}
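
/*
 * Flush sequence above: aggregate the buffered requests down to the chosen
 * level's limits, send everything buffered to the RPM's sleep set, trim the
 * buffer to just the IDs on our resource list (msm_rpmrs_listed), then
 * restore the pre-aggregation values.  GFP_ATOMIC is used because this path
 * must not sleep.
 */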

static int msm_rpmrs_set_common(
	int ctx, struct msm_rpm_iv_pair *req, int count, bool noirq)
{
	if (ctx == MSM_RPM_CTX_SET_SLEEP) {
		unsigned long flags;
		int rc;

		spin_lock_irqsave(&msm_rpmrs_lock, flags);
		rc = msm_rpmrs_buffer_request(req, count);
		if (rc > 0) {
			msm_rpmrs_update_levels();
			rc = 0;
		}
		spin_unlock_irqrestore(&msm_rpmrs_lock, flags);

		return rc;
	}

	if (noirq)
		return msm_rpm_set_noirq(ctx, req, count);
	else
		return msm_rpm_set(ctx, req, count);
}
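
/*
 * Usage sketch (the PXO vote is just an example, not taken from a caller
 * in this file):
 *
 *	struct msm_rpm_iv_pair req = {
 *		.id = MSM_RPMRS_ID_PXO_CLK,
 *		.value = MSM_RPMRS_PXO_OFF,
 *	};
 *	msm_rpmrs_set(MSM_RPM_CTX_SET_SLEEP, &req, 1);
 *
 * Sleep-set requests are only buffered here; they reach the RPM when
 * msm_rpmrs_flush_buffer() runs on sleep entry.  Requests for any other
 * context go straight to msm_rpm_set()/msm_rpm_set_noirq().
 */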

static int msm_rpmrs_clear_common(
	int ctx, struct msm_rpm_iv_pair *req, int count, bool noirq)
{
	if (ctx == MSM_RPM_CTX_SET_SLEEP) {
		unsigned long flags;
		int rc;

		spin_lock_irqsave(&msm_rpmrs_lock, flags);
		rc = msm_rpmrs_clear_buffer(req, count);
		if (rc > 0) {
			msm_rpmrs_update_levels();
			rc = 0;
		}
		spin_unlock_irqrestore(&msm_rpmrs_lock, flags);

		if (rc < 0)
			return rc;
	}

	if (noirq)
		return msm_rpm_clear_noirq(ctx, req, count);
	else
		return msm_rpm_clear(ctx, req, count);
}

/******************************************************************************
 * Attribute Functions
 *****************************************************************************/

static ssize_t msm_rpmrs_resource_attr_show(
	struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct kernel_param kp;
	unsigned long flags;
	unsigned int temp;
	int rc;

	spin_lock_irqsave(&msm_rpmrs_lock, flags);
	temp = GET_RS_FROM_ATTR(attr)->enable_low_power;
	spin_unlock_irqrestore(&msm_rpmrs_lock, flags);

	kp.arg = &temp;
	rc = param_get_uint(buf, &kp);

	if (rc > 0) {
		strcat(buf, "\n");
		rc++;
	}

	return rc;
}

static ssize_t msm_rpmrs_resource_attr_store(struct kobject *kobj,
	struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct kernel_param kp;
	unsigned long flags;
	unsigned int temp;
	int rc;

	kp.arg = &temp;
	rc = param_set_uint(buf, &kp);
	if (rc)
		return rc;

	spin_lock_irqsave(&msm_rpmrs_lock, flags);
	GET_RS_FROM_ATTR(attr)->enable_low_power = temp;

	/* special case active-set signal for MSM_RPMRS_ID_RPM_CTL */
	if (GET_RS_FROM_ATTR(attr)->rs[0].id == MSM_RPMRS_ID_RPM_CTL) {
		struct msm_rpm_iv_pair req;
		req.id = MSM_RPMRS_ID_RPM_CTL;
		req.value = GET_RS_FROM_ATTR(attr)->enable_low_power ? 0 : 1;

		rc = msm_rpm_set_noirq(MSM_RPM_CTX_SET_0, &req, 1);
		if (rc) {
			pr_err("%s: failed to request RPM_CTL to %d: %d\n",
				__func__, req.value, rc);
		}
	}

	msm_rpmrs_update_levels();
	spin_unlock_irqrestore(&msm_rpmrs_lock, flags);

	return count;
}

static int __init msm_rpmrs_resource_sysfs_add(void)
{
	struct kobject *module_kobj;
	struct kobject *low_power_kboj;
	struct msm_rpmrs_resource_sysfs *rs;
	int i;
	int rc;

	module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
	if (!module_kobj) {
		pr_err("%s: cannot find kobject for module %s\n",
			__func__, KBUILD_MODNAME);
		rc = -ENOENT;
		goto resource_sysfs_add_exit;
	}

	low_power_kboj = kobject_create_and_add(
				"enable_low_power", module_kobj);
	if (!low_power_kboj) {
		pr_err("%s: cannot create kobject\n", __func__);
		rc = -ENOMEM;
		goto resource_sysfs_add_exit;
	}

	for (i = 0; i < ARRAY_SIZE(msm_rpmrs_resources); i++) {
		rs = kzalloc(sizeof(*rs), GFP_KERNEL);
		if (!rs) {
			pr_err("%s: cannot allocate memory for attributes\n",
				__func__);
			rc = -ENOMEM;
			goto resource_sysfs_add_exit;
		}

		rs->kas.rs = msm_rpmrs_resources[i];
		rs->kas.ka.attr.name = msm_rpmrs_resources[i]->name;
		rs->kas.ka.attr.mode = 0644;
		rs->kas.ka.show = msm_rpmrs_resource_attr_show;
		rs->kas.ka.store = msm_rpmrs_resource_attr_store;

		rs->attrs[0] = &rs->kas.ka.attr;
		rs->attrs[1] = NULL;
		rs->attr_group.attrs = rs->attrs;

		rc = sysfs_create_group(low_power_kboj, &rs->attr_group);
		if (rc) {
			pr_err("%s: cannot create kobject attribute group\n",
				__func__);
			goto resource_sysfs_add_exit;
		}
	}

	rc = 0;

resource_sysfs_add_exit:
	return rc;
}
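
/*
 * The groups created above land under the module kobject, e.g.
 * /sys/module/rpm_resources/enable_low_power/L2_cache (path assumes
 * KBUILD_MODNAME is "rpm_resources").  Writing each file sets the
 * resource's enable_low_power value and immediately re-evaluates which
 * sleep levels remain available.
 */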

/******************************************************************************
 * Public Functions
 *****************************************************************************/

int msm_rpmrs_set(int ctx, struct msm_rpm_iv_pair *req, int count)
{
	return msm_rpmrs_set_common(ctx, req, count, false);
}

int msm_rpmrs_set_noirq(int ctx, struct msm_rpm_iv_pair *req, int count)
{
	WARN(!irqs_disabled(), "msm_rpmrs_set_noirq can only be called "
		"safely when local irqs are disabled. Consider using "
		"msm_rpmrs_set or msm_rpmrs_set_nosleep instead.");
	return msm_rpmrs_set_common(ctx, req, count, true);
}

int msm_rpmrs_clear(int ctx, struct msm_rpm_iv_pair *req, int count)
{
	return msm_rpmrs_clear_common(ctx, req, count, false);
}

int msm_rpmrs_clear_noirq(int ctx, struct msm_rpm_iv_pair *req, int count)
{
	WARN(!irqs_disabled(), "msm_rpmrs_clear_noirq can only be called "
		"safely when local irqs are disabled. Consider using "
		"msm_rpmrs_clear or msm_rpmrs_clear_nosleep instead.");
	return msm_rpmrs_clear_common(ctx, req, count, true);
}

void msm_rpmrs_show_resources(void)
{
	struct msm_rpmrs_resource *rs;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&msm_rpmrs_lock, flags);
	for (i = 0; i < ARRAY_SIZE(msm_rpmrs_resources); i++) {
		rs = msm_rpmrs_resources[i];
		if (rs->rs[0].id < MSM_RPM_ID_LAST + 1)
			pr_info("%s: resource %s: buffered %d, value 0x%x\n",
				__func__, rs->name,
				test_bit(rs->rs[0].id, msm_rpmrs_buffered),
				msm_rpmrs_buffer[rs->rs[0].id]);
		else
			pr_info("%s: resource %s: value %d\n",
				__func__, rs->name, rs->rs[0].value);
	}
	spin_unlock_irqrestore(&msm_rpmrs_lock, flags);
}

struct msm_rpmrs_limits *msm_rpmrs_lowest_limits(
	bool from_idle, enum msm_pm_sleep_mode sleep_mode, uint32_t latency_us,
	uint32_t sleep_us)
{
	unsigned int cpu = smp_processor_id();
	struct msm_rpmrs_level *best_level = NULL;
	bool irqs_detectable = false;
	bool gpio_detectable = false;
	int i;

	if (sleep_mode == MSM_PM_SLEEP_MODE_POWER_COLLAPSE) {
		irqs_detectable = msm_mpm_irqs_detectable(from_idle);
		gpio_detectable = msm_mpm_gpio_irqs_detectable(from_idle);
	}

	for (i = 0; i < msm_rpmrs_level_count; i++) {
		struct msm_rpmrs_level *level = &msm_rpmrs_levels[i];
		uint32_t power;

		if (!level->available)
			continue;

		if (sleep_mode != level->sleep_mode)
			continue;

		if (latency_us < level->latency_us)
			continue;

		if (!msm_rpmrs_irqs_detectable(&level->rs_limits,
				irqs_detectable, gpio_detectable))
			continue;

		if (sleep_us <= 1) {
			power = level->energy_overhead;
		} else if (sleep_us <= level->time_overhead_us) {
			power = level->energy_overhead / sleep_us;
		} else if ((sleep_us >> 10) > level->time_overhead_us) {
			power = level->steady_state_power;
		} else {
			power = (sleep_us - level->time_overhead_us);
			power *= level->steady_state_power;
			power /= sleep_us;
			power += level->energy_overhead / sleep_us;
		}

		if (!best_level ||
				best_level->rs_limits.power[cpu] >= power) {
			level->rs_limits.latency_us[cpu] = level->latency_us;
			level->rs_limits.power[cpu] = power;
			best_level = level;
		}
	}

	return best_level ? &best_level->rs_limits : NULL;
}
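
/*
 * The general case above estimates a level's average power as
 *
 *	power = energy_overhead / sleep_us
 *		+ steady_state_power * (sleep_us - time_overhead_us)
 *			/ sleep_us
 *
 * with very short sleeps charged the full overhead and very long sleeps
 * (sleep_us >> 10 beyond time_overhead_us) approximated by steady-state
 * power alone.  Worked example with made-up numbers: energy_overhead =
 * 100000, steady_state_power = 10, time_overhead_us = 500, sleep_us =
 * 2000 gives power = 1500 * 10 / 2000 + 100000 / 2000 = 7 + 50 = 57 in
 * integer arithmetic.
 */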

int msm_rpmrs_enter_sleep(uint32_t sclk_count, struct msm_rpmrs_limits *limits,
	bool from_idle, bool notify_rpm)
{
	int rc = 0;

	rc = msm_rpmrs_flush_L2(limits, notify_rpm);
	if (rc)
		return rc;

	if (notify_rpm) {
		rc = msm_rpmrs_flush_buffer(sclk_count, limits, from_idle);
		if (rc)
			return rc;

		if (msm_rpmrs_use_mpm(limits))
			msm_mpm_enter_sleep(from_idle);
	}

	return rc;
}

void msm_rpmrs_exit_sleep(struct msm_rpmrs_limits *limits,
	bool from_idle, bool notify_rpm)
{
	/* Disable L2 for now; we don't want L2 to do retention by default */
	msm_spm_l2_set_low_power_mode(MSM_SPM_MODE_DISABLED, notify_rpm);

	if (msm_rpmrs_use_mpm(limits))
		msm_mpm_exit_sleep(from_idle);
}
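
/*
 * L2 power collapse (HSFS open) is only allowed once a single CPU remains
 * online, so the hotplug callback below tracks the online count and
 * re-evaluates which sleep levels stay available.
 */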

#ifdef CONFIG_MSM_L2_SPM
static int rpmrs_cpu_callback(struct notifier_block *nfb,
		unsigned long action, void *hcpu)
{
	switch (action) {
	case CPU_ONLINE_FROZEN:
	case CPU_ONLINE:
		if (num_online_cpus() > 1)
			msm_rpmrs_l2_cache.rs[0].value =
				MSM_RPMRS_L2_CACHE_ACTIVE;
		break;
	case CPU_DEAD_FROZEN:
	case CPU_DEAD:
		if (num_online_cpus() == 1)
			msm_rpmrs_l2_cache.rs[0].value =
				MSM_RPMRS_L2_CACHE_HSFS_OPEN;
		break;
	}

	msm_rpmrs_update_levels();
	return NOTIFY_OK;
}

static struct notifier_block __refdata rpmrs_cpu_notifier = {
	.notifier_call = rpmrs_cpu_callback,
};
#endif

int __init msm_rpmrs_levels_init(struct msm_rpmrs_level *levels, int size)
{
	msm_rpmrs_levels = kzalloc(sizeof(struct msm_rpmrs_level) * size,
			GFP_KERNEL);
	if (!msm_rpmrs_levels)
		return -ENOMEM;
	msm_rpmrs_level_count = size;
	memcpy(msm_rpmrs_levels, levels, size * sizeof(struct msm_rpmrs_level));

	return 0;
}

static int __init msm_rpmrs_init(void)
{
	struct msm_rpm_iv_pair req;
	int rc;

	if (cpu_is_apq8064())
		return -ENODEV;

	BUG_ON(!msm_rpmrs_levels);

	if (cpu_is_msm8x60()) {
		req.id = MSM_RPMRS_ID_APPS_L2_CACHE_CTL;
		req.value = 1;

		rc = msm_rpm_set(MSM_RPM_CTX_SET_0, &req, 1);
		if (rc) {
			pr_err("%s: failed to request L2 cache: %d\n",
				__func__, rc);
			goto init_exit;
		}

		req.id = MSM_RPMRS_ID_APPS_L2_CACHE_CTL;
		req.value = 0;

		rc = msm_rpmrs_set(MSM_RPM_CTX_SET_SLEEP, &req, 1);
		if (rc) {
			pr_err("%s: failed to initialize L2 cache for sleep: %d\n",
				__func__, rc);
			goto init_exit;
		}
	}

	/* Enable RPM SWFI on Apps initialization */
	req.id = MSM_RPMRS_ID_RPM_CTL;
	req.value = 0;

	rc = msm_rpmrs_set(MSM_RPM_CTX_SET_0, &req, 1);
	if (rc) {
		pr_err("%s: failed to initialize RPM halt: %d\n",
			__func__, rc);
		goto init_exit;
	}

	rc = msm_rpmrs_resource_sysfs_add();

init_exit:
	return rc;
}
device_initcall(msm_rpmrs_init);

static int __init msm_rpmrs_early_init(void)
{
	int i, k;

	/* Initialize listed bitmap for valid resource IDs */
	for (i = 0; i < ARRAY_SIZE(msm_rpmrs_resources); i++) {
		for (k = 0; k < msm_rpmrs_resources[i]->size; k++)
			set_bit(msm_rpmrs_resources[i]->rs[k].id,
				msm_rpmrs_listed);
	}

	return 0;
}
early_initcall(msm_rpmrs_early_init);

#ifdef CONFIG_MSM_L2_SPM
static int __init msm_rpmrs_l2_counter_init(void)
{
	msm_rpmrs_l2_counter_addr = MSM_IMEM_BASE + L2_PC_COUNTER_ADDR;
	writel_relaxed(msm_rpmrs_l2_reset_count, msm_rpmrs_l2_counter_addr);
	mb();

	msm_rpmrs_l2_cache.beyond_limits = msm_spm_l2_cache_beyond_limits;
	msm_rpmrs_l2_cache.aggregate = NULL;
	msm_rpmrs_l2_cache.restore = NULL;

	register_hotcpu_notifier(&rpmrs_cpu_notifier);

	return 0;
}
early_initcall(msm_rpmrs_l2_counter_init);
#endif