/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/bug.h>
#include <linux/mutex.h>
#include <linux/proc_fs.h>
#include <linux/spinlock.h>
#include <linux/cpu.h>
#include <mach/rpm.h>
#include <mach/msm_iomap.h>
#include <asm/mach-types.h>
#include <linux/io.h>
#include "mpm.h"
#include "rpm_resources.h"
#include "spm.h"

/******************************************************************************
 * Debug Definitions
 *****************************************************************************/

enum {
        MSM_RPMRS_DEBUG_OUTPUT = BIT(0),
        MSM_RPMRS_DEBUG_BUFFER = BIT(1),
};

static int msm_rpmrs_debug_mask;
module_param_named(
        debug_mask, msm_rpmrs_debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP
);

static struct msm_rpmrs_level *msm_rpmrs_levels;
static int msm_rpmrs_level_count;

static bool msm_rpmrs_pxo_beyond_limits(struct msm_rpmrs_limits *limits);
static void msm_rpmrs_aggregate_pxo(struct msm_rpmrs_limits *limits);
static void msm_rpmrs_restore_pxo(void);
static bool msm_rpmrs_l2_cache_beyond_limits(struct msm_rpmrs_limits *limits);
static void msm_rpmrs_aggregate_l2_cache(struct msm_rpmrs_limits *limits);
static void msm_rpmrs_restore_l2_cache(void);
static bool msm_rpmrs_vdd_mem_beyond_limits(struct msm_rpmrs_limits *limits);
static void msm_rpmrs_aggregate_vdd_mem(struct msm_rpmrs_limits *limits);
static void msm_rpmrs_restore_vdd_mem(void);
static bool msm_rpmrs_vdd_dig_beyond_limits(struct msm_rpmrs_limits *limits);
static void msm_rpmrs_aggregate_vdd_dig(struct msm_rpmrs_limits *limits);
static void msm_rpmrs_restore_vdd_dig(void);

#ifdef CONFIG_MSM_L2_SPM
static void *msm_rpmrs_l2_counter_addr;
static int msm_rpmrs_l2_reset_count;
#define L2_PC_COUNTER_ADDR 0x660
#endif

#define MSM_RPMRS_MAX_RS_REGISTER_COUNT 2

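/*
 * A low-power resource is described by the RPM register(s) that control it
 * and three optional callbacks: beyond_limits() reports whether the buffered
 * request exceeds what a given sleep level allows, aggregate() folds the
 * level's limit into the buffered value (saving the original), and restore()
 * puts the original value back after the sleep set has been flushed.
 */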
struct msm_rpmrs_resource {
        struct msm_rpm_iv_pair rs[MSM_RPMRS_MAX_RS_REGISTER_COUNT];
        uint32_t size;
        char *name;

        uint32_t enable_low_power;

        bool (*beyond_limits)(struct msm_rpmrs_limits *limits);
        void (*aggregate)(struct msm_rpmrs_limits *limits);
        void (*restore)(void);
};

static struct msm_rpmrs_resource msm_rpmrs_pxo = {
        .rs[0].id = MSM_RPMRS_ID_PXO_CLK,
        .size = 1,
        .name = "pxo",
        .beyond_limits = msm_rpmrs_pxo_beyond_limits,
        .aggregate = msm_rpmrs_aggregate_pxo,
        .restore = msm_rpmrs_restore_pxo,
};

static struct msm_rpmrs_resource msm_rpmrs_l2_cache = {
        .rs[0].id = MSM_RPMRS_ID_APPS_L2_CACHE_CTL,
        .size = 1,
        .name = "L2_cache",
        .beyond_limits = msm_rpmrs_l2_cache_beyond_limits,
        .aggregate = msm_rpmrs_aggregate_l2_cache,
        .restore = msm_rpmrs_restore_l2_cache,
};

static struct msm_rpmrs_resource msm_rpmrs_vdd_mem = {
        .rs[0].id = MSM_RPMRS_ID_VDD_MEM_0,
        .rs[1].id = MSM_RPMRS_ID_VDD_MEM_1,
        .size = 2,
        .name = "vdd_mem",
        .beyond_limits = msm_rpmrs_vdd_mem_beyond_limits,
        .aggregate = msm_rpmrs_aggregate_vdd_mem,
        .restore = msm_rpmrs_restore_vdd_mem,
};

static struct msm_rpmrs_resource msm_rpmrs_vdd_dig = {
        .rs[0].id = MSM_RPMRS_ID_VDD_DIG_0,
        .rs[1].id = MSM_RPMRS_ID_VDD_DIG_1,
        .size = 2,
        .name = "vdd_dig",
        .beyond_limits = msm_rpmrs_vdd_dig_beyond_limits,
        .aggregate = msm_rpmrs_aggregate_vdd_dig,
        .restore = msm_rpmrs_restore_vdd_dig,
};

static struct msm_rpmrs_resource msm_rpmrs_rpm_cpu = {
        .rs[0].id = MSM_RPMRS_ID_RPM_CTL,
        .size = 1,
        .name = "rpm_cpu",
        .beyond_limits = NULL,
        .aggregate = NULL,
        .restore = NULL,
};

static struct msm_rpmrs_resource *msm_rpmrs_resources[] = {
        &msm_rpmrs_pxo,
        &msm_rpmrs_l2_cache,
        &msm_rpmrs_vdd_mem,
        &msm_rpmrs_vdd_dig,
        &msm_rpmrs_rpm_cpu,
};

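/*
 * Sleep-set requests are staged in msm_rpmrs_buffer, indexed by RPM register
 * id.  msm_rpmrs_buffered marks which entries hold a valid request, and
 * msm_rpmrs_listed marks the ids that belong to the resources above, so a
 * flush can tell which buffered entries this driver manages itself.
 */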
static uint32_t msm_rpmrs_buffer[MSM_RPM_ID_LAST + 1];
static DECLARE_BITMAP(msm_rpmrs_buffered, MSM_RPM_ID_LAST + 1);
static DECLARE_BITMAP(msm_rpmrs_listed, MSM_RPM_ID_LAST + 1);
static DEFINE_SPINLOCK(msm_rpmrs_lock);

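/*
 * Only the low 12 bits of a vdd_mem/vdd_dig request encode the voltage
 * level; comparisons and aggregation below mask with MSM_RPMRS_VDD() and
 * leave the upper bits untouched.
 */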
#define MSM_RPMRS_VDD_MASK  0xfff
#define MSM_RPMRS_VDD(v)  ((v) & (MSM_RPMRS_VDD_MASK))

/******************************************************************************
 * Attribute Definitions
 *****************************************************************************/

struct msm_rpmrs_kobj_attribute {
        struct msm_rpmrs_resource *rs;
        struct kobj_attribute ka;
};

#define GET_RS_FROM_ATTR(attr) \
        (container_of(attr, struct msm_rpmrs_kobj_attribute, ka)->rs)

struct msm_rpmrs_resource_sysfs {
        struct attribute_group attr_group;
        struct attribute *attrs[2];
        struct msm_rpmrs_kobj_attribute kas;
};

/******************************************************************************
 * Resource Specific Functions
 *****************************************************************************/

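/*
 * Buffer the RPM timed-trigger registers for this sleep cycle: the trigger
 * timestamp is zeroed and the caller's sleep-clock count is staged alongside
 * it.  msm_rpmrs_restore_sclk() undoes both after the buffer is flushed.
 */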
static void msm_rpmrs_aggregate_sclk(uint32_t sclk_count)
{
        msm_rpmrs_buffer[MSM_RPM_ID_TRIGGER_TIMED_TO] = 0;
        set_bit(MSM_RPM_ID_TRIGGER_TIMED_TO, msm_rpmrs_buffered);
        msm_rpmrs_buffer[MSM_RPM_ID_TRIGGER_TIMED_SCLK_COUNT] = sclk_count;
        set_bit(MSM_RPM_ID_TRIGGER_TIMED_SCLK_COUNT, msm_rpmrs_buffered);
}

static void msm_rpmrs_restore_sclk(void)
{
        clear_bit(MSM_RPM_ID_TRIGGER_TIMED_SCLK_COUNT, msm_rpmrs_buffered);
        msm_rpmrs_buffer[MSM_RPM_ID_TRIGGER_TIMED_SCLK_COUNT] = 0;
        clear_bit(MSM_RPM_ID_TRIGGER_TIMED_TO, msm_rpmrs_buffered);
        msm_rpmrs_buffer[MSM_RPM_ID_TRIGGER_TIMED_TO] = 0;
}

static bool msm_rpmrs_pxo_beyond_limits(struct msm_rpmrs_limits *limits)
{
        struct msm_rpmrs_resource *rs = &msm_rpmrs_pxo;
        uint32_t pxo;

        if (rs->enable_low_power && test_bit(rs->rs[0].id, msm_rpmrs_buffered))
                pxo = msm_rpmrs_buffer[rs->rs[0].id];
        else
                pxo = MSM_RPMRS_PXO_ON;

        return pxo > limits->pxo;
}

static void msm_rpmrs_aggregate_pxo(struct msm_rpmrs_limits *limits)
{
        struct msm_rpmrs_resource *rs = &msm_rpmrs_pxo;
        uint32_t *buf = &msm_rpmrs_buffer[rs->rs[0].id];

        if (test_bit(rs->rs[0].id, msm_rpmrs_buffered)) {
                rs->rs[0].value = *buf;
                if (limits->pxo > *buf)
                        *buf = limits->pxo;
                if (MSM_RPMRS_DEBUG_OUTPUT & msm_rpmrs_debug_mask)
                        pr_info("%s: %d (0x%x)\n", __func__, *buf, *buf);
        }
}

static void msm_rpmrs_restore_pxo(void)
{
        struct msm_rpmrs_resource *rs = &msm_rpmrs_pxo;

        if (test_bit(rs->rs[0].id, msm_rpmrs_buffered))
                msm_rpmrs_buffer[rs->rs[0].id] = rs->rs[0].value;
}

static bool msm_rpmrs_l2_cache_beyond_limits(struct msm_rpmrs_limits *limits)
{
        struct msm_rpmrs_resource *rs = &msm_rpmrs_l2_cache;
        uint32_t l2_cache;

        if (rs->enable_low_power && test_bit(rs->rs[0].id, msm_rpmrs_buffered))
                l2_cache = msm_rpmrs_buffer[rs->rs[0].id];
        else
                l2_cache = MSM_RPMRS_L2_CACHE_ACTIVE;

        return l2_cache > limits->l2_cache;
}

static void msm_rpmrs_aggregate_l2_cache(struct msm_rpmrs_limits *limits)
{
        struct msm_rpmrs_resource *rs = &msm_rpmrs_l2_cache;
        uint32_t *buf = &msm_rpmrs_buffer[rs->rs[0].id];

        if (test_bit(rs->rs[0].id, msm_rpmrs_buffered)) {
                rs->rs[0].value = *buf;
                if (limits->l2_cache > *buf)
                        *buf = limits->l2_cache;

                if (MSM_RPMRS_DEBUG_OUTPUT & msm_rpmrs_debug_mask)
                        pr_info("%s: %d (0x%x)\n", __func__, *buf, *buf);
        }
}

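/*
 * When the L2 SPM owns the cache state, the buffered RPM value is not
 * consulted; the decision is made from the value the hotplug notifier keeps
 * in rs->rs[0].value (see rpmrs_cpu_callback below).
 */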
#ifdef CONFIG_MSM_L2_SPM
static bool msm_spm_l2_cache_beyond_limits(struct msm_rpmrs_limits *limits)
{
        struct msm_rpmrs_resource *rs = &msm_rpmrs_l2_cache;
        uint32_t l2_cache = rs->rs[0].value;

        if (!rs->enable_low_power)
                l2_cache = MSM_RPMRS_L2_CACHE_ACTIVE;

        return l2_cache > limits->l2_cache;
}
#endif

static void msm_rpmrs_restore_l2_cache(void)
{
        struct msm_rpmrs_resource *rs = &msm_rpmrs_l2_cache;

        if (test_bit(rs->rs[0].id, msm_rpmrs_buffered))
                msm_rpmrs_buffer[rs->rs[0].id] = rs->rs[0].value;
}

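/*
 * For the rails, enable_low_power is a tri-state knob exposed through sysfs:
 * 0 keeps the rail active, 1 allows high-voltage retention, and any other
 * value allows low-voltage retention.  The effective floor is the higher of
 * that knob and whatever has been buffered for the rail.
 */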
static bool msm_rpmrs_vdd_mem_beyond_limits(struct msm_rpmrs_limits *limits)
{
        struct msm_rpmrs_resource *rs = &msm_rpmrs_vdd_mem;
        uint32_t vdd_mem;

        if (test_bit(rs->rs[0].id, msm_rpmrs_buffered)) {
                uint32_t buffered_value = msm_rpmrs_buffer[rs->rs[0].id];

                if (rs->enable_low_power == 0)
                        vdd_mem = MSM_RPMRS_VDD_MEM_ACTIVE;
                else if (rs->enable_low_power == 1)
                        vdd_mem = MSM_RPMRS_VDD_MEM_RET_HIGH;
                else
                        vdd_mem = MSM_RPMRS_VDD_MEM_RET_LOW;

                if (MSM_RPMRS_VDD(buffered_value) > MSM_RPMRS_VDD(vdd_mem))
                        vdd_mem = buffered_value;
        } else {
                vdd_mem = MSM_RPMRS_VDD_MEM_ACTIVE;
        }

        return MSM_RPMRS_VDD(vdd_mem) >=
                MSM_RPMRS_VDD(limits->vdd_mem_upper_bound);
}

static void msm_rpmrs_aggregate_vdd_mem(struct msm_rpmrs_limits *limits)
{
        struct msm_rpmrs_resource *rs = &msm_rpmrs_vdd_mem;
        uint32_t *buf = &msm_rpmrs_buffer[rs->rs[0].id];

        if (test_bit(rs->rs[0].id, msm_rpmrs_buffered)) {
                rs->rs[0].value = *buf;
                if (MSM_RPMRS_VDD(limits->vdd_mem) > MSM_RPMRS_VDD(*buf)) {
                        *buf &= ~MSM_RPMRS_VDD_MASK;
                        *buf |= MSM_RPMRS_VDD(limits->vdd_mem);
                }

                if (MSM_RPMRS_DEBUG_OUTPUT & msm_rpmrs_debug_mask)
                        pr_info("%s: vdd %d (0x%x)\n", __func__,
                                MSM_RPMRS_VDD(*buf), MSM_RPMRS_VDD(*buf));
        }
}

static void msm_rpmrs_restore_vdd_mem(void)
{
        struct msm_rpmrs_resource *rs = &msm_rpmrs_vdd_mem;

        if (test_bit(rs->rs[0].id, msm_rpmrs_buffered))
                msm_rpmrs_buffer[rs->rs[0].id] = rs->rs[0].value;
}

static bool msm_rpmrs_vdd_dig_beyond_limits(struct msm_rpmrs_limits *limits)
{
        struct msm_rpmrs_resource *rs = &msm_rpmrs_vdd_dig;
        uint32_t vdd_dig;

        if (test_bit(rs->rs[0].id, msm_rpmrs_buffered)) {
                uint32_t buffered_value = msm_rpmrs_buffer[rs->rs[0].id];

                if (rs->enable_low_power == 0)
                        vdd_dig = MSM_RPMRS_VDD_DIG_ACTIVE;
                else if (rs->enable_low_power == 1)
                        vdd_dig = MSM_RPMRS_VDD_DIG_RET_HIGH;
                else
                        vdd_dig = MSM_RPMRS_VDD_DIG_RET_LOW;

                if (MSM_RPMRS_VDD(buffered_value) > MSM_RPMRS_VDD(vdd_dig))
                        vdd_dig = buffered_value;
        } else {
                vdd_dig = MSM_RPMRS_VDD_DIG_ACTIVE;
        }

        return MSM_RPMRS_VDD(vdd_dig) >=
                MSM_RPMRS_VDD(limits->vdd_dig_upper_bound);
}

static void msm_rpmrs_aggregate_vdd_dig(struct msm_rpmrs_limits *limits)
{
        struct msm_rpmrs_resource *rs = &msm_rpmrs_vdd_dig;
        uint32_t *buf = &msm_rpmrs_buffer[rs->rs[0].id];

        if (test_bit(rs->rs[0].id, msm_rpmrs_buffered)) {
                rs->rs[0].value = *buf;
                if (MSM_RPMRS_VDD(limits->vdd_dig) > MSM_RPMRS_VDD(*buf)) {
                        *buf &= ~MSM_RPMRS_VDD_MASK;
                        *buf |= MSM_RPMRS_VDD(limits->vdd_dig);
                }

                if (MSM_RPMRS_DEBUG_OUTPUT & msm_rpmrs_debug_mask)
                        pr_info("%s: vdd %d (0x%x)\n", __func__,
                                MSM_RPMRS_VDD(*buf), MSM_RPMRS_VDD(*buf));
        }
}

static void msm_rpmrs_restore_vdd_dig(void)
{
        struct msm_rpmrs_resource *rs = &msm_rpmrs_vdd_dig;

        if (test_bit(rs->rs[0].id, msm_rpmrs_buffered))
                msm_rpmrs_buffer[rs->rs[0].id] = rs->rs[0].value;
}

/******************************************************************************
 * Buffering Functions
 *****************************************************************************/

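/*
 * With vdd_dig at a retention level, waking up relies on the MPM seeing
 * every enabled interrupt; with only PXO off, the MPM needs to cover just
 * the gpio interrupts.  Otherwise normal interrupt delivery still works.
 */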
static bool msm_rpmrs_irqs_detectable(struct msm_rpmrs_limits *limits,
                bool irqs_detect, bool gpio_detect)
{
        if (limits->vdd_dig <= MSM_RPMRS_VDD_DIG_RET_HIGH)
                return irqs_detect;

        if (limits->pxo == MSM_RPMRS_PXO_OFF)
                return gpio_detect;

        return true;
}

static bool msm_rpmrs_use_mpm(struct msm_rpmrs_limits *limits)
{
        return (limits->pxo == MSM_RPMRS_PXO_OFF) ||
                (limits->vdd_dig <= MSM_RPMRS_VDD_DIG_RET_HIGH);
}

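/*
 * Re-evaluate which power-collapse levels are usable: a level stays
 * available only if no resource's buffered request exceeds that level's
 * limits.  Called whenever the buffer or an enable_low_power knob changes.
 */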
static void msm_rpmrs_update_levels(void)
{
        int i, k;

        for (i = 0; i < msm_rpmrs_level_count; i++) {
                struct msm_rpmrs_level *level = &msm_rpmrs_levels[i];

                if (level->sleep_mode != MSM_PM_SLEEP_MODE_POWER_COLLAPSE)
                        continue;

                level->available = true;

                for (k = 0; k < ARRAY_SIZE(msm_rpmrs_resources); k++) {
                        struct msm_rpmrs_resource *rs = msm_rpmrs_resources[k];

                        if (rs->beyond_limits &&
                                        rs->beyond_limits(&level->rs_limits)) {
                                level->available = false;
                                break;
                        }
                }
        }
}

/*
 * Return value:
 *   0: no entries in <req> are on our resource list
 *   1: one or more entries in <req> are on our resource list
 *   -EINVAL: invalid id in <req> array
 */
static int msm_rpmrs_buffer_request(struct msm_rpm_iv_pair *req, int count)
{
        bool listed;
        int i;

        for (i = 0; i < count; i++)
                if (req[i].id > MSM_RPM_ID_LAST)
                        return -EINVAL;

        for (i = 0, listed = false; i < count; i++) {
                msm_rpmrs_buffer[req[i].id] = req[i].value;
                set_bit(req[i].id, msm_rpmrs_buffered);

                if (MSM_RPMRS_DEBUG_BUFFER & msm_rpmrs_debug_mask)
                        pr_info("%s: reg %d: 0x%x\n",
                                __func__, req[i].id, req[i].value);

                if (listed)
                        continue;

                if (test_bit(req[i].id, msm_rpmrs_listed))
                        listed = true;
        }

        return listed ? 1 : 0;
}

/*
 * Return value:
 *   0: no entries in <req> are on our resource list
 *   1: one or more entries in <req> are on our resource list
 *   -EINVAL: invalid id in <req> array
 */
static int msm_rpmrs_clear_buffer(struct msm_rpm_iv_pair *req, int count)
{
        bool listed;
        int i;

        for (i = 0; i < count; i++)
                if (req[i].id > MSM_RPM_ID_LAST)
                        return -EINVAL;

        for (i = 0, listed = false; i < count; i++) {
                msm_rpmrs_buffer[req[i].id] = 0;
                clear_bit(req[i].id, msm_rpmrs_buffered);

                if (MSM_RPMRS_DEBUG_BUFFER & msm_rpmrs_debug_mask)
                        pr_info("%s: reg %d\n", __func__, req[i].id);

                if (listed)
                        continue;

                if (test_bit(req[i].id, msm_rpmrs_listed))
                        listed = true;
        }

        return listed ? 1 : 0;
}

#ifdef CONFIG_MSM_L2_SPM
static int msm_rpmrs_flush_L2(struct msm_rpmrs_limits *limits, int notify_rpm)
{
        int rc = 0;
        int lpm;

        switch (limits->l2_cache) {
        case MSM_RPMRS_L2_CACHE_HSFS_OPEN:
                lpm = MSM_SPM_L2_MODE_POWER_COLLAPSE;
                /* Increment the counter for TZ to init L2 on warmboot */
                /* Barrier in msm_spm_l2_set_low_power_mode */
                BUG_ON(!msm_rpmrs_l2_counter_addr);
                writel_relaxed(++msm_rpmrs_l2_reset_count,
                                msm_rpmrs_l2_counter_addr);
                break;
        case MSM_RPMRS_L2_CACHE_GDHS:
                lpm = MSM_SPM_L2_MODE_GDHS;
                break;
        case MSM_RPMRS_L2_CACHE_RETENTION:
                lpm = MSM_SPM_L2_MODE_RETENTION;
                break;
        default:
        case MSM_RPMRS_L2_CACHE_ACTIVE:
                lpm = MSM_SPM_L2_MODE_DISABLED;
                break;
        }

        rc = msm_spm_l2_set_low_power_mode(lpm, notify_rpm);
        if (MSM_RPMRS_DEBUG_BUFFER & msm_rpmrs_debug_mask)
                pr_info("%s: Requesting low power mode %d returned %d\n",
                                __func__, lpm, rc);

        return rc;
}
#else
static int msm_rpmrs_flush_L2(struct msm_rpmrs_limits *limits, int notify_rpm)
{
        return 0;
}
#endif

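/*
 * Flush the sleep set to the RPM: fold the chosen level's limits and the
 * sleep-clock trigger into the buffer, send every buffered register with
 * msm_rpm_set_noirq(), then restore the buffer so only the client-requested
 * values survive.  On success, entries not on our resource list are dropped
 * from the buffered bitmap since the RPM now holds them.
 */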
static int msm_rpmrs_flush_buffer(
        uint32_t sclk_count, struct msm_rpmrs_limits *limits, int from_idle)
{
        struct msm_rpm_iv_pair *req;
        int count;
        int rc;
        int i;

        msm_rpmrs_aggregate_sclk(sclk_count);
        for (i = 0; i < ARRAY_SIZE(msm_rpmrs_resources); i++) {
                if (msm_rpmrs_resources[i]->aggregate)
                        msm_rpmrs_resources[i]->aggregate(limits);
        }

        count = bitmap_weight(msm_rpmrs_buffered, MSM_RPM_ID_LAST + 1);

        req = kmalloc(sizeof(*req) * count, GFP_ATOMIC);
        if (!req) {
                rc = -ENOMEM;
                goto flush_buffer_restore;
        }

        count = 0;
        i = find_first_bit(msm_rpmrs_buffered, MSM_RPM_ID_LAST + 1);

        while (i < MSM_RPM_ID_LAST + 1) {
                if (MSM_RPMRS_DEBUG_OUTPUT & msm_rpmrs_debug_mask)
                        pr_info("%s: reg %d: 0x%x\n",
                                __func__, i, msm_rpmrs_buffer[i]);

                req[count].id = i;
                req[count].value = msm_rpmrs_buffer[i];
                count++;

                i = find_next_bit(msm_rpmrs_buffered, MSM_RPM_ID_LAST+1, i+1);
        }

        rc = msm_rpm_set_noirq(MSM_RPM_CTX_SET_SLEEP, req, count);
        kfree(req);

        if (rc)
                goto flush_buffer_restore;

        bitmap_and(msm_rpmrs_buffered,
                msm_rpmrs_buffered, msm_rpmrs_listed, MSM_RPM_ID_LAST + 1);

flush_buffer_restore:
        for (i = 0; i < ARRAY_SIZE(msm_rpmrs_resources); i++) {
                if (msm_rpmrs_resources[i]->restore)
                        msm_rpmrs_resources[i]->restore();
        }
        msm_rpmrs_restore_sclk();

        if (rc)
                pr_err("%s: failed: %d\n", __func__, rc);
        return rc;
}

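/*
 * Requests against the sleep set are only buffered here and trigger a
 * re-check of the available levels; everything else goes straight through
 * to the RPM driver, using the _noirq variants when the caller already has
 * interrupts disabled.
 */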
static int msm_rpmrs_set_common(
        int ctx, struct msm_rpm_iv_pair *req, int count, bool noirq)
{
        if (ctx == MSM_RPM_CTX_SET_SLEEP) {
                unsigned long flags;
                int rc;

                spin_lock_irqsave(&msm_rpmrs_lock, flags);
                rc = msm_rpmrs_buffer_request(req, count);
                if (rc > 0) {
                        msm_rpmrs_update_levels();
                        rc = 0;
                }
                spin_unlock_irqrestore(&msm_rpmrs_lock, flags);

                return rc;
        }

        if (noirq)
                return msm_rpm_set_noirq(ctx, req, count);
        else
                return msm_rpm_set(ctx, req, count);
}

static int msm_rpmrs_clear_common(
        int ctx, struct msm_rpm_iv_pair *req, int count, bool noirq)
{
        if (ctx == MSM_RPM_CTX_SET_SLEEP) {
                unsigned long flags;
                int rc;

                spin_lock_irqsave(&msm_rpmrs_lock, flags);
                rc = msm_rpmrs_clear_buffer(req, count);
                if (rc > 0) {
                        msm_rpmrs_update_levels();
                        rc = 0;
                }
                spin_unlock_irqrestore(&msm_rpmrs_lock, flags);

                if (rc < 0)
                        return rc;
        }

        if (noirq)
                return msm_rpm_clear_noirq(ctx, req, count);
        else
                return msm_rpm_clear(ctx, req, count);
}

/******************************************************************************
 * Attribute Functions
 *****************************************************************************/

static ssize_t msm_rpmrs_resource_attr_show(
        struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
        struct kernel_param kp;
        unsigned long flags;
        unsigned int temp;
        int rc;

        spin_lock_irqsave(&msm_rpmrs_lock, flags);
        temp = GET_RS_FROM_ATTR(attr)->enable_low_power;
        spin_unlock_irqrestore(&msm_rpmrs_lock, flags);

        kp.arg = &temp;
        rc = param_get_uint(buf, &kp);

        if (rc > 0) {
                strcat(buf, "\n");
                rc++;
        }

        return rc;
}

static ssize_t msm_rpmrs_resource_attr_store(struct kobject *kobj,
        struct kobj_attribute *attr, const char *buf, size_t count)
{
        struct kernel_param kp;
        unsigned long flags;
        unsigned int temp;
        int rc;

        kp.arg = &temp;
        rc = param_set_uint(buf, &kp);
        if (rc)
                return rc;

        spin_lock_irqsave(&msm_rpmrs_lock, flags);
        GET_RS_FROM_ATTR(attr)->enable_low_power = temp;

        /* special case active-set signal for MSM_RPMRS_ID_RPM_CTL */
        if (GET_RS_FROM_ATTR(attr)->rs[0].id == MSM_RPMRS_ID_RPM_CTL) {
                struct msm_rpm_iv_pair req;
                req.id = MSM_RPMRS_ID_RPM_CTL;
                req.value = GET_RS_FROM_ATTR(attr)->enable_low_power ? 0 : 1;

                rc = msm_rpm_set_noirq(MSM_RPM_CTX_SET_0, &req, 1);
                if (rc) {
                        pr_err("%s: failed to request RPM_CTL to %d: %d\n",
                                __func__, req.value, rc);
                }
        }

        msm_rpmrs_update_levels();
        spin_unlock_irqrestore(&msm_rpmrs_lock, flags);

        return count;
}

static int __init msm_rpmrs_resource_sysfs_add(void)
{
        struct kobject *module_kobj;
        struct kobject *low_power_kobj;
        struct msm_rpmrs_resource_sysfs *rs;
        int i;
        int rc;

        module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
        if (!module_kobj) {
                pr_err("%s: cannot find kobject for module %s\n",
                        __func__, KBUILD_MODNAME);
                rc = -ENOENT;
                goto resource_sysfs_add_exit;
        }

        low_power_kobj = kobject_create_and_add(
                                "enable_low_power", module_kobj);
        if (!low_power_kobj) {
                pr_err("%s: cannot create kobject\n", __func__);
                rc = -ENOMEM;
                goto resource_sysfs_add_exit;
        }

        for (i = 0; i < ARRAY_SIZE(msm_rpmrs_resources); i++) {
                rs = kzalloc(sizeof(*rs), GFP_KERNEL);
                if (!rs) {
                        pr_err("%s: cannot allocate memory for attributes\n",
                                __func__);
                        rc = -ENOMEM;
                        goto resource_sysfs_add_exit;
                }

                rs->kas.rs = msm_rpmrs_resources[i];
                rs->kas.ka.attr.name = msm_rpmrs_resources[i]->name;
                rs->kas.ka.attr.mode = 0644;
                rs->kas.ka.show = msm_rpmrs_resource_attr_show;
                rs->kas.ka.store = msm_rpmrs_resource_attr_store;

                rs->attrs[0] = &rs->kas.ka.attr;
                rs->attrs[1] = NULL;
                rs->attr_group.attrs = rs->attrs;

                rc = sysfs_create_group(low_power_kobj, &rs->attr_group);
                if (rc) {
                        pr_err("%s: cannot create kobject attribute group\n",
                                __func__);
                        goto resource_sysfs_add_exit;
                }
        }

        rc = 0;

resource_sysfs_add_exit:
        return rc;
}

/******************************************************************************
 * Public Functions
 *****************************************************************************/

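/*
 * Illustrative only: a client buffering a sleep-set vote looks roughly like
 * this, using ids and constants referenced elsewhere in this file:
 *
 *      struct msm_rpm_iv_pair req = {
 *              .id = MSM_RPMRS_ID_PXO_CLK,
 *              .value = MSM_RPMRS_PXO_OFF,
 *      };
 *      int rc = msm_rpmrs_set(MSM_RPM_CTX_SET_SLEEP, &req, 1);
 *
 * Sleep-set requests are held in the local buffer until
 * msm_rpmrs_enter_sleep() flushes them; active-set requests go to the RPM
 * immediately.
 */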
int msm_rpmrs_set(int ctx, struct msm_rpm_iv_pair *req, int count)
{
        return msm_rpmrs_set_common(ctx, req, count, false);
}

int msm_rpmrs_set_noirq(int ctx, struct msm_rpm_iv_pair *req, int count)
{
        WARN(!irqs_disabled(), "msm_rpmrs_set_noirq can only be called "
                "safely when local irqs are disabled. Consider using "
                "msm_rpmrs_set or msm_rpmrs_set_nosleep instead.");
        return msm_rpmrs_set_common(ctx, req, count, true);
}

int msm_rpmrs_clear(int ctx, struct msm_rpm_iv_pair *req, int count)
{
        return msm_rpmrs_clear_common(ctx, req, count, false);
}

int msm_rpmrs_clear_noirq(int ctx, struct msm_rpm_iv_pair *req, int count)
{
        WARN(!irqs_disabled(), "msm_rpmrs_clear_noirq can only be called "
                "safely when local irqs are disabled. Consider using "
                "msm_rpmrs_clear or msm_rpmrs_clear_nosleep instead.");
        return msm_rpmrs_clear_common(ctx, req, count, true);
}

void msm_rpmrs_show_resources(void)
{
        struct msm_rpmrs_resource *rs;
        unsigned long flags;
        int i;

        spin_lock_irqsave(&msm_rpmrs_lock, flags);
        for (i = 0; i < ARRAY_SIZE(msm_rpmrs_resources); i++) {
                rs = msm_rpmrs_resources[i];
                if (rs->rs[0].id < MSM_RPM_ID_LAST + 1)
                        pr_info("%s: resource %s: buffered %d, value 0x%x\n",
                                __func__, rs->name,
                                test_bit(rs->rs[0].id, msm_rpmrs_buffered),
                                msm_rpmrs_buffer[rs->rs[0].id]);
                else
                        pr_info("%s: resource %s: value %d\n",
                                __func__, rs->name, rs->rs[0].value);
        }
        spin_unlock_irqrestore(&msm_rpmrs_lock, flags);
}

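/*
 * Pick the lowest-power level that fits the caller's constraints.  The
 * average power for a candidate level is approximated from its fixed
 * costs: roughly energy_overhead / sleep_us for short sleeps, decaying
 * toward steady_state_power as sleep_us grows (the >> 10 branch short-cuts
 * the math once the sleep is ~1000x the overhead time).
 */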
struct msm_rpmrs_limits *msm_rpmrs_lowest_limits(
        bool from_idle, enum msm_pm_sleep_mode sleep_mode, uint32_t latency_us,
        uint32_t sleep_us)
{
        unsigned int cpu = smp_processor_id();
        struct msm_rpmrs_level *best_level = NULL;
        bool irqs_detectable = false;
        bool gpio_detectable = false;
        int i;

        if (sleep_mode == MSM_PM_SLEEP_MODE_POWER_COLLAPSE) {
                irqs_detectable = msm_mpm_irqs_detectable(from_idle);
                gpio_detectable = msm_mpm_gpio_irqs_detectable(from_idle);
        }

        for (i = 0; i < msm_rpmrs_level_count; i++) {
                struct msm_rpmrs_level *level = &msm_rpmrs_levels[i];
                uint32_t power;

                if (!level->available)
                        continue;

                if (sleep_mode != level->sleep_mode)
                        continue;

                if (latency_us < level->latency_us)
                        continue;

                if (!msm_rpmrs_irqs_detectable(&level->rs_limits,
                                irqs_detectable, gpio_detectable))
                        continue;

                if (sleep_us <= 1) {
                        power = level->energy_overhead;
                } else if (sleep_us <= level->time_overhead_us) {
                        power = level->energy_overhead / sleep_us;
                } else if ((sleep_us >> 10) > level->time_overhead_us) {
                        power = level->steady_state_power;
                } else {
                        power = (sleep_us - level->time_overhead_us);
                        power *= level->steady_state_power;
                        power /= sleep_us;
                        power += level->energy_overhead / sleep_us;
                }

                if (!best_level ||
                                best_level->rs_limits.power[cpu] >= power) {
                        level->rs_limits.latency_us[cpu] = level->latency_us;
                        level->rs_limits.power[cpu] = power;
                        best_level = level;
                }
        }

        return best_level ? &best_level->rs_limits : NULL;
}

int msm_rpmrs_enter_sleep(uint32_t sclk_count, struct msm_rpmrs_limits *limits,
                bool from_idle, bool notify_rpm)
{
        int rc = 0;

        rc = msm_rpmrs_flush_L2(limits, notify_rpm);
        if (rc)
                return rc;

        if (notify_rpm) {
                rc = msm_rpmrs_flush_buffer(sclk_count, limits, from_idle);
                if (rc)
                        return rc;

                if (msm_rpmrs_use_mpm(limits))
                        msm_mpm_enter_sleep(from_idle);
        }

        return rc;
}

void msm_rpmrs_exit_sleep(struct msm_rpmrs_limits *limits,
                bool from_idle, bool notify_rpm)
{
        /* Disable L2 for now; we don't want L2 to do retention by default */
        msm_spm_l2_set_low_power_mode(MSM_SPM_MODE_DISABLED, notify_rpm);

        if (msm_rpmrs_use_mpm(limits))
                msm_mpm_exit_sleep(from_idle);
}

#ifdef CONFIG_MSM_L2_SPM
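/*
 * With more than one core online the L2 must stay active across power
 * collapse; only when the last other core has gone down may it be fully
 * power collapsed (HSFS open).  Track that in the resource's cached value
 * as CPUs come and go.
 */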
static int rpmrs_cpu_callback(struct notifier_block *nfb,
                unsigned long action, void *hcpu)
{
        switch (action) {
        case CPU_ONLINE_FROZEN:
        case CPU_ONLINE:
                if (num_online_cpus() > 1)
                        msm_rpmrs_l2_cache.rs[0].value =
                                MSM_RPMRS_L2_CACHE_ACTIVE;
                break;
        case CPU_DEAD_FROZEN:
        case CPU_DEAD:
                if (num_online_cpus() == 1)
                        msm_rpmrs_l2_cache.rs[0].value =
                                MSM_RPMRS_L2_CACHE_HSFS_OPEN;
                break;
        }

        msm_rpmrs_update_levels();
        return NOTIFY_OK;
}

static struct notifier_block __refdata rpmrs_cpu_notifier = {
        .notifier_call = rpmrs_cpu_callback,
};
#endif

int __init msm_rpmrs_levels_init(struct msm_rpmrs_level *levels, int size)
{
        msm_rpmrs_levels = kzalloc(sizeof(struct msm_rpmrs_level) * size,
                        GFP_KERNEL);
        if (!msm_rpmrs_levels)
                return -ENOMEM;
        msm_rpmrs_level_count = size;
        memcpy(msm_rpmrs_levels, levels, size * sizeof(struct msm_rpmrs_level));

        return 0;
}

static int __init msm_rpmrs_init(void)
{
        struct msm_rpm_iv_pair req;
        int rc;

        BUG_ON(!msm_rpmrs_levels);

        if (machine_is_msm8x60_surf() || machine_is_msm8x60_ffa() ||
            machine_is_msm8x60_fluid() || machine_is_msm8x60_fusion() ||
            machine_is_msm8x60_fusn_ffa()) {

                req.id = MSM_RPMRS_ID_APPS_L2_CACHE_CTL;
                req.value = 1;

                rc = msm_rpm_set(MSM_RPM_CTX_SET_0, &req, 1);
                if (rc) {
                        pr_err("%s: failed to request L2 cache: %d\n",
                                __func__, rc);
                        goto init_exit;
                }

                req.id = MSM_RPMRS_ID_APPS_L2_CACHE_CTL;
                req.value = 0;

                rc = msm_rpmrs_set(MSM_RPM_CTX_SET_SLEEP, &req, 1);
                if (rc) {
                        pr_err("%s: failed to initialize L2 cache for sleep: %d\n",
                                __func__, rc);
                        goto init_exit;
                }
        }

        /* Enable RPM SWFI on Apps initialization */
        req.id = MSM_RPMRS_ID_RPM_CTL;
        req.value = 0;

        rc = msm_rpmrs_set(MSM_RPM_CTX_SET_0, &req, 1);
        if (rc) {
                pr_err("%s: failed to initialize RPM halt: %d\n",
                        __func__, rc);
                goto init_exit;
        }

        rc = msm_rpmrs_resource_sysfs_add();

init_exit:
        return rc;
}
device_initcall(msm_rpmrs_init);

static int __init msm_rpmrs_early_init(void)
{
        int i, k;

        /* Initialize listed bitmap for valid resource IDs */
        for (i = 0; i < ARRAY_SIZE(msm_rpmrs_resources); i++) {
                for (k = 0; k < msm_rpmrs_resources[i]->size; k++)
                        set_bit(msm_rpmrs_resources[i]->rs[k].id,
                                msm_rpmrs_listed);
        }

        return 0;
}
early_initcall(msm_rpmrs_early_init);

#ifdef CONFIG_MSM_L2_SPM
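/*
 * Publish the L2 power-collapse counter in IMEM (offset L2_PC_COUNTER_ADDR)
 * so TZ can reinitialize L2 on warmboot, and switch the L2 resource over to
 * SPM control: the SPM-based beyond_limits check replaces the RPM-buffered
 * one, and the aggregate/restore hooks are cleared.
 */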
static int __init msm_rpmrs_l2_counter_init(void)
{
        msm_rpmrs_l2_counter_addr = MSM_IMEM_BASE + L2_PC_COUNTER_ADDR;
        writel_relaxed(msm_rpmrs_l2_reset_count, msm_rpmrs_l2_counter_addr);
        mb();

        msm_rpmrs_l2_cache.beyond_limits = msm_spm_l2_cache_beyond_limits;
        msm_rpmrs_l2_cache.aggregate = NULL;
        msm_rpmrs_l2_cache.restore = NULL;

        register_hotcpu_notifier(&rpmrs_cpu_notifier);

        return 0;
}
early_initcall(msm_rpmrs_l2_counter_init);
#endif