/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/bug.h>
#include <linux/mutex.h>
#include <linux/proc_fs.h>
#include <linux/spinlock.h>
#include <linux/cpu.h>
#include <linux/slab.h>	/* kmalloc()/kzalloc()/kfree() */
#include <mach/rpm.h>
#include <mach/msm_iomap.h>
#include <asm/mach-types.h>
#include <linux/io.h>
#include <mach/socinfo.h>
#include "mpm.h"
#include "rpm_resources.h"
#include "spm.h"
#include "idle.h"

/******************************************************************************
 * Debug Definitions
 *****************************************************************************/

enum {
	MSM_RPMRS_DEBUG_OUTPUT = BIT(0),
	MSM_RPMRS_DEBUG_BUFFER = BIT(1),
};

static int msm_rpmrs_debug_mask;
module_param_named(
	debug_mask, msm_rpmrs_debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP
);

static struct msm_rpmrs_level *msm_rpmrs_levels;
static int msm_rpmrs_level_count;

static bool msm_rpmrs_pxo_beyond_limits(struct msm_rpmrs_limits *limits);
static void msm_rpmrs_aggregate_pxo(struct msm_rpmrs_limits *limits);
static void msm_rpmrs_restore_pxo(void);
static bool msm_rpmrs_l2_cache_beyond_limits(struct msm_rpmrs_limits *limits);
static void msm_rpmrs_aggregate_l2_cache(struct msm_rpmrs_limits *limits);
static void msm_rpmrs_restore_l2_cache(void);
static bool msm_rpmrs_vdd_mem_beyond_limits(struct msm_rpmrs_limits *limits);
static void msm_rpmrs_aggregate_vdd_mem(struct msm_rpmrs_limits *limits);
static void msm_rpmrs_restore_vdd_mem(void);
static bool msm_rpmrs_vdd_dig_beyond_limits(struct msm_rpmrs_limits *limits);
static void msm_rpmrs_aggregate_vdd_dig(struct msm_rpmrs_limits *limits);
static void msm_rpmrs_restore_vdd_dig(void);

static ssize_t msm_rpmrs_resource_attr_show(
	struct kobject *kobj, struct kobj_attribute *attr, char *buf);
static ssize_t msm_rpmrs_resource_attr_store(struct kobject *kobj,
	struct kobj_attribute *attr, const char *buf, size_t count);

#define MSM_RPMRS_MAX_RS_REGISTER_COUNT 2

#define RPMRS_ATTR(_name) \
	__ATTR(_name, S_IRUGO|S_IWUSR, \
		msm_rpmrs_resource_attr_show, msm_rpmrs_resource_attr_store)

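/*
 * A low-power resource as tracked by this driver: up to
 * MSM_RPMRS_MAX_RS_REGISTER_COUNT RPM registers (rs/size), a sysfs
 * attribute (ko_attr) exposing enable_low_power, and optional callbacks
 * used around sleep entry: beyond_limits() reports whether the buffered
 * request exceeds what a sleep level tolerates, aggregate() folds the
 * level's limits into the buffered value (saving the original), and
 * restore() puts the original value back after the flush.
 */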
struct msm_rpmrs_resource {
	struct msm_rpm_iv_pair rs[MSM_RPMRS_MAX_RS_REGISTER_COUNT];
	uint32_t size;
	char *name;

	uint32_t enable_low_power;

	bool (*beyond_limits)(struct msm_rpmrs_limits *limits);
	void (*aggregate)(struct msm_rpmrs_limits *limits);
	void (*restore)(void);

	struct kobj_attribute ko_attr;
};

static struct msm_rpmrs_resource msm_rpmrs_pxo = {
	.rs[0].id = MSM_RPMRS_ID_PXO_CLK,
	.size = 1,
	.name = "pxo",
	.beyond_limits = msm_rpmrs_pxo_beyond_limits,
	.aggregate = msm_rpmrs_aggregate_pxo,
	.restore = msm_rpmrs_restore_pxo,
	.ko_attr = RPMRS_ATTR(pxo),
};

static struct msm_rpmrs_resource msm_rpmrs_l2_cache = {
	.rs[0].id = MSM_RPMRS_ID_APPS_L2_CACHE_CTL,
	.size = 1,
	.name = "L2_cache",
	.beyond_limits = msm_rpmrs_l2_cache_beyond_limits,
	.aggregate = msm_rpmrs_aggregate_l2_cache,
	.restore = msm_rpmrs_restore_l2_cache,
	.ko_attr = RPMRS_ATTR(L2_cache),
};

static struct msm_rpmrs_resource msm_rpmrs_vdd_mem = {
	.rs[0].id = MSM_RPMRS_ID_VDD_MEM_0,
	.rs[1].id = MSM_RPMRS_ID_VDD_MEM_1,
	.size = 2,
	.name = "vdd_mem",
	.beyond_limits = msm_rpmrs_vdd_mem_beyond_limits,
	.aggregate = msm_rpmrs_aggregate_vdd_mem,
	.restore = msm_rpmrs_restore_vdd_mem,
	.ko_attr = RPMRS_ATTR(vdd_mem),
};

static struct msm_rpmrs_resource msm_rpmrs_vdd_dig = {
	.rs[0].id = MSM_RPMRS_ID_VDD_DIG_0,
	.rs[1].id = MSM_RPMRS_ID_VDD_DIG_1,
	.size = 2,
	.name = "vdd_dig",
	.beyond_limits = msm_rpmrs_vdd_dig_beyond_limits,
	.aggregate = msm_rpmrs_aggregate_vdd_dig,
	.restore = msm_rpmrs_restore_vdd_dig,
	.ko_attr = RPMRS_ATTR(vdd_dig),
};

static struct msm_rpmrs_resource msm_rpmrs_rpm_ctl = {
	.rs[0].id = MSM_RPMRS_ID_RPM_CTL,
	.size = 1,
	.name = "rpm_ctl",
	.beyond_limits = NULL,
	.aggregate = NULL,
	.restore = NULL,
	.ko_attr = RPMRS_ATTR(rpm_ctl),
};

static struct msm_rpmrs_resource *msm_rpmrs_resources[] = {
	&msm_rpmrs_pxo,
	&msm_rpmrs_l2_cache,
	&msm_rpmrs_vdd_mem,
	&msm_rpmrs_vdd_dig,
	&msm_rpmrs_rpm_ctl,
};

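/*
 * msm_rpmrs_buffer holds the value last requested for each RPM register
 * in the sleep set, indexed by register id; msm_rpmrs_buffered marks
 * which ids currently hold a buffered value, and msm_rpmrs_listed marks
 * the ids that belong to the resources above (see
 * msm_rpmrs_early_init()).  All three are protected by msm_rpmrs_lock.
 */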
static uint32_t msm_rpmrs_buffer[MSM_RPM_ID_LAST + 1];
static DECLARE_BITMAP(msm_rpmrs_buffered, MSM_RPM_ID_LAST + 1);
static DECLARE_BITMAP(msm_rpmrs_listed, MSM_RPM_ID_LAST + 1);
static DEFINE_SPINLOCK(msm_rpmrs_lock);

#define MSM_RPMRS_VDD(v) ((v) & (MSM_RPMRS_VDD_MASK))

/******************************************************************************
 * Attribute Definitions
 *****************************************************************************/

static struct attribute *msm_rpmrs_attributes[] = {
	&msm_rpmrs_pxo.ko_attr.attr,
	&msm_rpmrs_l2_cache.ko_attr.attr,
	&msm_rpmrs_vdd_mem.ko_attr.attr,
	&msm_rpmrs_vdd_dig.ko_attr.attr,
	NULL,
};

static struct attribute *msm_rpmrs_mode_attributes[] = {
	&msm_rpmrs_rpm_ctl.ko_attr.attr,
	NULL,
};

static struct attribute_group msm_rpmrs_attribute_group = {
	.attrs = msm_rpmrs_attributes,
};

static struct attribute_group msm_rpmrs_mode_attribute_group = {
	.attrs = msm_rpmrs_mode_attributes,
};

#define GET_RS_FROM_ATTR(attr) \
	(container_of(attr, struct msm_rpmrs_resource, ko_attr))

/******************************************************************************
 * Resource Specific Functions
 *****************************************************************************/

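/*
 * Buffer the sleep-clock wakeup trigger for the sleep set: the timed
 * trigger timeout register is zeroed and the caller's 32 kHz sleep-clock
 * tick count is buffered alongside it (presumably so the RPM can program
 * the timed wakeup); msm_rpmrs_restore_sclk() drops both entries again
 * once the buffer has been flushed.
 */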
static void msm_rpmrs_aggregate_sclk(uint32_t sclk_count)
{
	msm_rpmrs_buffer[MSM_RPM_ID_TRIGGER_TIMED_TO] = 0;
	set_bit(MSM_RPM_ID_TRIGGER_TIMED_TO, msm_rpmrs_buffered);
	msm_rpmrs_buffer[MSM_RPM_ID_TRIGGER_TIMED_SCLK_COUNT] = sclk_count;
	set_bit(MSM_RPM_ID_TRIGGER_TIMED_SCLK_COUNT, msm_rpmrs_buffered);
}

static void msm_rpmrs_restore_sclk(void)
{
	clear_bit(MSM_RPM_ID_TRIGGER_TIMED_SCLK_COUNT, msm_rpmrs_buffered);
	msm_rpmrs_buffer[MSM_RPM_ID_TRIGGER_TIMED_SCLK_COUNT] = 0;
	clear_bit(MSM_RPM_ID_TRIGGER_TIMED_TO, msm_rpmrs_buffered);
	msm_rpmrs_buffer[MSM_RPM_ID_TRIGGER_TIMED_TO] = 0;
}

static bool msm_rpmrs_pxo_beyond_limits(struct msm_rpmrs_limits *limits)
{
	struct msm_rpmrs_resource *rs = &msm_rpmrs_pxo;
	uint32_t pxo;

	if (rs->enable_low_power && test_bit(rs->rs[0].id, msm_rpmrs_buffered))
		pxo = msm_rpmrs_buffer[rs->rs[0].id];
	else
		pxo = MSM_RPMRS_PXO_ON;

	return pxo > limits->pxo;
}

static void msm_rpmrs_aggregate_pxo(struct msm_rpmrs_limits *limits)
{
	struct msm_rpmrs_resource *rs = &msm_rpmrs_pxo;
	uint32_t *buf = &msm_rpmrs_buffer[rs->rs[0].id];

	if (test_bit(rs->rs[0].id, msm_rpmrs_buffered)) {
		rs->rs[0].value = *buf;
		if (limits->pxo > *buf)
			*buf = limits->pxo;
		if (MSM_RPMRS_DEBUG_OUTPUT & msm_rpmrs_debug_mask)
			pr_info("%s: %d (0x%x)\n", __func__, *buf, *buf);
	}
}

static void msm_rpmrs_restore_pxo(void)
{
	struct msm_rpmrs_resource *rs = &msm_rpmrs_pxo;

	if (test_bit(rs->rs[0].id, msm_rpmrs_buffered))
		msm_rpmrs_buffer[rs->rs[0].id] = rs->rs[0].value;
}

static bool msm_rpmrs_l2_cache_beyond_limits(struct msm_rpmrs_limits *limits)
{
	struct msm_rpmrs_resource *rs = &msm_rpmrs_l2_cache;
	uint32_t l2_cache;

	if (rs->enable_low_power && test_bit(rs->rs[0].id, msm_rpmrs_buffered))
		l2_cache = msm_rpmrs_buffer[rs->rs[0].id];
	else
		l2_cache = MSM_RPMRS_L2_CACHE_ACTIVE;

	return l2_cache > limits->l2_cache;
}

static void msm_rpmrs_aggregate_l2_cache(struct msm_rpmrs_limits *limits)
{
	struct msm_rpmrs_resource *rs = &msm_rpmrs_l2_cache;
	uint32_t *buf = &msm_rpmrs_buffer[rs->rs[0].id];

	if (test_bit(rs->rs[0].id, msm_rpmrs_buffered)) {
		rs->rs[0].value = *buf;
		if (limits->l2_cache > *buf)
			*buf = limits->l2_cache;

		if (MSM_RPMRS_DEBUG_OUTPUT & msm_rpmrs_debug_mask)
			pr_info("%s: %d (0x%x)\n", __func__, *buf, *buf);
	}
}

static bool msm_spm_l2_cache_beyond_limits(struct msm_rpmrs_limits *limits)
{
	struct msm_rpmrs_resource *rs = &msm_rpmrs_l2_cache;
	uint32_t l2_cache = rs->rs[0].value;

	if (!rs->enable_low_power)
		l2_cache = MSM_RPMRS_L2_CACHE_ACTIVE;

	return l2_cache > limits->l2_cache;
}

static void msm_rpmrs_restore_l2_cache(void)
{
	struct msm_rpmrs_resource *rs = &msm_rpmrs_l2_cache;

	if (test_bit(rs->rs[0].id, msm_rpmrs_buffered))
		msm_rpmrs_buffer[rs->rs[0].id] = rs->rs[0].value;
}

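/*
 * For the vdd_mem/vdd_dig resources below, MSM_RPMRS_VDD() masks out the
 * voltage field of a request, and enable_low_power acts as a tri-state
 * knob: 0 keeps the rail active, 1 allows high-voltage retention, and
 * anything higher allows low-voltage retention.
 */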
static bool msm_rpmrs_vdd_mem_beyond_limits(struct msm_rpmrs_limits *limits)
{
	struct msm_rpmrs_resource *rs = &msm_rpmrs_vdd_mem;
	uint32_t vdd_mem;

	if (test_bit(rs->rs[0].id, msm_rpmrs_buffered)) {
		uint32_t buffered_value = msm_rpmrs_buffer[rs->rs[0].id];

		if (rs->enable_low_power == 0)
			vdd_mem = MSM_RPMRS_VDD_MEM_ACTIVE;
		else if (rs->enable_low_power == 1)
			vdd_mem = MSM_RPMRS_VDD_MEM_RET_HIGH;
		else
			vdd_mem = MSM_RPMRS_VDD_MEM_RET_LOW;

		if (MSM_RPMRS_VDD(buffered_value) > MSM_RPMRS_VDD(vdd_mem))
			vdd_mem = buffered_value;
	} else {
		vdd_mem = MSM_RPMRS_VDD_MEM_ACTIVE;
	}

	return MSM_RPMRS_VDD(vdd_mem) >
		MSM_RPMRS_VDD(limits->vdd_mem_upper_bound);
}

static void msm_rpmrs_aggregate_vdd_mem(struct msm_rpmrs_limits *limits)
{
	struct msm_rpmrs_resource *rs = &msm_rpmrs_vdd_mem;
	uint32_t *buf = &msm_rpmrs_buffer[rs->rs[0].id];

	if (test_bit(rs->rs[0].id, msm_rpmrs_buffered)) {
		rs->rs[0].value = *buf;
		if (MSM_RPMRS_VDD(limits->vdd_mem) > MSM_RPMRS_VDD(*buf)) {
			*buf &= ~MSM_RPMRS_VDD_MASK;
			*buf |= MSM_RPMRS_VDD(limits->vdd_mem);
		}

		if (MSM_RPMRS_DEBUG_OUTPUT & msm_rpmrs_debug_mask)
			pr_info("%s: vdd %d (0x%x)\n", __func__,
				MSM_RPMRS_VDD(*buf), MSM_RPMRS_VDD(*buf));
	}
}

static void msm_rpmrs_restore_vdd_mem(void)
{
	struct msm_rpmrs_resource *rs = &msm_rpmrs_vdd_mem;

	if (test_bit(rs->rs[0].id, msm_rpmrs_buffered))
		msm_rpmrs_buffer[rs->rs[0].id] = rs->rs[0].value;
}

static bool msm_rpmrs_vdd_dig_beyond_limits(struct msm_rpmrs_limits *limits)
{
	struct msm_rpmrs_resource *rs = &msm_rpmrs_vdd_dig;
	uint32_t vdd_dig;

	if (test_bit(rs->rs[0].id, msm_rpmrs_buffered)) {
		uint32_t buffered_value = msm_rpmrs_buffer[rs->rs[0].id];

		if (rs->enable_low_power == 0)
			vdd_dig = MSM_RPMRS_VDD_DIG_ACTIVE;
		else if (rs->enable_low_power == 1)
			vdd_dig = MSM_RPMRS_VDD_DIG_RET_HIGH;
		else
			vdd_dig = MSM_RPMRS_VDD_DIG_RET_LOW;

		if (MSM_RPMRS_VDD(buffered_value) > MSM_RPMRS_VDD(vdd_dig))
			vdd_dig = buffered_value;
	} else {
		vdd_dig = MSM_RPMRS_VDD_DIG_ACTIVE;
	}

	return MSM_RPMRS_VDD(vdd_dig) >
		MSM_RPMRS_VDD(limits->vdd_dig_upper_bound);
}

static void msm_rpmrs_aggregate_vdd_dig(struct msm_rpmrs_limits *limits)
{
	struct msm_rpmrs_resource *rs = &msm_rpmrs_vdd_dig;
	uint32_t *buf = &msm_rpmrs_buffer[rs->rs[0].id];

	if (test_bit(rs->rs[0].id, msm_rpmrs_buffered)) {
		rs->rs[0].value = *buf;
		if (MSM_RPMRS_VDD(limits->vdd_dig) > MSM_RPMRS_VDD(*buf)) {
			*buf &= ~MSM_RPMRS_VDD_MASK;
			*buf |= MSM_RPMRS_VDD(limits->vdd_dig);
		}

		if (MSM_RPMRS_DEBUG_OUTPUT & msm_rpmrs_debug_mask)
			pr_info("%s: vdd %d (0x%x)\n", __func__,
				MSM_RPMRS_VDD(*buf), MSM_RPMRS_VDD(*buf));
	}
}

static void msm_rpmrs_restore_vdd_dig(void)
{
	struct msm_rpmrs_resource *rs = &msm_rpmrs_vdd_dig;

	if (test_bit(rs->rs[0].id, msm_rpmrs_buffered))
		msm_rpmrs_buffer[rs->rs[0].id] = rs->rs[0].value;
}

/******************************************************************************
 * Buffering Functions
 *****************************************************************************/

static bool msm_rpmrs_irqs_detectable(struct msm_rpmrs_limits *limits,
		bool irqs_detect, bool gpio_detect)
{
	if (limits->vdd_dig_upper_bound <= MSM_RPMRS_VDD_DIG_RET_HIGH)
		return irqs_detect;

	if (limits->pxo == MSM_RPMRS_PXO_OFF)
		return gpio_detect;

	return true;
}

static bool msm_rpmrs_use_mpm(struct msm_rpmrs_limits *limits)
{
	return (limits->pxo == MSM_RPMRS_PXO_OFF) ||
		(limits->vdd_dig <= MSM_RPMRS_VDD_DIG_RET_HIGH);
}

static void msm_rpmrs_update_levels(void)
{
	int i, k;

	for (i = 0; i < msm_rpmrs_level_count; i++) {
		struct msm_rpmrs_level *level = &msm_rpmrs_levels[i];

		if (level->sleep_mode != MSM_PM_SLEEP_MODE_POWER_COLLAPSE)
			continue;

		level->available = true;

		for (k = 0; k < ARRAY_SIZE(msm_rpmrs_resources); k++) {
			struct msm_rpmrs_resource *rs = msm_rpmrs_resources[k];

			if (rs->beyond_limits &&
					rs->beyond_limits(&level->rs_limits)) {
				level->available = false;
				break;
			}
		}
	}
}

/*
 * Return value:
 *   0: no entries in <req> are on our resource list
 *   1: one or more entries in <req> are on our resource list
 *   -EINVAL: invalid id in <req> array
 */
static int msm_rpmrs_buffer_request(struct msm_rpm_iv_pair *req, int count)
{
	bool listed;
	int i;

	for (i = 0; i < count; i++)
		if (req[i].id > MSM_RPM_ID_LAST)
			return -EINVAL;

	for (i = 0, listed = false; i < count; i++) {
		msm_rpmrs_buffer[req[i].id] = req[i].value;
		set_bit(req[i].id, msm_rpmrs_buffered);

		if (MSM_RPMRS_DEBUG_BUFFER & msm_rpmrs_debug_mask)
			pr_info("%s: reg %d: 0x%x\n",
				__func__, req[i].id, req[i].value);

		if (listed)
			continue;

		if (test_bit(req[i].id, msm_rpmrs_listed))
			listed = true;
	}

	return listed ? 1 : 0;
}

/*
 * Return value:
 *   0: no entries in <req> are on our resource list
 *   1: one or more entries in <req> are on our resource list
 *   -EINVAL: invalid id in <req> array
 */
static int msm_rpmrs_clear_buffer(struct msm_rpm_iv_pair *req, int count)
{
	bool listed;
	int i;

	for (i = 0; i < count; i++)
		if (req[i].id > MSM_RPM_ID_LAST)
			return -EINVAL;

	for (i = 0, listed = false; i < count; i++) {
		msm_rpmrs_buffer[req[i].id] = 0;
		clear_bit(req[i].id, msm_rpmrs_buffered);

		if (MSM_RPMRS_DEBUG_BUFFER & msm_rpmrs_debug_mask)
			pr_info("%s: reg %d\n", __func__, req[i].id);

		if (listed)
			continue;

		if (test_bit(req[i].id, msm_rpmrs_listed))
			listed = true;
	}

	return listed ? 1 : 0;
}

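/*
 * With CONFIG_MSM_L2_SPM, the L2 low-power mode is requested through the
 * SPM driver rather than the RPM sleep set: the rpmrs L2 limit is mapped
 * to the matching MSM_SPM_L2_MODE_* value, and the L2 flush flag is
 * raised only for HSFS (power collapse), where cache contents are lost.
 */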
#ifdef CONFIG_MSM_L2_SPM
static int msm_rpmrs_flush_L2(struct msm_rpmrs_limits *limits, int notify_rpm)
{
	int rc = 0;
	int lpm;

	switch (limits->l2_cache) {
	case MSM_RPMRS_L2_CACHE_HSFS_OPEN:
		lpm = MSM_SPM_L2_MODE_POWER_COLLAPSE;
		msm_pm_set_l2_flush_flag(1);
		break;
	case MSM_RPMRS_L2_CACHE_GDHS:
		lpm = MSM_SPM_L2_MODE_GDHS;
		break;
	case MSM_RPMRS_L2_CACHE_RETENTION:
		lpm = MSM_SPM_L2_MODE_RETENTION;
		break;
	default:
	case MSM_RPMRS_L2_CACHE_ACTIVE:
		lpm = MSM_SPM_L2_MODE_DISABLED;
		break;
	}

	rc = msm_spm_l2_set_low_power_mode(lpm, notify_rpm);
	if (MSM_RPMRS_DEBUG_BUFFER & msm_rpmrs_debug_mask)
		pr_info("%s: Requesting low power mode %d returned %d\n",
			__func__, lpm, rc);

	return rc;
}

static void msm_rpmrs_L2_restore(struct msm_rpmrs_limits *limits,
		bool notify_rpm, bool collapsed)
{
	msm_spm_l2_set_low_power_mode(MSM_SPM_MODE_DISABLED, notify_rpm);
	msm_pm_set_l2_flush_flag(0);
}
#else
static int msm_rpmrs_flush_L2(struct msm_rpmrs_limits *limits, int notify_rpm)
{
	return 0;
}

static void msm_rpmrs_L2_restore(struct msm_rpmrs_limits *limits,
		bool notify_rpm, bool collapsed)
{
}
#endif

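/*
 * Flush the buffered sleep set to the RPM: aggregate the sclk trigger and
 * every resource's limits into the buffer, send all buffered registers
 * via msm_rpm_set_noirq(), then restore the pre-aggregation values so the
 * buffer again reflects only what callers actually requested.  On
 * success, the buffered bitmap is trimmed down to the driver-listed ids.
 */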
static int msm_rpmrs_flush_buffer(
	uint32_t sclk_count, struct msm_rpmrs_limits *limits, int from_idle)
{
	struct msm_rpm_iv_pair *req;
	int count;
	int rc;
	int i;

	msm_rpmrs_aggregate_sclk(sclk_count);
	for (i = 0; i < ARRAY_SIZE(msm_rpmrs_resources); i++) {
		if (msm_rpmrs_resources[i]->aggregate)
			msm_rpmrs_resources[i]->aggregate(limits);
	}

	count = bitmap_weight(msm_rpmrs_buffered, MSM_RPM_ID_LAST + 1);

	req = kmalloc(sizeof(*req) * count, GFP_ATOMIC);
	if (!req) {
		rc = -ENOMEM;
		goto flush_buffer_restore;
	}

	count = 0;
	i = find_first_bit(msm_rpmrs_buffered, MSM_RPM_ID_LAST + 1);

	while (i < MSM_RPM_ID_LAST + 1) {
		if (MSM_RPMRS_DEBUG_OUTPUT & msm_rpmrs_debug_mask)
			pr_info("%s: reg %d: 0x%x\n",
				__func__, i, msm_rpmrs_buffer[i]);

		req[count].id = i;
		req[count].value = msm_rpmrs_buffer[i];
		count++;

		i = find_next_bit(msm_rpmrs_buffered, MSM_RPM_ID_LAST+1, i+1);
	}

	rc = msm_rpm_set_noirq(MSM_RPM_CTX_SET_SLEEP, req, count);
	kfree(req);

	if (rc)
		goto flush_buffer_restore;

	bitmap_and(msm_rpmrs_buffered,
		msm_rpmrs_buffered, msm_rpmrs_listed, MSM_RPM_ID_LAST + 1);

flush_buffer_restore:
	for (i = 0; i < ARRAY_SIZE(msm_rpmrs_resources); i++) {
		if (msm_rpmrs_resources[i]->restore)
			msm_rpmrs_resources[i]->restore();
	}
	msm_rpmrs_restore_sclk();

	if (rc)
		pr_err("%s: failed: %d\n", __func__, rc);
	return rc;
}

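/*
 * Sleep-set requests are only buffered here (and the set of usable sleep
 * levels re-evaluated); they reach the RPM when msm_rpmrs_flush_buffer()
 * runs at sleep entry.  Active-set requests are passed straight through
 * to msm_rpm_set()/msm_rpm_set_noirq() and their clear counterparts.
 */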
static int msm_rpmrs_set_common(
	int ctx, struct msm_rpm_iv_pair *req, int count, bool noirq)
{
	if (ctx == MSM_RPM_CTX_SET_SLEEP) {
		unsigned long flags;
		int rc;

		spin_lock_irqsave(&msm_rpmrs_lock, flags);
		rc = msm_rpmrs_buffer_request(req, count);
		if (rc > 0) {
			msm_rpmrs_update_levels();
			rc = 0;
		}
		spin_unlock_irqrestore(&msm_rpmrs_lock, flags);

		return rc;
	}

	if (noirq)
		return msm_rpm_set_noirq(ctx, req, count);
	else
		return msm_rpm_set(ctx, req, count);
}

static int msm_rpmrs_clear_common(
	int ctx, struct msm_rpm_iv_pair *req, int count, bool noirq)
{
	if (ctx == MSM_RPM_CTX_SET_SLEEP) {
		unsigned long flags;
		int rc;

		spin_lock_irqsave(&msm_rpmrs_lock, flags);
		rc = msm_rpmrs_clear_buffer(req, count);
		if (rc > 0) {
			msm_rpmrs_update_levels();
			rc = 0;
		}
		spin_unlock_irqrestore(&msm_rpmrs_lock, flags);

		if (rc < 0)
			return rc;
	}

	if (noirq)
		return msm_rpm_clear_noirq(ctx, req, count);
	else
		return msm_rpm_clear(ctx, req, count);
}

/******************************************************************************
 * Attribute Functions
 *****************************************************************************/

static ssize_t msm_rpmrs_resource_attr_show(
	struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct kernel_param kp;
	unsigned long flags;
	unsigned int temp;
	int rc;

	spin_lock_irqsave(&msm_rpmrs_lock, flags);
	/* special case active-set signal for MSM_RPMRS_ID_RPM_CTL */
	if (GET_RS_FROM_ATTR(attr)->rs[0].id == MSM_RPMRS_ID_RPM_CTL)
		temp = GET_RS_FROM_ATTR(attr)->rs[0].value;
	else
		temp = GET_RS_FROM_ATTR(attr)->enable_low_power;
	spin_unlock_irqrestore(&msm_rpmrs_lock, flags);

	kp.arg = &temp;
	rc = param_get_uint(buf, &kp);

	if (rc > 0) {
		strlcat(buf, "\n", PAGE_SIZE);
		rc++;
	}

	return rc;
}

static ssize_t msm_rpmrs_resource_attr_store(struct kobject *kobj,
	struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct kernel_param kp;
	unsigned long flags;
	unsigned int temp;
	int rc;

	kp.arg = &temp;
	rc = param_set_uint(buf, &kp);
	if (rc)
		return rc;

	spin_lock_irqsave(&msm_rpmrs_lock, flags);
	GET_RS_FROM_ATTR(attr)->enable_low_power = temp;

	/* special case active-set signal for MSM_RPMRS_ID_RPM_CTL */
	if (GET_RS_FROM_ATTR(attr)->rs[0].id == MSM_RPMRS_ID_RPM_CTL) {
		struct msm_rpm_iv_pair req;
		req.id = MSM_RPMRS_ID_RPM_CTL;
		req.value = GET_RS_FROM_ATTR(attr)->enable_low_power;
		GET_RS_FROM_ATTR(attr)->rs[0].value = req.value;

		rc = msm_rpm_set_noirq(MSM_RPM_CTX_SET_0, &req, 1);
		if (rc) {
			pr_err("%s: failed to request RPM_CTL to %d: %d\n",
				__func__, req.value, rc);
		}
	}

	msm_rpmrs_update_levels();
	spin_unlock_irqrestore(&msm_rpmrs_lock, flags);

	return count;
}

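/*
 * Expose the per-resource knobs under this module's sysfs directory
 * (nominally /sys/module/rpm_resources/, depending on KBUILD_MODNAME):
 * enable_low_power/{pxo,L2_cache,vdd_mem,vdd_dig} and mode/rpm_ctl.
 */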
static int __init msm_rpmrs_resource_sysfs_add(void)
{
	struct kobject *module_kobj = NULL;
	struct kobject *low_power_kobj = NULL;
	struct kobject *mode_kobj = NULL;
	int rc = 0;

	module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
	if (!module_kobj) {
		pr_err("%s: cannot find kobject for module %s\n",
			__func__, KBUILD_MODNAME);
		rc = -ENOENT;
		goto resource_sysfs_add_exit;
	}

	low_power_kobj = kobject_create_and_add(
				"enable_low_power", module_kobj);
	if (!low_power_kobj) {
		pr_err("%s: cannot create kobject\n", __func__);
		rc = -ENOMEM;
		goto resource_sysfs_add_exit;
	}

	mode_kobj = kobject_create_and_add(
				"mode", module_kobj);
	if (!mode_kobj) {
		pr_err("%s: cannot create kobject\n", __func__);
		rc = -ENOMEM;
		goto resource_sysfs_add_exit;
	}

	rc = sysfs_create_group(low_power_kobj, &msm_rpmrs_attribute_group);
	if (rc) {
		pr_err("%s: cannot create kobject attribute group\n", __func__);
		goto resource_sysfs_add_exit;
	}

	rc = sysfs_create_group(mode_kobj, &msm_rpmrs_mode_attribute_group);
	if (rc) {
		pr_err("%s: cannot create kobject attribute group\n", __func__);
		goto resource_sysfs_add_exit;
	}

	rc = 0;
resource_sysfs_add_exit:
	if (rc) {
		if (low_power_kobj)
			sysfs_remove_group(low_power_kobj,
				&msm_rpmrs_attribute_group);
		kobject_del(low_power_kobj);
		kobject_del(mode_kobj);
	}

	return rc;
}

/******************************************************************************
 * Public Functions
 *****************************************************************************/

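/*
 * Typical usage (illustrative sketch only, not taken from a caller in
 * this file): buffer a sleep-set vote saying PXO may be shut off while
 * the Apps processor sleeps.
 *
 *	struct msm_rpm_iv_pair req = {
 *		.id = MSM_RPMRS_ID_PXO_CLK,
 *		.value = MSM_RPMRS_PXO_OFF,
 *	};
 *	int rc = msm_rpmrs_set(MSM_RPM_CTX_SET_SLEEP, &req, 1);
 */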
int msm_rpmrs_set(int ctx, struct msm_rpm_iv_pair *req, int count)
{
	return msm_rpmrs_set_common(ctx, req, count, false);
}

int msm_rpmrs_set_noirq(int ctx, struct msm_rpm_iv_pair *req, int count)
{
	WARN(!irqs_disabled(), "msm_rpmrs_set_noirq can only be called "
		"safely when local irqs are disabled.  Consider using "
		"msm_rpmrs_set or msm_rpmrs_set_nosleep instead.");
	return msm_rpmrs_set_common(ctx, req, count, true);
}

/* Allow individual bits of an RPM resource to be set; currently used only
 * for the active-context resource, i.e. RPM_CTL.  The API is generic
 * enough that it could be extended to other resources in the future.
 */
int msm_rpmrs_set_bits_noirq(int ctx, struct msm_rpm_iv_pair *req, int count,
		int *mask)
{
	unsigned long flags;
	int i, j;
	int rc = -1;
	struct msm_rpmrs_resource *rs;

	if (ctx != MSM_RPM_CTX_SET_0)
		return -ENOSYS;

	spin_lock_irqsave(&msm_rpmrs_lock, flags);
	for (i = 0; i < ARRAY_SIZE(msm_rpmrs_resources); i++) {
		rs = msm_rpmrs_resources[i];
		if (rs->rs[0].id == req[0].id && rs->size == count) {
			for (j = 0; j < rs->size; j++) {
				rs->rs[j].value &= ~mask[j];
				rs->rs[j].value |= req[j].value & mask[j];
			}
			break;
		}
	}

	if (i != ARRAY_SIZE(msm_rpmrs_resources)) {
		rc = msm_rpm_set_noirq(MSM_RPM_CTX_SET_0, &rs->rs[0], rs->size);
		if (rc) {
			for (j = 0; j < rs->size; j++) {
				pr_err("%s: failed to request %d to %d: %d\n",
					__func__,
					rs->rs[j].id, rs->rs[j].value, rc);
			}
		}
	}
	spin_unlock_irqrestore(&msm_rpmrs_lock, flags);

	return rc;
}

int msm_rpmrs_clear(int ctx, struct msm_rpm_iv_pair *req, int count)
{
	return msm_rpmrs_clear_common(ctx, req, count, false);
}

int msm_rpmrs_clear_noirq(int ctx, struct msm_rpm_iv_pair *req, int count)
{
	WARN(!irqs_disabled(), "msm_rpmrs_clear_noirq can only be called "
		"safely when local irqs are disabled.  Consider using "
		"msm_rpmrs_clear or msm_rpmrs_clear_nosleep instead.");
	return msm_rpmrs_clear_common(ctx, req, count, true);
}

void msm_rpmrs_show_resources(void)
{
	struct msm_rpmrs_resource *rs;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&msm_rpmrs_lock, flags);
	for (i = 0; i < ARRAY_SIZE(msm_rpmrs_resources); i++) {
		rs = msm_rpmrs_resources[i];
		if (rs->rs[0].id < MSM_RPM_ID_LAST + 1)
			pr_info("%s: resource %s: buffered %d, value 0x%x\n",
				__func__, rs->name,
				test_bit(rs->rs[0].id, msm_rpmrs_buffered),
				msm_rpmrs_buffer[rs->rs[0].id]);
		else
			pr_info("%s: resource %s: value %d\n",
				__func__, rs->name, rs->rs[0].value);
	}
	spin_unlock_irqrestore(&msm_rpmrs_lock, flags);
}

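/*
 * Pick the lowest-power sleep level usable for the expected sleep time.
 * The comparison metric is the estimated average power over sleep_us:
 *
 *	power = (energy_overhead
 *		 + steady_state_power * (sleep_us - time_overhead_us))
 *		/ sleep_us
 *
 * with the short-sleep cases (sleep_us <= 1, or sleep_us within the
 * entry/exit overhead) and the very-long-sleep case (more than ~1024x
 * the overhead, where the overhead terms become negligible) special-cased
 * below to avoid the division.
 */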
struct msm_rpmrs_limits *msm_rpmrs_lowest_limits(
	bool from_idle, enum msm_pm_sleep_mode sleep_mode, uint32_t latency_us,
	uint32_t sleep_us)
{
	unsigned int cpu = smp_processor_id();
	struct msm_rpmrs_level *best_level = NULL;
	bool irqs_detectable = false;
	bool gpio_detectable = false;
	int i;

	if (sleep_mode == MSM_PM_SLEEP_MODE_POWER_COLLAPSE) {
		irqs_detectable = msm_mpm_irqs_detectable(from_idle);
		gpio_detectable = msm_mpm_gpio_irqs_detectable(from_idle);
	}

	for (i = 0; i < msm_rpmrs_level_count; i++) {
		struct msm_rpmrs_level *level = &msm_rpmrs_levels[i];
		uint32_t power;

		if (!level->available)
			continue;

		if (sleep_mode != level->sleep_mode)
			continue;

		if (latency_us < level->latency_us)
			continue;

		if (!msm_rpmrs_irqs_detectable(&level->rs_limits,
				irqs_detectable, gpio_detectable))
			continue;

		if (sleep_us <= 1) {
			power = level->energy_overhead;
		} else if (sleep_us <= level->time_overhead_us) {
			power = level->energy_overhead / sleep_us;
		} else if ((sleep_us >> 10) > level->time_overhead_us) {
			power = level->steady_state_power;
		} else {
			power = level->steady_state_power;
			power -= (level->time_overhead_us *
					level->steady_state_power)/sleep_us;
			power += level->energy_overhead / sleep_us;
		}

		if (!best_level ||
				best_level->rs_limits.power[cpu] >= power) {
			level->rs_limits.latency_us[cpu] = level->latency_us;
			level->rs_limits.power[cpu] = power;
			best_level = level;
		}
	}

	return best_level ? &best_level->rs_limits : NULL;
}

int msm_rpmrs_enter_sleep(uint32_t sclk_count, struct msm_rpmrs_limits *limits,
		bool from_idle, bool notify_rpm)
{
	int rc = 0;

	if (notify_rpm) {
		rc = msm_rpmrs_flush_buffer(sclk_count, limits, from_idle);
		if (rc)
			return rc;

		if (msm_rpmrs_use_mpm(limits))
			msm_mpm_enter_sleep(from_idle);
	}

	rc = msm_rpmrs_flush_L2(limits, notify_rpm);
	return rc;
}

void msm_rpmrs_exit_sleep(struct msm_rpmrs_limits *limits, bool from_idle,
		bool notify_rpm, bool collapsed)
{
	/* Disable L2 for now; we don't want L2 to do retention by default */
	msm_rpmrs_L2_restore(limits, notify_rpm, collapsed);

	if (msm_rpmrs_use_mpm(limits))
		msm_mpm_exit_sleep(from_idle);
}

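/*
 * While more than one core is online, the tracked L2 state is pinned to
 * ACTIVE; once only a single core remains, it may drop to HSFS (power
 * collapse).  This hotplug callback keeps that value in step with the
 * online-CPU count and re-derives which sleep levels are usable.
 */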
static int rpmrs_cpu_callback(struct notifier_block *nfb,
		unsigned long action, void *hcpu)
{
	switch (action) {
	case CPU_ONLINE_FROZEN:
	case CPU_ONLINE:
		if (num_online_cpus() > 1)
			msm_rpmrs_l2_cache.rs[0].value =
				MSM_RPMRS_L2_CACHE_ACTIVE;
		break;
	case CPU_DEAD_FROZEN:
	case CPU_DEAD:
		if (num_online_cpus() == 1)
			msm_rpmrs_l2_cache.rs[0].value =
				MSM_RPMRS_L2_CACHE_HSFS_OPEN;
		break;
	}

	msm_rpmrs_update_levels();
	return NOTIFY_OK;
}

static struct notifier_block __refdata rpmrs_cpu_notifier = {
	.notifier_call = rpmrs_cpu_callback,
};

int __init msm_rpmrs_levels_init(struct msm_rpmrs_level *levels, int size)
{
	msm_rpmrs_levels = kzalloc(sizeof(struct msm_rpmrs_level) * size,
			GFP_KERNEL);
	if (!msm_rpmrs_levels)
		return -ENOMEM;
	msm_rpmrs_level_count = size;
	memcpy(msm_rpmrs_levels, levels, size * sizeof(struct msm_rpmrs_level));

	return 0;
}

static int __init msm_rpmrs_init(void)
{
	struct msm_rpm_iv_pair req;
	int rc;

	if (cpu_is_apq8064())
		return -ENODEV;

	BUG_ON(!msm_rpmrs_levels);

	if (cpu_is_msm8x60()) {
		req.id = MSM_RPMRS_ID_APPS_L2_CACHE_CTL;
		req.value = 1;

		rc = msm_rpm_set(MSM_RPM_CTX_SET_0, &req, 1);
		if (rc) {
			pr_err("%s: failed to request L2 cache: %d\n",
				__func__, rc);
			goto init_exit;
		}

		req.id = MSM_RPMRS_ID_APPS_L2_CACHE_CTL;
		req.value = 0;

		rc = msm_rpmrs_set(MSM_RPM_CTX_SET_SLEEP, &req, 1);
		if (rc) {
			pr_err("%s: failed to initialize L2 cache for sleep: %d\n",
				__func__, rc);
			goto init_exit;
		}
	}

	rc = msm_rpmrs_resource_sysfs_add();

init_exit:
	return rc;
}
device_initcall(msm_rpmrs_init);

static int __init msm_rpmrs_early_init(void)
{
	int i, k;

	/* Initialize listed bitmap for valid resource IDs */
	for (i = 0; i < ARRAY_SIZE(msm_rpmrs_resources); i++) {
		for (k = 0; k < msm_rpmrs_resources[i]->size; k++)
			set_bit(msm_rpmrs_resources[i]->rs[k].id,
				msm_rpmrs_listed);
	}

	return 0;
}
early_initcall(msm_rpmrs_early_init);

static int __init msm_rpmrs_l2_init(void)
{
	if (cpu_is_msm8960() || cpu_is_msm8930()) {
		msm_pm_set_l2_flush_flag(0);

		msm_rpmrs_l2_cache.beyond_limits =
			msm_spm_l2_cache_beyond_limits;
		msm_rpmrs_l2_cache.aggregate = NULL;
		msm_rpmrs_l2_cache.restore = NULL;

		register_hotcpu_notifier(&rpmrs_cpu_notifier);

	} else if (cpu_is_msm9615()) {
		msm_rpmrs_l2_cache.beyond_limits = NULL;
		msm_rpmrs_l2_cache.aggregate = NULL;
		msm_rpmrs_l2_cache.restore = NULL;
	}
	return 0;
}
early_initcall(msm_rpmrs_l2_init);