msm: 8960: Add support for PASR in DMM.
8960 v2 and future devices only support PASR, so the RPM interface
for DMM should trigger self-refresh instead of deep power down.
Add support to provide a logical grouping of memory regions
that abstracts the physical memory topology to achieve independent
power control. The mode of power control is automatically switched
based on the version of the device.
Change-Id: I93fddc9437ba2eaffbb2853f9d91dadb2edc5d0b
Signed-off-by: Naveen Ramaraj <nramaraj@codeaurora.org>
diff --git a/arch/arm/mach-msm/memory_topology.c b/arch/arm/mach-msm/memory_topology.c
index b9d9d3b..7a75bc1 100644
--- a/arch/arm/mach-msm/memory_topology.c
+++ b/arch/arm/mach-msm/memory_topology.c
@@ -14,8 +14,11 @@
#include <asm/setup.h>
#include <asm/errno.h>
#include <asm/sizes.h>
+#include <asm/pgtable.h>
#include <linux/mutex.h>
+#include <linux/memory.h>
#include <mach/msm_memtypes.h>
+#include <mach/socinfo.h>
#include "smd_private.h"
#if defined(CONFIG_ARCH_MSM8960)
@@ -40,6 +43,14 @@
STATE_DEFAULT = STATE_ACTIVE
};
+enum {
+ MEM_NO_CHANGE = 0x0,
+ MEM_DEEP_POWER_DOWN,
+ MEM_SELF_REFRESH,
+};
+
+static unsigned int dmm_mode;
+
static int default_mask = ~0x0;
/* Return the number of chipselects populated with a memory bank */
@@ -103,27 +114,40 @@
static int switch_memory_state(int id, int new_state)
{
int mask;
- int disable_masks[MAX_NR_REGIONS] = { 0xFFFFFF00, 0xFFFF00FF,
+ int power_down_masks[MAX_NR_REGIONS] = { 0xFFFFFF00, 0xFFFF00FF,
0xFF00FFFF, 0x00FFFFFF };
+ int self_refresh_masks[MAX_NR_REGIONS] = { 0xFFFFFFF0, 0xFFFFFF0F,
+ 0xFFFFF0FF, 0xFFFF0FFF };
mutex_lock(&mem_regions[id].state_mutex);
if (new_state == mem_regions[id].state)
goto no_change;
- if (new_state == STATE_POWER_DOWN)
- mask = mem_regions[id].mask & disable_masks[id];
- else if (new_state == STATE_ACTIVE)
- mask = mem_regions[id].mask | (~disable_masks[id]);
+ pr_info("request memory %d state switch (%d->%d) mode %d\n", id,
+ mem_regions[id].state, new_state, dmm_mode);
+ if (new_state == STATE_POWER_DOWN) {
+ if (dmm_mode == MEM_DEEP_POWER_DOWN)
+ mask = mem_regions[id].mask & power_down_masks[id];
+ else
+ mask = mem_regions[id].mask & self_refresh_masks[id];
+ } else if (new_state == STATE_ACTIVE) {
+ if (dmm_mode == MEM_DEEP_POWER_DOWN)
+ mask = mem_regions[id].mask | (~power_down_masks[id]);
+ else
+ mask = mem_regions[id].mask | (~self_refresh_masks[id]);
+ }
- /* For now we only support Deep Power Down */
- /* So set the active and retention states as the same */
if (rpm_change_memory_state(mask, mask) == 0) {
mem_regions[id].state = new_state;
mem_regions[id].mask = mask;
+ pr_info("completed memory %d state switch to %d mode %d\n",
+ id, new_state, dmm_mode);
mutex_unlock(&mem_regions[id].state_mutex);
return 0;
}
+ pr_err("failed memory %d state switch (%d->%d) mode %d\n", id,
+ mem_regions[id].state, new_state, dmm_mode);
no_change:
mutex_unlock(&mem_regions[id].state_mutex);
return -EINVAL;
@@ -189,7 +213,12 @@
int __init meminfo_init(unsigned int type, unsigned int min_bank_size)
{
unsigned int i;
+ unsigned long bank_size;
+ unsigned long bank_start;
struct smem_ram_ptable *ram_ptable;
+ /* physical memory banks */
+ unsigned int nr_mem_banks = 0;
+ /* logical memory regions for dmm */
nr_mem_regions = 0;
ram_ptable = smem_alloc(SMEM_USABLE_RAM_PARTITION_TABLE,
@@ -200,22 +229,41 @@
return -EINVAL;
}
+ /* Determine power control mode based on the hw version */
+ /* This check will be removed when PASR is fully supported */
+ if (cpu_is_msm8960() &&
+ SOCINFO_VERSION_MAJOR(socinfo_get_version()) < 2)
+ dmm_mode = MEM_DEEP_POWER_DOWN;
+ else
+ dmm_mode = MEM_SELF_REFRESH;
+
pr_info("meminfo_init: smem ram ptable found: ver: %d len: %d\n",
ram_ptable->version, ram_ptable->len);
for (i = 0; i < ram_ptable->len; i++) {
if (ram_ptable->parts[i].type == type &&
ram_ptable->parts[i].size >= min_bank_size) {
- mem_regions[nr_mem_regions].start =
- ram_ptable->parts[i].start;
- mem_regions[nr_mem_regions].size =
- ram_ptable->parts[i].size;
- mutex_init(&mem_regions[nr_mem_regions].state_mutex);
- mem_regions[nr_mem_regions].state = STATE_DEFAULT;
- mem_regions[nr_mem_regions].mask = default_mask;
- nr_mem_regions++;
+ bank_start = ram_ptable->parts[i].start;
+ bank_size = ram_ptable->parts[i].size;
+ /* Divide into logical memory regions of same size */
+ while (bank_size) {
+ mem_regions[nr_mem_regions].start =
+ bank_start;
+ mem_regions[nr_mem_regions].size =
+ MIN_MEMORY_BLOCK_SIZE;
+ mutex_init(&mem_regions[nr_mem_regions]
+ .state_mutex);
+ mem_regions[nr_mem_regions].state =
+ STATE_DEFAULT;
+ mem_regions[nr_mem_regions].mask = default_mask;
+ bank_start += MIN_MEMORY_BLOCK_SIZE;
+ bank_size -= MIN_MEMORY_BLOCK_SIZE;
+ nr_mem_regions++;
+ }
+ nr_mem_banks++;
}
}
- pr_info("Found %d memory banks\n", nr_mem_regions);
+ pr_info("Found %d memory banks grouped into %d memory regions\n",
+ nr_mem_banks, nr_mem_regions);
return 0;
}