msm: pm: save registers to noncached area before pc
The power collapse path is simplified if a noncached area is
used for the purpose of saving register state prior to power
collapsing. This is especially beneficial on targets
with an outer (L2) cache, where cleaning the saved register
state out to memory would otherwise require complex cache
maintenance operations.
Change-Id: I9edac28e4091548e2843fc87c89e3451df2b6ae9
Signed-off-by: Steve Muckle <smuckle@codeaurora.org>
diff --git a/arch/arm/mach-msm/idle-v7.S b/arch/arm/mach-msm/idle-v7.S
index 868054d..63ac78e 100644
--- a/arch/arm/mach-msm/idle-v7.S
+++ b/arch/arm/mach-msm/idle-v7.S
@@ -19,13 +19,8 @@
#include <linux/threads.h>
#include <asm/assembler.h>
-#ifdef CONFIG_MSM_CPU_AVS
-/* 11 general purpose registers (r4-r14), 10 cp15 registers, 3 AVS registers */
-#define CPU_SAVED_STATE_SIZE (4 * 11 + 4 * 10 + 4 * 3)
-#else
-/* 11 general purpose registers (r4-r14), 10 cp15 registers */
-#define CPU_SAVED_STATE_SIZE (4 * 11 + 4 * 10)
-#endif
+#include "idle.h"
+
#ifdef CONFIG_ARCH_MSM_KRAIT
#define SCM_SVC_BOOT 0x1
#define SCM_CMD_TERMINATE_PC 0x2
@@ -45,7 +40,8 @@
cpsid f
#endif
- ldr r0, =saved_state
+ ldr r0, =msm_saved_state /* address of msm_saved_state ptr */
+ ldr r0, [r0] /* load ptr */
#if (NR_CPUS >= 2)
mrc p15, 0, r1, c0, c0, 5 /* MPIDR */
ands r1, r1, #15 /* What CPU am I */
@@ -100,14 +96,7 @@
cmp r1, #1
bne skip
bl v7_flush_dcache_all
- b skip2
-
-skip: ldr r0, =saved_state
- ldr r1, =saved_state_end
- sub r1, r1, r0
- bl v7_flush_kern_dcache_area
-
-skip2:
+skip:
#ifdef CONFIG_ARCH_MSM_KRAIT
ldr r0, =SCM_SVC_BOOT
ldr r1, =SCM_CMD_TERMINATE_PC
@@ -134,7 +123,8 @@
#ifdef CONFIG_MSM_JTAG
bl msm_jtag_restore_state
#endif
- ldr r0, =saved_state /* restore registers */
+ ldr r0, =msm_saved_state /* address of msm_saved_state ptr */
+ ldr r0, [r0] /* load ptr */
#if (NR_CPUS >= 2)
mrc p15, 0, r1, c0, c0, 5 /* MPIDR */
ands r1, r1, #15 /* What CPU am I */
@@ -143,7 +133,7 @@
add r0, r0, r2
#endif
- ldmfd r0, {r4-r14}
+ ldmfd r0, {r4-r14} /* restore registers */
mov r0, #0 /* return power collapse failed */
bx lr
@@ -157,11 +147,12 @@
mov r1, #'A'
str r1, [r0, #0x00C]
#endif
- ldr r1, =saved_state
+ ldr r1, =msm_saved_state_phys
ldr r2, =msm_pm_collapse_exit
adr r3, msm_pm_collapse_exit
add r1, r1, r3
sub r1, r1, r2
+ ldr r1, [r1]
add r1, r1, #CPU_SAVED_STATE_SIZE
#if (NR_CPUS >= 2)
mrc p15, 0, r2, c0, c0, 5 /* MPIDR */
@@ -277,9 +268,13 @@
msm_pm_pc_pgd:
.long 0x0
-saved_state:
- .space CPU_SAVED_STATE_SIZE * NR_CPUS
-saved_state_end:
+ .globl msm_saved_state
+msm_saved_state:
+ .long 0x0
+
+ .globl msm_saved_state_phys
+msm_saved_state_phys:
+ .long 0x0
msm_pm_boot_vector:
.space 4 * NR_CPUS
diff --git a/arch/arm/mach-msm/idle.h b/arch/arm/mach-msm/idle.h
index 6311b3c..f8f6b3c 100644
--- a/arch/arm/mach-msm/idle.h
+++ b/arch/arm/mach-msm/idle.h
@@ -14,9 +14,21 @@
#ifndef _ARCH_ARM_MACH_MSM_IDLE_H_
#define _ARCH_ARM_MACH_MSM_IDLE_H_
+#ifdef CONFIG_MSM_CPU_AVS
+/* 11 general purpose registers (r4-r14), 10 cp15 registers, 3 AVS registers */
+#define CPU_SAVED_STATE_SIZE (4 * 11 + 4 * 10 + 4 * 3)
+#else
+/* 11 general purpose registers (r4-r14), 10 cp15 registers */
+#define CPU_SAVED_STATE_SIZE (4 * 11 + 4 * 10)
+#endif
+
+#ifndef __ASSEMBLY__
+
int msm_arch_idle(void);
int msm_pm_collapse(void);
void msm_pm_collapse_exit(void);
+extern void *msm_saved_state;
+extern unsigned long msm_saved_state_phys;
#ifdef CONFIG_CPU_V7
void msm_pm_boot_entry(void);
@@ -38,5 +50,5 @@
/* empty */
}
#endif
-
+#endif
#endif
diff --git a/arch/arm/mach-msm/pm-8x60.c b/arch/arm/mach-msm/pm-8x60.c
index 9257f2f..5274e8b 100644
--- a/arch/arm/mach-msm/pm-8x60.c
+++ b/arch/arm/mach-msm/pm-8x60.c
@@ -1182,6 +1182,17 @@
pmd[0] = __pmd(pmdval);
pmd[1] = __pmd(pmdval + (1 << (PGDIR_SHIFT - 1)));
+ msm_saved_state_phys =
+ allocate_contiguous_ebi_nomap(CPU_SAVED_STATE_SIZE *
+ num_possible_cpus(), 4);
+ if (!msm_saved_state_phys)
+ return -ENOMEM;
+ msm_saved_state = ioremap_nocache(msm_saved_state_phys,
+ CPU_SAVED_STATE_SIZE *
+ num_possible_cpus());
+ if (!msm_saved_state)
+ return -ENOMEM;
+
/* It is remotely possible that the code in msm_pm_collapse_exit()
* which turns on the MMU with this mapping is in the
* next even-numbered megabyte beyond the
diff --git a/arch/arm/mach-msm/pm2.c b/arch/arm/mach-msm/pm2.c
index 7903eab..58f2075 100644
--- a/arch/arm/mach-msm/pm2.c
+++ b/arch/arm/mach-msm/pm2.c
@@ -1879,6 +1879,17 @@
pmd[0] = __pmd(pmdval);
pmd[1] = __pmd(pmdval + (1 << (PGDIR_SHIFT - 1)));
+ msm_saved_state_phys =
+ allocate_contiguous_ebi_nomap(CPU_SAVED_STATE_SIZE *
+ num_possible_cpus(), 4);
+ if (!msm_saved_state_phys)
+ return -ENOMEM;
+ msm_saved_state = ioremap_nocache(msm_saved_state_phys,
+ CPU_SAVED_STATE_SIZE *
+ num_possible_cpus());
+ if (!msm_saved_state)
+ return -ENOMEM;
+
/* It is remotely possible that the code in msm_pm_collapse_exit()
* which turns on the MMU with this mapping is in the
* next even-numbered megabyte beyond the