arm: Support the safe WFE sequence for Krait CPUs

Certain versions of the Krait processor require a specific
code sequence to be executed prior to a WFE instruction in
order to permit that instruction to place the processor into
a low-power state.
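
In rough C-like pseudocode, the sequence implemented by the
WFE_SAFE() macro added below is the following (the helper names
here are purely illustrative; the real implementation is the
inline assembly in spinlock.h):

	saved_cpsr = read_cpsr();	/* mrs: save interrupt state */
	if (!msm_krait_need_wfe_fixup) {
		wfe();			/* unaffected parts: plain WFE */
	} else {
		disable_irq_fiq();	/* cpsid if */
		/* Krait-specific CP15 register: p15, 7, c15, c0, 5 */
		val = read_krait_pwr_reg();
		write_krait_pwr_reg(val & ~(1 << 16));
		isb();
		wfe();
		write_krait_pwr_reg(val | (1 << 16));
		isb();
	}
	write_cpsr(saved_cpsr);		/* restore interrupt state */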

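Whether the fixup is required is determined once at boot: if the
MIDR matches one of the affected Krait variants, bit 16 of the same
CP15 register is sampled and the result is cached in the exported
variable msm_krait_need_wfe_fixup, which the lock slow paths test
before each WFE. Note that the fixed-up sequence executes WFE with
both IRQs and FIQs masked, so it can deadlock if a thread spins on
a lock that is released from interrupt context (see the comment
added to spinlock.h).
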
Change-Id: I308adc691f110a323cbd84e9779675ac045826fa
Signed-off-by: Stepan Moskovchenko <stepanm@codeaurora.org>
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index 51be229..94aa75e 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -7,6 +7,8 @@
 
 #include <asm/processor.h>
 
+extern int msm_krait_need_wfe_fixup;
+
 /*
  * sev and wfe are ARMv6K extensions.  Uniprocessor ARMv6 may not have the K
  * extensions, so when running on UP, we have to patch these instructions away.
@@ -34,6 +36,31 @@
 #define WFE()		ALT_SMP("wfe", "nop")
 #endif
 
+/*
+ * The fixup involves disabling interrupts during execution of the WFE
+ * instruction. This can lead to deadlock if a thread is trying to acquire a
+ * spinlock that is released from interrupt context.
+ */
+#ifdef CONFIG_MSM_KRAIT_WFE_FIXUP
+#define WFE_SAFE(fixup, tmp)				\
+"	mrs	" tmp ", cpsr\n"			\
+"	cmp	" fixup ", #0\n"			\
+"	wfeeq\n"					\
+"	beq	10f\n"					\
+"	cpsid	if\n"					\
+"	mrc	p15, 7, " fixup ", c15, c0, 5\n"	\
+"	bic	" fixup ", " fixup ", #0x10000\n"	\
+"	mcr	p15, 7, " fixup ", c15, c0, 5\n"	\
+"	isb\n"						\
+"	wfe\n"						\
+"	orr	" fixup ", " fixup ", #0x10000\n"	\
+"	mcr	p15, 7, " fixup ", c15, c0, 5\n"	\
+"	isb\n"						\
+"10:	msr	cpsr_cf, " tmp "\n"
+#else
+#define WFE_SAFE(fixup, tmp)	"	wfe\n"
+#endif
+
 static inline void dsb_sev(void)
 {
 #if __LINUX_ARM_ARCH__ >= 7
@@ -71,18 +98,18 @@
 
 static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
-	unsigned long tmp;
+	unsigned long tmp, fixup = msm_krait_need_wfe_fixup;
 
 	__asm__ __volatile__(
 "1:	ldrex	%[tmp], [%[lock]]\n"
 "	teq	%[tmp], #0\n"
 "	beq	2f\n"
-	WFE()
+	WFE_SAFE("%[fixup]", "%[tmp]")
 "2:\n"
 "	strexeq	%[tmp], %[bit0], [%[lock]]\n"
 "	teqeq	%[tmp], #0\n"
 "	bne	1b"
-	: [tmp] "=&r" (tmp)
+	: [tmp] "=&r" (tmp), [fixup] "+r" (fixup)
 	: [lock] "r" (&lock->lock), [bit0] "r" (1)
 	: "cc");
 
@@ -149,6 +176,7 @@
 static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	unsigned long tmp, ticket, next_ticket;
+	unsigned long fixup = msm_krait_need_wfe_fixup;
 
 	/* Grab the next ticket and wait for it to be "served" */
 	__asm__ __volatile__(
@@ -161,13 +189,14 @@
 "2:\n"
 #ifdef CONFIG_CPU_32v6K
 "	beq	3f\n"
-	WFE()
+	WFE_SAFE("%[fixup]", "%[tmp]")
 "3:\n"
 #endif
 "	ldr	%[tmp], [%[lockaddr]]\n"
 "	cmp	%[ticket], %[tmp], lsr #16\n"
 "	bne	2b"
-	: [ticket]"=&r" (ticket), [tmp]"=&r" (tmp), [next_ticket]"=&r" (next_ticket)
+	: [ticket]"=&r" (ticket), [tmp]"=&r" (tmp),
+	  [next_ticket]"=&r" (next_ticket), [fixup]"+r" (fixup)
 	: [lockaddr]"r" (&lock->lock), [val1]"r" (1)
 	: "cc");
 	smp_mb();
@@ -216,7 +245,7 @@
 
 static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
 {
-	unsigned long ticket;
+	unsigned long ticket, tmp, fixup = msm_krait_need_wfe_fixup;
 
 	/* Wait for now_serving == next_ticket */
 	__asm__ __volatile__(
@@ -224,7 +253,7 @@
 "	cmpne	%[lockaddr], %[lockaddr]\n"
 "1:\n"
 "	beq	2f\n"
-	WFE()
+	WFE_SAFE("%[fixup]", "%[tmp]")
 "2:\n"
 #else
 "1:\n"
@@ -234,7 +263,8 @@
 "	uxth	%[ticket], %[ticket]\n"
 "	cmp	%[ticket], #0\n"
 "	bne	1b"
-	: [ticket]"=&r" (ticket)
+	: [ticket]"=&r" (ticket), [tmp]"=&r" (tmp),
+	  [fixup]"+r" (fixup)
 	: [lockaddr]"r" (&lock->lock)
 	: "cc");
 }
@@ -262,18 +292,18 @@
 
 static inline void arch_write_lock(arch_rwlock_t *rw)
 {
-	unsigned long tmp;
+	unsigned long tmp, fixup = msm_krait_need_wfe_fixup;
 
 	__asm__ __volatile__(
 "1:	ldrex	%[tmp], [%[lock]]\n"
 "	teq	%[tmp], #0\n"
 "	beq	2f\n"
-	WFE()
+	WFE_SAFE("%[fixup]", "%[tmp]")
 "2:\n"
 "	strexeq	%[tmp], %[bit31], [%[lock]]\n"
 "	teq	%[tmp], #0\n"
 "	bne	1b"
-	: [tmp] "=&r" (tmp)
+	: [tmp] "=&r" (tmp), [fixup] "+r" (fixup)
 	: [lock] "r" (&rw->lock), [bit31] "r" (0x80000000)
 	: "cc");
 
@@ -330,18 +360,18 @@
  */
 static inline void arch_read_lock(arch_rwlock_t *rw)
 {
-	unsigned long tmp, tmp2;
+	unsigned long tmp, tmp2, fixup = msm_krait_need_wfe_fixup;
 
 	__asm__ __volatile__(
 "1:	ldrex	%[tmp], [%[lock]]\n"
 "	adds	%[tmp], %[tmp], #1\n"
 "	strexpl	%[tmp2], %[tmp], [%[lock]]\n"
 "	bpl	2f\n"
-	WFE()
+	WFE_SAFE("%[fixup]", "%[tmp]")
 "2:\n"
 "	rsbpls	%[tmp], %[tmp2], #0\n"
 "	bmi	1b"
-	: [tmp] "=&r" (tmp), [tmp2] "=&r" (tmp2)
+	: [tmp] "=&r" (tmp), [tmp2] "=&r" (tmp2), [fixup] "+r" (fixup)
 	: [lock] "r" (&rw->lock)
 	: "cc");
 
diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig
index c23bc10..5e7965e 100644
--- a/arch/arm/mach-msm/Kconfig
+++ b/arch/arm/mach-msm/Kconfig
@@ -172,6 +172,7 @@
 	select HOLES_IN_ZONE if SPARSEMEM
 	select MSM_RUN_QUEUE_STATS
 	select ARM_HAS_SG_CHAIN
+	select MSM_KRAIT_WFE_FIXUP
 
 config ARCH_MSM8930
 	bool "MSM8930"
@@ -203,6 +204,7 @@
 	select MSM_PM8X60 if PM
 	select HOLES_IN_ZONE if SPARSEMEM
 	select ARM_HAS_SG_CHAIN
+	select MSM_KRAIT_WFE_FIXUP
 
 config ARCH_APQ8064
 	bool "APQ8064"
@@ -229,6 +231,7 @@
 	select MIGHT_HAVE_PCI
 	select ARCH_SUPPORTS_MSI
 	select ARM_HAS_SG_CHAIN
+	select MSM_KRAIT_WFE_FIXUP
 
 config ARCH_MSM8974
 	bool "MSM8974"
@@ -370,6 +373,9 @@
 	select MSM_SMP
 	bool
 
+config  MSM_KRAIT_WFE_FIXUP
+	bool
+
 config  ARCH_MSM_CORTEX_A5
 	bool
 	select HAVE_HW_BRKPT_RESERVED_RW_ACCESS
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 32f61f5..b6fb52a 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -32,6 +32,7 @@
 #include <asm/sizes.h>
 #include <asm/tlb.h>
 #include <asm/fixmap.h>
+#include <asm/cputype.h>
 
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
@@ -40,6 +41,8 @@
 
 static unsigned long phys_initrd_start __initdata = 0;
 static unsigned long phys_initrd_size __initdata = 0;
+int msm_krait_need_wfe_fixup;
+EXPORT_SYMBOL(msm_krait_need_wfe_fixup);
 
 static int __init early_initrd(char *p)
 {
@@ -916,3 +919,18 @@
 
 __setup("keepinitrd", keepinitrd_setup);
 #endif
+
+#ifdef CONFIG_MSM_KRAIT_WFE_FIXUP
+static int __init msm_krait_wfe_init(void)
+{
+	unsigned int val, midr;
+
+	midr = read_cpuid_id() & 0xffffff00;
+	if ((midr == 0x511f0400) || (midr == 0x510f0600)) {
+		asm volatile("mrc p15, 7, %0, c15, c0, 5" : "=r" (val));
+		msm_krait_need_wfe_fixup = (val & 0x10000) ? 1 : 0;
+	}
+	return 0;
+}
+pure_initcall(msm_krait_wfe_init);
+#endif