MIPS: Allow kernel use of LL/SC to be separate from the presence of LL/SC.

On some CPUs, it is more efficient to disable and enable interrupts in the
kernel than to use ll/sc for atomic operations.  But if we were to set
cpu_has_llsc to false, we would break the userspace futex interface (in
asm/futex.h), whose atomic operations fall back to -ENOSYS when
cpu_has_llsc is false.

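For reference, this is what the interrupt-disabling path looks like in
practice: a sketch mirroring the existing non-ll/sc branch of atomic_add()
in asm/atomic.h (atomic_add_irqoff is an illustrative name only, not part
of the patch):

	/* Hypothetical sketch of an atomic add via interrupt masking. */
	static inline void atomic_add_irqoff(int i, atomic_t *v)
	{
		unsigned long flags;

		raw_local_irq_save(flags);	/* mask interrupts on this CPU */
		v->counter += i;		/* plain RMW, safe vs. local IRQs */
		raw_local_irq_restore(flags);
	}

Masking interrupts only serializes against the local CPU, so this path is
only sufficient when no other CPU can touch v concurrently.
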
We separate the two concepts with a new predicate, kernel_uses_llsc, which
lets us disable the kernel's use of ll/sc while still allowing the futex
code to use it.

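A minimal sketch of how the new predicate can default to the current
behavior (assuming it sits with the other feature tests in
asm/cpu-features.h, where a platform's cpu-feature-overrides.h may
pre-define it to 0):

	#ifndef kernel_uses_llsc
	#define kernel_uses_llsc cpu_has_llsc
	#endif

With that default the kernel keeps using ll/sc everywhere it did before,
while asm/futex.h continues to test cpu_has_llsc directly.
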
Also, there were a couple of cases in bitops.h where ll/sc was used
unconditionally even when cpu_has_llsc was false; these are fixed as well.

Signed-off-by: David Daney <ddaney@caviumnetworks.com>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index eb7f01c..dd75d67 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -49,7 +49,7 @@
  */
 static __inline__ void atomic_add(int i, atomic_t * v)
 {
-	if (cpu_has_llsc && R10000_LLSC_WAR) {
+	if (kernel_uses_llsc && R10000_LLSC_WAR) {
 		int temp;
 
 		__asm__ __volatile__(
@@ -61,7 +61,7 @@
 		"	.set	mips0					\n"
 		: "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter));
-	} else if (cpu_has_llsc) {
+	} else if (kernel_uses_llsc) {
 		int temp;
 
 		__asm__ __volatile__(
@@ -94,7 +94,7 @@
  */
 static __inline__ void atomic_sub(int i, atomic_t * v)
 {
-	if (cpu_has_llsc && R10000_LLSC_WAR) {
+	if (kernel_uses_llsc && R10000_LLSC_WAR) {
 		int temp;
 
 		__asm__ __volatile__(
@@ -106,7 +106,7 @@
 		"	.set	mips0					\n"
 		: "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter));
-	} else if (cpu_has_llsc) {
+	} else if (kernel_uses_llsc) {
 		int temp;
 
 		__asm__ __volatile__(
@@ -139,7 +139,7 @@
 
 	smp_llsc_mb();
 
-	if (cpu_has_llsc && R10000_LLSC_WAR) {
+	if (kernel_uses_llsc && R10000_LLSC_WAR) {
 		int temp;
 
 		__asm__ __volatile__(
@@ -153,7 +153,7 @@
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
 		: "memory");
-	} else if (cpu_has_llsc) {
+	} else if (kernel_uses_llsc) {
 		int temp;
 
 		__asm__ __volatile__(
@@ -191,7 +191,7 @@
 
 	smp_llsc_mb();
 
-	if (cpu_has_llsc && R10000_LLSC_WAR) {
+	if (kernel_uses_llsc && R10000_LLSC_WAR) {
 		int temp;
 
 		__asm__ __volatile__(
@@ -205,7 +205,7 @@
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
 		: "memory");
-	} else if (cpu_has_llsc) {
+	} else if (kernel_uses_llsc) {
 		int temp;
 
 		__asm__ __volatile__(
@@ -251,7 +251,7 @@
 
 	smp_llsc_mb();
 
-	if (cpu_has_llsc && R10000_LLSC_WAR) {
+	if (kernel_uses_llsc && R10000_LLSC_WAR) {
 		int temp;
 
 		__asm__ __volatile__(
@@ -269,7 +269,7 @@
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
 		: "memory");
-	} else if (cpu_has_llsc) {
+	} else if (kernel_uses_llsc) {
 		int temp;
 
 		__asm__ __volatile__(
@@ -428,7 +428,7 @@
  */
 static __inline__ void atomic64_add(long i, atomic64_t * v)
 {
-	if (cpu_has_llsc && R10000_LLSC_WAR) {
+	if (kernel_uses_llsc && R10000_LLSC_WAR) {
 		long temp;
 
 		__asm__ __volatile__(
@@ -440,7 +440,7 @@
 		"	.set	mips0					\n"
 		: "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter));
-	} else if (cpu_has_llsc) {
+	} else if (kernel_uses_llsc) {
 		long temp;
 
 		__asm__ __volatile__(
@@ -473,7 +473,7 @@
  */
 static __inline__ void atomic64_sub(long i, atomic64_t * v)
 {
-	if (cpu_has_llsc && R10000_LLSC_WAR) {
+	if (kernel_uses_llsc && R10000_LLSC_WAR) {
 		long temp;
 
 		__asm__ __volatile__(
@@ -485,7 +485,7 @@
 		"	.set	mips0					\n"
 		: "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter));
-	} else if (cpu_has_llsc) {
+	} else if (kernel_uses_llsc) {
 		long temp;
 
 		__asm__ __volatile__(
@@ -518,7 +518,7 @@
 
 	smp_llsc_mb();
 
-	if (cpu_has_llsc && R10000_LLSC_WAR) {
+	if (kernel_uses_llsc && R10000_LLSC_WAR) {
 		long temp;
 
 		__asm__ __volatile__(
@@ -532,7 +532,7 @@
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
 		: "memory");
-	} else if (cpu_has_llsc) {
+	} else if (kernel_uses_llsc) {
 		long temp;
 
 		__asm__ __volatile__(
@@ -570,7 +570,7 @@
 
 	smp_llsc_mb();
 
-	if (cpu_has_llsc && R10000_LLSC_WAR) {
+	if (kernel_uses_llsc && R10000_LLSC_WAR) {
 		long temp;
 
 		__asm__ __volatile__(
@@ -584,7 +584,7 @@
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
 		: "memory");
-	} else if (cpu_has_llsc) {
+	} else if (kernel_uses_llsc) {
 		long temp;
 
 		__asm__ __volatile__(
@@ -630,7 +630,7 @@
 
 	smp_llsc_mb();
 
-	if (cpu_has_llsc && R10000_LLSC_WAR) {
+	if (kernel_uses_llsc && R10000_LLSC_WAR) {
 		long temp;
 
 		__asm__ __volatile__(
@@ -648,7 +648,7 @@
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
 		: "memory");
-	} else if (cpu_has_llsc) {
+	} else if (kernel_uses_llsc) {
 		long temp;
 
 		__asm__ __volatile__(