alpha: lock bitops

Alpha can avoid one mb when acquiring a lock with test_and_set_bit_lock:
lock acquisition only needs acquire semantics, so the leading memory
barrier that the fully-ordered test_and_set_bit issues can be dropped,
keeping only the mb after the successful stl_c.  The matching release
primitives, clear_bit_unlock and __clear_bit_unlock, issue smp_mb()
before clearing the bit, so the generic asm-generic/bitops/lock.h
fallback is no longer needed.

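For context, a minimal sketch of how a caller pairs these primitives for
a bit spinlock (the function names below are illustrative, modelled on
bit_spin_lock()/bit_spin_unlock(); they are not part of this patch):

	static inline void example_bit_lock(int nr, unsigned long *word)
	{
		/* Spin until the bit was observed clear; a successful
		 * test_and_set_bit_lock() provides the acquire barrier. */
		while (test_and_set_bit_lock(nr, word))
			cpu_relax();
	}

	static inline void example_bit_unlock(int nr, unsigned long *word)
	{
		/* clear_bit_unlock() issues smp_mb() before the clear,
		 * giving release semantics. */
		clear_bit_unlock(nr, word);
	}
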
[bunk@kernel.org: alpha bitops.h must #include <asm/barrier.h>]
Signed-off-by: Nick Piggin <npiggin@suse.de>
Cc: Richard Henderson <rth@twiddle.net>
Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Signed-off-by: Adrian Bunk <bunk@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
diff --git a/include/asm-alpha/bitops.h b/include/asm-alpha/bitops.h
index f1bbe6c..381b4f5 100644
--- a/include/asm-alpha/bitops.h
+++ b/include/asm-alpha/bitops.h
@@ -2,6 +2,7 @@
 #define _ALPHA_BITOPS_H
 
 #include <asm/compiler.h>
+#include <asm/barrier.h>
 
 /*
  * Copyright 1994, Linus Torvalds.
@@ -69,6 +70,13 @@
 	:"Ir" (1UL << (nr & 31)), "m" (*m));
 }
 
+static inline void
+clear_bit_unlock(unsigned long nr, volatile void * addr)
+{
+	smp_mb();
+	clear_bit(nr, addr);
+}
+
 /*
  * WARNING: non atomic version.
  */
@@ -81,6 +89,13 @@
 }
 
 static inline void
+__clear_bit_unlock(unsigned long nr, volatile void * addr)
+{
+	smp_mb();
+	__clear_bit(nr, addr);
+}
+
+static inline void
 change_bit(unsigned long nr, volatile void * addr)
 {
 	unsigned long temp;
@@ -139,6 +154,33 @@
 	return oldbit != 0;
 }
 
+static inline int
+test_and_set_bit_lock(unsigned long nr, volatile void *addr)
+{
+	unsigned long oldbit;
+	unsigned long temp;
+	int *m = ((int *) addr) + (nr >> 5);
+
+	__asm__ __volatile__(
+	"1:	ldl_l %0,%4\n"
+	"	and %0,%3,%2\n"
+	"	bne %2,2f\n"
+	"	xor %0,%3,%0\n"
+	"	stl_c %0,%1\n"
+	"	beq %0,3f\n"
+	"2:\n"
+#ifdef CONFIG_SMP
+	"	mb\n"
+#endif
+	".subsection 2\n"
+	"3:	br 1b\n"
+	".previous"
+	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
+	:"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");
+
+	return oldbit != 0;
+}
+
 /*
  * WARNING: non atomic version.
  */
@@ -376,7 +418,6 @@
 #else
 #include <asm-generic/bitops/hweight.h>
 #endif
-#include <asm-generic/bitops/lock.h>
 
 #endif /* __KERNEL__ */