locking: Convert raw_rwlock functions to arch_rwlock

Namespace cleanup for the rwlock functions. No functional change.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: David S. Miller <davem@davemloft.net>
Acked-by: Ingo Molnar <mingo@elte.hu>
Cc: linux-arch@vger.kernel.org
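
The arch back end keeps the same primitives; only the prefix changes
from __raw_* to arch_*. A minimal sketch of how a caller of this header
reads after the rename (demo_reader() is a hypothetical helper used for
illustration only, not part of this patch):

	static void demo_reader(arch_rwlock_t *rw)
	{
		arch_read_lock(rw);	/* was __raw_read_lock(rw)   */
		/* ... read-side critical section ... */
		arch_read_unlock(rw);	/* was __raw_read_unlock(rw) */
	}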
diff --git a/arch/sh/include/asm/spinlock.h b/arch/sh/include/asm/spinlock.h
index 7f3626a..bdc0f3b 100644
--- a/arch/sh/include/asm/spinlock.h
+++ b/arch/sh/include/asm/spinlock.h
@@ -100,21 +100,21 @@
  * read_can_lock - would read_trylock() succeed?
  * @lock: the rwlock in question.
  */
-#define __raw_read_can_lock(x)	((x)->lock > 0)
+#define arch_read_can_lock(x)	((x)->lock > 0)
 
 /**
  * write_can_lock - would write_trylock() succeed?
  * @lock: the rwlock in question.
  */
-#define __raw_write_can_lock(x)	((x)->lock == RW_LOCK_BIAS)
+#define arch_write_can_lock(x)	((x)->lock == RW_LOCK_BIAS)
 
-static inline void __raw_read_lock(arch_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
 {
 	unsigned long tmp;
 
 	__asm__ __volatile__ (
 		"1:						\n\t"
-		"movli.l	@%1, %0	! __raw_read_lock	\n\t"
+		"movli.l	@%1, %0	! arch_read_lock	\n\t"
 		"cmp/pl		%0				\n\t"
 		"bf		1b				\n\t"
 		"add		#-1, %0				\n\t"
@@ -126,13 +126,13 @@
 	);
 }
 
-static inline void __raw_read_unlock(arch_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
 	unsigned long tmp;
 
 	__asm__ __volatile__ (
 		"1:						\n\t"
-		"movli.l	@%1, %0	! __raw_read_unlock	\n\t"
+		"movli.l	@%1, %0	! arch_read_unlock	\n\t"
 		"add		#1, %0				\n\t"
 		"movco.l	%0, @%1				\n\t"
 		"bf		1b				\n\t"
@@ -142,13 +142,13 @@
 	);
 }
 
-static inline void __raw_write_lock(arch_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
 {
 	unsigned long tmp;
 
 	__asm__ __volatile__ (
 		"1:						\n\t"
-		"movli.l	@%1, %0	! __raw_write_lock	\n\t"
+		"movli.l	@%1, %0	! arch_write_lock	\n\t"
 		"cmp/hs		%2, %0				\n\t"
 		"bf		1b				\n\t"
 		"sub		%2, %0				\n\t"
@@ -160,23 +160,23 @@
 	);
 }
 
-static inline void __raw_write_unlock(arch_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
 	__asm__ __volatile__ (
-		"mov.l		%1, @%0 ! __raw_write_unlock	\n\t"
+		"mov.l		%1, @%0 ! arch_write_unlock	\n\t"
 		:
 		: "r" (&rw->lock), "r" (RW_LOCK_BIAS)
 		: "t", "memory"
 	);
 }
 
-static inline int __raw_read_trylock(arch_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
 	unsigned long tmp, oldval;
 
 	__asm__ __volatile__ (
 		"1:						\n\t"
-		"movli.l	@%2, %0	! __raw_read_trylock	\n\t"
+		"movli.l	@%2, %0	! arch_read_trylock	\n\t"
 		"mov		%0, %1				\n\t"
 		"cmp/pl		%0				\n\t"
 		"bf		2f				\n\t"
@@ -193,13 +193,13 @@
 	return (oldval > 0);
 }
 
-static inline int __raw_write_trylock(arch_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
 	unsigned long tmp, oldval;
 
 	__asm__ __volatile__ (
 		"1:						\n\t"
-		"movli.l	@%2, %0	! __raw_write_trylock	\n\t"
+		"movli.l	@%2, %0	! arch_write_trylock	\n\t"
 		"mov		%0, %1				\n\t"
 		"cmp/hs		%3, %0				\n\t"
 		"bf		2f				\n\t"
@@ -216,8 +216,8 @@
 	return (oldval > (RW_LOCK_BIAS - 1));
 }
 
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
 
 #define arch_spin_relax(lock)	cpu_relax()
 #define arch_read_relax(lock)	cpu_relax()