locking: Implement new raw_spinlock

Now that the raw_spin name space is freed up, we can implement
raw_spinlock and the related functions, which are used to annotate the
locks that are not converted to sleeping spinlocks in preempt-rt.
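
As a usage sketch (illustrative only, not part of this patch; the lock
name and critical section are made up, assuming the raw_spin_* API this
series provides), such a lock is declared and taken with the raw_
variants so it stays a real, spinning lock even on preempt-rt:

    /* Lock protecting data shared with hard interrupt context */
    static DEFINE_RAW_SPINLOCK(example_lock);

    static void example_update(void)
    {
    	unsigned long flags;

    	/* This section must not sleep, even on preempt-rt */
    	raw_spin_lock_irqsave(&example_lock, flags);
    	/* ... update the shared data ... */
    	raw_spin_unlock_irqrestore(&example_lock, flags);
    }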

A side effect is that only such locks can be used with the low level
lock functions which circumvent lockdep.

For !rt, the spin_* functions are mapped to the raw_spin* implementations.
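
A minimal sketch of that mapping (illustrative, not the verbatim header
code; it assumes the !rt spinlock_t is a struct wrapping a raw_spinlock
member named rlock, as set up elsewhere in this series):

    typedef struct spinlock {
    	struct raw_spinlock rlock;
    } spinlock_t;

    /* On !rt, spin_lock() simply forwards to the raw implementation */
    static inline void spin_lock(spinlock_t *lock)
    {
    	raw_spin_lock(&lock->rlock);
    }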

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: Ingo Molnar <mingo@elte.hu>

diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index a2b2c9d..eabe506 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -17,26 +17,29 @@
 
 int in_lock_functions(unsigned long addr);
 
-#define assert_spin_locked(x)	BUG_ON(!spin_is_locked(x))
+#define assert_raw_spin_locked(x)	BUG_ON(!raw_spin_is_locked(x))
 
-void __lockfunc _spin_lock(spinlock_t *lock)		__acquires(lock);
-void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
+void __lockfunc _spin_lock(raw_spinlock_t *lock)	__acquires(lock);
+void __lockfunc _spin_lock_nested(raw_spinlock_t *lock, int subclass)
 							__acquires(lock);
-void __lockfunc _spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *map)
+void __lockfunc
+_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map)
 							__acquires(lock);
-void __lockfunc _spin_lock_bh(spinlock_t *lock)		__acquires(lock);
-void __lockfunc _spin_lock_irq(spinlock_t *lock)	__acquires(lock);
+void __lockfunc _spin_lock_bh(raw_spinlock_t *lock)	__acquires(lock);
+void __lockfunc _spin_lock_irq(raw_spinlock_t *lock)	__acquires(lock);
 
-unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
+unsigned long __lockfunc _spin_lock_irqsave(raw_spinlock_t *lock)
 							__acquires(lock);
-unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass)
+unsigned long __lockfunc
+_spin_lock_irqsave_nested(raw_spinlock_t *lock, int subclass)
 							__acquires(lock);
-int __lockfunc _spin_trylock(spinlock_t *lock);
-int __lockfunc _spin_trylock_bh(spinlock_t *lock);
-void __lockfunc _spin_unlock(spinlock_t *lock)		__releases(lock);
-void __lockfunc _spin_unlock_bh(spinlock_t *lock)	__releases(lock);
-void __lockfunc _spin_unlock_irq(spinlock_t *lock)	__releases(lock);
-void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
+int __lockfunc _spin_trylock(raw_spinlock_t *lock);
+int __lockfunc _spin_trylock_bh(raw_spinlock_t *lock);
+void __lockfunc _spin_unlock(raw_spinlock_t *lock)	__releases(lock);
+void __lockfunc _spin_unlock_bh(raw_spinlock_t *lock)	__releases(lock);
+void __lockfunc _spin_unlock_irq(raw_spinlock_t *lock)	__releases(lock);
+void __lockfunc
+_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
 							__releases(lock);
 
 #ifdef CONFIG_INLINE_SPIN_LOCK
@@ -79,7 +82,7 @@
 #define _spin_unlock_irqrestore(lock, flags) __spin_unlock_irqrestore(lock, flags)
 #endif
 
-static inline int __spin_trylock(spinlock_t *lock)
+static inline int __spin_trylock(raw_spinlock_t *lock)
 {
 	preempt_disable();
 	if (_raw_spin_trylock(lock)) {
@@ -97,7 +100,7 @@
  */
 #if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
 
-static inline unsigned long __spin_lock_irqsave(spinlock_t *lock)
+static inline unsigned long __spin_lock_irqsave(raw_spinlock_t *lock)
 {
 	unsigned long flags;
 
@@ -117,7 +120,7 @@
 	return flags;
 }
 
-static inline void __spin_lock_irq(spinlock_t *lock)
+static inline void __spin_lock_irq(raw_spinlock_t *lock)
 {
 	local_irq_disable();
 	preempt_disable();
@@ -125,7 +128,7 @@
 	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
 }
 
-static inline void __spin_lock_bh(spinlock_t *lock)
+static inline void __spin_lock_bh(raw_spinlock_t *lock)
 {
 	local_bh_disable();
 	preempt_disable();
@@ -133,7 +136,7 @@
 	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
 }
 
-static inline void __spin_lock(spinlock_t *lock)
+static inline void __spin_lock(raw_spinlock_t *lock)
 {
 	preempt_disable();
 	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
@@ -142,14 +145,14 @@
 
 #endif /* CONFIG_PREEMPT */
 
-static inline void __spin_unlock(spinlock_t *lock)
+static inline void __spin_unlock(raw_spinlock_t *lock)
 {
 	spin_release(&lock->dep_map, 1, _RET_IP_);
 	_raw_spin_unlock(lock);
 	preempt_enable();
 }
 
-static inline void __spin_unlock_irqrestore(spinlock_t *lock,
+static inline void __spin_unlock_irqrestore(raw_spinlock_t *lock,
 					    unsigned long flags)
 {
 	spin_release(&lock->dep_map, 1, _RET_IP_);
@@ -158,7 +161,7 @@
 	preempt_enable();
 }
 
-static inline void __spin_unlock_irq(spinlock_t *lock)
+static inline void __spin_unlock_irq(raw_spinlock_t *lock)
 {
 	spin_release(&lock->dep_map, 1, _RET_IP_);
 	_raw_spin_unlock(lock);
@@ -166,7 +169,7 @@
 	preempt_enable();
 }
 
-static inline void __spin_unlock_bh(spinlock_t *lock)
+static inline void __spin_unlock_bh(raw_spinlock_t *lock)
 {
 	spin_release(&lock->dep_map, 1, _RET_IP_);
 	_raw_spin_unlock(lock);
@@ -174,7 +177,7 @@
 	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
 }
 
-static inline int __spin_trylock_bh(spinlock_t *lock)
+static inline int __spin_trylock_bh(raw_spinlock_t *lock)
 {
 	local_bh_disable();
 	preempt_disable();