locking: Implement new raw_spinlock

Now that the raw_spin name space is freed up, we can implement
raw_spinlock and the related functions, which are used to annotate
the locks that are not converted to sleeping spinlocks in preempt-rt.

A side effect is that only such locks can be used with the low level
lock functions that circumvent lockdep.

For !rt, the spin_* functions are mapped to the raw_spin* implementations.
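As an illustration, a minimal usage sketch (the lock and function names
below are hypothetical and not part of this patch, and the
DEFINE_RAW_SPINLOCK()/DEFINE_SPINLOCK() initializers are assumed to be
available from spinlock_types.h): a lock that has to keep spinning even
on RT is declared raw and uses the raw_spin_* API explicitly, while
ordinary spinlock_t users are unaffected because spin_lock() and friends
forward to the raw implementation for !rt.

static DEFINE_RAW_SPINLOCK(hw_lock);	/* stays a spinning lock on RT */
static DEFINE_SPINLOCK(stats_lock);	/* may become a sleeping lock on RT */

static void hw_poke(void)
{
	unsigned long flags;

	/* Explicit raw API: usable from contexts that must never sleep */
	raw_spin_lock_irqsave(&hw_lock, flags);
	/* ... touch hardware registers ... */
	raw_spin_unlock_irqrestore(&hw_lock, flags);
}

static void stats_inc(void)
{
	/* For !rt this resolves to raw_spin_lock(&stats_lock.rlock) */
	spin_lock(&stats_lock);
	/* ... update counters ... */
	spin_unlock(&stats_lock);
}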

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: Ingo Molnar <mingo@elte.hu>

diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 53bc221..ef5a55d 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -80,7 +80,7 @@
 #include <linux/spinlock_types.h>
 
 /*
- * Pull the __raw*() functions/declarations (UP-nondebug doesnt need them):
+ * Pull the arch_spin*() functions/declarations (UP-nondebug doesnt need them):
  */
 #ifdef CONFIG_SMP
 # include <asm/spinlock.h>
@@ -89,30 +89,30 @@
 #endif
 
 #ifdef CONFIG_DEBUG_SPINLOCK
-  extern void __spin_lock_init(spinlock_t *lock, const char *name,
-			       struct lock_class_key *key);
-# define spin_lock_init(lock)					\
+  extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
+				   struct lock_class_key *key);
+# define raw_spin_lock_init(lock)				\
 do {								\
 	static struct lock_class_key __key;			\
 								\
-	__spin_lock_init((lock), #lock, &__key);		\
+	__raw_spin_lock_init((lock), #lock, &__key);		\
 } while (0)
 
 #else
-# define spin_lock_init(lock)					\
-	do { *(lock) = __SPIN_LOCK_UNLOCKED(lock); } while (0)
+# define raw_spin_lock_init(lock)				\
+	do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
 #endif
 
-#define spin_is_locked(lock)	arch_spin_is_locked(&(lock)->raw_lock)
+#define raw_spin_is_locked(lock)	arch_spin_is_locked(&(lock)->raw_lock)
 
 #ifdef CONFIG_GENERIC_LOCKBREAK
-#define spin_is_contended(lock) ((lock)->break_lock)
+#define raw_spin_is_contended(lock) ((lock)->break_lock)
 #else
 
 #ifdef arch_spin_is_contended
-#define spin_is_contended(lock)	arch_spin_is_contended(&(lock)->raw_lock)
+#define raw_spin_is_contended(lock)	arch_spin_is_contended(&(lock)->raw_lock)
 #else
-#define spin_is_contended(lock)	(((void)(lock), 0))
+#define raw_spin_is_contended(lock)	(((void)(lock), 0))
 #endif /*arch_spin_is_contended*/
 #endif
 
@@ -122,22 +122,37 @@
 #endif
 
 /**
- * spin_unlock_wait - wait until the spinlock gets unlocked
+ * raw_spin_unlock_wait - wait until the spinlock gets unlocked
  * @lock: the spinlock in question.
  */
-#define spin_unlock_wait(lock)	arch_spin_unlock_wait(&(lock)->raw_lock)
+#define raw_spin_unlock_wait(lock)	arch_spin_unlock_wait(&(lock)->raw_lock)
 
 #ifdef CONFIG_DEBUG_SPINLOCK
- extern void _raw_spin_lock(spinlock_t *lock);
+ extern void _raw_spin_lock(raw_spinlock_t *lock);
 #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
- extern int _raw_spin_trylock(spinlock_t *lock);
- extern void _raw_spin_unlock(spinlock_t *lock);
+ extern int _raw_spin_trylock(raw_spinlock_t *lock);
+ extern void _raw_spin_unlock(raw_spinlock_t *lock);
 #else
-# define _raw_spin_lock(lock)		arch_spin_lock(&(lock)->raw_lock)
-# define _raw_spin_lock_flags(lock, flags) \
-		arch_spin_lock_flags(&(lock)->raw_lock, *(flags))
-# define _raw_spin_trylock(lock)	arch_spin_trylock(&(lock)->raw_lock)
-# define _raw_spin_unlock(lock)		arch_spin_unlock(&(lock)->raw_lock)
+static inline void _raw_spin_lock(raw_spinlock_t *lock)
+{
+	arch_spin_lock(&lock->raw_lock);
+}
+
+static inline void
+_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags)
+{
+	arch_spin_lock_flags(&lock->raw_lock, *flags);
+}
+
+static inline int _raw_spin_trylock(raw_spinlock_t *lock)
+{
+	return arch_spin_trylock(&(lock)->raw_lock);
+}
+
+static inline void _raw_spin_unlock(raw_spinlock_t *lock)
+{
+	arch_spin_unlock(&lock->raw_lock);
+}
 #endif
 
 /*
@@ -146,38 +161,38 @@
  * various methods are defined as nops in the case they are not
  * required.
  */
-#define spin_trylock(lock)		__cond_lock(lock, _spin_trylock(lock))
+#define raw_spin_trylock(lock)		__cond_lock(lock, _spin_trylock(lock))
 
-#define spin_lock(lock)			_spin_lock(lock)
+#define raw_spin_lock(lock)		_spin_lock(lock)
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define spin_lock_nested(lock, subclass) _spin_lock_nested(lock, subclass)
-# define spin_lock_nest_lock(lock, nest_lock)				\
+# define raw_spin_lock_nested(lock, subclass) _spin_lock_nested(lock, subclass)
+# define raw_spin_lock_nest_lock(lock, nest_lock)			\
 	 do {								\
 		 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
 		 _spin_lock_nest_lock(lock, &(nest_lock)->dep_map);	\
 	 } while (0)
 #else
-# define spin_lock_nested(lock, subclass) _spin_lock(lock)
-# define spin_lock_nest_lock(lock, nest_lock) _spin_lock(lock)
+# define raw_spin_lock_nested(lock, subclass)		_spin_lock(lock)
+# define raw_spin_lock_nest_lock(lock, nest_lock)	_spin_lock(lock)
 #endif
 
 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
 
-#define spin_lock_irqsave(lock, flags)			\
+#define raw_spin_lock_irqsave(lock, flags)			\
 	do {						\
 		typecheck(unsigned long, flags);	\
 		flags = _spin_lock_irqsave(lock);	\
 	} while (0)
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
-#define spin_lock_irqsave_nested(lock, flags, subclass)			\
+#define raw_spin_lock_irqsave_nested(lock, flags, subclass)		\
 	do {								\
 		typecheck(unsigned long, flags);			\
 		flags = _spin_lock_irqsave_nested(lock, subclass);	\
 	} while (0)
 #else
-#define spin_lock_irqsave_nested(lock, flags, subclass)			\
+#define raw_spin_lock_irqsave_nested(lock, flags, subclass)		\
 	do {								\
 		typecheck(unsigned long, flags);			\
 		flags = _spin_lock_irqsave(lock);			\
@@ -186,45 +201,178 @@
 
 #else
 
-#define spin_lock_irqsave(lock, flags)			\
+#define raw_spin_lock_irqsave(lock, flags)		\
 	do {						\
 		typecheck(unsigned long, flags);	\
 		_spin_lock_irqsave(lock, flags);	\
 	} while (0)
 
-#define spin_lock_irqsave_nested(lock, flags, subclass)	\
-	spin_lock_irqsave(lock, flags)
+#define raw_spin_lock_irqsave_nested(lock, flags, subclass)	\
+	raw_spin_lock_irqsave(lock, flags)
 
 #endif
 
-#define spin_lock_irq(lock)		_spin_lock_irq(lock)
-#define spin_lock_bh(lock)		_spin_lock_bh(lock)
-#define spin_unlock(lock)		_spin_unlock(lock)
-#define spin_unlock_irq(lock)		_spin_unlock_irq(lock)
+#define raw_spin_lock_irq(lock)	_spin_lock_irq(lock)
+#define raw_spin_lock_bh(lock)		_spin_lock_bh(lock)
+#define raw_spin_unlock(lock)		_spin_unlock(lock)
+#define raw_spin_unlock_irq(lock)	_spin_unlock_irq(lock)
 
-#define spin_unlock_irqrestore(lock, flags)		\
-	do {						\
-		typecheck(unsigned long, flags);	\
+#define raw_spin_unlock_irqrestore(lock, flags)		\
+	do {							\
+		typecheck(unsigned long, flags);		\
 		_spin_unlock_irqrestore(lock, flags);	\
 	} while (0)
-#define spin_unlock_bh(lock)		_spin_unlock_bh(lock)
+#define raw_spin_unlock_bh(lock)	_spin_unlock_bh(lock)
 
-#define spin_trylock_bh(lock)	__cond_lock(lock, _spin_trylock_bh(lock))
+#define raw_spin_trylock_bh(lock)	__cond_lock(lock, _spin_trylock_bh(lock))
 
-#define spin_trylock_irq(lock) \
+#define raw_spin_trylock_irq(lock) \
 ({ \
 	local_irq_disable(); \
-	spin_trylock(lock) ? \
+	raw_spin_trylock(lock) ? \
 	1 : ({ local_irq_enable(); 0;  }); \
 })
 
-#define spin_trylock_irqsave(lock, flags) \
+#define raw_spin_trylock_irqsave(lock, flags) \
 ({ \
 	local_irq_save(flags); \
-	spin_trylock(lock) ? \
+	raw_spin_trylock(lock) ? \
 	1 : ({ local_irq_restore(flags); 0; }); \
 })
 
+/**
+ * raw_spin_can_lock - would raw_spin_trylock() succeed?
+ * @lock: the spinlock in question.
+ */
+#define raw_spin_can_lock(lock)	(!raw_spin_is_locked(lock))
+
+/* Include rwlock functions */
+#include <linux/rwlock.h>
+
+/*
+ * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
+ */
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+# include <linux/spinlock_api_smp.h>
+#else
+# include <linux/spinlock_api_up.h>
+#endif
+
+/*
+ * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
+ */
+
+static inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
+{
+	return &lock->rlock;
+}
+
+#define spin_lock_init(_lock)				\
+do {							\
+	spinlock_check(_lock);				\
+	raw_spin_lock_init(&(_lock)->rlock);		\
+} while (0)
+
+static inline void spin_lock(spinlock_t *lock)
+{
+	raw_spin_lock(&lock->rlock);
+}
+
+static inline void spin_lock_bh(spinlock_t *lock)
+{
+	raw_spin_lock_bh(&lock->rlock);
+}
+
+static inline int spin_trylock(spinlock_t *lock)
+{
+	return raw_spin_trylock(&lock->rlock);
+}
+
+#define spin_lock_nested(lock, subclass)			\
+do {								\
+	raw_spin_lock_nested(spinlock_check(lock), subclass);	\
+} while (0)
+
+#define spin_lock_nest_lock(lock, nest_lock)				\
+do {									\
+	raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock);	\
+} while (0)
+
+static inline void spin_lock_irq(spinlock_t *lock)
+{
+	raw_spin_lock_irq(&lock->rlock);
+}
+
+#define spin_lock_irqsave(lock, flags)				\
+do {								\
+	raw_spin_lock_irqsave(spinlock_check(lock), flags);	\
+} while (0)
+
+#define spin_lock_irqsave_nested(lock, flags, subclass)			\
+do {									\
+	raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
+} while (0)
+
+static inline void spin_unlock(spinlock_t *lock)
+{
+	raw_spin_unlock(&lock->rlock);
+}
+
+static inline void spin_unlock_bh(spinlock_t *lock)
+{
+	raw_spin_unlock_bh(&lock->rlock);
+}
+
+static inline void spin_unlock_irq(spinlock_t *lock)
+{
+	raw_spin_unlock_irq(&lock->rlock);
+}
+
+static inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
+{
+	raw_spin_unlock_irqrestore(&lock->rlock, flags);
+}
+
+static inline int spin_trylock_bh(spinlock_t *lock)
+{
+	return raw_spin_trylock_bh(&lock->rlock);
+}
+
+static inline int spin_trylock_irq(spinlock_t *lock)
+{
+	return raw_spin_trylock_irq(&lock->rlock);
+}
+
+#define spin_trylock_irqsave(lock, flags)			\
+({								\
+	raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
+})
+
+static inline void spin_unlock_wait(spinlock_t *lock)
+{
+	raw_spin_unlock_wait(&lock->rlock);
+}
+
+static inline int spin_is_locked(spinlock_t *lock)
+{
+	return raw_spin_is_locked(&lock->rlock);
+}
+
+static inline int spin_is_contended(spinlock_t *lock)
+{
+	return raw_spin_is_contended(&lock->rlock);
+}
+
+static inline int spin_can_lock(spinlock_t *lock)
+{
+	return raw_spin_can_lock(&lock->rlock);
+}
+
+static inline void assert_spin_locked(spinlock_t *lock)
+{
+	assert_raw_spin_locked(&lock->rlock);
+}
+
 /*
  * Pull the atomic_t declaration:
  * (asm-mips/atomic.h needs above definitions)
@@ -242,22 +390,4 @@
 #define atomic_dec_and_lock(atomic, lock) \
 		__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
 
-/**
- * spin_can_lock - would spin_trylock() succeed?
- * @lock: the spinlock in question.
- */
-#define spin_can_lock(lock)	(!spin_is_locked(lock))
-
-/* Include rwlock functions */
-#include <linux/rwlock.h>
-
-/*
- * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
- */
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
-# include <linux/spinlock_api_smp.h>
-#else
-# include <linux/spinlock_api_up.h>
-#endif
-
 #endif /* __LINUX_SPINLOCK_H */