[PATCH] Directed yield: direct yield of spinlocks for s390.

Use the new diagnose 0x9c in the spinlock implementation for s390.  When a
virtual cpu fails to acquire a lock, it yields the remainder of its
timeslice to the virtual cpu that currently holds the lock, so that the
holder gets to run and can release the lock sooner.
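
To make that possible, the lock word now records the holder:
__raw_spin_lock stores the bitwise complement of smp_processor_id() in
owner_cpu (complemented so that cpu 0 remains distinguishable from the
unlocked value 0).  The companion change in arch/s390/lib/spinlock.c,
which is not part of the hunk below, can then hand the timeslice to that
cpu from the relax path.  Roughly, as a sketch assuming a _diag9c()
helper that wraps the diagnose instruction:

	static inline void _diag9c(unsigned int cpu)
	{
		/* yield remaining timeslice to the given virtual cpu */
		asm volatile("diag %0,0,0x9c" : : "d" (cpu));
	}

	void _raw_spin_relax(raw_spinlock_t *lock)
	{
		unsigned int cpu = lock->owner_cpu;

		if (cpu != 0)
			_diag9c(~cpu);	/* undo the ~ applied at lock time */
	}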

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
diff --git a/include/asm-s390/spinlock.h b/include/asm-s390/spinlock.h
index 5f00fea..6b78af1 100644
--- a/include/asm-s390/spinlock.h
+++ b/include/asm-s390/spinlock.h
@@ -13,6 +13,8 @@
 
 #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
 
+#include <linux/smp.h>
+
 static inline int
 _raw_compare_and_swap(volatile unsigned int *lock,
 		      unsigned int old, unsigned int new)
@@ -50,34 +52,46 @@
  * (the type definitions are in asm/spinlock_types.h)
  */
 
-#define __raw_spin_is_locked(x) ((x)->lock != 0)
+#define __raw_spin_is_locked(x) ((x)->owner_cpu != 0)
 #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
 #define __raw_spin_unlock_wait(lock) \
-	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
+	do { while (__raw_spin_is_locked(lock)) \
+		 _raw_spin_relax(lock); } while (0)
 
-extern void _raw_spin_lock_wait(raw_spinlock_t *lp, unsigned int pc);
-extern int _raw_spin_trylock_retry(raw_spinlock_t *lp, unsigned int pc);
+extern void _raw_spin_lock_wait(raw_spinlock_t *, unsigned int pc);
+extern int _raw_spin_trylock_retry(raw_spinlock_t *, unsigned int pc);
+extern void _raw_spin_relax(raw_spinlock_t *lock);
 
 static inline void __raw_spin_lock(raw_spinlock_t *lp)
 {
 	unsigned long pc = 1 | (unsigned long) __builtin_return_address(0);
+	int old;
 
-	if (unlikely(_raw_compare_and_swap(&lp->lock, 0, pc) != 0))
-		_raw_spin_lock_wait(lp, pc);
+	old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
+	if (likely(old == 0)) {
+		lp->owner_pc = pc;
+		return;
+	}
+	_raw_spin_lock_wait(lp, pc);
 }
 
 static inline int __raw_spin_trylock(raw_spinlock_t *lp)
 {
 	unsigned long pc = 1 | (unsigned long) __builtin_return_address(0);
+	int old;
 
-	if (likely(_raw_compare_and_swap(&lp->lock, 0, pc) == 0))
+	old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
+	if (likely(old == 0)) {
+		lp->owner_pc = pc;
 		return 1;
+	}
 	return _raw_spin_trylock_retry(lp, pc);
 }
 
 static inline void __raw_spin_unlock(raw_spinlock_t *lp)
 {
-	_raw_compare_and_swap(&lp->lock, lp->lock, 0);
+	lp->owner_pc = 0;
+	_raw_compare_and_swap(&lp->owner_cpu, lp->owner_cpu, 0);
 }
 		
 /*
@@ -154,7 +168,6 @@
 	return _raw_write_trylock_retry(rw);
 }
 
-#define _raw_spin_relax(lock)	cpu_relax()
 #define _raw_read_relax(lock)	cpu_relax()
 #define _raw_write_relax(lock)	cpu_relax()
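
Not shown above is the matching change to the lock word itself in
include/asm-s390/spinlock_types.h, which this hunk relies on: the single
"lock" field is replaced by an owner_cpu/owner_pc pair.  A sketch of what
the type presumably looks like after this patch:

	typedef struct {
		volatile unsigned int owner_cpu;	/* ~cpu of holder, 0 if free */
		volatile unsigned int owner_pc;		/* caller address at lock time */
	} __attribute__ ((aligned (4))) raw_spinlock_t;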