x86: fix asm constraints in spinlock_32/64.h

Use the correct constraints for the spinlock assembler functions.

Read-modify-write operations need the "+m" constraint instead of "=m",
while a plain store (such as the 32-bit __raw_spin_unlock, which just
writes the unlocked value) only needs "=m".

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/include/asm-x86/spinlock_32.h b/include/asm-x86/spinlock_32.h
index c42c3f1..fca124a 100644
--- a/include/asm-x86/spinlock_32.h
+++ b/include/asm-x86/spinlock_32.h
@@ -99,7 +99,7 @@
 
 static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
-	asm volatile("movb $1,%0" : "+m" (lock->slock) :: "memory");
+	asm volatile("movb $1,%0" : "=m" (lock->slock) :: "memory");
 }
 
 #else
diff --git a/include/asm-x86/spinlock_64.h b/include/asm-x86/spinlock_64.h
index 3b5adf9..e81f6c1 100644
--- a/include/asm-x86/spinlock_64.h
+++ b/include/asm-x86/spinlock_64.h
@@ -34,7 +34,7 @@
 		"jle 3b\n\t"
 		"jmp 1b\n"
 		"2:\t"
-		: "=m" (lock->slock) : : "memory");
+		: "+m" (lock->slock) : : "memory");
 }
 
 /*
@@ -80,7 +80,7 @@
 
 	asm volatile(
 		"xchgl %0,%1"
-		:"=q" (oldval), "=m" (lock->slock)
+		:"=q" (oldval), "+m" (lock->slock)
 		:"0" (0) : "memory");
 
 	return oldval > 0;
@@ -162,13 +162,13 @@
 
 static inline void __raw_read_unlock(raw_rwlock_t *rw)
 {
-	asm volatile(LOCK_PREFIX "incl %0" :"=m" (rw->lock) : : "memory");
+	asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
 }
 
 static inline void __raw_write_unlock(raw_rwlock_t *rw)
 {
-	asm volatile(LOCK_PREFIX "addl $" RW_LOCK_BIAS_STR ",%0"
-				: "=m" (rw->lock) : : "memory");
+	asm volatile(LOCK_PREFIX "addl $" RW_LOCK_BIAS_STR ", %0"
+				: "+m" (rw->lock) : : "memory");
 }
 
 #define _raw_spin_relax(lock)	cpu_relax()