x86, 64-bit: add sync_cmpxchg

Add sync_cmpxchg to the 64-bit cmpxchg header to match the 32-bit
version.  It is needed for atomic operations on memory shared with a
hypervisor, which must use locked instructions even when the guest
kernel is uniprocessor.

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Cc: xen-devel <xen-devel@lists.xensource.com>
Cc: Stephen Tweedie <sct@redhat.com>
Cc: Eduardo Habkost <ehabkost@redhat.com>
Cc: Mark McLoughlin <markmc@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
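---
For illustration only, a minimal sketch of how sync_cmpxchg might be
used on memory shared with a hypervisor; the helper and field names
here are hypothetical and not part of this patch:

	/* Hypothetical: atomically clear a bit in a word that lives
	 * in a page shared with the hypervisor.  The locked cmpxchg
	 * is needed even on a UP guest, because the hypervisor (or
	 * another physical CPU) may modify the word concurrently. */
	static void clear_shared_flag(volatile unsigned int *flags,
				      unsigned int flag)
	{
		unsigned int old, new;

		do {
			old = *flags;
			new = old & ~flag;
		} while (sync_cmpxchg(flags, old, new) != old);
	}

Since sizeof(*flags) == 4, the size switch below compiles this down
to a single "lock; cmpxchgl", retried until the swap succeeds.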
diff --git a/include/asm-x86/cmpxchg_64.h b/include/asm-x86/cmpxchg_64.h
index d9b26b9..17463cc 100644
--- a/include/asm-x86/cmpxchg_64.h
+++ b/include/asm-x86/cmpxchg_64.h
@@ -93,6 +93,39 @@
 	return old;
 }
 
+/*
+ * Always use locked operations when touching memory shared with a
+ * hypervisor, since the system may be SMP even if the guest kernel
+ * isn't.
+ */
+static inline unsigned long __sync_cmpxchg(volatile void *ptr,
+					   unsigned long old,
+					   unsigned long new, int size)
+{
+	unsigned long prev;
+	switch (size) {
+	case 1:
+		asm volatile("lock; cmpxchgb %b1,%2"
+			     : "=a"(prev)
+			     : "q"(new), "m"(*__xg(ptr)), "0"(old)
+			     : "memory");
+		return prev;
+	case 2:
+		asm volatile("lock; cmpxchgw %w1,%2"
+			     : "=a"(prev)
+			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
+			     : "memory");
+		return prev;
+	case 4:
+		asm volatile("lock; cmpxchgl %1,%2"
+			     : "=a"(prev)
+			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
+			     : "memory");
+		return prev;
+	}
+	return old;
+}
+
 static inline unsigned long __cmpxchg_local(volatile void *ptr,
 					    unsigned long old,
 					    unsigned long new, int size)
@@ -139,6 +172,10 @@
 	((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o),	\
 					     (unsigned long)(n),	\
 					     sizeof(*(ptr))))
+#define sync_cmpxchg(ptr, o, n)						\
+	((__typeof__(*(ptr)))__sync_cmpxchg((ptr), (unsigned long)(o),	\
+					    (unsigned long)(n),		\
+					    sizeof(*(ptr))))
 #define cmpxchg64_local(ptr, o, n)					\
 ({									\
 	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
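
As a further hedged sketch (the name below is illustrative, assuming
the kernel's u16 from <linux/types.h>), the sizeof(*(ptr)) dispatch
means the operand width follows the pointed-to type:

	/* Hypothetical 16-bit ownership field in a shared ring.
	 * sizeof(*slot) == 2, so __sync_cmpxchg takes the "case 2"
	 * branch and emits "lock; cmpxchgw". */
	static u16 claim_slot(volatile u16 *slot, u16 owner)
	{
		/* Returns the previous value: 0 means we won the slot. */
		return sync_cmpxchg(slot, 0, owner);
	}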