#ifndef _ASM_X86_CMPXCHG_32_H
#define _ASM_X86_CMPXCHG_32_H

/*
 * Note: if you use set_64bit(), __cmpxchg64(), or their variants, you
 * need to test for the feature in boot_cpu_data.
 */

/*
 * CMPXCHG8B only writes to the target if we had the previous
 * value in registers, otherwise it acts as a read and gives us the
 * "new previous" value.  That is why there is a loop.  Preloading
 * EDX:EAX is a performance optimization: in the common case it means
 * we need only one locked operation.
 *
 * A SIMD/3DNOW!/MMX/FPU 64-bit store here would require at the very
 * least an FPU save and/or %cr0.ts manipulation.
 *
 * cmpxchg8b must be used with the lock prefix here to allow the
 * instruction to be executed atomically.  We need the reader side to
 * see a coherent 64-bit value.
 */
static inline void set_64bit(volatile u64 *ptr, u64 value)
{
	u32 low  = value;
	u32 high = value >> 32;
	u64 prev = *ptr;

	asm volatile("\n1:\t"
		     LOCK_PREFIX "cmpxchg8b %0\n\t"
		     "jnz 1b"
		     : "=m" (*ptr), "+A" (prev)
		     : "b" (low), "c" (high)
		     : "memory");
}

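/*
 * A minimal usage sketch (hypothetical, not part of this header): a
 * writer publishes a 64-bit value that concurrent readers must never
 * observe half-written.  On a 32-bit CPU a plain store would be two
 * 32-bit writes, so set_64bit() is used instead:
 *
 *	static volatile u64 shared_stamp;
 *
 *	static void publish_stamp(u64 stamp)
 *	{
 *		set_64bit(&shared_stamp, stamp);  // one atomic 64-bit store
 *	}
 */
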
#define __HAVE_ARCH_CMPXCHG 1

#ifdef CONFIG_X86_CMPXCHG64
#define cmpxchg64(ptr, o, n)						\
	((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \
					 (unsigned long long)(n)))
#define cmpxchg64_local(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o), \
					       (unsigned long long)(n)))
#endif

static inline u64 __cmpxchg64(volatile u64 *ptr, u64 old, u64 new)
{
	u64 prev;
	asm volatile(LOCK_PREFIX "cmpxchg8b %1"
		     : "=A" (prev),
		       "+m" (*ptr)
		     : "b" ((u32)new),
		       "c" ((u32)(new >> 32)),
		       "0" (old)
		     : "memory");
	return prev;
}

static inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new)
{
	u64 prev;
	asm volatile("cmpxchg8b %1"
		     : "=A" (prev),
		       "+m" (*ptr)
		     : "b" ((u32)new),
		       "c" ((u32)(new >> 32)),
		       "0" (old)
		     : "memory");
	return prev;
}

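/*
 * A minimal sketch (hypothetical, not part of this header) of the
 * classic compare-and-swap retry loop built on cmpxchg64(): add to a
 * 64-bit counter atomically on a 32-bit CPU.  The racy initial read
 * is fine because cmpxchg64() only commits if the value is unchanged.
 *
 *	static u64 counter;
 *
 *	static void counter_add(u64 delta)
 *	{
 *		u64 old, new;
 *
 *		do {
 *			old = counter;		// may be stale
 *			new = old + delta;
 *		} while (cmpxchg64(&counter, old, new) != old);
 *	}
 */
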
#ifndef CONFIG_X86_CMPXCHG64
/*
 * When building a kernel capable of running on the 80386 and 80486,
 * the cmpxchg8b instruction may have to be emulated at runtime.
 */

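/*
 * Note: cmpxchg8b_emu appears to use the same register convention as
 * the inline constraints below: the target pointer in %esi, the old
 * value in %edx:%eax, the new value in %ecx:%ebx, and the previous
 * memory contents returned in %edx:%eax.  That shared convention is
 * what lets alternative_io() patch between the emulation call and the
 * native cmpxchg8b with a single constraint list.
 */
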
#define cmpxchg64(ptr, o, n)				\
({							\
	__typeof__(*(ptr)) __ret;			\
	__typeof__(*(ptr)) __old = (o);			\
	__typeof__(*(ptr)) __new = (n);			\
	alternative_io(LOCK_PREFIX_HERE			\
			"call cmpxchg8b_emu",		\
			"lock; cmpxchg8b (%%esi)" ,	\
			X86_FEATURE_CX8,		\
			"=A" (__ret),			\
			"S" ((ptr)), "0" (__old),	\
			"b" ((unsigned int)__new),	\
			"c" ((unsigned int)(__new>>32))	\
			: "memory");			\
	__ret; })

#define cmpxchg64_local(ptr, o, n)			\
({							\
	__typeof__(*(ptr)) __ret;			\
	__typeof__(*(ptr)) __old = (o);			\
	__typeof__(*(ptr)) __new = (n);			\
	alternative_io("call cmpxchg8b_emu",		\
			"cmpxchg8b (%%esi)" ,		\
			X86_FEATURE_CX8,		\
			"=A" (__ret),			\
			"S" ((ptr)), "0" (__old),	\
			"b" ((unsigned int)__new),	\
			"c" ((unsigned int)(__new>>32))	\
			: "memory");			\
	__ret; })

#endif

#define system_has_cmpxchg_double() cpu_has_cx8

#endif /* _ASM_X86_CMPXCHG_32_H */