#ifndef _ASM_X86_CMPXCHG_32_H
#define _ASM_X86_CMPXCHG_32_H

/*
 * Note: if you use set_64bit(), __cmpxchg64(), or their variants, you
 * need to test for the cmpxchg8b feature (X86_FEATURE_CX8) in
 * boot_cpu_data.
 */

/*
 * CMPXCHG8B only writes to the target if the value in EDX:EAX matches
 * what is currently in memory; otherwise it acts as a read and gives us
 * the "new previous" value.  That is why there is a loop.  Preloading
 * EDX:EAX is a performance optimization: in the common case it means
 * we need only one locked operation.
 *
 * A SIMD/3DNOW!/MMX/FPU 64-bit store here would require at the very
 * least an FPU save and/or %cr0.ts manipulation.
 *
 * cmpxchg8b must be used with the lock prefix here to allow the
 * instruction to be executed atomically.  We need the reader side to
 * see a coherent 64-bit value.
 */
static inline void set_64bit(volatile u64 *ptr, u64 value)
{
	u32 low  = value;
	u32 high = value >> 32;
	u64 prev = *ptr;

	asm volatile("\n1:\t"
		     LOCK_PREFIX "cmpxchg8b %0\n\t"
		     "jnz 1b"
		     : "=m" (*ptr), "+A" (prev)
		     : "b" (low), "c" (high)
		     : "memory");
}
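
/*
 * Illustrative usage sketch (not part of this header; the variable and
 * function names below are hypothetical).  set_64bit() gives concurrent
 * readers a tear-free view of a 64-bit store on 32-bit kernels:
 *
 *	static volatile u64 last_event_stamp;
 *
 *	static void record_event_stamp(u64 stamp)
 *	{
 *		set_64bit(&last_event_stamp, stamp);
 *	}
 */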

#ifdef CONFIG_X86_CMPXCHG64
#define cmpxchg64(ptr, o, n)						\
	((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \
					 (unsigned long long)(n)))
#define cmpxchg64_local(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o), \
					       (unsigned long long)(n)))
#endif

static inline u64 __cmpxchg64(volatile u64 *ptr, u64 old, u64 new)
{
	u64 prev;
	asm volatile(LOCK_PREFIX "cmpxchg8b %1"
		     : "=A" (prev),
		       "+m" (*ptr)
		     : "b" ((u32)new),
		       "c" ((u32)(new >> 32)),
		       "0" (old)
		     : "memory");
	return prev;
}

static inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new)
{
	u64 prev;
	asm volatile("cmpxchg8b %1"
		     : "=A" (prev),
		       "+m" (*ptr)
		     : "b" ((u32)new),
		       "c" ((u32)(new >> 32)),
		       "0" (old)
		     : "memory");
	return prev;
}
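
/*
 * Illustrative usage sketch (not part of this header; names are
 * hypothetical).  The usual pattern for cmpxchg64() is a retry loop that
 * repeats until the compare-and-exchange observes the value it expected:
 *
 *	static u64 total_bytes;
 *
 *	static void add_bytes(u64 delta)
 *	{
 *		u64 old, new;
 *
 *		do {
 *			old = total_bytes;
 *			new = old + delta;
 *		} while (cmpxchg64(&total_bytes, old, new) != old);
 *	}
 */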

#ifndef CONFIG_X86_CMPXCHG64
/*
 * Building a kernel capable of running on 80386 and 80486: those CPUs
 * lack cmpxchg8b, so the instruction may need to be emulated.
 */

#define cmpxchg64(ptr, o, n)					\
({								\
	__typeof__(*(ptr)) __ret;				\
	__typeof__(*(ptr)) __old = (o);				\
	__typeof__(*(ptr)) __new = (n);				\
	alternative_io(LOCK_PREFIX_HERE				\
		       "call cmpxchg8b_emu",			\
		       "lock; cmpxchg8b (%%esi)",		\
		       X86_FEATURE_CX8,				\
		       "=A" (__ret),				\
		       "S" ((ptr)), "0" (__old),		\
		       "b" ((unsigned int)__new),		\
		       "c" ((unsigned int)(__new >> 32))	\
		       : "memory");				\
	__ret; })


#define cmpxchg64_local(ptr, o, n)				\
({								\
	__typeof__(*(ptr)) __ret;				\
	__typeof__(*(ptr)) __old = (o);				\
	__typeof__(*(ptr)) __new = (n);				\
	alternative_io("call cmpxchg8b_emu",			\
		       "cmpxchg8b (%%esi)",			\
		       X86_FEATURE_CX8,				\
		       "=A" (__ret),				\
		       "S" ((ptr)), "0" (__old),		\
		       "b" ((unsigned int)__new),		\
		       "c" ((unsigned int)(__new >> 32))	\
		       : "memory");				\
	__ret; })

#endif

#define system_has_cmpxchg_double() cpu_has_cx8
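
/*
 * Callers should test this before relying on a double-word cmpxchg; the
 * cmpxchg_double() interface itself is defined elsewhere.  A hypothetical
 * sketch, not actual code from this file:
 *
 *	if (system_has_cmpxchg_double())
 *		take the cmpxchg8b-based lockless fast path;
 *	else
 *		fall back to a lock-protected update;
 */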

#endif /* _ASM_X86_CMPXCHG_32_H */