#ifndef _ASM_X86_CMPXCHG_32_H
#define _ASM_X86_CMPXCHG_32_H

/*
 * Note: if you use set_64bit(), __cmpxchg64(), or their variants, you
 *       need to test for the feature in boot_cpu_data.
 */

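/*
 * Illustrative sketch (not part of the original header): a caller that must
 * also run on CPUs without CMPXCHG8B would gate its use of the 64-bit
 * helpers on the CX8 feature bit, along the lines of
 *
 *	if (cpu_has_cx8)
 *		set_64bit(&shared, val);
 *	else
 *		store_under_lock(&shared, val);
 *
 * cpu_has_cx8 expands to a boot_cpu_data feature test; "shared", "val" and
 * store_under_lock() are hypothetical.
 */
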
/*
 * CMPXCHG8B only writes to the target if we had the previous
 * value in registers, otherwise it acts as a read and gives us the
 * "new previous" value.  That is why there is a loop.  Preloading
 * EDX:EAX is a performance optimization: in the common case it means
 * we need only one locked operation.
 *
 * A SIMD/3DNOW!/MMX/FPU 64-bit store here would require at the very
 * least an FPU save and/or %cr0.ts manipulation.
 *
 * cmpxchg8b must be used with the lock prefix here to allow the
 * instruction to be executed atomically.  We need the reader side to
 * see a coherent 64-bit value.
 */
static inline void set_64bit(volatile u64 *ptr, u64 value)
{
	u32 low  = value;
	u32 high = value >> 32;
	u64 prev = *ptr;

	asm volatile("\n1:\t"
		     LOCK_PREFIX "cmpxchg8b %0\n\t"
		     "jnz 1b"
		     : "=m" (*ptr), "+A" (prev)
		     : "b" (low), "c" (high)
		     : "memory");
}

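/*
 * Purely illustrative (not part of the original header): what a single
 * iteration of the locked cmpxchg8b above does, expressed in C.  The real
 * instruction performs the compare and the store as one atomic operation;
 * this helper is not atomic and exists only to document the retry logic.
 * The function name is hypothetical.
 */
static inline u64 __example_cmpxchg8b_step(u64 *ptr, u64 expected, u64 new)
{
	u64 cur = *ptr;			/* what cmpxchg8b reads from memory */

	if (cur == expected)
		*ptr = new;		/* written only when EDX:EAX matched */

	return cur;			/* the "new previous" value */
}
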
#ifdef CONFIG_X86_CMPXCHG
#define __HAVE_ARCH_CMPXCHG 1
#endif

#ifdef CONFIG_X86_CMPXCHG64
#define cmpxchg64(ptr, o, n)						\
	((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \
					 (unsigned long long)(n)))
#define cmpxchg64_local(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o), \
					       (unsigned long long)(n)))
#endif

static inline u64 __cmpxchg64(volatile u64 *ptr, u64 old, u64 new)
{
	u64 prev;
	asm volatile(LOCK_PREFIX "cmpxchg8b %1"
		     : "=A" (prev),
		       "+m" (*ptr)
		     : "b" ((u32)new),
		       "c" ((u32)(new >> 32)),
		       "0" (old)
		     : "memory");
	return prev;
}

static inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new)
{
	u64 prev;
	asm volatile("cmpxchg8b %1"
		     : "=A" (prev),
		       "+m" (*ptr)
		     : "b" ((u32)new),
		       "c" ((u32)(new >> 32)),
		       "0" (old)
		     : "memory");
	return prev;
}

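/*
 * Usage sketch (not part of the original header): the usual compare-and-swap
 * retry loop built on __cmpxchg64(), here adding to a 64-bit counter.  It
 * assumes a CX8-capable CPU (see the note at the top of this file); the
 * function name and the counter are hypothetical.
 */
static inline u64 __example_add64(volatile u64 *counter, u64 delta)
{
	u64 old, prev;

	prev = *counter;			/* preload, like EDX:EAX above */
	do {
		old = prev;
		prev = __cmpxchg64(counter, old, old + delta);
	} while (prev != old);			/* lost a race: retry */

	return old + delta;
}
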
#ifndef CONFIG_X86_CMPXCHG
/*
 * Building a kernel capable of running on an 80386.  It may be necessary to
 * simulate the cmpxchg on the 80386 CPU.  For that purpose we define
 * a function for each of the sizes we support.
 */

extern unsigned long cmpxchg_386_u8(volatile void *, u8, u8);
extern unsigned long cmpxchg_386_u16(volatile void *, u16, u16);
extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32);

static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
					unsigned long new, int size)
{
	switch (size) {
	case 1:
		return cmpxchg_386_u8(ptr, old, new);
	case 2:
		return cmpxchg_386_u16(ptr, old, new);
	case 4:
		return cmpxchg_386_u32(ptr, old, new);
	}
	return old;
}

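/*
 * Illustrative sketch only (the real cmpxchg_386_u*() helpers are defined
 * out of line elsewhere in the tree): on a uniprocessor 386 the emulation
 * can simply disable interrupts around a compare-and-store, since there is
 * no other CPU to race with.  local_irq_save()/local_irq_restore() are
 * assumed available to that out-of-line code; the name below is
 * hypothetical.
 */
static inline unsigned long __example_cmpxchg_386_u32(volatile void *ptr,
						       u32 old, u32 new)
{
	unsigned long flags;
	u32 prev;

	local_irq_save(flags);			/* no CMPXCHG: go irq-off */
	prev = *(volatile u32 *)ptr;
	if (prev == old)
		*(volatile u32 *)ptr = new;
	local_irq_restore(flags);

	return prev;
}
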
#define cmpxchg(ptr, o, n)						\
({									\
	__typeof__(*(ptr)) __ret;					\
	if (likely(boot_cpu_data.x86 > 3))				\
		__ret = (__typeof__(*(ptr)))__cmpxchg((ptr),		\
				(unsigned long)(o), (unsigned long)(n),	\
				sizeof(*(ptr)));			\
	else								\
		__ret = (__typeof__(*(ptr)))cmpxchg_386((ptr),		\
				(unsigned long)(o), (unsigned long)(n),	\
				sizeof(*(ptr)));			\
	__ret;								\
})
#define cmpxchg_local(ptr, o, n)					\
({									\
	__typeof__(*(ptr)) __ret;					\
	if (likely(boot_cpu_data.x86 > 3))				\
		__ret = (__typeof__(*(ptr)))__cmpxchg_local((ptr),	\
				(unsigned long)(o), (unsigned long)(n),	\
				sizeof(*(ptr)));			\
	else								\
		__ret = (__typeof__(*(ptr)))cmpxchg_386((ptr),		\
				(unsigned long)(o), (unsigned long)(n),	\
				sizeof(*(ptr)));			\
	__ret;								\
})
#endif

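/*
 * Usage note (illustrative, not from the original header): the retry idiom
 * looks the same whether cmpxchg() ends up as the inline CMPXCHG instruction
 * or as the cmpxchg_386_*() fallback above:
 *
 *	do {
 *		old = counter;
 *	} while (cmpxchg(&counter, old, old + 1) != old);
 *
 * "counter" here is a hypothetical unsigned long.
 */
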
#ifndef CONFIG_X86_CMPXCHG64
/*
 * Building a kernel capable of running on an 80386 and 80486.  It may be
 * necessary to simulate the cmpxchg8b on the 80386 and 80486 CPU.
 */

#define cmpxchg64(ptr, o, n)					\
({								\
	__typeof__(*(ptr)) __ret;				\
	__typeof__(*(ptr)) __old = (o);				\
	__typeof__(*(ptr)) __new = (n);				\
	alternative_io(LOCK_PREFIX_HERE				\
			"call cmpxchg8b_emu",			\
			"lock; cmpxchg8b (%%esi)" ,		\
			X86_FEATURE_CX8,			\
			"=A" (__ret),				\
			"S" ((ptr)), "0" (__old),		\
			"b" ((unsigned int)__new),		\
			"c" ((unsigned int)(__new>>32))		\
			: "memory");				\
	__ret; })

#define cmpxchg64_local(ptr, o, n)				\
({								\
	__typeof__(*(ptr)) __ret;				\
	__typeof__(*(ptr)) __old = (o);				\
	__typeof__(*(ptr)) __new = (n);				\
	alternative_io("call cmpxchg8b_emu",			\
			"cmpxchg8b (%%esi)" ,			\
			X86_FEATURE_CX8,			\
			"=A" (__ret),				\
			"S" ((ptr)), "0" (__old),		\
			"b" ((unsigned int)__new),		\
			"c" ((unsigned int)(__new>>32))		\
			: "memory");				\
	__ret; })

#endif

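/*
 * Illustrative sketch (not part of the original header): cmpxchg8b_emu is an
 * out-of-line assembly routine with a register calling convention, and the
 * alternative_io() above patches the call into a real locked cmpxchg8b at
 * boot when the CPU advertises CX8.  Conceptually the emulation path does
 * the following; on pre-CX8 (uniprocessor-only) machines, disabling
 * interrupts is enough to make it appear atomic.  The C form and the name
 * are hypothetical.
 */
static inline u64 __example_cmpxchg8b_emu(volatile u64 *ptr, u64 old, u64 new)
{
	unsigned long flags;
	u64 prev;

	local_irq_save(flags);
	prev = *ptr;
	if (prev == old)
		*ptr = new;
	local_irq_restore(flags);

	return prev;
}
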
#define system_has_cmpxchg_double() cpu_has_cx8

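/*
 * Usage note (illustrative, not from the original header): code that relies
 * on the double-word compare-and-swap is expected to check this at runtime,
 * e.g.
 *
 *	if (!system_has_cmpxchg_double())
 *		return -ENOSYS;		// or fall back to a lock-based path
 *
 * since cmpxchg8b is what backs the double-word operation on 32-bit x86.
 */
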
#endif /* _ASM_X86_CMPXCHG_32_H */