#ifndef _ASM_X86_CMPXCHG_32_H
#define _ASM_X86_CMPXCHG_32_H

#include <linux/bitops.h> /* for LOCK_PREFIX */

/*
 * Note: if you use set_64bit(), __cmpxchg64(), or their variants,
 * you need to test for the feature in boot_cpu_data.
 */

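/*
 * Illustrative sketch (editor's addition, not part of the original
 * header): the feature test the note above refers to, mirroring the
 * boot_cpu_data checks used further down in this file.  The macro
 * name is hypothetical; boot_cpu_data comes from <asm/processor.h>,
 * which the includer is assumed to provide.
 */
#define __example_have_cmpxchg8b()	(boot_cpu_data.x86 > 4)
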
extern void __xchg_wrong_size(void);

/*
 * Note: no "lock" prefix even on SMP: xchg always implies a lock anyway.
 * Note 2: xchg has a side effect, so the volatile attribute is necessary;
 * without it the primitive would generally be invalid, since *ptr is an
 * output argument. --ANK
 */

/*
 * Cast the pointer to a "very large" dummy structure so that gcc
 * treats the asm's memory operand as covering the whole object and
 * does not cache *ptr in a register across the exchange.
 */
struct __xchg_dummy {
	unsigned long a[100];
};
#define __xg(x) ((struct __xchg_dummy *)(x))

#define __xchg(x, ptr, size)					\
({								\
	__typeof(*(ptr)) __x = (x);				\
	switch (size) {						\
	case 1:							\
		asm volatile("xchgb %b0,%1"			\
			     : "=q" (__x), "+m" (*__xg(ptr))	\
			     : "0" (__x)			\
			     : "memory");			\
		break;						\
	case 2:							\
		asm volatile("xchgw %w0,%1"			\
			     : "=r" (__x), "+m" (*__xg(ptr))	\
			     : "0" (__x)			\
			     : "memory");			\
		break;						\
	case 4:							\
		asm volatile("xchgl %0,%1"			\
			     : "=r" (__x), "+m" (*__xg(ptr))	\
			     : "0" (__x)			\
			     : "memory");			\
		break;						\
	default:						\
		__xchg_wrong_size();				\
	}							\
	__x;							\
})

#define xchg(ptr, v)						\
	__xchg((v), (ptr), sizeof(*(ptr)))

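/*
 * Illustrative sketch (editor's addition): xchg() as a test-and-set
 * primitive.  The exchange is atomic even without an explicit lock
 * prefix, so the first caller to move *lock_word from 0 to 1 owns
 * the lock.  Names are hypothetical; real code should use the
 * kernel's spinlock API instead.
 */
static inline void __example_busy_lock(unsigned int *lock_word)
{
	while (xchg(lock_word, 1) != 0)
		;	/* spin until the owner stores 0 */
}

static inline void __example_busy_unlock(unsigned int *lock_word)
{
	xchg(lock_word, 0);	/* release; the implicit lock orders the store */
}
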
/*
 * CMPXCHG8B only writes to the target if we had the previous
 * value in registers; otherwise it acts as a read and gives us the
 * "new previous" value. That is why there is a loop. Preloading
 * EDX:EAX is a performance optimization: in the common case it means
 * we need only one locked operation.
 *
 * A SIMD/3DNOW!/MMX/FPU 64-bit store here would require at the very
 * least an FPU save and/or %cr0.ts manipulation.
 *
 * cmpxchg8b must be used with the lock prefix here to allow the
 * instruction to be executed atomically. We need the reader
 * side to see the coherent 64-bit value.
 */
static inline void set_64bit(volatile u64 *ptr, u64 value)
{
	u32 low  = value;
	u32 high = value >> 32;
	u64 prev = *ptr;

	asm volatile("\n1:\t"
		     LOCK_PREFIX "cmpxchg8b %0\n\t"
		     "jnz 1b"
		     : "=m" (*ptr), "+A" (prev)
		     : "b" (low), "c" (high)
		     : "memory");
}

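/*
 * Illustrative sketch (editor's addition): on 32-bit x86 a plain
 * 64-bit store is split into two 32-bit stores, so a concurrent
 * reader could observe a torn value.  The hypothetical helper below
 * publishes the whole value in one atomic cmpxchg8b-based store.
 */
static inline void __example_publish64(volatile u64 *shared, u64 val)
{
	set_64bit(shared, val);
}
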
extern void __cmpxchg_wrong_size(void);

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */
#define __raw_cmpxchg(ptr, old, new, size, lock)		\
({								\
	__typeof__(*(ptr)) __ret;				\
	__typeof__(*(ptr)) __old = (old);			\
	__typeof__(*(ptr)) __new = (new);			\
	switch (size) {						\
	case 1:							\
		asm volatile(lock "cmpxchgb %b2,%1"		\
			     : "=a" (__ret), "+m" (*__xg(ptr))	\
			     : "q" (__new), "0" (__old)		\
			     : "memory");			\
		break;						\
	case 2:							\
		asm volatile(lock "cmpxchgw %w2,%1"		\
			     : "=a" (__ret), "+m" (*__xg(ptr))	\
			     : "r" (__new), "0" (__old)		\
			     : "memory");			\
		break;						\
	case 4:							\
		asm volatile(lock "cmpxchgl %2,%1"		\
			     : "=a" (__ret), "+m" (*__xg(ptr))	\
			     : "r" (__new), "0" (__old)		\
			     : "memory");			\
		break;						\
	default:						\
		__cmpxchg_wrong_size();				\
	}							\
	__ret;							\
})

#define __cmpxchg(ptr, old, new, size)				\
	__raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)

#define __sync_cmpxchg(ptr, old, new, size)			\
	__raw_cmpxchg((ptr), (old), (new), (size), "lock; ")

#define __cmpxchg_local(ptr, old, new, size)			\
	__raw_cmpxchg((ptr), (old), (new), (size), "")

#ifdef CONFIG_X86_CMPXCHG
#define __HAVE_ARCH_CMPXCHG 1

#define cmpxchg(ptr, old, new)					\
	__cmpxchg((ptr), (old), (new), sizeof(*(ptr)))

#define sync_cmpxchg(ptr, old, new)				\
	__sync_cmpxchg((ptr), (old), (new), sizeof(*(ptr)))

#define cmpxchg_local(ptr, old, new)				\
	__cmpxchg_local((ptr), (old), (new), sizeof(*(ptr)))
#endif

#ifdef CONFIG_X86_CMPXCHG64
#define cmpxchg64(ptr, o, n)						\
	((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \
					 (unsigned long long)(n)))
#define cmpxchg64_local(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o), \
					       (unsigned long long)(n)))
#endif

static inline unsigned long long __cmpxchg64(volatile void *ptr,
					     unsigned long long old,
					     unsigned long long new)
{
	unsigned long long prev;
	asm volatile(LOCK_PREFIX "cmpxchg8b %1"
		     : "=A" (prev),
		       "+m" (*__xg(ptr))
		     : "b" ((unsigned long)new),
		       "c" ((unsigned long)(new >> 32)),
		       "0" (old)
		     : "memory");
	return prev;
}

static inline unsigned long long __cmpxchg64_local(volatile void *ptr,
						   unsigned long long old,
						   unsigned long long new)
{
	unsigned long long prev;
	asm volatile("cmpxchg8b %1"
		     : "=A" (prev),
		       "+m" (*__xg(ptr))
		     : "b" ((unsigned long)new),
		       "c" ((unsigned long)(new >> 32)),
		       "0" (old)
		     : "memory");
	return prev;
}

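/*
 * Illustrative sketch (editor's addition): an atomic 64-bit read
 * built from the primitive above.  __cmpxchg64(p, 0, 0) writes only
 * if *p happens to be 0 (and then writes the identical value back),
 * but it always returns the full 64-bit contents as one atomic
 * operation.  Assumes a CMPXCHG8B-capable CPU; see the note at the
 * top of this file.
 */
static inline unsigned long long __example_read64(volatile unsigned long long *p)
{
	return __cmpxchg64(p, 0, 0);
}
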
#ifndef CONFIG_X86_CMPXCHG
/*
 * Building a kernel capable of running on an 80386.  It may be
 * necessary to simulate the cmpxchg on the 80386 CPU.  For that
 * purpose we define a function for each of the sizes we support.
 */

extern unsigned long cmpxchg_386_u8(volatile void *, u8, u8);
extern unsigned long cmpxchg_386_u16(volatile void *, u16, u16);
extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32);

static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
					unsigned long new, int size)
{
	switch (size) {
	case 1:
		return cmpxchg_386_u8(ptr, old, new);
	case 2:
		return cmpxchg_386_u16(ptr, old, new);
	case 4:
		return cmpxchg_386_u32(ptr, old, new);
	}
	return old;
}

#define cmpxchg(ptr, o, n)					\
({								\
	__typeof__(*(ptr)) __ret;				\
	if (likely(boot_cpu_data.x86 > 3))			\
		__ret = (__typeof__(*(ptr)))__cmpxchg((ptr),	\
			(unsigned long)(o), (unsigned long)(n),	\
			sizeof(*(ptr)));			\
	else							\
		__ret = (__typeof__(*(ptr)))cmpxchg_386((ptr),	\
			(unsigned long)(o), (unsigned long)(n),	\
			sizeof(*(ptr)));			\
	__ret;							\
})
#define cmpxchg_local(ptr, o, n)				\
({								\
	__typeof__(*(ptr)) __ret;				\
	if (likely(boot_cpu_data.x86 > 3))			\
		__ret = (__typeof__(*(ptr)))__cmpxchg_local((ptr), \
			(unsigned long)(o), (unsigned long)(n),	\
			sizeof(*(ptr)));			\
	else							\
		__ret = (__typeof__(*(ptr)))cmpxchg_386((ptr),	\
			(unsigned long)(o), (unsigned long)(n),	\
			sizeof(*(ptr)));			\
	__ret;							\
})
#endif
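
/*
 * Illustrative sketch (editor's addition): the canonical cmpxchg()
 * retry loop, written as a hypothetical "increment if below a limit"
 * helper.  Success is detected exactly as described above
 * __raw_cmpxchg(): the returned value equals the old value we passed
 * in.  Placed after the 80386 fallback so that cmpxchg() is defined
 * in every configuration; the fallback path assumes the includer
 * provides <asm/processor.h> for the boot_cpu_data check.
 */
static inline int __example_inc_below(unsigned int *v, unsigned int limit)
{
	unsigned int old, cur = *v;

	while (cur < limit) {
		old = cmpxchg(v, cur, cur + 1);
		if (old == cur)
			return 1;	/* our increment won */
		cur = old;		/* lost a race; retry with the new value */
	}
	return 0;			/* already at or above the limit */
}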

#ifndef CONFIG_X86_CMPXCHG64
/*
 * Building a kernel capable of running on an 80386 or 80486.  It may
 * be necessary to simulate the cmpxchg8b on the 80386 and 80486 CPU.
 */

extern unsigned long long cmpxchg_486_u64(volatile void *, u64, u64);

/*
 * cmpxchg8b_emu takes its arguments in registers rather than on the
 * stack: the pointer in %esi and the old and new values in the same
 * EDX:EAX / ECX:EBX pairs that CMPXCHG8B itself uses, so the
 * alternative can patch between the call and the native instruction.
 */
#define cmpxchg64(ptr, o, n)					\
({								\
	__typeof__(*(ptr)) __ret;				\
	__typeof__(*(ptr)) __old = (o);				\
	__typeof__(*(ptr)) __new = (n);				\
	alternative_io(LOCK_PREFIX_HERE				\
			"call cmpxchg8b_emu",			\
			"lock; cmpxchg8b (%%esi)",		\
			X86_FEATURE_CX8,			\
			"=A" (__ret),				\
			"S" ((ptr)), "0" (__old),		\
			"b" ((unsigned int)__new),		\
			"c" ((unsigned int)(__new>>32))		\
			: "memory");				\
	__ret; })

#define cmpxchg64_local(ptr, o, n)				\
({								\
	__typeof__(*(ptr)) __ret;				\
	if (likely(boot_cpu_data.x86 > 4))			\
		__ret = (__typeof__(*(ptr)))__cmpxchg64_local((ptr), \
			(unsigned long long)(o),		\
			(unsigned long long)(n));		\
	else							\
		__ret = (__typeof__(*(ptr)))cmpxchg_486_u64((ptr), \
			(unsigned long long)(o),		\
			(unsigned long long)(n));		\
	__ret;							\
})

#endif
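
/*
 * Illustrative sketch (editor's addition): a 64-bit counter
 * increment built on cmpxchg64(), valid whether cmpxchg64() is the
 * native CMPXCHG8B version or the emulated one above.  The plain
 * read of *counter may be torn, but that is harmless here: a stale
 * value simply fails the compare and we retry.  Names are
 * hypothetical; the emulated path assumes the includer provides
 * <asm/alternative.h>.
 */
static inline unsigned long long __example_inc64(volatile unsigned long long *counter)
{
	unsigned long long old, new;

	do {
		old = *counter;
		new = old + 1;
	} while (cmpxchg64(counter, old, new) != old);

	return new;
}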

#endif /* _ASM_X86_CMPXCHG_32_H */