blob: 82ceb788a981e7c73a7cb06e3d0bec311e205be7 [file] [log] [blame]
H. Peter Anvin1965aae2008-10-22 22:26:29 -07001#ifndef _ASM_X86_CMPXCHG_32_H
2#define _ASM_X86_CMPXCHG_32_H
Jeff Dikea436ed92007-05-08 00:35:02 -07003
4#include <linux/bitops.h> /* for LOCK_PREFIX */
5
Avi Kivity2d9ce172007-07-19 14:30:14 +03006/*
 * Note: if you use set64_bit(), __cmpxchg64(), or their variants,
 * you need to test for the feature in boot_cpu_data.
9 */
10
Joe Perches81210192008-03-23 01:01:51 -070011#define xchg(ptr, v) \
12 ((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), sizeof(*(ptr))))
Jeff Dikea436ed92007-05-08 00:35:02 -070013
Joe Perches81210192008-03-23 01:01:51 -070014struct __xchg_dummy {
15 unsigned long a[100];
16};
Jeff Dikea436ed92007-05-08 00:35:02 -070017#define __xg(x) ((struct __xchg_dummy *)(x))
18
Jeff Dikea436ed92007-05-08 00:35:02 -070019/*
 * The semantics of CMPXCHG8B are a bit strange, this is why
21 * there is a loop and the loading of %%eax and %%edx has to
22 * be inside. This inlines well in most cases, the cached
23 * cost is around ~38 cycles. (in the future we might want
24 * to do an SIMD/3DNOW!/MMX/FPU 64-bit store here, but that
25 * might have an implicit FPU-save as a cost, so it's not
26 * clear which path to go.)
27 *
28 * cmpxchg8b must be used with the lock prefix here to allow
29 * the instruction to be executed atomically, see page 3-102
30 * of the instruction set reference 24319102.pdf. We need
31 * the reader side to see the coherent 64bit value.
32 */
/*
 * Atomically store a 64-bit value, given as two 32-bit halves, on a
 * 32-bit CPU.  The current value is loaded into EDX:EAX inside the
 * loop (cmpxchg8b compares against EDX:EAX) and cmpxchg8b is retried
 * until it succeeds, so concurrent 64-bit readers always observe a
 * coherent value.  Requires a cmpxchg8b-capable CPU (see note at top).
 */
static inline void __set_64bit(unsigned long long *ptr,
			       unsigned int low, unsigned int high)
{
	asm volatile("\n1:\t"
		     "movl (%0), %%eax\n\t"	/* current low word -> EAX */
		     "movl 4(%0), %%edx\n\t"	/* current high word -> EDX */
		     LOCK_PREFIX "cmpxchg8b (%0)\n\t"
		     "jnz 1b"			/* lost a race: reload and retry */
		     : /* no outputs */
		     : "D"(ptr),		/* target address in EDI */
		       "b"(low),		/* new low word in EBX */
		       "c"(high)		/* new high word in ECX */
		     : "ax", "dx", "memory");
}
47
/*
 * Split a compile-time-constant 64-bit value into its 32-bit halves
 * and hand them to __set_64bit() for the atomic store.
 */
static inline void __set_64bit_constant(unsigned long long *ptr,
					unsigned long long value)
{
	unsigned int lo = (unsigned int)value;
	unsigned int hi = (unsigned int)(value >> 32);

	__set_64bit(ptr, lo, hi);
}
53
/* Access the low/high 32-bit word of a 64-bit lvalue (x86 is little-endian). */
#define ll_low(x) *(((unsigned int *)&(x)) + 0)
#define ll_high(x) *(((unsigned int *)&(x)) + 1)

/*
 * As __set_64bit_constant(), but for run-time values: the halves are
 * read through ll_low()/ll_high() instead of shift/cast.
 */
static inline void __set_64bit_var(unsigned long long *ptr,
				   unsigned long long value)
{
	__set_64bit(ptr, ll_low(value), ll_high(value));
}
62
/*
 * set_64bit(ptr, value): atomic 64-bit store.  Chooses the constant or
 * variable flavour at compile time; both funnel into __set_64bit().
 * Caller must have checked for cmpxchg8b support (see note at top).
 */
#define set_64bit(ptr, value)			\
	(__builtin_constant_p((value))		\
	 ? __set_64bit_constant((ptr), (value))	\
	 : __set_64bit_var((ptr), (value)))

/* Like set_64bit(), but with the half-splitting expanded inline. */
#define _set_64bit(ptr, value)						\
	(__builtin_constant_p(value)					\
	 ? __set_64bit(ptr, (unsigned int)(value),			\
		       (unsigned int)((value) >> 32))			\
	 : __set_64bit(ptr, ll_low((value)), ll_high((value))))
Jeff Dikea436ed92007-05-08 00:35:02 -070073
/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
 * Note 2: xchg has side effect, so that attribute volatile is necessary,
 *	   but generally the primitive is invalid, *ptr is output argument. --ANK
 */
/*
 * Size-dispatched exchange backing the xchg() macro.  Returns the
 * previous memory contents.  NOTE(review): an unsupported size (not
 * 1/2/4) silently performs no exchange and returns x unchanged.
 */
static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
				   int size)
{
	switch (size) {
	case 1:
		asm volatile("xchgb %b0,%1"
			     : "=q" (x)		/* byte-addressable register */
			     : "m" (*__xg(ptr)), "0" (x)
			     : "memory");
		break;
	case 2:
		asm volatile("xchgw %w0,%1"
			     : "=r" (x)
			     : "m" (*__xg(ptr)), "0" (x)
			     : "memory");
		break;
	case 4:
		asm volatile("xchgl %0,%1"
			     : "=r" (x)
			     : "m" (*__xg(ptr)), "0" (x)
			     : "memory");
		break;
	}
	return x;
}
104
/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */

#ifdef CONFIG_X86_CMPXCHG
#define __HAVE_ARCH_CMPXCHG 1
/* SMP-safe CAS (LOCK_PREFIX may be patched out on UP kernels). */
#define cmpxchg(ptr, o, n)						\
	((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o),	\
				       (unsigned long)(n),		\
				       sizeof(*(ptr))))
/* CAS with an unconditional lock prefix, for hypervisor-shared memory. */
#define sync_cmpxchg(ptr, o, n)						\
	((__typeof__(*(ptr)))__sync_cmpxchg((ptr), (unsigned long)(o),	\
					    (unsigned long)(n),		\
					    sizeof(*(ptr))))
/* CAS without any lock prefix: atomic only w.r.t. the local CPU. */
#define cmpxchg_local(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o),	\
					     (unsigned long)(n),	\
					     sizeof(*(ptr))))
#endif

#ifdef CONFIG_X86_CMPXCHG64
/* 64-bit CAS via cmpxchg8b; needs the feature test noted at the top. */
#define cmpxchg64(ptr, o, n)						\
	((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \
					 (unsigned long long)(n)))
#define cmpxchg64_local(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o), \
					       (unsigned long long)(n)))
#endif
135
/*
 * Size-dispatched compare-and-exchange backing cmpxchg().  Returns the
 * value found in memory; the operation succeeded iff that equals 'old'.
 * cmpxchg implicitly compares against EAX ("0"(old)) and leaves the
 * memory value in EAX ("=a"(prev)).  Unsupported sizes return 'old'
 * without touching memory.
 */
static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	unsigned long prev;
	switch (size) {
	case 1:
		asm volatile(LOCK_PREFIX "cmpxchgb %b1,%2"
			     : "=a"(prev)
			     : "q"(new), "m"(*__xg(ptr)), "0"(old)
			     : "memory");
		return prev;
	case 2:
		asm volatile(LOCK_PREFIX "cmpxchgw %w1,%2"
			     : "=a"(prev)
			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
			     : "memory");
		return prev;
	case 4:
		asm volatile(LOCK_PREFIX "cmpxchgl %1,%2"
			     : "=a"(prev)
			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
			     : "memory");
		return prev;
	}
	return old;
}
162
/*
 * Always use locked operations when touching memory shared with a
 * hypervisor, since the system may be SMP even if the guest kernel
 * isn't.
 */
/*
 * Same as __cmpxchg(), but with a literal "lock;" prefix instead of
 * LOCK_PREFIX, so the lock cannot be patched away on UP kernels.
 */
static inline unsigned long __sync_cmpxchg(volatile void *ptr,
					   unsigned long old,
					   unsigned long new, int size)
{
	unsigned long prev;
	switch (size) {
	case 1:
		asm volatile("lock; cmpxchgb %b1,%2"
			     : "=a"(prev)
			     : "q"(new), "m"(*__xg(ptr)), "0"(old)
			     : "memory");
		return prev;
	case 2:
		asm volatile("lock; cmpxchgw %w1,%2"
			     : "=a"(prev)
			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
			     : "memory");
		return prev;
	case 4:
		asm volatile("lock; cmpxchgl %1,%2"
			     : "=a"(prev)
			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
			     : "memory");
		return prev;
	}
	return old;
}
195
/*
 * Same as __cmpxchg(), but with no lock prefix at all: atomic only
 * with respect to the local CPU (e.g. for per-CPU data).
 */
static inline unsigned long __cmpxchg_local(volatile void *ptr,
					    unsigned long old,
					    unsigned long new, int size)
{
	unsigned long prev;
	switch (size) {
	case 1:
		asm volatile("cmpxchgb %b1,%2"
			     : "=a"(prev)
			     : "q"(new), "m"(*__xg(ptr)), "0"(old)
			     : "memory");
		return prev;
	case 2:
		asm volatile("cmpxchgw %w1,%2"
			     : "=a"(prev)
			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
			     : "memory");
		return prev;
	case 4:
		asm volatile("cmpxchgl %1,%2"
			     : "=a"(prev)
			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
			     : "memory");
		return prev;
	}
	return old;
}
223
/*
 * 64-bit compare-and-exchange via cmpxchg8b.  The instruction compares
 * memory against EDX:EAX ("0"(old) paired with the "=A" output) and,
 * on match, stores ECX:EBX; either way EDX:EAX ends up holding the
 * prior memory value, which is returned.
 */
static inline unsigned long long __cmpxchg64(volatile void *ptr,
					     unsigned long long old,
					     unsigned long long new)
{
	unsigned long long prev;
	asm volatile(LOCK_PREFIX "cmpxchg8b %3"
		     : "=A"(prev)			/* result in EDX:EAX */
		     : "b"((unsigned long)new),		/* new low word */
		       "c"((unsigned long)(new >> 32)),	/* new high word */
		       "m"(*__xg(ptr)),
		       "0"(old)				/* expected in EDX:EAX */
		     : "memory");
	return prev;
}
238
/*
 * Same as __cmpxchg64(), but without the lock prefix: atomic only
 * with respect to the local CPU.
 */
static inline unsigned long long __cmpxchg64_local(volatile void *ptr,
						   unsigned long long old,
						   unsigned long long new)
{
	unsigned long long prev;
	asm volatile("cmpxchg8b %3"
		     : "=A"(prev)			/* result in EDX:EAX */
		     : "b"((unsigned long)new),		/* new low word */
		       "c"((unsigned long)(new >> 32)),	/* new high word */
		       "m"(*__xg(ptr)),
		       "0"(old)				/* expected in EDX:EAX */
		     : "memory");
	return prev;
}
253
#ifndef CONFIG_X86_CMPXCHG
/*
 * Building a kernel capable running on 80386. It may be necessary to
 * simulate the cmpxchg on the 80386 CPU. For that purpose we define
 * a function for each of the sizes we support.
 */

/* Out-of-line software emulation helpers, defined elsewhere in arch code. */
extern unsigned long cmpxchg_386_u8(volatile void *, u8, u8);
extern unsigned long cmpxchg_386_u16(volatile void *, u16, u16);
extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32);

/*
 * Dispatch to the out-of-line 80386 cmpxchg emulation by operand size.
 * Unknown sizes return 'old' without touching memory, matching the
 * behaviour of __cmpxchg().
 */
static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
					unsigned long new, int size)
{
	if (size == 1)
		return cmpxchg_386_u8(ptr, old, new);
	if (size == 2)
		return cmpxchg_386_u16(ptr, old, new);
	if (size == 4)
		return cmpxchg_386_u32(ptr, old, new);
	return old;
}
278
/*
 * cmpxchg()/cmpxchg_local() for 386-capable kernels: dispatch at run
 * time on the boot CPU family.  Family > 3 (i486 and later) has the
 * real cmpxchg instruction; a plain 386 takes the software emulation.
 */
#define cmpxchg(ptr, o, n)						\
({									\
	__typeof__(*(ptr)) __ret;					\
	if (likely(boot_cpu_data.x86 > 3))				\
		__ret = (__typeof__(*(ptr)))__cmpxchg((ptr),		\
				(unsigned long)(o), (unsigned long)(n),	\
				sizeof(*(ptr)));			\
	else								\
		__ret = (__typeof__(*(ptr)))cmpxchg_386((ptr),		\
				(unsigned long)(o), (unsigned long)(n),	\
				sizeof(*(ptr)));			\
	__ret;								\
})
#define cmpxchg_local(ptr, o, n)					\
({									\
	__typeof__(*(ptr)) __ret;					\
	if (likely(boot_cpu_data.x86 > 3))				\
		__ret = (__typeof__(*(ptr)))__cmpxchg_local((ptr),	\
				(unsigned long)(o), (unsigned long)(n),	\
				sizeof(*(ptr)));			\
	else								\
		__ret = (__typeof__(*(ptr)))cmpxchg_386((ptr),		\
				(unsigned long)(o), (unsigned long)(n),	\
				sizeof(*(ptr)));			\
	__ret;								\
})
#endif
306
#ifndef CONFIG_X86_CMPXCHG64
/*
 * Building a kernel capable running on 80386 and 80486. It may be necessary
 * to simulate the cmpxchg8b on the 80386 and 80486 CPU.
 */

/* Out-of-line 64-bit emulation helper, defined elsewhere in arch code. */
extern unsigned long long cmpxchg_486_u64(volatile void *, u64, u64);

/*
 * Family > 4 (Pentium and later) has cmpxchg8b; 386/486 take the
 * emulation.  NOTE(review): the emulated path presumably is not
 * SMP-atomic -- confirm against the cmpxchg_486_u64() implementation.
 */
#define cmpxchg64(ptr, o, n)						\
({									\
	__typeof__(*(ptr)) __ret;					\
	if (likely(boot_cpu_data.x86 > 4))				\
		__ret = (__typeof__(*(ptr)))__cmpxchg64((ptr),		\
				(unsigned long long)(o),		\
				(unsigned long long)(n));		\
	else								\
		__ret = (__typeof__(*(ptr)))cmpxchg_486_u64((ptr),	\
				(unsigned long long)(o),		\
				(unsigned long long)(n));		\
	__ret;								\
})
#define cmpxchg64_local(ptr, o, n)					\
({									\
	__typeof__(*(ptr)) __ret;					\
	if (likely(boot_cpu_data.x86 > 4))				\
		__ret = (__typeof__(*(ptr)))__cmpxchg64_local((ptr),	\
				(unsigned long long)(o),		\
				(unsigned long long)(n));		\
	else								\
		__ret = (__typeof__(*(ptr)))cmpxchg_486_u64((ptr),	\
				(unsigned long long)(o),		\
				(unsigned long long)(n));		\
	__ret;								\
})

#endif
343
H. Peter Anvin1965aae2008-10-22 22:26:29 -0700344#endif /* _ASM_X86_CMPXCHG_32_H */