#ifndef ASM_X86_CMPXCHG_H
#define ASM_X86_CMPXCHG_H

#include <linux/compiler.h>
#include <asm/cpufeatures.h>
#include <asm/alternative.h> /* Provides LOCK_PREFIX */

/*
 * Non-existent functions to indicate usage errors at link time
 * (or compile-time if the compiler implements __compiletime_error()).
 */
extern void __xchg_wrong_size(void)
	__compiletime_error("Bad argument size for xchg");
extern void __cmpxchg_wrong_size(void)
	__compiletime_error("Bad argument size for cmpxchg");
extern void __xadd_wrong_size(void)
	__compiletime_error("Bad argument size for xadd");
extern void __add_wrong_size(void)
	__compiletime_error("Bad argument size for add");

/*
 * Constants for operation sizes. On 32-bit, the 64-bit size is set to
 * -1 because sizeof will never return -1, thereby making those switch
 * case statements guaranteed dead code which the compiler will
 * eliminate, and allowing the "missing symbol in the default case" to
 * indicate a usage error.
 */
#define __X86_CASE_B	1
#define __X86_CASE_W	2
#define __X86_CASE_L	4
#ifdef CONFIG_64BIT
#define __X86_CASE_Q	8
#else
#define	__X86_CASE_Q	-1		/* sizeof will never return -1 */
#endif

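/*
 * A hedged sketch of the failure mode (not from the original header):
 * on a 32-bit build, sizeof(u64) == 8 matches none of the cases, so
 *
 *	u64 val;
 *	xchg(&val, 0ULL);
 *
 * falls through to the default switch arm and references
 * __xchg_wrong_size(), turning the bad usage into a compile- or
 * link-time error rather than a silently non-atomic operation.
 */
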
/*
 * An exchange-type operation, which takes a value and a pointer, and
 * returns the old value.
 */
#define __xchg_op(ptr, arg, op, lock)					\
	({								\
		__typeof__ (*(ptr)) __ret = (arg);			\
		switch (sizeof(*(ptr))) {				\
		case __X86_CASE_B:					\
			asm volatile (lock #op "b %b0, %1\n"		\
				      : "+q" (__ret), "+m" (*(ptr))	\
				      : : "memory", "cc");		\
			break;						\
		case __X86_CASE_W:					\
			asm volatile (lock #op "w %w0, %1\n"		\
				      : "+r" (__ret), "+m" (*(ptr))	\
				      : : "memory", "cc");		\
			break;						\
		case __X86_CASE_L:					\
			asm volatile (lock #op "l %0, %1\n"		\
				      : "+r" (__ret), "+m" (*(ptr))	\
				      : : "memory", "cc");		\
			break;						\
		case __X86_CASE_Q:					\
			asm volatile (lock #op "q %q0, %1\n"		\
				      : "+r" (__ret), "+m" (*(ptr))	\
				      : : "memory", "cc");		\
			break;						\
		default:						\
			__ ## op ## _wrong_size();			\
		}							\
		__ret;							\
	})

/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
 * Since this is generally used to protect other memory information, we
 * use "asm volatile" and "memory" clobbers to prevent gcc from moving
 * information around.
 */
#define xchg(ptr, v)	__xchg_op((ptr), (v), xchg, "")

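/*
 * Example usage (an illustrative sketch, not part of the original
 * header; "shared", "new_node" and "free_node" are hypothetical
 * names): publish a new pointer and clean up whatever was there
 * before, atomically.
 *
 *	struct node *prev = xchg(&shared, new_node);
 *	if (prev)
 *		free_node(prev);
 */
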
/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */
#define __raw_cmpxchg(ptr, old, new, size, lock)			\
({									\
	__typeof__(*(ptr)) __ret;					\
	__typeof__(*(ptr)) __old = (old);				\
	__typeof__(*(ptr)) __new = (new);				\
	switch (size) {							\
	case __X86_CASE_B:						\
	{								\
		volatile u8 *__ptr = (volatile u8 *)(ptr);		\
		asm volatile(lock "cmpxchgb %2,%1"			\
			     : "=a" (__ret), "+m" (*__ptr)		\
			     : "q" (__new), "0" (__old)			\
			     : "memory");				\
		break;							\
	}								\
	case __X86_CASE_W:						\
	{								\
		volatile u16 *__ptr = (volatile u16 *)(ptr);		\
		asm volatile(lock "cmpxchgw %2,%1"			\
			     : "=a" (__ret), "+m" (*__ptr)		\
			     : "r" (__new), "0" (__old)			\
			     : "memory");				\
		break;							\
	}								\
	case __X86_CASE_L:						\
	{								\
		volatile u32 *__ptr = (volatile u32 *)(ptr);		\
		asm volatile(lock "cmpxchgl %2,%1"			\
			     : "=a" (__ret), "+m" (*__ptr)		\
			     : "r" (__new), "0" (__old)			\
			     : "memory");				\
		break;							\
	}								\
	case __X86_CASE_Q:						\
	{								\
		volatile u64 *__ptr = (volatile u64 *)(ptr);		\
		asm volatile(lock "cmpxchgq %2,%1"			\
			     : "=a" (__ret), "+m" (*__ptr)		\
			     : "r" (__new), "0" (__old)			\
			     : "memory");				\
		break;							\
	}								\
	default:							\
		__cmpxchg_wrong_size();					\
	}								\
	__ret;								\
})

#define __cmpxchg(ptr, old, new, size)					\
	__raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)

#define __sync_cmpxchg(ptr, old, new, size)				\
	__raw_cmpxchg((ptr), (old), (new), (size), "lock; ")

#define __cmpxchg_local(ptr, old, new, size)				\
	__raw_cmpxchg((ptr), (old), (new), (size), "")

#ifdef CONFIG_X86_32
# include <asm/cmpxchg_32.h>
#else
# include <asm/cmpxchg_64.h>
#endif

#define cmpxchg(ptr, old, new)						\
	__cmpxchg(ptr, old, new, sizeof(*(ptr)))

#define sync_cmpxchg(ptr, old, new)					\
	__sync_cmpxchg(ptr, old, new, sizeof(*(ptr)))

#define cmpxchg_local(ptr, old, new)					\
	__cmpxchg_local(ptr, old, new, sizeof(*(ptr)))

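/*
 * Example usage (an illustrative sketch; "counter" is a hypothetical
 * unsigned long): the classic cmpxchg() retry loop.  The increment is
 * applied only if nobody else modified "counter" in between; on
 * failure the loop retries with the freshly observed value.
 *
 *	unsigned long old, val = counter;
 *
 *	for (;;) {
 *		old = cmpxchg(&counter, val, val + 1);
 *		if (old == val)
 *			break;
 *		val = old;
 *	}
 */
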
/*
 * xadd() adds "inc" to "*ptr" and atomically returns the previous
 * value of "*ptr".
 *
 * xadd() is locked when multiple CPUs are online
 * xadd_sync() is always locked
 * xadd_local() is never locked
 */
#define __xadd(ptr, inc, lock)	__xchg_op((ptr), (inc), xadd, lock)
#define xadd(ptr, inc)		__xadd((ptr), (inc), LOCK_PREFIX)
#define xadd_sync(ptr, inc)	__xadd((ptr), (inc), "lock; ")
#define xadd_local(ptr, inc)	__xadd((ptr), (inc), "")

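/*
 * Example usage (an illustrative sketch; "ticket_head" is a
 * hypothetical u16): xadd() is fetch-and-add, e.g. for handing out
 * ticket-lock numbers, where each caller atomically takes the old
 * value and bumps the counter for the next one.
 *
 *	u16 my_ticket = xadd(&ticket_head, 1);
 */
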
#define __add(ptr, inc, lock)						\
	({								\
		__typeof__ (*(ptr)) __ret = (inc);			\
		switch (sizeof(*(ptr))) {				\
		case __X86_CASE_B:					\
			asm volatile (lock "addb %b1, %0\n"		\
				      : "+m" (*(ptr)) : "qi" (inc)	\
				      : "memory", "cc");		\
			break;						\
		case __X86_CASE_W:					\
			asm volatile (lock "addw %w1, %0\n"		\
				      : "+m" (*(ptr)) : "ri" (inc)	\
				      : "memory", "cc");		\
			break;						\
		case __X86_CASE_L:					\
			asm volatile (lock "addl %1, %0\n"		\
				      : "+m" (*(ptr)) : "ri" (inc)	\
				      : "memory", "cc");		\
			break;						\
		case __X86_CASE_Q:					\
			asm volatile (lock "addq %1, %0\n"		\
				      : "+m" (*(ptr)) : "ri" (inc)	\
				      : "memory", "cc");		\
			break;						\
		default:						\
			__add_wrong_size();				\
		}							\
		__ret;							\
	})

/*
 * add_*() adds "inc" to "*ptr"
 *
 * __add() takes a lock prefix
 * add_smp() is locked when multiple CPUs are online
 * add_sync() is always locked
 */
#define add_smp(ptr, inc)	__add((ptr), (inc), LOCK_PREFIX)
#define add_sync(ptr, inc)	__add((ptr), (inc), "lock; ")

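/*
 * Example usage (an illustrative sketch; "nr_events" is a hypothetical
 * counter): unlike xadd(), __add() does not return the old value, so
 * add_smp() is the lighter choice when only the side effect matters.
 *
 *	add_smp(&nr_events, 1);
 */
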
#define __cmpxchg_double(pfx, p1, p2, o1, o2, n1, n2)			\
({									\
	bool __ret;							\
	__typeof__(*(p1)) __old1 = (o1), __new1 = (n1);			\
	__typeof__(*(p2)) __old2 = (o2), __new2 = (n2);			\
	BUILD_BUG_ON(sizeof(*(p1)) != sizeof(long));			\
	BUILD_BUG_ON(sizeof(*(p2)) != sizeof(long));			\
	VM_BUG_ON((unsigned long)(p1) % (2 * sizeof(long)));		\
	VM_BUG_ON((unsigned long)((p1) + 1) != (unsigned long)(p2));	\
	asm volatile(pfx "cmpxchg%c4b %2; sete %0"			\
		     : "=a" (__ret), "+d" (__old2),			\
		       "+m" (*(p1)), "+m" (*(p2))			\
		     : "i" (2 * sizeof(long)), "a" (__old1),		\
		       "b" (__new1), "c" (__new2));			\
	__ret;								\
})

#define cmpxchg_double(p1, p2, o1, o2, n1, n2)				\
	__cmpxchg_double(LOCK_PREFIX, p1, p2, o1, o2, n1, n2)

#define cmpxchg_double_local(p1, p2, o1, o2, n1, n2)			\
	__cmpxchg_double(, p1, p2, o1, o2, n1, n2)

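/*
 * Example usage (an illustrative sketch; "desc", "old_ptr", "old_gen"
 * and "new_ptr" are hypothetical): update a pointer together with a
 * generation count to sidestep ABA problems.  The structure must be
 * 2*sizeof(long) aligned and both fields must be long-sized.
 *
 *	struct { void *ptr; unsigned long gen; } desc
 *		__aligned(2 * sizeof(long));
 *
 *	if (cmpxchg_double(&desc.ptr, &desc.gen,
 *			   old_ptr, old_gen,
 *			   new_ptr, old_gen + 1))
 *		;	// both words were updated atomically
 */
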
#endif	/* ASM_X86_CMPXCHG_H */