#ifndef ASM_X86_CMPXCHG_H
#define ASM_X86_CMPXCHG_H

#include <linux/compiler.h>
#include <asm/alternative.h> /* Provides LOCK_PREFIX */
6
/*
 * Non-existent functions to indicate usage errors at link time
 * (or compile-time if the compiler implements __compiletime_error()).
 * They are declared but never defined: reaching one of these calls in
 * a switch default case produces an unresolved symbol / diagnostic.
 */
extern void __xchg_wrong_size(void)
	__compiletime_error("Bad argument size for xchg");
extern void __cmpxchg_wrong_size(void)
	__compiletime_error("Bad argument size for cmpxchg");
extern void __xadd_wrong_size(void)
	__compiletime_error("Bad argument size for xadd");
extern void __add_wrong_size(void)
	__compiletime_error("Bad argument size for add");
Jeremy Fitzhardingee9826382011-08-18 11:48:06 -070019
/*
 * Constants for operation sizes. On 32-bit, the 64-bit size is set to
 * -1 because sizeof will never return -1, thereby making those switch
 * case statements guaranteed dead code which the compiler will
 * eliminate, and allowing the "missing symbol in the default case" to
 * indicate a usage error.
 */
#define __X86_CASE_B	1
#define __X86_CASE_W	2
#define __X86_CASE_L	4
#ifdef CONFIG_64BIT
#define __X86_CASE_Q	8
#else
#define	__X86_CASE_Q	-1		/* sizeof will never return -1 */
#endif
35
/*
 * An exchange-type operation, which takes a value and a pointer, and
 * returns the old value.
 *
 * Dispatches on sizeof(*(ptr)) to pick the right operand-size suffix
 * (b/w/l/q); an unsupported size falls into the default case, which
 * calls the matching __<op>_wrong_size() error stub.  The "+q"
 * constraint in the byte case restricts the register to one with a
 * byte-addressable subregister; "memory"/"cc" clobbers keep the
 * compiler from caching memory across the operation.
 */
#define __xchg_op(ptr, arg, op, lock)					\
	({								\
		__typeof__ (*(ptr)) __ret = (arg);			\
		switch (sizeof(*(ptr))) {				\
		case __X86_CASE_B:					\
			asm volatile (lock #op "b %b0, %1\n"		\
				      : "+q" (__ret), "+m" (*(ptr))	\
				      : : "memory", "cc");		\
			break;						\
		case __X86_CASE_W:					\
			asm volatile (lock #op "w %w0, %1\n"		\
				      : "+r" (__ret), "+m" (*(ptr))	\
				      : : "memory", "cc");		\
			break;						\
		case __X86_CASE_L:					\
			asm volatile (lock #op "l %0, %1\n"		\
				      : "+r" (__ret), "+m" (*(ptr))	\
				      : : "memory", "cc");		\
			break;						\
		case __X86_CASE_Q:					\
			asm volatile (lock #op "q %q0, %1\n"		\
				      : "+r" (__ret), "+m" (*(ptr))	\
				      : : "memory", "cc");		\
			break;						\
		default:						\
			__ ## op ## _wrong_size();			\
		}							\
		__ret;							\
	})
69
/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
 * Since this is generally used to protect other memory information, we
 * use "asm volatile" and "memory" clobbers to prevent gcc from moving
 * information around.
 */
#define xchg(ptr, v)	__xchg_op((ptr), (v), xchg, "")
Jeremy Fitzhardingee9826382011-08-18 11:48:06 -070077
/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 *
 * CMPXCHG implicitly uses the accumulator: __old goes in via the "0"
 * (same-register-as-%0, i.e. eax/rax) constraint, and the value found
 * in memory comes back in __ret via "=a".  "size" is expected to be a
 * compile-time constant so the switch collapses to a single case.
 */
#define __raw_cmpxchg(ptr, old, new, size, lock)			\
({									\
	__typeof__(*(ptr)) __ret;					\
	__typeof__(*(ptr)) __old = (old);				\
	__typeof__(*(ptr)) __new = (new);				\
	switch (size) {							\
	case __X86_CASE_B:						\
	{								\
		volatile u8 *__ptr = (volatile u8 *)(ptr);		\
		asm volatile(lock "cmpxchgb %2,%1"			\
			     : "=a" (__ret), "+m" (*__ptr)		\
			     : "q" (__new), "0" (__old)			\
			     : "memory");				\
		break;							\
	}								\
	case __X86_CASE_W:						\
	{								\
		volatile u16 *__ptr = (volatile u16 *)(ptr);		\
		asm volatile(lock "cmpxchgw %2,%1"			\
			     : "=a" (__ret), "+m" (*__ptr)		\
			     : "r" (__new), "0" (__old)			\
			     : "memory");				\
		break;							\
	}								\
	case __X86_CASE_L:						\
	{								\
		volatile u32 *__ptr = (volatile u32 *)(ptr);		\
		asm volatile(lock "cmpxchgl %2,%1"			\
			     : "=a" (__ret), "+m" (*__ptr)		\
			     : "r" (__new), "0" (__old)			\
			     : "memory");				\
		break;							\
	}								\
	case __X86_CASE_Q:						\
	{								\
		volatile u64 *__ptr = (volatile u64 *)(ptr);		\
		asm volatile(lock "cmpxchgq %2,%1"			\
			     : "=a" (__ret), "+m" (*__ptr)		\
			     : "r" (__new), "0" (__old)			\
			     : "memory");				\
		break;							\
	}								\
	default:							\
		__cmpxchg_wrong_size();					\
	}								\
	__ret;								\
})
130
/* Locked on SMP (LOCK_PREFIX is patched out on UP kernels). */
#define __cmpxchg(ptr, old, new, size)					\
	__raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)

/* Always locked, even on UP. */
#define __sync_cmpxchg(ptr, old, new, size)				\
	__raw_cmpxchg((ptr), (old), (new), (size), "lock; ")

/* Never locked: only atomic w.r.t. the local CPU. */
#define __cmpxchg_local(ptr, old, new, size)				\
	__raw_cmpxchg((ptr), (old), (new), (size), "")
139
#ifdef CONFIG_X86_32
# include <asm/cmpxchg_32.h>
#else
# include <asm/cmpxchg_64.h>
#endif
Jeremy Fitzhardingee9826382011-08-18 11:48:06 -0700145
/* Size is inferred from the pointed-to type; see __raw_cmpxchg(). */
#define cmpxchg(ptr, old, new)						\
	__cmpxchg(ptr, old, new, sizeof(*(ptr)))

#define sync_cmpxchg(ptr, old, new)					\
	__sync_cmpxchg(ptr, old, new, sizeof(*(ptr)))

#define cmpxchg_local(ptr, old, new)					\
	__cmpxchg_local(ptr, old, new, sizeof(*(ptr)))
Jeremy Fitzhardingee9826382011-08-18 11:48:06 -0700154
/*
 * xadd() adds "inc" to "*ptr" and atomically returns the previous
 * value of "*ptr".
 *
 * xadd() is locked when multiple CPUs are online
 * xadd_sync() is always locked
 * xadd_local() is never locked
 */
#define __xadd(ptr, inc, lock)	__xchg_op((ptr), (inc), xadd, lock)
#define xadd(ptr, inc)		__xadd((ptr), (inc), LOCK_PREFIX)
#define xadd_sync(ptr, inc)	__xadd((ptr), (inc), "lock; ")
#define xadd_local(ptr, inc)	__xadd((ptr), (inc), "")
167
/*
 * __add(ptr, inc, lock) - add "inc" to "*ptr" using a plain ADD of the
 * operand size selected by sizeof(*(ptr)), with the given lock prefix.
 *
 * Returns the value of "inc" (NOT the old or new value of "*ptr" --
 * ADD, unlike XADD, does not read the old contents back).  An
 * unsupported size falls through to the __add_wrong_size() error stub.
 *
 * Fix: "inc" is now evaluated exactly once, into __ret, and __ret is
 * what feeds the asm input operand.  The previous code passed "inc"
 * straight into the asm as well, so a side-effecting argument (e.g.
 * __add(p, x++, ...)) was evaluated twice.
 */
#define __add(ptr, inc, lock)						\
	({								\
	        __typeof__ (*(ptr)) __ret = (inc);			\
		switch (sizeof(*(ptr))) {				\
		case __X86_CASE_B:					\
			/* "qi": byte ops need a byte-addressable reg */\
			asm volatile (lock "addb %b1, %0\n"		\
				      : "+m" (*(ptr)) : "qi" (__ret)	\
				      : "memory", "cc");		\
			break;						\
		case __X86_CASE_W:					\
			asm volatile (lock "addw %w1, %0\n"		\
				      : "+m" (*(ptr)) : "ri" (__ret)	\
				      : "memory", "cc");		\
			break;						\
		case __X86_CASE_L:					\
			asm volatile (lock "addl %1, %0\n"		\
				      : "+m" (*(ptr)) : "ri" (__ret)	\
				      : "memory", "cc");		\
			break;						\
		case __X86_CASE_Q:					\
			asm volatile (lock "addq %1, %0\n"		\
				      : "+m" (*(ptr)) : "ri" (__ret)	\
				      : "memory", "cc");		\
			break;						\
		default:						\
			__add_wrong_size();				\
		}							\
		__ret;							\
	})
197
/*
 * add_*() adds "inc" to "*ptr"
 *
 * __add() takes a lock prefix
 * add_smp() is locked when multiple CPUs are online
 * add_sync() is always locked
 */
#define add_smp(ptr, inc)	__add((ptr), (inc), LOCK_PREFIX)
#define add_sync(ptr, inc)	__add((ptr), (inc), "lock; ")
207
/*
 * Double-word compare-and-exchange on two adjacent long-sized words.
 * p1/p2 must be contiguous and the pair aligned to 2*sizeof(long)
 * (enforced by the VM_BUG_ONs); each word must be exactly long-sized
 * (enforced by the BUILD_BUG_ONs).
 *
 * CMPXCHG8B/16B's fixed register contract: expected old pair in
 * edx:eax (rdx:rax), replacement pair in ecx:ebx (rcx:rbx).  "%c4"
 * emits operand 4 (2*sizeof(long), i.e. 8 or 16) as a bare constant,
 * forming the cmpxchg8b/cmpxchg16b mnemonic.  SETE converts ZF into
 * the boolean success result.
 */
#define __cmpxchg_double(pfx, p1, p2, o1, o2, n1, n2)			\
({									\
	bool __ret;							\
	__typeof__(*(p1)) __old1 = (o1), __new1 = (n1);			\
	__typeof__(*(p2)) __old2 = (o2), __new2 = (n2);			\
	BUILD_BUG_ON(sizeof(*(p1)) != sizeof(long));			\
	BUILD_BUG_ON(sizeof(*(p2)) != sizeof(long));			\
	VM_BUG_ON((unsigned long)(p1) % (2 * sizeof(long)));		\
	VM_BUG_ON((unsigned long)((p1) + 1) != (unsigned long)(p2));	\
	asm volatile(pfx "cmpxchg%c4b %2; sete %0"			\
		     : "=a" (__ret), "+d" (__old2),			\
		       "+m" (*(p1)), "+m" (*(p2))			\
		     : "i" (2 * sizeof(long)), "a" (__old1),		\
		       "b" (__new1), "c" (__new2));			\
	__ret;								\
})
224
/* Locked on SMP / never locked variants of the double-word cmpxchg. */
#define cmpxchg_double(p1, p2, o1, o2, n1, n2)				\
	__cmpxchg_double(LOCK_PREFIX, p1, p2, o1, o2, n1, n2)

#define cmpxchg_double_local(p1, p2, o1, o2, n1, n2)			\
	__cmpxchg_double(, p1, p2, o1, o2, n1, n2)
230
#endif	/* ASM_X86_CMPXCHG_H */