#ifndef _ASM_X86_CMPXCHG_32_H
#define _ASM_X86_CMPXCHG_32_H

#include <linux/bitops.h> /* for LOCK_PREFIX */

/*
 * Note: if you use set64_bit(), __cmpxchg64(), or their variants, you
 * need to test for the feature in boot_cpu_data.
 */
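
/*
 * For example (illustrative sketch only, not part of this header), a
 * caller might gate a cmpxchg8b-based fast path on the CX8 feature bit
 * before using __cmpxchg64():
 *
 *	if (boot_cpu_has(X86_FEATURE_CX8))
 *		old = __cmpxchg64(&shared_counter, old, new);
 *	else
 *		old = some_fallback(&shared_counter, old, new);
 *
 * "shared_counter" and "some_fallback" are hypothetical names used only
 * for illustration.
 */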

extern void __xchg_wrong_size(void);

/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
 * Since this is generally used to protect other memory information, we
 * use "asm volatile" and "memory" clobbers to prevent gcc from moving
 * information around.
 */
#define __xchg(x, ptr, size)						\
({									\
	__typeof(*(ptr)) __x = (x);					\
	switch (size) {							\
	case 1:								\
	{								\
		volatile u8 *__ptr = (volatile u8 *)(ptr);		\
		asm volatile("xchgb %0,%1"				\
			     : "=q" (__x), "+m" (*__ptr)		\
			     : "0" (__x)				\
			     : "memory");				\
		break;							\
	}								\
	case 2:								\
	{								\
		volatile u16 *__ptr = (volatile u16 *)(ptr);		\
		asm volatile("xchgw %0,%1"				\
			     : "=r" (__x), "+m" (*__ptr)		\
			     : "0" (__x)				\
			     : "memory");				\
		break;							\
	}								\
	case 4:								\
	{								\
		volatile u32 *__ptr = (volatile u32 *)(ptr);		\
		asm volatile("xchgl %0,%1"				\
			     : "=r" (__x), "+m" (*__ptr)		\
			     : "0" (__x)				\
			     : "memory");				\
		break;							\
	}								\
	default:							\
		__xchg_wrong_size();					\
	}								\
	__x;								\
})

#define xchg(ptr, v)							\
	__xchg((v), (ptr), sizeof(*ptr))
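
/*
 * Usage sketch (illustrative only, not part of this header): atomically
 * replace a flag and observe its previous value:
 *
 *	unsigned int prev = xchg(&some_flag, 1);
 *	if (prev == 0)
 *		... we are the ones who set it ...
 *
 * "some_flag" is a hypothetical variable used only for illustration.
 */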

/*
 * CMPXCHG8B only writes to the target if we had the previous
 * value in registers, otherwise it acts as a read and gives us the
 * "new previous" value.  That is why there is a loop.  Preloading
 * EDX:EAX is a performance optimization: in the common case it means
 * we need only one locked operation.
 *
 * A SIMD/3DNOW!/MMX/FPU 64-bit store here would require at the very
 * least an FPU save and/or %cr0.ts manipulation.
 *
 * cmpxchg8b must be used with the lock prefix here to allow the
 * instruction to be executed atomically.  The reader side needs to
 * see a coherent 64-bit value.
 */
static inline void set_64bit(volatile u64 *ptr, u64 value)
{
	u32 low  = value;
	u32 high = value >> 32;
	u64 prev = *ptr;

	asm volatile("\n1:\t"
		     LOCK_PREFIX "cmpxchg8b %0\n\t"
		     "jnz 1b"
		     : "=m" (*ptr), "+A" (prev)
		     : "b" (low), "c" (high)
		     : "memory");
}
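
/*
 * Roughly equivalent C for the loop above (illustrative sketch only):
 * cmpxchg8b compares EDX:EAX against *ptr; on a mismatch it reloads
 * EDX:EAX with the current value, so we simply retry until the store
 * takes effect:
 *
 *	u64 prev = *ptr;
 *	u64 seen;
 *	while ((seen = __cmpxchg64(ptr, prev, value)) != prev)
 *		prev = seen;
 */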

extern void __cmpxchg_wrong_size(void);

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */
#define __raw_cmpxchg(ptr, old, new, size, lock)			\
({									\
	__typeof__(*(ptr)) __ret;					\
	__typeof__(*(ptr)) __old = (old);				\
	__typeof__(*(ptr)) __new = (new);				\
	switch (size) {							\
	case 1:								\
	{								\
		volatile u8 *__ptr = (volatile u8 *)(ptr);		\
		asm volatile(lock "cmpxchgb %2,%1"			\
			     : "=a" (__ret), "+m" (*__ptr)		\
			     : "q" (__new), "0" (__old)			\
			     : "memory");				\
		break;							\
	}								\
	case 2:								\
	{								\
		volatile u16 *__ptr = (volatile u16 *)(ptr);		\
		asm volatile(lock "cmpxchgw %2,%1"			\
			     : "=a" (__ret), "+m" (*__ptr)		\
			     : "r" (__new), "0" (__old)			\
			     : "memory");				\
		break;							\
	}								\
	case 4:								\
	{								\
		volatile u32 *__ptr = (volatile u32 *)(ptr);		\
		asm volatile(lock "cmpxchgl %2,%1"			\
			     : "=a" (__ret), "+m" (*__ptr)		\
			     : "r" (__new), "0" (__old)			\
			     : "memory");				\
		break;							\
	}								\
	default:							\
		__cmpxchg_wrong_size();					\
	}								\
	__ret;								\
})

#define __cmpxchg(ptr, old, new, size)					\
	__raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)

#define __sync_cmpxchg(ptr, old, new, size)				\
	__raw_cmpxchg((ptr), (old), (new), (size), "lock; ")

#define __cmpxchg_local(ptr, old, new, size)				\
	__raw_cmpxchg((ptr), (old), (new), (size), "")
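
/*
 * The three flavours above differ only in the lock prefix passed to
 * __raw_cmpxchg(): __cmpxchg() uses LOCK_PREFIX, which is only emitted
 * when it is actually needed for SMP; __sync_cmpxchg() always uses
 * "lock; ", which is typically wanted when the memory is shared with
 * another bus agent (e.g. a hypervisor or device) even on a UP kernel;
 * __cmpxchg_local() omits the prefix and is only safe for data private
 * to the local CPU.
 */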

#ifdef CONFIG_X86_CMPXCHG
#define __HAVE_ARCH_CMPXCHG 1

#define cmpxchg(ptr, old, new)						\
	__cmpxchg((ptr), (old), (new), sizeof(*ptr))

#define sync_cmpxchg(ptr, old, new)					\
	__sync_cmpxchg((ptr), (old), (new), sizeof(*ptr))

#define cmpxchg_local(ptr, old, new)					\
	__cmpxchg_local((ptr), (old), (new), sizeof(*ptr))
#endif
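
/*
 * Typical usage sketch (illustrative only, not part of this header): a
 * lock-free retry loop that atomically increments a counter:
 *
 *	unsigned int old, prev;
 *	do {
 *		old = *counter;
 *		prev = cmpxchg(counter, old, old + 1);
 *	} while (prev != old);
 *
 * "counter" is a hypothetical (unsigned int *) used only for
 * illustration.
 */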

#ifdef CONFIG_X86_CMPXCHG64
#define cmpxchg64(ptr, o, n)						\
	((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \
					 (unsigned long long)(n)))
#define cmpxchg64_local(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o), \
					       (unsigned long long)(n)))
#endif

static inline u64 __cmpxchg64(volatile u64 *ptr, u64 old, u64 new)
{
	u64 prev;
	asm volatile(LOCK_PREFIX "cmpxchg8b %1"
		     : "=A" (prev),
		       "+m" (*ptr)
		     : "b" ((u32)new),
		       "c" ((u32)(new >> 32)),
		       "0" (old)
		     : "memory");
	return prev;
}

static inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new)
{
	u64 prev;
	asm volatile("cmpxchg8b %1"
		     : "=A" (prev),
		       "+m" (*ptr)
		     : "b" ((u32)new),
		       "c" ((u32)(new >> 32)),
		       "0" (old)
		     : "memory");
	return prev;
}

#ifndef CONFIG_X86_CMPXCHG
/*
 * Building a kernel capable of running on an 80386.  It may be necessary
 * to simulate cmpxchg on the 80386 CPU, so for that purpose we define a
 * function for each of the sizes we support.
 */

extern unsigned long cmpxchg_386_u8(volatile void *, u8, u8);
extern unsigned long cmpxchg_386_u16(volatile void *, u16, u16);
extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32);

static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
					unsigned long new, int size)
{
	switch (size) {
	case 1:
		return cmpxchg_386_u8(ptr, old, new);
	case 2:
		return cmpxchg_386_u16(ptr, old, new);
	case 4:
		return cmpxchg_386_u32(ptr, old, new);
	}
	return old;
}

#define cmpxchg(ptr, o, n)						\
({									\
	__typeof__(*(ptr)) __ret;					\
	if (likely(boot_cpu_data.x86 > 3))				\
		__ret = (__typeof__(*(ptr)))__cmpxchg((ptr),		\
				(unsigned long)(o), (unsigned long)(n),	\
				sizeof(*(ptr)));			\
	else								\
		__ret = (__typeof__(*(ptr)))cmpxchg_386((ptr),		\
				(unsigned long)(o), (unsigned long)(n),	\
				sizeof(*(ptr)));			\
	__ret;								\
})
#define cmpxchg_local(ptr, o, n)					\
({									\
	__typeof__(*(ptr)) __ret;					\
	if (likely(boot_cpu_data.x86 > 3))				\
		__ret = (__typeof__(*(ptr)))__cmpxchg_local((ptr),	\
				(unsigned long)(o), (unsigned long)(n),	\
				sizeof(*(ptr)));			\
	else								\
		__ret = (__typeof__(*(ptr)))cmpxchg_386((ptr),		\
				(unsigned long)(o), (unsigned long)(n),	\
				sizeof(*(ptr)));			\
	__ret;								\
})
#endif

#ifndef CONFIG_X86_CMPXCHG64
/*
 * Building a kernel capable of running on an 80386 or 80486.  It may be
 * necessary to simulate cmpxchg8b on the 80386 and 80486 CPUs.
 */

extern unsigned long long cmpxchg_486_u64(volatile void *, u64, u64);

#define cmpxchg64(ptr, o, n)					\
({								\
	__typeof__(*(ptr)) __ret;				\
	__typeof__(*(ptr)) __old = (o);				\
	__typeof__(*(ptr)) __new = (n);				\
	alternative_io(LOCK_PREFIX_HERE				\
		       "call cmpxchg8b_emu",			\
		       "lock; cmpxchg8b (%%esi)",		\
		       X86_FEATURE_CX8,				\
		       "=A" (__ret),				\
		       "S" ((ptr)), "0" (__old),		\
		       "b" ((unsigned int)__new),		\
		       "c" ((unsigned int)(__new >> 32))	\
		       : "memory");				\
	__ret;							\
})
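
/*
 * Note on the macro above (added for clarity): alternative_io() patches
 * the code at boot.  When the CPU advertises X86_FEATURE_CX8 the native
 * "lock; cmpxchg8b" replacement is used; otherwise the "call
 * cmpxchg8b_emu" is kept, and the emulation routine expects its
 * operands in the same registers the native instruction would use
 * (pointer in %esi, old in %edx:%eax, new in %ecx:%ebx).
 */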

#define cmpxchg64_local(ptr, o, n)				\
({								\
	__typeof__(*(ptr)) __ret;				\
	if (likely(boot_cpu_data.x86 > 4))			\
		__ret = (__typeof__(*(ptr)))__cmpxchg64_local((ptr), \
				(unsigned long long)(o),	\
				(unsigned long long)(n));	\
	else							\
		__ret = (__typeof__(*(ptr)))cmpxchg_486_u64((ptr), \
				(unsigned long long)(o),	\
				(unsigned long long)(n));	\
	__ret;							\
})

#endif

#endif /* _ASM_X86_CMPXCHG_32_H */