#ifndef _ASM_X86_CMPXCHG_32_H
#define _ASM_X86_CMPXCHG_32_H

/*
 * Note: if you use set_64bit(), __cmpxchg64(), or their variants, you
 * need to test for the feature in boot_cpu_data.
 */

/*
 * CMPXCHG8B only writes to the target if we had the previous
 * value in registers, otherwise it acts as a read and gives us the
 * "new previous" value.  That is why there is a loop.  Preloading
 * EDX:EAX is a performance optimization: in the common case it means
 * we need only one locked operation.
 *
 * A SIMD/3DNOW!/MMX/FPU 64-bit store here would require at the very
 * least an FPU save and/or %cr0.ts manipulation.
 *
 * cmpxchg8b must be used with the lock prefix here to allow the
 * instruction to be executed atomically.  We need the reader side to
 * see a coherent 64-bit value.
 */
static inline void set_64bit(volatile u64 *ptr, u64 value)
{
	u32 low  = value;
	u32 high = value >> 32;
	u64 prev = *ptr;

	asm volatile("\n1:\t"
		     LOCK_PREFIX "cmpxchg8b %0\n\t"
		     "jnz 1b"
		     : "=m" (*ptr), "+A" (prev)
		     : "b" (low), "c" (high)
		     : "memory");
}

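/*
 * Illustrative sketch, not part of the original header: publishing a
 * 64-bit value that concurrent readers may load.  The function name
 * and its arguments are hypothetical.
 */
static inline void example_publish_seq(volatile u64 *slot, u64 seq)
{
	/* set_64bit() guarantees no reader sees a torn high/low half. */
	set_64bit(slot, seq);
}
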
#ifdef CONFIG_X86_CMPXCHG
#define __HAVE_ARCH_CMPXCHG 1
#endif

#ifdef CONFIG_X86_CMPXCHG64
#define cmpxchg64(ptr, o, n)						\
	((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \
					 (unsigned long long)(n)))
#define cmpxchg64_local(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o), \
					       (unsigned long long)(n)))
#endif

static inline u64 __cmpxchg64(volatile u64 *ptr, u64 old, u64 new)
{
	u64 prev;
	asm volatile(LOCK_PREFIX "cmpxchg8b %1"
		     : "=A" (prev),
		       "+m" (*ptr)
		     : "b" ((u32)new),
		       "c" ((u32)(new >> 32)),
		       "0" (old)
		     : "memory");
	return prev;
}

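/*
 * __cmpxchg64_local() below is identical to __cmpxchg64() except for
 * the missing lock prefix: it is atomic only with respect to the
 * local CPU (e.g. for per-CPU data), not across SMP.
 */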
static inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new)
{
	u64 prev;
	asm volatile("cmpxchg8b %1"
		     : "=A" (prev),
		       "+m" (*ptr)
		     : "b" ((u32)new),
		       "c" ((u32)(new >> 32)),
		       "0" (old)
		     : "memory");
	return prev;
}

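/*
 * Illustrative sketch, not part of the original header: the classic
 * compare-and-swap retry loop, here built on __cmpxchg64() above.
 * The function name is hypothetical; real users would normally go
 * through the cmpxchg64() wrapper.
 */
static inline void example_add64(volatile u64 *counter, u64 delta)
{
	u64 old = *counter;	/* racy snapshot, validated by cmpxchg8b */
	u64 prev;

	for (;;) {
		prev = __cmpxchg64(counter, old, old + delta);
		if (prev == old)	/* the swap happened */
			break;
		old = prev;	/* lost a race: retry from the "new previous" */
	}
}
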
#ifndef CONFIG_X86_CMPXCHG
/*
 * Building a kernel capable of running on an 80386.  It may be
 * necessary to simulate cmpxchg on the 80386 CPU; for that purpose we
 * define a function for each of the sizes we support.
 */

extern unsigned long cmpxchg_386_u8(volatile void *, u8, u8);
extern unsigned long cmpxchg_386_u16(volatile void *, u16, u16);
extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32);

static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
					unsigned long new, int size)
{
	switch (size) {
	case 1:
		return cmpxchg_386_u8(ptr, old, new);
	case 2:
		return cmpxchg_386_u16(ptr, old, new);
	case 4:
		return cmpxchg_386_u32(ptr, old, new);
	}
	return old;
}

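/*
 * Illustrative sketch, not part of this header: one plausible way to
 * implement the out-of-line helpers declared above on an 80386, which
 * lacks the cmpxchg instruction.  On a uniprocessor, disabling
 * interrupts around the read-compare-write makes it atomic; this body
 * is an assumption for illustration, not the kernel's actual
 * out-of-line code.
 */
static inline unsigned long example_cmpxchg_386_u32(volatile void *ptr,
						    u32 old, u32 new)
{
	unsigned long flags;
	u32 prev;

	local_irq_save(flags);	/* UP only: nothing else can interleave */
	prev = *(volatile u32 *)ptr;
	if (prev == old)
		*(volatile u32 *)ptr = new;
	local_irq_restore(flags);
	return prev;
}
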
#define cmpxchg(ptr, o, n)						\
({									\
	__typeof__(*(ptr)) __ret;					\
	if (likely(boot_cpu_data.x86 > 3))				\
		__ret = (__typeof__(*(ptr)))__cmpxchg((ptr),		\
				(unsigned long)(o), (unsigned long)(n),	\
				sizeof(*(ptr)));			\
	else								\
		__ret = (__typeof__(*(ptr)))cmpxchg_386((ptr),		\
				(unsigned long)(o), (unsigned long)(n),	\
				sizeof(*(ptr)));			\
	__ret;								\
})
#define cmpxchg_local(ptr, o, n)					\
({									\
	__typeof__(*(ptr)) __ret;					\
	if (likely(boot_cpu_data.x86 > 3))				\
		__ret = (__typeof__(*(ptr)))__cmpxchg_local((ptr),	\
				(unsigned long)(o), (unsigned long)(n),	\
				sizeof(*(ptr)));			\
	else								\
		__ret = (__typeof__(*(ptr)))cmpxchg_386((ptr),		\
				(unsigned long)(o), (unsigned long)(n),	\
				sizeof(*(ptr)));			\
	__ret;								\
})
#endif

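/*
 * Illustrative sketch, not part of this header: cmpxchg() as a
 * one-shot claim on a flag word, assuming the generic cmpxchg()
 * definition is in scope when CONFIG_X86_CMPXCHG is set.  Exactly one
 * caller observes the 0 -> 1 transition and wins; the name is
 * hypothetical.
 */
static inline int example_try_claim(unsigned long *flag)
{
	/* cmpxchg() returns the old value: 0 means we installed the 1. */
	return cmpxchg(flag, 0UL, 1UL) == 0;
}
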
#ifndef CONFIG_X86_CMPXCHG64
/*
 * Building a kernel capable of running on the 80386 and 80486, which
 * lack cmpxchg8b.  At boot, alternatives patching selects the inline
 * "lock; cmpxchg8b" when the CPU advertises X86_FEATURE_CX8, and the
 * out-of-line cmpxchg8b_emu emulation otherwise.
 */

#define cmpxchg64(ptr, o, n)					\
({								\
	__typeof__(*(ptr)) __ret;				\
	__typeof__(*(ptr)) __old = (o);				\
	__typeof__(*(ptr)) __new = (n);				\
	alternative_io(LOCK_PREFIX_HERE				\
		       "call cmpxchg8b_emu",			\
		       "lock; cmpxchg8b (%%esi)",		\
		       X86_FEATURE_CX8,				\
		       "=A" (__ret),				\
		       "S" ((ptr)), "0" (__old),		\
		       "b" ((unsigned int)__new),		\
		       "c" ((unsigned int)(__new >> 32))	\
		       : "memory");				\
	__ret; })

#define cmpxchg64_local(ptr, o, n)				\
({								\
	__typeof__(*(ptr)) __ret;				\
	__typeof__(*(ptr)) __old = (o);				\
	__typeof__(*(ptr)) __new = (n);				\
	alternative_io("call cmpxchg8b_emu",			\
		       "cmpxchg8b (%%esi)",			\
		       X86_FEATURE_CX8,				\
		       "=A" (__ret),				\
		       "S" ((ptr)), "0" (__old),		\
		       "b" ((unsigned int)__new),		\
		       "c" ((unsigned int)(__new >> 32))	\
		       : "memory");				\
	__ret; })

#endif

#define system_has_cmpxchg_double() cpu_has_cx8

#endif /* _ASM_X86_CMPXCHG_32_H */