// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file is an internal atomic implementation, use base/atomicops.h instead.
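//
// A minimal usage sketch (assumed client code; real callers should include
// base/atomicops.h rather than this header):
//
//   base::subtle::Atomic32 ready = 0;        // shared flag
//   // Writer: publish data, then release-store the flag.
//   base::subtle::Release_Store(&ready, 1);
//   // Reader: acquire-load the flag before touching the published data.
//   if (base::subtle::Acquire_Load(&ready)) { /* data is visible here */ }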

#ifndef BASE_ATOMICOPS_INTERNALS_X86_GCC_H_
#define BASE_ATOMICOPS_INTERNALS_X86_GCC_H_

#include "base/base_export.h"

// This struct is not part of the public API of this module; clients may not
// use it. (However, it's exported via BASE_EXPORT because clients implicitly
// do use it at link time by inlining these functions.)
// Features of this x86. Values may not be correct before main() is run,
// but are set conservatively.
struct AtomicOps_x86CPUFeatureStruct {
  bool has_amd_lock_mb_bug;  // Processor has AMD memory-barrier bug; do lfence
                             // after acquire compare-and-swap.
};
BASE_EXPORT extern struct AtomicOps_x86CPUFeatureStruct
    AtomicOps_Internalx86CPUFeatures;
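// Note: this struct is expected to be filled in (e.g. from cpuid) by the
// accompanying .cc file during static initialization, which is why the
// values above may be conservative before main() runs.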
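// Compiler-only barrier: the empty asm with a "memory" clobber stops the
// compiler from reordering memory accesses across it, but emits no
// instruction, so it does not constrain the hardware.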
#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")

namespace base {
namespace subtle {

// 32-bit low-level operations on any platform.

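// Atomically compare *ptr against old_value and, if they match, store
// new_value; always returns the prior contents of *ptr. cmpxchg compares
// against and returns through EAX, hence the "=a"/"0" constraints below.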
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  Atomic32 prev;
  __asm__ __volatile__("lock; cmpxchgl %1,%2"
                       : "=a" (prev)
                       : "q" (new_value), "m" (*ptr), "0" (old_value)
                       : "memory");
  return prev;
}

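// Atomically store new_value into *ptr, returning the value previously held
// there. No explicit lock prefix is needed: xchg with a memory operand is
// always locked on x86.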
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  __asm__ __volatile__("xchgl %1,%0"  // The lock prefix is implicit for xchg.
                       : "=r" (new_value)
                       : "m" (*ptr), "0" (new_value)
                       : "memory");
  return new_value;  // Now it's the previous value.
}

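// Atomically add increment to *ptr and return the new (post-increment)
// value. xadd leaves the old value of *ptr in temp, so the new value is
// reconstructed as temp + increment.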
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  Atomic32 temp = increment;
  __asm__ __volatile__("lock; xaddl %0,%1"
                       : "+r" (temp), "+m" (*ptr)
                       : : "memory");
  // temp now holds the old value of *ptr
  return temp + increment;
}

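// Same operation as above, but followed by an lfence on processors with the
// AMD lock/mb bug (see AtomicOps_x86CPUFeatureStruct above), so the locked
// xadd still provides its intended barrier semantics on those chips.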
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  Atomic32 temp = increment;
  __asm__ __volatile__("lock; xaddl %0,%1"
                       : "+r" (temp), "+m" (*ptr)
                       : : "memory");
  // temp now holds the old value of *ptr
  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
    __asm__ __volatile__("lfence" : : : "memory");
  }
  return temp + increment;
}

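// CAS with acquire semantics. On x86 the locked cmpxchg already provides
// them, except on chips with the AMD lock/mb bug, where the trailing lfence
// restores the acquire ordering.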
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
    __asm__ __volatile__("lfence" : : : "memory");
  }
  return x;
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
}

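// Full hardware memory barrier: mfence orders all earlier loads and stores
// before all later ones, in addition to acting as a compiler barrier.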
inline void MemoryBarrier() {
  __asm__ __volatile__("mfence" : : : "memory");
}

inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  ATOMICOPS_COMPILER_BARRIER();
  *ptr = value;  // An x86 store acts as a release barrier.
  // See comments in Atomic64 version of Release_Store(), below.
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return *ptr;
}

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  Atomic32 value = *ptr;  // An x86 load acts as an acquire barrier.
  // See comments in Atomic64 version of Release_Store(), below.
  ATOMICOPS_COMPILER_BARRIER();
  return value;
}

inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  MemoryBarrier();
  return *ptr;
}

#if defined(__x86_64__)

// 64-bit low-level operations on 64-bit platform.
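// These mirror the 32-bit operations above, using the q-suffixed (64-bit)
// forms of the same instructions; the barrier reasoning is identical.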
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  Atomic64 prev;
  __asm__ __volatile__("lock; cmpxchgq %1,%2"
                       : "=a" (prev)
                       : "q" (new_value), "m" (*ptr), "0" (old_value)
                       : "memory");
  return prev;
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  __asm__ __volatile__("xchgq %1,%0"  // The lock prefix is implicit for xchg.
                       : "=r" (new_value)
                       : "m" (*ptr), "0" (new_value)
                       : "memory");
  return new_value;  // Now it's the previous value.
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  Atomic64 temp = increment;
  __asm__ __volatile__("lock; xaddq %0,%1"
                       : "+r" (temp), "+m" (*ptr)
                       : : "memory");
  // temp now contains the previous value of *ptr
  return temp + increment;
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  Atomic64 temp = increment;
  __asm__ __volatile__("lock; xaddq %0,%1"
                       : "+r" (temp), "+m" (*ptr)
                       : : "memory");
  // temp now contains the previous value of *ptr
  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
    __asm__ __volatile__("lfence" : : : "memory");
  }
  return temp + increment;
}

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  ATOMICOPS_COMPILER_BARRIER();

  *ptr = value;  // An x86 store acts as a release barrier
                 // for current AMD/Intel chips as of Jan 2008.
                 // See also Acquire_Load(), below.

  // When new chips come out, check:
  //  IA-32 Intel Architecture Software Developer's Manual, Volume 3:
  //  System Programming Guide, Chapter 7: Multiple-processor management,
  //  Section 7.2, Memory Ordering.
  // Last seen at:
  //   http://developer.intel.com/design/pentium4/manuals/index_new.htm
  //
  // x86 stores/loads fail to act as barriers for a few instructions (clflush
  // maskmovdqu maskmovq movntdq movnti movntpd movntps movntq) but these are
  // not generated by the compiler, and are rare. Users of these instructions
  // need to know about cache behaviour in any case since all of these involve
  // either flushing cache lines or non-temporal cache hints.
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return *ptr;
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  Atomic64 value = *ptr;  // An x86 load acts as an acquire barrier,
                          // for current AMD/Intel chips as of Jan 2008.
                          // See also Release_Store(), above.
  ATOMICOPS_COMPILER_BARRIER();
  return value;
}

inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
  MemoryBarrier();
  return *ptr;
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
    __asm__ __volatile__("lfence" : : : "memory");
  }
  return x;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

#endif  // defined(__x86_64__)

}  // namespace base::subtle
}  // namespace base

#undef ATOMICOPS_COMPILER_BARRIER

#endif  // BASE_ATOMICOPS_INTERNALS_X86_GCC_H_