// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// This file is an internal atomic implementation, use atomicops.h instead.

#ifndef V8_ATOMICOPS_INTERNALS_X86_MACOSX_H_
#define V8_ATOMICOPS_INTERNALS_X86_MACOSX_H_

#include <libkern/OSAtomic.h>

namespace v8 {
namespace internal {

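// Compare-and-swap without explicit memory barriers. OSAtomicCompareAndSwap32
// only reports success or failure, while this interface must return the value
// the location held before the operation. On failure we re-read *ptr; if the
// re-read still yields old_value, another thread raced the location back to
// old_value, so we retry the CAS instead of returning a stale snapshot.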
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  Atomic32 prev_value;
  do {
    if (OSAtomicCompareAndSwap32(old_value, new_value,
                                 const_cast<Atomic32*>(ptr))) {
      return old_value;
    }
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}

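// Exchange is built from a CAS loop (libkern's OSAtomic.h offers no direct
// exchange primitive): snapshot the current value, then retry until the swap
// from that snapshot succeeds.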
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  Atomic32 old_value;
  do {
    old_value = *ptr;
  } while (!OSAtomicCompareAndSwap32(old_value, new_value,
                                     const_cast<Atomic32*>(ptr)));
  return old_value;
}

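// OSAtomicAdd32 returns the value after the addition, which matches the
// AtomicIncrement contract directly. The Barrier variant below is the same
// operation but also acts as a memory barrier.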
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  return OSAtomicAdd32(increment, const_cast<Atomic32*>(ptr));
}

inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  return OSAtomicAdd32Barrier(increment, const_cast<Atomic32*>(ptr));
}

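// OSMemoryBarrier() is a full barrier: neither loads nor stores may be
// reordered across it.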
inline void MemoryBarrier() {
  OSMemoryBarrier();
}

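// Same retry logic as NoBarrier_CompareAndSwap above, but the Barrier
// variant of the libkern CAS also acts as a memory barrier around the
// operation.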
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 prev_value;
  do {
    if (OSAtomicCompareAndSwap32Barrier(old_value, new_value,
                                        const_cast<Atomic32*>(ptr))) {
      return old_value;
    }
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}

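// The libkern interface does not distinguish between acquire and release
// barriers (see the comment on the 64-bit version below), so the release
// CAS simply reuses the acquire implementation.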
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  return Acquire_CompareAndSwap(ptr, old_value, new_value);
}

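// A plain volatile store: naturally aligned 32-bit stores are atomic on
// x86, so no special instruction is needed when no ordering is required.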
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
}

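// Barrier placement for the ordered stores: Acquire_Store performs the
// store and then a full barrier; Release_Store issues the barrier first,
// so all earlier writes are visible before the new value is published.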
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  MemoryBarrier();
  *ptr = value;
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return *ptr;
}

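// Mirror image of the stores: Acquire_Load reads the value and then fences,
// so later memory operations cannot be hoisted above the load; Release_Load
// fences first and then reads.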
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  Atomic32 value = *ptr;
  MemoryBarrier();
  return value;
}

inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  MemoryBarrier();
  return *ptr;
}

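// Illustrative usage sketch (assumed caller code, not part of this header):
// a writer publishes data with Release_Store and a reader picks it up with
// Acquire_Load, so the reader never observes the flag without the payload.
//
//   Atomic32 flag = 0;
//   int payload = 0;
//
//   // Writer thread:
//   //   payload = 42;
//   //   Release_Store(&flag, 1);                    // barrier, then store
//   // Reader thread:
//   //   if (Acquire_Load(&flag)) { /* read payload */ }  // load, then barrier
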
#ifdef __LP64__

// 64-bit implementation on 64-bit platform

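// The 64-bit versions below mirror the 32-bit ones exactly, substituting
// the 64-bit libkern primitives (OSAtomicCompareAndSwap64, OSAtomicAdd64,
// and their Barrier variants).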
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  Atomic64 prev_value;
  do {
    if (OSAtomicCompareAndSwap64(old_value, new_value,
                                 const_cast<Atomic64*>(ptr))) {
      return old_value;
    }
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  Atomic64 old_value;
  do {
    old_value = *ptr;
  } while (!OSAtomicCompareAndSwap64(old_value, new_value,
                                     const_cast<Atomic64*>(ptr)));
  return old_value;
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  return OSAtomicAdd64(increment, const_cast<Atomic64*>(ptr));
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  return OSAtomicAdd64Barrier(increment, const_cast<Atomic64*>(ptr));
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 prev_value;
  do {
    if (OSAtomicCompareAndSwap64Barrier(old_value, new_value,
                                        const_cast<Atomic64*>(ptr))) {
      return old_value;
    }
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  // The lib kern interface does not distinguish between
  // Acquire and Release memory barriers; they are equivalent.
  return Acquire_CompareAndSwap(ptr, old_value, new_value);
}

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  MemoryBarrier();
  *ptr = value;
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return *ptr;
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  Atomic64 value = *ptr;
  MemoryBarrier();
  return value;
}

inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
  MemoryBarrier();
  return *ptr;
}

#endif  // defined(__LP64__)

// MacOS uses long for intptr_t, so AtomicWord and Atomic32 are distinct
// types on the Mac even when they have the same size. We therefore need to
// explicitly cast from AtomicWord to Atomic32/64 to implement the
// AtomicWord interface.
#ifdef __LP64__
#define AtomicWordCastType Atomic64
#else
#define AtomicWordCastType Atomic32
#endif

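// Each AtomicWord wrapper simply reinterprets the pointer as the same-sized
// Atomic32* or Atomic64* (per AtomicWordCastType above) and forwards to the
// corresponding implementation.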
inline AtomicWord NoBarrier_CompareAndSwap(volatile AtomicWord* ptr,
                                           AtomicWord old_value,
                                           AtomicWord new_value) {
  return NoBarrier_CompareAndSwap(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr),
      old_value, new_value);
}

inline AtomicWord NoBarrier_AtomicExchange(volatile AtomicWord* ptr,
                                           AtomicWord new_value) {
  return NoBarrier_AtomicExchange(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr), new_value);
}

inline AtomicWord NoBarrier_AtomicIncrement(volatile AtomicWord* ptr,
                                            AtomicWord increment) {
  return NoBarrier_AtomicIncrement(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr), increment);
}

inline AtomicWord Barrier_AtomicIncrement(volatile AtomicWord* ptr,
                                          AtomicWord increment) {
  return Barrier_AtomicIncrement(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr), increment);
}

inline AtomicWord Acquire_CompareAndSwap(volatile AtomicWord* ptr,
                                         AtomicWord old_value,
                                         AtomicWord new_value) {
  return v8::internal::Acquire_CompareAndSwap(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr),
      old_value, new_value);
}

inline AtomicWord Release_CompareAndSwap(volatile AtomicWord* ptr,
                                         AtomicWord old_value,
                                         AtomicWord new_value) {
  return v8::internal::Release_CompareAndSwap(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr),
      old_value, new_value);
}

inline void NoBarrier_Store(volatile AtomicWord* ptr, AtomicWord value) {
  NoBarrier_Store(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
}

inline void Acquire_Store(volatile AtomicWord* ptr, AtomicWord value) {
  return v8::internal::Acquire_Store(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
}

inline void Release_Store(volatile AtomicWord* ptr, AtomicWord value) {
  return v8::internal::Release_Store(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
}

inline AtomicWord NoBarrier_Load(volatile const AtomicWord* ptr) {
  return NoBarrier_Load(
      reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
}

inline AtomicWord Acquire_Load(volatile const AtomicWord* ptr) {
  return v8::internal::Acquire_Load(
      reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
}

inline AtomicWord Release_Load(volatile const AtomicWord* ptr) {
  return v8::internal::Release_Load(
      reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
}

#undef AtomicWordCastType

} }  // namespace v8::internal

#endif  // V8_ATOMICOPS_INTERNALS_X86_MACOSX_H_