// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// This file is an internal atomic implementation; use atomicops.h instead.

#ifndef V8_ATOMICOPS_INTERNALS_X86_MACOSX_H_
#define V8_ATOMICOPS_INTERNALS_X86_MACOSX_H_

#include <libkern/OSAtomic.h>

namespace v8 {
namespace internal {

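// Atomically compare *ptr with old_value and, if they are equal, replace it
// with new_value. Returns the value *ptr held before the operation.
// OSAtomicCompareAndSwap32 reports only success or failure, so after a failed
// swap we re-read *ptr; if another thread has raced the value back to
// old_value in the meantime, the swap is retried.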
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  Atomic32 prev_value;
  do {
    if (OSAtomicCompareAndSwap32(old_value, new_value,
                                 const_cast<Atomic32*>(ptr))) {
      return old_value;
    }
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}

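// For example (illustrative only), a caller can tell whether its swap won by
// comparing the return value with old_value:
//
//   Atomic32 flag = 0;
//   if (NoBarrier_CompareAndSwap(&flag, 0, 1) == 0) {
//     // This thread performed the 0 -> 1 transition.
//   }

// Atomically store new_value into *ptr and return the value *ptr held just
// before the store. <libkern/OSAtomic.h> has no plain exchange primitive, so
// the exchange is emulated with a compare-and-swap loop that retries until
// the swap succeeds.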
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  Atomic32 old_value;
  do {
    old_value = *ptr;
  } while (!OSAtomicCompareAndSwap32(old_value, new_value,
                                     const_cast<Atomic32*>(ptr)));
  return old_value;
}

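// Atomically add increment to *ptr and return the new (incremented) value.
// The Barrier_ variant also acts as a full memory barrier.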
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  return OSAtomicAdd32(increment, const_cast<Atomic32*>(ptr));
}

inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  return OSAtomicAdd32Barrier(increment, const_cast<Atomic32*>(ptr));
}

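// Issue a full memory barrier: all loads and stores issued before the barrier
// are completed before any load or store issued after it.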
inline void MemoryBarrier() {
  OSMemoryBarrier();
}

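// Same retry protocol as NoBarrier_CompareAndSwap above, built on the
// barrier-issuing OSAtomicCompareAndSwap32Barrier primitive.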
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 prev_value;
  do {
    if (OSAtomicCompareAndSwap32Barrier(old_value, new_value,
                                        const_cast<Atomic32*>(ptr))) {
      return old_value;
    }
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}

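// The libkern interface does not distinguish between acquire and release
// memory barriers (see the 64-bit version below), so the release flavor
// simply reuses the acquire implementation.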
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  return Acquire_CompareAndSwap(ptr, old_value, new_value);
}

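// Stores and loads in plain, acquire, and release flavors. OSMemoryBarrier
// provides only a full barrier, so the ordered variants pair an ordinary
// volatile access with that barrier: the barrier follows the access in the
// Acquire_ variants and precedes it in the Release_ variants.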
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  MemoryBarrier();
  *ptr = value;
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return *ptr;
}

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  Atomic32 value = *ptr;
  MemoryBarrier();
  return value;
}

inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  MemoryBarrier();
  return *ptr;
}

#ifdef __LP64__

// 64-bit implementation on 64-bit platforms.

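// These mirror the 32-bit implementations above one-for-one, substituting the
// OSAtomic*64 primitives; see the comments there for the retry protocol.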
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  Atomic64 prev_value;
  do {
    if (OSAtomicCompareAndSwap64(old_value, new_value,
                                 const_cast<Atomic64*>(ptr))) {
      return old_value;
    }
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  Atomic64 old_value;
  do {
    old_value = *ptr;
  } while (!OSAtomicCompareAndSwap64(old_value, new_value,
                                     const_cast<Atomic64*>(ptr)));
  return old_value;
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  return OSAtomicAdd64(increment, const_cast<Atomic64*>(ptr));
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  return OSAtomicAdd64Barrier(increment, const_cast<Atomic64*>(ptr));
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 prev_value;
  do {
    if (OSAtomicCompareAndSwap64Barrier(old_value, new_value,
                                        const_cast<Atomic64*>(ptr))) {
      return old_value;
    }
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  // The libkern interface does not distinguish between Acquire and Release
  // memory barriers; they are equivalent.
  return Acquire_CompareAndSwap(ptr, old_value, new_value);
}

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  MemoryBarrier();
  *ptr = value;
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return *ptr;
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  Atomic64 value = *ptr;
  MemoryBarrier();
  return value;
}

inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
  MemoryBarrier();
  return *ptr;
}

#endif  // defined(__LP64__)

// MacOS uses long for intptr_t, so AtomicWord and Atomic32 are always
// distinct types on the Mac, even when they have the same size. We therefore
// explicitly cast from AtomicWord to Atomic32/64 to implement the AtomicWord
// interface.
#ifdef __LP64__
#define AtomicWordCastType Atomic64
#else
#define AtomicWordCastType Atomic32
#endif

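// Each AtomicWord wrapper forwards to the matching Atomic32/Atomic64 routine
// above after casting the pointer to AtomicWordCastType.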
inline AtomicWord NoBarrier_CompareAndSwap(volatile AtomicWord* ptr,
                                           AtomicWord old_value,
                                           AtomicWord new_value) {
  return NoBarrier_CompareAndSwap(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr),
      old_value, new_value);
}

inline AtomicWord NoBarrier_AtomicExchange(volatile AtomicWord* ptr,
                                           AtomicWord new_value) {
  return NoBarrier_AtomicExchange(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr), new_value);
}

inline AtomicWord NoBarrier_AtomicIncrement(volatile AtomicWord* ptr,
                                            AtomicWord increment) {
  return NoBarrier_AtomicIncrement(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr), increment);
}

inline AtomicWord Barrier_AtomicIncrement(volatile AtomicWord* ptr,
                                          AtomicWord increment) {
  return Barrier_AtomicIncrement(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr), increment);
}

inline AtomicWord Acquire_CompareAndSwap(volatile AtomicWord* ptr,
                                         AtomicWord old_value,
                                         AtomicWord new_value) {
  return v8::internal::Acquire_CompareAndSwap(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr),
      old_value, new_value);
}

inline AtomicWord Release_CompareAndSwap(volatile AtomicWord* ptr,
                                         AtomicWord old_value,
                                         AtomicWord new_value) {
  return v8::internal::Release_CompareAndSwap(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr),
      old_value, new_value);
}

inline void NoBarrier_Store(volatile AtomicWord* ptr, AtomicWord value) {
  NoBarrier_Store(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
}

inline void Acquire_Store(volatile AtomicWord* ptr, AtomicWord value) {
  return v8::internal::Acquire_Store(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
}

inline void Release_Store(volatile AtomicWord* ptr, AtomicWord value) {
  return v8::internal::Release_Store(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
}

inline AtomicWord NoBarrier_Load(volatile const AtomicWord* ptr) {
  return NoBarrier_Load(
      reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
}

inline AtomicWord Acquire_Load(volatile const AtomicWord* ptr) {
  return v8::internal::Acquire_Load(
      reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
}

inline AtomicWord Release_Load(volatile const AtomicWord* ptr) {
  return v8::internal::Release_Load(
      reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
}

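// Usage sketch (illustrative; real callers should include atomicops.h rather
// than this header directly): a minimal spinlock built from these primitives.
//
//   AtomicWord lock = 0;
//   void Lock()   { while (Acquire_CompareAndSwap(&lock, 0, 1) != 0) {} }
//   void Unlock() { Release_Store(&lock, 0); }
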
#undef AtomicWordCastType

} }  // namespace v8::internal

#endif  // V8_ATOMICOPS_INTERNALS_X86_MACOSX_H_