// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file is an internal atomic implementation; use base/atomicops.h instead.
//
// Barrier_AtomicIncrement is from Google Gears.

#ifndef BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_
#define BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_

namespace base {
namespace subtle {

// Atomically execute:
//   result = *ptr;
//   if (*ptr == old_value)
//     *ptr = new_value;
//   return result;
//
// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
// Always returns the old value of "*ptr".
//
// This routine implies no memory barriers.
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  Atomic32 prev, tmp;
  __asm__ __volatile__(".set push\n"
                       ".set noreorder\n"
                       "1:\n"
                       "ll %0, %5\n"  // prev = *ptr
                       "bne %0, %3, 2f\n"  // if (prev != old_value) goto 2
                       "move %2, %4\n"  // delay slot: tmp = new_value
                       "sc %2, %1\n"  // *ptr = tmp (%2 gets the sc success flag)
                       "beqz %2, 1b\n"  // retry if sc failed (reservation lost)
                       "nop\n"  // delay slot nop
                       "2:\n"
                       ".set pop\n"
                       : "=&r" (prev), "=m" (*ptr), "=&r" (tmp)
                       : "Ir" (old_value), "r" (new_value), "m" (*ptr)
                       : "memory");
  return prev;
}
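
// A minimal usage sketch (illustrative only; "lock" is a hypothetical caller
// variable, not part of this header): spin until the CAS observes 0 and
// installs 1. A real lock would use the Acquire/Release variants below so
// the critical section is ordered against the CAS.
//
//   volatile Atomic32 lock = 0;
//   while (NoBarrier_CompareAndSwap(&lock, 0, 1) != 0) {
//     // Contended: another thread got there first; retry.
//   }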

// Atomically store new_value into *ptr, returning the previous value held in
// *ptr. This routine implies no memory barriers.
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  Atomic32 temp, old;
  __asm__ __volatile__(".set push\n"
                       ".set noreorder\n"
                       "1:\n"
                       "ll %1, %4\n"  // old = *ptr
                       "move %0, %3\n"  // temp = new_value
                       "sc %0, %2\n"  // *ptr = temp (%0 gets the sc success flag)
                       "beqz %0, 1b\n"  // retry if sc failed (reservation lost)
                       "nop\n"  // delay slot nop
                       ".set pop\n"
                       : "=&r" (temp), "=&r" (old), "=m" (*ptr)
                       : "r" (new_value), "m" (*ptr)
                       : "memory");

  return old;
}
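
// Usage sketch (hypothetical caller code; "mailbox" and kEmpty are assumed
// names, not defined in this file): drain a one-slot mailbox by swapping in
// the empty sentinel, reading and clearing the slot in one atomic step.
//
//   Atomic32 msg = NoBarrier_AtomicExchange(&mailbox, kEmpty);
//   if (msg != kEmpty) {
//     // We own msg; no other thread can observe or consume it now.
//   }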

// Atomically increment *ptr by "increment". Returns the new value of
// *ptr with the increment applied. This routine implies no memory barriers.
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  Atomic32 temp, temp2;

  __asm__ __volatile__(".set push\n"
                       ".set noreorder\n"
                       "1:\n"
                       "ll %0, %4\n"  // temp = *ptr
                       "addu %1, %0, %3\n"  // temp2 = temp + increment
                       "sc %1, %2\n"  // *ptr = temp2 (%1 gets the sc success flag)
                       "beqz %1, 1b\n"  // retry if sc failed (reservation lost)
                       "addu %1, %0, %3\n"  // delay slot: recompute temp2, which
                                            // sc overwrote with the success flag
                       ".set pop\n"
                       : "=&r" (temp), "=&r" (temp2), "=m" (*ptr)
                       : "Ir" (increment), "m" (*ptr)
                       : "memory");
  // temp2 now holds the final value.
  return temp2;
}

inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  MemoryBarrier();
  Atomic32 res = NoBarrier_AtomicIncrement(ptr, increment);
  MemoryBarrier();
  return res;
}
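
// Usage sketch (hypothetical caller code; "thing" and "ref_count" are assumed
// names): dropping a reference wants the barrier flavor, so writes made while
// holding the reference are visible to whichever thread sees the count hit 0.
//
//   if (Barrier_AtomicIncrement(&thing->ref_count, -1) == 0) {
//     delete thing;  // last owner; earlier writes are ordered before this
//   }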

// "Acquire" operations ensure that no later memory access can be reordered
// ahead of the operation. "Release" operations ensure that no previous memory
// access can be reordered after the operation. "Barrier" operations have both
// "Acquire" and "Release" semantics. A MemoryBarrier() has "Barrier"
// semantics, but does no memory access.
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
  MemoryBarrier();
  return res;
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  MemoryBarrier();
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
}

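// On MIPS, "sync" is a full barrier: it orders all loads and stores that
// precede it against all that follow. The callers above can use
// MemoryBarrier() ahead of this definition because base/atomicops.h declares
// these functions before including this platform-specific header.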
inline void MemoryBarrier() {
  __asm__ __volatile__("sync" : : : "memory");
}

inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  MemoryBarrier();
  *ptr = value;
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return *ptr;
}

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  Atomic32 value = *ptr;
  MemoryBarrier();
  return value;
}

inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  MemoryBarrier();
  return *ptr;
}
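
// Pairing sketch (hypothetical producer/consumer code; "payload" and "ready"
// are assumed names): the classic use of Release_Store with Acquire_Load is
// publishing data across threads. The release barrier keeps the payload write
// before the flag store; the acquire barrier keeps the flag load before the
// payload read.
//
//   // Producer:                     // Consumer:
//   payload = 42;                    if (Acquire_Load(&ready)) {
//   Release_Store(&ready, 1);          result = payload;  // sees 42
//                                    }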

#if defined(__LP64__)
// 64-bit versions of the atomic ops, built on the doubleword lld/scd pair.

inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  Atomic64 prev, tmp;
  __asm__ __volatile__(".set push\n"
                       ".set noreorder\n"
                       "1:\n"
                       "lld %0, %5\n"  // prev = *ptr
                       "bne %0, %3, 2f\n"  // if (prev != old_value) goto 2
                       "move %2, %4\n"  // delay slot: tmp = new_value
                       "scd %2, %1\n"  // *ptr = tmp (%2 gets the scd success flag)
                       "beqz %2, 1b\n"  // retry if scd failed (reservation lost)
                       "nop\n"  // delay slot nop
                       "2:\n"
                       ".set pop\n"
                       : "=&r" (prev), "=m" (*ptr), "=&r" (tmp)
                       : "Ir" (old_value), "r" (new_value), "m" (*ptr)
                       : "memory");
  return prev;
}

// Atomically store new_value into *ptr, returning the previous value held in
// *ptr. This routine implies no memory barriers.
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  Atomic64 temp, old;
  __asm__ __volatile__(".set push\n"
                       ".set noreorder\n"
                       "1:\n"
                       "lld %1, %4\n"  // old = *ptr
                       "move %0, %3\n"  // temp = new_value
                       "scd %0, %2\n"  // *ptr = temp (%0 gets the scd success flag)
                       "beqz %0, 1b\n"  // retry if scd failed (reservation lost)
                       "nop\n"  // delay slot nop
                       ".set pop\n"
                       : "=&r" (temp), "=&r" (old), "=m" (*ptr)
                       : "r" (new_value), "m" (*ptr)
                       : "memory");

  return old;
}

// Atomically increment *ptr by "increment". Returns the new value of
// *ptr with the increment applied. This routine implies no memory barriers.
inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  Atomic64 temp, temp2;

  __asm__ __volatile__(".set push\n"
                       ".set noreorder\n"
                       "1:\n"
                       "lld %0, %4\n"  // temp = *ptr
                       "daddu %1, %0, %3\n"  // temp2 = temp + increment
                       "scd %1, %2\n"  // *ptr = temp2 (%1 gets the scd success flag)
                       "beqz %1, 1b\n"  // retry if scd failed (reservation lost)
                       "daddu %1, %0, %3\n"  // delay slot: recompute temp2, which
                                             // scd overwrote with the success flag
                       ".set pop\n"
                       : "=&r" (temp), "=&r" (temp2), "=m" (*ptr)
                       : "Ir" (increment), "m" (*ptr)
                       : "memory");
  // temp2 now holds the final value.
  return temp2;
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  MemoryBarrier();
  Atomic64 res = NoBarrier_AtomicIncrement(ptr, increment);
  MemoryBarrier();
  return res;
}

// "Acquire", "Release", and "Barrier" have the same meanings as for the
// 32-bit operations; see the comment above the 32-bit Acquire_CompareAndSwap.
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
  MemoryBarrier();
  return res;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  MemoryBarrier();
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  MemoryBarrier();
  *ptr = value;
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return *ptr;
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  Atomic64 value = *ptr;
  MemoryBarrier();
  return value;
}

inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
  MemoryBarrier();
  return *ptr;
}
#endif  // defined(__LP64__)

}  // namespace subtle
}  // namespace base

#endif  // BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_