// Copyright 2010 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file is an internal atomic implementation; use atomicops.h instead.

#ifndef V8_BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_
#define V8_BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_

namespace v8 {
namespace base {

// Atomically execute:
//   result = *ptr;
//   if (*ptr == old_value)
//     *ptr = new_value;
//   return result;
//
// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
// Always returns the old value of "*ptr".
//
// This routine implies no memory barriers.
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  Atomic32 prev, tmp;
  __asm__ __volatile__(".set push\n"
                       ".set noreorder\n"
                       "1:\n"
                       "ll %0, 0(%4)\n"    // prev = *ptr
                       "bne %0, %2, 2f\n"  // if (prev != old_value) goto 2
                       "move %1, %3\n"     // tmp = new_value
                       "sc %1, 0(%4)\n"    // *ptr = tmp (with atomic check)
                       "beqz %1, 1b\n"     // start again on atomic error
                       "nop\n"             // delay slot nop
                       "2:\n"
                       ".set pop\n"
                       : "=&r" (prev), "=&r" (tmp)
                       : "r" (old_value), "r" (new_value), "r" (ptr)
                       : "memory");
  return prev;
}
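
// Illustrative sketch only, not part of this header: a typical retry loop
// built on compare-and-swap. AddViaCas is a hypothetical helper that adds
// "delta" to *ptr without barriers; real callers should go through the
// public wrappers in atomicops.h.
//
//   Atomic32 AddViaCas(volatile Atomic32* ptr, Atomic32 delta) {
//     Atomic32 old_value;
//     do {
//       old_value = NoBarrier_Load(ptr);
//     } while (NoBarrier_CompareAndSwap(ptr, old_value,
//                                       old_value + delta) != old_value);
//     return old_value + delta;  // the value this thread installed
//   }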

// Atomically store new_value into *ptr, returning the previous value held in
// *ptr. This routine implies no memory barriers.
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  Atomic32 temp, old;
  __asm__ __volatile__(".set push\n"
                       ".set noreorder\n"
                       ".set at\n"
                       "1:\n"
                       "ll %1, 0(%3)\n"  // old = *ptr
                       "move %0, %2\n"   // temp = new_value
                       "sc %0, 0(%3)\n"  // *ptr = temp (with atomic check)
                       "beqz %0, 1b\n"   // start again on atomic error
                       "nop\n"           // delay slot nop
                       ".set pop\n"
                       : "=&r" (temp), "=&r" (old)
                       : "r" (new_value), "r" (ptr)
                       : "memory");

  return old;
}
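
// Illustrative sketch only: exchange lets exactly one thread claim a value.
// "pending" is a hypothetical flag for this example.
//
//   volatile Atomic32 pending = 1;
//   ...
//   if (NoBarrier_AtomicExchange(&pending, 0) == 1) {
//     // Only the thread that saw the old value 1 gets here.
//   }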

// Atomically increment *ptr by "increment". Returns the new value of
// *ptr with the increment applied. This routine implies no memory barriers.
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  Atomic32 temp, temp2;

  __asm__ __volatile__(".set push\n"
                       ".set noreorder\n"
                       "1:\n"
                       "ll %0, 0(%3)\n"     // temp = *ptr
                       "addu %1, %0, %2\n"  // temp2 = temp + increment
                       "sc %1, 0(%3)\n"     // *ptr = temp2 (with atomic check)
                       "beqz %1, 1b\n"      // start again on atomic error
                       "addu %1, %0, %2\n"  // delay slot: recompute temp2 (sc clobbered %1)
                       ".set pop\n"
                       : "=&r" (temp), "=&r" (temp2)
                       : "Ir" (increment), "r" (ptr)
                       : "memory");
  // temp2 now holds the final value.
  return temp2;
}

inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  MemoryBarrier();
  Atomic32 res = NoBarrier_AtomicIncrement(ptr, increment);
  MemoryBarrier();
  return res;
}
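
// Illustrative sketch only: the full barriers make Barrier_AtomicIncrement
// suitable for reference counting, where the thread dropping the last
// reference must observe all earlier writes before destroying the object.
// "refcount" and DestroyObject() are hypothetical names.
//
//   if (Barrier_AtomicIncrement(&refcount, -1) == 0) {
//     DestroyObject();  // no other thread still holds a reference
//   }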

// "Acquire" operations ensure that no later memory access can be reordered
// ahead of the operation. "Release" operations ensure that no previous memory
// access can be reordered after the operation. "Barrier" operations have both
// "Acquire" and "Release" semantics. A MemoryBarrier() has "Barrier"
// semantics, but does no memory access.
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
  MemoryBarrier();
  return res;
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  MemoryBarrier();
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
  *ptr = value;
}

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
}

inline void MemoryBarrier() {
  // "sync" is a full hardware barrier on MIPS: all loads and stores before it
  // complete before any that follow.
  __asm__ __volatile__("sync" : : : "memory");
}

inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  MemoryBarrier();
  *ptr = value;
}
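
// Illustrative sketch only: a minimal spinlock built from the primitives in
// this file. Acquire_CompareAndSwap keeps the critical section from being
// reordered above the lock; Release_Store keeps it from sinking below the
// unlock. "lock" and the helper names are hypothetical.
//
//   void SpinLock(volatile Atomic32* lock) {
//     while (Acquire_CompareAndSwap(lock, 0, 1) != 0) {
//       // Spin until the lock word changes from 0 (free) to 1 (held).
//     }
//   }
//
//   void SpinUnlock(volatile Atomic32* lock) {
//     Release_Store(lock, 0);
//   }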

inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
  return *ptr;
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return *ptr;
}

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  Atomic32 value = *ptr;
  MemoryBarrier();
  return value;
}
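
// Illustrative sketch only: the classic message-passing pairing. A producer
// publishes data with Release_Store; a consumer that observes the flag via
// Acquire_Load is guaranteed to also observe the data. "payload", "ready",
// ComputePayload() and Consume() are hypothetical names.
//
//   // Producer:
//   payload = ComputePayload();  // plain write
//   Release_Store(&ready, 1);    // publish
//
//   // Consumer:
//   if (Acquire_Load(&ready) == 1) {
//     Consume(payload);  // sees the fully written payload
//   }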

inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  MemoryBarrier();
  return *ptr;
}

}  // namespace base
}  // namespace v8

#endif  // V8_BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_