// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// This file is an internal atomic implementation, use atomicops.h instead.

#ifndef V8_BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_
#define V8_BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_

namespace v8 {
namespace base {

// Atomically execute:
//   result = *ptr;
//   if (*ptr == old_value)
//     *ptr = new_value;
//   return result;
//
// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
// Always return the old value of "*ptr".
//
// This routine implies no memory barriers.
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  Atomic32 prev, tmp;
  __asm__ __volatile__(".set push\n"
                       ".set noreorder\n"
                       "1:\n"
                       "ll %0, %5\n"       // prev = *ptr
                       "bne %0, %3, 2f\n"  // if (prev != old_value) goto 2
                       "move %2, %4\n"     // tmp = new_value
                       "sc %2, %1\n"       // *ptr = tmp (with atomic check)
                       "beqz %2, 1b\n"     // start again on atomic error
                       "nop\n"             // delay slot nop
                       "2:\n"
                       ".set pop\n"
                       : "=&r" (prev), "=m" (*ptr), "=&r" (tmp)
                       : "r" (old_value), "r" (new_value), "m" (*ptr)
                       : "memory");
  return prev;
}
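
// Usage sketch (illustrative only, not part of this header's API): callers
// typically retry until the swap succeeds, i.e. until no other thread wrote
// *ptr between the read and the CAS. "counter" is a hypothetical variable.
//
//   Atomic32 old_value;
//   do {
//     old_value = *counter;
//   } while (NoBarrier_CompareAndSwap(counter, old_value,
//                                     old_value + 1) != old_value);
//   // old_value now holds the value the increment was applied to.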

// Atomically store new_value into *ptr, returning the previous value held in
// *ptr. This routine implies no memory barriers.
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  Atomic32 temp, old;
  __asm__ __volatile__(".set push\n"
                       ".set noreorder\n"
                       "1:\n"
                       "ll %1, %2\n"    // old = *ptr
                       "move %0, %3\n"  // temp = new_value
                       "sc %0, %2\n"    // *ptr = temp (with atomic check)
                       "beqz %0, 1b\n"  // start again on atomic error
                       "nop\n"          // delay slot nop
                       ".set pop\n"
                       : "=&r" (temp), "=&r" (old), "=m" (*ptr)
                       : "r" (new_value), "m" (*ptr)
                       : "memory");

  return old;
}
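
// Usage sketch (illustrative only): atomic exchange is the classic
// test-and-set primitive behind a spinlock. "lock" is a hypothetical Atomic32
// that is 0 when free and 1 when held.
//
//   void ExampleSpinLock(volatile Atomic32* lock) {
//     while (NoBarrier_AtomicExchange(lock, 1) != 0) {
//       // Busy-wait; another thread holds the lock.
//     }
//     MemoryBarrier();  // Acquire: keep critical-section accesses below.
//   }
//
//   void ExampleSpinUnlock(volatile Atomic32* lock) {
//     MemoryBarrier();  // Release: keep critical-section accesses above.
//     NoBarrier_Store(lock, 0);
//   }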

// Atomically increment *ptr by "increment". Returns the new value of
// *ptr with the increment applied. This routine implies no memory barriers.
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  Atomic32 temp, temp2;

  __asm__ __volatile__(
      ".set push\n"
      ".set noreorder\n"
      "1:\n"
      "ll %0, %2\n"        // temp = *ptr
      "addu %1, %0, %3\n"  // temp2 = temp + increment
      "sc %1, %2\n"        // *ptr = temp2 (with atomic check)
      "beqz %1, 1b\n"      // start again on atomic error
      "addu %1, %0, %3\n"  // temp2 = temp + increment
      ".set pop\n"
      : "=&r"(temp), "=&r"(temp2), "=ZC"(*ptr)
      : "Ir"(increment), "m"(*ptr)
      : "memory");
  // temp2 now holds the final value.
  return temp2;
}

inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  MemoryBarrier();
  Atomic32 res = NoBarrier_AtomicIncrement(ptr, increment);
  MemoryBarrier();
  return res;
}
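
// Usage sketch (illustrative only): because the increment routines return the
// new value, a caller can pass a negative increment and act on the transition
// to zero, as in reference counting. "refcount" is a hypothetical variable.
//
//   if (Barrier_AtomicIncrement(refcount, -1) == 0) {
//     // Last reference released; the surrounding barriers make it safe to
//     // tear the object down here.
//   }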

// "Acquire" operations ensure that no later memory access can be reordered
// ahead of the operation. "Release" operations ensure that no previous memory
// access can be reordered after the operation. "Barrier" operations have both
// "Acquire" and "Release" semantics. A MemoryBarrier() has "Barrier"
// semantics, but does no memory access. A message-passing sketch that relies
// on these semantics follows Release_Load() below.
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
  MemoryBarrier();
  return res;
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  MemoryBarrier();
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
  *ptr = value;
}

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
}

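// On MIPS, "sync" with no operand (stype 0) is a full barrier: every load and
// store issued before it completes before any load or store issued after it.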
inline void MemoryBarrier() {
  __asm__ __volatile__("sync" : : : "memory");
}

inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  MemoryBarrier();
  *ptr = value;
}

inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
  return *ptr;
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return *ptr;
}

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  Atomic32 value = *ptr;
  MemoryBarrier();
  return value;
}

inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  MemoryBarrier();
  return *ptr;
}
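
// Usage sketch (illustrative only): the acquire/release pair above supports
// simple message passing between two threads. "payload" (a plain int) and
// "ready" (an Atomic32, initially 0) are hypothetical variables.
//
//   // Producer thread:
//   payload = 42;              // Plain store of the message.
//   Release_Store(&ready, 1);  // Barrier first, so the payload is visible
//                              // before the flag.
//
//   // Consumer thread:
//   if (Acquire_Load(&ready)) {  // Barrier after the flag load, so the read
//     int value = payload;       // below cannot move above it and must
//   }                            // observe 42.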


// 64-bit versions of the atomic ops.

inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  Atomic64 prev, tmp;
  __asm__ __volatile__(".set push\n"
                       ".set noreorder\n"
                       "1:\n"
                       "lld %0, %5\n"      // prev = *ptr
                       "bne %0, %3, 2f\n"  // if (prev != old_value) goto 2
                       "move %2, %4\n"     // tmp = new_value
                       "scd %2, %1\n"      // *ptr = tmp (with atomic check)
                       "beqz %2, 1b\n"     // start again on atomic error
                       "nop\n"             // delay slot nop
                       "2:\n"
                       ".set pop\n"
                       : "=&r" (prev), "=m" (*ptr), "=&r" (tmp)
                       : "r" (old_value), "r" (new_value), "m" (*ptr)
                       : "memory");
  return prev;
}

// Atomically store new_value into *ptr, returning the previous value held in
// *ptr. This routine implies no memory barriers.
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  Atomic64 temp, old;
  __asm__ __volatile__(".set push\n"
                       ".set noreorder\n"
                       "1:\n"
                       "lld %1, %2\n"   // old = *ptr
                       "move %0, %3\n"  // temp = new_value
                       "scd %0, %2\n"   // *ptr = temp (with atomic check)
                       "beqz %0, 1b\n"  // start again on atomic error
                       "nop\n"          // delay slot nop
                       ".set pop\n"
                       : "=&r" (temp), "=&r" (old), "=m" (*ptr)
                       : "r" (new_value), "m" (*ptr)
                       : "memory");

  return old;
}

// Atomically increment *ptr by "increment". Returns the new value of
// *ptr with the increment applied. This routine implies no memory barriers.
inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  Atomic64 temp, temp2;

  __asm__ __volatile__(
      ".set push\n"
      ".set noreorder\n"
      "1:\n"
      "lld %0, %2\n"        // temp = *ptr
      "daddu %1, %0, %3\n"  // temp2 = temp + increment
      "scd %1, %2\n"        // *ptr = temp2 (with atomic check)
      "beqz %1, 1b\n"       // start again on atomic error
      "daddu %1, %0, %3\n"  // temp2 = temp + increment
      ".set pop\n"
      : "=&r"(temp), "=&r"(temp2), "=ZC"(*ptr)
      : "Ir"(increment), "m"(*ptr)
      : "memory");
  // temp2 now holds the final value.
  return temp2;
}
248
249inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
250 Atomic64 increment) {
251 MemoryBarrier();
252 Atomic64 res = NoBarrier_AtomicIncrement(ptr, increment);
253 MemoryBarrier();
254 return res;
255}

// "Acquire" operations ensure that no later memory access can be reordered
// ahead of the operation. "Release" operations ensure that no previous memory
// access can be reordered after the operation. "Barrier" operations have both
// "Acquire" and "Release" semantics. A MemoryBarrier() has "Barrier"
// semantics, but does no memory access.
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
  MemoryBarrier();
  return res;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  MemoryBarrier();
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  MemoryBarrier();
  *ptr = value;
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return *ptr;
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  Atomic64 value = *ptr;
  MemoryBarrier();
  return value;
}

inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
  MemoryBarrier();
  return *ptr;
}

}  // namespace base
}  // namespace v8

#endif  // V8_BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_