// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file is an internal atomic implementation, use base/atomicops.h instead.
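
// Illustrative usage sketch, not part of this header: client code includes
// base/atomicops.h (which selects this implementation on MSVC/x86) and calls
// the operations through base::subtle. g_event_count is a hypothetical
// counter.
//
//   #include "base/atomicops.h"
//
//   base::subtle::Atomic32 g_event_count = 0;
//
//   void RecordEvent() {
//     base::subtle::NoBarrier_AtomicIncrement(&g_event_count, 1);
//   }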

#ifndef BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_
#define BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_

#include <windows.h>

#include <intrin.h>

#include "base/macros.h"

#if defined(ARCH_CPU_64_BITS)
// windows.h #defines this (only on x64). This causes problems because the
// public API also uses MemoryBarrier as the public name for this fence. So, on
// x64, undef it, and call its documented
// (http://msdn.microsoft.com/en-us/library/windows/desktop/ms684208.aspx)
// implementation directly.
#undef MemoryBarrier
#endif

namespace base {
namespace subtle {

inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  LONG result = _InterlockedCompareExchange(
      reinterpret_cast<volatile LONG*>(ptr),
      static_cast<LONG>(new_value),
      static_cast<LONG>(old_value));
  return static_cast<Atomic32>(result);
}
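
// Illustrative sketch, not part of this header: a typical compare-and-swap
// retry loop built on the primitive above. Names are hypothetical; kMax is an
// assumed saturation limit.
//
//   Atomic32 old_value;
//   do {
//     old_value = NoBarrier_Load(ptr);
//     if (old_value >= kMax)
//       break;  // Already saturated; leave the value unchanged.
//   } while (NoBarrier_CompareAndSwap(ptr, old_value, old_value + 1) !=
//            old_value);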

inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  LONG result = _InterlockedExchange(
      reinterpret_cast<volatile LONG*>(ptr),
      static_cast<LONG>(new_value));
  return static_cast<Atomic32>(result);
}

inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  return _InterlockedExchangeAdd(
      reinterpret_cast<volatile LONG*>(ptr),
      static_cast<LONG>(increment)) + increment;
}

inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  return Barrier_AtomicIncrement(ptr, increment);
}

#if !(defined(_MSC_VER) && _MSC_VER >= 1400)
#error "We require at least vs2005 for MemoryBarrier"
#endif
inline void MemoryBarrier() {
#if defined(ARCH_CPU_64_BITS)
  // See #undef and note at the top of this file.
  __faststorefence();
#else
  // We use MemoryBarrier from WinNT.h
  ::MemoryBarrier();
#endif
}
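
// Illustrative sketch, not part of this header: a full fence like the one
// above is needed when a store must become visible before a later load of a
// *different* location (store-load ordering), which x86 does not guarantee
// for plain stores and loads. A Dekker-style check, with hypothetical Atomic32
// flags my_flag and other_flag, might look like:
//
//   NoBarrier_Store(&my_flag, 1);
//   MemoryBarrier();  // Orders the store above before the load below.
//   if (NoBarrier_Load(&other_flag) == 0) {
//     // No contender observed; this thread may proceed.
//   }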

inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  NoBarrier_AtomicExchange(ptr, value);
  // acts as a barrier in this implementation
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;  // works w/o barrier for current Intel chips as of June 2005
  // See comments in Atomic64 version of Release_Store() below.
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return *ptr;
}

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  Atomic32 value = *ptr;
  return value;
}

inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  MemoryBarrier();
  return *ptr;
}
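
// Illustrative sketch, not part of this header: the intended pairing of the
// release/acquire operations above for publishing data between threads.
// g_payload, g_ready, ComputePayload() and UsePayload() are hypothetical.
//
//   // Thread 1
//   g_payload = ComputePayload();
//   Release_Store(&g_ready, 1);  // Publish the payload.
//
//   // Thread 2
//   if (Acquire_Load(&g_ready) != 0)
//     UsePayload(g_payload);  // Safe: acquire pairs with the release above.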

#if defined(_WIN64)

// 64-bit low-level operations on 64-bit platform.

COMPILE_ASSERT(sizeof(Atomic64) == sizeof(PVOID), atomic_word_is_atomic);

inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  PVOID result = InterlockedCompareExchangePointer(
      reinterpret_cast<volatile PVOID*>(ptr),
      reinterpret_cast<PVOID>(new_value), reinterpret_cast<PVOID>(old_value));
  return reinterpret_cast<Atomic64>(result);
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  PVOID result = InterlockedExchangePointer(
      reinterpret_cast<volatile PVOID*>(ptr),
      reinterpret_cast<PVOID>(new_value));
  return reinterpret_cast<Atomic64>(result);
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  return InterlockedExchangeAdd64(
      reinterpret_cast<volatile LONGLONG*>(ptr),
      static_cast<LONGLONG>(increment)) + increment;
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  return Barrier_AtomicIncrement(ptr, increment);
}

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
  NoBarrier_AtomicExchange(ptr, value);
  // acts as a barrier in this implementation
}
inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;  // works w/o barrier for current Intel chips as of June 2005

  // When new chips come out, check:
  // IA-32 Intel Architecture Software Developer's Manual, Volume 3:
  // System Programming Guide, Chapter 7: Multiple-processor management,
  // Section 7.2, Memory Ordering.
  // Last seen at:
  // http://developer.intel.com/design/pentium4/manuals/index_new.htm
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return *ptr;
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  Atomic64 value = *ptr;
  return value;
}

inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
  MemoryBarrier();
  return *ptr;
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

#endif  // defined(_WIN64)

}  // namespace base::subtle
}  // namespace base

#endif  // BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_