// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file is an internal atomic implementation, use base/atomicops.h instead.

#ifndef BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_
#define BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_

// Win32 Interlocked* intrinsics and the MemoryBarrier macro.
#include <windows.h>

namespace base {
namespace subtle {

15inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
16 Atomic32 old_value,
17 Atomic32 new_value) {
18 LONG result = InterlockedCompareExchange(
19 reinterpret_cast<volatile LONG*>(ptr),
20 static_cast<LONG>(new_value),
21 static_cast<LONG>(old_value));
22 return static_cast<Atomic32>(result);
23}
24
25inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
26 Atomic32 new_value) {
27 LONG result = InterlockedExchange(
28 reinterpret_cast<volatile LONG*>(ptr),
29 static_cast<LONG>(new_value));
30 return static_cast<Atomic32>(result);
31}
32
33inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
34 Atomic32 increment) {
35 return InterlockedExchangeAdd(
36 reinterpret_cast<volatile LONG*>(ptr),
37 static_cast<LONG>(increment)) + increment;
38}
39
// Atomically adds increment to *ptr and returns the new value.  No extra
// ordering is promised by the API; this implementation simply reuses the
// barrier flavor, whose Interlocked intrinsic serializes anyway.
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  return Barrier_AtomicIncrement(ptr, increment);
}

// The MemoryBarrier macro/intrinsic in WinNT.h requires VS2005 (_MSC_VER
// 1400) or later; fail the build loudly on older toolchains.
#if !(defined(_MSC_VER) && _MSC_VER >= 1400)
#error "We require at least vs2005 for MemoryBarrier"
#endif
// Issues a full hardware memory barrier.  The leading '::' selects the
// global WinNT.h MemoryBarrier rather than recursing into this function.
inline void MemoryBarrier() {
  // We use MemoryBarrier from WinNT.h
  ::MemoryBarrier();
}

53inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
54 Atomic32 old_value,
55 Atomic32 new_value) {
56 return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
57}
58
59inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
60 Atomic32 old_value,
61 Atomic32 new_value) {
62 return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
63}
64
// Plain store; no ordering guarantees beyond the volatile qualifier.
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
}

// Store with acquire semantics, implemented as an atomic exchange whose
// returned old value is deliberately discarded.
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  NoBarrier_AtomicExchange(ptr, value);
  // acts as a barrier in this implementation
}

// Store with release semantics; a plain volatile store is deemed enough
// on the targeted hardware (see the Atomic64 version for references).
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;  // works w/o barrier for current Intel chips as of June 2005
  // See comments in Atomic64 version of Release_Store() below.
}

// Plain load; no ordering guarantees beyond the volatile qualifier.
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return *ptr;
}

// Load with acquire semantics, implemented as a plain volatile read.
// NOTE(review): this relies on x86 load ordering plus the compiler not
// reordering around the volatile access — confirm for the toolchain in use.
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  Atomic32 value = *ptr;
  return value;
}

// Load preceded by a full memory barrier; the barrier must come first, so
// do not reorder these two statements.
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  MemoryBarrier();
  return *ptr;
}

#if defined(_WIN64)

// 64-bit low-level operations on 64-bit platform.

// The 64-bit operations below reinterpret Atomic64 values as PVOID so they
// can use the Interlocked*Pointer intrinsics; that is only sound when
// pointers are 64 bits wide, which this assert enforces.
COMPILE_ASSERT(sizeof(Atomic64) == sizeof(PVOID), atomic_word_is_atomic);

99inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
100 Atomic64 old_value,
101 Atomic64 new_value) {
102 PVOID result = InterlockedCompareExchangePointer(
103 reinterpret_cast<volatile PVOID*>(ptr),
104 reinterpret_cast<PVOID>(new_value), reinterpret_cast<PVOID>(old_value));
105 return reinterpret_cast<Atomic64>(result);
106}
107
108inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
109 Atomic64 new_value) {
110 PVOID result = InterlockedExchangePointer(
111 reinterpret_cast<volatile PVOID*>(ptr),
112 reinterpret_cast<PVOID>(new_value));
113 return reinterpret_cast<Atomic64>(result);
114}
115
116inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
117 Atomic64 increment) {
118 return InterlockedExchangeAdd64(
119 reinterpret_cast<volatile LONGLONG*>(ptr),
120 static_cast<LONGLONG>(increment)) + increment;
121}
122
// 64-bit atomic add returning the new value; forwards to the barrier
// flavor since the Interlocked intrinsic serializes regardless.
inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  return Barrier_AtomicIncrement(ptr, increment);
}

// Plain 64-bit store; no ordering guarantees beyond volatile.
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
}

// 64-bit store with acquire semantics, via an atomic exchange whose
// returned old value is deliberately discarded.
inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
  NoBarrier_AtomicExchange(ptr, value);
  // acts as a barrier in this implementation
}

// 64-bit store with release semantics; a plain volatile store is deemed
// sufficient on the hardware targeted when this was written.
inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;  // works w/o barrier for current Intel chips as of June 2005

  // When new chips come out, check:
  //  IA-32 Intel Architecture Software Developer's Manual, Volume 3:
  //  System Programming Guide, Chapter 7: Multiple-processor management,
  //  Section 7.2, Memory Ordering.
  // Last seen at:
  //  http://developer.intel.com/design/pentium4/manuals/index_new.htm
}

// Plain 64-bit load; no ordering guarantees beyond volatile.
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return *ptr;
}

// 64-bit load with acquire semantics, implemented as a plain volatile read.
// NOTE(review): relies on x86-64 load ordering plus the compiler not
// reordering around the volatile access — confirm for the toolchain in use.
inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  Atomic64 value = *ptr;
  return value;
}

// 64-bit load preceded by a full memory barrier; the barrier must come
// first, so do not reorder these two statements.
inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
  MemoryBarrier();
  return *ptr;
}

#endif  // defined(_WIN64)

}  // namespace base::subtle
}  // namespace base

#endif  // BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_
