/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_READ_BARRIER_INL_H_
#define ART_RUNTIME_READ_BARRIER_INL_H_

#include "read_barrier.h"

#include "gc/accounting/read_barrier_table.h"
#include "gc/collector/concurrent_copying-inl.h"
#include "gc/heap.h"
#include "mirror/object-readbarrier-inl.h"
#include "mirror/object_reference.h"
#include "mirror/reference.h"
#include "runtime.h"

namespace art {

// Disabled for performance reasons.
static constexpr bool kCheckDebugDisallowReadBarrierCount = false;

template <typename MirrorType, bool kIsVolatile, ReadBarrierOption kReadBarrierOption,
          bool kAlwaysUpdateField>
inline MirrorType* ReadBarrier::Barrier(
    mirror::Object* obj, MemberOffset offset, mirror::HeapReference<MirrorType>* ref_addr) {
  constexpr bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
  if (kUseReadBarrier && with_read_barrier) {
    if (kCheckDebugDisallowReadBarrierCount) {
      Thread* const self = Thread::Current();
      if (self != nullptr) {
        CHECK_EQ(self->GetDebugDisallowReadBarrierCount(), 0u);
      }
    }
    if (kUseBakerReadBarrier) {
      // fake_address_dependency (must be zero) is used to create an artificial data dependency
      // from the is_gray load to the ref field (ptr) load, to avoid needing a load-load barrier
      // between the two.
      uintptr_t fake_address_dependency;
      bool is_gray = IsGray(obj, &fake_address_dependency);
      if (kEnableReadBarrierInvariantChecks) {
        CHECK_EQ(fake_address_dependency, 0U) << obj << " rb_state=" << obj->GetReadBarrierState();
      }
      ref_addr = reinterpret_cast<mirror::HeapReference<MirrorType>*>(
          fake_address_dependency | reinterpret_cast<uintptr_t>(ref_addr));
      MirrorType* ref = ref_addr->template AsMirrorPtr<kIsVolatile>();
      MirrorType* old_ref = ref;
      if (is_gray) {
        // Slow-path.
        ref = reinterpret_cast<MirrorType*>(Mark(ref));
        // If kAlwaysUpdateField is true, update the field atomically. This may fail if the
        // mutator updates it before us, but that is OK.
        if (kAlwaysUpdateField && ref != old_ref) {
          obj->CasFieldObjectWithoutWriteBarrier<false, false>(offset,
                                                               old_ref,
                                                               ref,
                                                               CASMode::kStrong,
                                                               std::memory_order_release);
        }
      }
      AssertToSpaceInvariant(obj, offset, ref);
      return ref;
    } else if (kUseBrooksReadBarrier) {
      // To be implemented.
      return ref_addr->template AsMirrorPtr<kIsVolatile>();
    } else if (kUseTableLookupReadBarrier) {
      MirrorType* ref = ref_addr->template AsMirrorPtr<kIsVolatile>();
      MirrorType* old_ref = ref;
      // The heap or the collector can be null at startup. TODO: avoid the need for this null check.
      gc::Heap* heap = Runtime::Current()->GetHeap();
      if (heap != nullptr && heap->GetReadBarrierTable()->IsSet(old_ref)) {
        ref = reinterpret_cast<MirrorType*>(Mark(old_ref));
        // Update the field atomically. This may fail if the mutator updates it before us, but
        // that is OK.
        if (ref != old_ref) {
          obj->CasFieldObjectWithoutWriteBarrier<false, false>(offset,
                                                               old_ref,
                                                               ref,
                                                               CASMode::kStrong,
                                                               std::memory_order_release);
        }
      }
      AssertToSpaceInvariant(obj, offset, ref);
      return ref;
    } else {
      LOG(FATAL) << "Unexpected read barrier type";
      UNREACHABLE();
    }
  } else {
    // No read barrier.
    return ref_addr->template AsMirrorPtr<kIsVolatile>();
  }
}
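
// A minimal sketch of how a field getter might funnel into Barrier(). The helper below is
// hypothetical (the real entry points live in mirror/object-inl.h); it only illustrates the
// contract: pass the holding object, the field offset, and the raw address of the
// HeapReference<> slot inside that object.
//
//   template <typename MirrorType>
//   MirrorType* GetFieldObjectExample(mirror::Object* obj, MemberOffset offset) {
//     auto* ref_addr = reinterpret_cast<mirror::HeapReference<MirrorType>*>(
//         reinterpret_cast<uint8_t*>(obj) + offset.Uint32Value());
//     return ReadBarrier::Barrier<MirrorType, /*kIsVolatile=*/ false, kWithReadBarrier,
//                                 /*kAlwaysUpdateField=*/ false>(obj, offset, ref_addr);
//   }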

template <typename MirrorType, ReadBarrierOption kReadBarrierOption>
inline MirrorType* ReadBarrier::BarrierForRoot(MirrorType** root,
                                               GcRootSource* gc_root_source) {
  MirrorType* ref = *root;
  const bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
  if (kUseReadBarrier && with_read_barrier) {
    if (kIsDebugBuild) {
      Thread* const self = Thread::Current();
      if (self != nullptr) {
        CHECK_EQ(self->GetDebugDisallowReadBarrierCount(), 0u);
      }
    }
    if (kUseBakerReadBarrier) {
      // TODO: separate the read barrier code from the collector code more.
      Thread* self = Thread::Current();
      if (self != nullptr && self->GetIsGcMarking()) {
        ref = reinterpret_cast<MirrorType*>(Mark(ref));
      }
      AssertToSpaceInvariant(gc_root_source, ref);
      return ref;
    } else if (kUseBrooksReadBarrier) {
      // To be implemented.
      return ref;
    } else if (kUseTableLookupReadBarrier) {
      Thread* self = Thread::Current();
      if (self != nullptr &&
          self->GetIsGcMarking() &&
          Runtime::Current()->GetHeap()->GetReadBarrierTable()->IsSet(ref)) {
        MirrorType* old_ref = ref;
        ref = reinterpret_cast<MirrorType*>(Mark(old_ref));
        // Update the root atomically. This may fail if the mutator updates it before us, but
        // that is OK.
        if (ref != old_ref) {
          Atomic<MirrorType*>* atomic_root = reinterpret_cast<Atomic<MirrorType*>*>(root);
          atomic_root->CompareAndSetStrongRelaxed(old_ref, ref);
        }
      }
      AssertToSpaceInvariant(gc_root_source, ref);
      return ref;
    } else {
      LOG(FATAL) << "Unexpected read barrier type";
      UNREACHABLE();
    }
  } else {
    return ref;
  }
}
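
// A minimal sketch of a caller for the raw-pointer overload above, assuming a hypothetical
// root visitor (GcRoot<>, the usual consumer, goes through the CompressedReference overload
// that follows instead):
//
//   mirror::Object* VisitRootExample(mirror::Object** slot) {
//     // Reads the root slot and, if the concurrent copying GC is marking, returns (and
//     // publishes) the to-space reference.
//     return ReadBarrier::BarrierForRoot<mirror::Object, kWithReadBarrier>(
//         slot, /*gc_root_source=*/ nullptr);
//   }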

// TODO: Reduce copy paste
template <typename MirrorType, ReadBarrierOption kReadBarrierOption>
inline MirrorType* ReadBarrier::BarrierForRoot(mirror::CompressedReference<MirrorType>* root,
                                               GcRootSource* gc_root_source) {
  MirrorType* ref = root->AsMirrorPtr();
  const bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
  if (with_read_barrier && kUseBakerReadBarrier) {
    // TODO: separate the read barrier code from the collector code more.
    Thread* self = Thread::Current();
    if (self != nullptr && self->GetIsGcMarking()) {
      ref = reinterpret_cast<MirrorType*>(Mark(ref));
    }
    AssertToSpaceInvariant(gc_root_source, ref);
    return ref;
  } else if (with_read_barrier && kUseBrooksReadBarrier) {
    // To be implemented.
    return ref;
  } else if (with_read_barrier && kUseTableLookupReadBarrier) {
    Thread* self = Thread::Current();
    if (self != nullptr &&
        self->GetIsGcMarking() &&
        Runtime::Current()->GetHeap()->GetReadBarrierTable()->IsSet(ref)) {
      auto old_ref = mirror::CompressedReference<MirrorType>::FromMirrorPtr(ref);
      ref = reinterpret_cast<MirrorType*>(Mark(ref));
      auto new_ref = mirror::CompressedReference<MirrorType>::FromMirrorPtr(ref);
      // Update the root atomically. This may fail if the mutator updates it before us, but
      // that is OK.
      if (new_ref.AsMirrorPtr() != old_ref.AsMirrorPtr()) {
        auto* atomic_root =
            reinterpret_cast<Atomic<mirror::CompressedReference<MirrorType>>*>(root);
        atomic_root->CompareAndSetStrongRelaxed(old_ref, new_ref);
      }
    }
    AssertToSpaceInvariant(gc_root_source, ref);
    return ref;
  } else {
    return ref;
  }
}

template <typename MirrorType>
inline MirrorType* ReadBarrier::IsMarked(MirrorType* ref) {
  // Only read-barrier configurations can have mutators run while
  // the GC is marking.
  if (!kUseReadBarrier) {
    return ref;
  }
  // IsMarked does not handle null, so handle it here.
  if (ref == nullptr) {
    return nullptr;
  }
  // IsMarked should only be called when the GC is marking.
  if (!Thread::Current()->GetIsGcMarking()) {
    return ref;
  }

  return reinterpret_cast<MirrorType*>(
      Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->IsMarked(ref));
}
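
// A minimal sketch of the intended use, with a hypothetical weak-reference sweep as the
// caller (illustrative only):
//
//   mirror::Object* SweepWeakExample(mirror::Object* referent) {
//     mirror::Object* marked = ReadBarrier::IsMarked(referent);
//     // While the GC is marking, nullptr means the referent has not been marked, so the
//     // weak slot can be cleared; otherwise 'marked' is the reference to store back
//     // (possibly the to-space copy).
//     return marked;
//   }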

inline bool ReadBarrier::IsDuringStartup() {
  gc::Heap* heap = Runtime::Current()->GetHeap();
  if (heap == nullptr) {
    // During startup, the heap can be null.
    return true;
  }
  if (heap->CurrentCollectorType() != gc::kCollectorTypeCC) {
    // CC isn't running.
    return true;
  }
  gc::collector::ConcurrentCopying* collector = heap->ConcurrentCopyingCollector();
  if (collector == nullptr) {
    // During startup, the collector can be null.
    return true;
  }
  return false;
}

inline void ReadBarrier::AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset,
                                                mirror::Object* ref) {
  if (kEnableToSpaceInvariantChecks) {
    if (ref == nullptr || IsDuringStartup()) {
      return;
    }
    Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->
        AssertToSpaceInvariant(obj, offset, ref);
  }
}

inline void ReadBarrier::AssertToSpaceInvariant(GcRootSource* gc_root_source,
                                                mirror::Object* ref) {
  if (kEnableToSpaceInvariantChecks) {
    if (ref == nullptr || IsDuringStartup()) {
      return;
    }
    Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->
        AssertToSpaceInvariant(gc_root_source, ref);
  }
}

inline mirror::Object* ReadBarrier::Mark(mirror::Object* obj) {
  return Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->MarkFromReadBarrier(obj);
}

inline bool ReadBarrier::IsGray(mirror::Object* obj, uintptr_t* fake_address_dependency) {
  return obj->GetReadBarrierState(fake_address_dependency) == kGrayState;
}

inline bool ReadBarrier::IsGray(mirror::Object* obj) {
  // Use a load-acquire to load the read barrier bit to avoid reordering with the subsequent load.
  // GetReadBarrierStateAcquire() has load-acquire semantics.
  return obj->GetReadBarrierStateAcquire() == kGrayState;
}
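
// A minimal sketch of the fake-address-dependency idiom consumed in Barrier() above, with
// hypothetical names (LoadLockWord, Ref, real_slot) standing in for the real lock-word read.
// Illustrative only: the production code manufactures the zero in inline assembly so the
// compiler cannot fold it away.
//
//   uint32_t state = LoadLockWord(obj);                     // the rb_state load
//   uintptr_t dep = static_cast<uintptr_t>(state ^ state);  // always 0, data-depends on state
//   auto* slot = reinterpret_cast<Ref*>(dep | reinterpret_cast<uintptr_t>(real_slot));
//   Ref value = *slot;  // ordered after the rb_state load on CPUs that honor address
//                       // dependencies (e.g. ARM), with no explicit load-load fence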

}  // namespace art

#endif  // ART_RUNTIME_READ_BARRIER_INL_H_