/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_READ_BARRIER_INL_H_
#define ART_RUNTIME_READ_BARRIER_INL_H_

#include "read_barrier.h"

#include "gc/collector/concurrent_copying-inl.h"
#include "gc/heap.h"
#include "mirror/object_reference.h"
#include "mirror/object-readbarrier-inl.h"
#include "mirror/reference.h"
#include "runtime.h"
#include "utils.h"

namespace art {

// Disabled for performance reasons.
static constexpr bool kCheckDebugDisallowReadBarrierCount = false;

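// Read barrier for a heap reference field of |obj| at |offset|; |ref_addr| is the address of the
// field. Dispatches on the configured barrier kind (Baker, Brooks, or table-lookup) and returns
// the possibly updated reference. When kAlwaysUpdateField is true, the Baker slow path also
// writes the marked reference back into the field with a CAS.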
template <typename MirrorType, ReadBarrierOption kReadBarrierOption, bool kAlwaysUpdateField>
inline MirrorType* ReadBarrier::Barrier(
    mirror::Object* obj, MemberOffset offset, mirror::HeapReference<MirrorType>* ref_addr) {
  constexpr bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
  if (kUseReadBarrier && with_read_barrier) {
    if (kCheckDebugDisallowReadBarrierCount) {
      Thread* const self = Thread::Current();
      if (self != nullptr) {
        CHECK_EQ(self->GetDebugDisallowReadBarrierCount(), 0u);
      }
    }
    if (kUseBakerReadBarrier) {
      // fake_address_dependency (must be zero) is used to create an artificial data dependency
      // from the is_gray load to the ref field (ptr) load, to avoid needing a load-load barrier
      // between the two.
      uintptr_t fake_address_dependency;
      bool is_gray = IsGray(obj, &fake_address_dependency);
      if (kEnableReadBarrierInvariantChecks) {
        CHECK_EQ(fake_address_dependency, 0U) << obj << " rb_state=" << obj->GetReadBarrierState();
      }
      ref_addr = reinterpret_cast<mirror::HeapReference<MirrorType>*>(
          fake_address_dependency | reinterpret_cast<uintptr_t>(ref_addr));
      MirrorType* ref = ref_addr->AsMirrorPtr();
      MirrorType* old_ref = ref;
      if (is_gray) {
        // Slow-path.
        ref = reinterpret_cast<MirrorType*>(Mark(ref));
        // If kAlwaysUpdateField is true, update the field atomically. This may fail if the
        // mutator updates the field before us, but that is OK.
        if (kAlwaysUpdateField && ref != old_ref) {
          obj->CasFieldStrongRelaxedObjectWithoutWriteBarrier<false, false>(
              offset, old_ref, ref);
        }
      }
      AssertToSpaceInvariant(obj, offset, ref);
      return ref;
    } else if (kUseBrooksReadBarrier) {
      // To be implemented.
      return ref_addr->AsMirrorPtr();
    } else if (kUseTableLookupReadBarrier) {
      MirrorType* ref = ref_addr->AsMirrorPtr();
      MirrorType* old_ref = ref;
      // The heap or the collector can be null at startup. TODO: avoid the need for this null check.
      gc::Heap* heap = Runtime::Current()->GetHeap();
      if (heap != nullptr && heap->GetReadBarrierTable()->IsSet(old_ref)) {
        ref = reinterpret_cast<MirrorType*>(Mark(old_ref));
        // Update the field atomically. This may fail if the mutator updates the field before us,
        // but that is OK.
        if (ref != old_ref) {
          obj->CasFieldStrongRelaxedObjectWithoutWriteBarrier<false, false>(
              offset, old_ref, ref);
        }
      }
      AssertToSpaceInvariant(obj, offset, ref);
      return ref;
    } else {
      LOG(FATAL) << "Unexpected read barrier type";
      UNREACHABLE();
    }
  } else {
    // No read barrier.
    return ref_addr->AsMirrorPtr();
  }
}

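// Read barrier for a GC root slot (|root|) holding a raw MirrorType*. While the GC is marking,
// the referent is marked and the possibly updated reference is returned; the table-lookup path
// additionally CASes the updated reference back into the root slot.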
template <typename MirrorType, ReadBarrierOption kReadBarrierOption>
inline MirrorType* ReadBarrier::BarrierForRoot(MirrorType** root,
                                               GcRootSource* gc_root_source) {
  MirrorType* ref = *root;
  const bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
  if (kUseReadBarrier && with_read_barrier) {
    if (kIsDebugBuild) {
      Thread* const self = Thread::Current();
      if (self != nullptr) {
        CHECK_EQ(self->GetDebugDisallowReadBarrierCount(), 0u);
      }
    }
    if (kUseBakerReadBarrier) {
      // TODO: separate the read barrier code from the collector code more.
      Thread* self = Thread::Current();
      if (self != nullptr && self->GetIsGcMarking()) {
        ref = reinterpret_cast<MirrorType*>(Mark(ref));
      }
      AssertToSpaceInvariant(gc_root_source, ref);
      return ref;
    } else if (kUseBrooksReadBarrier) {
      // To be implemented.
      return ref;
    } else if (kUseTableLookupReadBarrier) {
      Thread* self = Thread::Current();
      if (self != nullptr &&
          self->GetIsGcMarking() &&
          Runtime::Current()->GetHeap()->GetReadBarrierTable()->IsSet(ref)) {
        MirrorType* old_ref = ref;
        ref = reinterpret_cast<MirrorType*>(Mark(old_ref));
        // Update the root atomically. This may fail if the mutator updates it before us, but
        // that is OK.
        if (ref != old_ref) {
          Atomic<mirror::Object*>* atomic_root = reinterpret_cast<Atomic<mirror::Object*>*>(root);
          atomic_root->CompareExchangeStrongRelaxed(old_ref, ref);
        }
      }
      AssertToSpaceInvariant(gc_root_source, ref);
      return ref;
    } else {
      LOG(FATAL) << "Unexpected read barrier type";
      UNREACHABLE();
    }
  } else {
    return ref;
  }
}

// TODO: Reduce copy paste
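// Same as above, but for a root slot holding a compressed reference.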
Hiroshi Yamauchicc78f3f2015-12-11 15:51:04 -0800147template <typename MirrorType, ReadBarrierOption kReadBarrierOption>
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -0700148inline MirrorType* ReadBarrier::BarrierForRoot(mirror::CompressedReference<MirrorType>* root,
149 GcRootSource* gc_root_source) {
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -0700150 MirrorType* ref = root->AsMirrorPtr();
151 const bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
152 if (with_read_barrier && kUseBakerReadBarrier) {
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -0700153 // TODO: separate the read barrier code from the collector code more.
Hiroshi Yamauchi00370822015-08-18 14:47:25 -0700154 Thread* self = Thread::Current();
155 if (self != nullptr && self->GetIsGcMarking()) {
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -0700156 ref = reinterpret_cast<MirrorType*>(Mark(ref));
157 }
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -0700158 AssertToSpaceInvariant(gc_root_source, ref);
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -0700159 return ref;
160 } else if (with_read_barrier && kUseBrooksReadBarrier) {
161 // To be implemented.
162 return ref;
163 } else if (with_read_barrier && kUseTableLookupReadBarrier) {
Hiroshi Yamauchifa755182015-09-30 20:12:11 -0700164 Thread* self = Thread::Current();
165 if (self != nullptr &&
166 self->GetIsGcMarking() &&
167 Runtime::Current()->GetHeap()->GetReadBarrierTable()->IsSet(ref)) {
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -0700168 auto old_ref = mirror::CompressedReference<MirrorType>::FromMirrorPtr(ref);
169 ref = reinterpret_cast<MirrorType*>(Mark(ref));
170 auto new_ref = mirror::CompressedReference<MirrorType>::FromMirrorPtr(ref);
171 // Update the field atomically. This may fail if mutator updates before us, but it's ok.
Hiroshi Yamauchifa755182015-09-30 20:12:11 -0700172 if (new_ref.AsMirrorPtr() != old_ref.AsMirrorPtr()) {
173 auto* atomic_root =
174 reinterpret_cast<Atomic<mirror::CompressedReference<MirrorType>>*>(root);
Hiroshi Yamauchifed3e2f2015-10-20 11:11:56 -0700175 atomic_root->CompareExchangeStrongRelaxed(old_ref, new_ref);
Hiroshi Yamauchifa755182015-09-30 20:12:11 -0700176 }
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -0700177 }
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -0700178 AssertToSpaceInvariant(gc_root_source, ref);
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -0700179 return ref;
180 } else {
181 return ref;
182 }
183}
184
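// Forwards |ref| to the concurrent copying collector's IsMarked() when the GC is marking.
// Returns |ref| unchanged when read barriers are disabled or the GC is not marking, and null
// for a null |ref|.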
template <typename MirrorType>
inline MirrorType* ReadBarrier::IsMarked(MirrorType* ref) {
  // Only read-barrier configurations can have mutators run while
  // the GC is marking.
  if (!kUseReadBarrier) {
    return ref;
  }
  // IsMarked does not handle null, so handle it here.
  if (ref == nullptr) {
    return nullptr;
  }
  // IsMarked should only be called when the GC is marking.
  if (!Thread::Current()->GetIsGcMarking()) {
    return ref;
  }

  return reinterpret_cast<MirrorType*>(
      Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->IsMarked(ref));
}

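// Returns true if the heap or the concurrent copying collector is not yet available (e.g. during
// runtime startup), or if the current collector type is not CC.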
inline bool ReadBarrier::IsDuringStartup() {
  gc::Heap* heap = Runtime::Current()->GetHeap();
  if (heap == nullptr) {
    // During startup, the heap can be null.
    return true;
  }
  if (heap->CurrentCollectorType() != gc::kCollectorTypeCC) {
    // CC isn't running.
    return true;
  }
  gc::collector::ConcurrentCopying* collector = heap->ConcurrentCopyingCollector();
  if (collector == nullptr) {
    // During startup, the collector can be null.
    return true;
  }
  return false;
}

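// When to-space invariant checks are enabled, asserts that |ref|, read from |obj| at |offset|,
// satisfies the to-space invariant. Skipped for null references and during startup.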
inline void ReadBarrier::AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset,
                                                mirror::Object* ref) {
  if (kEnableToSpaceInvariantChecks) {
    if (ref == nullptr || IsDuringStartup()) {
      return;
    }
    Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->
        AssertToSpaceInvariant(obj, offset, ref);
  }
}

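// Same check for a reference read from a GC root, described by |gc_root_source|.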
inline void ReadBarrier::AssertToSpaceInvariant(GcRootSource* gc_root_source,
                                                mirror::Object* ref) {
  if (kEnableToSpaceInvariantChecks) {
    if (ref == nullptr || IsDuringStartup()) {
      return;
    }
    Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->
        AssertToSpaceInvariant(gc_root_source, ref);
  }
}

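// Read barrier slow path: marks |obj| through the concurrent copying collector's
// MarkFromReadBarrier() and returns the result.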
inline mirror::Object* ReadBarrier::Mark(mirror::Object* obj) {
  return Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->MarkFromReadBarrier(obj);
}

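// Returns whether |obj|'s read barrier state is gray. |*fake_address_dependency| is produced by
// GetReadBarrierState() and is used by callers to order the subsequent reference load after this
// load (see Barrier() above).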
inline bool ReadBarrier::IsGray(mirror::Object* obj, uintptr_t* fake_address_dependency) {
  return obj->GetReadBarrierState(fake_address_dependency) == gray_state_;
}

inline bool ReadBarrier::IsGray(mirror::Object* obj) {
  // Use a load-acquire to load the read barrier bit to avoid reordering with the subsequent load.
  // GetReadBarrierStateAcquire() has load-acquire semantics.
  return obj->GetReadBarrierStateAcquire() == gray_state_;
}

}  // namespace art

#endif  // ART_RUNTIME_READ_BARRIER_INL_H_