/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_READ_BARRIER_INL_H_
#define ART_RUNTIME_READ_BARRIER_INL_H_

#include "read_barrier.h"

#include "base/utils.h"
#include "gc/accounting/read_barrier_table.h"
#include "gc/collector/concurrent_copying-inl.h"
#include "gc/heap.h"
#include "mirror/object-readbarrier-inl.h"
#include "mirror/object_reference.h"
#include "mirror/reference.h"
#include "runtime.h"

namespace art {

// Disabled for performance reasons.
static constexpr bool kCheckDebugDisallowReadBarrierCount = false;

template <typename MirrorType, bool kIsVolatile, ReadBarrierOption kReadBarrierOption,
          bool kAlwaysUpdateField>
inline MirrorType* ReadBarrier::Barrier(
    mirror::Object* obj, MemberOffset offset, mirror::HeapReference<MirrorType>* ref_addr) {
  constexpr bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
  if (kUseReadBarrier && with_read_barrier) {
    if (kCheckDebugDisallowReadBarrierCount) {
      Thread* const self = Thread::Current();
      if (self != nullptr) {
        CHECK_EQ(self->GetDebugDisallowReadBarrierCount(), 0u);
      }
    }
    if (kUseBakerReadBarrier) {
      // fake_address_dependency (which must be zero) is used to create an artificial data
      // dependency from the is_gray load to the ref field (ptr) load, so that no load-load
      // barrier is needed between the two.
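      // A hedged sketch of the idea, not the code ART actually emits: on a weakly-ordered
      // CPU such as ARM, XOR-ing the loaded state with itself yields zero while keeping a
      // data dependency alive, roughly:
      //
      //   ldr w1, [obj, #monitor_offset]  // load rb_state (is_gray)
      //   eor w2, w1, w1                  // w2 == 0, but depends on w1
      //   ldr w3, [ref_addr, w2]          // ref load address-depends on the state load
      //
      // Because the second load's address depends on the first load's value, the hardware
      // orders the two loads without a dmb instruction.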
      uintptr_t fake_address_dependency;
      bool is_gray = IsGray(obj, &fake_address_dependency);
      if (kEnableReadBarrierInvariantChecks) {
        CHECK_EQ(fake_address_dependency, 0U) << obj << " rb_state=" << obj->GetReadBarrierState();
      }
      ref_addr = reinterpret_cast<mirror::HeapReference<MirrorType>*>(
          fake_address_dependency | reinterpret_cast<uintptr_t>(ref_addr));
      MirrorType* ref = ref_addr->template AsMirrorPtr<kIsVolatile>();
      MirrorType* old_ref = ref;
      if (is_gray) {
        // Slow-path.
        ref = reinterpret_cast<MirrorType*>(Mark(ref));
        // If kAlwaysUpdateField is true, update the field atomically. This may fail if the
        // mutator updates the field before us, but that is OK.
        if (kAlwaysUpdateField && ref != old_ref) {
          obj->CasFieldStrongReleaseObjectWithoutWriteBarrier<false, false>(
              offset, old_ref, ref);
        }
      }
      AssertToSpaceInvariant(obj, offset, ref);
      return ref;
    } else if (kUseBrooksReadBarrier) {
      // To be implemented.
      return ref_addr->template AsMirrorPtr<kIsVolatile>();
    } else if (kUseTableLookupReadBarrier) {
      MirrorType* ref = ref_addr->template AsMirrorPtr<kIsVolatile>();
      MirrorType* old_ref = ref;
      // The heap or the collector can be null at startup. TODO: avoid the need for this null check.
      gc::Heap* heap = Runtime::Current()->GetHeap();
      if (heap != nullptr && heap->GetReadBarrierTable()->IsSet(old_ref)) {
        ref = reinterpret_cast<MirrorType*>(Mark(old_ref));
        // Update the field atomically. This may fail if the mutator updates the field before
        // us, but that is OK.
        if (ref != old_ref) {
          obj->CasFieldStrongReleaseObjectWithoutWriteBarrier<false, false>(
              offset, old_ref, ref);
        }
      }
      AssertToSpaceInvariant(obj, offset, ref);
      return ref;
    } else {
      LOG(FATAL) << "Unexpected read barrier type";
      UNREACHABLE();
    }
  } else {
    // No read barrier.
    return ref_addr->template AsMirrorPtr<kIsVolatile>();
  }
}
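// A hedged caller sketch (a hypothetical simplification; the real entry point lives in
// mirror/object-inl.h): reference field getters are the typical way Barrier() is reached,
// roughly:
//
//   template<class T, ReadBarrierOption kReadBarrierOption>
//   inline T* Object::GetFieldObject(MemberOffset field_offset) {
//     mirror::HeapReference<T>* addr = GetFieldObjectReferenceAddr<T>(field_offset);
//     return ReadBarrier::Barrier<T, /*kIsVolatile=*/ false, kReadBarrierOption>(
//         this, field_offset, addr);
//   }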

template <typename MirrorType, ReadBarrierOption kReadBarrierOption>
inline MirrorType* ReadBarrier::BarrierForRoot(MirrorType** root,
                                               GcRootSource* gc_root_source) {
  MirrorType* ref = *root;
  const bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
  if (kUseReadBarrier && with_read_barrier) {
    if (kIsDebugBuild) {
      Thread* const self = Thread::Current();
      if (self != nullptr) {
        CHECK_EQ(self->GetDebugDisallowReadBarrierCount(), 0u);
      }
    }
    if (kUseBakerReadBarrier) {
      // TODO: separate the read barrier code from the collector code more.
      Thread* self = Thread::Current();
      if (self != nullptr && self->GetIsGcMarking()) {
        ref = reinterpret_cast<MirrorType*>(Mark(ref));
      }
      AssertToSpaceInvariant(gc_root_source, ref);
      return ref;
    } else if (kUseBrooksReadBarrier) {
      // To be implemented.
      return ref;
    } else if (kUseTableLookupReadBarrier) {
      Thread* self = Thread::Current();
      if (self != nullptr &&
          self->GetIsGcMarking() &&
          Runtime::Current()->GetHeap()->GetReadBarrierTable()->IsSet(ref)) {
        MirrorType* old_ref = ref;
        ref = reinterpret_cast<MirrorType*>(Mark(old_ref));
        // Update the root atomically. This may fail if the mutator updates it before us,
        // but that is OK.
        if (ref != old_ref) {
          Atomic<mirror::Object*>* atomic_root = reinterpret_cast<Atomic<mirror::Object*>*>(root);
          atomic_root->CompareAndSetStrongRelaxed(old_ref, ref);
        }
      }
      AssertToSpaceInvariant(gc_root_source, ref);
      return ref;
    } else {
      LOG(FATAL) << "Unexpected read barrier type";
      UNREACHABLE();
    }
  } else {
    return ref;
  }
}

// TODO: Reduce copy-paste.
template <typename MirrorType, ReadBarrierOption kReadBarrierOption>
inline MirrorType* ReadBarrier::BarrierForRoot(mirror::CompressedReference<MirrorType>* root,
                                               GcRootSource* gc_root_source) {
  MirrorType* ref = root->AsMirrorPtr();
  const bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
  if (with_read_barrier && kUseBakerReadBarrier) {
    // TODO: separate the read barrier code from the collector code more.
    Thread* self = Thread::Current();
    if (self != nullptr && self->GetIsGcMarking()) {
      ref = reinterpret_cast<MirrorType*>(Mark(ref));
    }
    AssertToSpaceInvariant(gc_root_source, ref);
    return ref;
  } else if (with_read_barrier && kUseBrooksReadBarrier) {
    // To be implemented.
    return ref;
  } else if (with_read_barrier && kUseTableLookupReadBarrier) {
    Thread* self = Thread::Current();
    if (self != nullptr &&
        self->GetIsGcMarking() &&
        Runtime::Current()->GetHeap()->GetReadBarrierTable()->IsSet(ref)) {
      auto old_ref = mirror::CompressedReference<MirrorType>::FromMirrorPtr(ref);
      ref = reinterpret_cast<MirrorType*>(Mark(ref));
      auto new_ref = mirror::CompressedReference<MirrorType>::FromMirrorPtr(ref);
      // Update the root atomically. This may fail if the mutator updates it before us,
      // but that is OK.
      if (new_ref.AsMirrorPtr() != old_ref.AsMirrorPtr()) {
        auto* atomic_root =
            reinterpret_cast<Atomic<mirror::CompressedReference<MirrorType>>*>(root);
        atomic_root->CompareAndSetStrongRelaxed(old_ref, new_ref);
      }
    }
    AssertToSpaceInvariant(gc_root_source, ref);
    return ref;
  } else {
    return ref;
  }
}
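// A hedged caller sketch (see gc_root.h for the real thing): GcRoot<T> stores a
// CompressedReference, so GcRoot<T>::Read() typically reaches the overload above,
// roughly:
//
//   template<class MirrorType>
//   template<ReadBarrierOption kReadBarrierOption>
//   inline MirrorType* GcRoot<MirrorType>::Read(GcRootSource* gc_root_source) const {
//     return down_cast<MirrorType*>(
//         ReadBarrier::BarrierForRoot<mirror::Object, kReadBarrierOption>(
//             &root_, gc_root_source));
//   }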

template <typename MirrorType>
inline MirrorType* ReadBarrier::IsMarked(MirrorType* ref) {
  // Only read-barrier configurations can have mutators run while
  // the GC is marking.
  if (!kUseReadBarrier) {
    return ref;
  }
  // IsMarked does not handle null, so handle it here.
  if (ref == nullptr) {
    return nullptr;
  }
  // IsMarked should only be called when the GC is marking.
  if (!Thread::Current()->GetIsGcMarking()) {
    return ref;
  }

  return reinterpret_cast<MirrorType*>(
      Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->IsMarked(ref));
}
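// A hedged usage sketch (hypothetical caller, not taken from this file): code that holds
// references weakly can use IsMarked() during concurrent marking to test whether a
// referent is still reachable, e.g.:
//
//   mirror::Object* referent = weak_root.Read<kWithoutReadBarrier>();
//   if (ReadBarrier::IsMarked(referent) == nullptr) {
//     // Not marked: the referent is dead, so the weak reference can be cleared.
//   }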

inline bool ReadBarrier::IsDuringStartup() {
  gc::Heap* heap = Runtime::Current()->GetHeap();
  if (heap == nullptr) {
    // During startup, the heap can be null.
    return true;
  }
  if (heap->CurrentCollectorType() != gc::kCollectorTypeCC) {
    // CC isn't running.
    return true;
  }
  gc::collector::ConcurrentCopying* collector = heap->ConcurrentCopyingCollector();
  if (collector == nullptr) {
    // During startup, the collector can be null.
    return true;
  }
  return false;
}

inline void ReadBarrier::AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset,
                                                mirror::Object* ref) {
  if (kEnableToSpaceInvariantChecks) {
    if (ref == nullptr || IsDuringStartup()) {
      return;
    }
    Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->
        AssertToSpaceInvariant(obj, offset, ref);
  }
}

inline void ReadBarrier::AssertToSpaceInvariant(GcRootSource* gc_root_source,
                                                mirror::Object* ref) {
  if (kEnableToSpaceInvariantChecks) {
    if (ref == nullptr || IsDuringStartup()) {
      return;
    }
    Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->
        AssertToSpaceInvariant(gc_root_source, ref);
  }
}

inline mirror::Object* ReadBarrier::Mark(mirror::Object* obj) {
  return Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->MarkFromReadBarrier(obj);
}

inline bool ReadBarrier::IsGray(mirror::Object* obj, uintptr_t* fake_address_dependency) {
  return obj->GetReadBarrierState(fake_address_dependency) == gray_state_;
}

inline bool ReadBarrier::IsGray(mirror::Object* obj) {
  // Use a load-acquire to load the read barrier bit to avoid reordering with the subsequent load.
  // GetReadBarrierStateAcquire() has load-acquire semantics.
  return obj->GetReadBarrierStateAcquire() == gray_state_;
}
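
// The two IsGray() overloads enforce the same load ordering by different means: the
// overload just above uses a load-acquire, while the fake_address_dependency overload
// relies on an address dependency, which is cheaper on weakly-ordered CPUs because
// dependent loads are ordered without a fence. A hedged side-by-side sketch of the
// patterns each one supports:
//
//   // Dependency version (Barrier() fast path):
//   uintptr_t dep;
//   bool is_gray = ReadBarrier::IsGray(obj, &dep);  // plain load of rb_state
//   ref_addr = reinterpret_cast<mirror::HeapReference<MirrorType>*>(
//       dep | reinterpret_cast<uintptr_t>(ref_addr));  // ordered via address dependency
//
//   // Acquire version:
//   bool is_gray = ReadBarrier::IsGray(obj);        // load-acquire of rb_state
//   MirrorType* ref = ref_addr->AsMirrorPtr();      // ordered by the acquire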

}  // namespace art

#endif  // ART_RUNTIME_READ_BARRIER_INL_H_