/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_READ_BARRIER_INL_H_
#define ART_RUNTIME_READ_BARRIER_INL_H_

#include "read_barrier.h"

#include "gc/collector/concurrent_copying-inl.h"
#include "gc/heap.h"
#include "mirror/object_reference.h"
#include "mirror/reference.h"
#include "runtime.h"
#include "utils.h"

namespace art {

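// ReadBarrier::Barrier() is the read barrier applied when a mutator loads the heap reference
// field of obj at the given offset, whose address is ref_addr. Depending on the build-time
// flags it applies a Baker-style barrier, a Brooks-style barrier (unimplemented), or a
// table-lookup barrier, and returns the possibly-updated (to-space) reference. When
// kAlwaysUpdateField is true, the marked reference is also written back into the field with a
// CAS.
//
// Illustrative sketch only (a hypothetical caller, not code from this file): a reference field
// getter such as mirror::Object::GetFieldObject<T>() is expected to do roughly
//
//   mirror::HeapReference<T>* addr = /* address of the field at `offset` inside obj */;
//   T* ref = ReadBarrier::Barrier<T, kWithReadBarrier>(obj, offset, addr);
//
// so that every reference field load goes through this barrier when kUseReadBarrier is true.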
template <typename MirrorType, ReadBarrierOption kReadBarrierOption, bool kAlwaysUpdateField>
inline MirrorType* ReadBarrier::Barrier(
    mirror::Object* obj, MemberOffset offset, mirror::HeapReference<MirrorType>* ref_addr) {
  constexpr bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
  if (kUseReadBarrier && with_read_barrier) {
    if (kIsDebugBuild) {
      Thread* const self = Thread::Current();
      if (self != nullptr) {
        CHECK_EQ(self->GetDebugDisallowReadBarrierCount(), 0u);
      }
    }
    if (kUseBakerReadBarrier) {
      // fake_address_dependency (must be zero) is used to create an artificial data dependency
      // from the is_gray load to the ref field (ptr) load, to avoid needing a load-load barrier
      // between the two.
      uintptr_t fake_address_dependency;
      bool is_gray = IsGray(obj, &fake_address_dependency);
      if (kEnableReadBarrierInvariantChecks) {
        CHECK_EQ(fake_address_dependency, 0U) << obj << " rb_state=" << obj->GetReadBarrierState();
      }
      ref_addr = reinterpret_cast<mirror::HeapReference<MirrorType>*>(
          fake_address_dependency | reinterpret_cast<uintptr_t>(ref_addr));
      MirrorType* ref = ref_addr->AsMirrorPtr();
      MirrorType* old_ref = ref;
      if (is_gray) {
        // Slow-path.
        ref = reinterpret_cast<MirrorType*>(Mark(ref));
        // If kAlwaysUpdateField is true, update the field atomically. This may fail if the
        // mutator updates before us, but it's OK.
        if (kAlwaysUpdateField && ref != old_ref) {
          obj->CasFieldStrongRelaxedObjectWithoutWriteBarrier<false, false>(
              offset, old_ref, ref);
        }
      }
      AssertToSpaceInvariant(obj, offset, ref);
      return ref;
    } else if (kUseBrooksReadBarrier) {
      // To be implemented.
      return ref_addr->AsMirrorPtr();
    } else if (kUseTableLookupReadBarrier) {
      MirrorType* ref = ref_addr->AsMirrorPtr();
      MirrorType* old_ref = ref;
      // The heap or the collector can be null at startup. TODO: avoid the need for this null check.
      gc::Heap* heap = Runtime::Current()->GetHeap();
      if (heap != nullptr && heap->GetReadBarrierTable()->IsSet(old_ref)) {
        ref = reinterpret_cast<MirrorType*>(Mark(old_ref));
        // Update the field atomically. This may fail if the mutator updates before us, but it's OK.
        if (ref != old_ref) {
          obj->CasFieldStrongRelaxedObjectWithoutWriteBarrier<false, false>(
              offset, old_ref, ref);
        }
      }
      AssertToSpaceInvariant(obj, offset, ref);
      return ref;
    } else {
      LOG(FATAL) << "Unexpected read barrier type";
      UNREACHABLE();
    }
  } else {
    // No read barrier.
    return ref_addr->AsMirrorPtr();
  }
}

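// Read barrier for a GC root held in a raw pointer slot (MirrorType**). Unlike Barrier() there
// is no holding object, so when the table-lookup barrier relocates the reference it CAS-updates
// the root slot itself. gc_root_source is only consumed by the to-space invariant assertion.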
template <typename MirrorType, ReadBarrierOption kReadBarrierOption>
inline MirrorType* ReadBarrier::BarrierForRoot(MirrorType** root,
                                               GcRootSource* gc_root_source) {
  MirrorType* ref = *root;
  const bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
  if (kUseReadBarrier && with_read_barrier) {
    if (kIsDebugBuild) {
      Thread* const self = Thread::Current();
      if (self != nullptr) {
        CHECK_EQ(self->GetDebugDisallowReadBarrierCount(), 0u);
      }
    }
    if (kUseBakerReadBarrier) {
      // TODO: separate the read barrier code from the collector code more.
      Thread* self = Thread::Current();
      if (self != nullptr && self->GetIsGcMarking()) {
        ref = reinterpret_cast<MirrorType*>(Mark(ref));
      }
      AssertToSpaceInvariant(gc_root_source, ref);
      return ref;
    } else if (kUseBrooksReadBarrier) {
      // To be implemented.
      return ref;
    } else if (kUseTableLookupReadBarrier) {
      Thread* self = Thread::Current();
      if (self != nullptr &&
          self->GetIsGcMarking() &&
          Runtime::Current()->GetHeap()->GetReadBarrierTable()->IsSet(ref)) {
        MirrorType* old_ref = ref;
        ref = reinterpret_cast<MirrorType*>(Mark(old_ref));
        // Update the field atomically. This may fail if the mutator updates before us, but it's OK.
        if (ref != old_ref) {
          Atomic<mirror::Object*>* atomic_root = reinterpret_cast<Atomic<mirror::Object*>*>(root);
          atomic_root->CompareExchangeStrongRelaxed(old_ref, ref);
        }
      }
      AssertToSpaceInvariant(gc_root_source, ref);
      return ref;
    } else {
      LOG(FATAL) << "Unexpected read barrier type";
      UNREACHABLE();
    }
  } else {
    return ref;
  }
}

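// Same as the BarrierForRoot() overload above, but for a root held as a
// mirror::CompressedReference<MirrorType> (the representation used by GcRoot<>), so the CAS on
// the root slot operates on compressed references rather than raw pointers.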
// TODO: Reduce copy paste
template <typename MirrorType, ReadBarrierOption kReadBarrierOption>
inline MirrorType* ReadBarrier::BarrierForRoot(mirror::CompressedReference<MirrorType>* root,
                                               GcRootSource* gc_root_source) {
  MirrorType* ref = root->AsMirrorPtr();
  const bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
  if (with_read_barrier && kUseBakerReadBarrier) {
    // TODO: separate the read barrier code from the collector code more.
    Thread* self = Thread::Current();
    if (self != nullptr && self->GetIsGcMarking()) {
      ref = reinterpret_cast<MirrorType*>(Mark(ref));
    }
    AssertToSpaceInvariant(gc_root_source, ref);
    return ref;
  } else if (with_read_barrier && kUseBrooksReadBarrier) {
    // To be implemented.
    return ref;
  } else if (with_read_barrier && kUseTableLookupReadBarrier) {
    Thread* self = Thread::Current();
    if (self != nullptr &&
        self->GetIsGcMarking() &&
        Runtime::Current()->GetHeap()->GetReadBarrierTable()->IsSet(ref)) {
      auto old_ref = mirror::CompressedReference<MirrorType>::FromMirrorPtr(ref);
      ref = reinterpret_cast<MirrorType*>(Mark(ref));
      auto new_ref = mirror::CompressedReference<MirrorType>::FromMirrorPtr(ref);
      // Update the field atomically. This may fail if the mutator updates before us, but it's OK.
      if (new_ref.AsMirrorPtr() != old_ref.AsMirrorPtr()) {
        auto* atomic_root =
            reinterpret_cast<Atomic<mirror::CompressedReference<MirrorType>>*>(root);
        atomic_root->CompareExchangeStrongRelaxed(old_ref, new_ref);
      }
    }
    AssertToSpaceInvariant(gc_root_source, ref);
    return ref;
  } else {
    return ref;
  }
}

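// Returns true while the runtime is not far enough along for the to-space invariant checks below
// to be meaningful: during startup the heap or the concurrent copying collector can be null, or
// CC may not be the current collector type.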
inline bool ReadBarrier::IsDuringStartup() {
  gc::Heap* heap = Runtime::Current()->GetHeap();
  if (heap == nullptr) {
    // During startup, the heap can be null.
    return true;
  }
  if (heap->CurrentCollectorType() != gc::kCollectorTypeCC) {
    // CC isn't running.
    return true;
  }
  gc::collector::ConcurrentCopying* collector = heap->ConcurrentCopyingCollector();
  if (collector == nullptr) {
    // During startup, the collector can be null.
    return true;
  }
  return false;
}

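// Debug-only checks of the "to-space invariant": while the concurrent copying collector is
// marking, references read by mutators must point into to-space (never to a stale from-space
// copy). Both overloads forward to the collector and are effectively no-ops unless
// kEnableToSpaceInvariantChecks is set.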
inline void ReadBarrier::AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset,
                                                mirror::Object* ref) {
  if (kEnableToSpaceInvariantChecks) {
    if (ref == nullptr || IsDuringStartup()) {
      return;
    }
    Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->
        AssertToSpaceInvariant(obj, offset, ref);
  }
}

inline void ReadBarrier::AssertToSpaceInvariant(GcRootSource* gc_root_source,
                                                mirror::Object* ref) {
  if (kEnableToSpaceInvariantChecks) {
    if (ref == nullptr || IsDuringStartup()) {
      return;
    }
    Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->
        AssertToSpaceInvariant(gc_root_source, ref);
  }
}

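// Slow path shared by the barriers above: delegates to the concurrent copying collector, which
// returns the to-space copy of obj, marking (and copying) it first if necessary.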
inline mirror::Object* ReadBarrier::Mark(mirror::Object* obj) {
  return Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->MarkFromReadBarrier(obj);
}

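// Baker read barrier fast path helpers. An object whose read barrier state equals gray_state_
// may still contain from-space references, so readers of its fields must take the slow path
// (Mark()). fake_address_dependency is always zero, but it is produced as a value that is
// data-dependent on the load of the read barrier state; OR-ing it into ref_addr in Barrier()
// above makes the address of the reference load depend on that earlier load, which on
// ARM/POWER-style memory models orders the two loads without an explicit load-load fence.
// Conceptual sketch only (not the actual generated code):
//
//   rb_state = load(obj->lock_word);   // the read barrier state bits live in the lock word
//   dep      = rb_state ^ rb_state;    // always zero, but data-dependent on the first load
//   ref      = load(ref_addr | dep);   // the second load's address depends on the first load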
inline bool ReadBarrier::IsGray(mirror::Object* obj, uintptr_t* fake_address_dependency) {
  return obj->GetReadBarrierState(fake_address_dependency) == gray_state_;
}

inline bool ReadBarrier::IsGray(mirror::Object* obj) {
  // Use a load-acquire to load the read barrier bit to avoid reordering with the subsequent load.
  // GetReadBarrierStateAcquire() has load-acquire semantics.
  return obj->GetReadBarrierStateAcquire() == gray_state_;
}

}  // namespace art

#endif  // ART_RUNTIME_READ_BARRIER_INL_H_