/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_READ_BARRIER_INL_H_
#define ART_RUNTIME_READ_BARRIER_INL_H_

#include "read_barrier.h"

#include "gc/collector/concurrent_copying-inl.h"
#include "gc/heap.h"
#include "mirror/object_reference.h"
#include "mirror/reference.h"
#include "runtime.h"
#include "utils.h"

namespace art {

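// Read barrier on a reference field load from a heap object. Given the holder object `obj`,
// the field offset, and the address of the HeapReference<MirrorType> field, returns the
// (possibly marked) reference that the caller should use. Reference field getters such as
// mirror::Object::GetFieldObject() are expected to route their loads through this function
// when read barriers are enabled.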
template <typename MirrorType, ReadBarrierOption kReadBarrierOption, bool kAlwaysUpdateField>
inline MirrorType* ReadBarrier::Barrier(
    mirror::Object* obj, MemberOffset offset, mirror::HeapReference<MirrorType>* ref_addr) {
  constexpr bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
  if (kUseReadBarrier && with_read_barrier) {
    if (kIsDebugBuild) {
      Thread* const self = Thread::Current();
      if (self != nullptr) {
        CHECK_EQ(self->GetDebugDisallowReadBarrierCount(), 0u);
      }
    }
    if (kUseBakerReadBarrier) {
      // The high bits of the rb_ptr, rb_ptr_high_bits (which must be zero),
      // are used to create an artificial data dependency from the is_gray
      // load to the ref field (ptr) load, to avoid needing a load-load
      // barrier between the two.
      uintptr_t rb_ptr_high_bits;
      bool is_gray = HasGrayReadBarrierPointer(obj, &rb_ptr_high_bits);
      ref_addr = reinterpret_cast<mirror::HeapReference<MirrorType>*>(
          rb_ptr_high_bits | reinterpret_cast<uintptr_t>(ref_addr));
      MirrorType* ref = ref_addr->AsMirrorPtr();
      MirrorType* old_ref = ref;
      if (is_gray) {
        // Slow-path.
        ref = reinterpret_cast<MirrorType*>(Mark(ref));
        // If kAlwaysUpdateField is true, update the field atomically. This may fail if the
        // mutator updates the field before us, but that is OK.
        if (kAlwaysUpdateField && ref != old_ref) {
          obj->CasFieldStrongRelaxedObjectWithoutWriteBarrier<false, false>(
              offset, old_ref, ref);
        }
      }
      if (kEnableReadBarrierInvariantChecks) {
        CHECK_EQ(rb_ptr_high_bits, 0U) << obj << " rb_ptr=" << obj->GetReadBarrierPointer();
      }
      AssertToSpaceInvariant(obj, offset, ref);
      return ref;
    } else if (kUseBrooksReadBarrier) {
      // To be implemented.
      return ref_addr->AsMirrorPtr();
    } else if (kUseTableLookupReadBarrier) {
      MirrorType* ref = ref_addr->AsMirrorPtr();
      MirrorType* old_ref = ref;
      // The heap or the collector can be null at startup. TODO: avoid the need for this null check.
      gc::Heap* heap = Runtime::Current()->GetHeap();
      if (heap != nullptr && heap->GetReadBarrierTable()->IsSet(old_ref)) {
        ref = reinterpret_cast<MirrorType*>(Mark(old_ref));
        // Update the field atomically. This may fail if the mutator updates the field before
        // us, but that is OK.
        if (ref != old_ref) {
          obj->CasFieldStrongRelaxedObjectWithoutWriteBarrier<false, false>(
              offset, old_ref, ref);
        }
      }
      AssertToSpaceInvariant(obj, offset, ref);
      return ref;
    } else {
      LOG(FATAL) << "Unexpected read barrier type";
      UNREACHABLE();
    }
  } else {
    // No read barrier.
    return ref_addr->AsMirrorPtr();
  }
}

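// Read barrier on a GC root that is held as a plain MirrorType* pointer. Returns the to-space
// reference; the table-lookup variant also updates the root in place with a relaxed CAS.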
template <typename MirrorType, ReadBarrierOption kReadBarrierOption>
inline MirrorType* ReadBarrier::BarrierForRoot(MirrorType** root,
                                               GcRootSource* gc_root_source) {
  MirrorType* ref = *root;
  const bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
  if (kUseReadBarrier && with_read_barrier) {
    if (kIsDebugBuild) {
      Thread* const self = Thread::Current();
      if (self != nullptr) {
        CHECK_EQ(self->GetDebugDisallowReadBarrierCount(), 0u);
      }
    }
    if (kUseBakerReadBarrier) {
      // TODO: separate the read barrier code from the collector code more.
      Thread* self = Thread::Current();
      if (self != nullptr && self->GetIsGcMarking()) {
        ref = reinterpret_cast<MirrorType*>(Mark(ref));
      }
      AssertToSpaceInvariant(gc_root_source, ref);
      return ref;
    } else if (kUseBrooksReadBarrier) {
      // To be implemented.
      return ref;
    } else if (kUseTableLookupReadBarrier) {
      Thread* self = Thread::Current();
      if (self != nullptr &&
          self->GetIsGcMarking() &&
          Runtime::Current()->GetHeap()->GetReadBarrierTable()->IsSet(ref)) {
        MirrorType* old_ref = ref;
        ref = reinterpret_cast<MirrorType*>(Mark(old_ref));
        // Update the root atomically. This may fail if the mutator updates it before us,
        // but that is OK.
        if (ref != old_ref) {
          Atomic<mirror::Object*>* atomic_root = reinterpret_cast<Atomic<mirror::Object*>*>(root);
          atomic_root->CompareExchangeStrongRelaxed(old_ref, ref);
        }
      }
      AssertToSpaceInvariant(gc_root_source, ref);
      return ref;
    } else {
      LOG(FATAL) << "Unexpected read barrier type";
      UNREACHABLE();
    }
  } else {
    return ref;
  }
}

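// Read barrier on a GC root that is held as a mirror::CompressedReference<MirrorType>.
// Behaves like the MirrorType** overload above, but operates on compressed (32-bit)
// references.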
// TODO: Reduce copy paste
template <typename MirrorType, ReadBarrierOption kReadBarrierOption>
inline MirrorType* ReadBarrier::BarrierForRoot(mirror::CompressedReference<MirrorType>* root,
                                               GcRootSource* gc_root_source) {
  MirrorType* ref = root->AsMirrorPtr();
  const bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
  if (with_read_barrier && kUseBakerReadBarrier) {
    // TODO: separate the read barrier code from the collector code more.
    Thread* self = Thread::Current();
    if (self != nullptr && self->GetIsGcMarking()) {
      ref = reinterpret_cast<MirrorType*>(Mark(ref));
    }
    AssertToSpaceInvariant(gc_root_source, ref);
    return ref;
  } else if (with_read_barrier && kUseBrooksReadBarrier) {
    // To be implemented.
    return ref;
  } else if (with_read_barrier && kUseTableLookupReadBarrier) {
    Thread* self = Thread::Current();
    if (self != nullptr &&
        self->GetIsGcMarking() &&
        Runtime::Current()->GetHeap()->GetReadBarrierTable()->IsSet(ref)) {
      auto old_ref = mirror::CompressedReference<MirrorType>::FromMirrorPtr(ref);
      ref = reinterpret_cast<MirrorType*>(Mark(ref));
      auto new_ref = mirror::CompressedReference<MirrorType>::FromMirrorPtr(ref);
      // Update the root atomically. This may fail if the mutator updates it before us,
      // but that is OK.
      if (new_ref.AsMirrorPtr() != old_ref.AsMirrorPtr()) {
        auto* atomic_root =
            reinterpret_cast<Atomic<mirror::CompressedReference<MirrorType>>*>(root);
        atomic_root->CompareExchangeStrongRelaxed(old_ref, new_ref);
      }
    }
    AssertToSpaceInvariant(gc_root_source, ref);
    return ref;
  } else {
    return ref;
  }
}

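// Returns true if the heap or the concurrent copying collector is not available yet (during
// startup), or if the concurrent copying collector is not the current collector type. Used to
// skip the to-space invariant checks below when they cannot be performed.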
inline bool ReadBarrier::IsDuringStartup() {
  gc::Heap* heap = Runtime::Current()->GetHeap();
  if (heap == nullptr) {
    // During startup, the heap can be null.
    return true;
  }
  if (heap->CurrentCollectorType() != gc::kCollectorTypeCC) {
    // CC isn't running.
    return true;
  }
  gc::collector::ConcurrentCopying* collector = heap->ConcurrentCopyingCollector();
  if (collector == nullptr) {
    // During startup, the collector can be null.
    return true;
  }
  return false;
}

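// In debug or check-enabled builds, asserts that `ref`, loaded from the field at `offset` in
// `obj`, satisfies the concurrent copying collector's to-space invariant. No-op for null
// references and during startup.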
inline void ReadBarrier::AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset,
                                                mirror::Object* ref) {
  if (kEnableToSpaceInvariantChecks || kIsDebugBuild) {
    if (ref == nullptr || IsDuringStartup()) {
      return;
    }
    Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->
        AssertToSpaceInvariant(obj, offset, ref);
  }
}

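// Same as above, but for a reference loaded from the GC root described by `gc_root_source`.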
inline void ReadBarrier::AssertToSpaceInvariant(GcRootSource* gc_root_source,
                                                mirror::Object* ref) {
  if (kEnableToSpaceInvariantChecks || kIsDebugBuild) {
    if (ref == nullptr || IsDuringStartup()) {
      return;
    }
    Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->
        AssertToSpaceInvariant(gc_root_source, ref);
  }
}

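// Slow path of the read barrier: asks the concurrent copying collector to mark `obj` and
// returns the to-space reference.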
inline mirror::Object* ReadBarrier::Mark(mirror::Object* obj) {
  return Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->MarkFromReadBarrier(obj);
}

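// Reads obj's read barrier pointer and returns whether its low bits encode the gray state.
// The high bits, which are expected to be zero, are stored in *out_rb_ptr_high_bits so that
// the caller can create the data dependency and verify the invariant.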
inline bool ReadBarrier::HasGrayReadBarrierPointer(mirror::Object* obj,
                                                   uintptr_t* out_rb_ptr_high_bits) {
  mirror::Object* rb_ptr = obj->GetReadBarrierPointer();
  uintptr_t rb_ptr_bits = reinterpret_cast<uintptr_t>(rb_ptr);
  uintptr_t rb_ptr_low_bits = rb_ptr_bits & rb_ptr_mask_;
  if (kEnableReadBarrierInvariantChecks) {
    CHECK(rb_ptr_low_bits == white_ptr_ || rb_ptr_low_bits == gray_ptr_ ||
          rb_ptr_low_bits == black_ptr_)
        << "obj=" << obj << " rb_ptr=" << rb_ptr << " " << PrettyTypeOf(obj);
  }
  bool is_gray = rb_ptr_low_bits == gray_ptr_;
  // The high bits are supposed to be zero. We check this on the caller side.
  *out_rb_ptr_high_bits = rb_ptr_bits & ~rb_ptr_mask_;
  return is_gray;
}

}  // namespace art

#endif  // ART_RUNTIME_READ_BARRIER_INL_H_