/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_READ_BARRIER_INL_H_
#define ART_RUNTIME_READ_BARRIER_INL_H_

#include "read_barrier.h"

#include "gc/collector/concurrent_copying-inl.h"
#include "gc/heap.h"
#include "mirror/object_reference.h"
#include "mirror/reference.h"
#include "runtime.h"
#include "utils.h"

namespace art {
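// Read barrier for a heap reference field of `obj` at `offset`; `ref_addr` points at the field.
// Depending on the configured barrier kind (Baker, Brooks, or table-lookup), the loaded
// reference may be forwarded to its to-space copy via Mark() before being returned; with read
// barriers disabled this is a plain load. On the Baker path, if kAlwaysUpdateField is true, the
// forwarded reference is also CAS'ed back into the field.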
template <typename MirrorType, ReadBarrierOption kReadBarrierOption, bool kAlwaysUpdateField>
inline MirrorType* ReadBarrier::Barrier(
    mirror::Object* obj, MemberOffset offset, mirror::HeapReference<MirrorType>* ref_addr) {
  constexpr bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
  if (with_read_barrier && kUseBakerReadBarrier) {
    // The high bits of the rb_ptr, rb_ptr_high_bits (which must be zero),
    // are used to create an artificial data dependency from the is_gray
    // load to the ref field (ptr) load, to avoid needing a load-load
    // barrier between the two.
    uintptr_t rb_ptr_high_bits;
    bool is_gray = HasGrayReadBarrierPointer(obj, &rb_ptr_high_bits);
    ref_addr = reinterpret_cast<mirror::HeapReference<MirrorType>*>(
        rb_ptr_high_bits | reinterpret_cast<uintptr_t>(ref_addr));
    MirrorType* ref = ref_addr->AsMirrorPtr();
    MirrorType* old_ref = ref;
    if (is_gray) {
      // Slow-path.
      ref = reinterpret_cast<MirrorType*>(Mark(ref));
      // If kAlwaysUpdateField is true, update the field atomically. This may fail if the
      // mutator updates the field before us, but that is OK.
      if (kAlwaysUpdateField && ref != old_ref) {
        obj->CasFieldStrongRelaxedObjectWithoutWriteBarrier<false, false>(
            offset, old_ref, ref);
      }
    }
    if (kEnableReadBarrierInvariantChecks) {
      CHECK_EQ(rb_ptr_high_bits, 0U) << obj << " rb_ptr=" << obj->GetReadBarrierPointer();
    }
    AssertToSpaceInvariant(obj, offset, ref);
    return ref;
  } else if (with_read_barrier && kUseBrooksReadBarrier) {
    // To be implemented.
    return ref_addr->AsMirrorPtr();
  } else if (with_read_barrier && kUseTableLookupReadBarrier) {
    MirrorType* ref = ref_addr->AsMirrorPtr();
    MirrorType* old_ref = ref;
    // The heap or the collector can be null at startup. TODO: avoid the need for this null check.
    gc::Heap* heap = Runtime::Current()->GetHeap();
    if (heap != nullptr && heap->GetReadBarrierTable()->IsSet(old_ref)) {
      ref = reinterpret_cast<MirrorType*>(Mark(old_ref));
      // Update the field atomically. This may fail if the mutator updates it before us,
      // but that is OK.
      if (ref != old_ref) {
        obj->CasFieldStrongRelaxedObjectWithoutWriteBarrier<false, false>(
            offset, old_ref, ref);
      }
    }
    AssertToSpaceInvariant(obj, offset, ref);
    return ref;
  } else {
    // No read barrier.
    return ref_addr->AsMirrorPtr();
  }
}

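// Read barrier for a GC root held in a raw MirrorType* slot. Unlike the field barrier above,
// the slow path is only taken while the GC is marking; on the table-lookup path the root slot
// itself is CAS-updated in place. gc_root_source is only used for the to-space invariant check.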
template <typename MirrorType, ReadBarrierOption kReadBarrierOption>
inline MirrorType* ReadBarrier::BarrierForRoot(MirrorType** root,
                                               GcRootSource* gc_root_source) {
  MirrorType* ref = *root;
  const bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
  if (with_read_barrier && kUseBakerReadBarrier) {
    // TODO: separate the read barrier code from the collector code more.
    Thread* self = Thread::Current();
    if (self != nullptr && self->GetIsGcMarking()) {
      ref = reinterpret_cast<MirrorType*>(Mark(ref));
    }
    AssertToSpaceInvariant(gc_root_source, ref);
    return ref;
  } else if (with_read_barrier && kUseBrooksReadBarrier) {
    // To be implemented.
    return ref;
  } else if (with_read_barrier && kUseTableLookupReadBarrier) {
    Thread* self = Thread::Current();
    if (self != nullptr &&
        self->GetIsGcMarking() &&
        Runtime::Current()->GetHeap()->GetReadBarrierTable()->IsSet(ref)) {
      MirrorType* old_ref = ref;
      ref = reinterpret_cast<MirrorType*>(Mark(old_ref));
      // Update the root atomically. This may fail if the mutator updates it before us,
      // but that is OK.
      if (ref != old_ref) {
        Atomic<mirror::Object*>* atomic_root = reinterpret_cast<Atomic<mirror::Object*>*>(root);
        atomic_root->CompareExchangeStrongRelaxed(old_ref, ref);
      }
    }
    AssertToSpaceInvariant(gc_root_source, ref);
    return ref;
  } else {
    return ref;
  }
}

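// Same as above, but for a GC root held as a mirror::CompressedReference<MirrorType> (e.g.
// inside a GcRoot). The compare-and-swap operates on the compressed representation.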
// TODO: Reduce copy paste
template <typename MirrorType, ReadBarrierOption kReadBarrierOption>
inline MirrorType* ReadBarrier::BarrierForRoot(mirror::CompressedReference<MirrorType>* root,
                                               GcRootSource* gc_root_source) {
  MirrorType* ref = root->AsMirrorPtr();
  const bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
  if (with_read_barrier && kUseBakerReadBarrier) {
    // TODO: separate the read barrier code from the collector code more.
    Thread* self = Thread::Current();
    if (self != nullptr && self->GetIsGcMarking()) {
      ref = reinterpret_cast<MirrorType*>(Mark(ref));
    }
    AssertToSpaceInvariant(gc_root_source, ref);
    return ref;
  } else if (with_read_barrier && kUseBrooksReadBarrier) {
    // To be implemented.
    return ref;
  } else if (with_read_barrier && kUseTableLookupReadBarrier) {
    Thread* self = Thread::Current();
    if (self != nullptr &&
        self->GetIsGcMarking() &&
        Runtime::Current()->GetHeap()->GetReadBarrierTable()->IsSet(ref)) {
      auto old_ref = mirror::CompressedReference<MirrorType>::FromMirrorPtr(ref);
      ref = reinterpret_cast<MirrorType*>(Mark(ref));
      auto new_ref = mirror::CompressedReference<MirrorType>::FromMirrorPtr(ref);
      // Update the root atomically. This may fail if the mutator updates it before us,
      // but that is OK.
      if (new_ref.AsMirrorPtr() != old_ref.AsMirrorPtr()) {
        auto* atomic_root =
            reinterpret_cast<Atomic<mirror::CompressedReference<MirrorType>>*>(root);
        atomic_root->CompareExchangeStrongRelaxed(old_ref, new_ref);
      }
    }
    AssertToSpaceInvariant(gc_root_source, ref);
    return ref;
  } else {
    return ref;
  }
}

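// Returns true while the runtime is not yet in a state where the to-space invariant can be
// checked: the heap or the concurrent copying collector may not exist yet, or the current
// collector is not the concurrent copying (CC) collector.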
inline bool ReadBarrier::IsDuringStartup() {
  gc::Heap* heap = Runtime::Current()->GetHeap();
  if (heap == nullptr) {
    // During startup, the heap can be null.
    return true;
  }
  if (heap->CurrentCollectorType() != gc::kCollectorTypeCC) {
    // CC isn't running.
    return true;
  }
  gc::collector::ConcurrentCopying* collector = heap->ConcurrentCopyingCollector();
  if (collector == nullptr) {
    // During startup, the collector can be null.
    return true;
  }
  return false;
}

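// Debug check that `ref`, read from the field of `obj` at `offset`, points into to-space.
// Only active when to-space invariant checks or debug builds are enabled; skipped for null
// refs and during startup.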
inline void ReadBarrier::AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset,
                                                mirror::Object* ref) {
  if (kEnableToSpaceInvariantChecks || kIsDebugBuild) {
    if (ref == nullptr || IsDuringStartup()) {
      return;
    }
    Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->
        AssertToSpaceInvariant(obj, offset, ref);
  }
}

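// Same check for a reference read from a GC root; gc_root_source describes where the root
// came from, for diagnostics.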
inline void ReadBarrier::AssertToSpaceInvariant(GcRootSource* gc_root_source,
                                                mirror::Object* ref) {
  if (kEnableToSpaceInvariantChecks || kIsDebugBuild) {
    if (ref == nullptr || IsDuringStartup()) {
      return;
    }
    Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->
        AssertToSpaceInvariant(gc_root_source, ref);
  }
}

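// Forwards `obj` to its to-space copy (marking it if necessary) via the concurrent copying
// collector.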
inline mirror::Object* ReadBarrier::Mark(mirror::Object* obj) {
  return Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->Mark(obj);
}

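// Reads the object's read barrier pointer and returns whether its low bits encode the gray
// state. The high bits, which are expected to be zero, are returned through
// out_rb_ptr_high_bits so the caller can fold them into the ref address (for the artificial
// data dependency) and check that they are indeed zero.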
inline bool ReadBarrier::HasGrayReadBarrierPointer(mirror::Object* obj,
                                                   uintptr_t* out_rb_ptr_high_bits) {
  mirror::Object* rb_ptr = obj->GetReadBarrierPointer();
  uintptr_t rb_ptr_bits = reinterpret_cast<uintptr_t>(rb_ptr);
  uintptr_t rb_ptr_low_bits = rb_ptr_bits & rb_ptr_mask_;
  if (kEnableReadBarrierInvariantChecks) {
    CHECK(rb_ptr_low_bits == white_ptr_ || rb_ptr_low_bits == gray_ptr_ ||
          rb_ptr_low_bits == black_ptr_)
        << "obj=" << obj << " rb_ptr=" << rb_ptr << " " << PrettyTypeOf(obj);
  }
  bool is_gray = rb_ptr_low_bits == gray_ptr_;
  // The high bits are supposed to be zero. We check this on the caller side.
  *out_rb_ptr_high_bits = rb_ptr_bits & ~rb_ptr_mask_;
  return is_gray;
}

}  // namespace art

#endif  // ART_RUNTIME_READ_BARRIER_INL_H_