/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_READ_BARRIER_INL_H_
#define ART_RUNTIME_READ_BARRIER_INL_H_

#include "read_barrier.h"

#include "gc/collector/concurrent_copying.h"
#include "gc/heap.h"
#include "mirror/object_reference.h"
#include "mirror/reference.h"
#include "runtime.h"
#include "utils.h"

namespace art {

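// Loads the reference field at `offset` in `obj` through the configured read
// barrier (Baker, Brooks, or table-lookup); with read barriers disabled this
// reduces to a plain load. A minimal sketch of a call site (illustrative
// only; in practice the mirror::Object field getters call this):
//
//   mirror::HeapReference<MirrorType>* addr = ...;  // address of the field inside obj
//   MirrorType* ref =
//       ReadBarrier::Barrier<MirrorType, kWithReadBarrier, false>(obj, offset, addr);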
template <typename MirrorType, ReadBarrierOption kReadBarrierOption, bool kMaybeDuringStartup>
inline MirrorType* ReadBarrier::Barrier(
    mirror::Object* obj, MemberOffset offset, mirror::HeapReference<MirrorType>* ref_addr) {
  constexpr bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
  if (with_read_barrier && kUseBakerReadBarrier) {
    // The high bits of the rb ptr, rb_ptr_high_bits (which must be zero),
    // are used to create an artificial data dependency from the is_gray
    // load to the ref field (ptr) load, to avoid needing a load-load
    // barrier between the two.
    uintptr_t rb_ptr_high_bits;
    bool is_gray = HasGrayReadBarrierPointer(obj, &rb_ptr_high_bits);
    ref_addr = reinterpret_cast<mirror::HeapReference<MirrorType>*>(
        rb_ptr_high_bits | reinterpret_cast<uintptr_t>(ref_addr));
    MirrorType* ref = ref_addr->AsMirrorPtr();
    if (is_gray) {
      // Slow-path.
      ref = reinterpret_cast<MirrorType*>(Mark(ref));
    }
    if (kEnableReadBarrierInvariantChecks) {
      CHECK_EQ(rb_ptr_high_bits, 0U) << obj << " rb_ptr=" << obj->GetReadBarrierPointer();
    }
    AssertToSpaceInvariant(obj, offset, ref);
    return ref;
  } else if (with_read_barrier && kUseBrooksReadBarrier) {
    // To be implemented.
    return ref_addr->AsMirrorPtr();
  } else if (with_read_barrier && kUseTableLookupReadBarrier) {
    MirrorType* ref = ref_addr->AsMirrorPtr();
    MirrorType* old_ref = ref;
    // The heap or the collector can be null at startup. TODO: avoid the need for this null check.
    gc::Heap* heap = Runtime::Current()->GetHeap();
    if (heap != nullptr && heap->GetReadBarrierTable()->IsSet(old_ref)) {
      ref = reinterpret_cast<MirrorType*>(Mark(old_ref));
      // Update the field atomically. This may fail if the mutator updates it
      // before us, but that is OK.
      obj->CasFieldStrongSequentiallyConsistentObjectWithoutWriteBarrier<false, false>(
          offset, old_ref, ref);
    }
    AssertToSpaceInvariant(obj, offset, ref);
    return ref;
  } else {
    // No read barrier.
    return ref_addr->AsMirrorPtr();
  }
}

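// Reads a GC root (a reference slot held outside of heap objects, e.g. in the
// class linker or intern table) through the read barrier. The table-lookup
// variant also fixes the slot up in place with a CAS when the referent has moved.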
template <typename MirrorType, ReadBarrierOption kReadBarrierOption, bool kMaybeDuringStartup>
inline MirrorType* ReadBarrier::BarrierForRoot(MirrorType** root,
                                               GcRootSource* gc_root_source) {
  MirrorType* ref = *root;
  const bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
  if (with_read_barrier && kUseBakerReadBarrier) {
    if (kMaybeDuringStartup && IsDuringStartup()) {
      // During startup, the heap may not be initialized yet. Just
      // return the given ref.
      return ref;
    }
    // TODO: separate the read barrier code from the collector code more.
    if (Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->IsMarking()) {
      ref = reinterpret_cast<MirrorType*>(Mark(ref));
    }
    AssertToSpaceInvariant(gc_root_source, ref);
    return ref;
  } else if (with_read_barrier && kUseBrooksReadBarrier) {
    // To be implemented.
    return ref;
  } else if (with_read_barrier && kUseTableLookupReadBarrier) {
    if (kMaybeDuringStartup && IsDuringStartup()) {
      // During startup, the heap may not be initialized yet. Just
      // return the given ref.
      return ref;
    }
    if (Runtime::Current()->GetHeap()->GetReadBarrierTable()->IsSet(ref)) {
      MirrorType* old_ref = ref;
      ref = reinterpret_cast<MirrorType*>(Mark(old_ref));
      // Update the root atomically. This may fail if the mutator updates it
      // before us, but that is OK.
      Atomic<mirror::Object*>* atomic_root = reinterpret_cast<Atomic<mirror::Object*>*>(root);
      atomic_root->CompareExchangeStrongSequentiallyConsistent(old_ref, ref);
    }
    AssertToSpaceInvariant(gc_root_source, ref);
    return ref;
  } else {
    return ref;
  }
}

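// Same as the raw-pointer overload above, but for roots stored as compressed
// references; the CAS updates the slot with a re-compressed new reference.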
// TODO: Reduce the copy-paste shared with the overload above.
template <typename MirrorType, ReadBarrierOption kReadBarrierOption, bool kMaybeDuringStartup>
inline MirrorType* ReadBarrier::BarrierForRoot(mirror::CompressedReference<MirrorType>* root,
                                               GcRootSource* gc_root_source) {
  MirrorType* ref = root->AsMirrorPtr();
  const bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
  if (with_read_barrier && kUseBakerReadBarrier) {
    if (kMaybeDuringStartup && IsDuringStartup()) {
      // During startup, the heap may not be initialized yet. Just
      // return the given ref.
      return ref;
    }
    // TODO: separate the read barrier code from the collector code more.
    if (Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->IsMarking()) {
      ref = reinterpret_cast<MirrorType*>(Mark(ref));
    }
    AssertToSpaceInvariant(gc_root_source, ref);
    return ref;
  } else if (with_read_barrier && kUseBrooksReadBarrier) {
    // To be implemented.
    return ref;
  } else if (with_read_barrier && kUseTableLookupReadBarrier) {
    if (kMaybeDuringStartup && IsDuringStartup()) {
      // During startup, the heap may not be initialized yet. Just
      // return the given ref.
      return ref;
    }
    if (Runtime::Current()->GetHeap()->GetReadBarrierTable()->IsSet(ref)) {
      auto old_ref = mirror::CompressedReference<MirrorType>::FromMirrorPtr(ref);
      ref = reinterpret_cast<MirrorType*>(Mark(ref));
      auto new_ref = mirror::CompressedReference<MirrorType>::FromMirrorPtr(ref);
      // Update the root atomically. This may fail if the mutator updates it
      // before us, but that is OK.
      auto* atomic_root =
          reinterpret_cast<Atomic<mirror::CompressedReference<MirrorType>>*>(root);
      atomic_root->CompareExchangeStrongSequentiallyConsistent(old_ref, new_ref);
    }
    AssertToSpaceInvariant(gc_root_source, ref);
    return ref;
  } else {
    return ref;
  }
}

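// Returns true if the read barrier cannot consult the GC yet: during startup
// the heap or the concurrent copying collector may still be null, and a
// runtime using a non-CC collector is treated the same way.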
inline bool ReadBarrier::IsDuringStartup() {
  gc::Heap* heap = Runtime::Current()->GetHeap();
  if (heap == nullptr) {
    // During startup, the heap can be null.
    return true;
  }
  if (heap->CurrentCollectorType() != gc::kCollectorTypeCC) {
    // CC isn't running.
    return true;
  }
  gc::collector::ConcurrentCopying* collector = heap->ConcurrentCopyingCollector();
  if (collector == nullptr) {
    // During startup, the collector can be null.
    return true;
  }
  return false;
}

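// Debug-mode check that `ref`, read from the field at `offset` in `obj`,
// already points into to-space. Skipped for null refs and during startup,
// when the collector may not exist yet.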
inline void ReadBarrier::AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset,
                                                mirror::Object* ref) {
  if (kEnableToSpaceInvariantChecks || kIsDebugBuild) {
    if (ref == nullptr || IsDuringStartup()) {
      return;
    }
    Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->
        AssertToSpaceInvariant(obj, offset, ref);
  }
}

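// The same check for a reference read from a GC root rather than a heap field.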
inline void ReadBarrier::AssertToSpaceInvariant(GcRootSource* gc_root_source,
                                                mirror::Object* ref) {
  if (kEnableToSpaceInvariantChecks || kIsDebugBuild) {
    if (ref == nullptr || IsDuringStartup()) {
      return;
    }
    Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->
        AssertToSpaceInvariant(gc_root_source, ref);
  }
}

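// Delegates to the concurrent copying collector, which marks `obj` and
// returns its to-space version.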
inline mirror::Object* ReadBarrier::Mark(mirror::Object* obj) {
  return Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->Mark(obj);
}

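// Decodes obj's read barrier pointer: reports whether its low bits encode the
// gray color, and returns the high bits (expected to be zero) so the caller
// can fold them into the field address to force load ordering.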
inline bool ReadBarrier::HasGrayReadBarrierPointer(mirror::Object* obj,
                                                   uintptr_t* out_rb_ptr_high_bits) {
  mirror::Object* rb_ptr = obj->GetReadBarrierPointer();
  uintptr_t rb_ptr_bits = reinterpret_cast<uintptr_t>(rb_ptr);
  uintptr_t rb_ptr_low_bits = rb_ptr_bits & rb_ptr_mask_;
  if (kEnableReadBarrierInvariantChecks) {
    CHECK(rb_ptr_low_bits == white_ptr_ || rb_ptr_low_bits == gray_ptr_ ||
          rb_ptr_low_bits == black_ptr_)
        << "obj=" << obj << " rb_ptr=" << rb_ptr << " " << PrettyTypeOf(obj);
  }
  bool is_gray = rb_ptr_low_bits == gray_ptr_;
  // The high bits are supposed to be zero. We check this on the caller side.
  *out_rb_ptr_high_bits = rb_ptr_bits & ~rb_ptr_mask_;
  return is_gray;
}

}  // namespace art

#endif  // ART_RUNTIME_READ_BARRIER_INL_H_