/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_READ_BARRIER_INL_H_
#define ART_RUNTIME_READ_BARRIER_INL_H_

#include "read_barrier.h"

#include "gc/collector/concurrent_copying.h"
#include "gc/heap.h"
#include "mirror/object_reference.h"
#include "mirror/reference.h"
#include "runtime.h"

namespace art {

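// Read barrier on a reference field load: obj is the holder object, offset the field's
// offset within it, and ref_addr the address of the field. The barrier'ed paths return a
// reference asserted to satisfy the to-space invariant; whether the field in memory is also
// updated depends on the barrier kind (the table-lookup path CASes it, the Baker path does not).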
template <typename MirrorType, ReadBarrierOption kReadBarrierOption, bool kMaybeDuringStartup>
inline MirrorType* ReadBarrier::Barrier(
    mirror::Object* obj, MemberOffset offset, mirror::HeapReference<MirrorType>* ref_addr) {
  const bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
  if (with_read_barrier && kUseBakerReadBarrier) {
    // The higher bits of the rb_ptr, rb_ptr_high_bits (which must be zero),
    // are used to create an artificial data dependency from the is_gray
    // load to the ref field (ptr) load, which avoids the need for a
    // load-load barrier between the two.
    uintptr_t rb_ptr_high_bits;
    bool is_gray = HasGrayReadBarrierPointer(obj, &rb_ptr_high_bits);
    ref_addr = reinterpret_cast<mirror::HeapReference<MirrorType>*>(
        rb_ptr_high_bits | reinterpret_cast<uintptr_t>(ref_addr));
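    // (Because rb_ptr_high_bits is zero, the OR leaves the address unchanged; its only
    // effect is to make the address of the ref load depend on the rb_ptr load, so a weakly
    // ordered CPU cannot hoist the field load above the color check.)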
    MirrorType* ref = ref_addr->AsMirrorPtr();
    if (is_gray) {
      // Slow-path: the holder object is gray, so the loaded reference may still point into
      // from-space; mark it to obtain the to-space reference.
      ref = reinterpret_cast<MirrorType*>(Mark(ref));
    }
    if (kEnableReadBarrierInvariantChecks) {
      CHECK_EQ(rb_ptr_high_bits, 0U) << obj << " rb_ptr=" << obj->GetReadBarrierPointer();
    }
    AssertToSpaceInvariant(obj, offset, ref);
    return ref;
  } else if (with_read_barrier && kUseBrooksReadBarrier) {
    // To be implemented.
    return ref_addr->AsMirrorPtr();
  } else if (with_read_barrier && kUseTableLookupReadBarrier) {
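    // Table-lookup barrier: the read barrier table tracks which parts of the heap hold
    // objects that may still need to be forwarded; only if ref points there is it marked
    // and the updated value CASed back into the field below.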
    MirrorType* ref = ref_addr->AsMirrorPtr();
    MirrorType* old_ref = ref;
    // The heap or the collector can be null at startup. TODO: avoid the need for this null check.
    gc::Heap* heap = Runtime::Current()->GetHeap();
    if (heap != nullptr && heap->GetReadBarrierTable()->IsSet(old_ref)) {
      ref = reinterpret_cast<MirrorType*>(Mark(old_ref));
      // Update the field atomically. This may fail if a mutator updates it first; that is OK.
      obj->CasFieldStrongSequentiallyConsistentObjectWithoutWriteBarrier<false, false>(
          offset, old_ref, ref);
    }
    AssertToSpaceInvariant(obj, offset, ref);
    return ref;
  } else {
    // No read barrier.
    return ref_addr->AsMirrorPtr();
  }
}

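// Read barrier on a GC root, i.e. a reference held outside of a heap object (for example in
// a thread stack or a runtime data structure), so there is no holder object or field offset.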
template <typename MirrorType, ReadBarrierOption kReadBarrierOption, bool kMaybeDuringStartup>
inline MirrorType* ReadBarrier::BarrierForRoot(MirrorType** root) {
  MirrorType* ref = *root;
  const bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
  if (with_read_barrier && kUseBakerReadBarrier) {
    if (kMaybeDuringStartup && IsDuringStartup()) {
      // During startup, the heap may not be initialized yet. Just
      // return the given ref.
      return ref;
    }
    // TODO: separate the read barrier code from the collector code more.
    if (Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->IsMarking()) {
      ref = reinterpret_cast<MirrorType*>(Mark(ref));
    }
    AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
    return ref;
  } else if (with_read_barrier && kUseBrooksReadBarrier) {
    // To be implemented.
    return ref;
  } else if (with_read_barrier && kUseTableLookupReadBarrier) {
    if (kMaybeDuringStartup && IsDuringStartup()) {
      // During startup, the heap may not be initialized yet. Just
      // return the given ref.
      return ref;
    }
    if (Runtime::Current()->GetHeap()->GetReadBarrierTable()->IsSet(ref)) {
      MirrorType* old_ref = ref;
      ref = reinterpret_cast<MirrorType*>(Mark(old_ref));
      // Update the root atomically. This may fail if a mutator updates it first; that is OK.
      Atomic<mirror::Object*>* atomic_root = reinterpret_cast<Atomic<mirror::Object*>*>(root);
      atomic_root->CompareExchangeStrongSequentiallyConsistent(old_ref, ref);
    }
    AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
    return ref;
  } else {
    return ref;
  }
}

// TODO: Reduce the copy-paste between the two BarrierForRoot overloads.
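// Same as above, but for a root stored as a mirror::CompressedReference<MirrorType> rather
// than a raw pointer, so the CAS below compares and swaps compressed reference values.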
template <typename MirrorType, ReadBarrierOption kReadBarrierOption, bool kMaybeDuringStartup>
inline MirrorType* ReadBarrier::BarrierForRoot(mirror::CompressedReference<MirrorType>* root) {
  MirrorType* ref = root->AsMirrorPtr();
  const bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
  if (with_read_barrier && kUseBakerReadBarrier) {
    if (kMaybeDuringStartup && IsDuringStartup()) {
      // During startup, the heap may not be initialized yet. Just
      // return the given ref.
      return ref;
    }
    // TODO: separate the read barrier code from the collector code more.
    if (Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->IsMarking()) {
      ref = reinterpret_cast<MirrorType*>(Mark(ref));
    }
    AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
    return ref;
  } else if (with_read_barrier && kUseBrooksReadBarrier) {
    // To be implemented.
    return ref;
  } else if (with_read_barrier && kUseTableLookupReadBarrier) {
    if (kMaybeDuringStartup && IsDuringStartup()) {
      // During startup, the heap may not be initialized yet. Just
      // return the given ref.
      return ref;
    }
    if (Runtime::Current()->GetHeap()->GetReadBarrierTable()->IsSet(ref)) {
      auto old_ref = mirror::CompressedReference<MirrorType>::FromMirrorPtr(ref);
      ref = reinterpret_cast<MirrorType*>(Mark(ref));
      auto new_ref = mirror::CompressedReference<MirrorType>::FromMirrorPtr(ref);
      // Update the root atomically. This may fail if a mutator updates it first; that is OK.
      auto* atomic_root =
          reinterpret_cast<Atomic<mirror::CompressedReference<MirrorType>>*>(root);
      atomic_root->CompareExchangeStrongSequentiallyConsistent(old_ref, new_ref);
    }
    AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
    return ref;
  } else {
    return ref;
  }
}

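// Returns true if the heap has not been created yet, the concurrent copying (CC) collector is
// not the current collector type, or the collector instance does not exist yet; callers that
// pass kMaybeDuringStartup use this to return the given reference unchanged.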
inline bool ReadBarrier::IsDuringStartup() {
  gc::Heap* heap = Runtime::Current()->GetHeap();
  if (heap == nullptr) {
    // During startup, the heap can be null.
    return true;
  }
  if (heap->CurrentCollectorType() != gc::kCollectorTypeCC) {
    // CC isn't running.
    return true;
  }
  gc::collector::ConcurrentCopying* collector = heap->ConcurrentCopyingCollector();
  if (collector == nullptr) {
    // During startup, the collector can be null.
    return true;
  }
  return false;
}

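// In debug builds or with kEnableToSpaceInvariantChecks, asks the concurrent copying
// collector to verify that ref satisfies the to-space invariant; skipped for null refs and
// during startup, when the collector may not exist yet.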
inline void ReadBarrier::AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset,
                                                mirror::Object* ref) {
  if (kEnableToSpaceInvariantChecks || kIsDebugBuild) {
    if (ref == nullptr || IsDuringStartup()) {
      return;
    }
    Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->
        AssertToSpaceInvariant(obj, offset, ref);
  }
}

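// Delegates to the concurrent copying collector: Mark() returns the to-space version of obj,
// marking (and forwarding) it first if necessary.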
inline mirror::Object* ReadBarrier::Mark(mirror::Object* obj) {
  return Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->Mark(obj);
}

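// The low bits of the read barrier pointer encode the object's color (white_ptr_, gray_ptr_
// and black_ptr_ are defined in read_barrier.h). Roughly, gray means the object has been
// marked but its references have not all been processed yet, which is why gray holders take
// the slow path above.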
inline bool ReadBarrier::HasGrayReadBarrierPointer(mirror::Object* obj,
                                                   uintptr_t* out_rb_ptr_high_bits) {
  mirror::Object* rb_ptr = obj->GetReadBarrierPointer();
  uintptr_t rb_ptr_bits = reinterpret_cast<uintptr_t>(rb_ptr);
  uintptr_t rb_ptr_low_bits = rb_ptr_bits & rb_ptr_mask_;
  if (kEnableReadBarrierInvariantChecks) {
    CHECK(rb_ptr_low_bits == white_ptr_ || rb_ptr_low_bits == gray_ptr_ ||
          rb_ptr_low_bits == black_ptr_)
        << "obj=" << obj << " rb_ptr=" << rb_ptr << " " << PrettyTypeOf(obj);
  }
  bool is_gray = rb_ptr_low_bits == gray_ptr_;
  // The high bits are supposed to be zero. We check this on the caller side.
  *out_rb_ptr_high_bits = rb_ptr_bits & ~rb_ptr_mask_;
  return is_gray;
}

}  // namespace art

#endif  // ART_RUNTIME_READ_BARRIER_INL_H_