/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_READ_BARRIER_INL_H_
#define ART_RUNTIME_READ_BARRIER_INL_H_

#include "read_barrier.h"

#include "gc/collector/concurrent_copying-inl.h"
#include "gc/heap.h"
#include "mirror/object_reference.h"
#include "mirror/reference.h"
#include "runtime.h"
#include "utils.h"

namespace art {

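// Returns the referent of the heap reference field of type MirrorType at `offset` inside `obj`,
// run through the configured read barrier (Baker, Brooks, or table-lookup) so that the result is
// a to-space pointer. If kAlwaysUpdateField is true, the Baker slow path also CASes the marked
// referent back into the field.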
template <typename MirrorType, ReadBarrierOption kReadBarrierOption, bool kAlwaysUpdateField>
inline MirrorType* ReadBarrier::Barrier(
    mirror::Object* obj, MemberOffset offset, mirror::HeapReference<MirrorType>* ref_addr) {
  constexpr bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
  if (kUseReadBarrier && with_read_barrier) {
    if (kIsDebugBuild) {
      Thread* const self = Thread::Current();
      if (self != nullptr) {
        CHECK_EQ(self->GetDebugDisallowReadBarrierCount(), 0u);
      }
    }
    if (kUseBakerReadBarrier) {
      // The high bits of the rb_ptr (rb_ptr_high_bits, which must be zero) are used to create an
      // artificial data dependency from the is_gray load to the ref field (ptr) load, so that no
      // load-load barrier is needed between the two.
      uintptr_t rb_ptr_high_bits;
      bool is_gray = HasGrayReadBarrierPointer(obj, &rb_ptr_high_bits);
      ref_addr = reinterpret_cast<mirror::HeapReference<MirrorType>*>(
          rb_ptr_high_bits | reinterpret_cast<uintptr_t>(ref_addr));
      MirrorType* ref = ref_addr->AsMirrorPtr();
      MirrorType* old_ref = ref;
      if (is_gray) {
        // Slow path: the holder is gray, so mark the referent to obtain its to-space pointer.
        ref = reinterpret_cast<MirrorType*>(Mark(ref));
        // If kAlwaysUpdateField is true, update the field atomically. The CAS may fail if the
        // mutator updates the field before us, but that is OK.
        if (kAlwaysUpdateField && ref != old_ref) {
          obj->CasFieldStrongRelaxedObjectWithoutWriteBarrier<false, false>(
              offset, old_ref, ref);
        }
      }
      if (kEnableReadBarrierInvariantChecks) {
        CHECK_EQ(rb_ptr_high_bits, 0U) << obj << " rb_ptr=" << obj->GetReadBarrierPointer();
      }
      AssertToSpaceInvariant(obj, offset, ref);
      return ref;
    } else if (kUseBrooksReadBarrier) {
      // To be implemented.
      return ref_addr->AsMirrorPtr();
    } else if (kUseTableLookupReadBarrier) {
      MirrorType* ref = ref_addr->AsMirrorPtr();
      MirrorType* old_ref = ref;
      // The heap or the collector can be null at startup. TODO: avoid the need for this null
      // check.
      gc::Heap* heap = Runtime::Current()->GetHeap();
      if (heap != nullptr && heap->GetReadBarrierTable()->IsSet(old_ref)) {
        ref = reinterpret_cast<MirrorType*>(Mark(old_ref));
        // Update the field atomically. The CAS may fail if the mutator updates the field before
        // us, but that is OK.
        if (ref != old_ref) {
          obj->CasFieldStrongRelaxedObjectWithoutWriteBarrier<false, false>(
              offset, old_ref, ref);
        }
      }
      AssertToSpaceInvariant(obj, offset, ref);
      return ref;
    } else {
      LOG(FATAL) << "Unexpected read barrier type";
      UNREACHABLE();
    }
  } else {
    // No read barrier.
    return ref_addr->AsMirrorPtr();
  }
}

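// A minimal usage sketch, illustrative only: the real call sites live in mirror/object-inl.h,
// and the GetFieldObjectReferenceAddr() helper assumed below is the one declared on
// mirror::Object.
//
//   mirror::HeapReference<mirror::Object>* addr = obj->GetFieldObjectReferenceAddr(offset);
//   mirror::Object* ref =
//       ReadBarrier::Barrier<mirror::Object, kWithReadBarrier, /* kAlwaysUpdateField */ false>(
//           obj, offset, addr);

// Reads the GC root stored in the raw MirrorType** slot `root` and returns its to-space pointer.
// For the table-lookup barrier, the slot itself may also be updated in place.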
template <typename MirrorType, ReadBarrierOption kReadBarrierOption>
inline MirrorType* ReadBarrier::BarrierForRoot(MirrorType** root,
                                               GcRootSource* gc_root_source) {
  MirrorType* ref = *root;
  const bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
  if (kUseReadBarrier && with_read_barrier) {
    if (kIsDebugBuild) {
      Thread* const self = Thread::Current();
      if (self != nullptr) {
        CHECK_EQ(self->GetDebugDisallowReadBarrierCount(), 0u);
      }
    }
    if (kUseBakerReadBarrier) {
      // TODO: separate the read barrier code from the collector code more.
      Thread* self = Thread::Current();
      if (self != nullptr && self->GetIsGcMarking()) {
        ref = reinterpret_cast<MirrorType*>(Mark(ref));
      }
      AssertToSpaceInvariant(gc_root_source, ref);
      return ref;
    } else if (kUseBrooksReadBarrier) {
      // To be implemented.
      return ref;
    } else if (kUseTableLookupReadBarrier) {
      Thread* self = Thread::Current();
      if (self != nullptr &&
          self->GetIsGcMarking() &&
          Runtime::Current()->GetHeap()->GetReadBarrierTable()->IsSet(ref)) {
        MirrorType* old_ref = ref;
        ref = reinterpret_cast<MirrorType*>(Mark(old_ref));
        // Update the root atomically. The CAS may fail if the mutator updates it before us, but
        // that is OK.
        if (ref != old_ref) {
          Atomic<mirror::Object*>* atomic_root = reinterpret_cast<Atomic<mirror::Object*>*>(root);
          atomic_root->CompareExchangeStrongRelaxed(old_ref, ref);
        }
      }
      AssertToSpaceInvariant(gc_root_source, ref);
      return ref;
    } else {
      LOG(FATAL) << "Unexpected read barrier type";
      UNREACHABLE();
    }
  } else {
    return ref;
  }
}

// TODO: Reduce copy paste.
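// As BarrierForRoot() above, but for a root stored as a mirror::CompressedReference<> rather
// than as a raw pointer.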
template <typename MirrorType, ReadBarrierOption kReadBarrierOption>
inline MirrorType* ReadBarrier::BarrierForRoot(mirror::CompressedReference<MirrorType>* root,
                                               GcRootSource* gc_root_source) {
  MirrorType* ref = root->AsMirrorPtr();
  const bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
  if (with_read_barrier && kUseBakerReadBarrier) {
    // TODO: separate the read barrier code from the collector code more.
    Thread* self = Thread::Current();
    if (self != nullptr && self->GetIsGcMarking()) {
      ref = reinterpret_cast<MirrorType*>(Mark(ref));
    }
    AssertToSpaceInvariant(gc_root_source, ref);
    return ref;
  } else if (with_read_barrier && kUseBrooksReadBarrier) {
    // To be implemented.
    return ref;
  } else if (with_read_barrier && kUseTableLookupReadBarrier) {
    Thread* self = Thread::Current();
    if (self != nullptr &&
        self->GetIsGcMarking() &&
        Runtime::Current()->GetHeap()->GetReadBarrierTable()->IsSet(ref)) {
      auto old_ref = mirror::CompressedReference<MirrorType>::FromMirrorPtr(ref);
      ref = reinterpret_cast<MirrorType*>(Mark(ref));
      auto new_ref = mirror::CompressedReference<MirrorType>::FromMirrorPtr(ref);
      // Update the root atomically. The CAS may fail if the mutator updates it before us, but
      // that is OK.
      if (new_ref.AsMirrorPtr() != old_ref.AsMirrorPtr()) {
        auto* atomic_root =
            reinterpret_cast<Atomic<mirror::CompressedReference<MirrorType>>*>(root);
        atomic_root->CompareExchangeStrongRelaxed(old_ref, new_ref);
      }
    }
    AssertToSpaceInvariant(gc_root_source, ref);
    return ref;
  } else {
    return ref;
  }
}

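// Returns true while the runtime is too early in startup (or not running the concurrent copying
// collector) for the to-space invariant to be checked: the heap, the collector type, or the
// collector itself may not be set up yet.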
inline bool ReadBarrier::IsDuringStartup() {
  gc::Heap* heap = Runtime::Current()->GetHeap();
  if (heap == nullptr) {
    // During startup, the heap can be null.
    return true;
  }
  if (heap->CurrentCollectorType() != gc::kCollectorTypeCC) {
    // CC isn't running.
    return true;
  }
  gc::collector::ConcurrentCopying* collector = heap->ConcurrentCopyingCollector();
  if (collector == nullptr) {
    // During startup, the collector can be null.
    return true;
  }
  return false;
}

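// The two AssertToSpaceInvariant() overloads below delegate to the concurrent copying collector,
// which verifies that `ref` is a to-space reference. They only do work when
// kEnableToSpaceInvariantChecks is set or this is a debug build, and they skip null references
// and startup, when the collector does not exist yet.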
inline void ReadBarrier::AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset,
                                                mirror::Object* ref) {
  if (kEnableToSpaceInvariantChecks || kIsDebugBuild) {
    if (ref == nullptr || IsDuringStartup()) {
      return;
    }
    Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->
        AssertToSpaceInvariant(obj, offset, ref);
  }
}

inline void ReadBarrier::AssertToSpaceInvariant(GcRootSource* gc_root_source,
                                                mirror::Object* ref) {
  if (kEnableToSpaceInvariantChecks || kIsDebugBuild) {
    if (ref == nullptr || IsDuringStartup()) {
      return;
    }
    Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->
        AssertToSpaceInvariant(gc_root_source, ref);
  }
}

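// Marks `obj` through the concurrent copying collector and returns its to-space copy. This
// assumes the collector has already been created (see IsDuringStartup()).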
inline mirror::Object* ReadBarrier::Mark(mirror::Object* obj) {
  return Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->Mark(obj);
}

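// Reads obj's read barrier pointer and reports whether its low (color) bits encode gray. The
// high bits, which are expected to be zero, are passed back through *out_rb_ptr_high_bits so
// that the caller can both verify them and fold them into the artificial data dependency built
// in Barrier().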
inline bool ReadBarrier::HasGrayReadBarrierPointer(mirror::Object* obj,
                                                   uintptr_t* out_rb_ptr_high_bits) {
  mirror::Object* rb_ptr = obj->GetReadBarrierPointer();
  uintptr_t rb_ptr_bits = reinterpret_cast<uintptr_t>(rb_ptr);
  uintptr_t rb_ptr_low_bits = rb_ptr_bits & rb_ptr_mask_;
  if (kEnableReadBarrierInvariantChecks) {
    CHECK(rb_ptr_low_bits == white_ptr_ || rb_ptr_low_bits == gray_ptr_ ||
          rb_ptr_low_bits == black_ptr_)
        << "obj=" << obj << " rb_ptr=" << rb_ptr << " " << PrettyTypeOf(obj);
  }
  bool is_gray = rb_ptr_low_bits == gray_ptr_;
  // The high bits are supposed to be zero. We check this on the caller side.
  *out_rb_ptr_high_bits = rb_ptr_bits & ~rb_ptr_mask_;
  return is_gray;
}
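
// A worked example of the check above, assuming the color constants declared in read_barrier.h
// take their usual two-bit values (white_ptr_ == 0x0, gray_ptr_ == 0x1, black_ptr_ == 0x2,
// rb_ptr_mask_ == 0x3):
//
//   rb_ptr == 0x1 -> rb_ptr_low_bits == gray_ptr_  -> is_gray == true
//   rb_ptr == 0x0 -> rb_ptr_low_bits == white_ptr_ -> is_gray == false
//
// In both cases *out_rb_ptr_high_bits is zero, which Barrier() verifies before folding it into
// the ref_addr data dependency.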

}  // namespace art

#endif  // ART_RUNTIME_READ_BARRIER_INL_H_