/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_READ_BARRIER_INL_H_
#define ART_RUNTIME_READ_BARRIER_INL_H_

#include "read_barrier.h"

#include "gc/accounting/read_barrier_table.h"
#include "gc/collector/concurrent_copying-inl.h"
#include "gc/heap.h"
#include "mirror/object-readbarrier-inl.h"
#include "mirror/object_reference.h"
#include "mirror/reference.h"
#include "runtime.h"

namespace art {

// Disabled for performance reasons.
static constexpr bool kCheckDebugDisallowReadBarrierCount = false;

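// Reads the heap reference field of `obj` at `offset` (through `ref_addr`) and, when read
// barriers are enabled, applies the configured barrier flavor (Baker, Brooks, or table-lookup)
// so that the returned pointer is a to-space reference. The slow paths may also CAS the field
// to the marked reference (always for table-lookup, and for Baker only when kAlwaysUpdateField
// is set).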
template <typename MirrorType, bool kIsVolatile, ReadBarrierOption kReadBarrierOption,
          bool kAlwaysUpdateField>
inline MirrorType* ReadBarrier::Barrier(
    mirror::Object* obj, MemberOffset offset, mirror::HeapReference<MirrorType>* ref_addr) {
  constexpr bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
  if (kUseReadBarrier && with_read_barrier) {
    if (kCheckDebugDisallowReadBarrierCount) {
      Thread* const self = Thread::Current();
      if (self != nullptr) {
        CHECK_EQ(self->GetDebugDisallowReadBarrierCount(), 0u);
      }
    }
    if (kUseBakerReadBarrier) {
      // fake_address_dependency (must be zero) is used to create an artificial data dependency
      // from the is_gray load to the ref field (ptr) load, to avoid needing a load-load barrier
      // between the two.
      uintptr_t fake_address_dependency;
      bool is_gray = IsGray(obj, &fake_address_dependency);
      if (kEnableReadBarrierInvariantChecks) {
        CHECK_EQ(fake_address_dependency, 0U) << obj << " rb_state=" << obj->GetReadBarrierState();
      }
      ref_addr = reinterpret_cast<mirror::HeapReference<MirrorType>*>(
          fake_address_dependency | reinterpret_cast<uintptr_t>(ref_addr));
      MirrorType* ref = ref_addr->template AsMirrorPtr<kIsVolatile>();
      MirrorType* old_ref = ref;
      if (is_gray) {
        // Slow-path.
        ref = reinterpret_cast<MirrorType*>(Mark(ref));
        // If kAlwaysUpdateField is true, update the field atomically. This may fail if the
        // mutator updates it before us, but that is OK.
        if (kAlwaysUpdateField && ref != old_ref) {
          obj->CasFieldObjectWithoutWriteBarrier<false, false>(offset,
                                                               old_ref,
                                                               ref,
                                                               CASMode::kStrong,
                                                               std::memory_order_release);
        }
      }
      AssertToSpaceInvariant(obj, offset, ref);
      return ref;
    } else if (kUseBrooksReadBarrier) {
      // To be implemented.
      return ref_addr->template AsMirrorPtr<kIsVolatile>();
    } else if (kUseTableLookupReadBarrier) {
      MirrorType* ref = ref_addr->template AsMirrorPtr<kIsVolatile>();
      MirrorType* old_ref = ref;
      // The heap or the collector can be null at startup. TODO: avoid the need for this null check.
      gc::Heap* heap = Runtime::Current()->GetHeap();
      if (heap != nullptr && heap->GetReadBarrierTable()->IsSet(old_ref)) {
        ref = reinterpret_cast<MirrorType*>(Mark(old_ref));
        // Update the field atomically. This may fail if the mutator updates it before us, but
        // that is OK.
        if (ref != old_ref) {
          obj->CasFieldObjectWithoutWriteBarrier<false, false>(offset,
                                                               old_ref,
                                                               ref,
                                                               CASMode::kStrong,
                                                               std::memory_order_release);
        }
      }
      AssertToSpaceInvariant(obj, offset, ref);
      return ref;
    } else {
      LOG(FATAL) << "Unexpected read barrier type";
      UNREACHABLE();
    }
  } else {
    // No read barrier.
    return ref_addr->template AsMirrorPtr<kIsVolatile>();
  }
}

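// The analogue of Barrier() for a GC root held as a raw pointer: returns the (possibly marked)
// reference and, on the table-lookup slow path, may CAS the root slot to the marked reference.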
template <typename MirrorType, ReadBarrierOption kReadBarrierOption>
inline MirrorType* ReadBarrier::BarrierForRoot(MirrorType** root,
                                               GcRootSource* gc_root_source) {
  MirrorType* ref = *root;
  const bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
  if (kUseReadBarrier && with_read_barrier) {
    if (kIsDebugBuild) {
      Thread* const self = Thread::Current();
      if (self != nullptr) {
        CHECK_EQ(self->GetDebugDisallowReadBarrierCount(), 0u);
      }
    }
    if (kUseBakerReadBarrier) {
      // TODO: separate the read barrier code from the collector code more.
      Thread* self = Thread::Current();
      if (self != nullptr && self->GetIsGcMarking()) {
        ref = reinterpret_cast<MirrorType*>(Mark(ref));
      }
      AssertToSpaceInvariant(gc_root_source, ref);
      return ref;
    } else if (kUseBrooksReadBarrier) {
      // To be implemented.
      return ref;
    } else if (kUseTableLookupReadBarrier) {
      Thread* self = Thread::Current();
      if (self != nullptr &&
          self->GetIsGcMarking() &&
          Runtime::Current()->GetHeap()->GetReadBarrierTable()->IsSet(ref)) {
        MirrorType* old_ref = ref;
        ref = reinterpret_cast<MirrorType*>(Mark(old_ref));
        // Update the root atomically. This may fail if the mutator updates it before us, but
        // that is OK.
        if (ref != old_ref) {
          Atomic<MirrorType*>* atomic_root = reinterpret_cast<Atomic<MirrorType*>*>(root);
          atomic_root->CompareAndSetStrongRelaxed(old_ref, ref);
        }
      }
      AssertToSpaceInvariant(gc_root_source, ref);
      return ref;
    } else {
      LOG(FATAL) << "Unexpected read barrier type";
      UNREACHABLE();
    }
  } else {
    return ref;
  }
}

// TODO: Reduce copy-paste.
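// Same as the raw-pointer overload above, but for GC roots stored as compressed references.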
template <typename MirrorType, ReadBarrierOption kReadBarrierOption>
inline MirrorType* ReadBarrier::BarrierForRoot(mirror::CompressedReference<MirrorType>* root,
                                               GcRootSource* gc_root_source) {
  MirrorType* ref = root->AsMirrorPtr();
  const bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
  if (with_read_barrier && kUseBakerReadBarrier) {
    // TODO: separate the read barrier code from the collector code more.
    Thread* self = Thread::Current();
    if (self != nullptr && self->GetIsGcMarking()) {
      ref = reinterpret_cast<MirrorType*>(Mark(ref));
    }
    AssertToSpaceInvariant(gc_root_source, ref);
    return ref;
  } else if (with_read_barrier && kUseBrooksReadBarrier) {
    // To be implemented.
    return ref;
  } else if (with_read_barrier && kUseTableLookupReadBarrier) {
    Thread* self = Thread::Current();
    if (self != nullptr &&
        self->GetIsGcMarking() &&
        Runtime::Current()->GetHeap()->GetReadBarrierTable()->IsSet(ref)) {
      auto old_ref = mirror::CompressedReference<MirrorType>::FromMirrorPtr(ref);
      ref = reinterpret_cast<MirrorType*>(Mark(ref));
      auto new_ref = mirror::CompressedReference<MirrorType>::FromMirrorPtr(ref);
      // Update the root atomically. This may fail if the mutator updates it before us, but
      // that is OK.
      if (new_ref.AsMirrorPtr() != old_ref.AsMirrorPtr()) {
        auto* atomic_root =
            reinterpret_cast<Atomic<mirror::CompressedReference<MirrorType>>*>(root);
        atomic_root->CompareAndSetStrongRelaxed(old_ref, new_ref);
      }
    }
    AssertToSpaceInvariant(gc_root_source, ref);
    return ref;
  } else {
    return ref;
  }
}

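// Queries the concurrent copying collector's mark state for `ref` while the GC is marking.
// With read barriers disabled, a null `ref`, or no marking in progress, `ref` is returned
// unchanged.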
template <typename MirrorType>
inline MirrorType* ReadBarrier::IsMarked(MirrorType* ref) {
  // Only read-barrier configurations can have mutators run while the GC is marking.
  if (!kUseReadBarrier) {
    return ref;
  }
  // IsMarked does not handle null, so handle it here.
  if (ref == nullptr) {
    return nullptr;
  }
  // IsMarked should only be called when the GC is marking.
  if (!Thread::Current()->GetIsGcMarking()) {
    return ref;
  }

  return reinterpret_cast<MirrorType*>(
      Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->IsMarked(ref));
}

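// Returns true while the to-space invariant cannot be checked yet: the heap or the concurrent
// copying collector does not exist, or CC is not the current collector type.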
inline bool ReadBarrier::IsDuringStartup() {
  gc::Heap* heap = Runtime::Current()->GetHeap();
  if (heap == nullptr) {
    // During startup, the heap can be null.
    return true;
  }
  if (heap->CurrentCollectorType() != gc::kCollectorTypeCC) {
    // CC isn't running.
    return true;
  }
  gc::collector::ConcurrentCopying* collector = heap->ConcurrentCopyingCollector();
  if (collector == nullptr) {
    // During startup, the collector can be null.
    return true;
  }
  return false;
}

inline void ReadBarrier::AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset,
                                                mirror::Object* ref) {
  if (kEnableToSpaceInvariantChecks) {
    if (ref == nullptr || IsDuringStartup()) {
      return;
    }
    Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->
        AssertToSpaceInvariant(obj, offset, ref);
  }
}

inline void ReadBarrier::AssertToSpaceInvariant(GcRootSource* gc_root_source,
                                                mirror::Object* ref) {
  if (kEnableToSpaceInvariantChecks) {
    if (ref == nullptr || IsDuringStartup()) {
      return;
    }
    Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->
        AssertToSpaceInvariant(gc_root_source, ref);
  }
}

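// Read barrier slow path: have the concurrent copying collector mark `obj` and return the
// resulting to-space reference.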
inline mirror::Object* ReadBarrier::Mark(mirror::Object* obj) {
  return Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->MarkFromReadBarrier(obj);
}

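// Returns true if `obj` is in the gray read barrier state, i.e. the slow path must be taken.
// The first overload also yields the fake address dependency (always zero) used to order the
// rb_state load before the subsequent reference load without a load-load barrier.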
inline bool ReadBarrier::IsGray(mirror::Object* obj, uintptr_t* fake_address_dependency) {
  return obj->GetReadBarrierState(fake_address_dependency) == kGrayState;
}

inline bool ReadBarrier::IsGray(mirror::Object* obj) {
  // Use a load-acquire to load the read barrier bit to avoid reordering with the subsequent load.
  // GetReadBarrierStateAcquire() has load-acquire semantics.
  return obj->GetReadBarrierStateAcquire() == kGrayState;
}

}  // namespace art

#endif  // ART_RUNTIME_READ_BARRIER_INL_H_