/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "reference_processor.h"

#include "mirror/object-inl.h"
#include "mirror/reference.h"
#include "mirror/reference-inl.h"
#include "reference_processor-inl.h"
#include "reflection.h"
#include "ScopedLocalRef.h"
#include "scoped_thread_state_change.h"
#include "well_known_classes.h"

namespace art {
namespace gc {

ReferenceProcessor::ReferenceProcessor()
    : process_references_args_(nullptr, nullptr, nullptr),
      preserving_references_(false),
      condition_("reference processor condition", *Locks::reference_processor_lock_),
      soft_reference_queue_(Locks::reference_queue_soft_references_lock_),
      weak_reference_queue_(Locks::reference_queue_weak_references_lock_),
      finalizer_reference_queue_(Locks::reference_queue_finalizer_references_lock_),
      phantom_reference_queue_(Locks::reference_queue_phantom_references_lock_),
      cleared_references_(Locks::reference_queue_cleared_references_lock_) {
}

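// Reference.get() fast/slow path control. The slow-path flag lives on the
// java.lang.ref.Reference class itself; while it is set, Reference.get() is routed through
// GetReferent() below so that the GC can block readers during reference processing.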
void ReferenceProcessor::EnableSlowPath() {
  mirror::Reference::GetJavaLangRefReference()->SetSlowPath(true);
}

void ReferenceProcessor::DisableSlowPath(Thread* self) {
  mirror::Reference::GetJavaLangRefReference()->SetSlowPath(false);
  condition_.Broadcast(self);
}

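// Slow path backing Reference.get(): returns the referent once it is safe to hand out,
// blocking on condition_ while the GC is still processing (and possibly preserving) references.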
mirror::Object* ReferenceProcessor::GetReferent(Thread* self, mirror::Reference* reference) {
  mirror::Object* const referent = reference->GetReferent();
  // If the referent is null then it is already cleared; we can just return null since there is
  // no scenario where it becomes non-null during the reference processing phase.
  if (UNLIKELY(!SlowPathEnabled()) || referent == nullptr) {
    return referent;
  }
  MutexLock mu(self, *Locks::reference_processor_lock_);
  while (SlowPathEnabled()) {
    mirror::HeapReference<mirror::Object>* const referent_addr =
        reference->GetReferentReferenceAddr();
    // If the referent has become cleared, return null. No barrier is needed since thread roots
    // can't get updated until after we leave this function, because we hold the mutator lock.
    if (referent_addr->AsMirrorPtr() == nullptr) {
      return nullptr;
    }
    // Try to see if the referent is already marked by using the is_marked_callback. We can
    // return it to the mutator as long as the GC is not preserving references.
    IsHeapReferenceMarkedCallback* const is_marked_callback =
        process_references_args_.is_marked_callback_;
    if (LIKELY(is_marked_callback != nullptr)) {
      // A false result means the referent is not marked, but it could still become marked if it
      // is reachable from a finalizer referent, so we cannot return it in that case and must
      // block. Otherwise, we can return the referent to the mutator as long as the GC is not
      // preserving references; while references are being preserved, only black nodes can be
      // safely returned, since the mutator could take a white field from a grey or white node
      // and move it somewhere else in the heap, causing corruption because that field would
      // get swept.
      if (is_marked_callback(referent_addr, process_references_args_.arg_)) {
        if (!preserving_references_ ||
            (LIKELY(!reference->IsFinalizerReferenceInstance()) && !reference->IsEnqueued())) {
          return referent_addr->AsMirrorPtr();
        }
      }
    }
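    // Not safe to return the referent yet. Block until the GC broadcasts on condition_, which
    // happens both when it stops preserving references and when reference processing finishes.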
    condition_.WaitHoldingLocks(self);
  }
  return reference->GetReferent();
}

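// Soft reference preservation callback: marks the referent (via the GC's mark callback) so a
// softly reachable object survives this collection, and updates the reference field in case the
// referent moved.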
bool ReferenceProcessor::PreserveSoftReferenceCallback(mirror::HeapReference<mirror::Object>* obj,
                                                       void* arg) {
  auto* const args = reinterpret_cast<ProcessReferencesArgs*>(arg);
  // TODO: Add smarter logic for preserving soft references.
  mirror::Object* new_obj = args->mark_callback_(obj->AsMirrorPtr(), args->arg_);
  DCHECK(new_obj != nullptr);
  obj->Assign(new_obj);
  return true;
}

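// The preserving-references window brackets the GC phases (soft reference forwarding and
// finalizer enqueueing) during which handing a merely-marked referent back to the mutator would
// be unsafe; see the preserving_references_ check in GetReferent() above.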
void ReferenceProcessor::StartPreservingReferences(Thread* self) {
  MutexLock mu(self, *Locks::reference_processor_lock_);
  preserving_references_ = true;
}

void ReferenceProcessor::StopPreservingReferences(Thread* self) {
  MutexLock mu(self, *Locks::reference_processor_lock_);
  preserving_references_ = false;
  // We are done preserving references; threads blocked in GetReferent may now see a marked
  // referent, so wake them up.
  condition_.Broadcast(self);
}

// Process reference class instances and schedule finalizations.
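// Processing proceeds in phases: (1) unless soft references are being cleared, forward (mark)
// softly reachable referents; (2) clear soft and weak references whose referents are still
// white; (3) enqueue finalizer references with white referents, marking those referents so they
// stay live until finalized; (4) clear the soft and weak references made reachable by step (3),
// then clear white phantom references.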
void ReferenceProcessor::ProcessReferences(bool concurrent, TimingLogger* timings,
                                           bool clear_soft_references,
                                           IsHeapReferenceMarkedCallback* is_marked_callback,
                                           MarkObjectCallback* mark_object_callback,
                                           ProcessMarkStackCallback* process_mark_stack_callback,
                                           void* arg) {
  TimingLogger::ScopedTiming t(concurrent ? __FUNCTION__ : "(Paused)ProcessReferences", timings);
  Thread* self = Thread::Current();
  {
    MutexLock mu(self, *Locks::reference_processor_lock_);
    process_references_args_.is_marked_callback_ = is_marked_callback;
    process_references_args_.mark_callback_ = mark_object_callback;
    process_references_args_.arg_ = arg;
    CHECK_EQ(SlowPathEnabled(), concurrent) << "Slow path must be enabled iff concurrent";
  }
  // Unless we are required to clear soft references with white referents, preserve some of them.
  if (!clear_soft_references) {
    TimingLogger::ScopedTiming split(concurrent ? "ForwardSoftReferences" :
        "(Paused)ForwardSoftReferences", timings);
    if (concurrent) {
      StartPreservingReferences(self);
    }
    soft_reference_queue_.ForwardSoftReferences(&PreserveSoftReferenceCallback,
                                                &process_references_args_);
    process_mark_stack_callback(arg);
    if (concurrent) {
      StopPreservingReferences(self);
    }
  }
  // Clear all remaining soft and weak references with white referents.
  soft_reference_queue_.ClearWhiteReferences(&cleared_references_, is_marked_callback, arg);
  weak_reference_queue_.ClearWhiteReferences(&cleared_references_, is_marked_callback, arg);
  {
    TimingLogger::ScopedTiming t(concurrent ? "EnqueueFinalizerReferences" :
        "(Paused)EnqueueFinalizerReferences", timings);
    if (concurrent) {
      StartPreservingReferences(self);
    }
    // Preserve all white objects with finalize methods and schedule them for finalization.
    finalizer_reference_queue_.EnqueueFinalizerReferences(&cleared_references_, is_marked_callback,
                                                          mark_object_callback, arg);
    process_mark_stack_callback(arg);
    if (concurrent) {
      StopPreservingReferences(self);
    }
  }
  // Clear any remaining white soft and weak references, i.e. those that were discovered while
  // marking objects reachable from finalizer referents.
  soft_reference_queue_.ClearWhiteReferences(&cleared_references_, is_marked_callback, arg);
  weak_reference_queue_.ClearWhiteReferences(&cleared_references_, is_marked_callback, arg);
  // Clear all phantom references with white referents.
  phantom_reference_queue_.ClearWhiteReferences(&cleared_references_, is_marked_callback, arg);
  // At this point all reference queues other than the cleared references should be empty.
  DCHECK(soft_reference_queue_.IsEmpty());
  DCHECK(weak_reference_queue_.IsEmpty());
  DCHECK(finalizer_reference_queue_.IsEmpty());
  DCHECK(phantom_reference_queue_.IsEmpty());
  {
    MutexLock mu(self, *Locks::reference_processor_lock_);
    // We always need to clear the callback, since the next GC may be concurrent: doing this only
    // for concurrent GCs could result in a stale is_marked_callback_ being called before
    // reference processing starts, because there is a small window of time where the slow path
    // is enabled but the callback isn't set yet.
    process_references_args_.is_marked_callback_ = nullptr;
    if (concurrent) {
      // Done processing, disable the slow path and broadcast to the waiters.
      DisableSlowPath(self);
    }
  }
}

// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
void ReferenceProcessor::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref,
                                                IsHeapReferenceMarkedCallback* is_marked_callback,
                                                void* arg) {
  // klass can be the class of the old object if the visitor already updated the class of ref.
  DCHECK(klass != nullptr);
  DCHECK(klass->IsTypeOfReferenceClass());
  mirror::HeapReference<mirror::Object>* referent = ref->GetReferentReferenceAddr();
  if (referent->AsMirrorPtr() != nullptr && !is_marked_callback(referent, arg)) {
    Thread* self = Thread::Current();
    // TODO: Remove these locks, and use atomic stacks for storing references?
    // We need to check that the references haven't already been enqueued since we can end up
    // scanning the same reference multiple times due to dirty cards.
    if (klass->IsSoftReferenceClass()) {
      soft_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
    } else if (klass->IsWeakReferenceClass()) {
      weak_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
    } else if (klass->IsFinalizerReferenceClass()) {
      finalizer_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
    } else if (klass->IsPhantomReferenceClass()) {
      phantom_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
    } else {
      LOG(FATAL) << "Invalid reference type " << PrettyClass(klass) << " " << std::hex
                 << klass->GetAccessFlags();
    }
  }
}

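// Visits the root of the cleared references list, e.g. so that a moving GC can update the
// address of the list head.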
void ReferenceProcessor::UpdateRoots(IsMarkedCallback* callback, void* arg) {
  cleared_references_.UpdateRoots(callback, arg);
}

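// Hands the cleared references over to the Java side by calling ReferenceQueue.add. This
// invokes Java code, hence the caller must not hold the mutator lock.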
void ReferenceProcessor::EnqueueClearedReferences(Thread* self) {
  Locks::mutator_lock_->AssertNotHeld(self);
  if (!cleared_references_.IsEmpty()) {
    // When the runtime isn't started there are no reference queues to care about, so ignore.
    if (LIKELY(Runtime::Current()->IsStarted())) {
      ScopedObjectAccess soa(self);
      ScopedLocalRef<jobject> arg(self->GetJniEnv(),
                                  soa.AddLocalReference<jobject>(cleared_references_.GetList()));
      jvalue args[1];
      args[0].l = arg.get();
      InvokeWithJValues(soa, nullptr, WellKnownClasses::java_lang_ref_ReferenceQueue_add, args);
    }
    cleared_references_.Clear();
  }
}

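// If the given finalizer reference has not yet been enqueued, makes its pending-next field point
// to itself (a one-element circular list) and returns true; returns false if it was already
// enqueued. Blocks while reference processing is in progress.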
bool ReferenceProcessor::MakeCircularListIfUnenqueued(mirror::FinalizerReference* reference) {
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::reference_processor_lock_);
  // Wait until we are done processing references.
  while (SlowPathEnabled()) {
    condition_.Wait(self);
  }
  // At this point, since the sentinel of the reference is live, it is guaranteed not to be
  // enqueued if we just finished processing references. Otherwise, we may be in the main GC
  // phase; since we are holding the reference processor lock, reference processing can't begin.
  // The GC could have just enqueued the reference on one of its internal queues, but holding the
  // finalizer_reference_queue_ lock prevents that race as well.
  MutexLock mu2(self, *Locks::reference_queue_finalizer_references_lock_);
  if (!reference->IsEnqueued()) {
    CHECK(reference->IsFinalizerReferenceInstance());
    if (Runtime::Current()->IsActiveTransaction()) {
      reference->SetPendingNext<true>(reference);
    } else {
      reference->SetPendingNext<false>(reference);
    }
    return true;
  }
  return false;
}

}  // namespace gc
}  // namespace art