/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "reference_processor.h"

#include "mirror/object-inl.h"
#include "mirror/reference.h"
#include "mirror/reference-inl.h"
#include "reference_processor-inl.h"
#include "reflection.h"
#include "ScopedLocalRef.h"
#include "scoped_thread_state_change.h"
#include "well_known_classes.h"

namespace art {
namespace gc {

ReferenceProcessor::ReferenceProcessor()
    : process_references_args_(nullptr, nullptr, nullptr),
      preserving_references_(false),
      condition_("reference processor condition", *Locks::reference_processor_lock_),
      soft_reference_queue_(Locks::reference_queue_soft_references_lock_),
      weak_reference_queue_(Locks::reference_queue_weak_references_lock_),
      finalizer_reference_queue_(Locks::reference_queue_finalizer_references_lock_),
      phantom_reference_queue_(Locks::reference_queue_phantom_references_lock_),
      cleared_references_(Locks::reference_queue_cleared_references_lock_) {
}

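// Set the slow path flag on the java.lang.ref.Reference class; while it is set, calls to
// Reference.getReferent() are routed through the runtime slow path, GetReferent() below.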
void ReferenceProcessor::EnableSlowPath() {
  mirror::Reference::GetJavaLangRefReference()->SetSlowPath(true);
}

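// Clear the slow path flag and wake up any mutators blocked in GetReferent().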
void ReferenceProcessor::DisableSlowPath(Thread* self) {
  mirror::Reference::GetJavaLangRefReference()->SetSlowPath(false);
  condition_.Broadcast(self);
}

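// Slow path of Reference.getReferent(): returns the referent once it is safe to hand out to the
// mutator, blocking on condition_ while its mark state cannot be determined.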
mirror::Object* ReferenceProcessor::GetReferent(Thread* self, mirror::Reference* reference) {
  mirror::Object* const referent = reference->GetReferent();
  // If the referent is null then it is already cleared, so we can just return null since there is
  // no scenario where it becomes non-null during the reference processing phase.
  if (UNLIKELY(!SlowPathEnabled()) || referent == nullptr) {
    return referent;
  }
  MutexLock mu(self, *Locks::reference_processor_lock_);
  while (SlowPathEnabled()) {
    mirror::HeapReference<mirror::Object>* const referent_addr =
        reference->GetReferentReferenceAddr();
    // If the referent has been cleared, return null. No barrier is needed since thread roots
    // can't get updated until after we leave the function, because we hold the mutator lock.
    if (referent_addr->AsMirrorPtr() == nullptr) {
      return nullptr;
    }
    // Try to see if the referent is already marked by using the is_marked_callback. We can return
    // it to the mutator as long as the GC is not preserving references.
    IsHeapReferenceMarkedCallback* const is_marked_callback =
        process_references_args_.is_marked_callback_;
    if (LIKELY(is_marked_callback != nullptr)) {
      // A false result means the referent is not marked, but it could still become marked if it
      // is reachable through finalizer referents, so we cannot return it in this case and must
      // block. Otherwise, we can return the referent to the mutator as long as the GC is not
      // preserving references, in which case only black nodes can be safely returned. If the GC
      // is preserving references, the mutator could take a white field from a grey or white node
      // and move it somewhere else in the heap, causing corruption since this field would get
      // swept.
      if (is_marked_callback(referent_addr, process_references_args_.arg_)) {
        if (!preserving_references_ ||
            (LIKELY(!reference->IsFinalizerReferenceInstance()) && !reference->IsEnqueued())) {
          return referent_addr->AsMirrorPtr();
        }
      }
    }
    condition_.WaitHoldingLocks(self);
  }
  return reference->GetReferent();
}

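// Soft reference preservation callback: marks the referent through mark_callback_ and updates
// the heap reference with the (possibly moved) marked object.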
bool ReferenceProcessor::PreserveSoftReferenceCallback(mirror::HeapReference<mirror::Object>* obj,
                                                       void* arg) {
  auto* const args = reinterpret_cast<ProcessReferencesArgs*>(arg);
  // TODO: Add smarter logic for preserving soft references.
  mirror::Object* new_obj = args->mark_callback_(obj->AsMirrorPtr(), args->arg_);
  DCHECK(new_obj != nullptr);
  obj->Assign(new_obj);
  return true;
}

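// Enter a window during which white referents may still become marked (e.g. while soft or
// finalizer references are being preserved). While preserving_references_ is set, GetReferent()
// only returns marked referents of references that are neither finalizer references nor already
// enqueued, and blocks otherwise.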
void ReferenceProcessor::StartPreservingReferences(Thread* self) {
  MutexLock mu(self, *Locks::reference_processor_lock_);
  preserving_references_ = true;
}

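// Close the window opened by StartPreservingReferences() and wake up blocked mutators.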
void ReferenceProcessor::StopPreservingReferences(Thread* self) {
  MutexLock mu(self, *Locks::reference_processor_lock_);
  preserving_references_ = false;
  // We are done preserving references; threads blocked in GetReferent() may now see a marked
  // referent.
  condition_.Broadcast(self);
}

// Process reference class instances and schedule finalizations.
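// The phases below run in order: (1) preserve softly reachable referents unless soft references
// are being cleared, (2) clear soft and weak references with white referents, (3) enqueue
// finalizer references with white referents and preserve their referents for finalization,
// (4) clear soft and weak references that only finalizers kept reachable, (5) clear phantom
// references with white referents.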
void ReferenceProcessor::ProcessReferences(bool concurrent, TimingLogger* timings,
                                           bool clear_soft_references,
                                           IsHeapReferenceMarkedCallback* is_marked_callback,
                                           MarkObjectCallback* mark_object_callback,
                                           ProcessMarkStackCallback* process_mark_stack_callback,
                                           void* arg) {
  TimingLogger::ScopedTiming t(concurrent ? __FUNCTION__ : "(Paused)ProcessReferences", timings);
  Thread* self = Thread::Current();
  {
    MutexLock mu(self, *Locks::reference_processor_lock_);
    process_references_args_.is_marked_callback_ = is_marked_callback;
    process_references_args_.mark_callback_ = mark_object_callback;
    process_references_args_.arg_ = arg;
    CHECK_EQ(SlowPathEnabled(), concurrent) << "Slow path must be enabled iff concurrent";
  }
  // Unless required to clear soft references with white referents, preserve some white referents.
  if (!clear_soft_references) {
    TimingLogger::ScopedTiming split(concurrent ? "ForwardSoftReferences" :
                                     "(Paused)ForwardSoftReferences", timings);
    if (concurrent) {
      StartPreservingReferences(self);
    }
    soft_reference_queue_.ForwardSoftReferences(&PreserveSoftReferenceCallback,
                                                &process_references_args_);
    process_mark_stack_callback(arg);
    if (concurrent) {
      StopPreservingReferences(self);
    }
  }
  // Clear all remaining soft and weak references with white referents.
  soft_reference_queue_.ClearWhiteReferences(&cleared_references_, is_marked_callback, arg);
  weak_reference_queue_.ClearWhiteReferences(&cleared_references_, is_marked_callback, arg);
  {
    TimingLogger::ScopedTiming t(concurrent ? "EnqueueFinalizerReferences" :
                                 "(Paused)EnqueueFinalizerReferences", timings);
    if (concurrent) {
      StartPreservingReferences(self);
    }
    // Preserve all white objects with finalize methods and schedule them for finalization.
    finalizer_reference_queue_.EnqueueFinalizerReferences(&cleared_references_, is_marked_callback,
                                                          mark_object_callback, arg);
    process_mark_stack_callback(arg);
    if (concurrent) {
      StopPreservingReferences(self);
    }
  }
  // Clear all finalizer-referent-reachable soft and weak references with white referents.
  soft_reference_queue_.ClearWhiteReferences(&cleared_references_, is_marked_callback, arg);
  weak_reference_queue_.ClearWhiteReferences(&cleared_references_, is_marked_callback, arg);
  // Clear all phantom references with white referents.
  phantom_reference_queue_.ClearWhiteReferences(&cleared_references_, is_marked_callback, arg);
  // At this point all reference queues other than the cleared references should be empty.
  DCHECK(soft_reference_queue_.IsEmpty());
  DCHECK(weak_reference_queue_.IsEmpty());
  DCHECK(finalizer_reference_queue_.IsEmpty());
  DCHECK(phantom_reference_queue_.IsEmpty());
  {
    MutexLock mu(self, *Locks::reference_processor_lock_);
    // Need to always clear the callback since the next GC may be concurrent. Clearing it only for
    // concurrent GCs could result in a stale is_marked_callback_ being called before reference
    // processing starts, since there is a small window of time where the slow path is enabled but
    // the callback isn't yet set.
    process_references_args_.is_marked_callback_ = nullptr;
    if (concurrent) {
      // Done processing, disable the slow path and broadcast to the waiters.
      DisableSlowPath(self);
    }
  }
}

// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
void ReferenceProcessor::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref,
                                                IsHeapReferenceMarkedCallback* is_marked_callback,
                                                void* arg) {
  // klass can be the class of the old object if the visitor already updated the class of ref.
  DCHECK(klass != nullptr);
  DCHECK(klass->IsTypeOfReferenceClass());
  mirror::HeapReference<mirror::Object>* referent = ref->GetReferentReferenceAddr();
  if (referent->AsMirrorPtr() != nullptr && !is_marked_callback(referent, arg)) {
    Thread* self = Thread::Current();
    // TODO: Remove these locks, and use atomic stacks for storing references?
    // We need to check that the references haven't already been enqueued since we can end up
    // scanning the same reference multiple times due to dirty cards.
    if (klass->IsSoftReferenceClass()) {
      soft_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
    } else if (klass->IsWeakReferenceClass()) {
      weak_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
    } else if (klass->IsFinalizerReferenceClass()) {
      finalizer_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
    } else if (klass->IsPhantomReferenceClass()) {
      phantom_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
    } else {
      LOG(FATAL) << "Invalid reference type " << PrettyClass(klass) << " " << std::hex
                 << klass->GetAccessFlags();
    }
  }
}

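// Update the root pointing at the cleared references list, e.g. when a moving GC relocates it.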
void ReferenceProcessor::UpdateRoots(IsMarkedCallback* callback, void* arg) {
  cleared_references_.UpdateRoots(callback, arg);
}

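// Hand the cleared references over to managed code by invoking
// java.lang.ref.ReferenceQueue.add() on the collected list.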
void ReferenceProcessor::EnqueueClearedReferences(Thread* self) {
  Locks::mutator_lock_->AssertNotHeld(self);
  if (!cleared_references_.IsEmpty()) {
    // When the runtime isn't started, there are no reference queues to care about, so ignore.
    if (LIKELY(Runtime::Current()->IsStarted())) {
      ScopedObjectAccess soa(self);
      ScopedLocalRef<jobject> arg(self->GetJniEnv(),
                                  soa.AddLocalReference<jobject>(cleared_references_.GetList()));
      jvalue args[1];
      args[0].l = arg.get();
      InvokeWithJValues(soa, nullptr, WellKnownClasses::java_lang_ref_ReferenceQueue_add, args);
    }
    cleared_references_.Clear();
  }
}

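// If |reference| has not yet been enqueued, point its pendingNext field at itself (a circular
// list of one) and return true; otherwise return false. Waits for any in-progress reference
// processing to complete first.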
bool ReferenceProcessor::MakeCircularListIfUnenqueued(mirror::FinalizerReference* reference) {
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::reference_processor_lock_);
  // Wait until we are done processing references.
  while (SlowPathEnabled()) {
    condition_.Wait(self);
  }
  // At this point, since the sentinel of the reference is live, it is guaranteed to not be
  // enqueued if we just finished processing references. Otherwise, we may be doing the main GC
  // phase. Since we are holding the reference processor lock, it guarantees that reference
  // processing can't begin. The GC could have just enqueued the reference on one of the internal
  // GC queues, but since we hold the finalizer_reference_queue_ lock, that race is also
  // prevented.
  MutexLock mu2(self, *Locks::reference_queue_finalizer_references_lock_);
  if (!reference->IsEnqueued()) {
    CHECK(reference->IsFinalizerReferenceInstance());
    if (Runtime::Current()->IsActiveTransaction()) {
      reference->SetPendingNext<true>(reference);
    } else {
      reference->SetPendingNext<false>(reference);
    }
    return true;
  }
  return false;
}

}  // namespace gc
}  // namespace art