/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "reference_processor.h"

#include "mirror/object-inl.h"
#include "mirror/reference.h"
#include "mirror/reference-inl.h"
#include "reference_processor-inl.h"
#include "reflection.h"
#include "ScopedLocalRef.h"
#include "scoped_thread_state_change.h"
#include "task_processor.h"
#include "well_known_classes.h"

namespace art {
namespace gc {

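// If true, hand the cleared reference list to the Heap's TaskProcessor so that the Java-side
// ReferenceQueue.add runs asynchronously; if false, run it synchronously on the enqueuing thread.
// See EnqueueClearedReferences below.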
static constexpr bool kAsyncReferenceQueueAdd = false;

ReferenceProcessor::ReferenceProcessor()
    : process_references_args_(nullptr, nullptr, nullptr),
      preserving_references_(false),
      condition_("reference processor condition", *Locks::reference_processor_lock_),
      soft_reference_queue_(Locks::reference_queue_soft_references_lock_),
      weak_reference_queue_(Locks::reference_queue_weak_references_lock_),
      finalizer_reference_queue_(Locks::reference_queue_finalizer_references_lock_),
      phantom_reference_queue_(Locks::reference_queue_phantom_references_lock_),
      cleared_references_(Locks::reference_queue_cleared_references_lock_) {
}

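// The slow path flag lives on the java.lang.ref.Reference class object. While it is set, mutator
// calls to Reference.get() divert into GetReferent() below instead of reading the referent field
// directly.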
void ReferenceProcessor::EnableSlowPath() {
  mirror::Reference::GetJavaLangRefReference()->SetSlowPath(true);
}

void ReferenceProcessor::DisableSlowPath(Thread* self) {
  mirror::Reference::GetJavaLangRefReference()->SetSlowPath(false);
  condition_.Broadcast(self);
}

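// Slow path for Reference.get() while reference processing may be running concurrently. Returns
// the referent only once it is safe to hand out (already cleared, or marked while the GC is not
// preserving references); otherwise blocks until reference processing makes progress.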
mirror::Object* ReferenceProcessor::GetReferent(Thread* self, mirror::Reference* reference) {
  mirror::Object* const referent = reference->GetReferent();
  // If the referent is null then it is already cleared, so we can just return null since there is
  // no scenario where it becomes non-null during the reference processing phase.
  if (UNLIKELY(!SlowPathEnabled()) || referent == nullptr) {
    return referent;
  }
  MutexLock mu(self, *Locks::reference_processor_lock_);
  while (SlowPathEnabled()) {
    mirror::HeapReference<mirror::Object>* const referent_addr =
        reference->GetReferentReferenceAddr();
    // If the referent became cleared, return it. No barrier is needed since thread roots can't
    // get updated until after we leave the function due to holding the mutator lock.
    if (referent_addr->AsMirrorPtr() == nullptr) {
      return nullptr;
    }
    // Try to see if the referent is already marked by using the is_marked_callback. We can return
    // it to the mutator as long as the GC is not preserving references.
    IsHeapReferenceMarkedCallback* const is_marked_callback =
        process_references_args_.is_marked_callback_;
    if (LIKELY(is_marked_callback != nullptr)) {
      // If it's null it means not marked, but it could become marked if the referent is reachable
      // by finalizer referents. So we cannot return in this case and must block. Otherwise, we
      // can return it to the mutator as long as the GC is not preserving references, in which
      // case only black nodes can be safely returned. If the GC is preserving references, the
      // mutator could take a white field from a grey or white node and move it somewhere else
      // in the heap, causing corruption since this field would get swept.
      if (is_marked_callback(referent_addr, process_references_args_.arg_)) {
        if (!preserving_references_ ||
            (LIKELY(!reference->IsFinalizerReferenceInstance()) && !reference->IsEnqueued())) {
          return referent_addr->AsMirrorPtr();
        }
      }
    }
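    // Block until reference processing makes progress; woken by the Broadcast in DisableSlowPath
    // or StopPreservingReferences.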
    condition_.WaitHoldingLocks(self);
  }
  return reference->GetReferent();
}

bool ReferenceProcessor::PreserveSoftReferenceCallback(mirror::HeapReference<mirror::Object>* obj,
                                                       void* arg) {
  auto* const args = reinterpret_cast<ProcessReferencesArgs*>(arg);
  // TODO: Add smarter logic for preserving soft references.
  mirror::Object* new_obj = args->mark_callback_(obj->AsMirrorPtr(), args->arg_);
  DCHECK(new_obj != nullptr);
  obj->Assign(new_obj);
  return true;
}

void ReferenceProcessor::StartPreservingReferences(Thread* self) {
  MutexLock mu(self, *Locks::reference_processor_lock_);
  preserving_references_ = true;
}

void ReferenceProcessor::StopPreservingReferences(Thread* self) {
  MutexLock mu(self, *Locks::reference_processor_lock_);
  preserving_references_ = false;
  // We are done preserving references; threads blocked in GetReferent may now see a marked
  // referent.
  condition_.Broadcast(self);
}

// Process reference class instances and schedule finalizations.
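// The phases, in order: forward soft references unless we are clearing them; clear soft and weak
// references with white referents; enqueue finalizer references with white referents and mark
// through their referents; clear the soft and weak references that just became reachable through
// finalizers; finally, clear white phantom references.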
void ReferenceProcessor::ProcessReferences(bool concurrent, TimingLogger* timings,
                                           bool clear_soft_references,
                                           IsHeapReferenceMarkedCallback* is_marked_callback,
                                           MarkObjectCallback* mark_object_callback,
                                           ProcessMarkStackCallback* process_mark_stack_callback,
                                           void* arg) {
  TimingLogger::ScopedTiming t(concurrent ? __FUNCTION__ : "(Paused)ProcessReferences", timings);
  Thread* self = Thread::Current();
  {
    MutexLock mu(self, *Locks::reference_processor_lock_);
    process_references_args_.is_marked_callback_ = is_marked_callback;
    process_references_args_.mark_callback_ = mark_object_callback;
    process_references_args_.arg_ = arg;
    CHECK_EQ(SlowPathEnabled(), concurrent) << "Slow path must be enabled iff concurrent";
  }
  // Unless required to clear soft references with white references, preserve some white referents.
  if (!clear_soft_references) {
    TimingLogger::ScopedTiming split(concurrent ? "ForwardSoftReferences" :
        "(Paused)ForwardSoftReferences", timings);
    if (concurrent) {
      StartPreservingReferences(self);
    }
    soft_reference_queue_.ForwardSoftReferences(&PreserveSoftReferenceCallback,
                                                &process_references_args_);
    process_mark_stack_callback(arg);
    if (concurrent) {
      StopPreservingReferences(self);
    }
  }
  // Clear all remaining soft and weak references with white referents.
  soft_reference_queue_.ClearWhiteReferences(&cleared_references_, is_marked_callback, arg);
  weak_reference_queue_.ClearWhiteReferences(&cleared_references_, is_marked_callback, arg);
  {
    TimingLogger::ScopedTiming t2(concurrent ? "EnqueueFinalizerReferences" :
        "(Paused)EnqueueFinalizerReferences", timings);
    if (concurrent) {
      StartPreservingReferences(self);
    }
    // Preserve all white objects with finalize methods and schedule them for finalization.
    finalizer_reference_queue_.EnqueueFinalizerReferences(&cleared_references_, is_marked_callback,
                                                          mark_object_callback, arg);
    process_mark_stack_callback(arg);
    if (concurrent) {
      StopPreservingReferences(self);
    }
  }
  // Clear all finalizer referent reachable soft and weak references with white referents.
  soft_reference_queue_.ClearWhiteReferences(&cleared_references_, is_marked_callback, arg);
  weak_reference_queue_.ClearWhiteReferences(&cleared_references_, is_marked_callback, arg);
  // Clear all phantom references with white referents.
  phantom_reference_queue_.ClearWhiteReferences(&cleared_references_, is_marked_callback, arg);
  // At this point all reference queues other than the cleared references should be empty.
  DCHECK(soft_reference_queue_.IsEmpty());
  DCHECK(weak_reference_queue_.IsEmpty());
  DCHECK(finalizer_reference_queue_.IsEmpty());
  DCHECK(phantom_reference_queue_.IsEmpty());
  {
    MutexLock mu(self, *Locks::reference_processor_lock_);
    // Need to always do this since the next GC may be concurrent. Doing this only for concurrent
    // GCs could result in a stale is_marked_callback_ being called before the reference
    // processing starts since there is a small window of time where slow_path_enabled_ is enabled
    // but the callback isn't yet set.
    process_references_args_.is_marked_callback_ = nullptr;
    if (concurrent) {
      // Done processing, disable the slow path and broadcast to the waiters.
      DisableSlowPath(self);
    }
  }
}

// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
void ReferenceProcessor::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref,
                                                IsHeapReferenceMarkedCallback* is_marked_callback,
                                                void* arg) {
  // klass can be the class of the old object if the visitor already updated the class of ref.
  DCHECK(klass != nullptr);
  DCHECK(klass->IsTypeOfReferenceClass());
  mirror::HeapReference<mirror::Object>* referent = ref->GetReferentReferenceAddr();
  if (referent->AsMirrorPtr() != nullptr && !is_marked_callback(referent, arg)) {
    Thread* self = Thread::Current();
    // TODO: Remove these locks, and use atomic stacks for storing references?
    // We need to check that the references haven't already been enqueued since we can end up
    // scanning the same reference multiple times due to dirty cards.
    if (klass->IsSoftReferenceClass()) {
      soft_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
    } else if (klass->IsWeakReferenceClass()) {
      weak_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
    } else if (klass->IsFinalizerReferenceClass()) {
      finalizer_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
    } else if (klass->IsPhantomReferenceClass()) {
      phantom_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
    } else {
      LOG(FATAL) << "Invalid reference type " << PrettyClass(klass) << " " << std::hex
                 << klass->GetAccessFlags();
    }
  }
}

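// Visits and updates the head of the cleared references list, e.g. so that a moving collector
// can relocate it.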
void ReferenceProcessor::UpdateRoots(IsMarkedCallback* callback, void* arg) {
  cleared_references_.UpdateRoots(callback, arg);
}

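// Heap task that hands a global ref to the cleared reference list to the Java side by invoking
// java.lang.ref.ReferenceQueue.add, then deletes the global ref. Run either synchronously or via
// the heap's TaskProcessor depending on kAsyncReferenceQueueAdd.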
class ClearedReferenceTask : public HeapTask {
 public:
  explicit ClearedReferenceTask(jobject cleared_references)
      : HeapTask(NanoTime()), cleared_references_(cleared_references) {
  }
  virtual void Run(Thread* thread) {
    ScopedObjectAccess soa(thread);
    jvalue args[1];
    args[0].l = cleared_references_;
    InvokeWithJValues(soa, nullptr, WellKnownClasses::java_lang_ref_ReferenceQueue_add, args);
    soa.Env()->DeleteGlobalRef(cleared_references_);
  }

 private:
  const jobject cleared_references_;
};

void ReferenceProcessor::EnqueueClearedReferences(Thread* self) {
  Locks::mutator_lock_->AssertNotHeld(self);
  // When the runtime isn't started there are no reference queues to care about, so ignore.
  if (!cleared_references_.IsEmpty()) {
    if (LIKELY(Runtime::Current()->IsStarted())) {
      jobject cleared_references;
      {
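        // Hold the mutator lock so that it is safe to read the head of the cleared list while
        // wrapping it in the global ref that keeps it alive for the Java-side add.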
        ReaderMutexLock mu(self, *Locks::mutator_lock_);
        cleared_references = self->GetJniEnv()->vm->AddGlobalRef(
            self, cleared_references_.GetList());
      }
      if (kAsyncReferenceQueueAdd) {
        // TODO: This can cause RunFinalization to terminate before newly freed objects are
        // finalized since they may not be enqueued by the time RunFinalization starts.
        Runtime::Current()->GetHeap()->GetTaskProcessor()->AddTask(
            self, new ClearedReferenceTask(cleared_references));
      } else {
        ClearedReferenceTask task(cleared_references);
        task.Run(self);
      }
    }
    cleared_references_.Clear();
  }
}
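// Makes the given finalizer reference into a singleton circular list (pending-next pointing at
// itself) if the GC has not already enqueued it, waiting for any in-progress reference
// processing to complete first. Returns true if the list was made circular.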
bool ReferenceProcessor::MakeCircularListIfUnenqueued(mirror::FinalizerReference* reference) {
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::reference_processor_lock_);
  // Wait until we are done processing references.
  while (SlowPathEnabled()) {
    condition_.WaitHoldingLocks(self);
  }
  // At this point, since the sentinel of the reference is live, it is guaranteed to not be
  // enqueued if we just finished processing references. Otherwise, the main GC phase may be in
  // progress. Since we are holding the reference processor lock, reference processing can't
  // begin. The GC could have just enqueued the reference on one of the internal GC queues, but
  // since we hold the finalizer_reference_queue_ lock, that race is also prevented.
  MutexLock mu2(self, *Locks::reference_queue_finalizer_references_lock_);
  if (!reference->IsEnqueued()) {
    CHECK(reference->IsFinalizerReferenceInstance());
    if (Runtime::Current()->IsActiveTransaction()) {
      reference->SetPendingNext<true>(reference);
    } else {
      reference->SetPendingNext<false>(reference);
    }
    return true;
  }
  return false;
}

}  // namespace gc
}  // namespace art