/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "reference_processor.h"

#include "mirror/object-inl.h"
#include "mirror/reference-inl.h"
#include "reflection.h"
#include "ScopedLocalRef.h"
#include "scoped_thread_state_change.h"
#include "well_known_classes.h"

namespace art {
namespace gc {

ReferenceProcessor::ReferenceProcessor()
    : process_references_args_(nullptr, nullptr, nullptr), slow_path_enabled_(false),
      preserving_references_(false), lock_("reference processor lock"),
      condition_("reference processor condition", lock_) {
}

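// Enables the GetReferent slow path. The GC must do this before concurrent reference processing
// (see the CHECK in ProcessReferences); it requires the mutator lock to be exclusively held.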
void ReferenceProcessor::EnableSlowPath() {
  Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
  slow_path_enabled_ = true;
}

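// Disables the slow path and wakes up any mutators blocked in GetReferent. Called with lock_ held
// once ProcessReferences has finished.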
void ReferenceProcessor::DisableSlowPath(Thread* self) {
  slow_path_enabled_ = false;
  // Set to null so that GetReferent knows not to attempt to use the callback for checking whether
  // referents are marked.
  process_references_args_.is_marked_callback_ = nullptr;
  condition_.Broadcast(self);
}

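// Returns the referent of a java.lang.ref.Reference on behalf of a mutator. While the slow path is
// enabled, a non-null referent is only handed back once the GC reports it as marked; otherwise the
// caller blocks on condition_ until reference processing completes or the referent becomes marked.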
mirror::Object* ReferenceProcessor::GetReferent(Thread* self, mirror::Reference* reference) {
  mirror::Object* const referent = reference->GetReferent();
  if (LIKELY(!slow_path_enabled_)) {
    return referent;
  }
  // Another fast path: if the referent is already cleared, we can just return null since there is
  // no scenario in which it becomes non-null again.
  if (referent == nullptr) {
    return nullptr;
  }
  MutexLock mu(self, lock_);
  while (slow_path_enabled_) {
    // Try to see if the referent is already marked by using the is_marked_callback. We can return
    // it to the mutator as long as the GC is not preserving references. If the GC is preserving
    // references, the mutator could take a white field and move it somewhere else in the heap,
    // causing corruption since this field would get swept.
    IsMarkedCallback* const is_marked_callback = process_references_args_.is_marked_callback_;
    if (!preserving_references_ && is_marked_callback != nullptr) {
      mirror::Object* const referent = reference->GetReferent();
      mirror::Object* const obj = is_marked_callback(referent, process_references_args_.arg_);
      // If it's null it means not marked, but the referent could still become marked later if it
      // is reachable from a finalizer referent. So we cannot return null here and must block.
      if (obj != nullptr) {
        return obj;
      }
    }
    condition_.Wait(self);
  }
  return reference->GetReferent();
}

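// Callback handed to soft_reference_queue_.PreserveSomeSoftReferences; it marks the soft referent
// via the GC's mark callback so the corresponding soft reference is not cleared this cycle.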
mirror::Object* ReferenceProcessor::PreserveSoftReferenceCallback(mirror::Object* obj, void* arg) {
  auto* const args = reinterpret_cast<ProcessReferencesArgs*>(arg);
  // TODO: Do not preserve all soft references; only preserve a subset of them.
  return args->mark_callback_(obj, args->arg_);
}

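// While preserving_references_ is set, the GC may still be marking additional referents (soft
// reference preservation and finalizer enqueuing), so GetReferent blocks instead of trusting
// results from is_marked_callback.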
void ReferenceProcessor::StartPreservingReferences(Thread* self) {
  MutexLock mu(self, lock_);
  preserving_references_ = true;
}

void ReferenceProcessor::StopPreservingReferences(Thread* self) {
  MutexLock mu(self, lock_);
  preserving_references_ = false;
  // We are done preserving references; threads blocked in GetReferent may now see a marked
  // referent.
  condition_.Broadcast(self);
}

// Process reference class instances and schedule finalizations.
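// The phases below are: optionally preserve some soft references, clear white soft and weak
// references, enqueue finalizer references (marking their referents), clear the soft and weak
// references that are still white, clear white phantom references, and finally disable the slow
// path when running concurrently.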
void ReferenceProcessor::ProcessReferences(bool concurrent, TimingLogger* timings,
                                           bool clear_soft_references,
                                           IsMarkedCallback* is_marked_callback,
                                           MarkObjectCallback* mark_object_callback,
                                           ProcessMarkStackCallback* process_mark_stack_callback,
                                           void* arg) {
  Thread* self = Thread::Current();
  {
    MutexLock mu(self, lock_);
    process_references_args_.is_marked_callback_ = is_marked_callback;
    process_references_args_.mark_callback_ = mark_object_callback;
    process_references_args_.arg_ = arg;
  }
  if (concurrent) {
    MutexLock mu(self, lock_);
    CHECK(slow_path_enabled_) << "Slow path must be enabled for concurrent reference processing";
    timings->StartSplit("ProcessReferences");
  } else {
    timings->StartSplit("(Paused)ProcessReferences");
  }
  // Unless required to clear soft references with white references, preserve some white referents.
  if (!clear_soft_references) {
    TimingLogger::ScopedSplit split(concurrent ? "PreserveSomeSoftReferences" :
                                                 "(Paused)PreserveSomeSoftReferences", timings);
    if (concurrent) {
      StartPreservingReferences(self);
    }
    // References with a marked referent are removed from the list.
    soft_reference_queue_.PreserveSomeSoftReferences(&PreserveSoftReferenceCallback,
                                                     &process_references_args_);

    process_mark_stack_callback(arg);
    if (concurrent) {
      StopPreservingReferences(self);
    }
  }
  // Clear all remaining soft and weak references with white referents.
  soft_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
  weak_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
  {
    TimingLogger::ScopedSplit split(concurrent ? "EnqueueFinalizerReferences" :
                                                 "(Paused)EnqueueFinalizerReferences", timings);
    if (concurrent) {
      StartPreservingReferences(self);
    }
    // Preserve all white objects with finalize methods and schedule them for finalization.
    finalizer_reference_queue_.EnqueueFinalizerReferences(cleared_references_, is_marked_callback,
                                                          mark_object_callback, arg);
    process_mark_stack_callback(arg);
    if (concurrent) {
      StopPreservingReferences(self);
    }
  }
  // Clear all finalizer-referent-reachable soft and weak references with white referents.
  soft_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
  weak_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
  // Clear all phantom references with white referents.
  phantom_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
  // At this point all reference queues other than the cleared references should be empty.
  DCHECK(soft_reference_queue_.IsEmpty());
  DCHECK(weak_reference_queue_.IsEmpty());
  DCHECK(finalizer_reference_queue_.IsEmpty());
  DCHECK(phantom_reference_queue_.IsEmpty());
  if (concurrent) {
    MutexLock mu(self, lock_);
    // Done processing; disable the slow path and broadcast to the waiters.
    DisableSlowPath(self);
  }
  timings->EndSplit();
}

// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
void ReferenceProcessor::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref,
                                                IsMarkedCallback is_marked_callback, void* arg) {
  // klass can be the class of the old object if the visitor already updated the class of ref.
  DCHECK(klass->IsReferenceClass());
  mirror::Object* referent = ref->GetReferent();
  if (referent != nullptr) {
    mirror::Object* forward_address = is_marked_callback(referent, arg);
    // Null means that the object is not currently marked.
    if (forward_address == nullptr) {
      Thread* self = Thread::Current();
      // TODO: Remove these locks, and use atomic stacks for storing references?
      // We need to check that the references haven't already been enqueued since we can end up
      // scanning the same reference multiple times due to dirty cards.
      if (klass->IsSoftReferenceClass()) {
        soft_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
      } else if (klass->IsWeakReferenceClass()) {
        weak_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
      } else if (klass->IsFinalizerReferenceClass()) {
        finalizer_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
      } else if (klass->IsPhantomReferenceClass()) {
        phantom_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
      } else {
        LOG(FATAL) << "Invalid reference type " << PrettyClass(klass) << " " << std::hex
                   << klass->GetAccessFlags();
      }
    } else if (referent != forward_address) {
      // Referent is already marked and we need to update it.
      ref->SetReferent<false>(forward_address);
    }
  }
}

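// Hands the cleared references over to the Java side by calling ReferenceQueue.add on the head of
// the cleared list, then empties the list. Must not be called with the mutator lock held since it
// invokes managed code.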
void ReferenceProcessor::EnqueueClearedReferences() {
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertNotHeld(self);
  if (!cleared_references_.IsEmpty()) {
    // When a runtime isn't started, there are no reference queues to care about, so ignore.
    if (LIKELY(Runtime::Current()->IsStarted())) {
      ScopedObjectAccess soa(self);
      ScopedLocalRef<jobject> arg(self->GetJniEnv(),
                                  soa.AddLocalReference<jobject>(cleared_references_.GetList()));
      jvalue args[1];
      args[0].l = arg.get();
      InvokeWithJValues(soa, nullptr, WellKnownClasses::java_lang_ref_ReferenceQueue_add, args);
    }
    cleared_references_.Clear();
  }
}

}  // namespace gc
}  // namespace art