/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mark_sweep.h"

#include <functional>
#include <numeric>
#include <climits>
#include <vector>

#include "barrier.h"
#include "card_table.h"
#include "class_loader.h"
#include "dex_cache.h"
#include "heap.h"
#include "indirect_reference_table.h"
#include "intern_table.h"
#include "jni_internal.h"
#include "large_object_space.h"
#include "logging.h"
#include "macros.h"
#include "monitor.h"
#include "object.h"
#include "runtime.h"
#include "space.h"
#include "timing_logger.h"
#include "thread.h"
#include "thread_list.h"
#include "verifier/method_verifier.h"

namespace art {

// Performance options.
static const bool kParallelMarkStack = true;
static const bool kDisableFinger = kParallelMarkStack;
static const bool kUseMarkStackPrefetch = true;

// Profiling and information flags.
static const bool kCountClassesMarked = false;
static const bool kProfileLargeObjects = false;
static const bool kMeasureOverhead = false;
static const bool kCountTasks = false;
static const bool kCountJavaLangRefs = false;

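// Visitor that records the scan "finger" as the bitmap walk advances in address order.
// Objects that become marked at addresses below the finger have already been passed by the
// scan, so they must be pushed on the mark stack instead (see the comment on MarkObject).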
class SetFingerVisitor {
 public:
  SetFingerVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {}

  void operator ()(void* finger) const {
    mark_sweep_->SetFinger(reinterpret_cast<Object*>(finger));
  }

 private:
  MarkSweep* const mark_sweep_;
};

std::string MarkSweep::GetName() const {
  std::ostringstream ss;
  ss << (IsConcurrent() ? "Concurrent" : "") << GetGcType();
  return ss.str();
}

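// Spaces we never collect (the image spaces; subclasses may add others) are made "immune":
// their live bitmaps are bound to their mark bitmaps, so everything in them is already
// considered marked and MarkObject can skip them with a single address range check.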
void MarkSweep::ImmuneSpace(ContinuousSpace* space) {
  // Bind live to mark bitmap if necessary.
  if (space->GetLiveBitmap() != space->GetMarkBitmap()) {
    BindLiveToMarkBitmap(space);
  }

  // Add the space to the immune region.
  if (immune_begin_ == NULL) {
    DCHECK(immune_end_ == NULL);
    SetImmuneRange(reinterpret_cast<Object*>(space->Begin()),
                   reinterpret_cast<Object*>(space->End()));
  } else {
    const Spaces& spaces = GetHeap()->GetSpaces();
    const ContinuousSpace* prev_space = NULL;
    // Find out if the previous space is immune.
    // TODO: C++0x
    for (Spaces::const_iterator it = spaces.begin(); it != spaces.end(); ++it) {
      if (*it == space) {
        break;
      }
      prev_space = *it;
    }

    // If the previous space was immune, then extend the immune region.
    if (prev_space != NULL &&
        immune_begin_ <= reinterpret_cast<Object*>(prev_space->Begin()) &&
        immune_end_ >= reinterpret_cast<Object*>(prev_space->End())) {
      immune_begin_ = std::min(reinterpret_cast<Object*>(space->Begin()), immune_begin_);
      immune_end_ = std::max(reinterpret_cast<Object*>(space->End()), immune_end_);
    }
  }
}

// Bind the live bits to the mark bits of bitmaps based on the gc type.
void MarkSweep::BindBitmaps() {
  Spaces& spaces = GetHeap()->GetSpaces();
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);

  // Mark all of the spaces we never collect as immune.
  for (Spaces::iterator it = spaces.begin(); it != spaces.end(); ++it) {
    ContinuousSpace* space = *it;
    if (space->GetGcRetentionPolicy() == kGcRetentionPolicyNeverCollect) {
      ImmuneSpace(space);
    }
  }
}

MarkSweep::MarkSweep(Heap* heap, bool is_concurrent)
    : GarbageCollector(heap),
      gc_barrier_(new Barrier(0)),
      large_object_lock_("large object lock"),
      mark_stack_expand_lock_("mark stack expand lock"),
      timings_(GetName(), true),
      cumulative_timings_(GetName(), true),
      is_concurrent_(is_concurrent) {
  cumulative_timings_.SetName(GetName());
  ResetCumulativeStatistics();
}

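// The collector runs as a sequence of phases. A rough usage sketch of the expected driver
// (the actual sequencing lives in the GarbageCollector base class, not in this file):
//
//   MarkSweep collector(heap, /* is_concurrent */ true);
//   collector.InitializePhase();
//   collector.MarkingPhase();             // Mutators may run concurrently afterwards.
//   collector.HandleDirtyObjectsPhase();  // Pause: re-mark roots and dirty cards.
//   collector.ReclaimPhase();             // Sweep unmarked objects.
//
// This is a simplified illustration; the real driver also handles locking, pauses and
// retries around these calls.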
void MarkSweep::InitializePhase() {
  mark_stack_ = GetHeap()->mark_stack_.get();
  DCHECK(mark_stack_ != NULL);
  finger_ = NULL;
  SetImmuneRange(NULL, NULL);
  soft_reference_list_ = NULL;
  weak_reference_list_ = NULL;
  finalizer_reference_list_ = NULL;
  phantom_reference_list_ = NULL;
  cleared_reference_list_ = NULL;
  freed_bytes_ = 0;
  freed_objects_ = 0;
  class_count_ = 0;
  array_count_ = 0;
  other_count_ = 0;
  large_object_test_ = 0;
  large_object_mark_ = 0;
  classes_marked_ = 0;
  overhead_time_ = 0;
  work_chunks_created_ = 0;
  work_chunks_deleted_ = 0;
  reference_count_ = 0;
  java_lang_Class_ = Class::GetJavaLangClass();
  CHECK(java_lang_Class_ != NULL);
  FindDefaultMarkBitmap();
  // Mark any concurrent roots as dirty since we need to scan them at least once during this GC.
  Runtime::Current()->DirtyRoots();
  timings_.Reset();
  // Do any pre GC verification.
  heap_->PreGcVerification(this);
}

void MarkSweep::ProcessReferences(Thread* self) {
  ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
  ProcessReferences(&soft_reference_list_, clear_soft_references_, &weak_reference_list_,
                    &finalizer_reference_list_, &phantom_reference_list_);
  timings_.AddSplit("ProcessReferences");
}

bool MarkSweep::HandleDirtyObjectsPhase() {
  Thread* self = Thread::Current();
  ObjectStack* allocation_stack = GetHeap()->allocation_stack_.get();
  Locks::mutator_lock_->AssertExclusiveHeld(self);

  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);

    // Re-mark root set.
    ReMarkRoots();
    timings_.AddSplit("ReMarkRoots");

    // Scan objects on cards dirtied by the mutators while the concurrent mark was running.
    RecursiveMarkDirtyObjects();
  }

  ProcessReferences(self);

  // Only need to do this if we have the card mark verification on, and only during concurrent GC.
  if (GetHeap()->verify_missing_card_marks_) {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // This second sweep makes sure that we don't have any objects in the live stack which point to
    // freed objects, since such stale references would confuse the verification.
    SweepArray(timings_, allocation_stack, false);
  } else {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // We only sweep over the live stack, and the live stack should not intersect with the
    // allocation stack, so it should be safe to un-mark anything in the allocation stack as live.
    heap_->UnMarkAllocStack(GetHeap()->alloc_space_->GetMarkBitmap(),
                            GetHeap()->large_object_space_->GetMarkObjects(),
                            allocation_stack);
    timings_.AddSplit("UnMarkAllocStack");
  }
  return true;
}

bool MarkSweep::IsConcurrent() const {
  return is_concurrent_;
}

void MarkSweep::MarkingPhase() {
  Heap* heap = GetHeap();
  Thread* self = Thread::Current();

  BindBitmaps();
  FindDefaultMarkBitmap();
  timings_.AddSplit("BindBitmaps");

  // Process dirty cards and add dirty cards to mod union tables.
  heap->ProcessCards(timings_);

  // Need to do this before the checkpoint since we don't want any threads to add references to
  // the live stack during the recursive mark.
  heap->SwapStacks();
  timings_.AddSplit("SwapStacks");

  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
    // If we exclusively hold the mutator lock, all threads must be suspended.
    MarkRoots();
    timings_.AddSplit("MarkRoots");
  } else {
    MarkRootsCheckpoint();
    timings_.AddSplit("MarkRootsCheckpoint");
    MarkNonThreadRoots();
    timings_.AddSplit("MarkNonThreadRoots");
  }
  MarkConcurrentRoots();
  timings_.AddSplit("MarkConcurrentRoots");

  heap->UpdateAndMarkModUnion(this, timings_, GetGcType());
  MarkReachableObjects();
}

void MarkSweep::MarkReachableObjects() {
  // Mark everything allocated since the last GC as live so that we can sweep concurrently,
  // knowing that new allocations won't be marked as live.
  ObjectStack* live_stack = heap_->GetLiveStack();
  heap_->MarkAllocStack(heap_->alloc_space_->GetLiveBitmap(),
                        heap_->large_object_space_->GetLiveObjects(),
                        live_stack);
  live_stack->Reset();
  timings_.AddSplit("MarkStackAsLive");
  // Recursively mark all the non-image bits set in the mark bitmap.
  RecursiveMark();
  DisableFinger();
}

void MarkSweep::ReclaimPhase() {
  Thread* self = Thread::Current();

  if (!IsConcurrent()) {
    ProcessReferences(self);
  }

  // Before freeing anything, let's verify the heap.
  if (kIsDebugBuild) {
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    VerifyImageRoots();
  }
  heap_->PreSweepingGcVerification(this);

  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);

    // Reclaim unmarked objects.
    Sweep(timings_, false);

    // Swap the live and mark bitmaps for each space which we modified. This is an
    // optimization that enables us to not clear live bits inside of the sweep. Only swaps unbound
    // bitmaps.
    SwapBitmaps();
    timings_.AddSplit("SwapBitmaps");

    // Unbind the live and mark bitmaps.
    UnBindBitmaps();
  }

  heap_->GrowForUtilization();
  timings_.AddSplit("GrowForUtilization");
}

void MarkSweep::SwapBitmaps() {
  // Swap the live and mark bitmaps for each alloc space. This is needed since sweep re-swaps
  // these bitmaps. The bitmap swapping is an optimization so that we do not need to clear the live
  // bits of dead objects in the live bitmap.
  const GcType gc_type = GetGcType();
  // TODO: C++0x
  Spaces& spaces = heap_->GetSpaces();
  for (Spaces::iterator it = spaces.begin(); it != spaces.end(); ++it) {
    ContinuousSpace* space = *it;
    // We never allocate into zygote spaces.
    if (space->GetGcRetentionPolicy() == kGcRetentionPolicyAlwaysCollect ||
        (gc_type == kGcTypeFull &&
         space->GetGcRetentionPolicy() == kGcRetentionPolicyFullCollect)) {
      SpaceBitmap* live_bitmap = space->GetLiveBitmap();
      SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
      if (live_bitmap != mark_bitmap) {
        heap_->GetLiveBitmap()->ReplaceBitmap(live_bitmap, mark_bitmap);
        heap_->GetMarkBitmap()->ReplaceBitmap(mark_bitmap, live_bitmap);
        space->AsAllocSpace()->SwapBitmaps();
      }
    }
  }
  SwapLargeObjects();
}

void MarkSweep::SwapLargeObjects() {
  LargeObjectSpace* large_object_space = heap_->GetLargeObjectsSpace();
  large_object_space->SwapBitmaps();
  heap_->GetLiveBitmap()->SetLargeObjects(large_object_space->GetLiveObjects());
  heap_->GetMarkBitmap()->SetLargeObjects(large_object_space->GetMarkObjects());
}

void MarkSweep::SetImmuneRange(Object* begin, Object* end) {
  immune_begin_ = begin;
  immune_end_ = end;
}

void MarkSweep::FindDefaultMarkBitmap() {
  const Spaces& spaces = heap_->GetSpaces();
  for (Spaces::const_iterator it = spaces.begin(); it != spaces.end(); ++it) {
    if ((*it)->GetGcRetentionPolicy() == kGcRetentionPolicyAlwaysCollect) {
      current_mark_bitmap_ = (*it)->GetMarkBitmap();
      CHECK(current_mark_bitmap_ != NULL);
      return;
    }
  }
  GetHeap()->DumpSpaces();
  LOG(FATAL) << "Could not find a default mark bitmap";
}

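// Doubles the capacity of the mark stack. If another thread raced us to the lock and already
// expanded the stack, this is a no-op; callers whose push failed simply retry afterwards.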
void MarkSweep::ExpandMarkStack() {
  // Rare case, no need to have Thread::Current be a parameter.
  MutexLock mu(Thread::Current(), mark_stack_expand_lock_);
  if (UNLIKELY(mark_stack_->Size() < mark_stack_->Capacity())) {
    // Someone else acquired the lock and expanded the mark stack before us.
    return;
  }
  std::vector<Object*> temp;
  temp.insert(temp.begin(), mark_stack_->Begin(), mark_stack_->End());
  mark_stack_->Resize(mark_stack_->Capacity() * 2);
  for (size_t i = 0; i < temp.size(); ++i) {
    mark_stack_->PushBack(temp[i]);
  }
}

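// Parallel variant of marking: the mark bit is set with an atomic test-and-set (see
// MarkObjectParallel below) and newly marked objects are pushed with AtomicPushBack. With the
// finger disabled, which is the parallel configuration, every newly marked object is pushed.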
inline void MarkSweep::MarkObjectNonNullParallel(const Object* obj, bool check_finger) {
  DCHECK(obj != NULL);
  if (MarkObjectParallel(obj)) {
    if (kDisableFinger || (check_finger && obj < finger_)) {
      while (UNLIKELY(!mark_stack_->AtomicPushBack(const_cast<Object*>(obj)))) {
        // The only reason a push can fail is that the mark stack is full; expand it and retry.
        ExpandMarkStack();
      }
    }
  }
}

inline void MarkSweep::MarkObjectNonNull(const Object* obj, bool check_finger) {
  DCHECK(obj != NULL);

  if (obj >= immune_begin_ && obj < immune_end_) {
    DCHECK(IsMarked(obj));
    return;
  }

  // Try to take advantage of locality of references within a space, failing this find the space
  // the hard way.
  SpaceBitmap* object_bitmap = current_mark_bitmap_;
  if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
    SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetSpaceBitmap(obj);
    if (new_bitmap != NULL) {
      object_bitmap = new_bitmap;
    } else {
      MarkLargeObject(obj);
      return;
    }
  }

  // This object was not previously marked.
  if (!object_bitmap->Test(obj)) {
    object_bitmap->Set(obj);
    if (kDisableFinger || (check_finger && obj < finger_)) {
      // Do we need to expand the mark stack?
      if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
        ExpandMarkStack();
      }
      // The object must be pushed on to the mark stack.
      mark_stack_->PushBack(const_cast<Object*>(obj));
    }
  }
}

// Rare case, probably not worth inlining since it will increase instruction cache miss rate.
bool MarkSweep::MarkLargeObject(const Object* obj) {
  LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  SpaceSetMap* large_objects = large_object_space->GetMarkObjects();
  if (kProfileLargeObjects) {
    ++large_object_test_;
  }
  if (UNLIKELY(!large_objects->Test(obj))) {
    if (!large_object_space->Contains(obj)) {
      LOG(ERROR) << "Tried to mark " << obj << " not contained by any spaces";
      LOG(ERROR) << "Attempting to see if it's a bad root";
      VerifyRoots();
      LOG(FATAL) << "Can't mark bad root";
    }
    if (kProfileLargeObjects) {
      ++large_object_mark_;
    }
    large_objects->Set(obj);
    // Don't need to check the finger since large objects never contain object references.
    return true;
  }
  return false;
}

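// Thread-safe marking used by the parallel phases: the bitmap bit is set with an atomic
// test-and-set, so two threads racing on the same object push it only once. Large objects are
// still serialized under large_object_lock_ since the large object set is not atomic.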
inline bool MarkSweep::MarkObjectParallel(const Object* obj) {
  DCHECK(obj != NULL);

  if (obj >= immune_begin_ && obj < immune_end_) {
    DCHECK(IsMarked(obj));
    return false;
  }

  // Try to take advantage of locality of references within a space, failing this find the space
  // the hard way.
  SpaceBitmap* object_bitmap = current_mark_bitmap_;
  if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
    SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetSpaceBitmap(obj);
    if (new_bitmap != NULL) {
      object_bitmap = new_bitmap;
    } else {
      // TODO: Remove the Thread::Current here?
      // TODO: Convert this to some kind of atomic marking?
      MutexLock mu(Thread::Current(), large_object_lock_);
      return MarkLargeObject(obj);
    }
  }

  // Return true if the object was not previously marked.
  return !object_bitmap->AtomicTestAndSet(obj);
}

// Used to mark objects when recursing. Recursion is done by moving
// the finger across the bitmaps in address order and marking child
// objects. Any newly-marked objects whose addresses are lower than
// the finger won't be visited by the bitmap scan, so those objects
// need to be added to the mark stack.
void MarkSweep::MarkObject(const Object* obj) {
  if (obj != NULL) {
    MarkObjectNonNull(obj, true);
  }
}

void MarkSweep::MarkRoot(const Object* obj) {
  if (obj != NULL) {
    MarkObjectNonNull(obj, false);
  }
}

void MarkSweep::MarkRootParallelCallback(const Object* root, void* arg) {
  DCHECK(root != NULL);
  DCHECK(arg != NULL);
  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
  mark_sweep->MarkObjectNonNullParallel(root, false);
}

void MarkSweep::MarkObjectCallback(const Object* root, void* arg) {
  DCHECK(root != NULL);
  DCHECK(arg != NULL);
  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
  mark_sweep->MarkObjectNonNull(root, false);
}

void MarkSweep::ReMarkObjectVisitor(const Object* root, void* arg) {
  DCHECK(root != NULL);
  DCHECK(arg != NULL);
  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
  mark_sweep->MarkObjectNonNull(root, true);
}

void MarkSweep::VerifyRootCallback(const Object* root, void* arg, size_t vreg,
                                   const AbstractMethod* method) {
  reinterpret_cast<MarkSweep*>(arg)->VerifyRoot(root, vreg, method);
}

void MarkSweep::VerifyRoot(const Object* root, size_t vreg, const AbstractMethod* method) {
  // See if the root is on any space bitmap.
  if (GetHeap()->GetLiveBitmap()->GetSpaceBitmap(root) == NULL) {
    LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
    if (!large_object_space->Contains(root)) {
      LOG(ERROR) << "Found invalid root: " << root;
      LOG(ERROR) << "VReg: " << vreg;
      if (method != NULL) {
        LOG(ERROR) << "In method " << PrettyMethod(method, true);
      }
    }
  }
}

void MarkSweep::VerifyRoots() {
  Runtime::Current()->GetThreadList()->VerifyRoots(VerifyRootCallback, this);
}

// Marks the non-concurrent roots in the root set; concurrent roots are handled separately.
void MarkSweep::MarkRoots() {
  Runtime::Current()->VisitNonConcurrentRoots(MarkObjectCallback, this);
}

void MarkSweep::MarkNonThreadRoots() {
  Runtime::Current()->VisitNonThreadRoots(MarkObjectCallback, this);
}

void MarkSweep::MarkConcurrentRoots() {
  Runtime::Current()->VisitConcurrentRoots(MarkObjectCallback, this);
}

class CheckObjectVisitor {
 public:
  CheckObjectVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {}

  void operator ()(const Object* obj, const Object* ref, MemberOffset offset, bool is_static) const
      NO_THREAD_SAFETY_ANALYSIS {
    if (kDebugLocking) {
      Locks::heap_bitmap_lock_->AssertSharedHeld(Thread::Current());
    }
    mark_sweep_->CheckReference(obj, ref, offset, is_static);
  }

 private:
  MarkSweep* const mark_sweep_;
};

void MarkSweep::CheckObject(const Object* obj) {
  DCHECK(obj != NULL);
  CheckObjectVisitor visitor(this);
  VisitObjectReferences(obj, visitor);
}

void MarkSweep::VerifyImageRootVisitor(Object* root, void* arg) {
  DCHECK(root != NULL);
  DCHECK(arg != NULL);
  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
  DCHECK(mark_sweep->heap_->GetMarkBitmap()->Test(root));
  mark_sweep->CheckObject(root);
}

void MarkSweep::BindLiveToMarkBitmap(ContinuousSpace* space) {
  CHECK(space->IsAllocSpace());
  DlMallocSpace* alloc_space = space->AsAllocSpace();
  SpaceBitmap* live_bitmap = space->GetLiveBitmap();
  SpaceBitmap* mark_bitmap = alloc_space->mark_bitmap_.release();
  GetHeap()->GetMarkBitmap()->ReplaceBitmap(mark_bitmap, live_bitmap);
  alloc_space->temp_bitmap_.reset(mark_bitmap);
  alloc_space->mark_bitmap_.reset(live_bitmap);
}

class ScanObjectVisitor {
 public:
  ScanObjectVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {}

  // TODO: Fix this when annotalysis works with visitors.
  void operator ()(const Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
    if (kDebugLocking) {
      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    mark_sweep_->ScanObject(obj);
  }

 private:
  MarkSweep* const mark_sweep_;
};

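// Re-scans marked objects sitting on cards at least as dirty as minimum_age; any references
// those objects contain are marked again, which picks up writes the mutators performed after
// the objects were first scanned.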
void MarkSweep::ScanGrayObjects(byte minimum_age) {
  const Spaces& spaces = heap_->GetSpaces();
  CardTable* card_table = heap_->GetCardTable();
  ScanObjectVisitor visitor(this);
  SetFingerVisitor finger_visitor(this);
  // TODO: C++0x auto
  for (Spaces::const_iterator it = spaces.begin(); it != spaces.end(); ++it) {
    ContinuousSpace* space = *it;
    byte* begin = space->Begin();
    byte* end = space->End();
    // Image spaces are handled properly since live == marked for them.
    SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
    card_table->Scan(mark_bitmap, begin, end, visitor, VoidFunctor(), minimum_age);
  }
}

class CheckBitmapVisitor {
 public:
  CheckBitmapVisitor(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {}

  void operator ()(const Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
    if (kDebugLocking) {
      Locks::heap_bitmap_lock_->AssertSharedHeld(Thread::Current());
    }
    DCHECK(obj != NULL);
    mark_sweep_->CheckObject(obj);
  }

 private:
  MarkSweep* mark_sweep_;
};

void MarkSweep::VerifyImageRoots() {
  // Verify roots ensures that all the references inside the image space point to objects which
  // are either in the image space or are marked objects in the alloc space.
  CheckBitmapVisitor visitor(this);
  const Spaces& spaces = heap_->GetSpaces();
  for (Spaces::const_iterator it = spaces.begin(); it != spaces.end(); ++it) {
    if ((*it)->IsImageSpace()) {
      ImageSpace* space = (*it)->AsImageSpace();
      uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
      uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
      SpaceBitmap* live_bitmap = space->GetLiveBitmap();
      DCHECK(live_bitmap != NULL);
      live_bitmap->VisitMarkedRange(begin, end, visitor, VoidFunctor());
    }
  }
}

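// The bitmap walk below visits marked objects in address order while advancing the finger;
// objects marked behind the finger are pushed on the mark stack and drained afterwards by
// ProcessMarkStack. When the finger is disabled, the mark stack alone drives the recursion.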
// Populates the mark stack based on the set of marked objects and
// recursively marks until the mark stack is emptied.
void MarkSweep::RecursiveMark() {
  // RecursiveMark will build the lists of known instances of the Reference classes.
  // See DelayReferenceReferent for details.
  CHECK(soft_reference_list_ == NULL);
  CHECK(weak_reference_list_ == NULL);
  CHECK(finalizer_reference_list_ == NULL);
  CHECK(phantom_reference_list_ == NULL);
  CHECK(cleared_reference_list_ == NULL);

  const bool partial = GetGcType() == kGcTypePartial;
  const Spaces& spaces = heap_->GetSpaces();
  SetFingerVisitor set_finger_visitor(this);
  ScanObjectVisitor scan_visitor(this);
  if (!kDisableFinger) {
    finger_ = NULL;
    for (Spaces::const_iterator it = spaces.begin(); it != spaces.end(); ++it) {
      ContinuousSpace* space = *it;
      if ((space->GetGcRetentionPolicy() == kGcRetentionPolicyAlwaysCollect) ||
          (!partial && space->GetGcRetentionPolicy() == kGcRetentionPolicyFullCollect)) {
        current_mark_bitmap_ = space->GetMarkBitmap();
        if (current_mark_bitmap_ == NULL) {
          GetHeap()->DumpSpaces();
          LOG(FATAL) << "invalid bitmap";
        }
        // This function does not handle heap end increasing, so we must use the space end.
        uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
        uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
        current_mark_bitmap_->VisitMarkedRange(begin, end, scan_visitor, set_finger_visitor);
      }
    }
  }
  DisableFinger();
  timings_.AddSplit("RecursiveMark");
  ProcessMarkStack();
  timings_.AddSplit("ProcessMarkStack");
}

bool MarkSweep::IsMarkedCallback(const Object* object, void* arg) {
  return
      reinterpret_cast<MarkSweep*>(arg)->IsMarked(object) ||
      !reinterpret_cast<MarkSweep*>(arg)->GetHeap()->GetLiveBitmap()->Test(object);
}

void MarkSweep::RecursiveMarkDirtyObjects(byte minimum_age) {
  ScanGrayObjects(minimum_age);
  timings_.AddSplit("ScanGrayObjects");
  ProcessMarkStack();
  timings_.AddSplit("ProcessMarkStack");
}

void MarkSweep::ReMarkRoots() {
  Runtime::Current()->VisitRoots(ReMarkObjectVisitor, this);
}

void MarkSweep::SweepJniWeakGlobals(Heap::IsMarkedTester is_marked, void* arg) {
  JavaVMExt* vm = Runtime::Current()->GetJavaVM();
  MutexLock mu(Thread::Current(), vm->weak_globals_lock);
  IndirectReferenceTable* table = &vm->weak_globals;
  typedef IndirectReferenceTable::iterator It;  // TODO: C++0x auto
  for (It it = table->begin(), end = table->end(); it != end; ++it) {
    const Object** entry = *it;
    if (!is_marked(*entry, arg)) {
      *entry = kClearedJniWeakGlobal;
    }
  }
}

struct ArrayMarkedCheck {
  ObjectStack* live_stack;
  MarkSweep* mark_sweep;
};

// Either marked or not live.
bool MarkSweep::IsMarkedArrayCallback(const Object* object, void* arg) {
  ArrayMarkedCheck* array_check = reinterpret_cast<ArrayMarkedCheck*>(arg);
  if (array_check->mark_sweep->IsMarked(object)) {
    return true;
  }
  ObjectStack* live_stack = array_check->live_stack;
  return std::find(live_stack->Begin(), live_stack->End(), object) == live_stack->End();
}

void MarkSweep::SweepSystemWeaksArray(ObjectStack* allocations) {
  Runtime* runtime = Runtime::Current();
  // The callbacks sweep on !is_marked, where is_marked is the callback, but we want to sweep on
  // !IsMarked && IsLive. So compute !(!IsMarked && IsLive), which is equal to
  // (IsMarked || !IsLive). Or, for swapped bitmaps, (IsLive || !IsMarked).
  ArrayMarkedCheck visitor;
  visitor.live_stack = allocations;
  visitor.mark_sweep = this;
  runtime->GetInternTable()->SweepInternTableWeaks(IsMarkedArrayCallback, &visitor);
  runtime->GetMonitorList()->SweepMonitorList(IsMarkedArrayCallback, &visitor);
  SweepJniWeakGlobals(IsMarkedArrayCallback, &visitor);
}

void MarkSweep::SweepSystemWeaks() {
  Runtime* runtime = Runtime::Current();
  // The callbacks sweep on !is_marked, where is_marked is the callback, but we want to sweep on
  // !IsMarked && IsLive. So compute !(!IsMarked && IsLive), which is equal to
  // (IsMarked || !IsLive). Or, for swapped bitmaps, (IsLive || !IsMarked).
  runtime->GetInternTable()->SweepInternTableWeaks(IsMarkedCallback, this);
  runtime->GetMonitorList()->SweepMonitorList(IsMarkedCallback, this);
  SweepJniWeakGlobals(IsMarkedCallback, this);
}

bool MarkSweep::VerifyIsLiveCallback(const Object* obj, void* arg) {
  reinterpret_cast<MarkSweep*>(arg)->VerifyIsLive(obj);
  // We don't actually want to sweep the object, so let's return "marked".
  return true;
}

void MarkSweep::VerifyIsLive(const Object* obj) {
  Heap* heap = GetHeap();
  if (!heap->GetLiveBitmap()->Test(obj)) {
    LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
    if (!large_object_space->GetLiveObjects()->Test(obj)) {
      if (std::find(heap->allocation_stack_->Begin(), heap->allocation_stack_->End(), obj) ==
          heap->allocation_stack_->End()) {
        // Object not found!
        heap->DumpSpaces();
        LOG(FATAL) << "Found dead object " << obj;
      }
    }
  }
}

void MarkSweep::VerifySystemWeaks() {
  Runtime* runtime = Runtime::Current();
  // Verify system weaks, using a special IsMarked callback which always returns true.
  runtime->GetInternTable()->SweepInternTableWeaks(VerifyIsLiveCallback, this);
  runtime->GetMonitorList()->SweepMonitorList(VerifyIsLiveCallback, this);

  JavaVMExt* vm = runtime->GetJavaVM();
  MutexLock mu(Thread::Current(), vm->weak_globals_lock);
  IndirectReferenceTable* table = &vm->weak_globals;
  typedef IndirectReferenceTable::iterator It;  // TODO: C++0x auto
  for (It it = table->begin(), end = table->end(); it != end; ++it) {
    const Object** entry = *it;
    VerifyIsLive(*entry);
  }
}

struct SweepCallbackContext {
  MarkSweep* mark_sweep;
  AllocSpace* space;
  Thread* self;
};

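// Checkpoint closure run for each mutator thread: the thread's stack and register roots are
// marked in parallel (possibly by the GC thread if the target thread is suspended), then the
// barrier is passed so MarkRootsCheckpoint can tell when every thread has been processed.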
class CheckpointMarkThreadRoots : public Closure {
 public:
  CheckpointMarkThreadRoots(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {}

  virtual void Run(Thread* thread) NO_THREAD_SAFETY_ANALYSIS {
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    thread->VisitRoots(MarkSweep::MarkRootParallelCallback, mark_sweep_);
    mark_sweep_->GetBarrier().Pass(self);
  }

 private:
  MarkSweep* mark_sweep_;
};

Barrier& MarkSweep::GetBarrier() {
  return *gc_barrier_;
}

const TimingLogger& MarkSweep::GetTimings() const {
  return timings_;
}

const CumulativeLogger& MarkSweep::GetCumulativeTimings() const {
  return cumulative_timings_;
}

void MarkSweep::ResetCumulativeStatistics() {
  cumulative_timings_.Reset();
  total_time_ = 0;
  total_paused_time_ = 0;
  total_freed_objects_ = 0;
  total_freed_bytes_ = 0;
}

void MarkSweep::MarkRootsCheckpoint() {
  CheckpointMarkThreadRoots check_point(this);
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  // Increment the count of the barrier. If all of the checkpoints have already been finished then
  // it will hit 0 and continue. Otherwise we are still waiting for some checkpoints, so the
  // counter will go positive and we will unblock when it hits zero.
  gc_barrier_->Increment(Thread::Current(), thread_list->RunCheckpoint(&check_point));
}

void MarkSweep::SweepCallback(size_t num_ptrs, Object** ptrs, void* arg) {
  SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
  MarkSweep* mark_sweep = context->mark_sweep;
  Heap* heap = mark_sweep->GetHeap();
  AllocSpace* space = context->space;
  Thread* self = context->self;
  Locks::heap_bitmap_lock_->AssertExclusiveHeld(self);
  // Use a bulk free, which merges consecutive objects before freeing, or free per object?
  // Documentation suggests better free performance with merging, but this may be at the expense
  // of allocation.
  size_t freed_objects = num_ptrs;
  // AllocSpace::FreeList clears the value in ptrs, so perform after clearing the live bit.
  size_t freed_bytes = space->FreeList(self, num_ptrs, ptrs);
  heap->RecordFree(freed_objects, freed_bytes);
  mark_sweep->freed_objects_ += freed_objects;
  mark_sweep->freed_bytes_ += freed_bytes;
}

void MarkSweep::ZygoteSweepCallback(size_t num_ptrs, Object** ptrs, void* arg) {
  SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
  Locks::heap_bitmap_lock_->AssertExclusiveHeld(context->self);
  Heap* heap = context->mark_sweep->GetHeap();
  // We don't free any actual memory to avoid dirtying the shared zygote pages.
  for (size_t i = 0; i < num_ptrs; ++i) {
    Object* obj = static_cast<Object*>(ptrs[i]);
    heap->GetLiveBitmap()->Clear(obj);
    heap->GetCardTable()->MarkCard(obj);
  }
}

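// Sweeps only the objects recorded in the given stack, freeing those that are not marked.
// Alloc space objects are compacted to the front of the stack and freed in one bulk FreeList
// call; large objects are freed individually as they are encountered.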
void MarkSweep::SweepArray(TimingLogger& logger, ObjectStack* allocations, bool swap_bitmaps) {
  size_t freed_bytes = 0;
  DlMallocSpace* space = heap_->GetAllocSpace();

  // If we don't swap bitmaps then newly allocated Weaks go into the live bitmap but not mark
  // bitmap, resulting in occasional frees of Weaks which are still in use.
  SweepSystemWeaksArray(allocations);
  logger.AddSplit("SweepSystemWeaks");

  // Newly allocated objects MUST be in the alloc space and those are the only objects which we are
  // going to free.
  SpaceBitmap* live_bitmap = space->GetLiveBitmap();
  SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
  LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  SpaceSetMap* large_live_objects = large_object_space->GetLiveObjects();
  SpaceSetMap* large_mark_objects = large_object_space->GetMarkObjects();
  if (swap_bitmaps) {
    std::swap(live_bitmap, mark_bitmap);
    std::swap(large_live_objects, large_mark_objects);
  }

  size_t freed_large_objects = 0;
  size_t count = allocations->Size();
  Object** objects = const_cast<Object**>(allocations->Begin());
  Object** out = objects;

  // Empty the allocation stack.
  Thread* self = Thread::Current();
  for (size_t i = 0; i < count; ++i) {
    Object* obj = objects[i];
    // There should only be objects in the AllocSpace/LargeObjectSpace in the allocation stack.
    if (LIKELY(mark_bitmap->HasAddress(obj))) {
      if (!mark_bitmap->Test(obj)) {
        // Don't bother un-marking since we clear the mark bitmap anyways.
        *(out++) = obj;
      }
    } else if (!large_mark_objects->Test(obj)) {
      ++freed_large_objects;
      freed_bytes += large_object_space->Free(self, obj);
    }
  }
  logger.AddSplit("Process allocation stack");

  size_t freed_objects = out - objects;
  freed_bytes += space->FreeList(self, freed_objects, objects);
  VLOG(heap) << "Freed " << freed_objects << "/" << count
             << " objects with size " << PrettySize(freed_bytes);
  heap_->RecordFree(freed_objects + freed_large_objects, freed_bytes);
  freed_objects_ += freed_objects;
  freed_bytes_ += freed_bytes;
  logger.AddSplit("FreeList");
  allocations->Reset();
  logger.AddSplit("ResetStack");
}

void MarkSweep::Sweep(TimingLogger& timings, bool swap_bitmaps) {
  DCHECK(mark_stack_->IsEmpty());

  // If we don't swap bitmaps then newly allocated Weaks go into the live bitmap but not mark
  // bitmap, resulting in occasional frees of Weaks which are still in use.
  SweepSystemWeaks();
  timings.AddSplit("SweepSystemWeaks");

  const bool partial = GetGcType() == kGcTypePartial;
  const Spaces& spaces = heap_->GetSpaces();
  SweepCallbackContext scc;
  scc.mark_sweep = this;
  scc.self = Thread::Current();
  // TODO: C++0x auto
  for (Spaces::const_iterator it = spaces.begin(); it != spaces.end(); ++it) {
    ContinuousSpace* space = *it;
    if (space->GetGcRetentionPolicy() == kGcRetentionPolicyAlwaysCollect ||
        (!partial && space->GetGcRetentionPolicy() == kGcRetentionPolicyFullCollect)) {
      uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
      uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
      scc.space = space->AsAllocSpace();
      SpaceBitmap* live_bitmap = space->GetLiveBitmap();
      SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
      if (swap_bitmaps) {
        std::swap(live_bitmap, mark_bitmap);
      }
      if (space->GetGcRetentionPolicy() == kGcRetentionPolicyAlwaysCollect) {
        // Bitmaps are pre-swapped for optimization which enables sweeping with the heap unlocked.
        SpaceBitmap::SweepWalk(*live_bitmap, *mark_bitmap, begin, end,
                               &SweepCallback, reinterpret_cast<void*>(&scc));
      } else {
        // Zygote sweep takes care of dirtying cards and clearing live bits; it does not free
        // actual memory.
        SpaceBitmap::SweepWalk(*live_bitmap, *mark_bitmap, begin, end,
                               &ZygoteSweepCallback, reinterpret_cast<void*>(&scc));
      }
    }
  }
  timings.AddSplit("Sweep");

  SweepLargeObjects(swap_bitmaps);
  timings.AddSplit("SweepLargeObjects");
}

void MarkSweep::SweepLargeObjects(bool swap_bitmaps) {
  // Sweep large objects.
  LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  SpaceSetMap* large_live_objects = large_object_space->GetLiveObjects();
  SpaceSetMap* large_mark_objects = large_object_space->GetMarkObjects();
  if (swap_bitmaps) {
    std::swap(large_live_objects, large_mark_objects);
  }
  SpaceSetMap::Objects& live_objects = large_live_objects->GetObjects();
  // O(n*log(n)) but hopefully there are not too many large objects.
  size_t freed_objects = 0;
  size_t freed_bytes = 0;
  // TODO: C++0x
  Thread* self = Thread::Current();
  for (SpaceSetMap::Objects::iterator it = live_objects.begin(); it != live_objects.end(); ++it) {
    if (!large_mark_objects->Test(*it)) {
      freed_bytes += large_object_space->Free(self, const_cast<Object*>(*it));
      ++freed_objects;
    }
  }
  freed_objects_ += freed_objects;
  freed_bytes_ += freed_bytes;
  // Large objects don't count towards bytes_allocated.
  GetHeap()->RecordFree(freed_objects, freed_bytes);
}

void MarkSweep::CheckReference(const Object* obj, const Object* ref, MemberOffset offset, bool is_static) {
  const Spaces& spaces = heap_->GetSpaces();
  // TODO: C++0x auto
  for (Spaces::const_iterator cur = spaces.begin(); cur != spaces.end(); ++cur) {
    if ((*cur)->IsAllocSpace() && (*cur)->Contains(ref)) {
      DCHECK(IsMarked(obj));

      bool is_marked = IsMarked(ref);
      if (!is_marked) {
        LOG(INFO) << **cur;
        LOG(WARNING) << (is_static ? "Static ref'" : "Instance ref'") << PrettyTypeOf(ref)
                     << "' (" << reinterpret_cast<const void*>(ref) << ") in '" << PrettyTypeOf(obj)
                     << "' (" << reinterpret_cast<const void*>(obj) << ") at offset "
                     << reinterpret_cast<void*>(offset.Int32Value()) << " wasn't marked";

        const Class* klass = is_static ? obj->AsClass() : obj->GetClass();
        DCHECK(klass != NULL);
        const ObjectArray<Field>* fields = is_static ? klass->GetSFields() : klass->GetIFields();
        DCHECK(fields != NULL);
        bool found = false;
        for (int32_t i = 0; i < fields->GetLength(); ++i) {
          const Field* cur = fields->Get(i);
          if (cur->GetOffset().Int32Value() == offset.Int32Value()) {
            LOG(WARNING) << "Field referencing the alloc space was " << PrettyField(cur);
            found = true;
            break;
          }
        }
        if (!found) {
          LOG(WARNING) << "Could not find field in object alloc space with offset " << offset.Int32Value();
        }

        bool obj_marked = heap_->GetCardTable()->IsDirty(obj);
        if (!obj_marked) {
          LOG(WARNING) << "Object '" << PrettyTypeOf(obj) << "' "
                       << "(" << reinterpret_cast<const void*>(obj) << ") contains references to "
                       << "the alloc space, but wasn't card marked";
        }
      }
    }
    break;
  }
}

// Process the "referent" field in a java.lang.ref.Reference. If the
// referent has not yet been marked, put it on the appropriate list in
// the gcHeap for later processing.
void MarkSweep::DelayReferenceReferent(Object* obj) {
  DCHECK(obj != NULL);
  Class* klass = obj->GetClass();
  DCHECK(klass != NULL);
  DCHECK(klass->IsReferenceClass());
  Object* pending = obj->GetFieldObject<Object*>(heap_->GetReferencePendingNextOffset(), false);
  Object* referent = heap_->GetReferenceReferent(obj);
  if (kCountJavaLangRefs) {
    ++reference_count_;
  }
  if (pending == NULL && referent != NULL && !IsMarked(referent)) {
    Object** list = NULL;
    if (klass->IsSoftReferenceClass()) {
      list = &soft_reference_list_;
    } else if (klass->IsWeakReferenceClass()) {
      list = &weak_reference_list_;
    } else if (klass->IsFinalizerReferenceClass()) {
      list = &finalizer_reference_list_;
    } else if (klass->IsPhantomReferenceClass()) {
      list = &phantom_reference_list_;
    }
    DCHECK(list != NULL) << PrettyClass(klass) << " " << std::hex << klass->GetAccessFlags();
    // TODO: One lock per list?
    heap_->EnqueuePendingReference(obj, list);
  }
}

void MarkSweep::ScanRoot(const Object* obj) {
  ScanObject(obj);
}

class MarkObjectVisitor {
 public:
  MarkObjectVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {}

  // TODO: Fix this when annotalysis works with visitors.
  void operator ()(const Object* /* obj */, const Object* ref, const MemberOffset& /* offset */,
                   bool /* is_static */) const NO_THREAD_SAFETY_ANALYSIS {
    if (kDebugLocking) {
      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    mark_sweep_->MarkObject(ref);
  }

 private:
  MarkSweep* const mark_sweep_;
};

// Scans an object reference. Determines the type of the reference
// and dispatches to a specialized scanning routine.
void MarkSweep::ScanObject(const Object* obj) {
  MarkObjectVisitor visitor(this);
  ScanObjectVisit(obj, visitor);
}

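// A unit of parallel marking work: a fixed-size slice of the mark stack. Each task scans its
// own slice; references it newly marks are accumulated into an output chunk, which is enqueued
// on the thread pool as another task once it fills up. Finalize() flushes any partial output
// chunk before the task deletes itself.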
1115class MarkStackChunk : public Task {
1116public:
1117 MarkStackChunk(ThreadPool* thread_pool, MarkSweep* mark_sweep, Object** begin, Object** end)
1118 : mark_sweep_(mark_sweep),
1119 thread_pool_(thread_pool),
1120 index_(0),
1121 length_(0),
1122 output_(NULL) {
1123 length_ = end - begin;
1124 if (begin != end) {
1125 // Cost not significant since we only do this for the initial set of mark stack chunks.
1126 memcpy(data_, begin, length_ * sizeof(*begin));
1127 }
1128 if (kCountTasks) {
1129 ++mark_sweep_->work_chunks_created_;
1130 }
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001131 }
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001132
1133 ~MarkStackChunk() {
1134 DCHECK(output_ == NULL || output_->length_ == 0);
1135 DCHECK_GE(index_, length_);
1136 delete output_;
1137 if (kCountTasks) {
1138 ++mark_sweep_->work_chunks_deleted_;
1139 }
Carl Shapiro69759ea2011-07-21 18:13:35 -07001140 }
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001141
1142 MarkSweep* const mark_sweep_;
1143 ThreadPool* const thread_pool_;
1144 static const size_t max_size = 1 * KB;
1145 // Index of which object we are scanning. Only needs to be atomic if we are doing work stealing.
1146 size_t index_;
1147 // Input / output mark stack. We add newly marked references to data_ until length reaches
1148 // max_size. This is an optimization so that less tasks are created.
1149 // TODO: Investigate using a bounded buffer FIFO.
1150 Object* data_[max_size];
1151 // How many elements in data_ we need to scan.
1152 size_t length_;
1153 // Output block, newly marked references get added to the ouput block so that another thread can
1154 // scan them.
1155 MarkStackChunk* output_;
1156
1157 class MarkObjectParallelVisitor {
1158 public:
1159 MarkObjectParallelVisitor(MarkStackChunk* chunk_task) : chunk_task_(chunk_task) {
1160
1161 }
1162
1163 void operator ()(const Object* /* obj */, const Object* ref,
1164 const MemberOffset& /* offset */, bool /* is_static */) const {
1165 if (ref != NULL && chunk_task_->mark_sweep_->MarkObjectParallel(ref)) {
1166 chunk_task_->MarkStackPush(ref);
1167 }
1168 }
1169
1170 private:
1171 MarkStackChunk* const chunk_task_;
1172 };
1173
1174 // Push an object into the block.
1175 // Don't need to use atomic ++ since we only one thread is writing to an output block at any
1176 // given time.
1177 void Push(Object* obj) {
1178 data_[length_++] = obj;
1179 }
1180
1181 void MarkStackPush(const Object* obj) {
1182 if (static_cast<size_t>(length_) < max_size) {
1183 Push(const_cast<Object*>(obj));
1184 } else {
1185 // Internal buffer is full, push to a new buffer instead.
1186 if (UNLIKELY(output_ == NULL)) {
1187 AllocateOutputChunk();
1188 } else if (UNLIKELY(static_cast<size_t>(output_->length_) == max_size)) {
1189 // Output block is full, queue it up for processing and obtain a new block.
1190 EnqueueOutput();
1191 AllocateOutputChunk();
1192 }
1193 output_->Push(const_cast<Object*>(obj));
1194 }
1195 }
1196
1197 void ScanObject(Object* obj) {
1198 mark_sweep_->ScanObjectVisit(obj, MarkObjectParallelVisitor(this));
1199 }
1200
1201 void EnqueueOutput() {
1202 if (output_ != NULL) {
1203 uint64_t start = 0;
1204 if (kMeasureOverhead) {
1205 start = NanoTime();
1206 }
1207 thread_pool_->AddTask(Thread::Current(), output_);
1208 output_ = NULL;
1209 if (kMeasureOverhead) {
1210 mark_sweep_->overhead_time_ += NanoTime() - start;
1211 }
1212 }
1213 }
1214
1215 void AllocateOutputChunk() {
1216 uint64_t start = 0;
1217 if (kMeasureOverhead) {
1218 start = NanoTime();
1219 }
1220 output_ = new MarkStackChunk(thread_pool_, mark_sweep_, NULL, NULL);
1221 if (kMeasureOverhead) {
1222 mark_sweep_->overhead_time_ += NanoTime() - start;
1223 }
1224 }
1225
1226 void Finalize() {
1227 EnqueueOutput();
1228 delete this;
1229 }
1230
1231 // Scans all of the objects
1232 virtual void Run(Thread* self) {
1233 int index;
1234 while ((index = index_++) < length_) {
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001235 if (kUseMarkStackPrefetch) {
Mathieu Chartierd22d5482012-11-06 17:14:12 -08001236 static const size_t prefetch_look_ahead = 1;
1237 __builtin_prefetch(data_[std::min(index + prefetch_look_ahead, length_ - 1)]);
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001238 }
1239 Object* obj = data_[index];
1240 DCHECK(obj != NULL);
1241 ScanObject(obj);
1242 }
1243 }
1244};
1245
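// Rough lifecycle of a chunk task (illustrative): a worker thread runs the task and scans each
// object in data_. Newly discovered references are appended to data_ while there is room,
// extending the same task's work; once data_ is full they spill into output_, and a filled
// output_ chunk is handed back to the thread pool as a fresh task (Finalize() enqueues any
// partially filled leftover). Batching up to max_size = 1024 references per task keeps the
// task-dispatch overhead small relative to the actual scanning work.
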
void MarkSweep::ProcessMarkStackParallel() {
  CHECK(kDisableFinger) << "parallel mark stack processing cannot work when finger is enabled";
  Thread* self = Thread::Current();
  ThreadPool* thread_pool = GetHeap()->GetThreadPool();
  // Split the current mark stack up into work tasks.
  const size_t num_threads = thread_pool->GetThreadCount();
  const size_t stack_size = mark_stack_->Size();
  const size_t chunk_size =
      std::min((stack_size + num_threads - 1) / num_threads,
               static_cast<size_t>(MarkStackChunk::max_size));
  size_t index = 0;
  for (size_t i = 0; i < num_threads || index < stack_size; ++i) {
    Object** begin = &mark_stack_->Begin()[std::min(stack_size, index)];
    Object** end = &mark_stack_->Begin()[std::min(stack_size, index + chunk_size)];
    index += chunk_size;
    thread_pool->AddTask(self, new MarkStackChunk(thread_pool, this, begin, end));
  }
  thread_pool->StartWorkers(self);
  mark_stack_->Reset();
  thread_pool->Wait(self, true);
  // LOG(INFO) << "Idle wait time " << PrettyDuration(thread_pool->GetWaitTime());
  CHECK_EQ(work_chunks_created_, work_chunks_deleted_) << " some of the work chunks were leaked";
}

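// Worked example of the chunking above, with illustrative numbers: for a 10000-entry mark stack
// and 4 worker threads, chunk_size = min(ceil(10000 / 4), 1024) = 1024, so the loop seeds the
// pool with ceil(10000 / 1024) = 10 initial tasks of at most 1024 objects each. Any marking
// work discovered after that arrives via the output chunks the tasks enqueue themselves.
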
// Scan anything that's on the mark stack.
void MarkSweep::ProcessMarkStack() {
  ThreadPool* thread_pool = GetHeap()->GetThreadPool();
  if (kParallelMarkStack && thread_pool != NULL && thread_pool->GetThreadCount() > 0) {
    ProcessMarkStackParallel();
    return;
  }

  if (kUseMarkStackPrefetch) {
    const size_t fifo_size = 4;
    const size_t fifo_mask = fifo_size - 1;
    const Object* fifo[fifo_size];
    for (size_t i = 0; i < fifo_size; ++i) {
      fifo[i] = NULL;
    }
    size_t fifo_pos = 0;
    size_t fifo_count = 0;
    for (;;) {
      const Object* obj = fifo[fifo_pos & fifo_mask];
      if (obj != NULL) {
        ScanObject(obj);
        fifo[fifo_pos & fifo_mask] = NULL;
        --fifo_count;
      }

      if (!mark_stack_->IsEmpty()) {
        const Object* obj = mark_stack_->PopBack();
        DCHECK(obj != NULL);
        fifo[fifo_pos & fifo_mask] = obj;
        __builtin_prefetch(obj);
        fifo_count++;
      }
      fifo_pos++;

      if (!fifo_count) {
        CHECK(mark_stack_->IsEmpty()) << mark_stack_->Size();
        break;
      }
    }
  } else {
    while (!mark_stack_->IsEmpty()) {
      const Object* obj = mark_stack_->PopBack();
      DCHECK(obj != NULL);
      ScanObject(obj);
    }
  }
}

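// Note on the serial path above: the four-entry ring buffer software-pipelines the scan. An
// object is prefetched with __builtin_prefetch() at the moment it is popped off the mark stack,
// but only scanned four iterations later when its slot (fifo_pos & fifo_mask) comes around
// again, by which point its header has (hopefully) arrived in cache. This hides part of the
// memory latency of pointer-chasing through the object graph.
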
// Walks the reference list marking any references subject to the
// reference clearing policy. References with a black referent are
// removed from the list. References with white referents biased
// toward saving are blackened and also removed from the list.
void MarkSweep::PreserveSomeSoftReferences(Object** list) {
  DCHECK(list != NULL);
  Object* clear = NULL;
  size_t counter = 0;

  DCHECK(mark_stack_->IsEmpty());

  while (*list != NULL) {
    Object* ref = heap_->DequeuePendingReference(list);
    Object* referent = heap_->GetReferenceReferent(ref);
    if (referent == NULL) {
      // Referent was cleared by the user during marking.
      continue;
    }
    bool is_marked = IsMarked(referent);
    if (!is_marked && ((++counter) & 1)) {
      // Referent is white and biased toward saving, mark it.
      MarkObject(referent);
      is_marked = true;
    }
    if (!is_marked) {
      // Referent is white, queue it for clearing.
      heap_->EnqueuePendingReference(ref, &clear);
    }
  }
  *list = clear;
  // Restart the mark with the newly black references added to the
  // root set.
  ProcessMarkStack();
}

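// The ((++counter) & 1) test above is the entire "biased toward saving" policy: among soft
// references whose referents are still white at this point, every odd-numbered one is marked
// and kept alive, so roughly half of the softly reachable objects survive any collection that
// is not required to clear soft references outright.
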
inline bool MarkSweep::IsMarked(const Object* object) const
    SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
  if (object >= immune_begin_ && object < immune_end_) {
    return true;
  }
  DCHECK(current_mark_bitmap_ != NULL);
  if (current_mark_bitmap_->HasAddress(object)) {
    return current_mark_bitmap_->Test(object);
  }
  return heap_->GetMarkBitmap()->Test(object);
}

// Unlink the reference list, clearing reference objects with white
// referents. Cleared references registered to a reference queue are
// scheduled for appending by the heap worker thread.
void MarkSweep::ClearWhiteReferences(Object** list) {
  DCHECK(list != NULL);
  while (*list != NULL) {
    Object* ref = heap_->DequeuePendingReference(list);
    Object* referent = heap_->GetReferenceReferent(ref);
    if (referent != NULL && !IsMarked(referent)) {
      // Referent is white, clear it.
      heap_->ClearReferenceReferent(ref);
      if (heap_->IsEnqueuable(ref)) {
        heap_->EnqueueReference(ref, &cleared_reference_list_);
      }
    }
  }
  DCHECK(*list == NULL);
}

// Enqueues finalizer references with white referents. White
// referents are blackened, moved to the zombie field, and the
// referent field is cleared.
void MarkSweep::EnqueueFinalizerReferences(Object** list) {
  DCHECK(list != NULL);
  MemberOffset zombie_offset = heap_->GetFinalizerReferenceZombieOffset();
  bool has_enqueued = false;
  while (*list != NULL) {
    Object* ref = heap_->DequeuePendingReference(list);
    Object* referent = heap_->GetReferenceReferent(ref);
    if (referent != NULL && !IsMarked(referent)) {
      MarkObject(referent);
      // If the referent is non-null the reference must be enqueuable.
      DCHECK(heap_->IsEnqueuable(ref));
      ref->SetFieldObject(zombie_offset, referent, false);
      heap_->ClearReferenceReferent(ref);
      heap_->EnqueueReference(ref, &cleared_reference_list_);
      has_enqueued = true;
    }
  }
  if (has_enqueued) {
    ProcessMarkStack();
  }
  DCHECK(*list == NULL);
}

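// The zombie hand-off above is what keeps a finalizable object alive until its finalizer has a
// chance to run: the white referent is blackened, stashed in the FinalizerReference's zombie
// field, and the referent field itself is cleared. From this point the object is reachable only
// through the zombie field, which the finalization machinery later reads and nulls out.
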
// Process reference class instances and schedule finalizations.
void MarkSweep::ProcessReferences(Object** soft_references, bool clear_soft,
                                  Object** weak_references,
                                  Object** finalizer_references,
                                  Object** phantom_references) {
  DCHECK(soft_references != NULL);
  DCHECK(weak_references != NULL);
  DCHECK(finalizer_references != NULL);
  DCHECK(phantom_references != NULL);

  // Unless we are in the zygote or required to clear soft references
  // with white referents, preserve some white referents.
  if (!clear_soft && !Runtime::Current()->IsZygote()) {
    PreserveSomeSoftReferences(soft_references);
  }

  // Clear all remaining soft and weak references with white
  // referents.
  ClearWhiteReferences(soft_references);
  ClearWhiteReferences(weak_references);

  // Preserve all white objects with finalize methods and schedule
  // them for finalization.
  EnqueueFinalizerReferences(finalizer_references);

  // Clear all f-reachable soft and weak references with white
  // referents.
  ClearWhiteReferences(soft_references);
  ClearWhiteReferences(weak_references);

  // Clear all phantom references with white referents.
  ClearWhiteReferences(phantom_references);

  // At this point all reference lists should be empty.
  DCHECK(*soft_references == NULL);
  DCHECK(*weak_references == NULL);
  DCHECK(*finalizer_references == NULL);
  DCHECK(*phantom_references == NULL);
}

void MarkSweep::UnBindBitmaps() {
  const Spaces& spaces = heap_->GetSpaces();
  // TODO: C++0x auto
  for (Spaces::const_iterator it = spaces.begin(); it != spaces.end(); ++it) {
    Space* space = *it;
    if (space->IsAllocSpace()) {
      DlMallocSpace* alloc_space = space->AsAllocSpace();
      if (alloc_space->temp_bitmap_.get() != NULL) {
        // At this point, the temp_bitmap holds our old mark bitmap.
        SpaceBitmap* new_bitmap = alloc_space->temp_bitmap_.release();
        GetHeap()->GetMarkBitmap()->ReplaceBitmap(alloc_space->mark_bitmap_.get(), new_bitmap);
        CHECK_EQ(alloc_space->mark_bitmap_.release(), alloc_space->live_bitmap_.get());
        alloc_space->mark_bitmap_.reset(new_bitmap);
        DCHECK(alloc_space->temp_bitmap_.get() == NULL);
      }
    }
  }
}

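// Context for UnBindBitmaps() (a summary of behavior set up elsewhere in the collector): when a
// space's contents are not traced during a collection, its mark bitmap is temporarily bound to
// its live bitmap, so every live object trivially reads as marked, and the real mark bitmap is
// stashed in temp_bitmap_. The code above undoes that binding: it reinstalls the stashed bitmap
// as the space's mark bitmap and checks that the placeholder being dropped really was the live
// bitmap.
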
void MarkSweep::FinishPhase() {
  // Can't enqueue references if we hold the mutator lock.
  Object* cleared_references = GetClearedReferences();
  heap_->EnqueueClearedReferences(&cleared_references);

  heap_->PostGcVerification(this);

  // Update the cumulative statistics.
  total_time_ += GetDuration();
  total_paused_time_ += std::accumulate(GetPauseTimes().begin(), GetPauseTimes().end(),
                                        static_cast<uint64_t>(0), std::plus<uint64_t>());
  total_freed_objects_ += GetFreedObjects();
  total_freed_bytes_ += GetFreedBytes();

  // Ensure that the mark stack is empty.
  CHECK(mark_stack_->IsEmpty());

  if (kCountScannedTypes) {
    VLOG(gc) << "MarkSweep scanned classes=" << class_count_ << " arrays=" << array_count_
             << " other=" << other_count_;
  }

  if (kCountTasks) {
    VLOG(gc) << "Total number of work chunks allocated: " << work_chunks_created_;
  }

  if (kMeasureOverhead) {
    VLOG(gc) << "Overhead time " << PrettyDuration(overhead_time_);
  }

  if (kProfileLargeObjects) {
    VLOG(gc) << "Large objects tested " << large_object_test_ << " marked " << large_object_mark_;
  }

  if (kCountClassesMarked) {
    VLOG(gc) << "Classes marked " << classes_marked_;
  }

  if (kCountJavaLangRefs) {
    VLOG(gc) << "References scanned " << reference_count_;
  }

  // Update the cumulative loggers.
  cumulative_timings_.Start();
  cumulative_timings_.AddLogger(timings_);
  cumulative_timings_.End();

  // Clear all of the spaces' mark bitmaps.
  const Spaces& spaces = heap_->GetSpaces();
  // TODO: C++0x auto
  for (Spaces::const_iterator it = spaces.begin(); it != spaces.end(); ++it) {
    ContinuousSpace* space = *it;
    if (space->GetGcRetentionPolicy() != kGcRetentionPolicyNeverCollect) {
      space->GetMarkBitmap()->Clear();
    }
  }
  mark_stack_->Reset();

  // Reset the marked large objects.
  LargeObjectSpace* large_objects = GetHeap()->GetLargeObjectsSpace();
  large_objects->GetMarkObjects()->Clear();
}

MarkSweep::~MarkSweep() {
}

} // namespace art