/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "semi_space.h"

#include <sys/mman.h>

#include <algorithm>
#include <climits>
#include <cstring>
#include <functional>
#include <numeric>
#include <vector>

#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex-inl.h"
#include "base/timing_logger.h"
#include "gc/accounting/heap_bitmap.h"
#include "gc/accounting/mod_union_table.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/space/bump_pointer_space.h"
#include "gc/space/bump_pointer_space-inl.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "indirect_reference_table.h"
#include "intern_table.h"
#include "jni_internal.h"
#include "mark_sweep-inl.h"
#include "monitor.h"
#include "mirror/art_field.h"
#include "mirror/art_field-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/dex_cache.h"
#include "mirror/object-inl.h"
#include "mirror/object_array.h"
#include "mirror/object_array-inl.h"
#include "runtime.h"
#include "semi_space-inl.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "verifier/method_verifier.h"

using ::art::mirror::Class;
using ::art::mirror::Object;

namespace art {
namespace gc {
namespace collector {

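// kProtectFromSpace: mprotect() the from-space PROT_NONE after the collection so
// that stale references into it fault immediately (otherwise it is left read-only).
// kResetFromSpace: Clear() the from-space after the collection to release its
// memory. See ReclaimPhase().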
static constexpr bool kProtectFromSpace = true;
static constexpr bool kResetFromSpace = true;
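// When enabled, objects that survived the previous collection (those allocated
// below last_gc_to_space_end_) are pseudo-promoted into the non-moving space
// instead of being copied to the to-space. See MarkObject().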
// TODO: move this to a new file as a new garbage collector?
static constexpr bool kEnableSimplePromo = false;

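// Spaces that are never collected are folded into a single contiguous immune
// region [immune_begin_, immune_end_); objects inside it are treated as always
// marked.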
// TODO: Unduplicate logic.
void SemiSpace::ImmuneSpace(space::ContinuousSpace* space) {
  // Bind live to mark bitmap if necessary.
  if (space->GetLiveBitmap() != space->GetMarkBitmap()) {
    BindLiveToMarkBitmap(space);
  }
  // Add the space to the immune region.
  if (immune_begin_ == nullptr) {
    DCHECK(immune_end_ == nullptr);
    immune_begin_ = reinterpret_cast<Object*>(space->Begin());
    immune_end_ = reinterpret_cast<Object*>(space->End());
  } else {
    const space::ContinuousSpace* prev_space = nullptr;
    // Find out if the previous space is immune.
    for (space::ContinuousSpace* cur_space : GetHeap()->GetContinuousSpaces()) {
      if (cur_space == space) {
        break;
      }
      prev_space = cur_space;
    }
    // If previous space was immune, then extend the immune region. Relies on continuous spaces
    // being sorted by Heap::AddContinuousSpace.
    if (prev_space != nullptr && IsImmuneSpace(prev_space)) {
      immune_begin_ = std::min(reinterpret_cast<Object*>(space->Begin()), immune_begin_);
      immune_end_ = std::max(reinterpret_cast<Object*>(space->End()), immune_end_);
    }
  }
}

void SemiSpace::BindBitmaps() {
  timings_.StartSplit("BindBitmaps");
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect, as well as full-collect spaces, as immune.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect ||
        space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect) {
      ImmuneSpace(space);
    }
  }
  timings_.EndSplit();
}

SemiSpace::SemiSpace(Heap* heap, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix + (name_prefix.empty() ? "" : " ") + "marksweep + semispace"),
      mark_stack_(nullptr),
      immune_begin_(nullptr),
      immune_end_(nullptr),
      to_space_(nullptr),
      from_space_(nullptr),
      soft_reference_list_(nullptr),
      weak_reference_list_(nullptr),
      finalizer_reference_list_(nullptr),
      phantom_reference_list_(nullptr),
      cleared_reference_list_(nullptr),
      self_(nullptr),
      last_gc_to_space_end_(nullptr),
      bytes_promoted_(0) {
}

void SemiSpace::InitializePhase() {
  timings_.Reset();
  TimingLogger::ScopedSplit split("InitializePhase", &timings_);
  mark_stack_ = heap_->mark_stack_.get();
  DCHECK(mark_stack_ != nullptr);
  immune_begin_ = nullptr;
  immune_end_ = nullptr;
  soft_reference_list_ = nullptr;
  weak_reference_list_ = nullptr;
  finalizer_reference_list_ = nullptr;
  phantom_reference_list_ = nullptr;
  cleared_reference_list_ = nullptr;
  self_ = Thread::Current();
  // Do any pre-GC verification.
  timings_.NewSplit("PreGcVerification");
  heap_->PreGcVerification(this);
}

void SemiSpace::ProcessReferences(Thread* self) {
  TimingLogger::ScopedSplit split("ProcessReferences", &timings_);
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetHeap()->ProcessReferences(timings_, clear_soft_references_, &MarkedForwardingAddressCallback,
                               &RecursiveMarkObjectCallback, this);
}

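// The marking phase runs with mutators suspended: swap the semi-spaces, mark the
// roots, then recursively mark everything reachable from them, copying from-space
// objects as they are first visited.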
void SemiSpace::MarkingPhase() {
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertExclusiveHeld(self);
  TimingLogger::ScopedSplit split("MarkingPhase", &timings_);
  // Need to do this with mutators paused so that somebody doesn't accidentally allocate into the
  // wrong space.
  heap_->SwapSemiSpaces();
  if (kEnableSimplePromo) {
    // If last_gc_to_space_end_ is out of the bounds of the from-space
    // (the to-space from the last GC), then point it to the beginning of
    // the from-space. For example, the very first GC or the
    // pre-zygote compaction.
    if (!from_space_->HasAddress(reinterpret_cast<mirror::Object*>(last_gc_to_space_end_))) {
      last_gc_to_space_end_ = from_space_->Begin();
    }
    // Reset this before the marking starts below.
    bytes_promoted_ = 0;
  }
  // Assume the cleared space is already empty.
  BindBitmaps();
  // Process dirty cards and add dirty cards to mod-union tables.
  heap_->ProcessCards(timings_);
  // Clear the whole card table since we cannot get any additional dirty cards during the
  // paused GC. This saves memory but only works for pause-the-world collectors.
  timings_.NewSplit("ClearCardTable");
  heap_->GetCardTable()->ClearCardTable();
  // Need to do this before the checkpoint since we don't want any threads to add references to
  // the live stack during the recursive mark.
  timings_.NewSplit("SwapStacks");
  heap_->SwapStacks();
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  MarkRoots();
  // Mark roots of immune spaces.
  UpdateAndMarkModUnion();
  // Recursively mark remaining objects.
  MarkReachableObjects();
}

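// A space is immune if it lies entirely within the immune region.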
bool SemiSpace::IsImmuneSpace(const space::ContinuousSpace* space) const {
  return
      immune_begin_ <= reinterpret_cast<Object*>(space->Begin()) &&
      immune_end_ >= reinterpret_cast<Object*>(space->End());
}

void SemiSpace::UpdateAndMarkModUnion() {
  for (auto& space : heap_->GetContinuousSpaces()) {
    // If the space is immune then we need to mark the references to other spaces.
    if (IsImmuneSpace(space)) {
      accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
      CHECK(table != nullptr);
      // TODO: Improve naming.
      TimingLogger::ScopedSplit split(
          space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
                                   "UpdateAndMarkImageModUnionTable",
          &timings_);
      table->UpdateAndMarkReferences(MarkRootCallback, this);
    }
  }
}

void SemiSpace::MarkReachableObjects() {
  timings_.StartSplit("MarkStackAsLive");
  accounting::ObjectStack* live_stack = heap_->GetLiveStack();
  heap_->MarkAllocStackAsLive(live_stack);
  live_stack->Reset();
  timings_.EndSplit();
  // Recursively process the mark stack.
  ProcessMarkStack(true);
}

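// The reclaim phase: process references, sweep system weaks and unmarked objects
// in the malloc spaces, then clear and protect the from-space.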
void SemiSpace::ReclaimPhase() {
  TimingLogger::ScopedSplit split("ReclaimPhase", &timings_);
  Thread* self = Thread::Current();
  ProcessReferences(self);
  {
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    SweepSystemWeaks();
  }
  // Record freed memory.
  int from_bytes = from_space_->GetBytesAllocated();
  int to_bytes = to_space_->GetBytesAllocated();
  int from_objects = from_space_->GetObjectsAllocated();
  int to_objects = to_space_->GetObjectsAllocated();
  int freed_bytes = from_bytes - to_bytes;
  int freed_objects = from_objects - to_objects;
  CHECK_GE(freed_bytes, 0);
  freed_bytes_.fetch_add(freed_bytes);
  freed_objects_.fetch_add(freed_objects);
  heap_->RecordFree(static_cast<size_t>(freed_objects), static_cast<size_t>(freed_bytes));

  timings_.StartSplit("PreSweepingGcVerification");
  heap_->PreSweepingGcVerification(this);
  timings_.EndSplit();

  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // Reclaim unmarked objects.
    Sweep(false);
    // Swap the live and mark bitmaps for each space that we modified. This is an
    // optimization that enables us to not clear live bits inside of the sweep. Only swaps unbound
    // bitmaps.
    timings_.StartSplit("SwapBitmaps");
    SwapBitmaps();
    timings_.EndSplit();
    // Unbind the live and mark bitmaps.
    UnBindBitmaps();
  }
  // Release the memory used by the from space.
  if (kResetFromSpace) {
    // Clearing from space.
    from_space_->Clear();
  }
  // Protect the from space.
  VLOG(heap)
      << "mprotect region " << reinterpret_cast<void*>(from_space_->Begin()) << " - "
      << reinterpret_cast<void*>(from_space_->Limit());
  if (kProtectFromSpace) {
    mprotect(from_space_->Begin(), from_space_->Capacity(), PROT_NONE);
  } else {
    mprotect(from_space_->Begin(), from_space_->Capacity(), PROT_READ);
  }

  if (kEnableSimplePromo) {
    // Record the end (top) of the to-space so we can distinguish
    // between objects that were allocated since the last GC and the
    // older objects.
    last_gc_to_space_end_ = to_space_->End();
  }
}

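// Resizing reallocates the stack's backing storage, so the existing entries are
// saved and re-pushed around the Resize() call.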
void SemiSpace::ResizeMarkStack(size_t new_size) {
  std::vector<Object*> temp(mark_stack_->Begin(), mark_stack_->End());
  CHECK_LE(mark_stack_->Size(), new_size);
  mark_stack_->Resize(new_size);
  for (const auto& obj : temp) {
    mark_stack_->PushBack(obj);
  }
}

inline void SemiSpace::MarkStackPush(Object* obj) {
  if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
    ResizeMarkStack(mark_stack_->Capacity() * 2);
  }
  // The object must be pushed onto the mark stack.
  mark_stack_->PushBack(obj);
}

// Rare case, probably not worth inlining since it will increase instruction cache miss rate.
bool SemiSpace::MarkLargeObject(const Object* obj) {
  // TODO: support >1 discontinuous space.
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  accounting::SpaceSetMap* large_objects = large_object_space->GetMarkObjects();
  if (UNLIKELY(!large_objects->Test(obj))) {
    large_objects->Set(obj);
    return true;
  }
  return false;
}

// Used to mark and copy objects. Any newly-marked objects that are in the from-space get moved to
// the to-space and have their forwarding address updated. Objects which have been newly marked are
// pushed on the mark stack.
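// The forwarding address is stashed in the object's lock word, so it is installed
// only after the object contents (including the old lock word) have been copied.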
Object* SemiSpace::MarkObject(Object* obj) {
  Object* ret = obj;
  if (obj != nullptr && !IsImmune(obj)) {
    if (from_space_->HasAddress(obj)) {
      mirror::Object* forward_address = GetForwardingAddressInFromSpace(obj);
      // If the object has already been moved, return the new forwarding address.
      if (forward_address == nullptr) {
        // Otherwise, we need to move the object and add it to the mark stack for processing.
        size_t object_size = obj->SizeOf();
        size_t dummy = 0;
        if (kEnableSimplePromo && reinterpret_cast<byte*>(obj) < last_gc_to_space_end_) {
          // If it was allocated before the last GC (older), move (pseudo-promote) it to
          // the non-moving space (as a sort of old generation).
          size_t bytes_promoted;
          space::MallocSpace* non_moving_space = GetHeap()->GetNonMovingSpace();
          forward_address = non_moving_space->Alloc(self_, object_size, &bytes_promoted);
          if (forward_address == nullptr) {
            // If out of space, fall back to the to-space.
            forward_address = to_space_->Alloc(self_, object_size, &dummy);
          } else {
            GetHeap()->num_bytes_allocated_.fetch_add(bytes_promoted);
            bytes_promoted_ += bytes_promoted;
            // Mark forward_address on the live bitmap.
            accounting::SpaceBitmap* live_bitmap = non_moving_space->GetLiveBitmap();
            DCHECK(live_bitmap != nullptr);
            DCHECK(!live_bitmap->Test(forward_address));
            live_bitmap->Set(forward_address);
            // Mark forward_address on the mark bitmap.
            accounting::SpaceBitmap* mark_bitmap = non_moving_space->GetMarkBitmap();
            DCHECK(mark_bitmap != nullptr);
            DCHECK(!mark_bitmap->Test(forward_address));
            mark_bitmap->Set(forward_address);
          }
          DCHECK(forward_address != nullptr);
        } else {
          // If it was allocated after the last GC (younger), copy it to the to-space.
          forward_address = to_space_->Alloc(self_, object_size, &dummy);
        }
        // Copy over the object and add it to the mark stack since we still need to update its
        // references.
        memcpy(reinterpret_cast<void*>(forward_address), obj, object_size);
        // Make sure to only update the forwarding address AFTER you copy the object so that the
        // monitor word doesn't get stomped over.
        obj->SetLockWord(LockWord::FromForwardingAddress(reinterpret_cast<size_t>(forward_address)));
        MarkStackPush(forward_address);
      } else {
        DCHECK(to_space_->HasAddress(forward_address) ||
               (kEnableSimplePromo && GetHeap()->GetNonMovingSpace()->HasAddress(forward_address)));
      }
      ret = forward_address;
      // TODO: Do we need this if in the else statement?
    } else {
      accounting::SpaceBitmap* object_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
      if (LIKELY(object_bitmap != nullptr)) {
        // This object was not previously marked.
        if (!object_bitmap->Test(obj)) {
          object_bitmap->Set(obj);
          MarkStackPush(obj);
        }
      } else {
        DCHECK(!to_space_->HasAddress(obj)) << "Marking object in to_space_";
        if (MarkLargeObject(obj)) {
          MarkStackPush(obj);
        }
      }
    }
  }
  return ret;
}

Object* SemiSpace::RecursiveMarkObjectCallback(Object* root, void* arg) {
  DCHECK(root != nullptr);
  DCHECK(arg != nullptr);
  SemiSpace* semi_space = reinterpret_cast<SemiSpace*>(arg);
  mirror::Object* ret = semi_space->MarkObject(root);
  semi_space->ProcessMarkStack(true);
  return ret;
}

Object* SemiSpace::MarkRootCallback(Object* root, void* arg) {
  DCHECK(root != nullptr);
  DCHECK(arg != nullptr);
  return reinterpret_cast<SemiSpace*>(arg)->MarkObject(root);
}

// Marks all objects in the root set.
void SemiSpace::MarkRoots() {
  timings_.StartSplit("MarkRoots");
  // TODO: Visit up image roots as well?
  Runtime::Current()->VisitRoots(MarkRootCallback, this, false, true);
  timings_.EndSplit();
}

void SemiSpace::BindLiveToMarkBitmap(space::ContinuousSpace* space) {
  CHECK(space->IsMallocSpace());
  space::MallocSpace* alloc_space = space->AsMallocSpace();
  accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
  accounting::SpaceBitmap* mark_bitmap = alloc_space->BindLiveToMarkBitmap();
  GetHeap()->GetMarkBitmap()->ReplaceBitmap(mark_bitmap, live_bitmap);
}

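// Returns obj unchanged for objects outside the from-space. From-space objects
// should already have been forwarded by the time this is called, hence the fatal
// log below.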
mirror::Object* SemiSpace::GetForwardingAddress(mirror::Object* obj) {
  if (from_space_->HasAddress(obj)) {
    LOG(FATAL) << "Shouldn't happen!";
    return GetForwardingAddressInFromSpace(obj);
  }
  return obj;
}

mirror::Object* SemiSpace::MarkedForwardingAddressCallback(Object* object, void* arg) {
  return reinterpret_cast<SemiSpace*>(arg)->GetMarkedForwardAddress(object);
}

void SemiSpace::SweepSystemWeaks() {
  timings_.StartSplit("SweepSystemWeaks");
  Runtime::Current()->SweepSystemWeaks(MarkedForwardingAddressCallback, this);
  timings_.EndSplit();
}

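// Bundles the state that the sweep callbacks need; passed through their void* arg.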
struct SweepCallbackContext {
  SemiSpace* mark_sweep;
  space::AllocSpace* space;
  Thread* self;
};

void SemiSpace::SweepCallback(size_t num_ptrs, Object** ptrs, void* arg) {
  SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
  SemiSpace* gc = context->mark_sweep;
  Heap* heap = gc->GetHeap();
  space::AllocSpace* space = context->space;
  Thread* self = context->self;
  Locks::heap_bitmap_lock_->AssertExclusiveHeld(self);
  size_t freed_bytes = space->FreeList(self, num_ptrs, ptrs);
  heap->RecordFree(num_ptrs, freed_bytes);
  gc->freed_objects_.fetch_add(num_ptrs);
  gc->freed_bytes_.fetch_add(freed_bytes);
}

void SemiSpace::ZygoteSweepCallback(size_t num_ptrs, Object** ptrs, void* arg) {
  SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
  Locks::heap_bitmap_lock_->AssertExclusiveHeld(context->self);
  Heap* heap = context->mark_sweep->GetHeap();
  // We don't free any actual memory to avoid dirtying the shared zygote pages.
  for (size_t i = 0; i < num_ptrs; ++i) {
    Object* obj = static_cast<Object*>(ptrs[i]);
    heap->GetLiveBitmap()->Clear(obj);
    heap->GetCardTable()->MarkCard(obj);
  }
}

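// Sweeping only applies to the non-moving malloc spaces and large objects; the
// semi-space copy reclaims the from-space wholesale when kResetFromSpace clears
// it in ReclaimPhase().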
void SemiSpace::Sweep(bool swap_bitmaps) {
  DCHECK(mark_stack_->IsEmpty());
  TimingLogger::ScopedSplit split("Sweep", &timings_);

  const bool partial = (GetGcType() == kGcTypePartial);
  SweepCallbackContext scc;
  scc.mark_sweep = this;
  scc.self = Thread::Current();
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (!space->IsMallocSpace()) {
      continue;
    }
    // We always sweep always-collect spaces.
    bool sweep_space = (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect);
    if (!partial && !sweep_space) {
      // We also sweep full-collect spaces when the GC isn't partial (i.e. it is a full GC).
      sweep_space = (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect);
    }
    if (sweep_space && space->IsMallocSpace()) {
      uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
      uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
      scc.space = space->AsMallocSpace();
      accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
      accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
      if (swap_bitmaps) {
        std::swap(live_bitmap, mark_bitmap);
      }
      if (!space->IsZygoteSpace()) {
        TimingLogger::ScopedSplit split("SweepAllocSpace", &timings_);
        // Bitmaps are pre-swapped for optimization which enables sweeping with the heap unlocked.
        accounting::SpaceBitmap::SweepWalk(*live_bitmap, *mark_bitmap, begin, end,
                                           &SweepCallback, reinterpret_cast<void*>(&scc));
      } else {
        TimingLogger::ScopedSplit split("SweepZygote", &timings_);
        // Zygote sweep takes care of dirtying cards and clearing live bits; it does not free actual
        // memory.
        accounting::SpaceBitmap::SweepWalk(*live_bitmap, *mark_bitmap, begin, end,
                                           &ZygoteSweepCallback, reinterpret_cast<void*>(&scc));
      }
    }
  }

  SweepLargeObjects(swap_bitmaps);
}

void SemiSpace::SweepLargeObjects(bool swap_bitmaps) {
  TimingLogger::ScopedSplit split("SweepLargeObjects", &timings_);
  // Sweep large objects.
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  accounting::SpaceSetMap* large_live_objects = large_object_space->GetLiveObjects();
  accounting::SpaceSetMap* large_mark_objects = large_object_space->GetMarkObjects();
  if (swap_bitmaps) {
    std::swap(large_live_objects, large_mark_objects);
  }
  // O(n*log(n)) but hopefully there are not too many large objects.
  size_t freed_objects = 0;
  size_t freed_bytes = 0;
  Thread* self = Thread::Current();
  for (const Object* obj : large_live_objects->GetObjects()) {
    if (!large_mark_objects->Test(obj)) {
      freed_bytes += large_object_space->Free(self, const_cast<Object*>(obj));
      ++freed_objects;
    }
  }
  freed_large_objects_.fetch_add(freed_objects);
  freed_large_object_bytes_.fetch_add(freed_bytes);
  GetHeap()->RecordFree(freed_objects, freed_bytes);
}

// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
void SemiSpace::DelayReferenceReferent(mirror::Class* klass, Object* obj) {
  heap_->DelayReferenceReferent(klass, obj, MarkedForwardingAddressCallback, this);
}

// Visit all of the references of an object and update them.
void SemiSpace::ScanObject(Object* obj) {
  DCHECK(obj != nullptr);
  DCHECK(!from_space_->HasAddress(obj)) << "Scanning object " << obj << " in from space";
  MarkSweep::VisitObjectReferences(obj, [this](Object* obj, Object* ref, const MemberOffset& offset,
      bool /* is_static */) ALWAYS_INLINE_LAMBDA NO_THREAD_SAFETY_ANALYSIS {
    mirror::Object* new_address = MarkObject(ref);
    if (new_address != ref) {
      DCHECK(new_address != nullptr);
      // No need to mark the card since we are only updating the object address, not changing the
      // actual object it points to. Using SetFieldPtr is better in this case since it does not
      // dirty cards or use additional memory.
      obj->SetFieldPtr(offset, new_address, false);
    }
  }, kMovingClasses);
  mirror::Class* klass = obj->GetClass();
  if (UNLIKELY(klass->IsReferenceClass())) {
    DelayReferenceReferent(klass, obj);
  }
}

// Scan anything that's on the mark stack.
void SemiSpace::ProcessMarkStack(bool paused) {
  timings_.StartSplit(paused ? "(paused)ProcessMarkStack" : "ProcessMarkStack");
  while (!mark_stack_->IsEmpty()) {
    ScanObject(mark_stack_->PopBack());
  }
  timings_.EndSplit();
}

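// Returns the marked address of obj: its forwarding address if obj is in the
// from-space, obj itself if it is already marked, or nullptr if it is unmarked.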
inline Object* SemiSpace::GetMarkedForwardAddress(mirror::Object* obj) const
    SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
  // All immune objects are assumed marked.
  if (IsImmune(obj)) {
    return obj;
  }
  if (from_space_->HasAddress(obj)) {
    mirror::Object* forwarding_address = GetForwardingAddressInFromSpace(const_cast<Object*>(obj));
    // If the object is forwarded then it MUST be marked.
    DCHECK(forwarding_address == nullptr || to_space_->HasAddress(forwarding_address) ||
           (kEnableSimplePromo && GetHeap()->GetNonMovingSpace()->HasAddress(forwarding_address)));
    if (forwarding_address != nullptr) {
      return forwarding_address;
    }
    // Must not be marked; return nullptr.
    return nullptr;
  } else if (to_space_->HasAddress(obj)) {
    // Already forwarded, must be marked.
    return obj;
  }
  return heap_->GetMarkBitmap()->Test(obj) ? obj : nullptr;
}

void SemiSpace::UnBindBitmaps() {
  TimingLogger::ScopedSplit split("UnBindBitmaps", &timings_);
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsMallocSpace()) {
      space::MallocSpace* alloc_space = space->AsMallocSpace();
      if (alloc_space->HasBoundBitmaps()) {
        alloc_space->UnBindBitmaps();
        heap_->GetMarkBitmap()->ReplaceBitmap(alloc_space->GetLiveBitmap(),
                                              alloc_space->GetMarkBitmap());
      }
    }
  }
}

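// The heap is responsible for selecting which spaces act as the to-space and
// from-space before the collection runs.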
void SemiSpace::SetToSpace(space::ContinuousMemMapAllocSpace* to_space) {
  DCHECK(to_space != nullptr);
  to_space_ = to_space;
}

void SemiSpace::SetFromSpace(space::ContinuousMemMapAllocSpace* from_space) {
  DCHECK(from_space != nullptr);
  from_space_ = from_space;
}

void SemiSpace::FinishPhase() {
  TimingLogger::ScopedSplit split("FinishPhase", &timings_);
  // Can't enqueue references if we hold the mutator lock.
  Heap* heap = GetHeap();
  timings_.NewSplit("PostGcVerification");
  heap->PostGcVerification(this);

  // Null the "to" and "from" spaces since compacting from one to the other isn't valid until
  // further action is done by the heap.
  to_space_ = nullptr;
  from_space_ = nullptr;

  // Update the cumulative statistics.
  total_freed_objects_ += GetFreedObjects() + GetFreedLargeObjects();
  total_freed_bytes_ += GetFreedBytes() + GetFreedLargeObjectBytes();

  // Ensure that the mark stack is empty.
  CHECK(mark_stack_->IsEmpty());

  // Update the cumulative loggers.
  cumulative_timings_.Start();
  cumulative_timings_.AddLogger(timings_);
  cumulative_timings_.End();

  // Clear all of the spaces' mark bitmaps.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    accounting::SpaceBitmap* bitmap = space->GetMarkBitmap();
    if (bitmap != nullptr &&
        space->GetGcRetentionPolicy() != space::kGcRetentionPolicyNeverCollect) {
      bitmap->Clear();
    }
  }
  mark_stack_->Reset();

  // Reset the marked large objects.
  space::LargeObjectSpace* large_objects = GetHeap()->GetLargeObjectsSpace();
  large_objects->GetMarkObjects()->Clear();
}

}  // namespace collector
}  // namespace gc
}  // namespace art