/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "semi_space.h"

#include <functional>
#include <numeric>
#include <climits>
#include <vector>

#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex-inl.h"
#include "base/timing_logger.h"
#include "gc/accounting/heap_bitmap.h"
#include "gc/accounting/mod_union_table.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/space/bump_pointer_space.h"
#include "gc/space/bump_pointer_space-inl.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "indirect_reference_table.h"
#include "intern_table.h"
#include "jni_internal.h"
#include "mark_sweep-inl.h"
#include "monitor.h"
#include "mirror/art_field.h"
#include "mirror/art_field-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/dex_cache.h"
#include "mirror/object-inl.h"
#include "mirror/object_array.h"
#include "mirror/object_array-inl.h"
#include "runtime.h"
#include "semi_space-inl.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "verifier/method_verifier.h"

using ::art::mirror::Class;
using ::art::mirror::Object;

namespace art {
namespace gc {
namespace collector {

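// Whether to mprotect the from-space after the collection so that stale references into it fault
// immediately, and whether to release the from-space's memory first (see the end of
// ReclaimPhase()).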
static constexpr bool kProtectFromSpace = true;
static constexpr bool kResetFromSpace = true;

// TODO: Unduplicate logic.
void SemiSpace::ImmuneSpace(space::ContinuousSpace* space) {
  // Bind live to mark bitmap if necessary.
  if (space->GetLiveBitmap() != space->GetMarkBitmap()) {
    CHECK(space->IsContinuousMemMapAllocSpace());
    space->AsContinuousMemMapAllocSpace()->BindLiveToMarkBitmap();
  }
  // Add the space to the immune region.
  if (immune_begin_ == nullptr) {
    DCHECK(immune_end_ == nullptr);
    immune_begin_ = reinterpret_cast<Object*>(space->Begin());
    immune_end_ = reinterpret_cast<Object*>(space->End());
  } else {
    const space::ContinuousSpace* prev_space = nullptr;
    // Find out if the previous space is immune.
    for (space::ContinuousSpace* cur_space : GetHeap()->GetContinuousSpaces()) {
      if (cur_space == space) {
        break;
      }
      prev_space = cur_space;
    }
    // If previous space was immune, then extend the immune region. Relies on continuous spaces
    // being sorted by Heap::AddContinuousSpace.
    if (prev_space != nullptr && IsImmuneSpace(prev_space)) {
      immune_begin_ = std::min(reinterpret_cast<Object*>(space->Begin()), immune_begin_);
      // Use Limit() instead of End() because otherwise, if generational mode is enabled, the
      // alloc space might expand due to promotion and the sense of immunity may change in the
      // middle of a GC.
      immune_end_ = std::max(reinterpret_cast<Object*>(space->Limit()), immune_end_);
    }
  }
}

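// Binds the to-space live bitmap to its mark bitmap and marks the spaces we will not collect as
// immune so that marking can skip over them.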
void SemiSpace::BindBitmaps() {
  timings_.StartSplit("BindBitmaps");
  WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->GetLiveBitmap() != nullptr) {
      if (space == to_space_) {
        CHECK(to_space_->IsContinuousMemMapAllocSpace());
        to_space_->AsContinuousMemMapAllocSpace()->BindLiveToMarkBitmap();
      } else if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect
                 || space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect
                 // Add the main free list space and the non-moving space to the immune space
                 // if this is a bump pointer space only collection.
                 || (generational_ && !whole_heap_collection_ &&
                     (space == GetHeap()->GetNonMovingSpace() ||
                      space == GetHeap()->GetPrimaryFreeListSpace()))) {
        ImmuneSpace(space);
      }
    }
  }
  if (generational_ && !whole_heap_collection_) {
    // We won't collect the large object space in a bump pointer space only collection.
    is_large_object_space_immune_ = true;
  }
  timings_.EndSplit();
}

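// When generational is true, the collector remembers the previous GC's to-space end
// (last_gc_to_space_end_) so that surviving objects can be pseudo-promoted into the main free
// list space, and it may run bump pointer space only collections between whole heap collections.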
SemiSpace::SemiSpace(Heap* heap, bool generational, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix + (name_prefix.empty() ? "" : " ") + "marksweep + semispace"),
      mark_stack_(nullptr),
      immune_begin_(nullptr),
      immune_end_(nullptr),
      is_large_object_space_immune_(false),
      to_space_(nullptr),
      from_space_(nullptr),
      self_(nullptr),
      generational_(generational),
      last_gc_to_space_end_(nullptr),
      bytes_promoted_(0),
      whole_heap_collection_(true),
      whole_heap_collection_interval_counter_(0) {
}

void SemiSpace::InitializePhase() {
  timings_.Reset();
  TimingLogger::ScopedSplit split("InitializePhase", &timings_);
  mark_stack_ = heap_->mark_stack_.get();
  DCHECK(mark_stack_ != nullptr);
  immune_begin_ = nullptr;
  immune_end_ = nullptr;
  is_large_object_space_immune_ = false;
  self_ = Thread::Current();
  // Do any pre GC verification.
  timings_.NewSplit("PreGcVerification");
  heap_->PreGcVerification(this);
  // Set the initial bitmap.
  to_space_live_bitmap_ = to_space_->GetLiveBitmap();
}

void SemiSpace::ProcessReferences(Thread* self) {
  TimingLogger::ScopedSplit split("ProcessReferences", &timings_);
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetHeap()->ProcessReferences(timings_, clear_soft_references_, &MarkedForwardingAddressCallback,
                               &RecursiveMarkObjectCallback, this);
}

void SemiSpace::MarkingPhase() {
  if (generational_) {
    if (gc_cause_ == kGcCauseExplicit || gc_cause_ == kGcCauseForNativeAlloc ||
        clear_soft_references_) {
      // If an explicit, native allocation-triggered, or last attempt
      // collection, collect the whole heap (and reset the interval
      // counter to be consistent.)
      whole_heap_collection_ = true;
      whole_heap_collection_interval_counter_ = 0;
    }
    if (whole_heap_collection_) {
      VLOG(heap) << "Whole heap collection";
    } else {
      VLOG(heap) << "Bump pointer space only collection";
    }
  }
  Locks::mutator_lock_->AssertExclusiveHeld(self_);
  TimingLogger::ScopedSplit split("MarkingPhase", &timings_);
  // Need to do this with mutators paused so that somebody doesn't accidentally allocate into the
  // wrong space.
  heap_->SwapSemiSpaces();
  if (generational_) {
    // If last_gc_to_space_end_ is out of the bounds of the from-space
    // (the to-space from last GC), then point it to the beginning of
    // the from-space. For example, the very first GC or the
    // pre-zygote compaction.
    if (!from_space_->HasAddress(reinterpret_cast<mirror::Object*>(last_gc_to_space_end_))) {
      last_gc_to_space_end_ = from_space_->Begin();
    }
    // Reset this before the marking starts below.
    bytes_promoted_ = 0;
  }
  // Assume the cleared space is already empty.
  BindBitmaps();
  // Process dirty cards and add dirty cards to mod-union tables.
  heap_->ProcessCards(timings_);
  // Clear the whole card table since we cannot get any additional dirty cards during the
  // paused GC. This saves memory but only works for pause-the-world collectors.
  timings_.NewSplit("ClearCardTable");
  heap_->GetCardTable()->ClearCardTable();
  // Need to do this before the checkpoint since we don't want any threads to add references to
  // the live stack during the recursive mark.
  timings_.NewSplit("SwapStacks");
  heap_->SwapStacks();
  WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
  MarkRoots();
  // Mark roots of immune spaces.
  UpdateAndMarkModUnion();
  // Recursively mark remaining objects.
  MarkReachableObjects();
}

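// A space is immune if it lies entirely within the [immune_begin_, immune_end_] region.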
bool SemiSpace::IsImmuneSpace(const space::ContinuousSpace* space) const {
  return
    immune_begin_ <= reinterpret_cast<Object*>(space->Begin()) &&
    immune_end_ >= reinterpret_cast<Object*>(space->End());
}

void SemiSpace::UpdateAndMarkModUnion() {
  for (auto& space : heap_->GetContinuousSpaces()) {
    // If the space is immune then we need to mark the references to other spaces.
    if (IsImmuneSpace(space)) {
      accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
      if (table != nullptr) {
        // TODO: Improve naming.
        TimingLogger::ScopedSplit split(
            space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
                                     "UpdateAndMarkImageModUnionTable",
            &timings_);
        table->UpdateAndMarkReferences(MarkRootCallback, this);
      } else {
        // In a bump pointer space only collection, the non-moving
        // space is added to the immune space. But the non-moving
        // space doesn't have a mod union table. Instead, its live
        // bitmap will be scanned later in MarkReachableObjects().
        DCHECK(generational_ && !whole_heap_collection_ &&
               (space == heap_->GetNonMovingSpace() || space == heap_->GetPrimaryFreeListSpace()));
      }
    }
  }
}

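// Visitor that forwards each visited object to SemiSpace::ScanObject(); used to scan live
// bitmaps and the immune large objects as roots.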
class SemiSpaceScanObjectVisitor {
 public:
  explicit SemiSpaceScanObjectVisitor(SemiSpace* ss) : semi_space_(ss) {}
  void operator()(Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
    // TODO: fix NO_THREAD_SAFETY_ANALYSIS. ScanObject() requires an
    // exclusive lock on the mutator lock, but
    // SpaceBitmap::VisitMarkedRange() only requires the shared lock.
    DCHECK(obj != nullptr);
    semi_space_->ScanObject(obj);
  }
 private:
  SemiSpace* semi_space_;
};

void SemiSpace::MarkReachableObjects() {
  timings_.StartSplit("MarkStackAsLive");
  accounting::ObjectStack* live_stack = heap_->GetLiveStack();
  heap_->MarkAllocStackAsLive(live_stack);
  live_stack->Reset();
  timings_.EndSplit();

  for (auto& space : heap_->GetContinuousSpaces()) {
    // If the space is immune and has no mod union table (the
    // non-moving space when the bump pointer space only collection is
    // enabled), then we need to scan its live bitmap as roots
    // (including the objects on the live stack which have just been
    // marked in the live bitmap above in MarkAllocStackAsLive()).
    if (IsImmuneSpace(space) && heap_->FindModUnionTableFromSpace(space) == nullptr) {
      DCHECK(generational_ && !whole_heap_collection_ &&
             (space == GetHeap()->GetNonMovingSpace() || space == GetHeap()->GetPrimaryFreeListSpace()));
      accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
      SemiSpaceScanObjectVisitor visitor(this);
      live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                    reinterpret_cast<uintptr_t>(space->End()),
                                    visitor);
    }
  }

  if (is_large_object_space_immune_) {
    DCHECK(generational_ && !whole_heap_collection_);
    // Delay copying the live set to the marked set until here from
    // BindBitmaps() as the large objects on the allocation stack may
    // be newly added to the live set above in MarkAllocStackAsLive().
    GetHeap()->GetLargeObjectsSpace()->CopyLiveToMarked();

    // When the large object space is immune, we need to scan the
    // large objects as roots since they contain references to their
    // classes (primitive array classes) that could move, even though
    // they don't contain any other references.
    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
    accounting::ObjectSet* large_live_objects = large_object_space->GetLiveObjects();
    SemiSpaceScanObjectVisitor visitor(this);
    for (const Object* obj : large_live_objects->GetObjects()) {
      visitor(const_cast<Object*>(obj));
    }
  }

  // Recursively process the mark stack.
  ProcessMarkStack(true);
}

void SemiSpace::ReclaimPhase() {
  TimingLogger::ScopedSplit split("ReclaimPhase", &timings_);
  ProcessReferences(self_);
  {
    ReaderMutexLock mu(self_, *Locks::heap_bitmap_lock_);
    SweepSystemWeaks();
  }
  // Record freed memory.
  uint64_t from_bytes = from_space_->GetBytesAllocated();
  uint64_t to_bytes = to_space_->GetBytesAllocated();
  uint64_t from_objects = from_space_->GetObjectsAllocated();
  uint64_t to_objects = to_space_->GetObjectsAllocated();
  CHECK_LE(to_objects, from_objects);
  int64_t freed_bytes = from_bytes - to_bytes;
  int64_t freed_objects = from_objects - to_objects;
  freed_bytes_.FetchAndAdd(freed_bytes);
  freed_objects_.FetchAndAdd(freed_objects);
  // Note: Freed bytes can be negative if we copy from a compacted space to a free-list backed
  // space.
  heap_->RecordFree(freed_objects, freed_bytes);
  timings_.StartSplit("PreSweepingGcVerification");
  heap_->PreSweepingGcVerification(this);
  timings_.EndSplit();

  {
    WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
    // Reclaim unmarked objects.
    Sweep(false);
    // Swap the live and mark bitmaps for each space which we modified. This is an optimization
    // that enables us to not clear live bits inside of the sweep. Only swaps unbound bitmaps.
    timings_.StartSplit("SwapBitmaps");
    SwapBitmaps();
    timings_.EndSplit();
    // Unbind the live and mark bitmaps.
    TimingLogger::ScopedSplit split("UnBindBitmaps", &timings_);
    GetHeap()->UnBindBitmaps();
  }
  // Release the memory used by the from space.
  if (kResetFromSpace) {
    // Clearing from space.
    from_space_->Clear();
  }
  // Protect the from space.
  VLOG(heap)
      << "mprotect region " << reinterpret_cast<void*>(from_space_->Begin()) << " - "
      << reinterpret_cast<void*>(from_space_->Limit());
  if (kProtectFromSpace) {
    mprotect(from_space_->Begin(), from_space_->Capacity(), PROT_NONE);
  } else {
    mprotect(from_space_->Begin(), from_space_->Capacity(), PROT_READ);
  }

  if (generational_) {
    // Record the end (top) of the to space so we can distinguish
    // between objects that were allocated since the last GC and the
    // older objects.
    last_gc_to_space_end_ = to_space_->End();
  }
}

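// Grow the mark stack to new_size, preserving its contents across the resize.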
void SemiSpace::ResizeMarkStack(size_t new_size) {
  std::vector<Object*> temp(mark_stack_->Begin(), mark_stack_->End());
  CHECK_LE(mark_stack_->Size(), new_size);
  mark_stack_->Resize(new_size);
  for (const auto& obj : temp) {
    mark_stack_->PushBack(obj);
  }
}

inline void SemiSpace::MarkStackPush(Object* obj) {
  if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
    ResizeMarkStack(mark_stack_->Capacity() * 2);
  }
  // The object must be pushed on to the mark stack.
  mark_stack_->PushBack(obj);
}

// Rare case, probably not worth inlining since it will increase instruction cache miss rate.
bool SemiSpace::MarkLargeObject(const Object* obj) {
  // TODO: support >1 discontinuous space.
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  DCHECK(large_object_space->Contains(obj));
  accounting::ObjectSet* large_objects = large_object_space->GetMarkObjects();
  if (UNLIKELY(!large_objects->Test(obj))) {
    large_objects->Set(obj);
    return true;
  }
  return false;
}

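// Marks a from-space object that has no forwarding address yet: in generational mode, objects
// that survived the previous GC (addresses below last_gc_to_space_end_) are pseudo-promoted into
// the main free list space, everything else is copied into the to-space. MarkObject() below then
// stashes the forwarding address in the object's lock word. A minimal sketch of how
// GetForwardingAddressInFromSpace() presumably reads it back (the real implementation lives in
// semi_space-inl.h):
//   LockWord lock_word = obj->GetLockWord();
//   if (lock_word.GetState() != LockWord::kForwardingAddress) {
//     return nullptr;  // Not forwarded yet.
//   }
//   return reinterpret_cast<mirror::Object*>(lock_word.ForwardingAddress());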
mirror::Object* SemiSpace::MarkNonForwardedObject(mirror::Object* obj) {
  size_t object_size = obj->SizeOf();
  size_t bytes_allocated;
  mirror::Object* forward_address = nullptr;
  if (generational_ && reinterpret_cast<byte*>(obj) < last_gc_to_space_end_) {
    // If it's allocated before the last GC (older), move
    // (pseudo-promote) it to the main free list space (as sort
    // of an old generation.)
    size_t bytes_promoted;
    space::MallocSpace* promo_dest_space = GetHeap()->GetPrimaryFreeListSpace();
    forward_address = promo_dest_space->Alloc(self_, object_size, &bytes_promoted);
    if (forward_address == nullptr) {
      // If out of space, fall back to the to-space.
      forward_address = to_space_->Alloc(self_, object_size, &bytes_allocated);
    } else {
      GetHeap()->num_bytes_allocated_.FetchAndAdd(bytes_promoted);
      bytes_promoted_ += bytes_promoted;
      // Handle the bitmaps marking.
      accounting::SpaceBitmap* live_bitmap = promo_dest_space->GetLiveBitmap();
      DCHECK(live_bitmap != nullptr);
      accounting::SpaceBitmap* mark_bitmap = promo_dest_space->GetMarkBitmap();
      DCHECK(mark_bitmap != nullptr);
      DCHECK(!live_bitmap->Test(forward_address));
      if (!whole_heap_collection_) {
        // If collecting the bump pointer spaces only, live_bitmap == mark_bitmap.
        DCHECK_EQ(live_bitmap, mark_bitmap);

        // In a bump pointer space only collection, delay the live
        // bitmap marking of the promoted object until it's popped off
        // the mark stack (ProcessMarkStack()). The rationale: we may
        // be in the middle of scanning the objects in the promo
        // destination space for
        // non-moving-space-to-bump-pointer-space references by
        // iterating over the marked bits of the live bitmap
        // (MarkReachableObjects()). If we don't delay it (and instead
        // mark the promoted object here), the above promo destination
        // space scan could encounter the just-promoted object and
        // forward the references in the promoted object's fields even
        // though it is pushed onto the mark stack. If this happens,
        // the promoted object would be in an inconsistent state, that
        // is, it's on the mark stack (gray) but its fields are
        // already forwarded (black), which would cause a
        // DCHECK(!to_space_->HasAddress(obj)) failure below.
      } else {
        // Mark forward_address on the live bit map.
        live_bitmap->Set(forward_address);
        // Mark forward_address on the mark bit map.
        DCHECK(!mark_bitmap->Test(forward_address));
        mark_bitmap->Set(forward_address);
      }
    }
    DCHECK(forward_address != nullptr);
  } else {
    // If it's allocated after the last GC (younger), copy it to the to-space.
    forward_address = to_space_->Alloc(self_, object_size, &bytes_allocated);
  }
  // Copy over the object and add it to the mark stack since we still need to update its
  // references.
  memcpy(reinterpret_cast<void*>(forward_address), obj, object_size);
  if (to_space_live_bitmap_ != nullptr) {
    to_space_live_bitmap_->Set(forward_address);
  }
  DCHECK(to_space_->HasAddress(forward_address) ||
         (generational_ && GetHeap()->GetPrimaryFreeListSpace()->HasAddress(forward_address)));
  return forward_address;
}

// Used to mark and copy objects. Any newly-marked objects which are in the from space get moved
// to the to-space and have their forward address updated. Objects which have been newly marked
// are pushed on the mark stack.
Object* SemiSpace::MarkObject(Object* obj) {
  Object* forward_address = obj;
  if (obj != nullptr && !IsImmune(obj)) {
    if (from_space_->HasAddress(obj)) {
      forward_address = GetForwardingAddressInFromSpace(obj);
      // If the object has already been moved, return the new forward address.
      if (forward_address == nullptr) {
        forward_address = MarkNonForwardedObject(obj);
        DCHECK(forward_address != nullptr);
        // Make sure to only update the forwarding address AFTER you copy the object so that the
        // monitor word doesn't get stomped over.
        obj->SetLockWord(LockWord::FromForwardingAddress(
            reinterpret_cast<size_t>(forward_address)));
        // Push the object onto the mark stack for later processing.
        MarkStackPush(forward_address);
      }
      // TODO: Do we need this if in the else statement?
    } else {
      accounting::SpaceBitmap* object_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
      if (LIKELY(object_bitmap != nullptr)) {
        if (generational_) {
          // In a bump pointer space only collection, we should not
          // reach here, as we don't/won't mark the objects in the
          // non-moving space (except for the promoted objects.) Note
          // the non-moving space is added to the immune space.
          DCHECK(whole_heap_collection_);
        }
        // This object was not previously marked.
        if (!object_bitmap->Test(obj)) {
          object_bitmap->Set(obj);
          MarkStackPush(obj);
        }
      } else {
        CHECK(!to_space_->HasAddress(obj)) << "Marking object in to_space_";
        if (MarkLargeObject(obj)) {
          MarkStackPush(obj);
        }
      }
    }
  }
  return forward_address;
}

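// Callback used during reference processing: marks the object and then eagerly drains the mark
// stack so everything transitively reachable from it is marked before the callback returns.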
Object* SemiSpace::RecursiveMarkObjectCallback(Object* root, void* arg) {
  DCHECK(root != nullptr);
  DCHECK(arg != nullptr);
  SemiSpace* semi_space = reinterpret_cast<SemiSpace*>(arg);
  mirror::Object* ret = semi_space->MarkObject(root);
  semi_space->ProcessMarkStack(true);
  return ret;
}

Object* SemiSpace::MarkRootCallback(Object* root, void* arg) {
  DCHECK(root != nullptr);
  DCHECK(arg != nullptr);
  return reinterpret_cast<SemiSpace*>(arg)->MarkObject(root);
}

// Marks all objects in the root set.
void SemiSpace::MarkRoots() {
  timings_.StartSplit("MarkRoots");
  // TODO: Visit up image roots as well?
  Runtime::Current()->VisitRoots(MarkRootCallback, this, false, true);
  timings_.EndSplit();
}

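// Returns the marked forwarding address of the object, or nullptr if it is unmarked; used for
// sweeping system weaks and for deciding whether a referent must be enqueued.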
mirror::Object* SemiSpace::MarkedForwardingAddressCallback(Object* object, void* arg) {
  return reinterpret_cast<SemiSpace*>(arg)->GetMarkedForwardAddress(object);
}

void SemiSpace::SweepSystemWeaks() {
  timings_.StartSplit("SweepSystemWeaks");
  Runtime::Current()->SweepSystemWeaks(MarkedForwardingAddressCallback, this);
  timings_.EndSplit();
}

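// Sweep neither of the semi-spaces (the from-space is reclaimed wholesale and the to-space holds
// the survivors) nor any immune space.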
bool SemiSpace::ShouldSweepSpace(space::ContinuousSpace* space) const {
  return space != from_space_ && space != to_space_ && !IsImmuneSpace(space);
}

void SemiSpace::Sweep(bool swap_bitmaps) {
  DCHECK(mark_stack_->IsEmpty());
  TimingLogger::ScopedSplit split("Sweep", &timings_);
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsContinuousMemMapAllocSpace()) {
      space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
      if (!ShouldSweepSpace(alloc_space)) {
        continue;
      }
      TimingLogger::ScopedSplit split(
          alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepAllocSpace", &timings_);
      size_t freed_objects = 0;
      size_t freed_bytes = 0;
      alloc_space->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
      heap_->RecordFree(freed_objects, freed_bytes);
      freed_objects_.FetchAndAdd(freed_objects);
      freed_bytes_.FetchAndAdd(freed_bytes);
    }
  }
  if (!is_large_object_space_immune_) {
    SweepLargeObjects(swap_bitmaps);
  }
}

void SemiSpace::SweepLargeObjects(bool swap_bitmaps) {
  DCHECK(!is_large_object_space_immune_);
  TimingLogger::ScopedSplit split("SweepLargeObjects", &timings_);
  size_t freed_objects = 0;
  size_t freed_bytes = 0;
  GetHeap()->GetLargeObjectsSpace()->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
  freed_large_objects_.FetchAndAdd(freed_objects);
  freed_large_object_bytes_.FetchAndAdd(freed_bytes);
  GetHeap()->RecordFree(freed_objects, freed_bytes);
}

// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
void SemiSpace::DelayReferenceReferent(mirror::Class* klass, Object* obj) {
  heap_->DelayReferenceReferent(klass, obj, MarkedForwardingAddressCallback, this);
}

// Visit all of the references of an object and update.
void SemiSpace::ScanObject(Object* obj) {
  DCHECK(obj != nullptr);
  DCHECK(!from_space_->HasAddress(obj)) << "Scanning object " << obj << " in from space";
  MarkSweep::VisitObjectReferences(obj, [this](Object* obj, Object* ref, const MemberOffset& offset,
     bool /* is_static */) ALWAYS_INLINE_LAMBDA NO_THREAD_SAFETY_ANALYSIS {
    mirror::Object* new_address = MarkObject(ref);
    if (new_address != ref) {
      DCHECK(new_address != nullptr);
      // We don't need to mark the card since we are updating the object address and not changing
      // the actual objects it points to. Using SetFieldObjectWithoutWriteBarrier is better in
      // this case since it does not dirty cards or use additional memory.
      obj->SetFieldObjectWithoutWriteBarrier(offset, new_address, false);
    }
  }, kMovingClasses);
  mirror::Class* klass = obj->GetClass();
  if (UNLIKELY(klass->IsReferenceClass())) {
    DelayReferenceReferent(klass, obj);
  }
}

// Scan anything that's on the mark stack.
void SemiSpace::ProcessMarkStack(bool paused) {
  space::MallocSpace* promo_dest_space = nullptr;
  accounting::SpaceBitmap* live_bitmap = nullptr;
  if (generational_ && !whole_heap_collection_) {
    // In a bump pointer space only collection (where promotion is
    // enabled), we delay the live-bitmap marking of promoted objects
    // from MarkObject() until this function.
    promo_dest_space = GetHeap()->GetPrimaryFreeListSpace();
    live_bitmap = promo_dest_space->GetLiveBitmap();
    DCHECK(live_bitmap != nullptr);
    accounting::SpaceBitmap* mark_bitmap = promo_dest_space->GetMarkBitmap();
    DCHECK(mark_bitmap != nullptr);
    DCHECK_EQ(live_bitmap, mark_bitmap);
  }
  timings_.StartSplit(paused ? "(paused)ProcessMarkStack" : "ProcessMarkStack");
  while (!mark_stack_->IsEmpty()) {
    Object* obj = mark_stack_->PopBack();
    if (generational_ && !whole_heap_collection_ && promo_dest_space->HasAddress(obj)) {
      // obj has just been promoted. Mark the live bitmap for it,
      // which is delayed from MarkObject().
      DCHECK(!live_bitmap->Test(obj));
      live_bitmap->Set(obj);
    }
    ScanObject(obj);
  }
  timings_.EndSplit();
}

inline Object* SemiSpace::GetMarkedForwardAddress(mirror::Object* obj) const
    SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
  // All immune objects are assumed marked.
  if (IsImmune(obj)) {
    return obj;
  }
  if (from_space_->HasAddress(obj)) {
    mirror::Object* forwarding_address = GetForwardingAddressInFromSpace(const_cast<Object*>(obj));
    return forwarding_address;  // Returns either the forwarding address or nullptr.
  } else if (to_space_->HasAddress(obj)) {
    // Should be unlikely.
    // Already forwarded, must be marked.
    return obj;
  }
  return heap_->GetMarkBitmap()->Test(obj) ? obj : nullptr;
}

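// The heap installs the two semi-spaces through these setters before each collection;
// FinishPhase() nulls them out again afterwards.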
void SemiSpace::SetToSpace(space::ContinuousMemMapAllocSpace* to_space) {
  DCHECK(to_space != nullptr);
  to_space_ = to_space;
}

void SemiSpace::SetFromSpace(space::ContinuousMemMapAllocSpace* from_space) {
  DCHECK(from_space != nullptr);
  from_space_ = from_space;
}

void SemiSpace::FinishPhase() {
  TimingLogger::ScopedSplit split("FinishPhase", &timings_);
  Heap* heap = GetHeap();
  timings_.NewSplit("PostGcVerification");
  heap->PostGcVerification(this);

  // Null the "to" and "from" spaces since compacting from one to the other isn't valid until
  // further action is done by the heap.
  to_space_ = nullptr;
  from_space_ = nullptr;

  // Update the cumulative statistics.
  total_freed_objects_ += GetFreedObjects() + GetFreedLargeObjects();
  total_freed_bytes_ += GetFreedBytes() + GetFreedLargeObjectBytes();

  // Ensure that the mark stack is empty.
  CHECK(mark_stack_->IsEmpty());

  // Update the cumulative loggers.
  cumulative_timings_.Start();
  cumulative_timings_.AddLogger(timings_);
  cumulative_timings_.End();

  // Clear all of the spaces' mark bitmaps.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    accounting::SpaceBitmap* bitmap = space->GetMarkBitmap();
    if (bitmap != nullptr &&
        space->GetGcRetentionPolicy() != space::kGcRetentionPolicyNeverCollect) {
      bitmap->Clear();
    }
  }
  mark_stack_->Reset();

  // Reset the marked large objects.
  space::LargeObjectSpace* large_objects = GetHeap()->GetLargeObjectsSpace();
  large_objects->GetMarkObjects()->Clear();

  if (generational_) {
    // Decide whether to do a whole heap collection or a bump pointer
    // space only collection at the next collection by updating
    // whole_heap_collection. Enable whole_heap_collection once every
    // kDefaultWholeHeapCollectionInterval collections.
    if (!whole_heap_collection_) {
      --whole_heap_collection_interval_counter_;
      DCHECK_GE(whole_heap_collection_interval_counter_, 0);
      if (whole_heap_collection_interval_counter_ == 0) {
        whole_heap_collection_ = true;
      }
    } else {
      DCHECK_EQ(whole_heap_collection_interval_counter_, 0);
      whole_heap_collection_interval_counter_ = kDefaultWholeHeapCollectionInterval;
      whole_heap_collection_ = false;
    }
  }
}

}  // namespace collector
}  // namespace gc
}  // namespace art