/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "semi_space.h"

#include <functional>
#include <numeric>
#include <climits>
#include <vector>

#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex-inl.h"
#include "base/timing_logger.h"
#include "gc/accounting/heap_bitmap.h"
#include "gc/accounting/mod_union_table.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/space/bump_pointer_space.h"
#include "gc/space/bump_pointer_space-inl.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "indirect_reference_table.h"
#include "intern_table.h"
#include "jni_internal.h"
#include "mark_sweep-inl.h"
#include "monitor.h"
#include "mirror/art_field.h"
#include "mirror/art_field-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/dex_cache.h"
#include "mirror/object-inl.h"
#include "mirror/object_array.h"
#include "mirror/object_array-inl.h"
#include "runtime.h"
#include "semi_space-inl.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "verifier/method_verifier.h"

using ::art::mirror::Class;
using ::art::mirror::Object;

namespace art {
namespace gc {
namespace collector {

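// Collector tuning flags: kProtectFromSpace protects the from-space pages (PROT_NONE) after the
// collection so that stale references into it fault immediately; kClearFromSpace releases the
// from-space's memory once the copy is done; kStoreStackTraces dumps all thread stacks into the
// runtime fault message at the start of marking, to help diagnose heap corruption crashes.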
static constexpr bool kProtectFromSpace = true;
static constexpr bool kClearFromSpace = true;
static constexpr bool kStoreStackTraces = false;

// TODO: Unduplicate logic.
void SemiSpace::ImmuneSpace(space::ContinuousSpace* space) {
  // Bind live to mark bitmap if necessary.
  if (space->GetLiveBitmap() != space->GetMarkBitmap()) {
    CHECK(space->IsContinuousMemMapAllocSpace());
    space->AsContinuousMemMapAllocSpace()->BindLiveToMarkBitmap();
  }
  // Add the space to the immune region.
  if (immune_begin_ == nullptr) {
    DCHECK(immune_end_ == nullptr);
    immune_begin_ = reinterpret_cast<Object*>(space->Begin());
    immune_end_ = reinterpret_cast<Object*>(space->End());
  } else {
    const space::ContinuousSpace* prev_space = nullptr;
    // Find out if the previous space is immune.
    for (space::ContinuousSpace* cur_space : GetHeap()->GetContinuousSpaces()) {
      if (cur_space == space) {
        break;
      }
      prev_space = cur_space;
    }
    // If the previous space was immune, then extend the immune region. Relies on continuous
    // spaces being sorted by Heap::AddContinuousSpace.
    if (prev_space != nullptr && IsImmuneSpace(prev_space)) {
      immune_begin_ = std::min(reinterpret_cast<Object*>(space->Begin()), immune_begin_);
      // Use Limit() instead of End() because otherwise if the generational mode is enabled, the
      // alloc space might expand due to promotion and the sense of immunity may change in the
      // middle of a GC.
      immune_end_ = std::max(reinterpret_cast<Object*>(space->Limit()), immune_end_);
    }
  }
}

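// Decides, per space, how it is treated this cycle: the to-space gets its live bitmap bound to
// its mark bitmap, while never-collected spaces (and, for a bump pointer space only collection,
// the main free list and non-moving spaces) are added to the immune region.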
void SemiSpace::BindBitmaps() {
  timings_.StartSplit("BindBitmaps");
  WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->GetLiveBitmap() != nullptr) {
      if (space == to_space_) {
        CHECK(to_space_->IsContinuousMemMapAllocSpace());
        to_space_->AsContinuousMemMapAllocSpace()->BindLiveToMarkBitmap();
      } else if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect
                 || space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect
                 // Add the main free list space and the non-moving space to the immune space
                 // if this is a bump pointer space only collection.
                 || (generational_ && !whole_heap_collection_ &&
                     (space == GetHeap()->GetNonMovingSpace() ||
                      space == GetHeap()->GetPrimaryFreeListSpace()))) {
        ImmuneSpace(space);
      }
    }
  }
  if (generational_ && !whole_heap_collection_) {
    // We won't collect the large object space if this is a bump pointer space only collection.
    is_large_object_space_immune_ = true;
  }
  timings_.EndSplit();
}

SemiSpace::SemiSpace(Heap* heap, bool generational, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix + (name_prefix.empty() ? "" : " ") + "marksweep + semispace"),
      mark_stack_(nullptr),
      immune_begin_(nullptr),
      immune_end_(nullptr),
      is_large_object_space_immune_(false),
      to_space_(nullptr),
      to_space_live_bitmap_(nullptr),
      from_space_(nullptr),
      self_(nullptr),
      generational_(generational),
      last_gc_to_space_end_(nullptr),
      bytes_promoted_(0),
      whole_heap_collection_(true),
      whole_heap_collection_interval_counter_(0),
      saved_bytes_(0) {
}

void SemiSpace::InitializePhase() {
  timings_.Reset();
  TimingLogger::ScopedSplit split("InitializePhase", &timings_);
  mark_stack_ = heap_->mark_stack_.get();
  DCHECK(mark_stack_ != nullptr);
  immune_begin_ = nullptr;
  immune_end_ = nullptr;
  is_large_object_space_immune_ = false;
  saved_bytes_ = 0;
  self_ = Thread::Current();
  // Do any pre GC verification.
  timings_.NewSplit("PreGcVerification");
  heap_->PreGcVerification(this);
  // Set the initial bitmap.
  to_space_live_bitmap_ = to_space_->GetLiveBitmap();
}

void SemiSpace::ProcessReferences(Thread* self) {
  TimingLogger::ScopedSplit split("ProcessReferences", &timings_);
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetHeap()->ProcessReferences(timings_, clear_soft_references_, &MarkedForwardingAddressCallback,
                               &MarkObjectCallback, &ProcessMarkStackCallback, this);
}

void SemiSpace::MarkingPhase() {
  if (kStoreStackTraces) {
    Locks::mutator_lock_->AssertExclusiveHeld(self_);
    // Store the stack traces into the runtime fault string in case we get a heap corruption
    // related crash later.
    ThreadState old_state = self_->SetStateUnsafe(kRunnable);
    std::ostringstream oss;
    Runtime* runtime = Runtime::Current();
    runtime->GetThreadList()->DumpForSigQuit(oss);
    runtime->GetThreadList()->DumpNativeStacks(oss);
    runtime->SetFaultMessage(oss.str());
    CHECK_EQ(self_->SetStateUnsafe(old_state), kRunnable);
  }

  if (generational_) {
    if (gc_cause_ == kGcCauseExplicit || gc_cause_ == kGcCauseForNativeAlloc ||
        clear_soft_references_) {
      // If this is an explicit, native allocation-triggered, or last attempt collection,
      // collect the whole heap (and reset the interval counter to be consistent).
      whole_heap_collection_ = true;
      whole_heap_collection_interval_counter_ = 0;
    }
    if (whole_heap_collection_) {
      VLOG(heap) << "Whole heap collection";
    } else {
      VLOG(heap) << "Bump pointer space only collection";
    }
  }
  Locks::mutator_lock_->AssertExclusiveHeld(self_);

  TimingLogger::ScopedSplit split("MarkingPhase", &timings_);
  // Need to do this with mutators paused so that somebody doesn't accidentally allocate into the
  // wrong space.
  heap_->SwapSemiSpaces();
  if (generational_) {
    // If last_gc_to_space_end_ is out of the bounds of the from-space (the to-space from the
    // last GC), then point it to the beginning of the from-space. For example, the very first
    // GC or the pre-zygote compaction.
    if (!from_space_->HasAddress(reinterpret_cast<mirror::Object*>(last_gc_to_space_end_))) {
      last_gc_to_space_end_ = from_space_->Begin();
    }
    // Reset this before the marking starts below.
    bytes_promoted_ = 0;
  }
  // Assume the cleared space is already empty.
  BindBitmaps();
  // Process dirty cards and add dirty cards to mod-union tables.
  heap_->ProcessCards(timings_);
  // Clear the whole card table since we cannot get any additional dirty cards during the
  // paused GC. This saves memory but only works for pause-the-world collectors.
  timings_.NewSplit("ClearCardTable");
  heap_->GetCardTable()->ClearCardTable();
  // Need to do this before the checkpoint since we don't want any threads to add references to
  // the live stack during the recursive mark.
  timings_.NewSplit("SwapStacks");
  if (kUseThreadLocalAllocationStack) {
    heap_->RevokeAllThreadLocalAllocationStacks(self_);
  }
  heap_->SwapStacks(self_);
  WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
  MarkRoots();
  // Mark roots of immune spaces.
  UpdateAndMarkModUnion();
  // Recursively mark remaining objects.
  MarkReachableObjects();
}

bool SemiSpace::IsImmuneSpace(const space::ContinuousSpace* space) const {
  return
    immune_begin_ <= reinterpret_cast<Object*>(space->Begin()) &&
    immune_end_ >= reinterpret_cast<Object*>(space->End());
}

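// For each immune space with a mod union table, replay the recorded dirty-card references so
// that pointers from the immune space into the collected spaces are marked and forwarded without
// scanning the whole space. Immune spaces without a table (the non-moving spaces during a bump
// pointer space only collection) are instead scanned via their live bitmaps in
// MarkReachableObjects().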
void SemiSpace::UpdateAndMarkModUnion() {
  for (auto& space : heap_->GetContinuousSpaces()) {
    // If the space is immune then we need to mark the references to other spaces.
    if (IsImmuneSpace(space)) {
      accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
      if (table != nullptr) {
        // TODO: Improve naming.
        TimingLogger::ScopedSplit split(
            space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
                                     "UpdateAndMarkImageModUnionTable",
            &timings_);
        table->UpdateAndMarkReferences(MarkObjectCallback, this);
      } else {
        // If this is a bump pointer space only collection, the non-moving space is added to the
        // immune space. But the non-moving space doesn't have a mod union table. Instead, its
        // live bitmap will be scanned later in MarkReachableObjects().
        DCHECK(generational_ && !whole_heap_collection_ &&
               (space == heap_->GetNonMovingSpace() || space == heap_->GetPrimaryFreeListSpace()));
      }
    }
  }
}

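// Visitor that scans each object it is given, forwarding the references the object contains.
// Used to treat every live object of an immune space as a root via a live bitmap walk.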
class SemiSpaceScanObjectVisitor {
 public:
  explicit SemiSpaceScanObjectVisitor(SemiSpace* ss) : semi_space_(ss) {}
  void operator()(Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
    // TODO: fix NO_THREAD_SAFETY_ANALYSIS. ScanObject() requires an
    // exclusive lock on the mutator lock, but
    // SpaceBitmap::VisitMarkedRange() only requires the shared lock.
    DCHECK(obj != nullptr);
    semi_space_->ScanObject(obj);
  }
 private:
  SemiSpace* const semi_space_;
};

void SemiSpace::MarkReachableObjects() {
  timings_.StartSplit("MarkStackAsLive");
  accounting::ObjectStack* live_stack = heap_->GetLiveStack();
  heap_->MarkAllocStackAsLive(live_stack);
  live_stack->Reset();
  timings_.EndSplit();

  for (auto& space : heap_->GetContinuousSpaces()) {
    // If the space is immune and has no mod union table (the non-moving space when the bump
    // pointer space only collection is enabled), then we need to scan its live bitmap as roots
    // (including the objects on the live stack, which have just been marked in the live bitmap
    // above in MarkAllocStackAsLive()).
    if (IsImmuneSpace(space) && heap_->FindModUnionTableFromSpace(space) == nullptr) {
      DCHECK(generational_ && !whole_heap_collection_ &&
             (space == GetHeap()->GetNonMovingSpace() || space == GetHeap()->GetPrimaryFreeListSpace()));
      accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
      SemiSpaceScanObjectVisitor visitor(this);
      live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                    reinterpret_cast<uintptr_t>(space->End()),
                                    visitor);
    }
  }

  if (is_large_object_space_immune_) {
    DCHECK(generational_ && !whole_heap_collection_);
    // Delay copying the live set to the marked set until here from BindBitmaps() as the large
    // objects on the allocation stack may be newly added to the live set above in
    // MarkAllocStackAsLive().
    GetHeap()->GetLargeObjectsSpace()->CopyLiveToMarked();

    // When the large object space is immune, we need to scan the large object space as roots as
    // they contain references to their classes (primitive array classes) that could move though
    // they don't contain any other references.
    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
    accounting::ObjectSet* large_live_objects = large_object_space->GetLiveObjects();
    SemiSpaceScanObjectVisitor visitor(this);
    for (const Object* obj : large_live_objects->GetObjects()) {
      visitor(const_cast<Object*>(obj));
    }
  }

  // Recursively process the mark stack.
  ProcessMarkStack();
}

void SemiSpace::ReclaimPhase() {
  TimingLogger::ScopedSplit split("ReclaimPhase", &timings_);
  ProcessReferences(self_);
  {
    ReaderMutexLock mu(self_, *Locks::heap_bitmap_lock_);
    SweepSystemWeaks();
  }
  // Record freed memory.
  uint64_t from_bytes = from_space_->GetBytesAllocated();
  uint64_t to_bytes = to_space_->GetBytesAllocated();
  uint64_t from_objects = from_space_->GetObjectsAllocated();
  uint64_t to_objects = to_space_->GetObjectsAllocated();
  CHECK_LE(to_objects, from_objects);
  int64_t freed_bytes = from_bytes - to_bytes;
  int64_t freed_objects = from_objects - to_objects;
  freed_bytes_.FetchAndAdd(freed_bytes);
  freed_objects_.FetchAndAdd(freed_objects);
  // Note: Freed bytes can be negative if we copy from a compacted space to a free-list backed
  // space.
  heap_->RecordFree(freed_objects, freed_bytes);
  timings_.StartSplit("PreSweepingGcVerification");
  heap_->PreSweepingGcVerification(this);
  timings_.EndSplit();

  {
    WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
    // Reclaim unmarked objects.
    Sweep(false);
    // Swap the live and mark bitmaps for each space that we modified. This is an optimization
    // that enables us to not clear live bits inside of the sweep. Only swaps unbound bitmaps.
    timings_.StartSplit("SwapBitmaps");
    SwapBitmaps();
    timings_.EndSplit();
    // Unbind the live and mark bitmaps.
    TimingLogger::ScopedSplit split("UnBindBitmaps", &timings_);
    GetHeap()->UnBindBitmaps();
  }
  if (kClearFromSpace) {
    // Release the memory used by the from space.
    from_space_->Clear();
  }
  from_space_->Reset();
  // Protect the from space.
  VLOG(heap) << "Protecting space " << *from_space_;
  if (kProtectFromSpace) {
    from_space_->GetMemMap()->Protect(PROT_NONE);
  } else {
    from_space_->GetMemMap()->Protect(PROT_READ);
  }
  if (saved_bytes_ > 0) {
    VLOG(heap) << "Avoided dirtying " << PrettySize(saved_bytes_);
  }

  if (generational_) {
    // Record the end (top) of the to space so we can distinguish between objects that were
    // allocated since the last GC and the older objects.
    last_gc_to_space_end_ = to_space_->End();
  }
}

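// Grows the mark stack to new_size, preserving its contents: the entries are copied out first
// and pushed back after the reallocation.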
void SemiSpace::ResizeMarkStack(size_t new_size) {
  std::vector<Object*> temp(mark_stack_->Begin(), mark_stack_->End());
  CHECK_LE(mark_stack_->Size(), new_size);
  mark_stack_->Resize(new_size);
  for (const auto& obj : temp) {
    mark_stack_->PushBack(obj);
  }
}

inline void SemiSpace::MarkStackPush(Object* obj) {
  if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
    ResizeMarkStack(mark_stack_->Capacity() * 2);
  }
  // The object must be pushed on to the mark stack.
  mark_stack_->PushBack(obj);
}

// Rare case, probably not worth inlining since it will increase instruction cache miss rate.
bool SemiSpace::MarkLargeObject(const Object* obj) {
  // TODO: support >1 discontinuous space.
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  DCHECK(large_object_space->Contains(obj));
  accounting::ObjectSet* large_objects = large_object_space->GetMarkObjects();
  if (UNLIKELY(!large_objects->Test(obj))) {
    large_objects->Set(obj);
    return true;
  }
  return false;
}

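// Copies size bytes from src to dest while trying not to dirty destination pages that would end
// up all-zero. The destination is expected to be freshly-mapped, zero-filled memory (verified in
// debug builds), so skipping the write for an all-zero source page leaves that page untouched.
// Returns the number of bytes whose copying was avoided this way.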
static inline size_t CopyAvoidingDirtyingPages(void* dest, const void* src, size_t size) {
  if (LIKELY(size <= static_cast<size_t>(kPageSize))) {
    // We will dirty the current page and somewhere in the middle of the next page. This means
    // that the next object copied will also dirty that page.
    // TODO: Worth considering the last object copied? We may end up dirtying one page which is
    // not necessary per GC.
    memcpy(dest, src, size);
    return 0;
  }
  size_t saved_bytes = 0;
  byte* byte_dest = reinterpret_cast<byte*>(dest);
  if (kIsDebugBuild) {
    for (size_t i = 0; i < size; ++i) {
      CHECK_EQ(byte_dest[i], 0U);
    }
  }
  // Process the start of the page. The page must already be dirty, don't bother with checking.
  const byte* byte_src = reinterpret_cast<const byte*>(src);
  const byte* limit = byte_src + size;
  size_t page_remain = AlignUp(byte_dest, kPageSize) - byte_dest;
  // Copy the bytes until the start of the next page.
  memcpy(dest, src, page_remain);
  byte_src += page_remain;
  byte_dest += page_remain;
  CHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_dest), kPageSize);
  CHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_dest), sizeof(uintptr_t));
  CHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_src), sizeof(uintptr_t));
  while (byte_src + kPageSize < limit) {
    bool all_zero = true;
    uintptr_t* word_dest = reinterpret_cast<uintptr_t*>(byte_dest);
    const uintptr_t* word_src = reinterpret_cast<const uintptr_t*>(byte_src);
    for (size_t i = 0; i < kPageSize / sizeof(*word_src); ++i) {
      // Assumes the destination of the copy is all zeros.
      if (word_src[i] != 0) {
        all_zero = false;
        word_dest[i] = word_src[i];
      }
    }
    if (all_zero) {
      // Avoided copying into the page since it was all zeros.
      saved_bytes += kPageSize;
    }
    byte_src += kPageSize;
    byte_dest += kPageSize;
  }
  // Handle the part of the page at the end.
  memcpy(byte_dest, byte_src, limit - byte_src);
  return saved_bytes;
}

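// Marks an object that has no forwarding address installed yet. In generational mode, objects
// allocated before the last GC (below last_gc_to_space_end_) are pseudo-promoted into the main
// free list space, which serves as a rough old generation; all other objects are copied into the
// to-space. Returns the new address of the object.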
mirror::Object* SemiSpace::MarkNonForwardedObject(mirror::Object* obj) {
  size_t object_size = obj->SizeOf();
  size_t bytes_allocated;
  mirror::Object* forward_address = nullptr;
  if (generational_ && reinterpret_cast<byte*>(obj) < last_gc_to_space_end_) {
    // If it's allocated before the last GC (older), move (pseudo-promote) it to the main free
    // list space (as sort of an old generation.)
    size_t bytes_promoted;
    space::MallocSpace* promo_dest_space = GetHeap()->GetPrimaryFreeListSpace();
    forward_address = promo_dest_space->Alloc(self_, object_size, &bytes_promoted, nullptr);
    if (forward_address == nullptr) {
      // If out of space, fall back to the to-space.
      forward_address = to_space_->Alloc(self_, object_size, &bytes_allocated, nullptr);
    } else {
      GetHeap()->num_bytes_allocated_.FetchAndAdd(bytes_promoted);
      bytes_promoted_ += bytes_promoted;
      // Handle the bitmaps marking.
      accounting::SpaceBitmap* live_bitmap = promo_dest_space->GetLiveBitmap();
      DCHECK(live_bitmap != nullptr);
      accounting::SpaceBitmap* mark_bitmap = promo_dest_space->GetMarkBitmap();
      DCHECK(mark_bitmap != nullptr);
      DCHECK(!live_bitmap->Test(forward_address));
      if (!whole_heap_collection_) {
        // If collecting the bump pointer spaces only, live_bitmap == mark_bitmap.
        DCHECK_EQ(live_bitmap, mark_bitmap);

        // If this is a bump pointer space only collection, delay the live bitmap marking of the
        // promoted object until it's popped off the mark stack (ProcessMarkStack()). The
        // rationale: we may be in the middle of scanning the objects in the promo destination
        // space for non-moving-space-to-bump-pointer-space references by iterating over the
        // marked bits of the live bitmap (MarkReachableObjects()). If we don't delay it (and
        // instead mark the promoted object here), the above promo destination space scan could
        // encounter the just-promoted object and forward the references in the promoted
        // object's fields even though it is pushed onto the mark stack. If this happens, the
        // promoted object would be in an inconsistent state, that is, it's on the mark stack
        // (gray) but its fields are already forwarded (black), which would cause a
        // DCHECK(!to_space_->HasAddress(obj)) failure below.
      } else {
        // Mark forward_address on the live bit map.
        live_bitmap->Set(forward_address);
        // Mark forward_address on the mark bit map.
        DCHECK(!mark_bitmap->Test(forward_address));
        mark_bitmap->Set(forward_address);
      }
    }
    DCHECK(forward_address != nullptr);
  } else {
    // If it's allocated after the last GC (younger), copy it to the to-space.
    forward_address = to_space_->Alloc(self_, object_size, &bytes_allocated, nullptr);
  }
  // Copy over the object and add it to the mark stack since we still need to update its
  // references.
  saved_bytes_ +=
      CopyAvoidingDirtyingPages(reinterpret_cast<void*>(forward_address), obj, object_size);
  if (kUseBrooksPointer) {
    obj->AssertSelfBrooksPointer();
    DCHECK_EQ(forward_address->GetBrooksPointer(), obj);
    forward_address->SetBrooksPointer(forward_address);
    forward_address->AssertSelfBrooksPointer();
  }
  if (to_space_live_bitmap_ != nullptr) {
    to_space_live_bitmap_->Set(forward_address);
  }
  DCHECK(to_space_->HasAddress(forward_address) ||
         (generational_ && GetHeap()->GetPrimaryFreeListSpace()->HasAddress(forward_address)));
  return forward_address;
}

// Used to mark and copy objects. Any newly-marked objects that are in the from space get moved
// to the to-space and have their forward address updated. Objects which have been newly marked
// are pushed on the mark stack.
Object* SemiSpace::MarkObject(Object* obj) {
  if (kUseBrooksPointer) {
    // Verify all the objects have the correct forward pointer installed.
    if (obj != nullptr) {
      obj->AssertSelfBrooksPointer();
    }
  }
  Object* forward_address = obj;
  if (obj != nullptr && !IsImmune(obj)) {
    if (from_space_->HasAddress(obj)) {
      forward_address = GetForwardingAddressInFromSpace(obj);
      // If the object has already been moved, return the new forward address.
      if (forward_address == nullptr) {
        forward_address = MarkNonForwardedObject(obj);
        DCHECK(forward_address != nullptr);
        // Make sure to only update the forwarding address AFTER you copy the object so that the
        // monitor word doesn't get stomped over.
        obj->SetLockWord(LockWord::FromForwardingAddress(
            reinterpret_cast<size_t>(forward_address)));
        // Push the object onto the mark stack for later processing.
        MarkStackPush(forward_address);
      }
      // TODO: Do we need this if in the else statement?
    } else {
      accounting::SpaceBitmap* object_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
      if (LIKELY(object_bitmap != nullptr)) {
        if (generational_) {
          // If this is a bump pointer space only collection, we should not reach here as we
          // don't/won't mark the objects in the non-moving space (except for the promoted
          // objects.) Note the non-moving space is added to the immune space.
          DCHECK(whole_heap_collection_);
        }
        // This object was not previously marked.
        if (!object_bitmap->Test(obj)) {
          object_bitmap->Set(obj);
          MarkStackPush(obj);
        }
      } else {
        CHECK(!to_space_->HasAddress(obj)) << "Marking object in to_space_";
        if (MarkLargeObject(obj)) {
          MarkStackPush(obj);
        }
      }
    }
  }
  return forward_address;
}

void SemiSpace::ProcessMarkStackCallback(void* arg) {
  DCHECK(arg != nullptr);
  reinterpret_cast<SemiSpace*>(arg)->ProcessMarkStack();
}

mirror::Object* SemiSpace::MarkObjectCallback(mirror::Object* root, void* arg) {
  DCHECK(root != nullptr);
  DCHECK(arg != nullptr);
  return reinterpret_cast<SemiSpace*>(arg)->MarkObject(root);
}

void SemiSpace::MarkRootCallback(Object** root, void* arg, uint32_t /*thread_id*/,
                                 RootType /*root_type*/) {
  DCHECK(root != nullptr);
  DCHECK(arg != nullptr);
  *root = reinterpret_cast<SemiSpace*>(arg)->MarkObject(*root);
}

// Marks all objects in the root set.
void SemiSpace::MarkRoots() {
  timings_.StartSplit("MarkRoots");
  // TODO: Visit up image roots as well?
  Runtime::Current()->VisitRoots(MarkRootCallback, this, false, true);
  timings_.EndSplit();
}

mirror::Object* SemiSpace::MarkedForwardingAddressCallback(mirror::Object* object, void* arg) {
  return reinterpret_cast<SemiSpace*>(arg)->GetMarkedForwardAddress(object);
}

void SemiSpace::SweepSystemWeaks() {
  timings_.StartSplit("SweepSystemWeaks");
  Runtime::Current()->SweepSystemWeaks(MarkedForwardingAddressCallback, this);
  timings_.EndSplit();
}

bool SemiSpace::ShouldSweepSpace(space::ContinuousSpace* space) const {
  return space != from_space_ && space != to_space_ && !IsImmuneSpace(space);
}

void SemiSpace::Sweep(bool swap_bitmaps) {
  DCHECK(mark_stack_->IsEmpty());
  // Note: the ScopedSplit must be named, otherwise the temporary is destroyed immediately and
  // the split ends before the sweep starts.
  TimingLogger::ScopedSplit split("Sweep", &timings_);
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsContinuousMemMapAllocSpace()) {
      space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
      if (!ShouldSweepSpace(alloc_space)) {
        continue;
      }
      TimingLogger::ScopedSplit split(
          alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepAllocSpace", &timings_);
      size_t freed_objects = 0;
      size_t freed_bytes = 0;
      alloc_space->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
      heap_->RecordFree(freed_objects, freed_bytes);
      freed_objects_.FetchAndAdd(freed_objects);
      freed_bytes_.FetchAndAdd(freed_bytes);
    }
  }
  if (!is_large_object_space_immune_) {
    SweepLargeObjects(swap_bitmaps);
  }
}

void SemiSpace::SweepLargeObjects(bool swap_bitmaps) {
  DCHECK(!is_large_object_space_immune_);
  TimingLogger::ScopedSplit split("SweepLargeObjects", &timings_);
  size_t freed_objects = 0;
  size_t freed_bytes = 0;
  GetHeap()->GetLargeObjectsSpace()->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
  freed_large_objects_.FetchAndAdd(freed_objects);
  freed_large_object_bytes_.FetchAndAdd(freed_bytes);
  GetHeap()->RecordFree(freed_objects, freed_bytes);
}

// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
void SemiSpace::DelayReferenceReferent(mirror::Class* klass, Object* obj) {
  heap_->DelayReferenceReferent(klass, obj, MarkedForwardingAddressCallback, this);
}

// Visit all of the references of an object and update.
void SemiSpace::ScanObject(Object* obj) {
  DCHECK(obj != nullptr);
  DCHECK(!from_space_->HasAddress(obj)) << "Scanning object " << obj << " in from space";
  MarkSweep::VisitObjectReferences(obj, [this](Object* obj, Object* ref, const MemberOffset& offset,
     bool /* is_static */) ALWAYS_INLINE_LAMBDA NO_THREAD_SAFETY_ANALYSIS {
    mirror::Object* new_address = MarkObject(ref);
    if (new_address != ref) {
      DCHECK(new_address != nullptr);
      // Don't need to mark the card since we are updating the object address and not changing
      // the actual object it's pointing to. Using SetFieldObjectWithoutWriteBarrier is better in
      // this case since it does not dirty cards or use additional memory.
      // Since we do not change the actual object, we can safely use non-transactional mode. Also
      // disable check as we could run inside a transaction.
      obj->SetFieldObjectWithoutWriteBarrier<false, false, kVerifyNone>(offset, new_address, false);
    }
  }, kMovingClasses);
  mirror::Class* klass = obj->GetClass<kVerifyNone>();
  if (UNLIKELY(klass->IsReferenceClass<kVerifyNone>())) {
    DelayReferenceReferent(klass, obj);
  }
}

// Scan anything that's on the mark stack.
void SemiSpace::ProcessMarkStack() {
  space::MallocSpace* promo_dest_space = nullptr;
  accounting::SpaceBitmap* live_bitmap = nullptr;
  if (generational_ && !whole_heap_collection_) {
    // If this is a bump pointer space only collection (and promotion is enabled), we delay the
    // live-bitmap marking of promoted objects from MarkObject() until this function.
    promo_dest_space = GetHeap()->GetPrimaryFreeListSpace();
    live_bitmap = promo_dest_space->GetLiveBitmap();
    DCHECK(live_bitmap != nullptr);
    accounting::SpaceBitmap* mark_bitmap = promo_dest_space->GetMarkBitmap();
    DCHECK(mark_bitmap != nullptr);
    DCHECK_EQ(live_bitmap, mark_bitmap);
  }
  timings_.StartSplit("ProcessMarkStack");
  while (!mark_stack_->IsEmpty()) {
    Object* obj = mark_stack_->PopBack();
    if (generational_ && !whole_heap_collection_ && promo_dest_space->HasAddress(obj)) {
      // obj has just been promoted. Mark the live bitmap for it, which is delayed from
      // MarkObject().
      DCHECK(!live_bitmap->Test(obj));
      live_bitmap->Set(obj);
    }
    ScanObject(obj);
  }
  timings_.EndSplit();
}

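// Returns the post-GC address for obj: obj itself if it is immune, in the to-space, or marked in
// place; its forwarding address if it is in the from-space; or nullptr if it is unmarked.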
inline Object* SemiSpace::GetMarkedForwardAddress(mirror::Object* obj) const
    SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
  // All immune objects are assumed marked.
  if (IsImmune(obj)) {
    return obj;
  }
  if (from_space_->HasAddress(obj)) {
    mirror::Object* forwarding_address = GetForwardingAddressInFromSpace(const_cast<Object*>(obj));
    return forwarding_address;  // Returns either the forwarding address or nullptr.
  } else if (to_space_->HasAddress(obj)) {
    // Should be unlikely.
    // Already forwarded, must be marked.
    return obj;
  }
  return heap_->GetMarkBitmap()->Test(obj) ? obj : nullptr;
}

void SemiSpace::SetToSpace(space::ContinuousMemMapAllocSpace* to_space) {
  DCHECK(to_space != nullptr);
  to_space_ = to_space;
}

void SemiSpace::SetFromSpace(space::ContinuousMemMapAllocSpace* from_space) {
  DCHECK(from_space != nullptr);
  from_space_ = from_space;
}

void SemiSpace::FinishPhase() {
  TimingLogger::ScopedSplit split("FinishPhase", &timings_);
  Heap* heap = GetHeap();
  timings_.NewSplit("PostGcVerification");
  heap->PostGcVerification(this);

  // Null the "to" and "from" spaces since compacting from one to the other isn't valid until
  // further action is done by the heap.
  to_space_ = nullptr;
  from_space_ = nullptr;

  // Update the cumulative statistics.
  total_freed_objects_ += GetFreedObjects() + GetFreedLargeObjects();
  total_freed_bytes_ += GetFreedBytes() + GetFreedLargeObjectBytes();

  // Ensure that the mark stack is empty.
  CHECK(mark_stack_->IsEmpty());

  // Update the cumulative loggers.
  cumulative_timings_.Start();
  cumulative_timings_.AddLogger(timings_);
  cumulative_timings_.End();

  // Clear all of the spaces' mark bitmaps.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    accounting::SpaceBitmap* bitmap = space->GetMarkBitmap();
    if (bitmap != nullptr &&
        space->GetGcRetentionPolicy() != space::kGcRetentionPolicyNeverCollect) {
      bitmap->Clear();
    }
  }
  mark_stack_->Reset();

  // Reset the marked large objects.
  space::LargeObjectSpace* large_objects = GetHeap()->GetLargeObjectsSpace();
  large_objects->GetMarkObjects()->Clear();

  if (generational_) {
    // Decide whether to do a whole heap collection or a bump pointer space only collection at
    // the next collection by updating whole_heap_collection_. Enable whole_heap_collection_
    // once every kDefaultWholeHeapCollectionInterval collections.
    if (!whole_heap_collection_) {
      --whole_heap_collection_interval_counter_;
      DCHECK_GE(whole_heap_collection_interval_counter_, 0);
      if (whole_heap_collection_interval_counter_ == 0) {
        whole_heap_collection_ = true;
      }
    } else {
      DCHECK_EQ(whole_heap_collection_interval_counter_, 0);
      whole_heap_collection_interval_counter_ = kDefaultWholeHeapCollectionInterval;
      whole_heap_collection_ = false;
    }
  }
}

}  // namespace collector
}  // namespace gc
}  // namespace art