/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "semi_space-inl.h"

#include <functional>
#include <numeric>
#include <climits>
#include <vector>

#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex-inl.h"
#include "base/timing_logger.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/mod_union_table.h"
#include "gc/accounting/remembered_set.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/reference_processor.h"
#include "gc/space/bump_pointer_space.h"
#include "gc/space/bump_pointer_space-inl.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "indirect_reference_table.h"
#include "intern_table.h"
#include "jni_internal.h"
#include "mark_sweep-inl.h"
#include "monitor.h"
#include "mirror/reference-inl.h"
#include "mirror/object-inl.h"
#include "runtime.h"
#include "thread-inl.h"
#include "thread_list.h"

using ::art::mirror::Object;

namespace art {
namespace gc {
namespace collector {

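// Tuning and debugging knobs for this collector:
// - kProtectFromSpace: mprotect the from-space after the collection so that any stale
//   reads of from-space objects fault instead of returning garbage.
// - kStoreStackTraces: dump all thread stacks into the runtime fault message before
//   marking, to help diagnose heap corruption crashes (see MarkingPhase()).
// - kBytesPromotedThreshold / kLargeObjectBytesAllocatedThreshold: once this many bytes
//   have been promoted, or this many large object bytes allocated, since the last whole
//   heap collection, the next generational collection covers the whole heap (see
//   FinishPhase()).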
static constexpr bool kProtectFromSpace = true;
static constexpr bool kStoreStackTraces = false;
static constexpr size_t kBytesPromotedThreshold = 4 * MB;
static constexpr size_t kLargeObjectBytesAllocatedThreshold = 16 * MB;

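// Binds the to-space live bitmap as its mark bitmap and records every space that this
// collection will not collect (e.g. the image and zygote spaces, plus the main and
// non-moving spaces during a bump pointer space only collection) as immune.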
void SemiSpace::BindBitmaps() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->GetLiveBitmap() != nullptr) {
      if (space == to_space_) {
        CHECK(to_space_->IsContinuousMemMapAllocSpace());
        to_space_->AsContinuousMemMapAllocSpace()->BindLiveToMarkBitmap();
      } else if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect
                 || space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect
                 // Add the main free list space and the non-moving space to the
                 // immune space during a bump pointer space only collection.
                 || (generational_ && !whole_heap_collection_ &&
                     (space == GetHeap()->GetNonMovingSpace() ||
                      space == GetHeap()->GetPrimaryFreeListSpace()))) {
        CHECK(immune_region_.AddContinuousSpace(space)) << "Failed to add space " << *space;
      }
    }
  }
  if (generational_ && !whole_heap_collection_) {
    // We won't collect the large object space during a bump pointer space only collection.
    is_large_object_space_immune_ = true;
  }
}

SemiSpace::SemiSpace(Heap* heap, bool generational, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix + (name_prefix.empty() ? "" : " ") + "marksweep + semispace"),
      to_space_(nullptr),
      from_space_(nullptr),
      generational_(generational),
      last_gc_to_space_end_(nullptr),
      bytes_promoted_(0),
      bytes_promoted_since_last_whole_heap_collection_(0),
      large_object_bytes_allocated_at_last_whole_heap_collection_(0),
      whole_heap_collection_(true),
      collector_name_(name_),
      swap_semi_spaces_(true) {
}

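// Entry point for one collection cycle. Marking must run with the mutators suspended;
// if the caller already holds the mutator lock exclusively (zygote creation, collector
// transitions) we run in place, otherwise we suspend the world with ScopedPause.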
void SemiSpace::RunPhases() {
  Thread* self = Thread::Current();
  InitializePhase();
  // The semi-space collector is special since it is sometimes called with the mutators
  // suspended during zygote creation and collector transitions. If we already exclusively
  // hold the mutator lock, then we can't lock it again since that would cause a deadlock.
  if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
    GetHeap()->PreGcVerificationPaused(this);
    GetHeap()->PrePauseRosAllocVerification(this);
    MarkingPhase();
    ReclaimPhase();
    GetHeap()->PostGcVerificationPaused(this);
  } else {
    Locks::mutator_lock_->AssertNotHeld(self);
    {
      ScopedPause pause(this);
      GetHeap()->PreGcVerificationPaused(this);
      GetHeap()->PrePauseRosAllocVerification(this);
      MarkingPhase();
    }
    {
      ReaderMutexLock mu(self, *Locks::mutator_lock_);
      ReclaimPhase();
    }
    GetHeap()->PostGcVerification(this);
  }
  FinishPhase();
}

void SemiSpace::InitializePhase() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  mark_stack_ = heap_->GetMarkStack();
  DCHECK(mark_stack_ != nullptr);
  immune_region_.Reset();
  is_large_object_space_immune_ = false;
  saved_bytes_ = 0;
  bytes_moved_ = 0;
  objects_moved_ = 0;
  self_ = Thread::Current();
  CHECK(from_space_->CanMoveObjects()) << "Attempting to move from " << *from_space_;
  // Set the initial bitmap.
  to_space_live_bitmap_ = to_space_->GetLiveBitmap();
  {
    // TODO: I don't think we should need the heap bitmap lock to get the mark bitmap.
    ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    mark_bitmap_ = heap_->GetMarkBitmap();
  }
}

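// Delegates soft/weak/finalizer/phantom reference processing to the heap's
// ReferenceProcessor, using this collector's callbacks to test and forward referents.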
void SemiSpace::ProcessReferences(Thread* self) {
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetHeap()->GetReferenceProcessor()->ProcessReferences(
      false, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(),
      &HeapReferenceMarkedCallback, &MarkObjectCallback, &ProcessMarkStackCallback, this);
}

void SemiSpace::MarkingPhase() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  CHECK(Locks::mutator_lock_->IsExclusiveHeld(self_));
  if (kStoreStackTraces) {
    Locks::mutator_lock_->AssertExclusiveHeld(self_);
    // Store the stack traces into the runtime fault string in case we get a heap corruption
    // related crash later.
    ThreadState old_state = self_->SetStateUnsafe(kRunnable);
    std::ostringstream oss;
    Runtime* runtime = Runtime::Current();
    runtime->GetThreadList()->DumpForSigQuit(oss);
    runtime->GetThreadList()->DumpNativeStacks(oss);
    runtime->SetFaultMessage(oss.str());
    CHECK_EQ(self_->SetStateUnsafe(old_state), kRunnable);
  }
  // Revoke the thread local buffers since the GC may allocate into a RosAllocSpace and this helps
  // to prevent fragmentation.
  RevokeAllThreadLocalBuffers();
  if (generational_) {
    if (GetCurrentIteration()->GetGcCause() == kGcCauseExplicit ||
        GetCurrentIteration()->GetGcCause() == kGcCauseForNativeAlloc ||
        GetCurrentIteration()->GetClearSoftReferences()) {
      // If this is an explicit, native allocation-triggered, or last-attempt
      // collection, collect the whole heap.
      whole_heap_collection_ = true;
    }
    if (whole_heap_collection_) {
      VLOG(heap) << "Whole heap collection";
      name_ = collector_name_ + " whole";
    } else {
      VLOG(heap) << "Bump pointer space only collection";
      name_ = collector_name_ + " bps";
    }
  }

  if (!generational_ || whole_heap_collection_) {
    // If non-generational, always clear soft references.
    // If generational, clear soft references if a whole heap collection.
    GetCurrentIteration()->SetClearSoftReferences(true);
  }
  Locks::mutator_lock_->AssertExclusiveHeld(self_);
  if (generational_) {
    // If last_gc_to_space_end_ is out of the bounds of the from-space
    // (the to-space from the last GC), then point it to the beginning of
    // the from-space. For example, the very first GC or the
    // pre-zygote compaction.
    if (!from_space_->HasAddress(reinterpret_cast<mirror::Object*>(last_gc_to_space_end_))) {
      last_gc_to_space_end_ = from_space_->Begin();
    }
    // Reset this before the marking starts below.
    bytes_promoted_ = 0;
  }
  // Assume the cleared space is already empty.
  BindBitmaps();
  // Process dirty cards and add dirty cards to mod-union tables.
  heap_->ProcessCards(GetTimings(), kUseRememberedSet && generational_);
  // Clear the whole card table since we cannot get any additional dirty cards during the
  // paused GC. This saves memory but only works for pause-the-world collectors.
  t.NewTiming("ClearCardTable");
  heap_->GetCardTable()->ClearCardTable();
  // Need to do this before the checkpoint since we don't want any threads to add references to
  // the live stack during the recursive mark.
  t.NewTiming("SwapStacks");
  if (kUseThreadLocalAllocationStack) {
    TimingLogger::ScopedTiming t2("RevokeAllThreadLocalAllocationStacks", GetTimings());
    heap_->RevokeAllThreadLocalAllocationStacks(self_);
  }
  heap_->SwapStacks(self_);
  {
    WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
    MarkRoots();
    // Mark roots of immune spaces.
    UpdateAndMarkModUnion();
    // Recursively mark remaining objects.
    MarkReachableObjects();
  }
  ProcessReferences(self_);
  {
    ReaderMutexLock mu(self_, *Locks::heap_bitmap_lock_);
    SweepSystemWeaks();
  }
  // Revoke buffers before measuring how many objects were moved since the TLABs need to be revoked
  // before they are properly counted.
  RevokeAllThreadLocalBuffers();
  // Record freed memory.
  const int64_t from_bytes = from_space_->GetBytesAllocated();
  const int64_t to_bytes = bytes_moved_;
  const uint64_t from_objects = from_space_->GetObjectsAllocated();
  const uint64_t to_objects = objects_moved_;
  CHECK_LE(to_objects, from_objects);
  // Note: Freed bytes can be negative if we copy from a compacted space to a free-list backed
  // space.
  RecordFree(ObjectBytePair(from_objects - to_objects, from_bytes - to_bytes));
  // Clear and protect the from space.
  from_space_->Clear();
  VLOG(heap) << "Protecting from_space_: " << *from_space_;
  from_space_->GetMemMap()->Protect(kProtectFromSpace ? PROT_NONE : PROT_READ);
  heap_->PreSweepingGcVerification(this);
  if (swap_semi_spaces_) {
    heap_->SwapSemiSpaces();
  }
}

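// For each immune space that has a mod union table, update the table from the dirty
// cards and mark the references it records into the collected spaces. Immune spaces
// without a table (remembered set or live bitmap based) are scanned later in
// MarkReachableObjects().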
void SemiSpace::UpdateAndMarkModUnion() {
  for (auto& space : heap_->GetContinuousSpaces()) {
    // If the space is immune then we need to mark the references to other spaces.
    if (immune_region_.ContainsSpace(space)) {
      accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
      if (table != nullptr) {
        // TODO: Improve naming.
        TimingLogger::ScopedTiming t(
            space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
                                     "UpdateAndMarkImageModUnionTable",
            GetTimings());
        table->UpdateAndMarkReferences(MarkHeapReferenceCallback, this);
      } else if (heap_->FindRememberedSetFromSpace(space) != nullptr) {
        DCHECK(kUseRememberedSet);
        // During a bump pointer space only collection, the non-moving
        // space is added to the immune space. The non-moving space
        // doesn't have a mod union table, but has a remembered
        // set. Its dirty cards will be scanned later in
        // MarkReachableObjects().
        DCHECK(generational_ && !whole_heap_collection_ &&
               (space == heap_->GetNonMovingSpace() || space == heap_->GetPrimaryFreeListSpace()))
            << "Space " << space->GetName() << " "
            << "generational_=" << generational_ << " "
            << "whole_heap_collection_=" << whole_heap_collection_ << " ";
      } else {
        DCHECK(!kUseRememberedSet);
        // During a bump pointer space only collection, the non-moving
        // space is added to the immune space. But the non-moving
        // space doesn't have a mod union table. Instead, its live
        // bitmap will be scanned later in MarkReachableObjects().
        DCHECK(generational_ && !whole_heap_collection_ &&
               (space == heap_->GetNonMovingSpace() || space == heap_->GetPrimaryFreeListSpace()))
            << "Space " << space->GetName() << " "
            << "generational_=" << generational_ << " "
            << "whole_heap_collection_=" << whole_heap_collection_ << " ";
      }
    }
  }
}

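// Visitor that scans each object handed to it, forwarding all of its reference fields.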
class SemiSpaceScanObjectVisitor {
 public:
  explicit SemiSpaceScanObjectVisitor(SemiSpace* ss) : semi_space_(ss) {}
  void operator()(Object* obj) const EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_,
                                                              Locks::heap_bitmap_lock_) {
    DCHECK(obj != nullptr);
    semi_space_->ScanObject(obj);
  }
 private:
  SemiSpace* const semi_space_;
};

// Used to verify that there are no references to the from-space.
class SemiSpaceVerifyNoFromSpaceReferencesVisitor {
 public:
  explicit SemiSpaceVerifyNoFromSpaceReferencesVisitor(space::ContinuousMemMapAllocSpace* from_space)
      : from_space_(from_space) {}

  void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
    mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
    if (from_space_->HasAddress(ref)) {
      Runtime::Current()->GetHeap()->DumpObject(LOG(INFO), obj);
      LOG(FATAL) << ref << " found in from space";
    }
  }
 private:
  space::ContinuousMemMapAllocSpace* from_space_;
};

void SemiSpace::VerifyNoFromSpaceReferences(Object* obj) {
  DCHECK(!from_space_->HasAddress(obj)) << "Scanning object " << obj << " in from space";
  SemiSpaceVerifyNoFromSpaceReferencesVisitor visitor(from_space_);
  obj->VisitReferences<kMovingClasses>(visitor, VoidFunctor());
}

class SemiSpaceVerifyNoFromSpaceReferencesObjectVisitor {
 public:
  explicit SemiSpaceVerifyNoFromSpaceReferencesObjectVisitor(SemiSpace* ss) : semi_space_(ss) {}
  void operator()(Object* obj) const
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
    DCHECK(obj != nullptr);
    semi_space_->VerifyNoFromSpaceReferences(obj);
  }
 private:
  SemiSpace* const semi_space_;
};

void SemiSpace::MarkReachableObjects() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  {
    TimingLogger::ScopedTiming t2("MarkStackAsLive", GetTimings());
    accounting::ObjectStack* live_stack = heap_->GetLiveStack();
    heap_->MarkAllocStackAsLive(live_stack);
    live_stack->Reset();
  }
  t.NewTiming("UpdateAndMarkRememberedSets");
  for (auto& space : heap_->GetContinuousSpaces()) {
    // If the space is immune and has no mod union table (the
    // non-moving space when the bump pointer space only collection is
    // enabled), then we need to scan its live bitmap or dirty cards as roots
    // (including the objects on the live stack which have just been marked
    // in the live bitmap above in MarkAllocStackAsLive().)
    if (immune_region_.ContainsSpace(space) &&
        heap_->FindModUnionTableFromSpace(space) == nullptr) {
      DCHECK(generational_ && !whole_heap_collection_ &&
             (space == GetHeap()->GetNonMovingSpace() || space == GetHeap()->GetPrimaryFreeListSpace()));
      accounting::RememberedSet* rem_set = heap_->FindRememberedSetFromSpace(space);
      if (kUseRememberedSet) {
        DCHECK(rem_set != nullptr);
        rem_set->UpdateAndMarkReferences(MarkHeapReferenceCallback, DelayReferenceReferentCallback,
                                         from_space_, this);
        if (kIsDebugBuild) {
          // Verify that there are no from-space references that
          // remain in the space, that is, the remembered set (and the
          // card table) didn't miss any from-space references in the
          // space.
          accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
          SemiSpaceVerifyNoFromSpaceReferencesObjectVisitor visitor(this);
          live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                        reinterpret_cast<uintptr_t>(space->End()),
                                        visitor);
        }
      } else {
        DCHECK(rem_set == nullptr);
        accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
        SemiSpaceScanObjectVisitor visitor(this);
        live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                      reinterpret_cast<uintptr_t>(space->End()),
                                      visitor);
      }
    }
  }

  if (is_large_object_space_immune_) {
    TimingLogger::ScopedTiming t2("VisitLargeObjects", GetTimings());
    DCHECK(generational_ && !whole_heap_collection_);
    // Delay copying the live set to the marked set until here from
    // BindBitmaps() as the large objects on the allocation stack may
    // be newly added to the live set above in MarkAllocStackAsLive().
    GetHeap()->GetLargeObjectsSpace()->CopyLiveToMarked();

    // When the large object space is immune, we need to scan the
    // large object space as roots as they contain references to their
    // classes (primitive array classes) that could move even though they
    // don't contain any other references.
    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
    accounting::LargeObjectBitmap* large_live_bitmap = large_object_space->GetLiveBitmap();
    SemiSpaceScanObjectVisitor visitor(this);
    large_live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(large_object_space->Begin()),
                                        reinterpret_cast<uintptr_t>(large_object_space->End()),
                                        visitor);
  }
  // Recursively process the mark stack.
  ProcessMarkStack();
}

void SemiSpace::ReclaimPhase() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
  // Reclaim unmarked objects.
  Sweep(false);
  // Swap the live and mark bitmaps for each space that we modified. This is an
  // optimization that enables us to not clear live bits inside of the sweep. Only swaps unbound
  // bitmaps.
  SwapBitmaps();
  // Unbind the live and mark bitmaps.
  GetHeap()->UnBindBitmaps();
  if (saved_bytes_ > 0) {
    VLOG(heap) << "Avoided dirtying " << PrettySize(saved_bytes_);
  }
  if (generational_) {
    // Record the end (top) of the to space so we can distinguish
    // between objects that were allocated since the last GC and the
    // older objects.
    last_gc_to_space_end_ = to_space_->End();
  }
}

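// Grows the mark stack to new_size. The pending objects are copied out to a temporary
// vector and pushed back afterwards since resizing does not preserve the contents.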
void SemiSpace::ResizeMarkStack(size_t new_size) {
  std::vector<Object*> temp(mark_stack_->Begin(), mark_stack_->End());
  CHECK_LE(mark_stack_->Size(), new_size);
  mark_stack_->Resize(new_size);
  for (const auto& obj : temp) {
    mark_stack_->PushBack(obj);
  }
}

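// Pushes obj onto the mark stack, first doubling the stack's capacity if it is full.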
inline void SemiSpace::MarkStackPush(Object* obj) {
  if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
    ResizeMarkStack(mark_stack_->Capacity() * 2);
  }
  // The object must be pushed on to the mark stack.
  mark_stack_->PushBack(obj);
}

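// memcpy() variant for copying an object into a pre-zeroed destination: whole pages
// whose source bytes are all zero are skipped rather than written, so those pages are
// never dirtied. Returns the number of bytes saved from being dirtied this way.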
static inline size_t CopyAvoidingDirtyingPages(void* dest, const void* src, size_t size) {
  if (LIKELY(size <= static_cast<size_t>(kPageSize))) {
    // We will dirty the current page and somewhere in the middle of the next page. This means
    // that the next object copied will also dirty that page.
    // TODO: Worth considering the last object copied? We may end up dirtying one page which is
    // not necessary per GC.
    memcpy(dest, src, size);
    return 0;
  }
  size_t saved_bytes = 0;
  byte* byte_dest = reinterpret_cast<byte*>(dest);
  if (kIsDebugBuild) {
    for (size_t i = 0; i < size; ++i) {
      CHECK_EQ(byte_dest[i], 0U);
    }
  }
  // Process the start of the page. The page must already be dirty, don't bother checking.
  const byte* byte_src = reinterpret_cast<const byte*>(src);
  const byte* limit = byte_src + size;
  size_t page_remain = AlignUp(byte_dest, kPageSize) - byte_dest;
  // Copy the bytes until the start of the next page.
  memcpy(dest, src, page_remain);
  byte_src += page_remain;
  byte_dest += page_remain;
  DCHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_dest), kPageSize);
  DCHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_dest), sizeof(uintptr_t));
  DCHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_src), sizeof(uintptr_t));
  while (byte_src + kPageSize < limit) {
    bool all_zero = true;
    uintptr_t* word_dest = reinterpret_cast<uintptr_t*>(byte_dest);
    const uintptr_t* word_src = reinterpret_cast<const uintptr_t*>(byte_src);
    for (size_t i = 0; i < kPageSize / sizeof(*word_src); ++i) {
      // Assumes the destination of the copy is all zeros.
      if (word_src[i] != 0) {
        all_zero = false;
        word_dest[i] = word_src[i];
      }
    }
    if (all_zero) {
      // Avoided copying into the page since it was all zeros.
      saved_bytes += kPageSize;
    }
    byte_src += kPageSize;
    byte_dest += kPageSize;
  }
  // Handle the part of the page at the end.
  memcpy(byte_dest, byte_src, limit - byte_src);
  return saved_bytes;
}

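// Copies a not-yet-forwarded object to its new location and returns the forwarding
// address. In generational mode, objects allocated before the last GC are pseudo-promoted
// into the main free list space (falling back to the to-space when it is full); all
// other objects are copied to the to-space.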
mirror::Object* SemiSpace::MarkNonForwardedObject(mirror::Object* obj) {
  size_t object_size = obj->SizeOf();
  size_t bytes_allocated;
  mirror::Object* forward_address = nullptr;
  if (generational_ && reinterpret_cast<byte*>(obj) < last_gc_to_space_end_) {
    // If it's allocated before the last GC (older), move
    // (pseudo-promote) it to the main free list space (as sort
    // of an old generation.)
    space::MallocSpace* promo_dest_space = GetHeap()->GetPrimaryFreeListSpace();
    forward_address = promo_dest_space->AllocThreadUnsafe(self_, object_size, &bytes_allocated,
                                                          nullptr);
    if (UNLIKELY(forward_address == nullptr)) {
      // If out of space, fall back to the to-space.
      forward_address = to_space_->AllocThreadUnsafe(self_, object_size, &bytes_allocated, nullptr);
    } else {
      bytes_promoted_ += bytes_allocated;
      // Dirty the card at the destination as it may contain
      // references (including the class pointer) to the bump pointer
      // space.
      GetHeap()->WriteBarrierEveryFieldOf(forward_address);
      // Handle the bitmaps marking.
      accounting::ContinuousSpaceBitmap* live_bitmap = promo_dest_space->GetLiveBitmap();
      DCHECK(live_bitmap != nullptr);
      accounting::ContinuousSpaceBitmap* mark_bitmap = promo_dest_space->GetMarkBitmap();
      DCHECK(mark_bitmap != nullptr);
      DCHECK(!live_bitmap->Test(forward_address));
      if (!whole_heap_collection_) {
        // If collecting the bump pointer spaces only, live_bitmap == mark_bitmap.
        DCHECK_EQ(live_bitmap, mark_bitmap);

        // During a bump pointer space only collection, delay the live
        // bitmap marking of the promoted object until it's popped off
        // the mark stack (ProcessMarkStack()). The rationale: we may
        // be in the middle of scanning the objects in the promo
        // destination space for
        // non-moving-space-to-bump-pointer-space references by
        // iterating over the marked bits of the live bitmap
        // (MarkReachableObjects()). If we don't delay it (and instead
        // mark the promoted object here), the above promo destination
        // space scan could encounter the just-promoted object and
        // forward the references in the promoted object's fields even
        // though it is pushed onto the mark stack. If this happens,
        // the promoted object would be in an inconsistent state, that
        // is, it's on the mark stack (gray) but its fields are
        // already forwarded (black), which would cause a
        // DCHECK(!to_space_->HasAddress(obj)) failure below.
      } else {
        // Mark forward_address on the live bit map.
        live_bitmap->Set(forward_address);
        // Mark forward_address on the mark bit map.
        DCHECK(!mark_bitmap->Test(forward_address));
        mark_bitmap->Set(forward_address);
      }
    }
    DCHECK(forward_address != nullptr);
  } else {
    // If it's allocated after the last GC (younger), copy it to the to-space.
    forward_address = to_space_->AllocThreadUnsafe(self_, object_size, &bytes_allocated, nullptr);
  }
  CHECK(forward_address != nullptr) << "Out of memory in the to-space.";
  ++objects_moved_;
  bytes_moved_ += bytes_allocated;
  // Copy over the object and add it to the mark stack since we still need to update its
  // references.
  saved_bytes_ +=
      CopyAvoidingDirtyingPages(reinterpret_cast<void*>(forward_address), obj, object_size);
  if (kUseBakerOrBrooksReadBarrier) {
    obj->AssertReadBarrierPointer();
    if (kUseBrooksReadBarrier) {
      DCHECK_EQ(forward_address->GetReadBarrierPointer(), obj);
      forward_address->SetReadBarrierPointer(forward_address);
    }
    forward_address->AssertReadBarrierPointer();
  }
  if (to_space_live_bitmap_ != nullptr) {
    to_space_live_bitmap_->Set(forward_address);
  }
  DCHECK(to_space_->HasAddress(forward_address) ||
         (generational_ && GetHeap()->GetPrimaryFreeListSpace()->HasAddress(forward_address)));
  return forward_address;
}

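// C-style trampolines used by the runtime, the mod union tables, the remembered sets
// and the reference processor to call back into this collector; each unwraps the
// SemiSpace* passed through the void* argument.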
void SemiSpace::ProcessMarkStackCallback(void* arg) {
  reinterpret_cast<SemiSpace*>(arg)->ProcessMarkStack();
}

mirror::Object* SemiSpace::MarkObjectCallback(mirror::Object* root, void* arg) {
  auto ref = StackReference<mirror::Object>::FromMirrorPtr(root);
  reinterpret_cast<SemiSpace*>(arg)->MarkObject(&ref);
  return ref.AsMirrorPtr();
}

void SemiSpace::MarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>* obj_ptr,
                                          void* arg) {
  reinterpret_cast<SemiSpace*>(arg)->MarkObject(obj_ptr);
}

void SemiSpace::DelayReferenceReferentCallback(mirror::Class* klass, mirror::Reference* ref,
                                               void* arg) {
  reinterpret_cast<SemiSpace*>(arg)->DelayReferenceReferent(klass, ref);
}

void SemiSpace::MarkRootCallback(Object** root, void* arg, uint32_t /*thread_id*/,
                                 RootType /*root_type*/) {
  auto ref = StackReference<mirror::Object>::FromMirrorPtr(*root);
  reinterpret_cast<SemiSpace*>(arg)->MarkObject(&ref);
  if (*root != ref.AsMirrorPtr()) {
    *root = ref.AsMirrorPtr();
  }
}

// Marks all objects in the root set.
void SemiSpace::MarkRoots() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  Runtime::Current()->VisitRoots(MarkRootCallback, this);
}

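// Returns true if the referenced object is marked, updating the reference in place if
// the object has moved; returns false if no forwarding address exists yet.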
bool SemiSpace::HeapReferenceMarkedCallback(mirror::HeapReference<mirror::Object>* object,
                                            void* arg) {
  mirror::Object* obj = object->AsMirrorPtr();
  mirror::Object* new_obj =
      reinterpret_cast<SemiSpace*>(arg)->GetMarkedForwardAddress(obj);
  if (new_obj == nullptr) {
    return false;
  }
  if (new_obj != obj) {
    // Write barrier is not necessary since it still points to the same object, just at a different
    // address.
    object->Assign(new_obj);
  }
  return true;
}

mirror::Object* SemiSpace::MarkedForwardingAddressCallback(mirror::Object* object, void* arg) {
  return reinterpret_cast<SemiSpace*>(arg)->GetMarkedForwardAddress(object);
}

void SemiSpace::SweepSystemWeaks() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  Runtime::Current()->SweepSystemWeaks(MarkedForwardingAddressCallback, this);
}

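// Sweep only the spaces we collected: skip the two semi-spaces (reclaimed wholesale by
// copying) and any immune space.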
bool SemiSpace::ShouldSweepSpace(space::ContinuousSpace* space) const {
  return space != from_space_ && space != to_space_ && !immune_region_.ContainsSpace(space);
}

void SemiSpace::Sweep(bool swap_bitmaps) {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  DCHECK(mark_stack_->IsEmpty());
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsContinuousMemMapAllocSpace()) {
      space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
      if (!ShouldSweepSpace(alloc_space)) {
        continue;
      }
      TimingLogger::ScopedTiming split(
          alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepAllocSpace", GetTimings());
      RecordFree(alloc_space->Sweep(swap_bitmaps));
    }
  }
  if (!is_large_object_space_immune_) {
    SweepLargeObjects(swap_bitmaps);
  }
}

void SemiSpace::SweepLargeObjects(bool swap_bitmaps) {
  DCHECK(!is_large_object_space_immune_);
  TimingLogger::ScopedTiming split("SweepLargeObjects", GetTimings());
  RecordFreeLOS(heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps));
}

// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
void SemiSpace::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) {
  heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, reference,
                                                         &HeapReferenceMarkedCallback, this);
}

class SemiSpaceMarkObjectVisitor {
 public:
  explicit SemiSpaceMarkObjectVisitor(SemiSpace* collector) : collector_(collector) {
  }

  void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const ALWAYS_INLINE
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
    // Object was already verified when we scanned it.
    collector_->MarkObject(obj->GetFieldObjectReferenceAddr<kVerifyNone>(offset));
  }

  void operator()(mirror::Class* klass, mirror::Reference* ref) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    collector_->DelayReferenceReferent(klass, ref);
  }

 private:
  SemiSpace* const collector_;
};

// Visit all of the references of an object and update.
void SemiSpace::ScanObject(Object* obj) {
  DCHECK(!from_space_->HasAddress(obj)) << "Scanning object " << obj << " in from space";
  SemiSpaceMarkObjectVisitor visitor(this);
  obj->VisitReferences<kMovingClasses>(visitor, visitor);
}

// Scan anything that's on the mark stack.
void SemiSpace::ProcessMarkStack() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  space::MallocSpace* promo_dest_space = nullptr;
  accounting::ContinuousSpaceBitmap* live_bitmap = nullptr;
  if (generational_ && !whole_heap_collection_) {
    // During a bump pointer space only collection (with promotion
    // enabled), we delay the live-bitmap marking of promoted objects
    // from MarkObject() until this function.
    promo_dest_space = GetHeap()->GetPrimaryFreeListSpace();
    live_bitmap = promo_dest_space->GetLiveBitmap();
    DCHECK(live_bitmap != nullptr);
    accounting::ContinuousSpaceBitmap* mark_bitmap = promo_dest_space->GetMarkBitmap();
    DCHECK(mark_bitmap != nullptr);
    DCHECK_EQ(live_bitmap, mark_bitmap);
  }
  while (!mark_stack_->IsEmpty()) {
    Object* obj = mark_stack_->PopBack();
    if (generational_ && !whole_heap_collection_ && promo_dest_space->HasAddress(obj)) {
      // obj has just been promoted. Mark the live bitmap for it,
      // which is delayed from MarkObject().
      DCHECK(!live_bitmap->Test(obj));
      live_bitmap->Set(obj);
    }
    ScanObject(obj);
  }
}

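// Returns the marked (post-collection) address of obj, or nullptr if obj is unmarked.
// Immune and to-space objects map to themselves, from-space objects map to their
// forwarding address if one has been installed, and anything else is looked up in the
// mark bitmap.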
inline Object* SemiSpace::GetMarkedForwardAddress(mirror::Object* obj) const
    SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
  // All immune objects are assumed marked.
  if (immune_region_.ContainsObject(obj)) {
    return obj;
  }
  if (from_space_->HasAddress(obj)) {
    // Returns either the forwarding address or nullptr.
    return GetForwardingAddressInFromSpace(obj);
  } else if (to_space_->HasAddress(obj)) {
    // Should be unlikely.
    // Already forwarded, must be marked.
    return obj;
  }
  return mark_bitmap_->Test(obj) ? obj : nullptr;
}

void SemiSpace::SetToSpace(space::ContinuousMemMapAllocSpace* to_space) {
  DCHECK(to_space != nullptr);
  to_space_ = to_space;
}

void SemiSpace::SetFromSpace(space::ContinuousMemMapAllocSpace* from_space) {
  DCHECK(from_space != nullptr);
  from_space_ = from_space;
}

void SemiSpace::FinishPhase() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  // Null the "to" and "from" spaces since compacting from one to the other isn't valid until
  // further action is done by the heap.
  to_space_ = nullptr;
  from_space_ = nullptr;
  CHECK(mark_stack_->IsEmpty());
  mark_stack_->Reset();
  if (generational_) {
    // Decide whether to do a whole heap collection or a bump pointer
    // space only collection at the next collection by updating
    // whole_heap_collection_.
    if (!whole_heap_collection_) {
      // Enable whole_heap_collection_ if the bytes promoted since the
      // last whole heap collection or the large object bytes
      // allocated exceed a threshold.
      bytes_promoted_since_last_whole_heap_collection_ += bytes_promoted_;
      bool bytes_promoted_threshold_exceeded =
          bytes_promoted_since_last_whole_heap_collection_ >= kBytesPromotedThreshold;
      uint64_t current_los_bytes_allocated = GetHeap()->GetLargeObjectsSpace()->GetBytesAllocated();
      uint64_t last_los_bytes_allocated =
          large_object_bytes_allocated_at_last_whole_heap_collection_;
      bool large_object_bytes_threshold_exceeded =
          current_los_bytes_allocated >=
          last_los_bytes_allocated + kLargeObjectBytesAllocatedThreshold;
      if (bytes_promoted_threshold_exceeded || large_object_bytes_threshold_exceeded) {
        whole_heap_collection_ = true;
      }
    } else {
      // Reset the counters.
      bytes_promoted_since_last_whole_heap_collection_ = bytes_promoted_;
      large_object_bytes_allocated_at_last_whole_heap_collection_ =
          GetHeap()->GetLargeObjectsSpace()->GetBytesAllocated();
      whole_heap_collection_ = false;
    }
  }
  // Clear all of the spaces' mark bitmaps.
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  heap_->ClearMarkedObjects();
}

void SemiSpace::RevokeAllThreadLocalBuffers() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  GetHeap()->RevokeAllThreadLocalBuffers();
}

}  // namespace collector
}  // namespace gc
}  // namespace art