/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16
Mathieu Chartier3b05e9b2014-03-25 09:29:43 -070017#include "semi_space-inl.h"
Mathieu Chartier590fee92013-09-13 13:46:47 -070018
19#include <functional>
20#include <numeric>
21#include <climits>
22#include <vector>
23
24#include "base/logging.h"
25#include "base/macros.h"
26#include "base/mutex-inl.h"
27#include "base/timing_logger.h"
Mathieu Chartier4aeec172014-03-27 16:09:46 -070028#include "gc/accounting/heap_bitmap-inl.h"
Mathieu Chartier590fee92013-09-13 13:46:47 -070029#include "gc/accounting/mod_union_table.h"
Hiroshi Yamauchi38e68e92014-03-07 13:59:08 -080030#include "gc/accounting/remembered_set.h"
Mathieu Chartier590fee92013-09-13 13:46:47 -070031#include "gc/accounting/space_bitmap-inl.h"
32#include "gc/heap.h"
33#include "gc/space/bump_pointer_space.h"
34#include "gc/space/bump_pointer_space-inl.h"
35#include "gc/space/image_space.h"
36#include "gc/space/large_object_space.h"
37#include "gc/space/space-inl.h"
38#include "indirect_reference_table.h"
39#include "intern_table.h"
40#include "jni_internal.h"
41#include "mark_sweep-inl.h"
42#include "monitor.h"
43#include "mirror/art_field.h"
44#include "mirror/art_field-inl.h"
45#include "mirror/class-inl.h"
46#include "mirror/class_loader.h"
47#include "mirror/dex_cache.h"
Mathieu Chartier8fa2dad2014-03-13 12:22:56 -070048#include "mirror/reference-inl.h"
Mathieu Chartier590fee92013-09-13 13:46:47 -070049#include "mirror/object-inl.h"
50#include "mirror/object_array.h"
51#include "mirror/object_array-inl.h"
52#include "runtime.h"
Mathieu Chartier3b05e9b2014-03-25 09:29:43 -070053#include "stack.h"
Mathieu Chartier590fee92013-09-13 13:46:47 -070054#include "thread-inl.h"
55#include "thread_list.h"
56#include "verifier/method_verifier.h"
57
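// SemiSpace is a stop-the-world copying collector: live objects are forwarded out of
// from_space_ into to_space_ (or, in generational mode, pseudo-promoted into the main
// free-list space), after which the from-space can be reclaimed wholesale.
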
using ::art::mirror::Class;
using ::art::mirror::Object;

namespace art {
namespace gc {
namespace collector {

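// Collector tuning knobs, consumed in ReclaimPhase(), MarkingPhase() and FinishPhase():
// whether to mprotect the from-space once the collection is done, whether to dump thread
// stacks into the runtime fault message before marking, and whether to trigger whole heap
// collections based on the bytes promoted since the last one (rather than on a fixed
// collection-count interval).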
static constexpr bool kProtectFromSpace = true;
static constexpr bool kStoreStackTraces = false;
static constexpr bool kUseBytesPromoted = true;
static constexpr size_t kBytesPromotedThreshold = 4 * MB;

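// Set up the per-collection bitmap state: the to-space binds its live bitmap as its mark
// bitmap, and every space we will not collect this cycle is added to the immune region.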
void SemiSpace::BindBitmaps() {
  timings_.StartSplit("BindBitmaps");
  WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->GetLiveBitmap() != nullptr) {
      if (space == to_space_) {
        CHECK(to_space_->IsContinuousMemMapAllocSpace());
        to_space_->AsContinuousMemMapAllocSpace()->BindLiveToMarkBitmap();
      } else if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect
                 || space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect
                 // Add the main free list space and the non-moving space to the
                 // immune space if this is a bump pointer space only collection.
                 || (generational_ && !whole_heap_collection_ &&
                     (space == GetHeap()->GetNonMovingSpace() ||
                      space == GetHeap()->GetPrimaryFreeListSpace()))) {
        CHECK(immune_region_.AddContinuousSpace(space)) << "Failed to add space " << *space;
      }
    }
  }
  if (generational_ && !whole_heap_collection_) {
    // We won't collect the large object space during a bump pointer space only collection.
    is_large_object_space_immune_ = true;
  }
  timings_.EndSplit();
}

SemiSpace::SemiSpace(Heap* heap, bool generational, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix + (name_prefix.empty() ? "" : " ") + "marksweep + semispace"),
      to_space_(nullptr),
      from_space_(nullptr),
      generational_(generational),
      last_gc_to_space_end_(nullptr),
      bytes_promoted_(0),
      bytes_promoted_since_last_whole_heap_collection_(0),
      whole_heap_collection_(true),
      whole_heap_collection_interval_counter_(0),
      collector_name_(name_) {
}

void SemiSpace::InitializePhase() {
  TimingLogger::ScopedSplit split("InitializePhase", &timings_);
  mark_stack_ = heap_->mark_stack_.get();
  DCHECK(mark_stack_ != nullptr);
  immune_region_.Reset();
  is_large_object_space_immune_ = false;
  saved_bytes_ = 0;
  bytes_moved_ = 0;
  objects_moved_ = 0;
  self_ = Thread::Current();
  // Do any pre GC verification.
  timings_.NewSplit("PreGcVerification");
  heap_->PreGcVerification(this);
  CHECK(from_space_->CanMoveObjects()) << "Attempting to move from " << *from_space_;
  // Set the initial bitmap.
  to_space_live_bitmap_ = to_space_->GetLiveBitmap();
  {
    // TODO: I don't think we should need heap bitmap lock to get the mark bitmap.
    ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    mark_bitmap_ = heap_->GetMarkBitmap();
  }
}

void SemiSpace::ProcessReferences(Thread* self) {
  TimingLogger::ScopedSplit split("ProcessReferences", &timings_);
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetHeap()->ProcessReferences(timings_, clear_soft_references_, &MarkedForwardingAddressCallback,
                               &MarkObjectCallback, &ProcessMarkStackCallback, this);
}

void SemiSpace::MarkingPhase() {
  if (kStoreStackTraces) {
    Locks::mutator_lock_->AssertExclusiveHeld(self_);
    // Store the stack traces into the runtime fault string in case we get a heap corruption
    // related crash later.
    ThreadState old_state = self_->SetStateUnsafe(kRunnable);
    std::ostringstream oss;
    Runtime* runtime = Runtime::Current();
    runtime->GetThreadList()->DumpForSigQuit(oss);
    runtime->GetThreadList()->DumpNativeStacks(oss);
    runtime->SetFaultMessage(oss.str());
    CHECK_EQ(self_->SetStateUnsafe(old_state), kRunnable);
  }

  if (generational_) {
    if (gc_cause_ == kGcCauseExplicit || gc_cause_ == kGcCauseForNativeAlloc ||
        clear_soft_references_) {
      // If an explicit, native allocation-triggered, or last attempt
      // collection, collect the whole heap (and reset the interval
      // counter to be consistent.)
      whole_heap_collection_ = true;
      if (!kUseBytesPromoted) {
        whole_heap_collection_interval_counter_ = 0;
      }
    }
    if (whole_heap_collection_) {
      VLOG(heap) << "Whole heap collection";
      name_ = collector_name_ + " whole";
    } else {
      VLOG(heap) << "Bump pointer space only collection";
      name_ = collector_name_ + " bps";
    }
  }

  if (!clear_soft_references_) {
    if (!generational_) {
      // If non-generational, always clear soft references.
      clear_soft_references_ = true;
    } else {
      // If generational, clear soft references if a whole heap collection.
      if (whole_heap_collection_) {
        clear_soft_references_ = true;
      }
    }
  }

  Locks::mutator_lock_->AssertExclusiveHeld(self_);

  TimingLogger::ScopedSplit split("MarkingPhase", &timings_);
  if (generational_) {
    // If last_gc_to_space_end_ is out of the bounds of the from-space
    // (the to-space from the last GC), then point it to the beginning of
    // the from-space. For example, the very first GC or the
    // pre-zygote compaction.
    if (!from_space_->HasAddress(reinterpret_cast<mirror::Object*>(last_gc_to_space_end_))) {
      last_gc_to_space_end_ = from_space_->Begin();
    }
    // Reset this before the marking starts below.
    bytes_promoted_ = 0;
  }
  // Assume the cleared space is already empty.
  BindBitmaps();
  // Process dirty cards and add dirty cards to mod-union tables.
  heap_->ProcessCards(timings_, kUseRememberedSet && generational_);
  // Clear the whole card table since we cannot get any additional dirty cards during the
  // paused GC. This saves memory but only works for pause-the-world collectors.
  timings_.NewSplit("ClearCardTable");
  heap_->GetCardTable()->ClearCardTable();
  // Need to do this before the checkpoint since we don't want any threads to add references to
  // the live stack during the recursive mark.
  timings_.NewSplit("SwapStacks");
  if (kUseThreadLocalAllocationStack) {
    heap_->RevokeAllThreadLocalAllocationStacks(self_);
  }
  heap_->SwapStacks(self_);
  WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
  MarkRoots();
  // Mark roots of immune spaces.
  UpdateAndMarkModUnion();
  // Recursively mark remaining objects.
  MarkReachableObjects();
}

void SemiSpace::UpdateAndMarkModUnion() {
  for (auto& space : heap_->GetContinuousSpaces()) {
    // If the space is immune then we need to mark the references to other spaces.
    if (immune_region_.ContainsSpace(space)) {
      accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
      if (table != nullptr) {
        // TODO: Improve naming.
        TimingLogger::ScopedSplit split(
            space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
                                     "UpdateAndMarkImageModUnionTable",
            &timings_);
        table->UpdateAndMarkReferences(MarkHeapReferenceCallback, this);
      } else if (heap_->FindRememberedSetFromSpace(space) != nullptr) {
        DCHECK(kUseRememberedSet);
        // In a bump pointer space only collection, the non-moving
        // space is added to the immune space. The non-moving space
        // doesn't have a mod union table, but has a remembered
        // set. Its dirty cards will be scanned later in
        // MarkReachableObjects().
        DCHECK(generational_ && !whole_heap_collection_ &&
               (space == heap_->GetNonMovingSpace() || space == heap_->GetPrimaryFreeListSpace()))
            << "Space " << space->GetName() << " "
            << "generational_=" << generational_ << " "
            << "whole_heap_collection_=" << whole_heap_collection_ << " ";
      } else {
        DCHECK(!kUseRememberedSet);
        // In a bump pointer space only collection, the non-moving
        // space is added to the immune space. But the non-moving
        // space doesn't have a mod union table. Instead, its live
        // bitmap will be scanned later in MarkReachableObjects().
        DCHECK(generational_ && !whole_heap_collection_ &&
               (space == heap_->GetNonMovingSpace() || space == heap_->GetPrimaryFreeListSpace()))
            << "Space " << space->GetName() << " "
            << "generational_=" << generational_ << " "
            << "whole_heap_collection_=" << whole_heap_collection_ << " ";
      }
    }
  }
}

class SemiSpaceScanObjectVisitor {
 public:
  explicit SemiSpaceScanObjectVisitor(SemiSpace* ss) : semi_space_(ss) {}
  void operator()(Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    // TODO: fix NO_THREAD_SAFETY_ANALYSIS. ScanObject() requires an
    // exclusive lock on the mutator lock, but
    // SpaceBitmap::VisitMarkedRange() only requires the shared lock.
    DCHECK(obj != nullptr);
    semi_space_->ScanObject(obj);
  }
 private:
  SemiSpace* const semi_space_;
};

// Used to verify that there are no references to the from-space.
class SemiSpaceVerifyNoFromSpaceReferencesVisitor {
 public:
  explicit SemiSpaceVerifyNoFromSpaceReferencesVisitor(space::ContinuousMemMapAllocSpace* from_space) :
      from_space_(from_space) {}

  void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
    mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset, false);
    if (from_space_->HasAddress(ref)) {
      Runtime::Current()->GetHeap()->DumpObject(LOG(INFO), obj);
      LOG(FATAL) << ref << " found in from space";
    }
  }
 private:
  space::ContinuousMemMapAllocSpace* from_space_;
};

void SemiSpace::VerifyNoFromSpaceReferences(Object* obj) {
  DCHECK(!from_space_->HasAddress(obj)) << "Scanning object " << obj << " in from space";
  SemiSpaceVerifyNoFromSpaceReferencesVisitor visitor(from_space_);
  obj->VisitReferences<kMovingClasses>(visitor);
}

class SemiSpaceVerifyNoFromSpaceReferencesObjectVisitor {
 public:
  explicit SemiSpaceVerifyNoFromSpaceReferencesObjectVisitor(SemiSpace* ss) : semi_space_(ss) {}
  void operator()(Object* obj) const
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
    DCHECK(obj != nullptr);
    semi_space_->VerifyNoFromSpaceReferences(obj);
  }
 private:
  SemiSpace* const semi_space_;
};

void SemiSpace::MarkReachableObjects() {
  timings_.StartSplit("MarkStackAsLive");
  accounting::ObjectStack* live_stack = heap_->GetLiveStack();
  heap_->MarkAllocStackAsLive(live_stack);
  live_stack->Reset();

  timings_.NewSplit("UpdateAndMarkRememberedSets");
  for (auto& space : heap_->GetContinuousSpaces()) {
    // If the space is immune and has no mod union table (the
    // non-moving space when the bump pointer space only collection is
    // enabled), then we need to scan its live bitmap or dirty cards as roots
    // (including the objects on the live stack which have just been marked
    // in the live bitmap above in MarkAllocStackAsLive().)
    if (immune_region_.ContainsSpace(space) &&
        heap_->FindModUnionTableFromSpace(space) == nullptr) {
      DCHECK(generational_ && !whole_heap_collection_ &&
             (space == GetHeap()->GetNonMovingSpace() || space == GetHeap()->GetPrimaryFreeListSpace()));
      accounting::RememberedSet* rem_set = heap_->FindRememberedSetFromSpace(space);
      if (kUseRememberedSet) {
        DCHECK(rem_set != nullptr);
        rem_set->UpdateAndMarkReferences(MarkHeapReferenceCallback, DelayReferenceReferentCallback,
                                         from_space_, this);
        if (kIsDebugBuild) {
          // Verify that there are no from-space references that
          // remain in the space, that is, the remembered set (and the
          // card table) didn't miss any from-space references in the
          // space.
          accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
          SemiSpaceVerifyNoFromSpaceReferencesObjectVisitor visitor(this);
          live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                        reinterpret_cast<uintptr_t>(space->End()),
                                        visitor);
        }
      } else {
        DCHECK(rem_set == nullptr);
        accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
        SemiSpaceScanObjectVisitor visitor(this);
        live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                      reinterpret_cast<uintptr_t>(space->End()),
                                      visitor);
      }
    }
  }

  if (is_large_object_space_immune_) {
    timings_.NewSplit("VisitLargeObjects");
    DCHECK(generational_ && !whole_heap_collection_);
    // Delay copying the live set to the marked set until here from
    // BindBitmaps() as the large objects on the allocation stack may
    // be newly added to the live set above in MarkAllocStackAsLive().
    GetHeap()->GetLargeObjectsSpace()->CopyLiveToMarked();

    // When the large object space is immune, we need to scan the
    // large object space as roots as they contain references to their
    // classes (primitive array classes) that could move though they
    // don't contain any other references.
    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
    accounting::LargeObjectBitmap* large_live_bitmap = large_object_space->GetLiveBitmap();
    SemiSpaceScanObjectVisitor visitor(this);
    large_live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(large_object_space->Begin()),
                                        reinterpret_cast<uintptr_t>(large_object_space->End()),
                                        visitor);
  }
  timings_.EndSplit();
  // Recursively process the mark stack.
  ProcessMarkStack();
}

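// Reclaim phase: process references and sweep system weaks, sweep the unmarked spaces,
// record the freed counts, then clear and protect the from-space.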
void SemiSpace::ReclaimPhase() {
  TimingLogger::ScopedSplit split("ReclaimPhase", &timings_);
  ProcessReferences(self_);
  {
    ReaderMutexLock mu(self_, *Locks::heap_bitmap_lock_);
    SweepSystemWeaks();
  }
  // Record freed memory.
  uint64_t from_bytes = from_space_->GetBytesAllocated();
  uint64_t to_bytes = bytes_moved_;
  uint64_t from_objects = from_space_->GetObjectsAllocated();
  uint64_t to_objects = objects_moved_;
  CHECK_LE(to_objects, from_objects);
  int64_t freed_bytes = from_bytes - to_bytes;
  int64_t freed_objects = from_objects - to_objects;
  freed_bytes_.FetchAndAdd(freed_bytes);
  freed_objects_.FetchAndAdd(freed_objects);
  // Note: Freed bytes can be negative if we copy from a compacted space to a free-list backed
  // space.
  heap_->RecordFree(freed_objects, freed_bytes);

  timings_.StartSplit("PreSweepingGcVerification");
  heap_->PreSweepingGcVerification(this);
  timings_.EndSplit();
  {
    WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
    // Reclaim unmarked objects.
    Sweep(false);
    // Swap the live and mark bitmaps for each space which we modified. This is an
    // optimization that enables us to not clear live bits inside of the sweep. Only swaps unbound
    // bitmaps.
    timings_.StartSplit("SwapBitmaps");
    SwapBitmaps();
    timings_.EndSplit();
    // Unbind the live and mark bitmaps.
    TimingLogger::ScopedSplit split("UnBindBitmaps", &timings_);
    GetHeap()->UnBindBitmaps();
  }
  // TODO: Do this before doing verification since the from space may have objects which weren't
  // moved and point to dead objects.
  from_space_->Clear();
  // Protect the from space.
  VLOG(heap) << "Protecting space " << *from_space_;
  if (kProtectFromSpace) {
    from_space_->GetMemMap()->Protect(PROT_NONE);
  } else {
    from_space_->GetMemMap()->Protect(PROT_READ);
  }
  if (saved_bytes_ > 0) {
    VLOG(heap) << "Avoided dirtying " << PrettySize(saved_bytes_);
  }

  if (generational_) {
    // Record the end (top) of the to space so we can distinguish
    // between objects that were allocated since the last GC and the
    // older objects.
    last_gc_to_space_end_ = to_space_->End();
  }
}

void SemiSpace::ResizeMarkStack(size_t new_size) {
  std::vector<Object*> temp(mark_stack_->Begin(), mark_stack_->End());
  CHECK_LE(mark_stack_->Size(), new_size);
  mark_stack_->Resize(new_size);
  for (const auto& obj : temp) {
    mark_stack_->PushBack(obj);
  }
}

inline void SemiSpace::MarkStackPush(Object* obj) {
  if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
    ResizeMarkStack(mark_stack_->Capacity() * 2);
  }
  // The object must be pushed on to the mark stack.
  mark_stack_->PushBack(obj);
}

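// memcpy from src to dest that skips writing any whole interior page whose source words are
// all zero. To-space pages start out zeroed (debug builds check this), so skipping the write
// keeps such pages clean for the kernel. Returns the number of bytes saved this way.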
static inline size_t CopyAvoidingDirtyingPages(void* dest, const void* src, size_t size) {
  if (LIKELY(size <= static_cast<size_t>(kPageSize))) {
    // We will dirty the current page and somewhere in the middle of the next page. This means
    // that the next object copied will also dirty that page.
    // TODO: Worth considering the last object copied? We may end up dirtying one page which is
    // not necessary per GC.
    memcpy(dest, src, size);
    return 0;
  }
  size_t saved_bytes = 0;
  byte* byte_dest = reinterpret_cast<byte*>(dest);
  if (kIsDebugBuild) {
    for (size_t i = 0; i < size; ++i) {
      CHECK_EQ(byte_dest[i], 0U);
    }
  }
  // Process the start of the page. The page must already be dirty, don't bother with checking.
  const byte* byte_src = reinterpret_cast<const byte*>(src);
  const byte* limit = byte_src + size;
  size_t page_remain = AlignUp(byte_dest, kPageSize) - byte_dest;
  // Copy the bytes until the start of the next page.
  memcpy(dest, src, page_remain);
  byte_src += page_remain;
  byte_dest += page_remain;
  DCHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_dest), kPageSize);
  DCHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_dest), sizeof(uintptr_t));
  DCHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_src), sizeof(uintptr_t));
  while (byte_src + kPageSize < limit) {
    bool all_zero = true;
    uintptr_t* word_dest = reinterpret_cast<uintptr_t*>(byte_dest);
    const uintptr_t* word_src = reinterpret_cast<const uintptr_t*>(byte_src);
    for (size_t i = 0; i < kPageSize / sizeof(*word_src); ++i) {
      // Assumes the destination of the copy is all zeros.
      if (word_src[i] != 0) {
        all_zero = false;
        word_dest[i] = word_src[i];
      }
    }
    if (all_zero) {
      // Avoided copying into the page since it was all zeros.
      saved_bytes += kPageSize;
    }
    byte_src += kPageSize;
    byte_dest += kPageSize;
  }
  // Handle the part of the page at the end.
  memcpy(byte_dest, byte_src, limit - byte_src);
  return saved_bytes;
}

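// Pick a forwarding address for obj and copy it there. In generational mode, objects
// allocated before the last GC are pseudo-promoted into the main free-list space (falling
// back to the to-space if that allocation fails); everything else goes to the to-space.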
mirror::Object* SemiSpace::MarkNonForwardedObject(mirror::Object* obj) {
  size_t object_size = obj->SizeOf();
  size_t bytes_allocated;
  mirror::Object* forward_address = nullptr;
  if (generational_ && reinterpret_cast<byte*>(obj) < last_gc_to_space_end_) {
    // If it's allocated before the last GC (older), move
    // (pseudo-promote) it to the main free list space (as sort
    // of an old generation.)
    space::MallocSpace* promo_dest_space = GetHeap()->GetPrimaryFreeListSpace();
    forward_address = promo_dest_space->Alloc(self_, object_size, &bytes_allocated, nullptr);
    if (UNLIKELY(forward_address == nullptr)) {
      // If out of space, fall back to the to-space.
      forward_address = to_space_->Alloc(self_, object_size, &bytes_allocated, nullptr);
    } else {
      bytes_promoted_ += bytes_allocated;
      // Dirty the card at the destination as it may contain
      // references (including the class pointer) to the bump pointer
      // space.
      GetHeap()->WriteBarrierEveryFieldOf(forward_address);
      // Handle the bitmaps marking.
      accounting::ContinuousSpaceBitmap* live_bitmap = promo_dest_space->GetLiveBitmap();
      DCHECK(live_bitmap != nullptr);
      accounting::ContinuousSpaceBitmap* mark_bitmap = promo_dest_space->GetMarkBitmap();
      DCHECK(mark_bitmap != nullptr);
      DCHECK(!live_bitmap->Test(forward_address));
      if (!whole_heap_collection_) {
        // If collecting the bump pointer spaces only, live_bitmap == mark_bitmap.
        DCHECK_EQ(live_bitmap, mark_bitmap);

        // If a bump pointer space only collection, delay the live
        // bitmap marking of the promoted object until it's popped off
        // the mark stack (ProcessMarkStack()). The rationale: we may
        // be in the middle of scanning the objects in the promo
        // destination space for
        // non-moving-space-to-bump-pointer-space references by
        // iterating over the marked bits of the live bitmap
        // (MarkReachableObjects()). If we don't delay it (and instead
        // mark the promoted object here), the above promo destination
        // space scan could encounter the just-promoted object and
        // forward the references in the promoted object's fields even
        // though it is pushed onto the mark stack. If this happens,
        // the promoted object would be in an inconsistent state, that
        // is, it's on the mark stack (gray) but its fields are
        // already forwarded (black), which would cause a
        // DCHECK(!to_space_->HasAddress(obj)) failure below.
      } else {
        // Mark forward_address on the live bit map.
        live_bitmap->Set(forward_address);
        // Mark forward_address on the mark bit map.
        DCHECK(!mark_bitmap->Test(forward_address));
        mark_bitmap->Set(forward_address);
      }
    }
    DCHECK(forward_address != nullptr);
  } else {
    // If it's allocated after the last GC (younger), copy it to the to-space.
    forward_address = to_space_->Alloc(self_, object_size, &bytes_allocated, nullptr);
  }
  ++objects_moved_;
  bytes_moved_ += bytes_allocated;
  // Copy over the object and add it to the mark stack since we still need to update its
  // references.
  saved_bytes_ +=
      CopyAvoidingDirtyingPages(reinterpret_cast<void*>(forward_address), obj, object_size);
  if (kUseBakerOrBrooksReadBarrier) {
    obj->AssertReadBarrierPointer();
    if (kUseBrooksReadBarrier) {
      DCHECK_EQ(forward_address->GetReadBarrierPointer(), obj);
      forward_address->SetReadBarrierPointer(forward_address);
    }
    forward_address->AssertReadBarrierPointer();
  }
  if (to_space_live_bitmap_ != nullptr) {
    to_space_live_bitmap_->Set(forward_address);
  }
  DCHECK(to_space_->HasAddress(forward_address) ||
         (generational_ && GetHeap()->GetPrimaryFreeListSpace()->HasAddress(forward_address)));
  return forward_address;
}

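// C-style trampolines: the Runtime and Heap visiting interfaces take plain function
// pointers, so each callback below casts the opaque arg back to the SemiSpace collector.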
void SemiSpace::ProcessMarkStackCallback(void* arg) {
  reinterpret_cast<SemiSpace*>(arg)->ProcessMarkStack();
}

mirror::Object* SemiSpace::MarkObjectCallback(mirror::Object* root, void* arg) {
  auto ref = StackReference<mirror::Object>::FromMirrorPtr(root);
  reinterpret_cast<SemiSpace*>(arg)->MarkObject(&ref);
  return ref.AsMirrorPtr();
}

void SemiSpace::MarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>* obj_ptr,
                                          void* arg) {
  reinterpret_cast<SemiSpace*>(arg)->MarkObject(obj_ptr);
}

void SemiSpace::DelayReferenceReferentCallback(mirror::Class* klass, mirror::Reference* ref,
                                               void* arg) {
  reinterpret_cast<SemiSpace*>(arg)->DelayReferenceReferent(klass, ref);
}

void SemiSpace::MarkRootCallback(Object** root, void* arg, uint32_t /*thread_id*/,
                                 RootType /*root_type*/) {
  auto ref = StackReference<mirror::Object>::FromMirrorPtr(*root);
  reinterpret_cast<SemiSpace*>(arg)->MarkObject(&ref);
  if (*root != ref.AsMirrorPtr()) {
    *root = ref.AsMirrorPtr();
  }
}

// Marks all objects in the root set.
void SemiSpace::MarkRoots() {
  timings_.NewSplit("MarkRoots");
  // TODO: Visit up image roots as well?
  Runtime::Current()->VisitRoots(MarkRootCallback, this);
}

mirror::Object* SemiSpace::MarkedForwardingAddressCallback(mirror::Object* object, void* arg) {
  return reinterpret_cast<SemiSpace*>(arg)->GetMarkedForwardAddress(object);
}

void SemiSpace::SweepSystemWeaks() {
  timings_.StartSplit("SweepSystemWeaks");
  Runtime::Current()->SweepSystemWeaks(MarkedForwardingAddressCallback, this);
  timings_.EndSplit();
}

bool SemiSpace::ShouldSweepSpace(space::ContinuousSpace* space) const {
  return space != from_space_ && space != to_space_ && !immune_region_.ContainsSpace(space);
}

void SemiSpace::Sweep(bool swap_bitmaps) {
  DCHECK(mark_stack_->IsEmpty());
  TimingLogger::ScopedSplit split("Sweep", &timings_);
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsContinuousMemMapAllocSpace()) {
      space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
      if (!ShouldSweepSpace(alloc_space)) {
        continue;
      }
      TimingLogger::ScopedSplit split(
          alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepAllocSpace", &timings_);
      size_t freed_objects = 0;
      size_t freed_bytes = 0;
      alloc_space->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
      heap_->RecordFree(freed_objects, freed_bytes);
      freed_objects_.FetchAndAdd(freed_objects);
      freed_bytes_.FetchAndAdd(freed_bytes);
    }
  }
  if (!is_large_object_space_immune_) {
    SweepLargeObjects(swap_bitmaps);
  }
}

void SemiSpace::SweepLargeObjects(bool swap_bitmaps) {
  DCHECK(!is_large_object_space_immune_);
  TimingLogger::ScopedSplit split("SweepLargeObjects", &timings_);
  size_t freed_objects = 0;
  size_t freed_bytes = 0;
  heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
  freed_large_objects_.FetchAndAdd(freed_objects);
  freed_large_object_bytes_.FetchAndAdd(freed_bytes);
  heap_->RecordFree(freed_objects, freed_bytes);
}

// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
void SemiSpace::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) {
  heap_->DelayReferenceReferent(klass, reference, MarkedForwardingAddressCallback, this);
}

class SemiSpaceMarkObjectVisitor {
 public:
  explicit SemiSpaceMarkObjectVisitor(SemiSpace* collector) : collector_(collector) {
  }

  void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const ALWAYS_INLINE
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
    // Object was already verified when we scanned it.
    collector_->MarkObject(obj->GetFieldObjectReferenceAddr<kVerifyNone>(offset));
  }

  void operator()(mirror::Class* klass, mirror::Reference* ref) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    collector_->DelayReferenceReferent(klass, ref);
  }

 private:
  SemiSpace* const collector_;
};

// Visit all of the references of an object and update.
void SemiSpace::ScanObject(Object* obj) {
  DCHECK(!from_space_->HasAddress(obj)) << "Scanning object " << obj << " in from space";
  SemiSpaceMarkObjectVisitor visitor(this);
  obj->VisitReferences<kMovingClasses>(visitor, visitor);
}

// Scan anything that's on the mark stack.
void SemiSpace::ProcessMarkStack() {
  space::MallocSpace* promo_dest_space = nullptr;
  accounting::ContinuousSpaceBitmap* live_bitmap = nullptr;
  if (generational_ && !whole_heap_collection_) {
    // In a bump pointer space only collection (with promotion
    // enabled), we delay the live-bitmap marking of promoted objects
    // from MarkObject() until this function.
    promo_dest_space = GetHeap()->GetPrimaryFreeListSpace();
    live_bitmap = promo_dest_space->GetLiveBitmap();
    DCHECK(live_bitmap != nullptr);
    accounting::ContinuousSpaceBitmap* mark_bitmap = promo_dest_space->GetMarkBitmap();
    DCHECK(mark_bitmap != nullptr);
    DCHECK_EQ(live_bitmap, mark_bitmap);
  }
  timings_.StartSplit("ProcessMarkStack");
  while (!mark_stack_->IsEmpty()) {
    Object* obj = mark_stack_->PopBack();
    if (generational_ && !whole_heap_collection_ && promo_dest_space->HasAddress(obj)) {
      // obj has just been promoted. Mark the live bitmap for it,
      // which is delayed from MarkObject().
      DCHECK(!live_bitmap->Test(obj));
      live_bitmap->Set(obj);
    }
    ScanObject(obj);
  }
  timings_.EndSplit();
}

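// Returns the marked address of obj: immune objects map to themselves, from-space objects
// to their forwarding address (nullptr if not yet forwarded), to-space objects are already
// forwarded, and anything else is marked iff it is set in the heap mark bitmap.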
inline Object* SemiSpace::GetMarkedForwardAddress(mirror::Object* obj) const
    SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
  // All immune objects are assumed marked.
  if (immune_region_.ContainsObject(obj)) {
    return obj;
  }
  if (from_space_->HasAddress(obj)) {
    // Returns either the forwarding address or nullptr.
    return GetForwardingAddressInFromSpace(obj);
  } else if (to_space_->HasAddress(obj)) {
    // Should be unlikely.
    // Already forwarded, must be marked.
    return obj;
  }
  return heap_->GetMarkBitmap()->Test(obj) ? obj : nullptr;
}

void SemiSpace::SetToSpace(space::ContinuousMemMapAllocSpace* to_space) {
  DCHECK(to_space != nullptr);
  to_space_ = to_space;
}

void SemiSpace::SetFromSpace(space::ContinuousMemMapAllocSpace* from_space) {
  DCHECK(from_space != nullptr);
  from_space_ = from_space;
}

void SemiSpace::FinishPhase() {
  TimingLogger::ScopedSplit split("FinishPhase", &timings_);
  Heap* heap = GetHeap();
  timings_.NewSplit("PostGcVerification");
  heap->PostGcVerification(this);
  // Null the "to" and "from" spaces since compacting from one to the other isn't valid until
  // further action is done by the heap.
  to_space_ = nullptr;
  from_space_ = nullptr;
  CHECK(mark_stack_->IsEmpty());
  mark_stack_->Reset();
  if (generational_) {
    // Decide whether to do a whole heap collection or a bump pointer
    // space only collection at the next collection by updating
    // whole_heap_collection.
    if (!whole_heap_collection_) {
      if (!kUseBytesPromoted) {
        // Enable whole_heap_collection once every
        // kDefaultWholeHeapCollectionInterval collections.
        --whole_heap_collection_interval_counter_;
        DCHECK_GE(whole_heap_collection_interval_counter_, 0);
        if (whole_heap_collection_interval_counter_ == 0) {
          whole_heap_collection_ = true;
        }
      } else {
        // Enable whole_heap_collection if the bytes promoted since
        // the last whole heap collection exceed a threshold.
        bytes_promoted_since_last_whole_heap_collection_ += bytes_promoted_;
        if (bytes_promoted_since_last_whole_heap_collection_ >= kBytesPromotedThreshold) {
          whole_heap_collection_ = true;
        }
      }
    } else {
      if (!kUseBytesPromoted) {
        DCHECK_EQ(whole_heap_collection_interval_counter_, 0);
        whole_heap_collection_interval_counter_ = kDefaultWholeHeapCollectionInterval;
        whole_heap_collection_ = false;
      } else {
        // Reset it.
        bytes_promoted_since_last_whole_heap_collection_ = bytes_promoted_;
        whole_heap_collection_ = false;
      }
    }
  }
  // Clear all of the spaces' mark bitmaps.
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  heap_->ClearMarkedObjects();
}

void SemiSpace::RevokeAllThreadLocalBuffers() {
  timings_.StartSplit("(Paused)RevokeAllThreadLocalBuffers");
  GetHeap()->RevokeAllThreadLocalBuffers();
  timings_.EndSplit();
}

}  // namespace collector
}  // namespace gc
}  // namespace art