/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mark_sweep.h"

#include <atomic>
#include <climits>
#include <functional>
#include <numeric>
#include <vector>

#include "base/bounded_fifo.h"
#include "base/enums.h"
#include "base/file_utils.h"
#include "base/logging.h"  // For VLOG.
#include "base/macros.h"
#include "base/mutex-inl.h"
#include "base/systrace.h"
#include "base/time_utils.h"
#include "base/timing_logger.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/mod_union_table.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/reference_processor.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "mark_sweep-inl.h"
#include "mirror/object-inl.h"
#include "runtime.h"
#include "scoped_thread_state_change-inl.h"
#include "thread-current-inl.h"
#include "thread_list.h"

namespace art {
namespace gc {
namespace collector {

// Performance options.
static constexpr bool kUseRecursiveMark = false;
static constexpr bool kUseMarkStackPrefetch = true;
static constexpr size_t kSweepArrayChunkFreeSize = 1024;
static constexpr bool kPreCleanCards = true;

// Parallelism options.
static constexpr bool kParallelCardScan = true;
static constexpr bool kParallelRecursiveMark = true;
// Don't attempt to parallelize mark stack processing unless the mark stack is at least n
// elements. This is temporary until we reduce the overhead caused by allocating tasks, etc. Not
// having this can add overhead in ProcessReferences since we may end up doing many calls to
// ProcessMarkStack with very small mark stacks.
static constexpr size_t kMinimumParallelMarkStackSize = 128;
static constexpr bool kParallelProcessMarkStack = true;

// Profiling and information flags.
static constexpr bool kProfileLargeObjects = false;
static constexpr bool kMeasureOverhead = false;
static constexpr bool kCountTasks = false;
static constexpr bool kCountMarkedObjects = false;

// Turn off kCheckLocks when profiling the GC since it slows the GC down by up to 40%.
static constexpr bool kCheckLocks = kDebugLocking;
static constexpr bool kVerifyRootsMarked = kIsDebugBuild;

// If true, revoke the rosalloc thread-local buffers at the
// checkpoint, as opposed to during the pause.
static constexpr bool kRevokeRosAllocThreadLocalBuffersAtCheckpoint = true;

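// Records every never-collected space (image and zygote spaces) as immune, so the collector can
// treat all objects inside them as live without scanning their bitmaps.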
void MarkSweep::BindBitmaps() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect) {
      immune_spaces_.AddSpace(space);
    }
  }
}

MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix +
                       (is_concurrent ? "concurrent mark sweep": "mark sweep")),
      current_space_bitmap_(nullptr),
      mark_bitmap_(nullptr),
      mark_stack_(nullptr),
      gc_barrier_(new Barrier(0)),
      mark_stack_lock_("mark sweep mark stack lock", kMarkSweepMarkStackLock),
      is_concurrent_(is_concurrent),
      live_stack_freeze_size_(0) {
  std::string error_msg;
  sweep_array_free_buffer_mem_map_ = MemMap::MapAnonymous(
      "mark sweep sweep array free buffer",
      /* addr= */ nullptr,
      RoundUp(kSweepArrayChunkFreeSize * sizeof(mirror::Object*), kPageSize),
      PROT_READ | PROT_WRITE,
      /* low_4gb= */ false,
      &error_msg);
  CHECK(sweep_array_free_buffer_mem_map_.IsValid())
      << "Couldn't allocate sweep array free buffer: " << error_msg;
}

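// Resets per-collection state: the mark stack, the immune spaces, the profiling counters, and
// the cached mark bitmap, and decides whether this cycle should clear soft references.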
void MarkSweep::InitializePhase() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  mark_stack_ = heap_->GetMarkStack();
  DCHECK(mark_stack_ != nullptr);
  immune_spaces_.Reset();
  no_reference_class_count_.store(0, std::memory_order_relaxed);
  normal_count_.store(0, std::memory_order_relaxed);
  class_count_.store(0, std::memory_order_relaxed);
  object_array_count_.store(0, std::memory_order_relaxed);
  other_count_.store(0, std::memory_order_relaxed);
  reference_count_.store(0, std::memory_order_relaxed);
  large_object_test_.store(0, std::memory_order_relaxed);
  large_object_mark_.store(0, std::memory_order_relaxed);
  overhead_time_.store(0, std::memory_order_relaxed);
  work_chunks_created_.store(0, std::memory_order_relaxed);
  work_chunks_deleted_.store(0, std::memory_order_relaxed);
  mark_null_count_.store(0, std::memory_order_relaxed);
  mark_immune_count_.store(0, std::memory_order_relaxed);
  mark_fastpath_count_.store(0, std::memory_order_relaxed);
  mark_slowpath_count_.store(0, std::memory_order_relaxed);
  {
    // TODO: I don't think we should need heap bitmap lock to get the mark bitmap.
    ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    mark_bitmap_ = heap_->GetMarkBitmap();
  }
  if (!GetCurrentIteration()->GetClearSoftReferences()) {
    // Always clear soft references for non-sticky collections.
    GetCurrentIteration()->SetClearSoftReferences(GetGcType() != collector::kGcTypeSticky);
  }
}

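// Top-level driver for a collection cycle. For a concurrent collection, marking runs under a
// shared mutator lock and is followed by a short pause for re-marking; for a non-concurrent
// collection, the whole marking phase runs inside the pause. Reclamation is concurrent either
// way.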
void MarkSweep::RunPhases() {
  Thread* self = Thread::Current();
  InitializePhase();
  Locks::mutator_lock_->AssertNotHeld(self);
  if (IsConcurrent()) {
    GetHeap()->PreGcVerification(this);
    {
      ReaderMutexLock mu(self, *Locks::mutator_lock_);
      MarkingPhase();
    }
    ScopedPause pause(this);
    GetHeap()->PrePauseRosAllocVerification(this);
    PausePhase();
    RevokeAllThreadLocalBuffers();
  } else {
    ScopedPause pause(this);
    GetHeap()->PreGcVerificationPaused(this);
    MarkingPhase();
    GetHeap()->PrePauseRosAllocVerification(this);
    PausePhase();
    RevokeAllThreadLocalBuffers();
  }
  {
    // Sweeping is always done concurrently, even for non-concurrent mark sweep.
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    ReclaimPhase();
  }
  GetHeap()->PostGcVerification(this);
  FinishPhase();
}

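// Delegates reference processing to the heap's ReferenceProcessor, using the clear-soft-references
// policy chosen for the current iteration.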
void MarkSweep::ProcessReferences(Thread* self) {
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetHeap()->GetReferenceProcessor()->ProcessReferences(
      true,
      GetTimings(),
      GetCurrentIteration()->GetClearSoftReferences(),
      this);
}

void MarkSweep::PausePhase() {
  TimingLogger::ScopedTiming t("(Paused)PausePhase", GetTimings());
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertExclusiveHeld(self);
  if (IsConcurrent()) {
    // Handle the dirty objects if we are a concurrent GC.
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // Re-mark root set.
    ReMarkRoots();
    // Scan dirty objects; this is only required if we are doing concurrent GC.
    RecursiveMarkDirtyObjects(true, accounting::CardTable::kCardDirty);
  }
  {
    TimingLogger::ScopedTiming t2("SwapStacks", GetTimings());
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    heap_->SwapStacks();
    live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
    // Need to revoke all the thread-local allocation stacks since we just swapped the allocation
    // stacks and don't want anybody to allocate into the live stack.
    RevokeAllThreadLocalAllocationStacks(self);
  }
  heap_->PreSweepingGcVerification(this);
  // Disallow new system weaks to prevent a race which occurs when someone adds a new system
  // weak before we sweep them. Since this new system weak may not be marked, the GC may
  // incorrectly sweep it. This also fixes a race where interning may attempt to return a strong
  // reference to a string that is about to be swept.
  Runtime::Current()->DisallowNewSystemWeaks();
  // Enable the reference processing slow path, needs to be done with mutators paused since there
  // is no lock in the GetReferent fast path.
  GetHeap()->GetReferenceProcessor()->EnableSlowPath();
}

void MarkSweep::PreCleanCards() {
  // Don't do this for non-concurrent GCs since they don't have any dirty cards.
  if (kPreCleanCards && IsConcurrent()) {
    TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
    Thread* self = Thread::Current();
    CHECK(!Locks::mutator_lock_->IsExclusiveHeld(self));
    // Process dirty cards and add dirty cards to mod union tables, also ages cards.
    heap_->ProcessCards(GetTimings(), false, true, false);
    // The checkpoint root marking is required to avoid a race condition which occurs if the
    // following happens during a reference write:
    // 1. mutator dirties the card (write barrier)
    // 2. GC ages the card (the above ProcessCards call)
    // 3. GC scans the object (the RecursiveMarkDirtyObjects call below)
    // 4. mutator writes the value (corresponding to the write barrier in 1.)
    // This causes the GC to age the card but not necessarily mark the reference which the mutator
    // wrote into the object stored in the card.
    // Having the checkpoint fixes this issue since it ensures that the card mark and the
    // reference write are visible to the GC before the card is scanned (this is due to locks being
    // acquired / released in the checkpoint code).
    // The other roots are also marked to help reduce the pause.
    MarkRootsCheckpoint(self, false);
    MarkNonThreadRoots();
    MarkConcurrentRoots(
        static_cast<VisitRootFlags>(kVisitRootFlagClearRootLog | kVisitRootFlagNewRoots));
    // Process the newly aged cards.
    RecursiveMarkDirtyObjects(false, accounting::CardTable::kCardDirty - 1);
    // TODO: Empty allocation stack to reduce the number of objects we need to test / mark as live
    // in the next GC.
  }
}

void MarkSweep::RevokeAllThreadLocalAllocationStacks(Thread* self) {
  if (kUseThreadLocalAllocationStack) {
    TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
    Locks::mutator_lock_->AssertExclusiveHeld(self);
    heap_->RevokeAllThreadLocalAllocationStacks(self);
  }
}

void MarkSweep::MarkingPhase() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  Thread* self = Thread::Current();
  BindBitmaps();
  FindDefaultSpaceBitmap();
  // Process dirty cards and add dirty cards to mod union tables.
  // If the GC type is non-sticky, then we just clear the cards of the
  // alloc space instead of aging them.
  //
  // Note that it is fine to clear the cards of the alloc space here,
  // in the case of a concurrent (non-sticky) mark-sweep GC (whose
  // marking phase _is_ performed concurrently with mutator threads
  // running and possibly dirtying cards), as the whole alloc space
  // will be traced in that case, starting *after* this call to
  // Heap::ProcessCards (see calls to MarkSweep::MarkRoots and
  // MarkSweep::MarkReachableObjects). References held by objects on
  // cards that became dirty *after* the actual marking work started
  // will be marked in the pause (see MarkSweep::PausePhase), in a
  // *non-concurrent* way to prevent races with mutator threads.
  //
  // TODO: Do we need some sort of fence between the call to
  // Heap::ProcessCard and the calls to MarkSweep::MarkRoot /
  // MarkSweep::MarkReachableObjects below to make sure write
  // operations in the card table clearing the alloc space's dirty
  // cards (during the call to Heap::ProcessCard) are not reordered
  // *after* marking actually starts?
  heap_->ProcessCards(GetTimings(),
                      /* use_rem_sets= */ false,
                      /* process_alloc_space_cards= */ true,
                      /* clear_alloc_space_cards= */ GetGcType() != kGcTypeSticky);
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  MarkRoots(self);
  MarkReachableObjects();
  // Pre-clean dirtied cards to reduce pauses.
  PreCleanCards();
}

class MarkSweep::ScanObjectVisitor {
 public:
  explicit ScanObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE
      : mark_sweep_(mark_sweep) {}

  void operator()(ObjPtr<mirror::Object> obj) const
      ALWAYS_INLINE
      REQUIRES(Locks::heap_bitmap_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (kCheckLocks) {
      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    mark_sweep_->ScanObject(obj.Ptr());
  }

 private:
  MarkSweep* const mark_sweep_;
};

void MarkSweep::UpdateAndMarkModUnion() {
  for (const auto& space : immune_spaces_.GetSpaces()) {
    const char* name = space->IsZygoteSpace()
        ? "UpdateAndMarkZygoteModUnionTable"
        : "UpdateAndMarkImageModUnionTable";
    DCHECK(space->IsZygoteSpace() || space->IsImageSpace()) << *space;
    TimingLogger::ScopedTiming t(name, GetTimings());
    accounting::ModUnionTable* mod_union_table = heap_->FindModUnionTableFromSpace(space);
    if (mod_union_table != nullptr) {
      mod_union_table->UpdateAndMarkReferences(this);
    } else {
      // No mod-union table, scan all the live bits. This can only occur for app images.
      space->GetLiveBitmap()->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                               reinterpret_cast<uintptr_t>(space->End()),
                                               ScanObjectVisitor(this));
    }
  }
}

void MarkSweep::MarkReachableObjects() {
  UpdateAndMarkModUnion();
  // Recursively mark all the non-image bits set in the mark bitmap.
  RecursiveMark();
}

void MarkSweep::ReclaimPhase() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  Thread* const self = Thread::Current();
  // Process the references concurrently.
  ProcessReferences(self);
  SweepSystemWeaks(self);
  Runtime* const runtime = Runtime::Current();
  runtime->AllowNewSystemWeaks();
  // Clean up class loaders after system weaks are swept since that is how we know if class
  // unloading occurred.
  runtime->GetClassLinker()->CleanupClassLoaders();
  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    GetHeap()->RecordFreeRevoke();
    // Reclaim unmarked objects.
    Sweep(false);
    // Swap the live and mark bitmaps for each space which we modified. This is an optimization
    // that enables us to not clear live bits inside of the sweep. Only swaps unbound bitmaps.
    SwapBitmaps();
    // Unbind the live and mark bitmaps.
    GetHeap()->UnBindBitmaps();
  }
}

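// Picks the bitmap used by the marking fast path: the mark bitmap of an always-collect space,
// preferring the main space over the non-moving space when both qualify.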
void MarkSweep::FindDefaultSpaceBitmap() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    accounting::ContinuousSpaceBitmap* bitmap = space->GetMarkBitmap();
    // We want to have the main space instead of the non-moving space if possible.
    if (bitmap != nullptr &&
        space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) {
      current_space_bitmap_ = bitmap;
      // If this is not the non-moving space, exit the loop early since it will be good enough.
      if (space != heap_->GetNonMovingSpace()) {
        break;
      }
    }
  }
  CHECK(current_space_bitmap_ != nullptr) << "Could not find a default mark bitmap\n"
                                          << heap_->DumpSpaces();
}

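// Doubles the mark stack capacity. Callers race on mark_stack_lock_; if another thread already
// grew the stack while we waited for the lock, ResizeMarkStack returns without resizing.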
void MarkSweep::ExpandMarkStack() {
  ResizeMarkStack(mark_stack_->Capacity() * 2);
}

void MarkSweep::ResizeMarkStack(size_t new_size) {
  // Rare case, no need to have Thread::Current be a parameter.
  if (UNLIKELY(mark_stack_->Size() < mark_stack_->Capacity())) {
    // Someone else acquired the lock and expanded the mark stack before us.
    return;
  }
  std::vector<StackReference<mirror::Object>> temp(mark_stack_->Begin(), mark_stack_->End());
  CHECK_LE(mark_stack_->Size(), new_size);
  mark_stack_->Resize(new_size);
  for (auto& obj : temp) {
    mark_stack_->PushBack(obj.AsMirrorPtr());
  }
}

mirror::Object* MarkSweep::MarkObject(mirror::Object* obj) {
  MarkObject(obj, nullptr, MemberOffset(0));
  return obj;
}

inline void MarkSweep::MarkObjectNonNullParallel(mirror::Object* obj) {
  DCHECK(obj != nullptr);
  if (MarkObjectParallel(obj)) {
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
      ExpandMarkStack();
    }
    // The object must be pushed on to the mark stack.
    mark_stack_->PushBack(obj);
  }
}

bool MarkSweep::IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref,
                                            bool do_atomic_update ATTRIBUTE_UNUSED) {
  mirror::Object* obj = ref->AsMirrorPtr();
  if (obj == nullptr) {
    return true;
  }
  return IsMarked(obj);
}

class MarkSweep::MarkObjectSlowPath {
 public:
  explicit MarkObjectSlowPath(MarkSweep* mark_sweep,
                              mirror::Object* holder = nullptr,
                              MemberOffset offset = MemberOffset(0))
      : mark_sweep_(mark_sweep),
        holder_(holder),
        offset_(offset) {}

  void operator()(const mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
    if (kProfileLargeObjects) {
      // TODO: Differentiate between marking and testing somehow.
      ++mark_sweep_->large_object_test_;
      ++mark_sweep_->large_object_mark_;
    }
    space::LargeObjectSpace* large_object_space = mark_sweep_->GetHeap()->GetLargeObjectsSpace();
    if (UNLIKELY(obj == nullptr || !IsAligned<kPageSize>(obj) ||
                 (kIsDebugBuild && large_object_space != nullptr &&
                     !large_object_space->Contains(obj)))) {
      // Lowest priority logging first:
      PrintFileToLog("/proc/self/maps", LogSeverity::FATAL_WITHOUT_ABORT);
      MemMap::DumpMaps(LOG_STREAM(FATAL_WITHOUT_ABORT), /* terse= */ true);
      // Buffer the output in the string stream since it is more important than the stack traces
      // and we want it to have log priority. The stack traces are printed from Runtime::Abort
      // which is called from LOG(FATAL) but before the abort message.
      std::ostringstream oss;
      oss << "Tried to mark " << obj << " not contained by any spaces" << std::endl;
      if (holder_ != nullptr) {
        size_t holder_size = holder_->SizeOf();
        ArtField* field = holder_->FindFieldByOffset(offset_);
        oss << "Field info: "
            << " holder=" << holder_
            << " holder is "
            << (mark_sweep_->GetHeap()->IsLiveObjectLocked(holder_)
                    ? "alive" : "dead")
            << " holder_size=" << holder_size
            << " holder_type=" << holder_->PrettyTypeOf()
            << " offset=" << offset_.Uint32Value()
            << " field=" << (field != nullptr ? field->GetName() : "nullptr")
            << " field_type="
            << (field != nullptr ? field->GetTypeDescriptor() : "")
            << " first_ref_field_offset="
            << (holder_->IsClass()
                    ? holder_->AsClass()->GetFirstReferenceStaticFieldOffset(
                        kRuntimePointerSize)
                    : holder_->GetClass()->GetFirstReferenceInstanceFieldOffset())
            << " num_of_ref_fields="
            << (holder_->IsClass()
                    ? holder_->AsClass()->NumReferenceStaticFields()
                    : holder_->GetClass()->NumReferenceInstanceFields())
            << std::endl;
        // Print the memory content of the holder.
        for (size_t i = 0; i < holder_size / sizeof(uint32_t); ++i) {
          uint32_t* p = reinterpret_cast<uint32_t*>(holder_);
          oss << &p[i] << ": " << "holder+" << (i * sizeof(uint32_t)) << " = " << std::hex << p[i]
              << std::endl;
        }
      }
      oss << "Attempting to see if it's a bad thread root" << std::endl;
      mark_sweep_->VerifySuspendedThreadRoots(oss);
      LOG(FATAL) << oss.str();
    }
  }

 private:
  MarkSweep* const mark_sweep_;
  mirror::Object* const holder_;
  MemberOffset offset_;
};

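// Marks a known non-null object. Fast path: the object lies in the current space bitmap and is
// marked with a plain bitmap set. Immune objects are expected to be marked already, so they only
// get a debug check. Everything else goes through the heap mark bitmap, with MarkObjectSlowPath
// validating the address if the bitmap does not cover it.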
inline void MarkSweep::MarkObjectNonNull(mirror::Object* obj,
                                         mirror::Object* holder,
                                         MemberOffset offset) {
  DCHECK(obj != nullptr);
  if (kUseBakerReadBarrier) {
    // Verify all the objects have the correct state installed.
    obj->AssertReadBarrierState();
  }
  if (immune_spaces_.IsInImmuneRegion(obj)) {
    if (kCountMarkedObjects) {
      ++mark_immune_count_;
    }
    DCHECK(mark_bitmap_->Test(obj));
  } else if (LIKELY(current_space_bitmap_->HasAddress(obj))) {
    if (kCountMarkedObjects) {
      ++mark_fastpath_count_;
    }
    if (UNLIKELY(!current_space_bitmap_->Set(obj))) {
      PushOnMarkStack(obj);  // This object was not previously marked.
    }
  } else {
    if (kCountMarkedObjects) {
      ++mark_slowpath_count_;
    }
    MarkObjectSlowPath visitor(this, holder, offset);
    // TODO: We already know that the object is not in the current_space_bitmap_ but MarkBitmap::Set
    // will check again.
    if (!mark_bitmap_->Set(obj, visitor)) {
      PushOnMarkStack(obj);  // Was not already marked, push.
    }
  }
}

inline void MarkSweep::PushOnMarkStack(mirror::Object* obj) {
  if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
    // Lock is not needed but is here anyway to please annotalysis.
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    ExpandMarkStack();
  }
  // The object must be pushed on to the mark stack.
  mark_stack_->PushBack(obj);
}

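// Thread-safe variant of the mark test: atomically test-and-sets the mark bit and returns true
// only if this call actually marked the object, so exactly one worker ends up pushing it on a
// mark stack.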
inline bool MarkSweep::MarkObjectParallel(mirror::Object* obj) {
  DCHECK(obj != nullptr);
  if (kUseBakerReadBarrier) {
    // Verify all the objects have the correct state installed.
    obj->AssertReadBarrierState();
  }
  if (immune_spaces_.IsInImmuneRegion(obj)) {
    DCHECK(IsMarked(obj) != nullptr);
    return false;
  }
  // Try to take advantage of locality of references within a space, failing this find the space
  // the hard way.
  accounting::ContinuousSpaceBitmap* object_bitmap = current_space_bitmap_;
  if (LIKELY(object_bitmap->HasAddress(obj))) {
    return !object_bitmap->AtomicTestAndSet(obj);
  }
  MarkObjectSlowPath visitor(this);
  return !mark_bitmap_->AtomicTestAndSet(obj, visitor);
}

void MarkSweep::MarkHeapReference(mirror::HeapReference<mirror::Object>* ref,
                                  bool do_atomic_update ATTRIBUTE_UNUSED) {
  MarkObject(ref->AsMirrorPtr(), nullptr, MemberOffset(0));
}

// Used to mark objects when processing the mark stack. If an object is null, it is not marked.
inline void MarkSweep::MarkObject(mirror::Object* obj,
                                  mirror::Object* holder,
                                  MemberOffset offset) {
  if (obj != nullptr) {
    MarkObjectNonNull(obj, holder, offset);
  } else if (kCountMarkedObjects) {
    ++mark_null_count_;
  }
}

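// Debug visitor that CHECKs every visited root has already been marked, aborting with the
// offending root's info otherwise.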
class MarkSweep::VerifyRootMarkedVisitor : public SingleRootVisitor {
 public:
  explicit VerifyRootMarkedVisitor(MarkSweep* collector) : collector_(collector) { }

  void VisitRoot(mirror::Object* root, const RootInfo& info) override
      REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
    CHECK(collector_->IsMarked(root) != nullptr) << info.ToString();
  }

 private:
  MarkSweep* const collector_;
};

void MarkSweep::VisitRoots(mirror::Object*** roots,
                           size_t count,
                           const RootInfo& info ATTRIBUTE_UNUSED) {
  for (size_t i = 0; i < count; ++i) {
    MarkObjectNonNull(*roots[i]);
  }
}

void MarkSweep::VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
                           size_t count,
                           const RootInfo& info ATTRIBUTE_UNUSED) {
  for (size_t i = 0; i < count; ++i) {
    MarkObjectNonNull(roots[i]->AsMirrorPtr());
  }
}

class MarkSweep::VerifyRootVisitor : public SingleRootVisitor {
 public:
  explicit VerifyRootVisitor(std::ostream& os) : os_(os) {}

  void VisitRoot(mirror::Object* root, const RootInfo& info) override
      REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
    // See if the root is on any space bitmap.
    auto* heap = Runtime::Current()->GetHeap();
    if (heap->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == nullptr) {
      space::LargeObjectSpace* large_object_space = heap->GetLargeObjectsSpace();
      if (large_object_space != nullptr && !large_object_space->Contains(root)) {
        os_ << "Found invalid root: " << root << " " << info << std::endl;
      }
    }
  }

 private:
  std::ostream& os_;
};

void MarkSweep::VerifySuspendedThreadRoots(std::ostream& os) {
  VerifyRootVisitor visitor(os);
  Runtime::Current()->GetThreadList()->VisitRootsForSuspendedThreads(&visitor);
}

void MarkSweep::MarkRoots(Thread* self) {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
    // If we exclusively hold the mutator lock, all threads must be suspended.
    Runtime::Current()->VisitRoots(this);
    RevokeAllThreadLocalAllocationStacks(self);
  } else {
    MarkRootsCheckpoint(self, kRevokeRosAllocThreadLocalBuffersAtCheckpoint);
    // At this point the live stack should no longer have any mutators which push into it.
    MarkNonThreadRoots();
    MarkConcurrentRoots(
        static_cast<VisitRootFlags>(kVisitRootFlagAllRoots | kVisitRootFlagStartLoggingNewRoots));
  }
}

void MarkSweep::MarkNonThreadRoots() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  Runtime::Current()->VisitNonThreadRoots(this);
}

void MarkSweep::MarkConcurrentRoots(VisitRootFlags flags) {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  // Visit all runtime roots and clear dirty flags.
  Runtime::Current()->VisitConcurrentRoots(this, flags);
}

class MarkSweep::DelayReferenceReferentVisitor {
 public:
  explicit DelayReferenceReferentVisitor(MarkSweep* collector) : collector_(collector) {}

  void operator()(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref) const
      REQUIRES(Locks::heap_bitmap_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    collector_->DelayReferenceReferent(klass, ref);
  }

 private:
  MarkSweep* const collector_;
};

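// A unit of parallel marking work. Each task owns a fixed-size local mark stack (kMaxSize
// entries); on overflow, half of the local stack is split off into a new task and handed to the
// thread pool. When kUseFinger is enabled, newly marked references at or above the atomic finger
// are not pushed, since the in-order bitmap scan will reach them later.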
template <bool kUseFinger = false>
class MarkSweep::MarkStackTask : public Task {
 public:
  MarkStackTask(ThreadPool* thread_pool,
                MarkSweep* mark_sweep,
                size_t mark_stack_size,
                StackReference<mirror::Object>* mark_stack)
      : mark_sweep_(mark_sweep),
        thread_pool_(thread_pool),
        mark_stack_pos_(mark_stack_size) {
    // We may have to copy part of an existing mark stack when another mark stack overflows.
    if (mark_stack_size != 0) {
      DCHECK(mark_stack != nullptr);
      // TODO: Check performance?
      std::copy(mark_stack, mark_stack + mark_stack_size, mark_stack_);
    }
    if (kCountTasks) {
      ++mark_sweep_->work_chunks_created_;
    }
  }

  static const size_t kMaxSize = 1 * KB;

 protected:
  class MarkObjectParallelVisitor {
   public:
    ALWAYS_INLINE MarkObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task,
                                            MarkSweep* mark_sweep)
        : chunk_task_(chunk_task), mark_sweep_(mark_sweep) {}

    ALWAYS_INLINE void operator()(mirror::Object* obj,
                                  MemberOffset offset,
                                  bool is_static ATTRIBUTE_UNUSED) const
        REQUIRES_SHARED(Locks::mutator_lock_) {
      Mark(obj->GetFieldObject<mirror::Object>(offset));
    }

    void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
        REQUIRES_SHARED(Locks::mutator_lock_) {
      if (!root->IsNull()) {
        VisitRoot(root);
      }
    }

    void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
        REQUIRES_SHARED(Locks::mutator_lock_) {
      if (kCheckLocks) {
        Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
        Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
      }
      Mark(root->AsMirrorPtr());
    }

   private:
    ALWAYS_INLINE void Mark(mirror::Object* ref) const REQUIRES_SHARED(Locks::mutator_lock_) {
      if (ref != nullptr && mark_sweep_->MarkObjectParallel(ref)) {
        if (kUseFinger) {
          std::atomic_thread_fence(std::memory_order_seq_cst);
          if (reinterpret_cast<uintptr_t>(ref) >=
              static_cast<uintptr_t>(mark_sweep_->atomic_finger_.load(std::memory_order_relaxed))) {
            return;
          }
        }
        chunk_task_->MarkStackPush(ref);
      }
    }

    MarkStackTask<kUseFinger>* const chunk_task_;
    MarkSweep* const mark_sweep_;
  };

  class ScanObjectParallelVisitor {
   public:
    ALWAYS_INLINE explicit ScanObjectParallelVisitor(MarkStackTask<kUseFinger>* chunk_task)
        : chunk_task_(chunk_task) {}

    // No thread safety analysis since multiple threads will use this visitor.
    void operator()(mirror::Object* obj) const
        REQUIRES(Locks::heap_bitmap_lock_)
        REQUIRES_SHARED(Locks::mutator_lock_) {
      MarkSweep* const mark_sweep = chunk_task_->mark_sweep_;
      MarkObjectParallelVisitor mark_visitor(chunk_task_, mark_sweep);
      DelayReferenceReferentVisitor ref_visitor(mark_sweep);
      mark_sweep->ScanObjectVisit(obj, mark_visitor, ref_visitor);
    }

   private:
    MarkStackTask<kUseFinger>* const chunk_task_;
  };

  virtual ~MarkStackTask() {
    // Make sure that we have cleared our mark stack.
    DCHECK_EQ(mark_stack_pos_, 0U);
    if (kCountTasks) {
      ++mark_sweep_->work_chunks_deleted_;
    }
  }

  MarkSweep* const mark_sweep_;
  ThreadPool* const thread_pool_;
  // Thread local mark stack for this task.
  StackReference<mirror::Object> mark_stack_[kMaxSize];
  // Mark stack position.
  size_t mark_stack_pos_;

  ALWAYS_INLINE void MarkStackPush(mirror::Object* obj)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (UNLIKELY(mark_stack_pos_ == kMaxSize)) {
      // Mark stack overflow, give 1/2 the stack to the thread pool as a new work task.
      mark_stack_pos_ /= 2;
      auto* task = new MarkStackTask(thread_pool_,
                                     mark_sweep_,
                                     kMaxSize - mark_stack_pos_,
                                     mark_stack_ + mark_stack_pos_);
      thread_pool_->AddTask(Thread::Current(), task);
    }
    DCHECK(obj != nullptr);
    DCHECK_LT(mark_stack_pos_, kMaxSize);
    mark_stack_[mark_stack_pos_++].Assign(obj);
  }

  void Finalize() override {
    delete this;
  }

  // Scans all of the objects.
  void Run(Thread* self ATTRIBUTE_UNUSED) override
      REQUIRES(Locks::heap_bitmap_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    ScanObjectParallelVisitor visitor(this);
    // TODO: Tune this.
    static const size_t kFifoSize = 4;
    BoundedFifoPowerOfTwo<mirror::Object*, kFifoSize> prefetch_fifo;
    for (;;) {
      mirror::Object* obj = nullptr;
      if (kUseMarkStackPrefetch) {
        while (mark_stack_pos_ != 0 && prefetch_fifo.size() < kFifoSize) {
          mirror::Object* const mark_stack_obj = mark_stack_[--mark_stack_pos_].AsMirrorPtr();
          DCHECK(mark_stack_obj != nullptr);
          __builtin_prefetch(mark_stack_obj);
          prefetch_fifo.push_back(mark_stack_obj);
        }
        if (UNLIKELY(prefetch_fifo.empty())) {
          break;
        }
        obj = prefetch_fifo.front();
        prefetch_fifo.pop_front();
      } else {
        if (UNLIKELY(mark_stack_pos_ == 0)) {
          break;
        }
        obj = mark_stack_[--mark_stack_pos_].AsMirrorPtr();
      }
      DCHECK(obj != nullptr);
      visitor(obj);
    }
  }
};

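// Scans the cards in [begin, end) against the given space bitmap, marking every object on a card
// whose value is at least minimum_age, then drains the local mark stack it inherited. When
// clear_card is set, scanned cards are cleared to reduce work in future collections.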
class MarkSweep::CardScanTask : public MarkStackTask<false> {
 public:
  CardScanTask(ThreadPool* thread_pool,
               MarkSweep* mark_sweep,
               accounting::ContinuousSpaceBitmap* bitmap,
               uint8_t* begin,
               uint8_t* end,
               uint8_t minimum_age,
               size_t mark_stack_size,
               StackReference<mirror::Object>* mark_stack_obj,
               bool clear_card)
      : MarkStackTask<false>(thread_pool, mark_sweep, mark_stack_size, mark_stack_obj),
        bitmap_(bitmap),
        begin_(begin),
        end_(end),
        minimum_age_(minimum_age),
        clear_card_(clear_card) {}

 protected:
  accounting::ContinuousSpaceBitmap* const bitmap_;
  uint8_t* const begin_;
  uint8_t* const end_;
  const uint8_t minimum_age_;
  const bool clear_card_;

  void Finalize() override {
    delete this;
  }

  void Run(Thread* self) override NO_THREAD_SAFETY_ANALYSIS {
    ScanObjectParallelVisitor visitor(this);
    accounting::CardTable* card_table = mark_sweep_->GetHeap()->GetCardTable();
    size_t cards_scanned = clear_card_
        ? card_table->Scan<true>(bitmap_, begin_, end_, visitor, minimum_age_)
        : card_table->Scan<false>(bitmap_, begin_, end_, visitor, minimum_age_);
    VLOG(heap) << "Parallel scanning cards " << reinterpret_cast<void*>(begin_) << " - "
               << reinterpret_cast<void*>(end_) << " = " << cards_scanned;
    // Finish by emptying our local mark stack.
    MarkStackTask::Run(self);
  }
};

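// Number of threads to use for the current phase, counting the calling GC thread itself. Falls
// back to a single thread when there is no thread pool or the process is in a background state.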
size_t MarkSweep::GetThreadCount(bool paused) const {
  // Use fewer threads if we are in a background state (non jank perceptible) since we want to
  // leave more CPU time for the foreground apps.
  if (heap_->GetThreadPool() == nullptr || !Runtime::Current()->InJankPerceptibleProcessState()) {
    return 1;
  }
  return (paused ? heap_->GetParallelGCThreadCount() : heap_->GetConcGCThreadCount()) + 1;
}

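// Scans objects on cards whose value is at least minimum_age. The parallel path splits each
// space's card range evenly across CardScanTasks and seeds those tasks with slices taken off the
// back of the shared mark stack; the serial path scans each space's cards inline.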
Ian Rogers13735952014-10-08 12:43:28 -0700881void MarkSweep::ScanGrayObjects(bool paused, uint8_t minimum_age) {
Mathieu Chartier94c32c52013-08-09 11:14:04 -0700882 accounting::CardTable* card_table = GetHeap()->GetCardTable();
883 ThreadPool* thread_pool = GetHeap()->GetThreadPool();
Mathieu Chartier2775ee42013-08-20 17:43:47 -0700884 size_t thread_count = GetThreadCount(paused);
885 // The parallel version with only one thread is faster for card scanning, TODO: fix.
Mathieu Chartierbbd695c2014-04-16 09:48:48 -0700886 if (kParallelCardScan && thread_count > 1) {
Mathieu Chartier720ef762013-08-17 14:46:54 -0700887 Thread* self = Thread::Current();
Mathieu Chartier94c32c52013-08-09 11:14:04 -0700888 // Can't have a different split for each space since multiple spaces can have their cards being
889 // scanned at the same time.
Mathieu Chartierf5997b42014-06-20 10:37:54 -0700890 TimingLogger::ScopedTiming t(paused ? "(Paused)ScanGrayObjects" : __FUNCTION__,
891 GetTimings());
Mathieu Chartier94c32c52013-08-09 11:14:04 -0700892 // Try to take some of the mark stack since we can pass this off to the worker tasks.
Mathieu Chartier97509952015-07-13 14:35:43 -0700893 StackReference<mirror::Object>* mark_stack_begin = mark_stack_->Begin();
894 StackReference<mirror::Object>* mark_stack_end = mark_stack_->End();
Mathieu Chartier720ef762013-08-17 14:46:54 -0700895 const size_t mark_stack_size = mark_stack_end - mark_stack_begin;
Mathieu Chartier94c32c52013-08-09 11:14:04 -0700896 // Estimated number of work tasks we will create.
897 const size_t mark_stack_tasks = GetHeap()->GetContinuousSpaces().size() * thread_count;
898 DCHECK_NE(mark_stack_tasks, 0U);
899 const size_t mark_stack_delta = std::min(CardScanTask::kMaxSize / 2,
900 mark_stack_size / mark_stack_tasks + 1);
901 for (const auto& space : GetHeap()->GetContinuousSpaces()) {
Mathieu Chartier590fee92013-09-13 13:46:47 -0700902 if (space->GetMarkBitmap() == nullptr) {
903 continue;
904 }
Ian Rogers13735952014-10-08 12:43:28 -0700905 uint8_t* card_begin = space->Begin();
906 uint8_t* card_end = space->End();
Hiroshi Yamauchi0941b042013-11-05 11:34:03 -0800907 // Align up the end address. For example, the image space's end
908 // may not be card-size-aligned.
909 card_end = AlignUp(card_end, accounting::CardTable::kCardSize);
Roland Levillain14d90572015-07-16 10:52:26 +0100910 DCHECK_ALIGNED(card_begin, accounting::CardTable::kCardSize);
911 DCHECK_ALIGNED(card_end, accounting::CardTable::kCardSize);
Mathieu Chartier94c32c52013-08-09 11:14:04 -0700912 // Calculate how many bytes of heap we will scan,
913 const size_t address_range = card_end - card_begin;
914 // Calculate how much address range each task gets.
915 const size_t card_delta = RoundUp(address_range / thread_count + 1,
916 accounting::CardTable::kCardSize);
      // If paused and the space is neither zygote nor image space, we can clear the dirty
      // cards to avoid accumulating them, which would increase card scanning load in the
      // following GC cycles. We need to keep dirty cards of image space and zygote space in
      // order to track references to the other spaces.
      bool clear_card = paused && !space->IsZygoteSpace() && !space->IsImageSpace();
      // Create the worker tasks for this space.
      while (card_begin != card_end) {
        // Add a range of cards.
        size_t addr_remaining = card_end - card_begin;
        size_t card_increment = std::min(card_delta, addr_remaining);
        // Take from the back of the mark stack.
        size_t mark_stack_remaining = mark_stack_end - mark_stack_begin;
        size_t mark_stack_increment = std::min(mark_stack_delta, mark_stack_remaining);
        mark_stack_end -= mark_stack_increment;
        mark_stack_->PopBackCount(static_cast<int32_t>(mark_stack_increment));
        DCHECK_EQ(mark_stack_end, mark_stack_->End());
        // Add the new task to the thread pool.
        auto* task = new CardScanTask(thread_pool,
                                      this,
                                      space->GetMarkBitmap(),
                                      card_begin,
                                      card_begin + card_increment,
                                      minimum_age,
                                      mark_stack_increment,
                                      mark_stack_end,
                                      clear_card);
        thread_pool->AddTask(self, task);
        card_begin += card_increment;
      }
    }

    // Note: the card scan below may dirty new cards (and scan them)
    // as a side effect when a Reference object is encountered and
    // queued during the marking. See b/11465268.
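    // One fewer pool worker than thread_count is requested because the calling thread also
    // processes tasks while it blocks in Wait() below (our reading of the do_work argument;
    // the thread pool interface is not documented at this call site).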
    thread_pool->SetMaxActiveWorkers(thread_count - 1);
    thread_pool->StartWorkers(self);
    thread_pool->Wait(self, true, true);
    thread_pool->StopWorkers(self);
  } else {
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if (space->GetMarkBitmap() != nullptr) {
        // Image spaces are handled properly since live == marked for them.
        const char* name = nullptr;
        switch (space->GetGcRetentionPolicy()) {
          case space::kGcRetentionPolicyNeverCollect:
            name = paused ? "(Paused)ScanGrayImageSpaceObjects" : "ScanGrayImageSpaceObjects";
            break;
          case space::kGcRetentionPolicyFullCollect:
            name = paused ? "(Paused)ScanGrayZygoteSpaceObjects" : "ScanGrayZygoteSpaceObjects";
            break;
          case space::kGcRetentionPolicyAlwaysCollect:
            name = paused ? "(Paused)ScanGrayAllocSpaceObjects" : "ScanGrayAllocSpaceObjects";
            break;
          default:
            LOG(FATAL) << "Unreachable";
            UNREACHABLE();
        }
        TimingLogger::ScopedTiming t(name, GetTimings());
        ScanObjectVisitor visitor(this);
        bool clear_card = paused && !space->IsZygoteSpace() && !space->IsImageSpace();
        if (clear_card) {
          card_table->Scan<true>(space->GetMarkBitmap(),
                                 space->Begin(),
                                 space->End(),
                                 visitor,
                                 minimum_age);
        } else {
          card_table->Scan<false>(space->GetMarkBitmap(),
                                  space->Begin(),
                                  space->End(),
                                  visitor,
                                  minimum_age);
        }
      }
    }
  }
}

class MarkSweep::RecursiveMarkTask : public MarkStackTask<false> {
 public:
  RecursiveMarkTask(ThreadPool* thread_pool,
                    MarkSweep* mark_sweep,
                    accounting::ContinuousSpaceBitmap* bitmap,
                    uintptr_t begin,
                    uintptr_t end)
      : MarkStackTask<false>(thread_pool, mark_sweep, 0, nullptr),
        bitmap_(bitmap),
        begin_(begin),
        end_(end) {}

 protected:
  accounting::ContinuousSpaceBitmap* const bitmap_;
  const uintptr_t begin_;
  const uintptr_t end_;

  void Finalize() override {
    delete this;
  }

  // Scans all of the marked objects in [begin_, end_).
  void Run(Thread* self) override NO_THREAD_SAFETY_ANALYSIS {
    ScanObjectParallelVisitor visitor(this);
    bitmap_->VisitMarkedRange(begin_, end_, visitor);
    // Finish by emptying our local mark stack.
    MarkStackTask::Run(self);
  }
};

// Populates the mark stack based on the set of marked objects and
// recursively marks until the mark stack is emptied.
void MarkSweep::RecursiveMark() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  // RecursiveMark will build the lists of known instances of the Reference classes. See
  // DelayReferenceReferent for details.
  if (kUseRecursiveMark) {
    const bool partial = GetGcType() == kGcTypePartial;
    ScanObjectVisitor scan_visitor(this);
    auto* self = Thread::Current();
    ThreadPool* thread_pool = heap_->GetThreadPool();
    size_t thread_count = GetThreadCount(false);
    const bool parallel = kParallelRecursiveMark && thread_count > 1;
    mark_stack_->Reset();
    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if ((space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) ||
          (!partial && space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect)) {
        current_space_bitmap_ = space->GetMarkBitmap();
        if (current_space_bitmap_ == nullptr) {
          continue;
        }
        if (parallel) {
          // We will use the mark stack in the future.
          // CHECK(mark_stack_->IsEmpty());
          // This function does not handle heap end increasing, so we must use the space end.
          uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
          uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
          atomic_finger_.store(AtomicInteger::MaxValue(), std::memory_order_relaxed);

          // Create a few worker tasks.
          const size_t n = thread_count * 2;
          while (begin != end) {
            uintptr_t start = begin;
            uintptr_t delta = (end - begin) / n;
            delta = RoundUp(delta, KB);
            if (delta < 16 * KB) delta = end - begin;
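            // Each task spans roughly 1/(2 * thread_count) of the space, rounded to whole KB;
            // remainders under 16 KB are folded into one final task, presumably because very
            // small tasks cost more to queue than they recover in parallelism.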
            begin += delta;
            auto* task = new RecursiveMarkTask(thread_pool,
                                               this,
                                               current_space_bitmap_,
                                               start,
                                               begin);
            thread_pool->AddTask(self, task);
          }
          thread_pool->SetMaxActiveWorkers(thread_count - 1);
          thread_pool->StartWorkers(self);
          thread_pool->Wait(self, true, true);
          thread_pool->StopWorkers(self);
        } else {
          // This function does not handle heap end increasing, so we must use the space end.
          uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
          uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
          current_space_bitmap_->VisitMarkedRange(begin, end, scan_visitor);
        }
      }
    }
  }
  ProcessMarkStack(false);
}

void MarkSweep::RecursiveMarkDirtyObjects(bool paused, uint8_t minimum_age) {
  ScanGrayObjects(paused, minimum_age);
  ProcessMarkStack(paused);
}

void MarkSweep::ReMarkRoots() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
  Runtime::Current()->VisitRoots(this, static_cast<VisitRootFlags>(
      kVisitRootFlagNewRoots | kVisitRootFlagStopLoggingNewRoots | kVisitRootFlagClearRootLog));
  if (kVerifyRootsMarked) {
    TimingLogger::ScopedTiming t2("(Paused)VerifyRoots", GetTimings());
    VerifyRootMarkedVisitor visitor(this);
    Runtime::Current()->VisitRoots(&visitor);
  }
}

void MarkSweep::SweepSystemWeaks(Thread* self) {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
  Runtime::Current()->SweepSystemWeaks(this);
}

class MarkSweep::VerifySystemWeakVisitor : public IsMarkedVisitor {
 public:
  explicit VerifySystemWeakVisitor(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {}

  mirror::Object* IsMarked(mirror::Object* obj) override
      REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
    mark_sweep_->VerifyIsLive(obj);
    return obj;
  }

  MarkSweep* const mark_sweep_;
};

void MarkSweep::VerifyIsLive(const mirror::Object* obj) {
  if (!heap_->GetLiveBitmap()->Test(obj)) {
    // TODO: Consider live stack? Has this code bitrotted?
    CHECK(!heap_->allocation_stack_->Contains(obj))
        << "Found dead object " << obj << "\n" << heap_->DumpSpaces();
  }
}

void MarkSweep::VerifySystemWeaks() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  // Verify system weaks, using a special object visitor which returns the input object.
  VerifySystemWeakVisitor visitor(this);
  Runtime::Current()->SweepSystemWeaks(&visitor);
}

class MarkSweep::CheckpointMarkThreadRoots : public Closure, public RootVisitor {
 public:
  CheckpointMarkThreadRoots(MarkSweep* mark_sweep,
                            bool revoke_ros_alloc_thread_local_buffers_at_checkpoint)
      : mark_sweep_(mark_sweep),
        revoke_ros_alloc_thread_local_buffers_at_checkpoint_(
            revoke_ros_alloc_thread_local_buffers_at_checkpoint) {
  }

  void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED)
      override REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(Locks::heap_bitmap_lock_) {
    for (size_t i = 0; i < count; ++i) {
      mark_sweep_->MarkObjectNonNullParallel(*roots[i]);
    }
  }

  void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
                  size_t count,
                  const RootInfo& info ATTRIBUTE_UNUSED)
      override REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(Locks::heap_bitmap_lock_) {
    for (size_t i = 0; i < count; ++i) {
      mark_sweep_->MarkObjectNonNullParallel(roots[i]->AsMirrorPtr());
    }
  }

  void Run(Thread* thread) override NO_THREAD_SAFETY_ANALYSIS {
    ScopedTrace trace("Marking thread roots");
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* const self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    thread->VisitRoots(this, kVisitRootFlagAllRoots);
    if (revoke_ros_alloc_thread_local_buffers_at_checkpoint_) {
      ScopedTrace trace2("RevokeRosAllocThreadLocalBuffers");
      mark_sweep_->GetHeap()->RevokeRosAllocThreadLocalBuffers(thread);
    }
    // If thread is a running mutator, then act on behalf of the garbage collector.
    // See the code in ThreadList::RunCheckpoint.
    mark_sweep_->GetBarrier().Pass(self);
  }

 private:
  MarkSweep* const mark_sweep_;
  const bool revoke_ros_alloc_thread_local_buffers_at_checkpoint_;
};

void MarkSweep::MarkRootsCheckpoint(Thread* self,
                                    bool revoke_ros_alloc_thread_local_buffers_at_checkpoint) {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  CheckpointMarkThreadRoots check_point(this, revoke_ros_alloc_thread_local_buffers_at_checkpoint);
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  // Request that the checkpoint be run on all threads, returning a count of the threads that
  // must run through the barrier, including self.
  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
  // Release locks then wait for all mutator threads to pass the barrier.
  // If there are no threads to wait for, which implies that all the checkpoint functions are
  // already finished, then there is no need to release the locks.
  if (barrier_count == 0) {
    return;
  }
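  // The scoped block below switches this thread to kWaitingForCheckPointsToRun while it blocks
  // on the barrier; both locks are dropped first so other threads are not held up while the GC
  // waits (our inference; the original comments do not spell out the reason).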
  Locks::heap_bitmap_lock_->ExclusiveUnlock(self);
  Locks::mutator_lock_->SharedUnlock(self);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  Locks::mutator_lock_->SharedLock(self);
  Locks::heap_bitmap_lock_->ExclusiveLock(self);
}

void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitmaps) {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  Thread* self = Thread::Current();
  mirror::Object** chunk_free_buffer = reinterpret_cast<mirror::Object**>(
      sweep_array_free_buffer_mem_map_.BaseBegin());
  size_t chunk_free_pos = 0;
  ObjectBytePair freed;
  ObjectBytePair freed_los;
  // How many objects are left in the array, modified after each space is swept.
  StackReference<mirror::Object>* objects = allocations->Begin();
  size_t count = allocations->Size();
  // Change the order to ensure that the non-moving space is swept last as an optimization.
  std::vector<space::ContinuousSpace*> sweep_spaces;
  space::ContinuousSpace* non_moving_space = nullptr;
  for (space::ContinuousSpace* space : heap_->GetContinuousSpaces()) {
    if (space->IsAllocSpace() &&
        !immune_spaces_.ContainsSpace(space) &&
        space->GetLiveBitmap() != nullptr) {
      if (space == heap_->GetNonMovingSpace()) {
        non_moving_space = space;
      } else {
        sweep_spaces.push_back(space);
      }
    }
  }
  // We are unlikely to sweep a significant amount of non-movable objects, so we do these after
  // the other alloc spaces as an optimization.
  if (non_moving_space != nullptr) {
    sweep_spaces.push_back(non_moving_space);
  }
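  // Dead objects found below are batched into chunk_free_buffer and released through FreeList
  // in groups of kSweepArrayChunkFreeSize, amortizing per-call allocator overhead (a reading
  // of the batching logic; the constant is defined near the top of this file).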
  // Start by sweeping the continuous spaces.
  for (space::ContinuousSpace* space : sweep_spaces) {
    space::AllocSpace* alloc_space = space->AsAllocSpace();
    accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
    accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
    if (swap_bitmaps) {
      std::swap(live_bitmap, mark_bitmap);
    }
    StackReference<mirror::Object>* out = objects;
    for (size_t i = 0; i < count; ++i) {
      mirror::Object* const obj = objects[i].AsMirrorPtr();
      if (kUseThreadLocalAllocationStack && obj == nullptr) {
        continue;
      }
      if (space->HasAddress(obj)) {
        // This object is in the space, remove it from the array and add it to the sweep buffer
        // if needed.
        if (!mark_bitmap->Test(obj)) {
          if (chunk_free_pos >= kSweepArrayChunkFreeSize) {
            TimingLogger::ScopedTiming t2("FreeList", GetTimings());
            freed.objects += chunk_free_pos;
            freed.bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
            chunk_free_pos = 0;
          }
          chunk_free_buffer[chunk_free_pos++] = obj;
        }
      } else {
        (out++)->Assign(obj);
      }
    }
    if (chunk_free_pos > 0) {
      TimingLogger::ScopedTiming t2("FreeList", GetTimings());
      freed.objects += chunk_free_pos;
      freed.bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer);
      chunk_free_pos = 0;
    }
    // All of the references which the space contained are no longer in the allocation stack;
    // update the count.
    count = out - objects;
  }
  // Handle the large object space.
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  if (large_object_space != nullptr) {
    accounting::LargeObjectBitmap* large_live_objects = large_object_space->GetLiveBitmap();
    accounting::LargeObjectBitmap* large_mark_objects = large_object_space->GetMarkBitmap();
    if (swap_bitmaps) {
      std::swap(large_live_objects, large_mark_objects);
    }
    for (size_t i = 0; i < count; ++i) {
      mirror::Object* const obj = objects[i].AsMirrorPtr();
      // Handle large objects.
      if (kUseThreadLocalAllocationStack && obj == nullptr) {
        continue;
      }
      if (!large_mark_objects->Test(obj)) {
        ++freed_los.objects;
        freed_los.bytes += large_object_space->Free(self, obj);
      }
    }
  }
  {
    TimingLogger::ScopedTiming t2("RecordFree", GetTimings());
    RecordFree(freed);
    RecordFreeLOS(freed_los);
    t2.NewTiming("ResetStack");
    allocations->Reset();
  }
  sweep_array_free_buffer_mem_map_.MadviseDontNeedAndZero();
}

void MarkSweep::Sweep(bool swap_bitmaps) {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  // Ensure that nobody inserted items in the live stack after we swapped the stacks.
  CHECK_GE(live_stack_freeze_size_, GetHeap()->GetLiveStack()->Size());
  {
    TimingLogger::ScopedTiming t2("MarkAllocStackAsLive", GetTimings());
    // Mark everything allocated since the last GC as live so that we can sweep concurrently,
    // knowing that new allocations won't be marked as live.
    accounting::ObjectStack* live_stack = heap_->GetLiveStack();
    heap_->MarkAllocStackAsLive(live_stack);
    live_stack->Reset();
    DCHECK(mark_stack_->IsEmpty());
  }
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsContinuousMemMapAllocSpace()) {
      space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
      TimingLogger::ScopedTiming split(
          alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepMallocSpace",
          GetTimings());
      RecordFree(alloc_space->Sweep(swap_bitmaps));
    }
  }
  SweepLargeObjects(swap_bitmaps);
}

void MarkSweep::SweepLargeObjects(bool swap_bitmaps) {
  space::LargeObjectSpace* los = heap_->GetLargeObjectsSpace();
  if (los != nullptr) {
    TimingLogger::ScopedTiming split(__FUNCTION__, GetTimings());
    RecordFreeLOS(los->Sweep(swap_bitmaps));
  }
}

// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
void MarkSweep::DelayReferenceReferent(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref) {
  heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, ref, this);
}

class MarkVisitor {
 public:
  ALWAYS_INLINE explicit MarkVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {}

  ALWAYS_INLINE void operator()(mirror::Object* obj,
                                MemberOffset offset,
                                bool is_static ATTRIBUTE_UNUSED) const
      REQUIRES(Locks::heap_bitmap_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (kCheckLocks) {
      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    mark_sweep_->MarkObject(obj->GetFieldObject<mirror::Object>(offset), obj, offset);
  }

  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
      REQUIRES(Locks::heap_bitmap_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (!root->IsNull()) {
      VisitRoot(root);
    }
  }

  void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
      REQUIRES(Locks::heap_bitmap_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (kCheckLocks) {
      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    mark_sweep_->MarkObject(root->AsMirrorPtr());
  }

 private:
  MarkSweep* const mark_sweep_;
};
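
// Note: MarkVisitor covers instance fields and native roots inside objects, while the
// DelayReferenceReferentVisitor used in ScanObject below handles the referent of
// java.lang.ref.Reference objects; together they supply the two callbacks that
// ScanObjectVisit consumes.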

// Scans an object reference. Determines the type of the reference
// and dispatches to a specialized scanning routine.
void MarkSweep::ScanObject(mirror::Object* obj) {
  MarkVisitor mark_visitor(this);
  DelayReferenceReferentVisitor ref_visitor(this);
  ScanObjectVisit(obj, mark_visitor, ref_visitor);
}

void MarkSweep::ProcessMarkStackParallel(size_t thread_count) {
  Thread* self = Thread::Current();
  ThreadPool* thread_pool = GetHeap()->GetThreadPool();
  const size_t chunk_size = std::min(mark_stack_->Size() / thread_count + 1,
                                     static_cast<size_t>(MarkStackTask<false>::kMaxSize));
  CHECK_GT(chunk_size, 0U);
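  // For example (hypothetical numbers): 10,000 stack entries across 4 threads yields chunks of
  // 2,501 references, unless that exceeds MarkStackTask<false>::kMaxSize, which caps the chunk.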
  // Split the current mark stack up into work tasks.
  for (auto* it = mark_stack_->Begin(), *end = mark_stack_->End(); it < end; ) {
    const size_t delta = std::min(static_cast<size_t>(end - it), chunk_size);
    thread_pool->AddTask(self, new MarkStackTask<false>(thread_pool, this, delta, it));
    it += delta;
  }
  thread_pool->SetMaxActiveWorkers(thread_count - 1);
  thread_pool->StartWorkers(self);
  thread_pool->Wait(self, true, true);
  thread_pool->StopWorkers(self);
  mark_stack_->Reset();
  CHECK_EQ(work_chunks_created_.load(std::memory_order_seq_cst),
           work_chunks_deleted_.load(std::memory_order_seq_cst))
      << " some of the work chunks were leaked";
}

// Scan anything that's on the mark stack.
void MarkSweep::ProcessMarkStack(bool paused) {
  TimingLogger::ScopedTiming t(paused ? "(Paused)ProcessMarkStack" : __FUNCTION__, GetTimings());
  size_t thread_count = GetThreadCount(paused);
  if (kParallelProcessMarkStack && thread_count > 1 &&
      mark_stack_->Size() >= kMinimumParallelMarkStackSize) {
    ProcessMarkStackParallel(thread_count);
  } else {
    // TODO: Tune this.
    static const size_t kFifoSize = 4;
    BoundedFifoPowerOfTwo<mirror::Object*, kFifoSize> prefetch_fifo;
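    // The bounded FIFO forms a small software pipeline: each popped object is prefetched when
    // it enters the FIFO and scanned only kFifoSize pops later, giving the cache line time to
    // arrive before the object's fields are read.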
    for (;;) {
      mirror::Object* obj = nullptr;
      if (kUseMarkStackPrefetch) {
        while (!mark_stack_->IsEmpty() && prefetch_fifo.size() < kFifoSize) {
          mirror::Object* mark_stack_obj = mark_stack_->PopBack();
          DCHECK(mark_stack_obj != nullptr);
          __builtin_prefetch(mark_stack_obj);
          prefetch_fifo.push_back(mark_stack_obj);
        }
        if (prefetch_fifo.empty()) {
          break;
        }
        obj = prefetch_fifo.front();
        prefetch_fifo.pop_front();
      } else {
        if (mark_stack_->IsEmpty()) {
          break;
        }
        obj = mark_stack_->PopBack();
      }
      DCHECK(obj != nullptr);
      ScanObject(obj);
    }
  }
}

inline mirror::Object* MarkSweep::IsMarked(mirror::Object* object) {
  if (immune_spaces_.IsInImmuneRegion(object)) {
    return object;
  }
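  // Objects in immune spaces are treated as always marked. Otherwise check the bitmap of the
  // space currently being scanned first, since most lookups should hit it, before falling back
  // to the heap-wide mark bitmap (the "most lookups" rationale is our assumption).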
  if (current_space_bitmap_->HasAddress(object)) {
    return current_space_bitmap_->Test(object) ? object : nullptr;
  }
  return mark_bitmap_->Test(object) ? object : nullptr;
}

void MarkSweep::FinishPhase() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  if (kCountScannedTypes) {
    VLOG(gc)
        << "MarkSweep scanned"
        << " no reference objects=" << no_reference_class_count_.load(std::memory_order_relaxed)
        << " normal objects=" << normal_count_.load(std::memory_order_relaxed)
        << " classes=" << class_count_.load(std::memory_order_relaxed)
        << " object arrays=" << object_array_count_.load(std::memory_order_relaxed)
        << " references=" << reference_count_.load(std::memory_order_relaxed)
        << " other=" << other_count_.load(std::memory_order_relaxed);
  }
  if (kCountTasks) {
    VLOG(gc)
        << "Total number of work chunks allocated: "
        << work_chunks_created_.load(std::memory_order_relaxed);
  }
  if (kMeasureOverhead) {
    VLOG(gc) << "Overhead time " << PrettyDuration(overhead_time_.load(std::memory_order_relaxed));
  }
  if (kProfileLargeObjects) {
    VLOG(gc)
        << "Large objects tested " << large_object_test_.load(std::memory_order_relaxed)
        << " marked " << large_object_mark_.load(std::memory_order_relaxed);
  }
  if (kCountMarkedObjects) {
    VLOG(gc)
        << "Marked: null=" << mark_null_count_.load(std::memory_order_relaxed)
        << " immune=" << mark_immune_count_.load(std::memory_order_relaxed)
        << " fastpath=" << mark_fastpath_count_.load(std::memory_order_relaxed)
        << " slowpath=" << mark_slowpath_count_.load(std::memory_order_relaxed);
  }
  CHECK(mark_stack_->IsEmpty());  // Ensure that the mark stack is empty.
  mark_stack_->Reset();
  Thread* const self = Thread::Current();
  ReaderMutexLock mu(self, *Locks::mutator_lock_);
  WriterMutexLock mu2(self, *Locks::heap_bitmap_lock_);
  heap_->ClearMarkedObjects();
}

void MarkSweep::RevokeAllThreadLocalBuffers() {
  if (kRevokeRosAllocThreadLocalBuffersAtCheckpoint && IsConcurrent()) {
    // If concurrent, rosalloc thread-local buffers are revoked at the
    // thread checkpoint. Bump pointer space thread-local buffers must
    // not be in use.
    GetHeap()->AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked();
  } else {
    TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
    GetHeap()->RevokeAllThreadLocalBuffers();
  }
}

}  // namespace collector
}  // namespace gc
}  // namespace art