/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "concurrent_copying.h"

#include "art_field-inl.h"
#include "base/enums.h"
#include "base/histogram-inl.h"
#include "base/stl_util.h"
#include "base/systrace.h"
#include "debugger.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/mod_union_table-inl.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/gc_pause_listener.h"
#include "gc/reference_processor.h"
#include "gc/space/image_space.h"
#include "gc/space/space-inl.h"
#include "image-inl.h"
#include "intern_table.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "scoped_thread_state_change-inl.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "well_known_classes.h"

namespace art {
namespace gc {
namespace collector {

static constexpr size_t kDefaultGcMarkStackSize = 2 * MB;
// If kFilterModUnionCards is true, we attempt to filter out cards that don't need to be dirty in
// the mod-union table. Disabled since it does not seem to help the pause much.
static constexpr bool kFilterModUnionCards = kIsDebugBuild;
// If kDisallowReadBarrierDuringScan is true, the GC aborts if any read barriers occur during
// ConcurrentCopying::Scan. May be used to diagnose possibly unnecessary read barriers.
// Only enabled for kIsDebugBuild to avoid performance hit.
static constexpr bool kDisallowReadBarrierDuringScan = kIsDebugBuild;
// Slow path mark stack size; increase this if the stack is getting full and it is causing
// performance problems.
static constexpr size_t kReadBarrierMarkStackSize = 512 * KB;
// Verify that there are no missing card marks.
static constexpr bool kVerifyNoMissingCardMarks = kIsDebugBuild;

Mathieu Chartier56fe2582016-07-14 13:30:03 -070059ConcurrentCopying::ConcurrentCopying(Heap* heap,
60 const std::string& name_prefix,
61 bool measure_read_barrier_slow_path)
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -080062 : GarbageCollector(heap,
63 name_prefix + (name_prefix.empty() ? "" : " ") +
Hiroshi Yamauchi88e08162017-01-06 15:03:26 -080064 "concurrent copying"),
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -070065 region_space_(nullptr), gc_barrier_(new Barrier(0)),
66 gc_mark_stack_(accounting::ObjectStack::Create("concurrent copying gc mark stack",
Hiroshi Yamauchi19eab402015-10-23 19:59:58 -070067 kDefaultGcMarkStackSize,
68 kDefaultGcMarkStackSize)),
Mathieu Chartier36a270a2016-07-28 18:08:51 -070069 rb_mark_bit_stack_(accounting::ObjectStack::Create("rb copying gc mark stack",
70 kReadBarrierMarkStackSize,
71 kReadBarrierMarkStackSize)),
72 rb_mark_bit_stack_full_(false),
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -070073 mark_stack_lock_("concurrent copying mark stack lock", kMarkSweepMarkStackLock),
74 thread_running_gc_(nullptr),
Andreas Gamped9911ee2017-03-27 13:27:24 -070075 is_marking_(false),
76 is_active_(false),
77 is_asserting_to_space_invariant_(false),
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -070078 region_space_bitmap_(nullptr),
Andreas Gamped9911ee2017-03-27 13:27:24 -070079 heap_mark_bitmap_(nullptr),
80 live_stack_freeze_size_(0),
81 from_space_num_objects_at_first_pause_(0),
82 from_space_num_bytes_at_first_pause_(0),
83 mark_stack_mode_(kMarkStackModeOff),
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -070084 weak_ref_access_enabled_(true),
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -080085 skipped_blocks_lock_("concurrent copying bytes blocks lock", kMarkSweepMarkStackLock),
Mathieu Chartier56fe2582016-07-14 13:30:03 -070086 measure_read_barrier_slow_path_(measure_read_barrier_slow_path),
Andreas Gamped9911ee2017-03-27 13:27:24 -070087 mark_from_read_barrier_measurements_(false),
Mathieu Chartier56fe2582016-07-14 13:30:03 -070088 rb_slow_path_ns_(0),
89 rb_slow_path_count_(0),
90 rb_slow_path_count_gc_(0),
91 rb_slow_path_histogram_lock_("Read barrier histogram lock"),
92 rb_slow_path_time_histogram_("Mutator time in read barrier slow path", 500, 32),
93 rb_slow_path_count_total_(0),
94 rb_slow_path_count_gc_total_(0),
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -080095 rb_table_(heap_->GetReadBarrierTable()),
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -070096 force_evacuate_all_(false),
Andreas Gamped9911ee2017-03-27 13:27:24 -070097 gc_grays_immune_objects_(false),
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -070098 immune_gray_stack_lock_("concurrent copying immune gray stack lock",
99 kMarkSweepMarkStackLock) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -0800100 static_assert(space::RegionSpace::kRegionSize == accounting::ReadBarrierTable::kRegionSize,
101 "The region space size and the read barrier table region size must match");
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -0700102 Thread* self = Thread::Current();
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -0800103 {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -0800104 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // Cache the mark bitmap so that we won't have to lock heap_bitmap_lock_ in Mark(), which
    // could cause a nested lock on heap_bitmap_lock_ (if the GC triggers a read barrier while
    // collecting) or a lock order violation between class_linker_lock_ and heap_bitmap_lock_.
109 heap_mark_bitmap_ = heap->GetMarkBitmap();
110 }
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -0700111 {
112 MutexLock mu(self, mark_stack_lock_);
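    // Pre-allocate a pool of mark stacks that mutator threads will borrow as their thread-local
    // mark stacks during marking.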
113 for (size_t i = 0; i < kMarkStackPoolSize; ++i) {
114 accounting::AtomicStack<mirror::Object>* mark_stack =
115 accounting::AtomicStack<mirror::Object>::Create(
116 "thread local mark stack", kMarkStackSize, kMarkStackSize);
117 pooled_mark_stacks_.push_back(mark_stack);
118 }
119 }
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -0800120}
121
Hiroshi Yamauchi057d9772017-02-17 15:33:23 -0800122void ConcurrentCopying::MarkHeapReference(mirror::HeapReference<mirror::Object>* field,
123 bool do_atomic_update) {
124 if (UNLIKELY(do_atomic_update)) {
125 // Used to mark the referent in DelayReferenceReferent in transaction mode.
126 mirror::Object* from_ref = field->AsMirrorPtr();
127 if (from_ref == nullptr) {
128 return;
129 }
130 mirror::Object* to_ref = Mark(from_ref);
131 if (from_ref != to_ref) {
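      // Install to_ref with a weak CAS, retrying on spurious failure; stop if a mutator has
      // concurrently stored a different reference into the field.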
132 do {
133 if (field->AsMirrorPtr() != from_ref) {
134 // Concurrently overwritten by a mutator.
135 break;
136 }
137 } while (!field->CasWeakRelaxed(from_ref, to_ref));
138 }
139 } else {
    // Used for preserving soft references. It should be OK to not have a CAS here since there
    // should be no other threads which can trigger read barriers on the same referent during
    // reference processing.
143 field->Assign(Mark(field->AsMirrorPtr()));
144 }
Mathieu Chartier97509952015-07-13 14:35:43 -0700145}
146
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -0800147ConcurrentCopying::~ConcurrentCopying() {
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -0700148 STLDeleteElements(&pooled_mark_stacks_);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -0800149}
150
151void ConcurrentCopying::RunPhases() {
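  // Overall flow: InitializePhase runs concurrently, FlipThreadRoots pauses the world to flip
  // thread roots, MarkingPhase marks concurrently, an optional paused verification checks for
  // stale from-space refs, then ReclaimPhase and FinishPhase complete the collection.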
152 CHECK(kUseBakerReadBarrier || kUseTableLookupReadBarrier);
153 CHECK(!is_active_);
154 is_active_ = true;
155 Thread* self = Thread::Current();
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -0700156 thread_running_gc_ = self;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -0800157 Locks::mutator_lock_->AssertNotHeld(self);
158 {
159 ReaderMutexLock mu(self, *Locks::mutator_lock_);
160 InitializePhase();
161 }
162 FlipThreadRoots();
163 {
164 ReaderMutexLock mu(self, *Locks::mutator_lock_);
165 MarkingPhase();
166 }
167 // Verify no from space refs. This causes a pause.
Andreas Gampee3ce7872017-02-22 13:36:21 -0800168 if (kEnableNoFromSpaceRefsVerification) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -0800169 TimingLogger::ScopedTiming split("(Paused)VerifyNoFromSpaceReferences", GetTimings());
Andreas Gampe4934eb12017-01-30 13:15:26 -0800170 ScopedPause pause(this, false);
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -0700171 CheckEmptyMarkStack();
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -0800172 if (kVerboseMode) {
173 LOG(INFO) << "Verifying no from-space refs";
174 }
175 VerifyNoFromSpaceReferences();
Mathieu Chartier720e71a2015-04-06 17:10:58 -0700176 if (kVerboseMode) {
177 LOG(INFO) << "Done verifying no from-space refs";
178 }
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -0700179 CheckEmptyMarkStack();
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -0800180 }
181 {
182 ReaderMutexLock mu(self, *Locks::mutator_lock_);
183 ReclaimPhase();
184 }
185 FinishPhase();
186 CHECK(is_active_);
187 is_active_ = false;
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -0700188 thread_running_gc_ = nullptr;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -0800189}
190
191void ConcurrentCopying::BindBitmaps() {
192 Thread* self = Thread::Current();
193 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
194 // Mark all of the spaces we never collect as immune.
195 for (const auto& space : heap_->GetContinuousSpaces()) {
Mathieu Chartier763a31e2015-11-16 16:05:55 -0800196 if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect ||
197 space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -0800198 CHECK(space->IsZygoteSpace() || space->IsImageSpace());
Mathieu Chartier763a31e2015-11-16 16:05:55 -0800199 immune_spaces_.AddSpace(space);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -0800200 } else if (space == region_space_) {
      // It is OK to clear the bitmap with mutators running since the only place it is read is
      // VisitObjects, which is mutually exclusive with CC.
203 region_space_bitmap_ = region_space_->GetMarkBitmap();
204 region_space_bitmap_->Clear();
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -0800205 }
206 }
207}
208
209void ConcurrentCopying::InitializePhase() {
210 TimingLogger::ScopedTiming split("InitializePhase", GetTimings());
211 if (kVerboseMode) {
212 LOG(INFO) << "GC InitializePhase";
213 LOG(INFO) << "Region-space : " << reinterpret_cast<void*>(region_space_->Begin()) << "-"
214 << reinterpret_cast<void*>(region_space_->Limit());
215 }
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -0700216 CheckEmptyMarkStack();
Hiroshi Yamauchi8e674652015-12-22 11:09:18 -0800217 if (kIsDebugBuild) {
218 MutexLock mu(Thread::Current(), mark_stack_lock_);
219 CHECK(false_gray_stack_.empty());
220 }
Mathieu Chartier56fe2582016-07-14 13:30:03 -0700221
Mathieu Chartier36a270a2016-07-28 18:08:51 -0700222 rb_mark_bit_stack_full_ = false;
Mathieu Chartier56fe2582016-07-14 13:30:03 -0700223 mark_from_read_barrier_measurements_ = measure_read_barrier_slow_path_;
224 if (measure_read_barrier_slow_path_) {
225 rb_slow_path_ns_.StoreRelaxed(0);
226 rb_slow_path_count_.StoreRelaxed(0);
227 rb_slow_path_count_gc_.StoreRelaxed(0);
228 }
229
Mathieu Chartier763a31e2015-11-16 16:05:55 -0800230 immune_spaces_.Reset();
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -0800231 bytes_moved_.StoreRelaxed(0);
232 objects_moved_.StoreRelaxed(0);
Hiroshi Yamauchi60985b72016-08-24 13:53:12 -0700233 GcCause gc_cause = GetCurrentIteration()->GetGcCause();
234 if (gc_cause == kGcCauseExplicit ||
235 gc_cause == kGcCauseForNativeAlloc ||
236 gc_cause == kGcCauseCollectorTransition ||
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -0800237 GetCurrentIteration()->GetClearSoftReferences()) {
238 force_evacuate_all_ = true;
239 } else {
240 force_evacuate_all_ = false;
241 }
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -0700242 if (kUseBakerReadBarrier) {
243 updated_all_immune_objects_.StoreRelaxed(false);
244 // GC may gray immune objects in the thread flip.
245 gc_grays_immune_objects_ = true;
246 if (kIsDebugBuild) {
247 MutexLock mu(Thread::Current(), immune_gray_stack_lock_);
248 DCHECK(immune_gray_stack_.empty());
249 }
250 }
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -0800251 BindBitmaps();
252 if (kVerboseMode) {
253 LOG(INFO) << "force_evacuate_all=" << force_evacuate_all_;
Mathieu Chartier763a31e2015-11-16 16:05:55 -0800254 LOG(INFO) << "Largest immune region: " << immune_spaces_.GetLargestImmuneRegion().Begin()
255 << "-" << immune_spaces_.GetLargestImmuneRegion().End();
256 for (space::ContinuousSpace* space : immune_spaces_.GetSpaces()) {
257 LOG(INFO) << "Immune space: " << *space;
258 }
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -0800259 LOG(INFO) << "GC end of InitializePhase";
260 }
Mathieu Chartier962cd7a2016-08-16 12:15:59 -0700261 // Mark all of the zygote large objects without graying them.
262 MarkZygoteLargeObjects();
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -0800263}
264
265// Used to switch the thread roots of a thread from from-space refs to to-space refs.
Hiroshi Yamauchi7e9b2572016-07-20 20:25:27 -0700266class ConcurrentCopying::ThreadFlipVisitor : public Closure, public RootVisitor {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -0800267 public:
Roland Levillain3887c462015-08-12 18:15:42 +0100268 ThreadFlipVisitor(ConcurrentCopying* concurrent_copying, bool use_tlab)
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -0800269 : concurrent_copying_(concurrent_copying), use_tlab_(use_tlab) {
270 }
271
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700272 virtual void Run(Thread* thread) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -0800273 // Note: self is not necessarily equal to thread since thread may be suspended.
274 Thread* self = Thread::Current();
275 CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
276 << thread->GetState() << " thread " << thread << " self " << self;
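    // Mark the thread as GC-marking and update its entrypoints so that its read barriers take the
    // marking paths from here on.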
Mathieu Chartierfe814e82016-11-09 14:32:49 -0800277 thread->SetIsGcMarkingAndUpdateEntrypoints(true);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -0800278 if (use_tlab_ && thread->HasTlab()) {
279 if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) {
280 // This must come before the revoke.
281 size_t thread_local_objects = thread->GetThreadLocalObjectsAllocated();
282 concurrent_copying_->region_space_->RevokeThreadLocalBuffers(thread);
283 reinterpret_cast<Atomic<size_t>*>(&concurrent_copying_->from_space_num_objects_at_first_pause_)->
284 FetchAndAddSequentiallyConsistent(thread_local_objects);
285 } else {
286 concurrent_copying_->region_space_->RevokeThreadLocalBuffers(thread);
287 }
288 }
289 if (kUseThreadLocalAllocationStack) {
290 thread->RevokeThreadLocalAllocationStack();
291 }
292 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
Hiroshi Yamauchi7e9b2572016-07-20 20:25:27 -0700293 // We can use the non-CAS VisitRoots functions below because we update thread-local GC roots
294 // only.
295 thread->VisitRoots(this);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -0800296 concurrent_copying_->GetBarrier().Pass(self);
297 }
298
Hiroshi Yamauchi7e9b2572016-07-20 20:25:27 -0700299 void VisitRoots(mirror::Object*** roots,
300 size_t count,
301 const RootInfo& info ATTRIBUTE_UNUSED)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700302 REQUIRES_SHARED(Locks::mutator_lock_) {
Hiroshi Yamauchi7e9b2572016-07-20 20:25:27 -0700303 for (size_t i = 0; i < count; ++i) {
304 mirror::Object** root = roots[i];
305 mirror::Object* ref = *root;
306 if (ref != nullptr) {
307 mirror::Object* to_ref = concurrent_copying_->Mark(ref);
308 if (to_ref != ref) {
309 *root = to_ref;
310 }
311 }
312 }
313 }
314
315 void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
316 size_t count,
317 const RootInfo& info ATTRIBUTE_UNUSED)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700318 REQUIRES_SHARED(Locks::mutator_lock_) {
Hiroshi Yamauchi7e9b2572016-07-20 20:25:27 -0700319 for (size_t i = 0; i < count; ++i) {
320 mirror::CompressedReference<mirror::Object>* const root = roots[i];
321 if (!root->IsNull()) {
322 mirror::Object* ref = root->AsMirrorPtr();
323 mirror::Object* to_ref = concurrent_copying_->Mark(ref);
324 if (to_ref != ref) {
325 root->Assign(to_ref);
326 }
327 }
328 }
329 }
330
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -0800331 private:
332 ConcurrentCopying* const concurrent_copying_;
333 const bool use_tlab_;
334};
335
336// Called back from Runtime::FlipThreadRoots() during a pause.
Mathieu Chartiera07f5592016-06-16 11:44:28 -0700337class ConcurrentCopying::FlipCallback : public Closure {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -0800338 public:
339 explicit FlipCallback(ConcurrentCopying* concurrent_copying)
340 : concurrent_copying_(concurrent_copying) {
341 }
342
Mathieu Chartier90443472015-07-16 20:32:27 -0700343 virtual void Run(Thread* thread) OVERRIDE REQUIRES(Locks::mutator_lock_) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -0800344 ConcurrentCopying* cc = concurrent_copying_;
345 TimingLogger::ScopedTiming split("(Paused)FlipCallback", cc->GetTimings());
346 // Note: self is not necessarily equal to thread since thread may be suspended.
347 Thread* self = Thread::Current();
Mathieu Chartiera1467d02017-02-22 09:22:50 -0800348 if (kVerifyNoMissingCardMarks) {
349 cc->VerifyNoMissingCardMarks();
350 }
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -0800351 CHECK(thread == self);
352 Locks::mutator_lock_->AssertExclusiveHeld(self);
353 cc->region_space_->SetFromSpace(cc->rb_table_, cc->force_evacuate_all_);
Mathieu Chartiera4f6af92015-08-11 17:35:25 -0700354 cc->SwapStacks();
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -0800355 if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) {
356 cc->RecordLiveStackFreezeSize(self);
357 cc->from_space_num_objects_at_first_pause_ = cc->region_space_->GetObjectsAllocated();
358 cc->from_space_num_bytes_at_first_pause_ = cc->region_space_->GetBytesAllocated();
359 }
360 cc->is_marking_ = true;
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -0700361 cc->mark_stack_mode_.StoreRelaxed(ConcurrentCopying::kMarkStackModeThreadLocal);
Hiroshi Yamauchi8e674652015-12-22 11:09:18 -0800362 if (kIsDebugBuild) {
363 cc->region_space_->AssertAllRegionLiveBytesZeroOrCleared();
364 }
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -0800365 if (UNLIKELY(Runtime::Current()->IsActiveTransaction())) {
Mathieu Chartier184c9dc2015-03-05 13:20:54 -0800366 CHECK(Runtime::Current()->IsAotCompiler());
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -0800367 TimingLogger::ScopedTiming split2("(Paused)VisitTransactionRoots", cc->GetTimings());
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -0700368 Runtime::Current()->VisitTransactionRoots(cc);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -0800369 }
Mathieu Chartier21328a12016-07-22 10:47:45 -0700370 if (kUseBakerReadBarrier && kGrayDirtyImmuneObjects) {
371 cc->GrayAllDirtyImmuneObjects();
372 if (kIsDebugBuild) {
        // Check that all non-gray immune objects only reference immune objects.
374 cc->VerifyGrayImmuneObjects();
375 }
376 }
Mathieu Chartier3ed8ec12017-04-20 19:28:54 -0700377 cc->java_lang_Object_ = down_cast<mirror::Class*>(cc->Mark(
378 WellKnownClasses::ToClass(WellKnownClasses::java_lang_Object).Ptr()));
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -0800379 }
380
381 private:
382 ConcurrentCopying* const concurrent_copying_;
383};
384
Mathieu Chartier21328a12016-07-22 10:47:45 -0700385class ConcurrentCopying::VerifyGrayImmuneObjectsVisitor {
386 public:
387 explicit VerifyGrayImmuneObjectsVisitor(ConcurrentCopying* collector)
388 : collector_(collector) {}
389
Mathieu Chartier31e88222016-10-14 18:43:19 -0700390 void operator()(ObjPtr<mirror::Object> obj, MemberOffset offset, bool /* is_static */)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700391 const ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_)
392 REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
Mathieu Chartier21328a12016-07-22 10:47:45 -0700393 CheckReference(obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier>(offset),
394 obj, offset);
395 }
396
Mathieu Chartier31e88222016-10-14 18:43:19 -0700397 void operator()(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref) const
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700398 REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
Mathieu Chartier21328a12016-07-22 10:47:45 -0700399 CHECK(klass->IsTypeOfReferenceClass());
400 CheckReference(ref->GetReferent<kWithoutReadBarrier>(),
401 ref,
402 mirror::Reference::ReferentOffset());
403 }
404
405 void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
406 ALWAYS_INLINE
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700407 REQUIRES_SHARED(Locks::mutator_lock_) {
Mathieu Chartier21328a12016-07-22 10:47:45 -0700408 if (!root->IsNull()) {
409 VisitRoot(root);
410 }
411 }
412
413 void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
414 ALWAYS_INLINE
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700415 REQUIRES_SHARED(Locks::mutator_lock_) {
Mathieu Chartier21328a12016-07-22 10:47:45 -0700416 CheckReference(root->AsMirrorPtr(), nullptr, MemberOffset(0));
417 }
418
419 private:
420 ConcurrentCopying* const collector_;
421
Mathieu Chartier31e88222016-10-14 18:43:19 -0700422 void CheckReference(ObjPtr<mirror::Object> ref,
423 ObjPtr<mirror::Object> holder,
424 MemberOffset offset) const
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700425 REQUIRES_SHARED(Locks::mutator_lock_) {
Mathieu Chartier21328a12016-07-22 10:47:45 -0700426 if (ref != nullptr) {
Mathieu Chartier31e88222016-10-14 18:43:19 -0700427 if (!collector_->immune_spaces_.ContainsObject(ref.Ptr())) {
Mathieu Chartier962cd7a2016-08-16 12:15:59 -0700428 // Not immune, must be a zygote large object.
429 CHECK(Runtime::Current()->GetHeap()->GetLargeObjectsSpace()->IsZygoteLargeObject(
Mathieu Chartier31e88222016-10-14 18:43:19 -0700430 Thread::Current(), ref.Ptr()))
            << "Non-gray object references non-immune, non-zygote large object " << ref << " "
David Sehr709b0702016-10-13 09:12:37 -0700432 << mirror::Object::PrettyTypeOf(ref) << " in holder " << holder << " "
433 << mirror::Object::PrettyTypeOf(holder) << " offset=" << offset.Uint32Value();
Mathieu Chartier962cd7a2016-08-16 12:15:59 -0700434 } else {
435 // Make sure the large object class is immune since we will never scan the large object.
436 CHECK(collector_->immune_spaces_.ContainsObject(
437 ref->GetClass<kVerifyNone, kWithoutReadBarrier>()));
438 }
Mathieu Chartier21328a12016-07-22 10:47:45 -0700439 }
440 }
441};
442
443void ConcurrentCopying::VerifyGrayImmuneObjects() {
444 TimingLogger::ScopedTiming split(__FUNCTION__, GetTimings());
445 for (auto& space : immune_spaces_.GetSpaces()) {
446 DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
447 accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
448 VerifyGrayImmuneObjectsVisitor visitor(this);
449 live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
450 reinterpret_cast<uintptr_t>(space->Limit()),
451 [&visitor](mirror::Object* obj)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700452 REQUIRES_SHARED(Locks::mutator_lock_) {
Mathieu Chartier21328a12016-07-22 10:47:45 -0700453 // If an object is not gray, it should only have references to things in the immune spaces.
Hiroshi Yamauchi12b58b22016-11-01 11:55:29 -0700454 if (obj->GetReadBarrierState() != ReadBarrier::GrayState()) {
Mathieu Chartier21328a12016-07-22 10:47:45 -0700455 obj->VisitReferences</*kVisitNativeRoots*/true,
456 kDefaultVerifyFlags,
457 kWithoutReadBarrier>(visitor, visitor);
458 }
459 });
460 }
461}
462
Mathieu Chartiera1467d02017-02-22 09:22:50 -0800463class ConcurrentCopying::VerifyNoMissingCardMarkVisitor {
464 public:
465 VerifyNoMissingCardMarkVisitor(ConcurrentCopying* cc, ObjPtr<mirror::Object> holder)
466 : cc_(cc),
467 holder_(holder) {}
468
469 void operator()(ObjPtr<mirror::Object> obj,
470 MemberOffset offset,
471 bool is_static ATTRIBUTE_UNUSED) const
472 REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
473 if (offset.Uint32Value() != mirror::Object::ClassOffset().Uint32Value()) {
474 CheckReference(obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(
475 offset), offset.Uint32Value());
476 }
477 }
478 void operator()(ObjPtr<mirror::Class> klass,
479 ObjPtr<mirror::Reference> ref) const
480 REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
481 CHECK(klass->IsTypeOfReferenceClass());
482 this->operator()(ref, mirror::Reference::ReferentOffset(), false);
483 }
484
485 void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
486 REQUIRES_SHARED(Locks::mutator_lock_) {
487 if (!root->IsNull()) {
488 VisitRoot(root);
489 }
490 }
491
492 void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
493 REQUIRES_SHARED(Locks::mutator_lock_) {
494 CheckReference(root->AsMirrorPtr());
495 }
496
497 void CheckReference(mirror::Object* ref, int32_t offset = -1) const
498 REQUIRES_SHARED(Locks::mutator_lock_) {
499 CHECK(ref == nullptr || !cc_->region_space_->IsInNewlyAllocatedRegion(ref))
500 << holder_->PrettyTypeOf() << "(" << holder_.Ptr() << ") references object "
501 << ref->PrettyTypeOf() << "(" << ref << ") in newly allocated region at offset=" << offset;
502 }
503
504 private:
505 ConcurrentCopying* const cc_;
506 ObjPtr<mirror::Object> const holder_;
507};
508
509void ConcurrentCopying::VerifyNoMissingCardMarkCallback(mirror::Object* obj, void* arg) {
510 auto* collector = reinterpret_cast<ConcurrentCopying*>(arg);
511 // Objects not on dirty cards should never have references to newly allocated regions.
512 if (!collector->heap_->GetCardTable()->IsDirty(obj)) {
513 VerifyNoMissingCardMarkVisitor visitor(collector, /*holder*/ obj);
514 obj->VisitReferences</*kVisitNativeRoots*/true, kVerifyNone, kWithoutReadBarrier>(
515 visitor,
516 visitor);
517 }
518}
519
520void ConcurrentCopying::VerifyNoMissingCardMarks() {
521 TimingLogger::ScopedTiming split(__FUNCTION__, GetTimings());
522 region_space_->Walk(&VerifyNoMissingCardMarkCallback, this);
523 {
524 ReaderMutexLock rmu(Thread::Current(), *Locks::heap_bitmap_lock_);
525 heap_->GetLiveBitmap()->Walk(&VerifyNoMissingCardMarkCallback, this);
526 }
527}
528
// Switch threads' roots from from-space refs to to-space refs. Forward/mark the thread roots.
530void ConcurrentCopying::FlipThreadRoots() {
531 TimingLogger::ScopedTiming split("FlipThreadRoots", GetTimings());
532 if (kVerboseMode) {
533 LOG(INFO) << "time=" << region_space_->Time();
Andreas Gampe3fec9ac2016-09-13 10:47:28 -0700534 region_space_->DumpNonFreeRegions(LOG_STREAM(INFO));
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -0800535 }
536 Thread* self = Thread::Current();
537 Locks::mutator_lock_->AssertNotHeld(self);
538 gc_barrier_->Init(self, 0);
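  // Each thread passes gc_barrier_ after its roots have been flipped by ThreadFlipVisitor; the GC
  // thread waits on the barrier below until all of them have done so.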
539 ThreadFlipVisitor thread_flip_visitor(this, heap_->use_tlab_);
540 FlipCallback flip_callback(this);
Andreas Gampe4934eb12017-01-30 13:15:26 -0800541
542 // This is the point where Concurrent-Copying will pause all threads. We report a pause here, if
543 // necessary. This is slightly over-reporting, as this includes the time to actually suspend
544 // threads.
545 {
546 GcPauseListener* pause_listener = GetHeap()->GetGcPauseListener();
547 if (pause_listener != nullptr) {
548 pause_listener->StartPause();
549 }
550 }
551
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -0800552 size_t barrier_count = Runtime::Current()->FlipThreadRoots(
553 &thread_flip_visitor, &flip_callback, this);
Andreas Gampe4934eb12017-01-30 13:15:26 -0800554
555 {
556 GcPauseListener* pause_listener = GetHeap()->GetGcPauseListener();
557 if (pause_listener != nullptr) {
558 pause_listener->EndPause();
559 }
560 }
561
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -0800562 {
563 ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
564 gc_barrier_->Increment(self, barrier_count);
565 }
566 is_asserting_to_space_invariant_ = true;
567 QuasiAtomic::ThreadFenceForConstructor();
568 if (kVerboseMode) {
569 LOG(INFO) << "time=" << region_space_->Time();
Andreas Gampe3fec9ac2016-09-13 10:47:28 -0700570 region_space_->DumpNonFreeRegions(LOG_STREAM(INFO));
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -0800571 LOG(INFO) << "GC end of FlipThreadRoots";
572 }
573}
574
Mathieu Chartier21328a12016-07-22 10:47:45 -0700575class ConcurrentCopying::GrayImmuneObjectVisitor {
576 public:
577 explicit GrayImmuneObjectVisitor() {}
578
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700579 ALWAYS_INLINE void operator()(mirror::Object* obj) const REQUIRES_SHARED(Locks::mutator_lock_) {
Mathieu Chartier21328a12016-07-22 10:47:45 -0700580 if (kUseBakerReadBarrier) {
581 if (kIsDebugBuild) {
582 Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
583 }
Hiroshi Yamauchi12b58b22016-11-01 11:55:29 -0700584 obj->SetReadBarrierState(ReadBarrier::GrayState());
Mathieu Chartier21328a12016-07-22 10:47:45 -0700585 }
586 }
587
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700588 static void Callback(mirror::Object* obj, void* arg) REQUIRES_SHARED(Locks::mutator_lock_) {
Mathieu Chartier21328a12016-07-22 10:47:45 -0700589 reinterpret_cast<GrayImmuneObjectVisitor*>(arg)->operator()(obj);
590 }
591};
592
593void ConcurrentCopying::GrayAllDirtyImmuneObjects() {
594 TimingLogger::ScopedTiming split(__FUNCTION__, GetTimings());
595 gc::Heap* const heap = Runtime::Current()->GetHeap();
596 accounting::CardTable* const card_table = heap->GetCardTable();
597 WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
598 for (space::ContinuousSpace* space : immune_spaces_.GetSpaces()) {
599 DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
600 GrayImmuneObjectVisitor visitor;
601 accounting::ModUnionTable* table = heap->FindModUnionTableFromSpace(space);
    // Mark all the objects on dirty cards since these may point to objects in other spaces.
    // Once these are marked, the GC will eventually clear them.
    // The table is non-null for boot image and zygote spaces. It is only null for application
    // image spaces.
606 if (table != nullptr) {
Mathieu Chartier6e6078a2016-10-24 15:45:41 -0700607 // TODO: Consider adding precleaning outside the pause.
608 table->ProcessCards();
Mathieu Chartier21328a12016-07-22 10:47:45 -0700609 table->VisitObjects(GrayImmuneObjectVisitor::Callback, &visitor);
Mathieu Chartier6e6078a2016-10-24 15:45:41 -0700610 // Since the cards are recorded in the mod-union table and this is paused, we can clear
611 // the cards for the space (to madvise).
612 TimingLogger::ScopedTiming split2("(Paused)ClearCards", GetTimings());
613 card_table->ClearCardRange(space->Begin(),
614 AlignDown(space->End(), accounting::CardTable::kCardSize));
Mathieu Chartier21328a12016-07-22 10:47:45 -0700615 } else {
616 // TODO: Consider having a mark bitmap for app image spaces and avoid scanning during the
617 // pause because app image spaces are all dirty pages anyways.
618 card_table->Scan<false>(space->GetMarkBitmap(), space->Begin(), space->End(), visitor);
619 }
620 }
621 // Since all of the objects that may point to other spaces are marked, we can avoid all the read
622 // barriers in the immune spaces.
623 updated_all_immune_objects_.StoreRelaxed(true);
624}
625
Mathieu Chartiera4f6af92015-08-11 17:35:25 -0700626void ConcurrentCopying::SwapStacks() {
627 heap_->SwapStacks();
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -0800628}
629
630void ConcurrentCopying::RecordLiveStackFreezeSize(Thread* self) {
631 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
632 live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
633}
634
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -0700635// Used to visit objects in the immune spaces.
636inline void ConcurrentCopying::ScanImmuneObject(mirror::Object* obj) {
637 DCHECK(obj != nullptr);
638 DCHECK(immune_spaces_.ContainsObject(obj));
639 // Update the fields without graying it or pushing it onto the mark stack.
640 Scan(obj);
641}
642
643class ConcurrentCopying::ImmuneSpaceScanObjVisitor {
644 public:
645 explicit ImmuneSpaceScanObjVisitor(ConcurrentCopying* cc)
646 : collector_(cc) {}
647
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700648 ALWAYS_INLINE void operator()(mirror::Object* obj) const REQUIRES_SHARED(Locks::mutator_lock_) {
Mathieu Chartier21328a12016-07-22 10:47:45 -0700649 if (kUseBakerReadBarrier && kGrayDirtyImmuneObjects) {
Hiroshi Yamauchi12b58b22016-11-01 11:55:29 -0700650 if (obj->GetReadBarrierState() == ReadBarrier::GrayState()) {
Mathieu Chartier21328a12016-07-22 10:47:45 -0700651 collector_->ScanImmuneObject(obj);
652 // Done scanning the object, go back to white.
Hiroshi Yamauchi12b58b22016-11-01 11:55:29 -0700653 bool success = obj->AtomicSetReadBarrierState(ReadBarrier::GrayState(),
654 ReadBarrier::WhiteState());
Mathieu Chartier21328a12016-07-22 10:47:45 -0700655 CHECK(success);
656 }
657 } else {
658 collector_->ScanImmuneObject(obj);
659 }
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -0700660 }
661
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700662 static void Callback(mirror::Object* obj, void* arg) REQUIRES_SHARED(Locks::mutator_lock_) {
Hiroshi Yamauchi5408f232016-07-29 15:07:05 -0700663 reinterpret_cast<ImmuneSpaceScanObjVisitor*>(arg)->operator()(obj);
664 }
665
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -0700666 private:
667 ConcurrentCopying* const collector_;
668};
669
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -0800670// Concurrently mark roots that are guarded by read barriers and process the mark stack.
671void ConcurrentCopying::MarkingPhase() {
672 TimingLogger::ScopedTiming split("MarkingPhase", GetTimings());
673 if (kVerboseMode) {
674 LOG(INFO) << "GC MarkingPhase";
675 }
Hiroshi Yamauchifebd0cf2016-09-14 19:31:25 -0700676 Thread* self = Thread::Current();
677 if (kIsDebugBuild) {
678 MutexLock mu(self, *Locks::thread_list_lock_);
679 CHECK(weak_ref_access_enabled_);
680 }
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -0700681
682 // Scan immune spaces.
683 // Update all the fields in the immune spaces first without graying the objects so that we
684 // minimize dirty pages in the immune spaces. Note mutators can concurrently access and gray some
685 // of the objects.
686 if (kUseBakerReadBarrier) {
687 gc_grays_immune_objects_ = false;
Hiroshi Yamauchi16292fc2016-06-20 20:23:34 -0700688 }
Mathieu Chartier21328a12016-07-22 10:47:45 -0700689 {
690 TimingLogger::ScopedTiming split2("ScanImmuneSpaces", GetTimings());
691 for (auto& space : immune_spaces_.GetSpaces()) {
692 DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
693 accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
Hiroshi Yamauchi5408f232016-07-29 15:07:05 -0700694 accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
Mathieu Chartier21328a12016-07-22 10:47:45 -0700695 ImmuneSpaceScanObjVisitor visitor(this);
Hiroshi Yamauchi5408f232016-07-29 15:07:05 -0700696 if (kUseBakerReadBarrier && kGrayDirtyImmuneObjects && table != nullptr) {
697 table->VisitObjects(ImmuneSpaceScanObjVisitor::Callback, &visitor);
698 } else {
699 live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
700 reinterpret_cast<uintptr_t>(space->Limit()),
701 visitor);
702 }
Mathieu Chartier21328a12016-07-22 10:47:45 -0700703 }
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -0700704 }
705 if (kUseBakerReadBarrier) {
    // This release fence makes the field updates in the above loop visible before allowing
    // mutators to access immune objects without graying them first.
708 updated_all_immune_objects_.StoreRelease(true);
709 // Now whiten immune objects concurrently accessed and grayed by mutators. We can't do this in
710 // the above loop because we would incorrectly disable the read barrier by whitening an object
711 // which may point to an unscanned, white object, breaking the to-space invariant.
712 //
713 // Make sure no mutators are in the middle of marking an immune object before whitening immune
714 // objects.
715 IssueEmptyCheckpoint();
716 MutexLock mu(Thread::Current(), immune_gray_stack_lock_);
717 if (kVerboseMode) {
718 LOG(INFO) << "immune gray stack size=" << immune_gray_stack_.size();
719 }
720 for (mirror::Object* obj : immune_gray_stack_) {
Hiroshi Yamauchi12b58b22016-11-01 11:55:29 -0700721 DCHECK(obj->GetReadBarrierState() == ReadBarrier::GrayState());
722 bool success = obj->AtomicSetReadBarrierState(ReadBarrier::GrayState(),
723 ReadBarrier::WhiteState());
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -0700724 DCHECK(success);
725 }
726 immune_gray_stack_.clear();
727 }
728
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -0800729 {
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -0700730 TimingLogger::ScopedTiming split2("VisitConcurrentRoots", GetTimings());
731 Runtime::Current()->VisitConcurrentRoots(this, kVisitRootFlagAllRoots);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -0800732 }
733 {
734 // TODO: don't visit the transaction roots if it's not active.
735 TimingLogger::ScopedTiming split5("VisitNonThreadRoots", GetTimings());
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -0700736 Runtime::Current()->VisitNonThreadRoots(this);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -0800737 }
738
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -0800739 {
Mathieu Chartiera6b1ead2015-10-06 10:32:38 -0700740 TimingLogger::ScopedTiming split7("ProcessMarkStack", GetTimings());
    // We transition through three mark stack modes (thread-local, shared, GC-exclusive). The
    // primary reasons are that we need a checkpoint to process thread-local mark stacks, but once
    // we disable weak ref accesses we can no longer use a checkpoint (running threads may be
    // blocked at WaitHoldingLocks, which would deadlock), and that once we reach the point where
    // we process weak references, we can avoid using a lock when accessing the GC mark stack,
    // which makes mark stack processing more efficient.

    // Process the mark stack once in the thread-local stack mode. This marks most of the live
    // objects, aside from weak ref accesses with read barriers (Reference::GetReferent() and
    // system weaks) that may happen concurrently while we process the mark stack and newly
    // mark/gray objects and push refs onto the mark stack.
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -0800752 ProcessMarkStack();
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -0700753 // Switch to the shared mark stack mode. That is, revoke and process thread-local mark stacks
754 // for the last time before transitioning to the shared mark stack mode, which would process new
755 // refs that may have been concurrently pushed onto the mark stack during the ProcessMarkStack()
756 // call above. At the same time, disable weak ref accesses using a per-thread flag. It's
757 // important to do these together in a single checkpoint so that we can ensure that mutators
758 // won't newly gray objects and push new refs onto the mark stack due to weak ref accesses and
759 // mutators safely transition to the shared mark stack mode (without leaving unprocessed refs on
760 // the thread-local mark stacks), without a race. This is why we use a thread-local weak ref
761 // access flag Thread::tls32_.weak_ref_access_enabled_ instead of the global ones.
762 SwitchToSharedMarkStackMode();
763 CHECK(!self->GetWeakRefAccessEnabled());
764 // Now that weak refs accesses are disabled, once we exhaust the shared mark stack again here
765 // (which may be non-empty if there were refs found on thread-local mark stacks during the above
766 // SwitchToSharedMarkStackMode() call), we won't have new refs to process, that is, mutators
767 // (via read barriers) have no way to produce any more refs to process. Marking converges once
768 // before we process weak refs below.
769 ProcessMarkStack();
770 CheckEmptyMarkStack();
771 // Switch to the GC exclusive mark stack mode so that we can process the mark stack without a
772 // lock from this point on.
773 SwitchToGcExclusiveMarkStackMode();
774 CheckEmptyMarkStack();
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -0800775 if (kVerboseMode) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -0800776 LOG(INFO) << "ProcessReferences";
777 }
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -0700778 // Process weak references. This may produce new refs to process and have them processed via
Mathieu Chartier97509952015-07-13 14:35:43 -0700779 // ProcessMarkStack (in the GC exclusive mark stack mode).
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -0700780 ProcessReferences(self);
781 CheckEmptyMarkStack();
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -0800782 if (kVerboseMode) {
783 LOG(INFO) << "SweepSystemWeaks";
784 }
785 SweepSystemWeaks(self);
786 if (kVerboseMode) {
787 LOG(INFO) << "SweepSystemWeaks done";
788 }
    // Process the mark stack here one last time because the above SweepSystemWeaks() call may have
    // marked some objects (kept some strings alive), as hash_set::Erase() can call the hash
    // function for arbitrary elements in the weak intern table in InternTable::Table::SweepWeaks().
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -0800792 ProcessMarkStack();
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -0700793 CheckEmptyMarkStack();
794 // Re-enable weak ref accesses.
795 ReenableWeakRefAccess(self);
Mathieu Chartier951ec2c2015-09-22 08:50:05 -0700796 // Free data for class loaders that we unloaded.
797 Runtime::Current()->GetClassLinker()->CleanupClassLoaders();
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -0700798 // Marking is done. Disable marking.
Hiroshi Yamauchi00370822015-08-18 14:47:25 -0700799 DisableMarking();
Hiroshi Yamauchi8e674652015-12-22 11:09:18 -0800800 if (kUseBakerReadBarrier) {
801 ProcessFalseGrayStack();
802 }
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -0700803 CheckEmptyMarkStack();
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -0800804 }
805
Hiroshi Yamauchifebd0cf2016-09-14 19:31:25 -0700806 if (kIsDebugBuild) {
807 MutexLock mu(self, *Locks::thread_list_lock_);
808 CHECK(weak_ref_access_enabled_);
809 }
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -0800810 if (kVerboseMode) {
811 LOG(INFO) << "GC end of MarkingPhase";
812 }
813}
814
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -0700815void ConcurrentCopying::ReenableWeakRefAccess(Thread* self) {
816 if (kVerboseMode) {
817 LOG(INFO) << "ReenableWeakRefAccess";
818 }
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -0700819 // Iterate all threads (don't need to or can't use a checkpoint) and re-enable weak ref access.
820 {
821 MutexLock mu(self, *Locks::thread_list_lock_);
Hiroshi Yamauchifebd0cf2016-09-14 19:31:25 -0700822 weak_ref_access_enabled_ = true; // This is for new threads.
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -0700823 std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
824 for (Thread* thread : thread_list) {
825 thread->SetWeakRefAccessEnabled(true);
826 }
827 }
828 // Unblock blocking threads.
829 GetHeap()->GetReferenceProcessor()->BroadcastForSlowPath(self);
830 Runtime::Current()->BroadcastForNewSystemWeaks();
831}
832
Mathieu Chartiera07f5592016-06-16 11:44:28 -0700833class ConcurrentCopying::DisableMarkingCheckpoint : public Closure {
Hiroshi Yamauchi00370822015-08-18 14:47:25 -0700834 public:
835 explicit DisableMarkingCheckpoint(ConcurrentCopying* concurrent_copying)
836 : concurrent_copying_(concurrent_copying) {
837 }
838
839 void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
840 // Note: self is not necessarily equal to thread since thread may be suspended.
841 Thread* self = Thread::Current();
842 DCHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
843 << thread->GetState() << " thread " << thread << " self " << self;
844 // Disable the thread-local is_gc_marking flag.
    // Note a thread that has just started right before this checkpoint may already have this flag
    // set to false, which is OK.
Mathieu Chartierfe814e82016-11-09 14:32:49 -0800847 thread->SetIsGcMarkingAndUpdateEntrypoints(false);
Hiroshi Yamauchi00370822015-08-18 14:47:25 -0700848 // If thread is a running mutator, then act on behalf of the garbage collector.
849 // See the code in ThreadList::RunCheckpoint.
Mathieu Chartier10d25082015-10-28 18:36:09 -0700850 concurrent_copying_->GetBarrier().Pass(self);
Hiroshi Yamauchi00370822015-08-18 14:47:25 -0700851 }
852
853 private:
854 ConcurrentCopying* const concurrent_copying_;
855};
856
Hiroshi Yamauchifebd0cf2016-09-14 19:31:25 -0700857class ConcurrentCopying::DisableMarkingCallback : public Closure {
858 public:
859 explicit DisableMarkingCallback(ConcurrentCopying* concurrent_copying)
860 : concurrent_copying_(concurrent_copying) {
861 }
862
863 void Run(Thread* self ATTRIBUTE_UNUSED) OVERRIDE REQUIRES(Locks::thread_list_lock_) {
864 // This needs to run under the thread_list_lock_ critical section in ThreadList::RunCheckpoint()
865 // to avoid a race with ThreadList::Register().
866 CHECK(concurrent_copying_->is_marking_);
867 concurrent_copying_->is_marking_ = false;
868 }
869
870 private:
871 ConcurrentCopying* const concurrent_copying_;
872};
873
Hiroshi Yamauchi00370822015-08-18 14:47:25 -0700874void ConcurrentCopying::IssueDisableMarkingCheckpoint() {
875 Thread* self = Thread::Current();
876 DisableMarkingCheckpoint check_point(this);
877 ThreadList* thread_list = Runtime::Current()->GetThreadList();
878 gc_barrier_->Init(self, 0);
Hiroshi Yamauchifebd0cf2016-09-14 19:31:25 -0700879 DisableMarkingCallback dmc(this);
880 size_t barrier_count = thread_list->RunCheckpoint(&check_point, &dmc);
  // If there are no threads to wait for, all the checkpoint functions have finished, so there is
  // no need to release the mutator lock.
883 if (barrier_count == 0) {
884 return;
885 }
886 // Release locks then wait for all mutator threads to pass the barrier.
887 Locks::mutator_lock_->SharedUnlock(self);
888 {
889 ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
890 gc_barrier_->Increment(self, barrier_count);
891 }
892 Locks::mutator_lock_->SharedLock(self);
893}
894
895void ConcurrentCopying::DisableMarking() {
Hiroshi Yamauchifebd0cf2016-09-14 19:31:25 -0700896 // Use a checkpoint to turn off the global is_marking and the thread-local is_gc_marking flags and
897 // to ensure no threads are still in the middle of a read barrier which may have a from-space ref
898 // cached in a local variable.
Hiroshi Yamauchi00370822015-08-18 14:47:25 -0700899 IssueDisableMarkingCheckpoint();
900 if (kUseTableLookupReadBarrier) {
901 heap_->rb_table_->ClearAll();
902 DCHECK(heap_->rb_table_->IsAllCleared());
903 }
904 is_mark_stack_push_disallowed_.StoreSequentiallyConsistent(1);
905 mark_stack_mode_.StoreSequentiallyConsistent(kMarkStackModeOff);
906}
907
Hiroshi Yamauchi8e674652015-12-22 11:09:18 -0800908void ConcurrentCopying::PushOntoFalseGrayStack(mirror::Object* ref) {
909 CHECK(kUseBakerReadBarrier);
910 DCHECK(ref != nullptr);
911 MutexLock mu(Thread::Current(), mark_stack_lock_);
912 false_gray_stack_.push_back(ref);
913}
914
915void ConcurrentCopying::ProcessFalseGrayStack() {
916 CHECK(kUseBakerReadBarrier);
917 // Change the objects on the false gray stack from gray to white.
918 MutexLock mu(Thread::Current(), mark_stack_lock_);
919 for (mirror::Object* obj : false_gray_stack_) {
920 DCHECK(IsMarked(obj));
921 // The object could be white here if a thread got preempted after a success at the
Hiroshi Yamauchi12b58b22016-11-01 11:55:29 -0700922 // AtomicSetReadBarrierState in Mark(), GC started marking through it (but not finished so
Hiroshi Yamauchi8e674652015-12-22 11:09:18 -0800923 // still gray), and the thread ran to register it onto the false gray stack.
Hiroshi Yamauchi12b58b22016-11-01 11:55:29 -0700924 if (obj->GetReadBarrierState() == ReadBarrier::GrayState()) {
925 bool success = obj->AtomicSetReadBarrierState(ReadBarrier::GrayState(),
926 ReadBarrier::WhiteState());
Hiroshi Yamauchi8e674652015-12-22 11:09:18 -0800927 DCHECK(success);
928 }
929 }
930 false_gray_stack_.clear();
931}
932
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -0800933void ConcurrentCopying::IssueEmptyCheckpoint() {
934 Thread* self = Thread::Current();
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -0800935 ThreadList* thread_list = Runtime::Current()->GetThreadList();
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -0800936 // Release locks then wait for all mutator threads to pass the barrier.
937 Locks::mutator_lock_->SharedUnlock(self);
Hiroshi Yamauchia2224042017-02-08 16:35:45 -0800938 thread_list->RunEmptyCheckpoint();
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -0800939 Locks::mutator_lock_->SharedLock(self);
940}
941
Hiroshi Yamauchi19eab402015-10-23 19:59:58 -0700942void ConcurrentCopying::ExpandGcMarkStack() {
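  // Grow the GC mark stack by saving its current contents, doubling the capacity, and re-pushing
  // the saved references onto the resized stack.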
943 DCHECK(gc_mark_stack_->IsFull());
944 const size_t new_size = gc_mark_stack_->Capacity() * 2;
945 std::vector<StackReference<mirror::Object>> temp(gc_mark_stack_->Begin(),
946 gc_mark_stack_->End());
947 gc_mark_stack_->Resize(new_size);
948 for (auto& ref : temp) {
949 gc_mark_stack_->PushBack(ref.AsMirrorPtr());
950 }
951 DCHECK(!gc_mark_stack_->IsFull());
952}
953
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -0800954void ConcurrentCopying::PushOntoMarkStack(mirror::Object* to_ref) {
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -0700955 CHECK_EQ(is_mark_stack_push_disallowed_.LoadRelaxed(), 0)
David Sehr709b0702016-10-13 09:12:37 -0700956 << " " << to_ref << " " << mirror::Object::PrettyTypeOf(to_ref);
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -0700957 Thread* self = Thread::Current(); // TODO: pass self as an argument from call sites?
958 CHECK(thread_running_gc_ != nullptr);
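  // Dispatch on the current mark stack mode: thread-local (the GC thread uses the GC mark stack,
  // mutators push onto per-thread stacks), shared (a single stack protected by mark_stack_lock_),
  // or GC-exclusive (only the GC-running thread pushes, so no lock is needed).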
959 MarkStackMode mark_stack_mode = mark_stack_mode_.LoadRelaxed();
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -0700960 if (LIKELY(mark_stack_mode == kMarkStackModeThreadLocal)) {
961 if (LIKELY(self == thread_running_gc_)) {
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -0700962 // If GC-running thread, use the GC mark stack instead of a thread-local mark stack.
963 CHECK(self->GetThreadLocalMarkStack() == nullptr);
Hiroshi Yamauchi19eab402015-10-23 19:59:58 -0700964 if (UNLIKELY(gc_mark_stack_->IsFull())) {
965 ExpandGcMarkStack();
966 }
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -0700967 gc_mark_stack_->PushBack(to_ref);
968 } else {
969 // Otherwise, use a thread-local mark stack.
970 accounting::AtomicStack<mirror::Object>* tl_mark_stack = self->GetThreadLocalMarkStack();
971 if (UNLIKELY(tl_mark_stack == nullptr || tl_mark_stack->IsFull())) {
972 MutexLock mu(self, mark_stack_lock_);
973 // Get a new thread local mark stack.
974 accounting::AtomicStack<mirror::Object>* new_tl_mark_stack;
975 if (!pooled_mark_stacks_.empty()) {
976 // Use a pooled mark stack.
977 new_tl_mark_stack = pooled_mark_stacks_.back();
978 pooled_mark_stacks_.pop_back();
979 } else {
980 // None pooled. Create a new one.
981 new_tl_mark_stack =
982 accounting::AtomicStack<mirror::Object>::Create(
983 "thread local mark stack", 4 * KB, 4 * KB);
984 }
985 DCHECK(new_tl_mark_stack != nullptr);
986 DCHECK(new_tl_mark_stack->IsEmpty());
987 new_tl_mark_stack->PushBack(to_ref);
988 self->SetThreadLocalMarkStack(new_tl_mark_stack);
989 if (tl_mark_stack != nullptr) {
990 // Store the old full stack into a vector.
991 revoked_mark_stacks_.push_back(tl_mark_stack);
992 }
993 } else {
994 tl_mark_stack->PushBack(to_ref);
995 }
996 }
997 } else if (mark_stack_mode == kMarkStackModeShared) {
998 // Access the shared GC mark stack with a lock.
999 MutexLock mu(self, mark_stack_lock_);
Hiroshi Yamauchi19eab402015-10-23 19:59:58 -07001000 if (UNLIKELY(gc_mark_stack_->IsFull())) {
1001 ExpandGcMarkStack();
1002 }
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001003 gc_mark_stack_->PushBack(to_ref);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001004 } else {
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001005 CHECK_EQ(static_cast<uint32_t>(mark_stack_mode),
Hiroshi Yamauchifa755182015-09-30 20:12:11 -07001006 static_cast<uint32_t>(kMarkStackModeGcExclusive))
1007 << "ref=" << to_ref
1008 << " self->gc_marking=" << self->GetIsGcMarking()
1009 << " cc->is_marking=" << is_marking_;
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001010 CHECK(self == thread_running_gc_)
1011 << "Only GC-running thread should access the mark stack "
1012 << "in the GC exclusive mark stack mode";
1013 // Access the GC mark stack without a lock.
Hiroshi Yamauchi19eab402015-10-23 19:59:58 -07001014 if (UNLIKELY(gc_mark_stack_->IsFull())) {
1015 ExpandGcMarkStack();
1016 }
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001017 gc_mark_stack_->PushBack(to_ref);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001018 }
1019}
1020
1021accounting::ObjectStack* ConcurrentCopying::GetAllocationStack() {
1022 return heap_->allocation_stack_.get();
1023}
1024
1025accounting::ObjectStack* ConcurrentCopying::GetLiveStack() {
1026 return heap_->live_stack_.get();
1027}
1028
// The following visitors are used to verify that there are no references to the from-space left
// after marking.
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001031class ConcurrentCopying::VerifyNoFromSpaceRefsVisitor : public SingleRootVisitor {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001032 public:
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001033 explicit VerifyNoFromSpaceRefsVisitor(ConcurrentCopying* collector)
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001034 : collector_(collector) {}
1035
Mathieu Chartierbc632f02017-04-20 13:31:39 -07001036 void operator()(mirror::Object* ref,
1037 MemberOffset offset = MemberOffset(0),
1038 mirror::Object* holder = nullptr) const
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001039 REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001040 if (ref == nullptr) {
1041 // OK.
1042 return;
1043 }
Mathieu Chartierbc632f02017-04-20 13:31:39 -07001044 collector_->AssertToSpaceInvariant(holder, offset, ref);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001045 if (kUseBakerReadBarrier) {
Hiroshi Yamauchi12b58b22016-11-01 11:55:29 -07001046 CHECK_EQ(ref->GetReadBarrierState(), ReadBarrier::WhiteState())
David Sehr709b0702016-10-13 09:12:37 -07001047 << "Ref " << ref << " " << ref->PrettyTypeOf()
Hiroshi Yamauchi12b58b22016-11-01 11:55:29 -07001048 << " has non-white rb_state ";
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001049 }
1050 }
1051
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07001052 void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001053 OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001054 DCHECK(root != nullptr);
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07001055 operator()(root);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001056 }
1057
1058 private:
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07001059 ConcurrentCopying* const collector_;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001060};
1061
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001062class ConcurrentCopying::VerifyNoFromSpaceRefsFieldVisitor {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001063 public:
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001064 explicit VerifyNoFromSpaceRefsFieldVisitor(ConcurrentCopying* collector)
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001065 : collector_(collector) {}
1066
Mathieu Chartier31e88222016-10-14 18:43:19 -07001067 void operator()(ObjPtr<mirror::Object> obj,
1068 MemberOffset offset,
1069 bool is_static ATTRIBUTE_UNUSED) const
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001070 REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001071 mirror::Object* ref =
1072 obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001073 VerifyNoFromSpaceRefsVisitor visitor(collector_);
Mathieu Chartierbc632f02017-04-20 13:31:39 -07001074 visitor(ref, offset, obj.Ptr());
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001075 }
Mathieu Chartier31e88222016-10-14 18:43:19 -07001076 void operator()(ObjPtr<mirror::Class> klass,
1077 ObjPtr<mirror::Reference> ref) const
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001078 REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001079 CHECK(klass->IsTypeOfReferenceClass());
1080 this->operator()(ref, mirror::Reference::ReferentOffset(), false);
1081 }
1082
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001083 void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001084 REQUIRES_SHARED(Locks::mutator_lock_) {
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001085 if (!root->IsNull()) {
1086 VisitRoot(root);
1087 }
1088 }
1089
1090 void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001091 REQUIRES_SHARED(Locks::mutator_lock_) {
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001092 VerifyNoFromSpaceRefsVisitor visitor(collector_);
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001093 visitor(root->AsMirrorPtr());
1094 }
1095
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001096 private:
Mathieu Chartier97509952015-07-13 14:35:43 -07001097 ConcurrentCopying* const collector_;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001098};
1099
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001100class ConcurrentCopying::VerifyNoFromSpaceRefsObjectVisitor {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001101 public:
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001102 explicit VerifyNoFromSpaceRefsObjectVisitor(ConcurrentCopying* collector)
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001103 : collector_(collector) {}
1104 void operator()(mirror::Object* obj) const
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001105 REQUIRES_SHARED(Locks::mutator_lock_) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001106 ObjectCallback(obj, collector_);
1107 }
1108 static void ObjectCallback(mirror::Object* obj, void *arg)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001109 REQUIRES_SHARED(Locks::mutator_lock_) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001110 CHECK(obj != nullptr);
1111 ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
1112 space::RegionSpace* region_space = collector->RegionSpace();
1113 CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space";
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001114 VerifyNoFromSpaceRefsFieldVisitor visitor(collector);
Mathieu Chartierd08f66f2017-04-13 11:47:53 -07001115 obj->VisitReferences</*kVisitNativeRoots*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
1116 visitor,
1117 visitor);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001118 if (kUseBakerReadBarrier) {
Hiroshi Yamauchi12b58b22016-11-01 11:55:29 -07001119 CHECK_EQ(obj->GetReadBarrierState(), ReadBarrier::WhiteState())
1120 << "obj=" << obj << " non-white rb_state " << obj->GetReadBarrierState();
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001121 }
1122 }
1123
1124 private:
1125 ConcurrentCopying* const collector_;
1126};
1127
1128// Verify that there are no from-space references left after the marking phase.
1129void ConcurrentCopying::VerifyNoFromSpaceReferences() {
1130 Thread* self = Thread::Current();
1131 DCHECK(Locks::mutator_lock_->IsExclusiveHeld(self));
Hiroshi Yamauchi00370822015-08-18 14:47:25 -07001132 // Verify that all threads have is_gc_marking set to false.
1133 {
1134 MutexLock mu(self, *Locks::thread_list_lock_);
1135 std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
1136 for (Thread* thread : thread_list) {
1137 CHECK(!thread->GetIsGcMarking());
1138 }
1139 }
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001140 VerifyNoFromSpaceRefsObjectVisitor visitor(this);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001141 // Roots.
1142 {
1143 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001144 VerifyNoFromSpaceRefsVisitor ref_visitor(this);
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07001145 Runtime::Current()->VisitRoots(&ref_visitor);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001146 }
1147 // The to-space.
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001148 region_space_->WalkToSpace(VerifyNoFromSpaceRefsObjectVisitor::ObjectCallback, this);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001149 // Non-moving spaces.
1150 {
1151 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
1152 heap_->GetMarkBitmap()->Visit(visitor);
1153 }
1154 // The alloc stack.
1155 {
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001156 VerifyNoFromSpaceRefsVisitor ref_visitor(this);
Mathieu Chartiercb535da2015-01-23 13:50:03 -08001157 for (auto* it = heap_->allocation_stack_->Begin(), *end = heap_->allocation_stack_->End();
1158 it < end; ++it) {
1159 mirror::Object* const obj = it->AsMirrorPtr();
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001160 if (obj != nullptr && obj->GetClass() != nullptr) {
1161 // TODO: need to call this only if obj is alive?
1162 ref_visitor(obj);
1163 visitor(obj);
1164 }
1165 }
1166 }
1167 // TODO: LOS. But only refs in LOS are classes.
1168}
1169
1170// The following visitors are used to assert the to-space invariant.
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001171class ConcurrentCopying::AssertToSpaceInvariantRefsVisitor {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001172 public:
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001173 explicit AssertToSpaceInvariantRefsVisitor(ConcurrentCopying* collector)
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001174 : collector_(collector) {}
1175
1176 void operator()(mirror::Object* ref) const
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001177 REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001178 if (ref == nullptr) {
1179 // OK.
1180 return;
1181 }
1182 collector_->AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
1183 }
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001184
1185 private:
Mathieu Chartier97509952015-07-13 14:35:43 -07001186 ConcurrentCopying* const collector_;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001187};
1188
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001189class ConcurrentCopying::AssertToSpaceInvariantFieldVisitor {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001190 public:
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001191 explicit AssertToSpaceInvariantFieldVisitor(ConcurrentCopying* collector)
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001192 : collector_(collector) {}
1193
Mathieu Chartier31e88222016-10-14 18:43:19 -07001194 void operator()(ObjPtr<mirror::Object> obj,
1195 MemberOffset offset,
1196 bool is_static ATTRIBUTE_UNUSED) const
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001197 REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001198 mirror::Object* ref =
1199 obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001200 AssertToSpaceInvariantRefsVisitor visitor(collector_);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001201 visitor(ref);
1202 }
Mathieu Chartier31e88222016-10-14 18:43:19 -07001203 void operator()(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref ATTRIBUTE_UNUSED) const
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001204 REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001205 CHECK(klass->IsTypeOfReferenceClass());
1206 }
1207
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001208 void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001209 REQUIRES_SHARED(Locks::mutator_lock_) {
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001210 if (!root->IsNull()) {
1211 VisitRoot(root);
1212 }
1213 }
1214
1215 void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001216 REQUIRES_SHARED(Locks::mutator_lock_) {
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001217 AssertToSpaceInvariantRefsVisitor visitor(collector_);
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001218 visitor(root->AsMirrorPtr());
1219 }
1220
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001221 private:
Mathieu Chartier97509952015-07-13 14:35:43 -07001222 ConcurrentCopying* const collector_;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001223};
1224
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001225class ConcurrentCopying::AssertToSpaceInvariantObjectVisitor {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001226 public:
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001227 explicit AssertToSpaceInvariantObjectVisitor(ConcurrentCopying* collector)
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001228 : collector_(collector) {}
1229 void operator()(mirror::Object* obj) const
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001230 REQUIRES_SHARED(Locks::mutator_lock_) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001231 ObjectCallback(obj, collector_);
1232 }
1233 static void ObjectCallback(mirror::Object* obj, void *arg)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001234 REQUIRES_SHARED(Locks::mutator_lock_) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001235 CHECK(obj != nullptr);
1236 ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
1237 space::RegionSpace* region_space = collector->RegionSpace();
1238 CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space";
1239 collector->AssertToSpaceInvariant(nullptr, MemberOffset(0), obj);
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001240 AssertToSpaceInvariantFieldVisitor visitor(collector);
Mathieu Chartierd08f66f2017-04-13 11:47:53 -07001241 obj->VisitReferences</*kVisitNativeRoots*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
1242 visitor,
1243 visitor);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001244 }
1245
1246 private:
Mathieu Chartier97509952015-07-13 14:35:43 -07001247 ConcurrentCopying* const collector_;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001248};
1249
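// Checkpoint closure run on each thread: it hands the thread's local mark stack over to the
// collector (into revoked_mark_stacks_), optionally disables weak reference access for that
// thread, and then passes the GC barrier.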
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001250class ConcurrentCopying::RevokeThreadLocalMarkStackCheckpoint : public Closure {
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001251 public:
Roland Levillain3887c462015-08-12 18:15:42 +01001252 RevokeThreadLocalMarkStackCheckpoint(ConcurrentCopying* concurrent_copying,
1253 bool disable_weak_ref_access)
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001254 : concurrent_copying_(concurrent_copying),
1255 disable_weak_ref_access_(disable_weak_ref_access) {
1256 }
1257
1258 virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
1259 // Note: self is not necessarily equal to thread since thread may be suspended.
1260 Thread* self = Thread::Current();
1261 CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
1262 << thread->GetState() << " thread " << thread << " self " << self;
1263 // Revoke thread local mark stacks.
1264 accounting::AtomicStack<mirror::Object>* tl_mark_stack = thread->GetThreadLocalMarkStack();
1265 if (tl_mark_stack != nullptr) {
1266 MutexLock mu(self, concurrent_copying_->mark_stack_lock_);
1267 concurrent_copying_->revoked_mark_stacks_.push_back(tl_mark_stack);
1268 thread->SetThreadLocalMarkStack(nullptr);
1269 }
1270 // Disable weak ref access.
1271 if (disable_weak_ref_access_) {
1272 thread->SetWeakRefAccessEnabled(false);
1273 }
1274 // If thread is a running mutator, then act on behalf of the garbage collector.
1275 // See the code in ThreadList::RunCheckpoint.
Mathieu Chartier10d25082015-10-28 18:36:09 -07001276 concurrent_copying_->GetBarrier().Pass(self);
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001277 }
1278
1279 private:
1280 ConcurrentCopying* const concurrent_copying_;
1281 const bool disable_weak_ref_access_;
1282};
1283
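// Run the revoke checkpoint on all threads and wait on the GC barrier until every thread has
// executed it, temporarily releasing the shared mutator lock while waiting.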
Hiroshi Yamauchifebd0cf2016-09-14 19:31:25 -07001284void ConcurrentCopying::RevokeThreadLocalMarkStacks(bool disable_weak_ref_access,
1285 Closure* checkpoint_callback) {
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001286 Thread* self = Thread::Current();
1287 RevokeThreadLocalMarkStackCheckpoint check_point(this, disable_weak_ref_access);
1288 ThreadList* thread_list = Runtime::Current()->GetThreadList();
1289 gc_barrier_->Init(self, 0);
Hiroshi Yamauchifebd0cf2016-09-14 19:31:25 -07001290 size_t barrier_count = thread_list->RunCheckpoint(&check_point, checkpoint_callback);
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001291 // If there are no threads to wait for, which implies that all the checkpoint functions have
 1292 // finished, then there is no need to release the mutator lock.
1293 if (barrier_count == 0) {
1294 return;
1295 }
1296 Locks::mutator_lock_->SharedUnlock(self);
1297 {
1298 ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
1299 gc_barrier_->Increment(self, barrier_count);
1300 }
1301 Locks::mutator_lock_->SharedLock(self);
1302}
1303
1304void ConcurrentCopying::RevokeThreadLocalMarkStack(Thread* thread) {
1305 Thread* self = Thread::Current();
1306 CHECK_EQ(self, thread);
1307 accounting::AtomicStack<mirror::Object>* tl_mark_stack = thread->GetThreadLocalMarkStack();
1308 if (tl_mark_stack != nullptr) {
1309 CHECK(is_marking_);
1310 MutexLock mu(self, mark_stack_lock_);
1311 revoked_mark_stacks_.push_back(tl_mark_stack);
1312 thread->SetThreadLocalMarkStack(nullptr);
1313 }
1314}
1315
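// Drain the mark stack. Processing an entry can push new entries, so the stack must be observed
// empty twice in a row before marking is considered done.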
1316void ConcurrentCopying::ProcessMarkStack() {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001317 if (kVerboseMode) {
1318 LOG(INFO) << "ProcessMarkStack. ";
1319 }
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001320 bool empty_prev = false;
1321 while (true) {
1322 bool empty = ProcessMarkStackOnce();
1323 if (empty_prev && empty) {
1324 // Saw empty mark stack for a second time, done.
1325 break;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001326 }
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001327 empty_prev = empty;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001328 }
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001329}
1330
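// Process the mark stack once, using the strategy that matches the current mark stack mode
// (thread-local, shared, or GC-exclusive). Returns true if no entries were processed, i.e. the
// stack was empty.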
1331bool ConcurrentCopying::ProcessMarkStackOnce() {
1332 Thread* self = Thread::Current();
1333 CHECK(thread_running_gc_ != nullptr);
1334 CHECK(self == thread_running_gc_);
1335 CHECK(self->GetThreadLocalMarkStack() == nullptr);
1336 size_t count = 0;
1337 MarkStackMode mark_stack_mode = mark_stack_mode_.LoadRelaxed();
1338 if (mark_stack_mode == kMarkStackModeThreadLocal) {
1339 // Process the thread-local mark stacks and the GC mark stack.
Hiroshi Yamauchifebd0cf2016-09-14 19:31:25 -07001340 count += ProcessThreadLocalMarkStacks(false, nullptr);
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001341 while (!gc_mark_stack_->IsEmpty()) {
1342 mirror::Object* to_ref = gc_mark_stack_->PopBack();
1343 ProcessMarkStackRef(to_ref);
1344 ++count;
1345 }
1346 gc_mark_stack_->Reset();
1347 } else if (mark_stack_mode == kMarkStackModeShared) {
Hiroshi Yamauchi30493242016-11-03 13:06:52 -07001348 // Do an empty checkpoint to avoid a race with a mutator preempted in the middle of a read
1349 // barrier but before pushing onto the mark stack. b/32508093. Note the weak ref access is
1350 // disabled at this point.
1351 IssueEmptyCheckpoint();
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001352 // Process the shared GC mark stack with a lock.
1353 {
1354 MutexLock mu(self, mark_stack_lock_);
1355 CHECK(revoked_mark_stacks_.empty());
1356 }
1357 while (true) {
1358 std::vector<mirror::Object*> refs;
1359 {
1360 // Copy refs with lock. Note the number of refs should be small.
1361 MutexLock mu(self, mark_stack_lock_);
1362 if (gc_mark_stack_->IsEmpty()) {
1363 break;
1364 }
1365 for (StackReference<mirror::Object>* p = gc_mark_stack_->Begin();
1366 p != gc_mark_stack_->End(); ++p) {
1367 refs.push_back(p->AsMirrorPtr());
1368 }
1369 gc_mark_stack_->Reset();
1370 }
1371 for (mirror::Object* ref : refs) {
1372 ProcessMarkStackRef(ref);
1373 ++count;
1374 }
1375 }
1376 } else {
1377 CHECK_EQ(static_cast<uint32_t>(mark_stack_mode),
1378 static_cast<uint32_t>(kMarkStackModeGcExclusive));
1379 {
1380 MutexLock mu(self, mark_stack_lock_);
1381 CHECK(revoked_mark_stacks_.empty());
1382 }
1383 // Process the GC mark stack in the exclusive mode. No need to take the lock.
1384 while (!gc_mark_stack_->IsEmpty()) {
1385 mirror::Object* to_ref = gc_mark_stack_->PopBack();
1386 ProcessMarkStackRef(to_ref);
1387 ++count;
1388 }
1389 gc_mark_stack_->Reset();
1390 }
1391
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001392 // Return true if the stack was empty.
1393 return count == 0;
1394}
1395
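// Revoke all thread-local mark stacks via a checkpoint, process every reference on them, and
// recycle the emptied stacks into pooled_mark_stacks_ (or delete them if the pool is already
// full). Returns the number of references processed.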
Hiroshi Yamauchifebd0cf2016-09-14 19:31:25 -07001396size_t ConcurrentCopying::ProcessThreadLocalMarkStacks(bool disable_weak_ref_access,
1397 Closure* checkpoint_callback) {
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001398 // Run a checkpoint to collect all thread local mark stacks and iterate over them all.
Hiroshi Yamauchifebd0cf2016-09-14 19:31:25 -07001399 RevokeThreadLocalMarkStacks(disable_weak_ref_access, checkpoint_callback);
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001400 size_t count = 0;
1401 std::vector<accounting::AtomicStack<mirror::Object>*> mark_stacks;
1402 {
1403 MutexLock mu(Thread::Current(), mark_stack_lock_);
1404 // Make a copy of the mark stack vector.
1405 mark_stacks = revoked_mark_stacks_;
1406 revoked_mark_stacks_.clear();
1407 }
1408 for (accounting::AtomicStack<mirror::Object>* mark_stack : mark_stacks) {
1409 for (StackReference<mirror::Object>* p = mark_stack->Begin(); p != mark_stack->End(); ++p) {
1410 mirror::Object* to_ref = p->AsMirrorPtr();
1411 ProcessMarkStackRef(to_ref);
1412 ++count;
1413 }
1414 {
1415 MutexLock mu(Thread::Current(), mark_stack_lock_);
1416 if (pooled_mark_stacks_.size() >= kMarkStackPoolSize) {
 1417 // The pool already has enough stacks; delete this one.
1418 delete mark_stack;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001419 } else {
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001420 // Otherwise, put it into the pool for later reuse.
1421 mark_stack->Reset();
1422 pooled_mark_stacks_.push_back(mark_stack);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001423 }
1424 }
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001425 }
1426 return count;
1427}
1428
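// Process a single to-space reference from the mark stack: scan its fields, account its size
// toward the unevac from-space live bytes if it was newly marked there, and flip its read barrier
// state from gray to white, unless it is a reference object whose referent is not yet in the
// to-space (such references stay gray so that GetReferent() triggers a read barrier).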
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07001429inline void ConcurrentCopying::ProcessMarkStackRef(mirror::Object* to_ref) {
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001430 DCHECK(!region_space_->IsInFromSpace(to_ref));
1431 if (kUseBakerReadBarrier) {
Hiroshi Yamauchi12b58b22016-11-01 11:55:29 -07001432 DCHECK(to_ref->GetReadBarrierState() == ReadBarrier::GrayState())
1433 << " " << to_ref << " " << to_ref->GetReadBarrierState()
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001434 << " is_marked=" << IsMarked(to_ref);
1435 }
Mathieu Chartierc381c362016-08-23 13:27:53 -07001436 bool add_to_live_bytes = false;
1437 if (region_space_->IsInUnevacFromSpace(to_ref)) {
1438 // Mark the bitmap only in the GC thread here so that we don't need a CAS.
1439 if (!kUseBakerReadBarrier || !region_space_bitmap_->Set(to_ref)) {
 1440 // It may already be marked if we accidentally pushed the same object twice due to the racy
1441 // bitmap read in MarkUnevacFromSpaceRegion.
1442 Scan(to_ref);
1443 // Only add to the live bytes if the object was not already marked.
1444 add_to_live_bytes = true;
1445 }
1446 } else {
1447 Scan(to_ref);
1448 }
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001449 if (kUseBakerReadBarrier) {
Hiroshi Yamauchi12b58b22016-11-01 11:55:29 -07001450 DCHECK(to_ref->GetReadBarrierState() == ReadBarrier::GrayState())
1451 << " " << to_ref << " " << to_ref->GetReadBarrierState()
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001452 << " is_marked=" << IsMarked(to_ref);
1453 }
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07001454#ifdef USE_BAKER_OR_BROOKS_READ_BARRIER
Hiroshi Yamauchi39c12d42016-12-06 16:46:37 -08001455 mirror::Object* referent = nullptr;
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07001456 if (UNLIKELY((to_ref->GetClass<kVerifyNone, kWithoutReadBarrier>()->IsTypeOfReferenceClass() &&
Hiroshi Yamauchi39c12d42016-12-06 16:46:37 -08001457 (referent = to_ref->AsReference()->GetReferent<kWithoutReadBarrier>()) != nullptr &&
1458 !IsInToSpace(referent)))) {
Hiroshi Yamauchi8e674652015-12-22 11:09:18 -08001459 // Leave this reference gray in the queue so that GetReferent() will trigger a read barrier. We
1460 // will change it to white later in ReferenceQueue::DequeuePendingReference().
Richard Uhlere3627402016-02-02 13:36:55 -08001461 DCHECK(to_ref->AsReference()->GetPendingNext() != nullptr) << "Left unenqueued ref gray " << to_ref;
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001462 } else {
Hiroshi Yamauchi8e674652015-12-22 11:09:18 -08001463 // We may occasionally leave a reference white in the queue if its referent happens to be
1464 // concurrently marked after the Scan() call above has enqueued the Reference, in which case the
1465 // above IsInToSpace() evaluates to true and we change the color from gray to white here in this
1466 // else block.
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001467 if (kUseBakerReadBarrier) {
Hiroshi Yamauchi12b58b22016-11-01 11:55:29 -07001468 bool success = to_ref->AtomicSetReadBarrierState</*kCasRelease*/true>(
1469 ReadBarrier::GrayState(),
1470 ReadBarrier::WhiteState());
Hiroshi Yamauchi8e674652015-12-22 11:09:18 -08001471 DCHECK(success) << "Must succeed as we won the race.";
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001472 }
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001473 }
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07001474#else
1475 DCHECK(!kUseBakerReadBarrier);
1476#endif
Hiroshi Yamauchi8e674652015-12-22 11:09:18 -08001477
Mathieu Chartierc381c362016-08-23 13:27:53 -07001478 if (add_to_live_bytes) {
Hiroshi Yamauchi8e674652015-12-22 11:09:18 -08001479 // Add to the live bytes of the unevac from-space region. Note this code is always run by the
1480 // GC-running thread (no synchronization required).
1481 DCHECK(region_space_bitmap_->Test(to_ref));
Mathieu Chartierd08f66f2017-04-13 11:47:53 -07001482 size_t obj_size = to_ref->SizeOf<kDefaultVerifyFlags>();
Hiroshi Yamauchi8e674652015-12-22 11:09:18 -08001483 size_t alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment);
1484 region_space_->AddLiveBytes(to_ref, alloc_size);
1485 }
Andreas Gampee3ce7872017-02-22 13:36:21 -08001486 if (ReadBarrier::kEnableToSpaceInvariantChecks) {
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001487 AssertToSpaceInvariantObjectVisitor visitor(this);
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001488 visitor(to_ref);
1489 }
1490}
1491
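// Checkpoint callback used when switching to the shared mark stack mode; it globally disables
// weak reference access by clearing weak_ref_access_enabled_.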
Hiroshi Yamauchifebd0cf2016-09-14 19:31:25 -07001492class ConcurrentCopying::DisableWeakRefAccessCallback : public Closure {
1493 public:
1494 explicit DisableWeakRefAccessCallback(ConcurrentCopying* concurrent_copying)
1495 : concurrent_copying_(concurrent_copying) {
1496 }
1497
1498 void Run(Thread* self ATTRIBUTE_UNUSED) OVERRIDE REQUIRES(Locks::thread_list_lock_) {
1499 // This needs to run under the thread_list_lock_ critical section in ThreadList::RunCheckpoint()
1500 // to avoid a deadlock b/31500969.
1501 CHECK(concurrent_copying_->weak_ref_access_enabled_);
1502 concurrent_copying_->weak_ref_access_enabled_ = false;
1503 }
1504
1505 private:
1506 ConcurrentCopying* const concurrent_copying_;
1507};
1508
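// The mark stack mode is advanced only by the GC-running thread: kMarkStackModeThreadLocal ->
// kMarkStackModeShared (here, which also disables weak ref access) -> kMarkStackModeGcExclusive
// (in SwitchToGcExclusiveMarkStackMode).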
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001509void ConcurrentCopying::SwitchToSharedMarkStackMode() {
1510 Thread* self = Thread::Current();
1511 CHECK(thread_running_gc_ != nullptr);
1512 CHECK_EQ(self, thread_running_gc_);
1513 CHECK(self->GetThreadLocalMarkStack() == nullptr);
1514 MarkStackMode before_mark_stack_mode = mark_stack_mode_.LoadRelaxed();
1515 CHECK_EQ(static_cast<uint32_t>(before_mark_stack_mode),
1516 static_cast<uint32_t>(kMarkStackModeThreadLocal));
1517 mark_stack_mode_.StoreRelaxed(kMarkStackModeShared);
Hiroshi Yamauchifebd0cf2016-09-14 19:31:25 -07001518 DisableWeakRefAccessCallback dwrac(this);
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001519 // Process the thread local mark stacks one last time after switching to the shared mark stack
1520 // mode and disable weak ref accesses.
Hiroshi Yamauchifebd0cf2016-09-14 19:31:25 -07001521 ProcessThreadLocalMarkStacks(true, &dwrac);
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001522 if (kVerboseMode) {
1523 LOG(INFO) << "Switched to shared mark stack mode and disabled weak ref access";
1524 }
1525}
1526
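// Switch from the shared mode to the GC-exclusive mode; from this point on only the GC-running
// thread accesses the GC mark stack, so no lock is needed.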
1527void ConcurrentCopying::SwitchToGcExclusiveMarkStackMode() {
1528 Thread* self = Thread::Current();
1529 CHECK(thread_running_gc_ != nullptr);
1530 CHECK_EQ(self, thread_running_gc_);
1531 CHECK(self->GetThreadLocalMarkStack() == nullptr);
1532 MarkStackMode before_mark_stack_mode = mark_stack_mode_.LoadRelaxed();
1533 CHECK_EQ(static_cast<uint32_t>(before_mark_stack_mode),
1534 static_cast<uint32_t>(kMarkStackModeShared));
1535 mark_stack_mode_.StoreRelaxed(kMarkStackModeGcExclusive);
1536 QuasiAtomic::ThreadFenceForConstructor();
1537 if (kVerboseMode) {
1538 LOG(INFO) << "Switched to GC exclusive mark stack mode";
1539 }
1540}
1541
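// Assert that the mark stacks are empty. In the thread-local mode this first revokes the
// thread-local stacks; any leftover entries are logged and cause a fatal error.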
1542void ConcurrentCopying::CheckEmptyMarkStack() {
1543 Thread* self = Thread::Current();
1544 CHECK(thread_running_gc_ != nullptr);
1545 CHECK_EQ(self, thread_running_gc_);
1546 CHECK(self->GetThreadLocalMarkStack() == nullptr);
1547 MarkStackMode mark_stack_mode = mark_stack_mode_.LoadRelaxed();
1548 if (mark_stack_mode == kMarkStackModeThreadLocal) {
1549 // Thread-local mark stack mode.
Hiroshi Yamauchifebd0cf2016-09-14 19:31:25 -07001550 RevokeThreadLocalMarkStacks(false, nullptr);
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001551 MutexLock mu(Thread::Current(), mark_stack_lock_);
1552 if (!revoked_mark_stacks_.empty()) {
1553 for (accounting::AtomicStack<mirror::Object>* mark_stack : revoked_mark_stacks_) {
1554 while (!mark_stack->IsEmpty()) {
1555 mirror::Object* obj = mark_stack->PopBack();
1556 if (kUseBakerReadBarrier) {
Hiroshi Yamauchi12b58b22016-11-01 11:55:29 -07001557 uint32_t rb_state = obj->GetReadBarrierState();
1558 LOG(INFO) << "On mark queue : " << obj << " " << obj->PrettyTypeOf() << " rb_state="
1559 << rb_state << " is_marked=" << IsMarked(obj);
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001560 } else {
David Sehr709b0702016-10-13 09:12:37 -07001561 LOG(INFO) << "On mark queue : " << obj << " " << obj->PrettyTypeOf()
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001562 << " is_marked=" << IsMarked(obj);
1563 }
1564 }
1565 }
1566 LOG(FATAL) << "mark stack is not empty";
1567 }
1568 } else {
1569 // Shared, GC-exclusive, or off.
1570 MutexLock mu(Thread::Current(), mark_stack_lock_);
1571 CHECK(gc_mark_stack_->IsEmpty());
1572 CHECK(revoked_mark_stacks_.empty());
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001573 }
1574}
1575
1576void ConcurrentCopying::SweepSystemWeaks(Thread* self) {
1577 TimingLogger::ScopedTiming split("SweepSystemWeaks", GetTimings());
1578 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
Mathieu Chartier97509952015-07-13 14:35:43 -07001579 Runtime::Current()->SweepSystemWeaks(this);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001580}
1581
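// Mark the remaining allocation-stack objects as live, then sweep the continuous spaces
// (skipping the region space and the immune spaces) and the large object space.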
1582void ConcurrentCopying::Sweep(bool swap_bitmaps) {
1583 {
1584 TimingLogger::ScopedTiming t("MarkStackAsLive", GetTimings());
1585 accounting::ObjectStack* live_stack = heap_->GetLiveStack();
1586 if (kEnableFromSpaceAccountingCheck) {
1587 CHECK_GE(live_stack_freeze_size_, live_stack->Size());
1588 }
1589 heap_->MarkAllocStackAsLive(live_stack);
1590 live_stack->Reset();
1591 }
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001592 CheckEmptyMarkStack();
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001593 TimingLogger::ScopedTiming split("Sweep", GetTimings());
1594 for (const auto& space : GetHeap()->GetContinuousSpaces()) {
1595 if (space->IsContinuousMemMapAllocSpace()) {
1596 space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
Mathieu Chartier763a31e2015-11-16 16:05:55 -08001597 if (space == region_space_ || immune_spaces_.ContainsSpace(space)) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001598 continue;
1599 }
1600 TimingLogger::ScopedTiming split2(
1601 alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepAllocSpace", GetTimings());
1602 RecordFree(alloc_space->Sweep(swap_bitmaps));
1603 }
1604 }
1605 SweepLargeObjects(swap_bitmaps);
1606}
1607
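// Mark all zygote large objects so that SweepLargeObjects does not reclaim them.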
Mathieu Chartier962cd7a2016-08-16 12:15:59 -07001608void ConcurrentCopying::MarkZygoteLargeObjects() {
1609 TimingLogger::ScopedTiming split(__FUNCTION__, GetTimings());
1610 Thread* const self = Thread::Current();
1611 WriterMutexLock rmu(self, *Locks::heap_bitmap_lock_);
1612 space::LargeObjectSpace* const los = heap_->GetLargeObjectsSpace();
1613 // Pick the current live bitmap (mark bitmap if swapped).
1614 accounting::LargeObjectBitmap* const live_bitmap = los->GetLiveBitmap();
1615 accounting::LargeObjectBitmap* const mark_bitmap = los->GetMarkBitmap();
1616 // Walk through all of the objects and explicitly mark the zygote ones so they don't get swept.
Mathieu Chartier208aaf02016-10-25 10:45:08 -07001617 std::pair<uint8_t*, uint8_t*> range = los->GetBeginEndAtomic();
1618 live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(range.first),
1619 reinterpret_cast<uintptr_t>(range.second),
Mathieu Chartier962cd7a2016-08-16 12:15:59 -07001620 [mark_bitmap, los, self](mirror::Object* obj)
1621 REQUIRES(Locks::heap_bitmap_lock_)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001622 REQUIRES_SHARED(Locks::mutator_lock_) {
Mathieu Chartier962cd7a2016-08-16 12:15:59 -07001623 if (los->IsZygoteLargeObject(self, obj)) {
1624 mark_bitmap->Set(obj);
1625 }
1626 });
1627}
1628
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001629void ConcurrentCopying::SweepLargeObjects(bool swap_bitmaps) {
1630 TimingLogger::ScopedTiming split("SweepLargeObjects", GetTimings());
1631 RecordFreeLOS(heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps));
1632}
1633
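// Reclaim phase: verify that the mark stack is empty, record the bytes/objects freed by the
// collection, clear the from-space regions, sweep the non-moving spaces, and swap the bitmaps.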
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001634void ConcurrentCopying::ReclaimPhase() {
1635 TimingLogger::ScopedTiming split("ReclaimPhase", GetTimings());
1636 if (kVerboseMode) {
1637 LOG(INFO) << "GC ReclaimPhase";
1638 }
1639 Thread* self = Thread::Current();
1640
1641 {
1642 // Double-check that the mark stack is empty.
1643 // Note: need to set this after VerifyNoFromSpaceRef().
1644 is_asserting_to_space_invariant_ = false;
1645 QuasiAtomic::ThreadFenceForConstructor();
1646 if (kVerboseMode) {
1647 LOG(INFO) << "Issue an empty check point. ";
1648 }
1649 IssueEmptyCheckpoint();
1650 // Disable the check.
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001651 is_mark_stack_push_disallowed_.StoreSequentiallyConsistent(0);
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07001652 if (kUseBakerReadBarrier) {
1653 updated_all_immune_objects_.StoreSequentiallyConsistent(false);
1654 }
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001655 CheckEmptyMarkStack();
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001656 }
1657
1658 {
1659 // Record freed objects.
1660 TimingLogger::ScopedTiming split2("RecordFree", GetTimings());
1661 // Don't include thread-locals that are in the to-space.
Mathieu Chartier371b0472017-02-27 16:37:21 -08001662 const uint64_t from_bytes = region_space_->GetBytesAllocatedInFromSpace();
1663 const uint64_t from_objects = region_space_->GetObjectsAllocatedInFromSpace();
1664 const uint64_t unevac_from_bytes = region_space_->GetBytesAllocatedInUnevacFromSpace();
1665 const uint64_t unevac_from_objects = region_space_->GetObjectsAllocatedInUnevacFromSpace();
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001666 uint64_t to_bytes = bytes_moved_.LoadSequentiallyConsistent();
Mathieu Chartiercca44a02016-08-17 10:07:29 -07001667 cumulative_bytes_moved_.FetchAndAddRelaxed(to_bytes);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001668 uint64_t to_objects = objects_moved_.LoadSequentiallyConsistent();
Mathieu Chartiercca44a02016-08-17 10:07:29 -07001669 cumulative_objects_moved_.FetchAndAddRelaxed(to_objects);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001670 if (kEnableFromSpaceAccountingCheck) {
1671 CHECK_EQ(from_space_num_objects_at_first_pause_, from_objects + unevac_from_objects);
1672 CHECK_EQ(from_space_num_bytes_at_first_pause_, from_bytes + unevac_from_bytes);
1673 }
1674 CHECK_LE(to_objects, from_objects);
1675 CHECK_LE(to_bytes, from_bytes);
Mathieu Chartier371b0472017-02-27 16:37:21 -08001676 // cleared_bytes and cleared_objects may be greater than the from space equivalents since
1677 // ClearFromSpace may clear empty unevac regions.
1678 uint64_t cleared_bytes;
1679 uint64_t cleared_objects;
1680 {
1681 TimingLogger::ScopedTiming split4("ClearFromSpace", GetTimings());
1682 region_space_->ClearFromSpace(&cleared_bytes, &cleared_objects);
1683 CHECK_GE(cleared_bytes, from_bytes);
1684 CHECK_GE(cleared_objects, from_objects);
1685 }
1686 int64_t freed_bytes = cleared_bytes - to_bytes;
1687 int64_t freed_objects = cleared_objects - to_objects;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001688 if (kVerboseMode) {
1689 LOG(INFO) << "RecordFree:"
1690 << " from_bytes=" << from_bytes << " from_objects=" << from_objects
1691 << " unevac_from_bytes=" << unevac_from_bytes << " unevac_from_objects=" << unevac_from_objects
1692 << " to_bytes=" << to_bytes << " to_objects=" << to_objects
1693 << " freed_bytes=" << freed_bytes << " freed_objects=" << freed_objects
1694 << " from_space size=" << region_space_->FromSpaceSize()
1695 << " unevac_from_space size=" << region_space_->UnevacFromSpaceSize()
1696 << " to_space size=" << region_space_->ToSpaceSize();
1697 LOG(INFO) << "(before) num_bytes_allocated=" << heap_->num_bytes_allocated_.LoadSequentiallyConsistent();
1698 }
1699 RecordFree(ObjectBytePair(freed_objects, freed_bytes));
1700 if (kVerboseMode) {
1701 LOG(INFO) << "(after) num_bytes_allocated=" << heap_->num_bytes_allocated_.LoadSequentiallyConsistent();
1702 }
1703 }
1704
1705 {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001706 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001707 Sweep(false);
1708 SwapBitmaps();
1709 heap_->UnBindBitmaps();
1710
Mathieu Chartier7ec38dc2016-10-07 15:24:46 -07001711 // The bitmap was cleared at the start of the GC, so there is nothing we need to do here.
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07001712 DCHECK(region_space_bitmap_ != nullptr);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001713 region_space_bitmap_ = nullptr;
1714 }
1715
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001716 CheckEmptyMarkStack();
1717
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001718 if (kVerboseMode) {
1719 LOG(INFO) << "GC end of ReclaimPhase";
1720 }
1721}
1722
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001723// Assert the to-space invariant.
Mathieu Chartierd08f66f2017-04-13 11:47:53 -07001724void ConcurrentCopying::AssertToSpaceInvariant(mirror::Object* obj,
1725 MemberOffset offset,
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001726 mirror::Object* ref) {
Mathieu Chartierd08f66f2017-04-13 11:47:53 -07001727 CHECK_EQ(heap_->collector_type_, kCollectorTypeCC);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001728 if (is_asserting_to_space_invariant_) {
Mathieu Chartierd08f66f2017-04-13 11:47:53 -07001729 using RegionType = space::RegionSpace::RegionType;
1730 space::RegionSpace::RegionType type = region_space_->GetRegionType(ref);
1731 if (type == RegionType::kRegionTypeToSpace) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001732 // OK.
1733 return;
Mathieu Chartierd08f66f2017-04-13 11:47:53 -07001734 } else if (type == RegionType::kRegionTypeUnevacFromSpace) {
Mathieu Chartierc381c362016-08-23 13:27:53 -07001735 CHECK(IsMarkedInUnevacFromSpace(ref)) << ref;
Mathieu Chartierd08f66f2017-04-13 11:47:53 -07001736 } else if (UNLIKELY(type == RegionType::kRegionTypeFromSpace)) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001737 // Not OK. Do extra logging.
1738 if (obj != nullptr) {
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001739 LogFromSpaceRefHolder(obj, offset);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001740 }
Andreas Gampe3fec9ac2016-09-13 10:47:28 -07001741 ref->GetLockWord(false).Dump(LOG_STREAM(FATAL_WITHOUT_ABORT));
David Sehr709b0702016-10-13 09:12:37 -07001742 CHECK(false) << "Found from-space ref " << ref << " " << ref->PrettyTypeOf();
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001743 } else {
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001744 AssertToSpaceInvariantInNonMovingSpace(obj, ref);
1745 }
1746 }
1747}
1748
1749class RootPrinter {
1750 public:
1751 RootPrinter() { }
1752
1753 template <class MirrorType>
1754 ALWAYS_INLINE void VisitRootIfNonNull(mirror::CompressedReference<MirrorType>* root)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001755 REQUIRES_SHARED(Locks::mutator_lock_) {
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001756 if (!root->IsNull()) {
1757 VisitRoot(root);
1758 }
1759 }
1760
1761 template <class MirrorType>
1762 void VisitRoot(mirror::Object** root)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001763 REQUIRES_SHARED(Locks::mutator_lock_) {
Andreas Gampe3fec9ac2016-09-13 10:47:28 -07001764 LOG(FATAL_WITHOUT_ABORT) << "root=" << root << " ref=" << *root;
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001765 }
1766
1767 template <class MirrorType>
1768 void VisitRoot(mirror::CompressedReference<MirrorType>* root)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001769 REQUIRES_SHARED(Locks::mutator_lock_) {
Andreas Gampe3fec9ac2016-09-13 10:47:28 -07001770 LOG(FATAL_WITHOUT_ABORT) << "root=" << root << " ref=" << root->AsMirrorPtr();
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001771 }
1772};
1773
1774void ConcurrentCopying::AssertToSpaceInvariant(GcRootSource* gc_root_source,
1775 mirror::Object* ref) {
1776 CHECK(heap_->collector_type_ == kCollectorTypeCC) << static_cast<size_t>(heap_->collector_type_);
1777 if (is_asserting_to_space_invariant_) {
1778 if (region_space_->IsInToSpace(ref)) {
1779 // OK.
1780 return;
1781 } else if (region_space_->IsInUnevacFromSpace(ref)) {
Mathieu Chartierc381c362016-08-23 13:27:53 -07001782 CHECK(IsMarkedInUnevacFromSpace(ref)) << ref;
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001783 } else if (region_space_->IsInFromSpace(ref)) {
1784 // Not OK. Do extra logging.
1785 if (gc_root_source == nullptr) {
1786 // No info.
1787 } else if (gc_root_source->HasArtField()) {
1788 ArtField* field = gc_root_source->GetArtField();
David Sehr709b0702016-10-13 09:12:37 -07001789 LOG(FATAL_WITHOUT_ABORT) << "gc root in field " << field << " "
1790 << ArtField::PrettyField(field);
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001791 RootPrinter root_printer;
1792 field->VisitRoots(root_printer);
1793 } else if (gc_root_source->HasArtMethod()) {
1794 ArtMethod* method = gc_root_source->GetArtMethod();
David Sehr709b0702016-10-13 09:12:37 -07001795 LOG(FATAL_WITHOUT_ABORT) << "gc root in method " << method << " "
1796 << ArtMethod::PrettyMethod(method);
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001797 RootPrinter root_printer;
Andreas Gampe542451c2016-07-26 09:02:02 -07001798 method->VisitRoots(root_printer, kRuntimePointerSize);
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001799 }
Andreas Gampe3fec9ac2016-09-13 10:47:28 -07001800 ref->GetLockWord(false).Dump(LOG_STREAM(FATAL_WITHOUT_ABORT));
1801 region_space_->DumpNonFreeRegions(LOG_STREAM(FATAL_WITHOUT_ABORT));
1802 PrintFileToLog("/proc/self/maps", LogSeverity::FATAL_WITHOUT_ABORT);
1803 MemMap::DumpMaps(LOG_STREAM(FATAL_WITHOUT_ABORT), true);
David Sehr709b0702016-10-13 09:12:37 -07001804 CHECK(false) << "Found from-space ref " << ref << " " << ref->PrettyTypeOf();
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001805 } else {
1806 AssertToSpaceInvariantInNonMovingSpace(nullptr, ref);
1807 }
1808 }
1809}
1810
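// Log diagnostic information about an object that holds a from-space reference: its type, its
// read barrier state, which space it is in, and whether it is marked.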
1811void ConcurrentCopying::LogFromSpaceRefHolder(mirror::Object* obj, MemberOffset offset) {
1812 if (kUseBakerReadBarrier) {
David Sehr709b0702016-10-13 09:12:37 -07001813 LOG(INFO) << "holder=" << obj << " " << obj->PrettyTypeOf()
Hiroshi Yamauchi12b58b22016-11-01 11:55:29 -07001814 << " holder rb_state=" << obj->GetReadBarrierState();
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001815 } else {
David Sehr709b0702016-10-13 09:12:37 -07001816 LOG(INFO) << "holder=" << obj << " " << obj->PrettyTypeOf();
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001817 }
1818 if (region_space_->IsInFromSpace(obj)) {
1819 LOG(INFO) << "holder is in the from-space.";
1820 } else if (region_space_->IsInToSpace(obj)) {
1821 LOG(INFO) << "holder is in the to-space.";
1822 } else if (region_space_->IsInUnevacFromSpace(obj)) {
1823 LOG(INFO) << "holder is in the unevac from-space.";
Mathieu Chartierc381c362016-08-23 13:27:53 -07001824 if (IsMarkedInUnevacFromSpace(obj)) {
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001825 LOG(INFO) << "holder is marked in the region space bitmap.";
1826 } else {
1827 LOG(INFO) << "holder is not marked in the region space bitmap.";
1828 }
1829 } else {
1830 // In a non-moving space.
Mathieu Chartier763a31e2015-11-16 16:05:55 -08001831 if (immune_spaces_.ContainsObject(obj)) {
1832 LOG(INFO) << "holder is in an immune image or the zygote space.";
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001833 } else {
Mathieu Chartier763a31e2015-11-16 16:05:55 -08001834 LOG(INFO) << "holder is in a non-immune, non-moving (or main) space.";
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001835 accounting::ContinuousSpaceBitmap* mark_bitmap =
1836 heap_mark_bitmap_->GetContinuousSpaceBitmap(obj);
1837 accounting::LargeObjectBitmap* los_bitmap =
1838 heap_mark_bitmap_->GetLargeObjectBitmap(obj);
1839 CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
1840 bool is_los = mark_bitmap == nullptr;
1841 if (!is_los && mark_bitmap->Test(obj)) {
1842 LOG(INFO) << "holder is marked in the mark bit map.";
1843 } else if (is_los && los_bitmap->Test(obj)) {
1844 LOG(INFO) << "holder is marked in the los bit map.";
1845 } else {
1846 // If ref is on the allocation stack, then it is considered
 1847 // marked/alive (but not necessarily on the live stack).
1848 if (IsOnAllocStack(obj)) {
1849 LOG(INFO) << "holder is on the alloc stack.";
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001850 } else {
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001851 LOG(INFO) << "holder is not marked or on the alloc stack.";
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001852 }
1853 }
1854 }
1855 }
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001856 LOG(INFO) << "offset=" << offset.SizeValue();
1857}
1858
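// Check the to-space invariant for a reference into a non-moving space: an immune-space
// reference must be gray unless all immune objects have already been updated; any other
// reference must be marked in its (LOS) mark bitmap or be on the allocation stack.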
1859void ConcurrentCopying::AssertToSpaceInvariantInNonMovingSpace(mirror::Object* obj,
1860 mirror::Object* ref) {
 1861 // In a non-moving space. Check that the ref is marked.
Mathieu Chartier763a31e2015-11-16 16:05:55 -08001862 if (immune_spaces_.ContainsObject(ref)) {
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001863 if (kUseBakerReadBarrier) {
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07001864 // Immune object may not be gray if called from the GC.
1865 if (Thread::Current() == thread_running_gc_ && !gc_grays_immune_objects_) {
1866 return;
1867 }
1868 bool updated_all_immune_objects = updated_all_immune_objects_.LoadSequentiallyConsistent();
Hiroshi Yamauchi12b58b22016-11-01 11:55:29 -07001869 CHECK(updated_all_immune_objects || ref->GetReadBarrierState() == ReadBarrier::GrayState())
1870 << "Unmarked immune space ref. obj=" << obj << " rb_state="
1871 << (obj != nullptr ? obj->GetReadBarrierState() : 0U)
1872 << " ref=" << ref << " ref rb_state=" << ref->GetReadBarrierState()
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07001873 << " updated_all_immune_objects=" << updated_all_immune_objects;
Hiroshi Yamauchi3f64f252015-06-12 18:35:06 -07001874 }
1875 } else {
1876 accounting::ContinuousSpaceBitmap* mark_bitmap =
1877 heap_mark_bitmap_->GetContinuousSpaceBitmap(ref);
1878 accounting::LargeObjectBitmap* los_bitmap =
1879 heap_mark_bitmap_->GetLargeObjectBitmap(ref);
1880 CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
1881 bool is_los = mark_bitmap == nullptr;
1882 if ((!is_los && mark_bitmap->Test(ref)) ||
1883 (is_los && los_bitmap->Test(ref))) {
1884 // OK.
1885 } else {
1886 // If ref is on the allocation stack, then it may not be
1887 // marked live, but considered marked/alive (but not
1888 // necessarily on the live stack).
1889 CHECK(IsOnAllocStack(ref)) << "Unmarked ref that's not on the allocation stack. "
1890 << "obj=" << obj << " ref=" << ref;
1891 }
1892 }
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001893}
1894
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001895// Used to scan ref fields of an object.
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001896class ConcurrentCopying::RefFieldsVisitor {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001897 public:
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001898 explicit RefFieldsVisitor(ConcurrentCopying* collector)
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001899 : collector_(collector) {}
1900
Mathieu Chartierd08f66f2017-04-13 11:47:53 -07001901 void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001902 const ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_)
1903 REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
Mathieu Chartierd08f66f2017-04-13 11:47:53 -07001904 collector_->Process(obj, offset);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001905 }
1906
Mathieu Chartier31e88222016-10-14 18:43:19 -07001907 void operator()(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref) const
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001908 REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001909 CHECK(klass->IsTypeOfReferenceClass());
1910 collector_->DelayReferenceReferent(klass, ref);
1911 }
1912
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001913 void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07001914 ALWAYS_INLINE
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001915 REQUIRES_SHARED(Locks::mutator_lock_) {
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001916 if (!root->IsNull()) {
1917 VisitRoot(root);
1918 }
1919 }
1920
1921 void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07001922 ALWAYS_INLINE
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001923 REQUIRES_SHARED(Locks::mutator_lock_) {
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07001924 collector_->MarkRoot</*kGrayImmuneObject*/false>(root);
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001925 }
1926
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001927 private:
1928 ConcurrentCopying* const collector_;
1929};
1930
1931// Scan ref fields of an object.
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07001932inline void ConcurrentCopying::Scan(mirror::Object* to_ref) {
Hiroshi Yamauchi9b60d502017-02-03 15:09:26 -08001933 if (kDisallowReadBarrierDuringScan && !Runtime::Current()->IsActiveTransaction()) {
Mathieu Chartier5ffa0782016-07-27 10:45:47 -07001934 // Avoid all read barriers while visiting references to help performance.
Hiroshi Yamauchi9b60d502017-02-03 15:09:26 -08001935 // Don't do this in transaction mode because we may read the old value of a field, which may
 1936 // trigger read barriers.
Mathieu Chartier5ffa0782016-07-27 10:45:47 -07001937 Thread::Current()->ModifyDebugDisallowReadBarrier(1);
1938 }
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001939 DCHECK(!region_space_->IsInFromSpace(to_ref));
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07001940 DCHECK_EQ(Thread::Current(), thread_running_gc_);
Mathieu Chartiera07f5592016-06-16 11:44:28 -07001941 RefFieldsVisitor visitor(this);
Hiroshi Yamauchi5496f692016-02-17 13:29:59 -08001942 // Disable the read barrier for performance reasons.
1943 to_ref->VisitReferences</*kVisitNativeRoots*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
1944 visitor, visitor);
Hiroshi Yamauchi9b60d502017-02-03 15:09:26 -08001945 if (kDisallowReadBarrierDuringScan && !Runtime::Current()->IsActiveTransaction()) {
Mathieu Chartier5ffa0782016-07-27 10:45:47 -07001946 Thread::Current()->ModifyDebugDisallowReadBarrier(-1);
1947 }
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001948}
1949
1950// Process a field.
1951inline void ConcurrentCopying::Process(mirror::Object* obj, MemberOffset offset) {
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07001952 DCHECK_EQ(Thread::Current(), thread_running_gc_);
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001953 mirror::Object* ref = obj->GetFieldObject<
1954 mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset);
Mathieu Chartierc381c362016-08-23 13:27:53 -07001955 mirror::Object* to_ref = Mark</*kGrayImmuneObject*/false, /*kFromGCThread*/true>(ref);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001956 if (to_ref == ref) {
1957 return;
1958 }
1959 // This may fail if the mutator writes to the field at the same time. But it's ok.
1960 mirror::Object* expected_ref = ref;
1961 mirror::Object* new_ref = to_ref;
1962 do {
1963 if (expected_ref !=
1964 obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset)) {
1965 // It was updated by the mutator.
1966 break;
1967 }
Hiroshi Yamauchifed3e2f2015-10-20 11:11:56 -07001968 } while (!obj->CasFieldWeakRelaxedObjectWithoutWriteBarrier<
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001969 false, false, kVerifyNone>(offset, expected_ref, new_ref));
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001970}
1971
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07001972// Process some roots.
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07001973inline void ConcurrentCopying::VisitRoots(
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07001974 mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED) {
1975 for (size_t i = 0; i < count; ++i) {
1976 mirror::Object** root = roots[i];
1977 mirror::Object* ref = *root;
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07001978 mirror::Object* to_ref = Mark(ref);
1979 if (to_ref == ref) {
Mathieu Chartier4809d0a2015-04-07 10:39:04 -07001980 continue;
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07001981 }
1982 Atomic<mirror::Object*>* addr = reinterpret_cast<Atomic<mirror::Object*>*>(root);
1983 mirror::Object* expected_ref = ref;
1984 mirror::Object* new_ref = to_ref;
1985 do {
1986 if (expected_ref != addr->LoadRelaxed()) {
1987 // It was updated by the mutator.
1988 break;
1989 }
Hiroshi Yamauchifed3e2f2015-10-20 11:11:56 -07001990 } while (!addr->CompareExchangeWeakRelaxed(expected_ref, new_ref));
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07001991 }
1992}
1993
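// Mark the object referenced by a compressed root; if the object moved, install the new address
// with a weak relaxed CAS (a CAS failure means a mutator already updated the root).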
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07001994template<bool kGrayImmuneObject>
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07001995inline void ConcurrentCopying::MarkRoot(mirror::CompressedReference<mirror::Object>* root) {
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001996 DCHECK(!root->IsNull());
1997 mirror::Object* const ref = root->AsMirrorPtr();
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07001998 mirror::Object* to_ref = Mark<kGrayImmuneObject>(ref);
Mathieu Chartierda7c6502015-07-23 16:01:26 -07001999 if (to_ref != ref) {
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07002000 auto* addr = reinterpret_cast<Atomic<mirror::CompressedReference<mirror::Object>>*>(root);
2001 auto expected_ref = mirror::CompressedReference<mirror::Object>::FromMirrorPtr(ref);
2002 auto new_ref = mirror::CompressedReference<mirror::Object>::FromMirrorPtr(to_ref);
Mathieu Chartierda7c6502015-07-23 16:01:26 -07002003 // If the CAS fails, then it was updated by the mutator.
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07002004 do {
2005 if (ref != addr->LoadRelaxed().AsMirrorPtr()) {
2006 // It was updated by the mutator.
2007 break;
2008 }
Hiroshi Yamauchifed3e2f2015-10-20 11:11:56 -07002009 } while (!addr->CompareExchangeWeakRelaxed(expected_ref, new_ref));
Mathieu Chartierbb87e0f2015-04-03 11:21:55 -07002010 }
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002011}
2012
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07002013inline void ConcurrentCopying::VisitRoots(
Mathieu Chartierda7c6502015-07-23 16:01:26 -07002014 mirror::CompressedReference<mirror::Object>** roots, size_t count,
2015 const RootInfo& info ATTRIBUTE_UNUSED) {
2016 for (size_t i = 0; i < count; ++i) {
2017 mirror::CompressedReference<mirror::Object>* const root = roots[i];
2018 if (!root->IsNull()) {
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07002019 // kGrayImmuneObject is true because this is used for the thread flip.
2020 MarkRoot</*kGrayImmuneObject*/true>(root);
Mathieu Chartierda7c6502015-07-23 16:01:26 -07002021 }
2022 }
2023}
2024
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07002025// Temporarily set gc_grays_immune_objects_ to true in a scope if the current thread is the GC thread.
2026class ConcurrentCopying::ScopedGcGraysImmuneObjects {
2027 public:
2028 explicit ScopedGcGraysImmuneObjects(ConcurrentCopying* collector)
2029 : collector_(collector), enabled_(false) {
2030 if (kUseBakerReadBarrier &&
2031 collector_->thread_running_gc_ == Thread::Current() &&
2032 !collector_->gc_grays_immune_objects_) {
2033 collector_->gc_grays_immune_objects_ = true;
2034 enabled_ = true;
2035 }
2036 }
2037
2038 ~ScopedGcGraysImmuneObjects() {
2039 if (kUseBakerReadBarrier &&
2040 collector_->thread_running_gc_ == Thread::Current() &&
2041 enabled_) {
2042 DCHECK(collector_->gc_grays_immune_objects_);
2043 collector_->gc_grays_immune_objects_ = false;
2044 }
2045 }
2046
2047 private:
2048 ConcurrentCopying* const collector_;
2049 bool enabled_;
2050};
2051
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002052// Fill the given memory block with a dummy object. Used to fill in a
 2053// copy of an object that was lost in a race.
2054void ConcurrentCopying::FillWithDummyObject(mirror::Object* dummy_obj, size_t byte_size) {
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07002055 // GC doesn't gray immune objects while scanning immune objects. But we need to trigger the read
 2056// barriers here because we need the updated reference to the int array class, etc. Temporarily set
2057 // gc_grays_immune_objects_ to true so that we won't cause a DCHECK failure in MarkImmuneSpace().
2058 ScopedGcGraysImmuneObjects scoped_gc_gray_immune_objects(this);
Roland Levillain14d90572015-07-16 10:52:26 +01002059 CHECK_ALIGNED(byte_size, kObjectAlignment);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002060 memset(dummy_obj, 0, byte_size);
Mathieu Chartierd6636d32016-07-28 11:02:38 -07002061 // Avoid going through the read barrier since kDisallowReadBarrierDuringScan may be enabled.
2062 // Explicitly mark to make sure to get an object in the to-space.
2063 mirror::Class* int_array_class = down_cast<mirror::Class*>(
2064 Mark(mirror::IntArray::GetArrayClass<kWithoutReadBarrier>()));
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002065 CHECK(int_array_class != nullptr);
2066 AssertToSpaceInvariant(nullptr, MemberOffset(0), int_array_class);
Mathieu Chartier5ffa0782016-07-27 10:45:47 -07002067 size_t component_size = int_array_class->GetComponentSize<kWithoutReadBarrier>();
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002068 CHECK_EQ(component_size, sizeof(int32_t));
2069 size_t data_offset = mirror::Array::DataOffset(component_size).SizeValue();
2070 if (data_offset > byte_size) {
2071 // An int array is too big. Use java.lang.Object.
Mathieu Chartier3ed8ec12017-04-20 19:28:54 -07002072 AssertToSpaceInvariant(nullptr, MemberOffset(0), java_lang_Object_);
2073 CHECK_EQ(byte_size, (java_lang_Object_->GetObjectSize<kVerifyNone, kWithoutReadBarrier>()));
2074 dummy_obj->SetClass(java_lang_Object_);
Mathieu Chartierd08f66f2017-04-13 11:47:53 -07002075 CHECK_EQ(byte_size, (dummy_obj->SizeOf<kVerifyNone>()));
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002076 } else {
2077 // Use an int array.
2078 dummy_obj->SetClass(int_array_class);
Mathieu Chartier5ffa0782016-07-27 10:45:47 -07002079 CHECK((dummy_obj->IsArrayInstance<kVerifyNone, kWithoutReadBarrier>()));
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002080 int32_t length = (byte_size - data_offset) / component_size;
Mathieu Chartier5ffa0782016-07-27 10:45:47 -07002081 mirror::Array* dummy_arr = dummy_obj->AsArray<kVerifyNone, kWithoutReadBarrier>();
2082 dummy_arr->SetLength(length);
2083 CHECK_EQ(dummy_arr->GetLength(), length)
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002084 << "byte_size=" << byte_size << " length=" << length
2085 << " component_size=" << component_size << " data_offset=" << data_offset;
Mathieu Chartierd08f66f2017-04-13 11:47:53 -07002086 CHECK_EQ(byte_size, (dummy_obj->SizeOf<kVerifyNone>()))
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002087 << "byte_size=" << byte_size << " length=" << length
2088 << " component_size=" << component_size << " data_offset=" << data_offset;
2089 }
2090}
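
// Editor's note: an illustrative sketch of the filler-sizing arithmetic used above, with
// hypothetical names and constants (the real header sizes come from mirror::Array::DataOffset()
// and the java.lang.Object size). Given a gap of byte_size bytes, the filler is either a plain
// java.lang.Object, when even a zero-length int array would not fit, or an int[] whose length
// consumes the remainder exactly, which keeps the heap walkable object-by-object.
static size_t FillerIntArrayLength(size_t byte_size, size_t data_offset) {
  // The caller guarantees data_offset <= byte_size; both values are multiples of
  // sizeof(int32_t) here, so the division is exact and no byte of the gap is left undescribed.
  return (byte_size - data_offset) / sizeof(int32_t);
}
// For example, with an assumed 16-byte int[] header, a 40-byte gap becomes an int[6]:
// 16 + 6 * 4 == 40 bytes.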
2091
2092// Reuse the memory blocks that were copies of objects lost in the race.
2093mirror::Object* ConcurrentCopying::AllocateInSkippedBlock(size_t alloc_size) {
2094 // Try to reuse the blocks that were unused due to CAS failures.
Roland Levillain14d90572015-07-16 10:52:26 +01002095 CHECK_ALIGNED(alloc_size, space::RegionSpace::kAlignment);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002096 Thread* self = Thread::Current();
2097 size_t min_object_size = RoundUp(sizeof(mirror::Object), space::RegionSpace::kAlignment);
Mathieu Chartierd6636d32016-07-28 11:02:38 -07002098 size_t byte_size;
2099 uint8_t* addr;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002100 {
Mathieu Chartierd6636d32016-07-28 11:02:38 -07002101 MutexLock mu(self, skipped_blocks_lock_);
2102 auto it = skipped_blocks_map_.lower_bound(alloc_size);
2103 if (it == skipped_blocks_map_.end()) {
2104 // Not found.
2105 return nullptr;
2106 }
2107 byte_size = it->first;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002108 CHECK_GE(byte_size, alloc_size);
2109 if (byte_size > alloc_size && byte_size - alloc_size < min_object_size) {
2110 // If remainder would be too small for a dummy object, retry with a larger request size.
2111 it = skipped_blocks_map_.lower_bound(alloc_size + min_object_size);
2112 if (it == skipped_blocks_map_.end()) {
2113 // Not found.
2114 return nullptr;
2115 }
Roland Levillain14d90572015-07-16 10:52:26 +01002116 CHECK_ALIGNED(it->first - alloc_size, space::RegionSpace::kAlignment);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002117 CHECK_GE(it->first - alloc_size, min_object_size)
2118 << "byte_size=" << byte_size << " it->first=" << it->first << " alloc_size=" << alloc_size;
2119 }
Mathieu Chartierd6636d32016-07-28 11:02:38 -07002120 // Found a block.
2121 CHECK(it != skipped_blocks_map_.end());
2122 byte_size = it->first;
2123 addr = it->second;
2124 CHECK_GE(byte_size, alloc_size);
2125 CHECK(region_space_->IsInToSpace(reinterpret_cast<mirror::Object*>(addr)));
2126 CHECK_ALIGNED(byte_size, space::RegionSpace::kAlignment);
2127 if (kVerboseMode) {
2128 LOG(INFO) << "Reusing skipped bytes : " << reinterpret_cast<void*>(addr) << ", " << byte_size;
2129 }
2130 skipped_blocks_map_.erase(it);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002131 }
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002132 memset(addr, 0, byte_size);
2133 if (byte_size > alloc_size) {
2134 // Return the remainder to the map.
Roland Levillain14d90572015-07-16 10:52:26 +01002135 CHECK_ALIGNED(byte_size - alloc_size, space::RegionSpace::kAlignment);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002136 CHECK_GE(byte_size - alloc_size, min_object_size);
Mathieu Chartierd6636d32016-07-28 11:02:38 -07002137    // FillWithDummyObject may mark an object, so avoid holding skipped_blocks_lock_ to prevent a
2138    // lock-order violation and a possible deadlock. The deadlock case is recursive:
2139 // FillWithDummyObject -> IntArray::GetArrayClass -> Mark -> Copy -> AllocateInSkippedBlock.
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002140 FillWithDummyObject(reinterpret_cast<mirror::Object*>(addr + alloc_size),
2141 byte_size - alloc_size);
2142 CHECK(region_space_->IsInToSpace(reinterpret_cast<mirror::Object*>(addr + alloc_size)));
Mathieu Chartierd6636d32016-07-28 11:02:38 -07002143 {
2144 MutexLock mu(self, skipped_blocks_lock_);
2145 skipped_blocks_map_.insert(std::make_pair(byte_size - alloc_size, addr + alloc_size));
2146 }
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002147 }
2148 return reinterpret_cast<mirror::Object*>(addr);
2149}
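
// Editor's note: a self-contained model of the best-fit lookup above, with hypothetical names
// (assumes <map> and <cstdint> are available). The map is keyed by block size, and a block is
// only usable if its leftover tail is either zero or at least min_object_size, because the tail
// must later be described by a filler (dummy) object.
using SkippedBlockMap = std::multimap<size_t, uint8_t*>;  // block byte size -> block start

static uint8_t* TakeBestFitBlock(SkippedBlockMap* blocks,
                                 size_t alloc_size,
                                 size_t min_object_size,
                                 size_t* out_block_size) {
  auto it = blocks->lower_bound(alloc_size);  // Smallest block that is large enough.
  if (it == blocks->end()) {
    return nullptr;                           // No block can satisfy the request.
  }
  if (it->first > alloc_size && it->first - alloc_size < min_object_size) {
    // The tail would be too small for a filler object; retry asking for alloc_size plus room
    // for a minimal object, so the tail is either zero or big enough.
    it = blocks->lower_bound(alloc_size + min_object_size);
    if (it == blocks->end()) {
      return nullptr;
    }
  }
  *out_block_size = it->first;
  uint8_t* const addr = it->second;
  blocks->erase(it);
  if (*out_block_size > alloc_size) {
    // Hand the tail back for reuse; the real code first fills it with a dummy object, and does
    // so outside the lock to avoid the recursive-marking deadlock described above.
    blocks->insert(std::make_pair(*out_block_size - alloc_size, addr + alloc_size));
  }
  return addr;
}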
2150
2151mirror::Object* ConcurrentCopying::Copy(mirror::Object* from_ref) {
2152 DCHECK(region_space_->IsInFromSpace(from_ref));
Mathieu Chartierd08f66f2017-04-13 11:47:53 -07002153  // There must be no read barrier here, to avoid a nested RB violating the to-space invariant.
2154 // Note that from_ref is a from space ref so the SizeOf() call will access the from-space meta
2155 // objects, but it's ok and necessary.
2156 size_t obj_size = from_ref->SizeOf<kDefaultVerifyFlags>();
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002157 size_t region_space_alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment);
2158 size_t region_space_bytes_allocated = 0U;
2159 size_t non_moving_space_bytes_allocated = 0U;
2160 size_t bytes_allocated = 0U;
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07002161 size_t dummy;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002162 mirror::Object* to_ref = region_space_->AllocNonvirtual<true>(
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07002163 region_space_alloc_size, &region_space_bytes_allocated, nullptr, &dummy);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002164 bytes_allocated = region_space_bytes_allocated;
2165 if (to_ref != nullptr) {
2166 DCHECK_EQ(region_space_alloc_size, region_space_bytes_allocated);
2167 }
2168 bool fall_back_to_non_moving = false;
2169 if (UNLIKELY(to_ref == nullptr)) {
2170 // Failed to allocate in the region space. Try the skipped blocks.
2171 to_ref = AllocateInSkippedBlock(region_space_alloc_size);
2172 if (to_ref != nullptr) {
2173      // Succeeded in allocating from a skipped block.
2174 if (heap_->use_tlab_) {
2175        // This is necessary for the TLAB case as the allocation is not accounted for in the space.
2176 region_space_->RecordAlloc(to_ref);
2177 }
2178 bytes_allocated = region_space_alloc_size;
2179 } else {
2180 // Fall back to the non-moving space.
2181 fall_back_to_non_moving = true;
2182 if (kVerboseMode) {
2183 LOG(INFO) << "Out of memory in the to-space. Fall back to non-moving. skipped_bytes="
2184 << to_space_bytes_skipped_.LoadSequentiallyConsistent()
2185 << " skipped_objects=" << to_space_objects_skipped_.LoadSequentiallyConsistent();
2186 }
2188 to_ref = heap_->non_moving_space_->Alloc(Thread::Current(), obj_size,
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07002189 &non_moving_space_bytes_allocated, nullptr, &dummy);
Mathieu Chartierb01335c2017-03-22 13:15:01 -07002190 if (UNLIKELY(to_ref == nullptr)) {
2191 LOG(FATAL_WITHOUT_ABORT) << "Fall-back non-moving space allocation failed for a "
2192 << obj_size << " byte object in region type "
2193 << region_space_->GetRegionType(from_ref);
2194 LOG(FATAL) << "Object address=" << from_ref << " type=" << from_ref->PrettyTypeOf();
2195 }
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002196 bytes_allocated = non_moving_space_bytes_allocated;
2197 // Mark it in the mark bitmap.
2198 accounting::ContinuousSpaceBitmap* mark_bitmap =
2199 heap_mark_bitmap_->GetContinuousSpaceBitmap(to_ref);
2200 CHECK(mark_bitmap != nullptr);
2201 CHECK(!mark_bitmap->AtomicTestAndSet(to_ref));
2202 }
2203 }
2204 DCHECK(to_ref != nullptr);
2205
Mathieu Chartierd818adb2016-09-15 13:12:47 -07002206 // Copy the object excluding the lock word since that is handled in the loop.
2207 to_ref->SetClass(from_ref->GetClass<kVerifyNone, kWithoutReadBarrier>());
2208 const size_t kObjectHeaderSize = sizeof(mirror::Object);
2209 DCHECK_GE(obj_size, kObjectHeaderSize);
2210 static_assert(kObjectHeaderSize == sizeof(mirror::HeapReference<mirror::Class>) +
2211 sizeof(LockWord),
2212 "Object header size does not match");
2213  // Memcpy can tear word-sized fields since it may copy byte-by-byte. It is only safe to do this since the
2214 // object in the from space is immutable other than the lock word. b/31423258
2215 memcpy(reinterpret_cast<uint8_t*>(to_ref) + kObjectHeaderSize,
2216 reinterpret_cast<const uint8_t*>(from_ref) + kObjectHeaderSize,
2217 obj_size - kObjectHeaderSize);
2218
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002219 // Attempt to install the forward pointer. This is in a loop as the
2220 // lock word atomic write can fail.
2221 while (true) {
Mathieu Chartierd818adb2016-09-15 13:12:47 -07002222 LockWord old_lock_word = from_ref->GetLockWord(false);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002223
2224 if (old_lock_word.GetState() == LockWord::kForwardingAddress) {
2225 // Lost the race. Another thread (either GC or mutator) stored
2226 // the forwarding pointer first. Make the lost copy (to_ref)
2227 // look like a valid but dead (dummy) object and keep it for
2228 // future reuse.
2229 FillWithDummyObject(to_ref, bytes_allocated);
2230 if (!fall_back_to_non_moving) {
2231 DCHECK(region_space_->IsInToSpace(to_ref));
2232 if (bytes_allocated > space::RegionSpace::kRegionSize) {
2233 // Free the large alloc.
2234 region_space_->FreeLarge(to_ref, bytes_allocated);
2235 } else {
2236 // Record the lost copy for later reuse.
2237 heap_->num_bytes_allocated_.FetchAndAddSequentiallyConsistent(bytes_allocated);
2238 to_space_bytes_skipped_.FetchAndAddSequentiallyConsistent(bytes_allocated);
2239 to_space_objects_skipped_.FetchAndAddSequentiallyConsistent(1);
2240 MutexLock mu(Thread::Current(), skipped_blocks_lock_);
2241 skipped_blocks_map_.insert(std::make_pair(bytes_allocated,
2242 reinterpret_cast<uint8_t*>(to_ref)));
2243 }
2244 } else {
2245 DCHECK(heap_->non_moving_space_->HasAddress(to_ref));
2246 DCHECK_EQ(bytes_allocated, non_moving_space_bytes_allocated);
2247 // Free the non-moving-space chunk.
2248 accounting::ContinuousSpaceBitmap* mark_bitmap =
2249 heap_mark_bitmap_->GetContinuousSpaceBitmap(to_ref);
2250 CHECK(mark_bitmap != nullptr);
2251 CHECK(mark_bitmap->Clear(to_ref));
2252 heap_->non_moving_space_->Free(Thread::Current(), to_ref);
2253 }
2254
2255 // Get the winner's forward ptr.
2256 mirror::Object* lost_fwd_ptr = to_ref;
2257 to_ref = reinterpret_cast<mirror::Object*>(old_lock_word.ForwardingAddress());
2258 CHECK(to_ref != nullptr);
2259 CHECK_NE(to_ref, lost_fwd_ptr);
Mathieu Chartierdfcd6f42016-09-13 10:02:48 -07002260 CHECK(region_space_->IsInToSpace(to_ref) || heap_->non_moving_space_->HasAddress(to_ref))
2261 << "to_ref=" << to_ref << " " << heap_->DumpSpaces();
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002262 CHECK_NE(to_ref->GetLockWord(false).GetState(), LockWord::kForwardingAddress);
2263 return to_ref;
2264 }
2265
Mathieu Chartierd818adb2016-09-15 13:12:47 -07002266 // Copy the old lock word over since we did not copy it yet.
2267 to_ref->SetLockWord(old_lock_word, false);
Hiroshi Yamauchi60f63f52015-04-23 16:12:40 -07002268    // Set the read barrier state to gray.
2269 if (kUseBakerReadBarrier) {
Hiroshi Yamauchi12b58b22016-11-01 11:55:29 -07002270 to_ref->SetReadBarrierState(ReadBarrier::GrayState());
Hiroshi Yamauchi60f63f52015-04-23 16:12:40 -07002271 }
2272
Mathieu Chartiera8131262016-11-29 17:55:19 -08002273 // Do a fence to prevent the field CAS in ConcurrentCopying::Process from possibly reordering
2274 // before the object copy.
2275 QuasiAtomic::ThreadFenceRelease();
2276
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002277 LockWord new_lock_word = LockWord::FromForwardingAddress(reinterpret_cast<size_t>(to_ref));
2278
2279 // Try to atomically write the fwd ptr.
Mathieu Chartiera8131262016-11-29 17:55:19 -08002280 bool success = from_ref->CasLockWordWeakRelaxed(old_lock_word, new_lock_word);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002281 if (LIKELY(success)) {
2282 // The CAS succeeded.
Mathieu Chartiera8131262016-11-29 17:55:19 -08002283 objects_moved_.FetchAndAddRelaxed(1);
2284 bytes_moved_.FetchAndAddRelaxed(region_space_alloc_size);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002285 if (LIKELY(!fall_back_to_non_moving)) {
2286 DCHECK(region_space_->IsInToSpace(to_ref));
2287 } else {
2288 DCHECK(heap_->non_moving_space_->HasAddress(to_ref));
2289 DCHECK_EQ(bytes_allocated, non_moving_space_bytes_allocated);
2290 }
2291 if (kUseBakerReadBarrier) {
Hiroshi Yamauchi12b58b22016-11-01 11:55:29 -07002292 DCHECK(to_ref->GetReadBarrierState() == ReadBarrier::GrayState());
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002293 }
2294 DCHECK(GetFwdPtr(from_ref) == to_ref);
2295 CHECK_NE(to_ref->GetLockWord(false).GetState(), LockWord::kForwardingAddress);
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07002296 PushOntoMarkStack(to_ref);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002297 return to_ref;
2298 } else {
2299 // The CAS failed. It may have lost the race or may have failed
2300 // due to monitor/hashcode ops. Either way, retry.
2301 }
2302 }
2303}
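
// Editor's note: a toy, self-contained model of the forwarding-pointer race handled in Copy()
// above (hypothetical names; the real code packs the forwarding address into the ART LockWord
// and uses a weak CAS in a loop because monitor/hash-code updates can also change that word).
// The pattern is: copy the payload first, publish with release ordering, then CAS; a CAS loss
// means "discard my copy and adopt the winner's address". Assumes <atomic> and <cstdint>.
struct ToyForwardableObject {
  std::atomic<uintptr_t> forwarding{0};  // 0 means "not forwarded yet".
};

static uintptr_t InstallForwardingAddress(ToyForwardableObject* from, uintptr_t my_copy_addr) {
  uintptr_t expected = 0;
  // Release ordering makes the copied payload visible before the forwarding address is.
  if (from->forwarding.compare_exchange_strong(expected, my_copy_addr,
                                               std::memory_order_release,
                                               std::memory_order_acquire)) {
    return my_copy_addr;  // Won the race: every later reader resolves to our copy.
  }
  // Lost the race: 'expected' now holds the winner's address; our copy must be recycled
  // (the real code turns it into a dummy object, frees it, or queues it for reuse).
  return expected;
}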
2304
2305mirror::Object* ConcurrentCopying::IsMarked(mirror::Object* from_ref) {
2306 DCHECK(from_ref != nullptr);
Hiroshi Yamauchid25f8422015-01-30 16:25:12 -08002307 space::RegionSpace::RegionType rtype = region_space_->GetRegionType(from_ref);
2308 if (rtype == space::RegionSpace::RegionType::kRegionTypeToSpace) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002309 // It's already marked.
2310 return from_ref;
2311 }
2312 mirror::Object* to_ref;
Hiroshi Yamauchid25f8422015-01-30 16:25:12 -08002313 if (rtype == space::RegionSpace::RegionType::kRegionTypeFromSpace) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002314 to_ref = GetFwdPtr(from_ref);
2315 DCHECK(to_ref == nullptr || region_space_->IsInToSpace(to_ref) ||
2316 heap_->non_moving_space_->HasAddress(to_ref))
2317 << "from_ref=" << from_ref << " to_ref=" << to_ref;
Hiroshi Yamauchid25f8422015-01-30 16:25:12 -08002318 } else if (rtype == space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace) {
Mathieu Chartierc381c362016-08-23 13:27:53 -07002319 if (IsMarkedInUnevacFromSpace(from_ref)) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002320 to_ref = from_ref;
2321 } else {
2322 to_ref = nullptr;
2323 }
2324 } else {
2325 // from_ref is in a non-moving space.
Mathieu Chartier763a31e2015-11-16 16:05:55 -08002326 if (immune_spaces_.ContainsObject(from_ref)) {
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07002327 // An immune object is alive.
2328 to_ref = from_ref;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002329 } else {
2330 // Non-immune non-moving space. Use the mark bitmap.
2331 accounting::ContinuousSpaceBitmap* mark_bitmap =
2332 heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref);
2333 accounting::LargeObjectBitmap* los_bitmap =
2334 heap_mark_bitmap_->GetLargeObjectBitmap(from_ref);
2335 CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
2336 bool is_los = mark_bitmap == nullptr;
2337 if (!is_los && mark_bitmap->Test(from_ref)) {
2338 // Already marked.
2339 to_ref = from_ref;
2340 } else if (is_los && los_bitmap->Test(from_ref)) {
2341 // Already marked in LOS.
2342 to_ref = from_ref;
2343 } else {
2344 // Not marked.
2345 if (IsOnAllocStack(from_ref)) {
2346 // If on the allocation stack, it's considered marked.
2347 to_ref = from_ref;
2348 } else {
2349 // Not marked.
2350 to_ref = nullptr;
2351 }
2352 }
2353 }
2354 }
2355 return to_ref;
2356}
2357
2358bool ConcurrentCopying::IsOnAllocStack(mirror::Object* ref) {
2359 QuasiAtomic::ThreadFenceAcquire();
2360 accounting::ObjectStack* alloc_stack = GetAllocationStack();
Mathieu Chartiercb535da2015-01-23 13:50:03 -08002361 return alloc_stack->Contains(ref);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002362}
2363
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07002364mirror::Object* ConcurrentCopying::MarkNonMoving(mirror::Object* ref) {
2365 // ref is in a non-moving space (from_ref == to_ref).
2366 DCHECK(!region_space_->HasAddress(ref)) << ref;
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07002367 DCHECK(!immune_spaces_.ContainsObject(ref));
2368 // Use the mark bitmap.
2369 accounting::ContinuousSpaceBitmap* mark_bitmap =
2370 heap_mark_bitmap_->GetContinuousSpaceBitmap(ref);
2371 accounting::LargeObjectBitmap* los_bitmap =
2372 heap_mark_bitmap_->GetLargeObjectBitmap(ref);
2373 CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
2374 bool is_los = mark_bitmap == nullptr;
2375 if (!is_los && mark_bitmap->Test(ref)) {
2376 // Already marked.
2377 if (kUseBakerReadBarrier) {
Hiroshi Yamauchi12b58b22016-11-01 11:55:29 -07002378 DCHECK(ref->GetReadBarrierState() == ReadBarrier::GrayState() ||
2379 ref->GetReadBarrierState() == ReadBarrier::WhiteState());
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002380 }
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07002381 } else if (is_los && los_bitmap->Test(ref)) {
2382 // Already marked in LOS.
2383 if (kUseBakerReadBarrier) {
Hiroshi Yamauchi12b58b22016-11-01 11:55:29 -07002384 DCHECK(ref->GetReadBarrierState() == ReadBarrier::GrayState() ||
2385 ref->GetReadBarrierState() == ReadBarrier::WhiteState());
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07002386 }
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002387 } else {
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07002388 // Not marked.
2389 if (IsOnAllocStack(ref)) {
2390 // If it's on the allocation stack, it's considered marked. Keep it white.
2391 // Objects on the allocation stack need not be marked.
2392 if (!is_los) {
2393 DCHECK(!mark_bitmap->Test(ref));
2394 } else {
2395 DCHECK(!los_bitmap->Test(ref));
Nicolas Geoffrayddeb1722016-06-28 08:25:59 +00002396 }
Nicolas Geoffrayddeb1722016-06-28 08:25:59 +00002397 if (kUseBakerReadBarrier) {
Hiroshi Yamauchi12b58b22016-11-01 11:55:29 -07002398 DCHECK_EQ(ref->GetReadBarrierState(), ReadBarrier::WhiteState());
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002399 }
2400 } else {
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07002401 // For the baker-style RB, we need to handle 'false-gray' cases. See the
2402 // kRegionTypeUnevacFromSpace-case comment in Mark().
2403 if (kUseBakerReadBarrier) {
2404 // Test the bitmap first to reduce the chance of false gray cases.
2405 if ((!is_los && mark_bitmap->Test(ref)) ||
2406 (is_los && los_bitmap->Test(ref))) {
2407 return ref;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002408 }
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07002409 }
2410      // Neither marked nor on the allocation stack. Try to mark it.
2411 // This may or may not succeed, which is ok.
2412 bool cas_success = false;
2413 if (kUseBakerReadBarrier) {
Hiroshi Yamauchi12b58b22016-11-01 11:55:29 -07002414 cas_success = ref->AtomicSetReadBarrierState(ReadBarrier::WhiteState(),
2415 ReadBarrier::GrayState());
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07002416 }
2417 if (!is_los && mark_bitmap->AtomicTestAndSet(ref)) {
2418 // Already marked.
2419 if (kUseBakerReadBarrier && cas_success &&
Hiroshi Yamauchi12b58b22016-11-01 11:55:29 -07002420 ref->GetReadBarrierState() == ReadBarrier::GrayState()) {
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07002421 PushOntoFalseGrayStack(ref);
2422 }
2423 } else if (is_los && los_bitmap->AtomicTestAndSet(ref)) {
2424 // Already marked in LOS.
2425 if (kUseBakerReadBarrier && cas_success &&
Hiroshi Yamauchi12b58b22016-11-01 11:55:29 -07002426 ref->GetReadBarrierState() == ReadBarrier::GrayState()) {
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07002427 PushOntoFalseGrayStack(ref);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002428 }
2429 } else {
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07002430 // Newly marked.
Hiroshi Yamauchi8e674652015-12-22 11:09:18 -08002431 if (kUseBakerReadBarrier) {
Hiroshi Yamauchi12b58b22016-11-01 11:55:29 -07002432 DCHECK_EQ(ref->GetReadBarrierState(), ReadBarrier::GrayState());
Hiroshi Yamauchi8e674652015-12-22 11:09:18 -08002433 }
Hiroshi Yamauchid8db5a22016-06-28 14:07:41 -07002434 PushOntoMarkStack(ref);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002435 }
2436 }
2437 }
Hiroshi Yamauchi723e6ce2015-10-28 20:59:47 -07002438 return ref;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002439}
2440
2441void ConcurrentCopying::FinishPhase() {
Mathieu Chartiera9d82fe2016-01-25 20:06:11 -08002442 Thread* const self = Thread::Current();
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07002443 {
Mathieu Chartiera9d82fe2016-01-25 20:06:11 -08002444 MutexLock mu(self, mark_stack_lock_);
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07002445 CHECK_EQ(pooled_mark_stacks_.size(), kMarkStackPoolSize);
2446 }
Mathieu Chartiera1467d02017-02-22 09:22:50 -08002447 // kVerifyNoMissingCardMarks relies on the region space cards not being cleared to avoid false
2448 // positives.
2449 if (!kVerifyNoMissingCardMarks) {
Mathieu Chartier6e6078a2016-10-24 15:45:41 -07002450 TimingLogger::ScopedTiming split("ClearRegionSpaceCards", GetTimings());
2451    // We do not currently use the region space cards at all, so madvise them away to save RAM.
2452 heap_->GetCardTable()->ClearCardRange(region_space_->Begin(), region_space_->Limit());
Mathieu Chartier6e6078a2016-10-24 15:45:41 -07002453 }
2454 {
2455 MutexLock mu(self, skipped_blocks_lock_);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002456 skipped_blocks_map_.clear();
2457 }
Mathieu Chartier56fe2582016-07-14 13:30:03 -07002458 {
2459 ReaderMutexLock mu(self, *Locks::mutator_lock_);
Mathieu Chartier21328a12016-07-22 10:47:45 -07002460 {
2461 WriterMutexLock mu2(self, *Locks::heap_bitmap_lock_);
2462 heap_->ClearMarkedObjects();
2463 }
2464 if (kUseBakerReadBarrier && kFilterModUnionCards) {
2465 TimingLogger::ScopedTiming split("FilterModUnionCards", GetTimings());
2466 ReaderMutexLock mu2(self, *Locks::heap_bitmap_lock_);
Mathieu Chartier21328a12016-07-22 10:47:45 -07002467 for (space::ContinuousSpace* space : immune_spaces_.GetSpaces()) {
2468 DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
Mathieu Chartier6e6078a2016-10-24 15:45:41 -07002469 accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
Mathieu Chartier21328a12016-07-22 10:47:45 -07002470 // Filter out cards that don't need to be set.
2471 if (table != nullptr) {
2472 table->FilterCards();
2473 }
2474 }
2475 }
Mathieu Chartier36a270a2016-07-28 18:08:51 -07002476 if (kUseBakerReadBarrier) {
2477 TimingLogger::ScopedTiming split("EmptyRBMarkBitStack", GetTimings());
Mathieu Chartier6e6078a2016-10-24 15:45:41 -07002478 DCHECK(rb_mark_bit_stack_ != nullptr);
Mathieu Chartier36a270a2016-07-28 18:08:51 -07002479 const auto* limit = rb_mark_bit_stack_->End();
2480 for (StackReference<mirror::Object>* it = rb_mark_bit_stack_->Begin(); it != limit; ++it) {
2481 CHECK(it->AsMirrorPtr()->AtomicSetMarkBit(1, 0));
2482 }
2483 rb_mark_bit_stack_->Reset();
2484 }
Mathieu Chartier56fe2582016-07-14 13:30:03 -07002485 }
2486 if (measure_read_barrier_slow_path_) {
2487 MutexLock mu(self, rb_slow_path_histogram_lock_);
2488 rb_slow_path_time_histogram_.AdjustAndAddValue(rb_slow_path_ns_.LoadRelaxed());
2489 rb_slow_path_count_total_ += rb_slow_path_count_.LoadRelaxed();
2490 rb_slow_path_count_gc_total_ += rb_slow_path_count_gc_.LoadRelaxed();
2491 }
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002492}
2493
Hiroshi Yamauchi65f5f242016-12-19 11:44:47 -08002494bool ConcurrentCopying::IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* field,
2495 bool do_atomic_update) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002496 mirror::Object* from_ref = field->AsMirrorPtr();
Hiroshi Yamauchi65f5f242016-12-19 11:44:47 -08002497 if (from_ref == nullptr) {
2498 return true;
2499 }
Mathieu Chartier97509952015-07-13 14:35:43 -07002500 mirror::Object* to_ref = IsMarked(from_ref);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002501 if (to_ref == nullptr) {
2502 return false;
2503 }
2504 if (from_ref != to_ref) {
Hiroshi Yamauchi65f5f242016-12-19 11:44:47 -08002505 if (do_atomic_update) {
2506 do {
2507 if (field->AsMirrorPtr() != from_ref) {
2508 // Concurrently overwritten by a mutator.
2509 break;
2510 }
2511 } while (!field->CasWeakRelaxed(from_ref, to_ref));
2512 } else {
2513 QuasiAtomic::ThreadFenceRelease();
2514 field->Assign(to_ref);
2515 QuasiAtomic::ThreadFenceSequentiallyConsistent();
2516 }
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002517 }
2518 return true;
2519}
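
// Editor's note: a self-contained sketch of the conditional "heal the field" update above,
// using std::atomic in place of mirror::HeapReference (hypothetical names; assumes <atomic>).
// The weak CAS loop only replaces the stale from-space pointer; if a mutator has already stored
// something else into the field, that newer value must not be clobbered.
template <typename T>
static void HealFieldIfStillStale(std::atomic<T*>* field, T* from_ref, T* to_ref) {
  T* expected = from_ref;
  while (!field->compare_exchange_weak(expected, to_ref, std::memory_order_relaxed)) {
    if (expected != from_ref) {
      return;               // Concurrently overwritten by a mutator; leave the new value alone.
    }
    expected = from_ref;    // Spurious CAS failure; retry with the same expectation.
  }
}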
2520
Mathieu Chartier97509952015-07-13 14:35:43 -07002521mirror::Object* ConcurrentCopying::MarkObject(mirror::Object* from_ref) {
2522 return Mark(from_ref);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002523}
2524
Mathieu Chartier31e88222016-10-14 18:43:19 -07002525void ConcurrentCopying::DelayReferenceReferent(ObjPtr<mirror::Class> klass,
2526 ObjPtr<mirror::Reference> reference) {
Mathieu Chartier97509952015-07-13 14:35:43 -07002527 heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, reference, this);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002528}
2529
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07002530void ConcurrentCopying::ProcessReferences(Thread* self) {
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002531 TimingLogger::ScopedTiming split("ProcessReferences", GetTimings());
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07002532  // We don't really need to hold the heap bitmap lock as we use CAS to mark in the bitmaps.
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002533 WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
2534 GetHeap()->GetReferenceProcessor()->ProcessReferences(
Mathieu Chartier97509952015-07-13 14:35:43 -07002535 true /*concurrent*/, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(), this);
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08002536}
2537
2538void ConcurrentCopying::RevokeAllThreadLocalBuffers() {
2539 TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
2540 region_space_->RevokeAllThreadLocalBuffers();
2541}
2542
Mathieu Chartier56fe2582016-07-14 13:30:03 -07002543mirror::Object* ConcurrentCopying::MarkFromReadBarrierWithMeasurements(mirror::Object* from_ref) {
2544 if (Thread::Current() != thread_running_gc_) {
2545 rb_slow_path_count_.FetchAndAddRelaxed(1u);
2546 } else {
2547 rb_slow_path_count_gc_.FetchAndAddRelaxed(1u);
2548 }
2549 ScopedTrace tr(__FUNCTION__);
2550 const uint64_t start_time = measure_read_barrier_slow_path_ ? NanoTime() : 0u;
2551 mirror::Object* ret = Mark(from_ref);
2552 if (measure_read_barrier_slow_path_) {
2553 rb_slow_path_ns_.FetchAndAddRelaxed(NanoTime() - start_time);
2554 }
2555 return ret;
2556}
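
// Editor's note: an illustrative sketch of the measurement pattern above using only the
// standard library (hypothetical names; the real code uses ART's Atomic<> counters and
// NanoTime()). Relaxed increments are sufficient because the totals are only aggregated later,
// under rb_slow_path_histogram_lock_. Assumes <atomic>, <chrono>, and <cstdint>.
static std::atomic<uint64_t> g_slow_path_calls{0};
static std::atomic<uint64_t> g_slow_path_ns{0};

static void* MeasuredSlowPath(void* (*slow_path)(void* arg), void* arg) {
  g_slow_path_calls.fetch_add(1, std::memory_order_relaxed);
  const auto start = std::chrono::steady_clock::now();
  void* const result = slow_path(arg);
  const auto elapsed = std::chrono::steady_clock::now() - start;
  g_slow_path_ns.fetch_add(
      static_cast<uint64_t>(
          std::chrono::duration_cast<std::chrono::nanoseconds>(elapsed).count()),
      std::memory_order_relaxed);
  return result;
}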
2557
2558void ConcurrentCopying::DumpPerformanceInfo(std::ostream& os) {
2559 GarbageCollector::DumpPerformanceInfo(os);
2560 MutexLock mu(Thread::Current(), rb_slow_path_histogram_lock_);
2561 if (rb_slow_path_time_histogram_.SampleSize() > 0) {
2562 Histogram<uint64_t>::CumulativeData cumulative_data;
2563 rb_slow_path_time_histogram_.CreateHistogram(&cumulative_data);
2564 rb_slow_path_time_histogram_.PrintConfidenceIntervals(os, 0.99, cumulative_data);
2565 }
2566 if (rb_slow_path_count_total_ > 0) {
2567 os << "Slow path count " << rb_slow_path_count_total_ << "\n";
2568 }
2569 if (rb_slow_path_count_gc_total_ > 0) {
2570 os << "GC slow path count " << rb_slow_path_count_gc_total_ << "\n";
2571 }
Mathieu Chartiercca44a02016-08-17 10:07:29 -07002572 os << "Cumulative bytes moved " << cumulative_bytes_moved_.LoadRelaxed() << "\n";
2573 os << "Cumulative objects moved " << cumulative_objects_moved_.LoadRelaxed() << "\n";
Mathieu Chartier56fe2582016-07-14 13:30:03 -07002574}
2575
Hiroshi Yamauchid5307ec2014-03-27 21:07:51 -07002576} // namespace collector
2577} // namespace gc
2578} // namespace art