/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "concurrent_copying.h"

#include "art_field-inl.h"
#include "base/stl_util.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/reference_processor.h"
#include "gc/space/image_space.h"
#include "gc/space/space.h"
#include "intern_table.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "scoped_thread_state_change.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "well_known_classes.h"

namespace art {
namespace gc {
namespace collector {

ConcurrentCopying::ConcurrentCopying(Heap* heap, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix + (name_prefix.empty() ? "" : " ") +
                       "concurrent copying + mark sweep"),
      region_space_(nullptr), gc_barrier_(new Barrier(0)),
      gc_mark_stack_(accounting::ObjectStack::Create("concurrent copying gc mark stack",
                                                     2 * MB, 2 * MB)),
      mark_stack_lock_("concurrent copying mark stack lock", kMarkSweepMarkStackLock),
      thread_running_gc_(nullptr),
      is_marking_(false), is_active_(false), is_asserting_to_space_invariant_(false),
      heap_mark_bitmap_(nullptr), live_stack_freeze_size_(0), mark_stack_mode_(kMarkStackModeOff),
      weak_ref_access_enabled_(true),
      skipped_blocks_lock_("concurrent copying bytes blocks lock", kMarkSweepMarkStackLock),
      rb_table_(heap_->GetReadBarrierTable()),
      force_evacuate_all_(false) {
  static_assert(space::RegionSpace::kRegionSize == accounting::ReadBarrierTable::kRegionSize,
                "The region space size and the read barrier table region size must match");
  cc_heap_bitmap_.reset(new accounting::HeapBitmap(heap));
  Thread* self = Thread::Current();
  {
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // Cache the mark bitmap so that we won't have to lock heap_bitmap_lock_ in Mark(), which
    // could cause a nested lock on heap_bitmap_lock_ when the GC triggers a read barrier while
    // collecting, or a lock order violation (class_linker_lock_ and heap_bitmap_lock_).
    heap_mark_bitmap_ = heap->GetMarkBitmap();
  }
  {
    MutexLock mu(self, mark_stack_lock_);
    for (size_t i = 0; i < kMarkStackPoolSize; ++i) {
      accounting::AtomicStack<mirror::Object>* mark_stack =
          accounting::AtomicStack<mirror::Object>::Create(
              "thread local mark stack", kMarkStackSize, kMarkStackSize);
      pooled_mark_stacks_.push_back(mark_stack);
    }
  }
}
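
// The pool above pre-allocates kMarkStackPoolSize thread-local mark stacks so that
// PushOntoMarkStack() can usually hand one to a mutator without allocating while marking is in
// progress; a new stack is created only when the pool runs dry (see PushOntoMarkStack() below).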

void ConcurrentCopying::MarkHeapReference(
    mirror::HeapReference<mirror::Object>* from_ref ATTRIBUTE_UNUSED) {
  // Unused, usually called from mod union tables.
  UNIMPLEMENTED(FATAL);
}

ConcurrentCopying::~ConcurrentCopying() {
  STLDeleteElements(&pooled_mark_stacks_);
}

void ConcurrentCopying::RunPhases() {
  CHECK(kUseBakerReadBarrier || kUseTableLookupReadBarrier);
  CHECK(!is_active_);
  is_active_ = true;
  Thread* self = Thread::Current();
  thread_running_gc_ = self;
  Locks::mutator_lock_->AssertNotHeld(self);
  {
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    InitializePhase();
  }
  FlipThreadRoots();
  {
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    MarkingPhase();
  }
  // Verify no from-space refs. This causes a pause.
  if (kEnableNoFromSpaceRefsVerification || kIsDebugBuild) {
    TimingLogger::ScopedTiming split("(Paused)VerifyNoFromSpaceReferences", GetTimings());
    ScopedPause pause(this);
    CheckEmptyMarkStack();
    if (kVerboseMode) {
      LOG(INFO) << "Verifying no from-space refs";
    }
    VerifyNoFromSpaceReferences();
    if (kVerboseMode) {
      LOG(INFO) << "Done verifying no from-space refs";
    }
    CheckEmptyMarkStack();
  }
  {
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    ReclaimPhase();
  }
  FinishPhase();
  CHECK(is_active_);
  is_active_ = false;
  thread_running_gc_ = nullptr;
}

void ConcurrentCopying::BindBitmaps() {
  Thread* self = Thread::Current();
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
  for (const auto& space : heap_->GetContinuousSpaces()) {
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect ||
        space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect) {
      CHECK(space->IsZygoteSpace() || space->IsImageSpace());
      CHECK(immune_region_.AddContinuousSpace(space)) << "Failed to add space " << *space;
      const char* bitmap_name = space->IsImageSpace() ? "cc image space bitmap" :
          "cc zygote space bitmap";
      // TODO: try avoiding using bitmaps for image/zygote to save space.
      accounting::ContinuousSpaceBitmap* bitmap =
          accounting::ContinuousSpaceBitmap::Create(bitmap_name, space->Begin(), space->Capacity());
      cc_heap_bitmap_->AddContinuousSpaceBitmap(bitmap);
      cc_bitmaps_.push_back(bitmap);
    } else if (space == region_space_) {
      accounting::ContinuousSpaceBitmap* bitmap =
          accounting::ContinuousSpaceBitmap::Create("cc region space bitmap",
                                                    space->Begin(), space->Capacity());
      cc_heap_bitmap_->AddContinuousSpaceBitmap(bitmap);
      cc_bitmaps_.push_back(bitmap);
      region_space_bitmap_ = bitmap;
    }
  }
}

void ConcurrentCopying::InitializePhase() {
  TimingLogger::ScopedTiming split("InitializePhase", GetTimings());
  if (kVerboseMode) {
    LOG(INFO) << "GC InitializePhase";
    LOG(INFO) << "Region-space : " << reinterpret_cast<void*>(region_space_->Begin()) << "-"
              << reinterpret_cast<void*>(region_space_->Limit());
  }
  CheckEmptyMarkStack();
  immune_region_.Reset();
  bytes_moved_.StoreRelaxed(0);
  objects_moved_.StoreRelaxed(0);
  if (GetCurrentIteration()->GetGcCause() == kGcCauseExplicit ||
      GetCurrentIteration()->GetGcCause() == kGcCauseForNativeAlloc ||
      GetCurrentIteration()->GetClearSoftReferences()) {
    force_evacuate_all_ = true;
  } else {
    force_evacuate_all_ = false;
  }
  BindBitmaps();
  if (kVerboseMode) {
    LOG(INFO) << "force_evacuate_all=" << force_evacuate_all_;
    LOG(INFO) << "Immune region: " << immune_region_.Begin() << "-" << immune_region_.End();
    LOG(INFO) << "GC end of InitializePhase";
  }
}

// Used to switch the thread roots of a thread from from-space refs to to-space refs.
class ThreadFlipVisitor : public Closure {
 public:
  explicit ThreadFlipVisitor(ConcurrentCopying* concurrent_copying, bool use_tlab)
      : concurrent_copying_(concurrent_copying), use_tlab_(use_tlab) {
  }

  virtual void Run(Thread* thread) OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    if (use_tlab_ && thread->HasTlab()) {
      if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) {
        // This must come before the revoke.
        size_t thread_local_objects = thread->GetThreadLocalObjectsAllocated();
        concurrent_copying_->region_space_->RevokeThreadLocalBuffers(thread);
        reinterpret_cast<Atomic<size_t>*>(
            &concurrent_copying_->from_space_num_objects_at_first_pause_)->
                FetchAndAddSequentiallyConsistent(thread_local_objects);
      } else {
        concurrent_copying_->region_space_->RevokeThreadLocalBuffers(thread);
      }
    }
    if (kUseThreadLocalAllocationStack) {
      thread->RevokeThreadLocalAllocationStack();
    }
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    thread->VisitRoots(concurrent_copying_);
    concurrent_copying_->GetBarrier().Pass(self);
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
  const bool use_tlab_;
};
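
// Conceptually, visiting a thread's roots with the collector as the visitor rewrites each root
// slot in place, roughly *root = Mark(*root): a from-space ref is replaced with the
// corresponding to-space ref, copying the referenced object on first visit. (A sketch of the
// effect, not literal code from this file.)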

// Called back from Runtime::FlipThreadRoots() during a pause.
class FlipCallback : public Closure {
 public:
  explicit FlipCallback(ConcurrentCopying* concurrent_copying)
      : concurrent_copying_(concurrent_copying) {
  }

  virtual void Run(Thread* thread) OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) {
    ConcurrentCopying* cc = concurrent_copying_;
    TimingLogger::ScopedTiming split("(Paused)FlipCallback", cc->GetTimings());
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self);
    Locks::mutator_lock_->AssertExclusiveHeld(self);
    cc->region_space_->SetFromSpace(cc->rb_table_, cc->force_evacuate_all_);
    cc->SwapStacks(self);
    if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) {
      cc->RecordLiveStackFreezeSize(self);
      cc->from_space_num_objects_at_first_pause_ = cc->region_space_->GetObjectsAllocated();
      cc->from_space_num_bytes_at_first_pause_ = cc->region_space_->GetBytesAllocated();
    }
    cc->is_marking_ = true;
    cc->mark_stack_mode_.StoreRelaxed(ConcurrentCopying::kMarkStackModeThreadLocal);
    if (UNLIKELY(Runtime::Current()->IsActiveTransaction())) {
      CHECK(Runtime::Current()->IsAotCompiler());
      TimingLogger::ScopedTiming split2("(Paused)VisitTransactionRoots", cc->GetTimings());
      Runtime::Current()->VisitTransactionRoots(cc);
    }
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
};

// Switch threads' roots from from-space refs to to-space refs. Forward/mark the thread roots.
void ConcurrentCopying::FlipThreadRoots() {
  TimingLogger::ScopedTiming split("FlipThreadRoots", GetTimings());
  if (kVerboseMode) {
    LOG(INFO) << "time=" << region_space_->Time();
    region_space_->DumpNonFreeRegions(LOG(INFO));
  }
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertNotHeld(self);
  gc_barrier_->Init(self, 0);
  ThreadFlipVisitor thread_flip_visitor(this, heap_->use_tlab_);
  FlipCallback flip_callback(this);
  size_t barrier_count = Runtime::Current()->FlipThreadRoots(
      &thread_flip_visitor, &flip_callback, this);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  is_asserting_to_space_invariant_ = true;
  QuasiAtomic::ThreadFenceForConstructor();
  if (kVerboseMode) {
    LOG(INFO) << "time=" << region_space_->Time();
    region_space_->DumpNonFreeRegions(LOG(INFO));
    LOG(INFO) << "GC end of FlipThreadRoots";
  }
}

void ConcurrentCopying::SwapStacks(Thread* self) {
  heap_->SwapStacks(self);
}

void ConcurrentCopying::RecordLiveStackFreezeSize(Thread* self) {
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
}

// Used to visit objects in the immune spaces.
class ConcurrentCopyingImmuneSpaceObjVisitor {
 public:
  explicit ConcurrentCopyingImmuneSpaceObjVisitor(ConcurrentCopying* cc)
      : collector_(cc) {}

  void operator()(mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    DCHECK(obj != nullptr);
    DCHECK(collector_->immune_region_.ContainsObject(obj));
    accounting::ContinuousSpaceBitmap* cc_bitmap =
        collector_->cc_heap_bitmap_->GetContinuousSpaceBitmap(obj);
    DCHECK(cc_bitmap != nullptr)
        << "An immune space object must have a bitmap";
    if (kIsDebugBuild) {
      DCHECK(collector_->heap_->GetMarkBitmap()->Test(obj))
          << "Immune space object must be already marked";
    }
    // This may or may not succeed, which is ok.
    if (kUseBakerReadBarrier) {
      obj->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
    }
    if (cc_bitmap->AtomicTestAndSet(obj)) {
      // Already marked. Do nothing.
    } else {
      // Newly marked. Set the gray bit and push it onto the mark stack.
      CHECK(!kUseBakerReadBarrier || obj->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
      collector_->PushOntoMarkStack(obj);
    }
  }

 private:
  ConcurrentCopying* const collector_;
};
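
// The AtomicTestAndSet() above makes immune-space marking idempotent: only the caller that
// flips the bitmap bit pushes the object, so each immune object is queued at most once even if
// a mutator's read barrier races to mark the same object during the scan.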

class EmptyCheckpoint : public Closure {
 public:
  explicit EmptyCheckpoint(ConcurrentCopying* concurrent_copying)
      : concurrent_copying_(concurrent_copying) {
  }

  virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    // If thread is a running mutator, then act on behalf of the garbage collector.
    // See the code in ThreadList::RunCheckpoint.
    if (thread->GetState() == kRunnable) {
      concurrent_copying_->GetBarrier().Pass(self);
    }
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
};
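
// An empty checkpoint does no per-thread work; its value is the synchronization itself. Every
// runnable thread must pass a suspend point before the barrier clears, so once the issuer
// returns, no mutator can still be inside a code path (e.g. a read barrier) that it entered
// before the checkpoint was requested.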

// Concurrently mark roots that are guarded by read barriers and process the mark stack.
void ConcurrentCopying::MarkingPhase() {
  TimingLogger::ScopedTiming split("MarkingPhase", GetTimings());
  if (kVerboseMode) {
    LOG(INFO) << "GC MarkingPhase";
  }
  CHECK(weak_ref_access_enabled_);
  {
    // Mark the image root. The WB-based collectors do not need to
    // scan the image objects from roots by relying on the card table,
    // but it's necessary for the RB to-space invariant to hold.
    TimingLogger::ScopedTiming split1("VisitImageRoots", GetTimings());
    gc::space::ImageSpace* image = heap_->GetImageSpace();
    if (image != nullptr) {
      mirror::ObjectArray<mirror::Object>* image_root = image->GetImageHeader().GetImageRoots();
      mirror::Object* marked_image_root = Mark(image_root);
      CHECK_EQ(image_root, marked_image_root) << "An image object does not move";
      if (ReadBarrier::kEnableToSpaceInvariantChecks) {
        AssertToSpaceInvariant(nullptr, MemberOffset(0), marked_image_root);
      }
    }
  }
  // TODO: Other garbage collectors use Runtime::VisitConcurrentRoots(); refactor this part
  // to use the same function.
  {
    TimingLogger::ScopedTiming split2("VisitConstantRoots", GetTimings());
    Runtime::Current()->VisitConstantRoots(this);
  }
  {
    TimingLogger::ScopedTiming split3("VisitInternTableRoots", GetTimings());
    Runtime::Current()->GetInternTable()->VisitRoots(this, kVisitRootFlagAllRoots);
  }
  {
    TimingLogger::ScopedTiming split4("VisitClassLinkerRoots", GetTimings());
    Runtime::Current()->GetClassLinker()->VisitRoots(this, kVisitRootFlagAllRoots);
  }
  {
    // TODO: don't visit the transaction roots if it's not active.
    TimingLogger::ScopedTiming split5("VisitNonThreadRoots", GetTimings());
    Runtime::Current()->VisitNonThreadRoots(this);
  }
  Runtime::Current()->GetHeap()->VisitAllocationRecords(this);

  // Immune spaces.
  for (auto& space : heap_->GetContinuousSpaces()) {
    if (immune_region_.ContainsSpace(space)) {
      DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
      accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
      ConcurrentCopyingImmuneSpaceObjVisitor visitor(this);
      live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                    reinterpret_cast<uintptr_t>(space->Limit()),
                                    visitor);
    }
  }

  Thread* self = Thread::Current();
  {
    TimingLogger::ScopedTiming split6("ProcessMarkStack", GetTimings());
    // We transition through three mark stack modes (thread-local, shared, GC-exclusive). The
    // primary reasons are that we need a checkpoint to process thread-local mark stacks, that
    // after we disable weak ref accesses we can't use a checkpoint (running threads could be
    // blocked in WaitHoldingLocks, which would deadlock), and that once we reach the point
    // where we process weak references, we can avoid taking a lock when accessing the GC mark
    // stack, which makes mark stack processing more efficient.

    // Process the mark stack once in the thread-local stack mode. This marks most of the live
    // objects, aside from weak ref accesses with read barriers (Reference::GetReferent() and
    // system weaks) that may happen concurrently while we process the mark stack and newly
    // mark/gray objects and push refs onto the mark stack.
    ProcessMarkStack();
    // Switch to the shared mark stack mode. That is, revoke and process thread-local mark stacks
    // for the last time before transitioning to the shared mark stack mode, which would process
    // new refs that may have been concurrently pushed onto the mark stack during the
    // ProcessMarkStack() call above. At the same time, disable weak ref accesses using a
    // per-thread flag. It's important to do these together in a single checkpoint so that we
    // can ensure that mutators won't newly gray objects and push new refs onto the mark stack
    // due to weak ref accesses, and that mutators safely transition to the shared mark stack
    // mode (without leaving unprocessed refs on the thread-local mark stacks), without a race.
    // This is why we use the thread-local weak ref access flag
    // Thread::tls32_.weak_ref_access_enabled_ instead of the global one.
    SwitchToSharedMarkStackMode();
    CHECK(!self->GetWeakRefAccessEnabled());
    // Now that weak ref accesses are disabled, once we exhaust the shared mark stack again here
    // (which may be non-empty if there were refs found on thread-local mark stacks during the
    // above SwitchToSharedMarkStackMode() call), we won't have new refs to process; that is,
    // mutators (via read barriers) have no way to produce any more refs to process. Marking
    // converges once before we process weak refs below.
    ProcessMarkStack();
    CheckEmptyMarkStack();
    // Switch to the GC-exclusive mark stack mode so that we can process the mark stack without
    // a lock from this point on.
    SwitchToGcExclusiveMarkStackMode();
    CheckEmptyMarkStack();
    if (kVerboseMode) {
      LOG(INFO) << "ProcessReferences";
    }
    // Process weak references. This may produce new refs to process and have them processed via
    // ProcessMarkStack (in the GC-exclusive mark stack mode).
    ProcessReferences(self);
    CheckEmptyMarkStack();
    if (kVerboseMode) {
      LOG(INFO) << "SweepSystemWeaks";
    }
    SweepSystemWeaks(self);
    if (kVerboseMode) {
      LOG(INFO) << "SweepSystemWeaks done";
    }
    // Process the mark stack here one last time because the above SweepSystemWeaks() call may
    // have marked some objects (kept some strings alive), as hash_set::Erase() can call the
    // hash function for arbitrary elements in the weak intern table in
    // InternTable::Table::SweepWeaks().
    ProcessMarkStack();
    CheckEmptyMarkStack();
    // Re-enable weak ref accesses.
    ReenableWeakRefAccess(self);
    // Issue an empty checkpoint to ensure no threads are still in the middle of a read barrier
    // which may have a from-space ref cached in a local variable.
    IssueEmptyCheckpoint();
    // Marking is done. Disable marking.
    if (kUseTableLookupReadBarrier) {
      heap_->rb_table_->ClearAll();
      DCHECK(heap_->rb_table_->IsAllCleared());
    }
    is_mark_stack_push_disallowed_.StoreSequentiallyConsistent(1);
    is_marking_ = false;  // This disables the read barrier/marking of weak roots.
    mark_stack_mode_.StoreSequentiallyConsistent(kMarkStackModeOff);
    CheckEmptyMarkStack();
  }

  CHECK(weak_ref_access_enabled_);
  if (kVerboseMode) {
    LOG(INFO) << "GC end of MarkingPhase";
  }
}
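
// Mark stack mode over a GC cycle, as driven above (summary):
//   kMarkStackModeOff
//     -> kMarkStackModeThreadLocal  (set in FlipCallback during the flip pause)
//     -> kMarkStackModeShared       (SwitchToSharedMarkStackMode(); weak ref access disabled)
//     -> kMarkStackModeGcExclusive  (SwitchToGcExclusiveMarkStackMode(); reference processing)
//     -> kMarkStackModeOff          (end of MarkingPhase()).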

void ConcurrentCopying::ReenableWeakRefAccess(Thread* self) {
  if (kVerboseMode) {
    LOG(INFO) << "ReenableWeakRefAccess";
  }
  weak_ref_access_enabled_.StoreRelaxed(true);  // This is for new threads.
  QuasiAtomic::ThreadFenceForConstructor();
  // Iterate over all threads (we can't use a checkpoint here, and don't need to) and re-enable
  // weak ref access.
  {
    MutexLock mu(self, *Locks::thread_list_lock_);
    std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
    for (Thread* thread : thread_list) {
      thread->SetWeakRefAccessEnabled(true);
    }
  }
  // Unblock blocking threads.
  GetHeap()->GetReferenceProcessor()->BroadcastForSlowPath(self);
  Runtime::Current()->BroadcastForNewSystemWeaks();
}

void ConcurrentCopying::IssueEmptyCheckpoint() {
  Thread* self = Thread::Current();
  EmptyCheckpoint check_point(this);
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  gc_barrier_->Init(self, 0);
  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
  // If there are no threads to wait for, all the checkpoint functions have already finished and
  // there is no need to release the mutator lock.
  if (barrier_count == 0) {
    return;
  }
  // Release locks then wait for all mutator threads to pass the barrier.
  Locks::mutator_lock_->SharedUnlock(self);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  Locks::mutator_lock_->SharedLock(self);
}

void ConcurrentCopying::PushOntoMarkStack(mirror::Object* to_ref) {
  CHECK_EQ(is_mark_stack_push_disallowed_.LoadRelaxed(), 0)
      << " " << to_ref << " " << PrettyTypeOf(to_ref);
  Thread* self = Thread::Current();  // TODO: pass self as an argument from call sites?
  CHECK(thread_running_gc_ != nullptr);
  MarkStackMode mark_stack_mode = mark_stack_mode_.LoadRelaxed();
  if (mark_stack_mode == kMarkStackModeThreadLocal) {
    if (self == thread_running_gc_) {
      // If GC-running thread, use the GC mark stack instead of a thread-local mark stack.
      CHECK(self->GetThreadLocalMarkStack() == nullptr);
      CHECK(!gc_mark_stack_->IsFull());
      gc_mark_stack_->PushBack(to_ref);
    } else {
      // Otherwise, use a thread-local mark stack.
      accounting::AtomicStack<mirror::Object>* tl_mark_stack = self->GetThreadLocalMarkStack();
      if (UNLIKELY(tl_mark_stack == nullptr || tl_mark_stack->IsFull())) {
        MutexLock mu(self, mark_stack_lock_);
        // Get a new thread local mark stack.
        accounting::AtomicStack<mirror::Object>* new_tl_mark_stack;
        if (!pooled_mark_stacks_.empty()) {
          // Use a pooled mark stack.
          new_tl_mark_stack = pooled_mark_stacks_.back();
          pooled_mark_stacks_.pop_back();
        } else {
          // None pooled. Create a new one.
          new_tl_mark_stack =
              accounting::AtomicStack<mirror::Object>::Create(
                  "thread local mark stack", 4 * KB, 4 * KB);
        }
        DCHECK(new_tl_mark_stack != nullptr);
        DCHECK(new_tl_mark_stack->IsEmpty());
        new_tl_mark_stack->PushBack(to_ref);
        self->SetThreadLocalMarkStack(new_tl_mark_stack);
        if (tl_mark_stack != nullptr) {
          // Store the old full stack into a vector.
          revoked_mark_stacks_.push_back(tl_mark_stack);
        }
      } else {
        tl_mark_stack->PushBack(to_ref);
      }
    }
  } else if (mark_stack_mode == kMarkStackModeShared) {
    // Access the shared GC mark stack with a lock.
    MutexLock mu(self, mark_stack_lock_);
    CHECK(!gc_mark_stack_->IsFull());
    gc_mark_stack_->PushBack(to_ref);
  } else {
    CHECK_EQ(static_cast<uint32_t>(mark_stack_mode),
             static_cast<uint32_t>(kMarkStackModeGcExclusive));
    CHECK(self == thread_running_gc_)
        << "Only GC-running thread should access the mark stack "
        << "in the GC exclusive mark stack mode";
    // Access the GC mark stack without a lock.
    CHECK(!gc_mark_stack_->IsFull());
    gc_mark_stack_->PushBack(to_ref);
  }
}
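
// Where a pushed ref lands, by mark stack mode (summary of the routing above):
//   kMarkStackModeThreadLocal: GC thread -> gc_mark_stack_; mutators -> thread-local stacks
//                              (full or missing stacks are swapped in from pooled_mark_stacks_).
//   kMarkStackModeShared:      any thread -> gc_mark_stack_, under mark_stack_lock_.
//   kMarkStackModeGcExclusive: GC thread only -> gc_mark_stack_, no lock needed.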

accounting::ObjectStack* ConcurrentCopying::GetAllocationStack() {
  return heap_->allocation_stack_.get();
}

accounting::ObjectStack* ConcurrentCopying::GetLiveStack() {
  return heap_->live_stack_.get();
}

inline mirror::Object* ConcurrentCopying::GetFwdPtr(mirror::Object* from_ref) {
  DCHECK(region_space_->IsInFromSpace(from_ref));
  LockWord lw = from_ref->GetLockWord(false);
  if (lw.GetState() == LockWord::kForwardingAddress) {
    mirror::Object* fwd_ptr = reinterpret_cast<mirror::Object*>(lw.ForwardingAddress());
    CHECK(fwd_ptr != nullptr);
    return fwd_ptr;
  } else {
    return nullptr;
  }
}
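
// The forwarding pointer lives in the from-space object's lock word. A minimal sketch of how
// the copying side would install it (simplified; the real Copy() also handles CAS failure and
// lock word re-reads):
//   LockWord fwd = LockWord::FromForwardingAddress(reinterpret_cast<size_t>(to_ref));
//   from_ref->CasLockWordWeakSequentiallyConsistent(old_lock_word, fwd);
// Losers of the copy race can then recover the winner's copy via GetFwdPtr().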

// The following visitors are used to verify that there are no references to the from-space
// left after marking.
class ConcurrentCopyingVerifyNoFromSpaceRefsVisitor : public SingleRootVisitor {
 public:
  explicit ConcurrentCopyingVerifyNoFromSpaceRefsVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* ref) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
    if (ref == nullptr) {
      // OK.
      return;
    }
    collector_->AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
    if (kUseBakerReadBarrier) {
      if (collector_->RegionSpace()->IsInToSpace(ref)) {
        CHECK(ref->GetReadBarrierPointer() == nullptr)
            << "To-space ref " << ref << " " << PrettyTypeOf(ref)
            << " has non-white rb_ptr " << ref->GetReadBarrierPointer();
      } else {
        CHECK(ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr() ||
              (ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr() &&
               collector_->IsOnAllocStack(ref)))
            << "Non-moving/unevac from space ref " << ref << " " << PrettyTypeOf(ref)
            << " has non-black rb_ptr " << ref->GetReadBarrierPointer()
            << " but isn't on the alloc stack (and has white rb_ptr)."
            << " Is it in the non-moving space="
            << (collector_->GetHeap()->GetNonMovingSpace()->HasAddress(ref));
      }
    }
  }

  void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED)
      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    DCHECK(root != nullptr);
    operator()(root);
  }

 private:
  ConcurrentCopying* const collector_;
};

class ConcurrentCopyingVerifyNoFromSpaceRefsFieldVisitor {
 public:
  explicit ConcurrentCopyingVerifyNoFromSpaceRefsFieldVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
    mirror::Object* ref =
        obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
    ConcurrentCopyingVerifyNoFromSpaceRefsVisitor visitor(collector_);
    visitor(ref);
  }
  void operator()(mirror::Class* klass, mirror::Reference* ref) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
    CHECK(klass->IsTypeOfReferenceClass());
    this->operator()(ref, mirror::Reference::ReferentOffset(), false);
  }

 private:
  ConcurrentCopying* const collector_;
};

class ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor {
 public:
  explicit ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}
  void operator()(mirror::Object* obj) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    ObjectCallback(obj, collector_);
  }
  static void ObjectCallback(mirror::Object* obj, void *arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    CHECK(obj != nullptr);
    ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
    space::RegionSpace* region_space = collector->RegionSpace();
    CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space";
    ConcurrentCopyingVerifyNoFromSpaceRefsFieldVisitor visitor(collector);
    obj->VisitReferences<true>(visitor, visitor);
    if (kUseBakerReadBarrier) {
      if (collector->RegionSpace()->IsInToSpace(obj)) {
        CHECK(obj->GetReadBarrierPointer() == nullptr)
            << "obj=" << obj << " non-white rb_ptr " << obj->GetReadBarrierPointer();
      } else {
        CHECK(obj->GetReadBarrierPointer() == ReadBarrier::BlackPtr() ||
              (obj->GetReadBarrierPointer() == ReadBarrier::WhitePtr() &&
               collector->IsOnAllocStack(obj)))
            << "Non-moving space/unevac from space ref " << obj << " " << PrettyTypeOf(obj)
            << " has non-black rb_ptr " << obj->GetReadBarrierPointer()
            << " but isn't on the alloc stack (and has white rb_ptr)."
            << " Is it in the non-moving space="
            << (collector->GetHeap()->GetNonMovingSpace()->HasAddress(obj));
      }
    }
  }

 private:
  ConcurrentCopying* const collector_;
};

// Verify that there are no from-space references left after the marking phase.
void ConcurrentCopying::VerifyNoFromSpaceReferences() {
  Thread* self = Thread::Current();
  DCHECK(Locks::mutator_lock_->IsExclusiveHeld(self));
  ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor visitor(this);
  // Roots.
  {
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    ConcurrentCopyingVerifyNoFromSpaceRefsVisitor ref_visitor(this);
    Runtime::Current()->VisitRoots(&ref_visitor);
  }
  // The to-space.
  region_space_->WalkToSpace(ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor::ObjectCallback,
                             this);
  // Non-moving spaces.
  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    heap_->GetMarkBitmap()->Visit(visitor);
  }
  // The alloc stack.
  {
    ConcurrentCopyingVerifyNoFromSpaceRefsVisitor ref_visitor(this);
    for (auto* it = heap_->allocation_stack_->Begin(), *end = heap_->allocation_stack_->End();
         it < end; ++it) {
      mirror::Object* const obj = it->AsMirrorPtr();
      if (obj != nullptr && obj->GetClass() != nullptr) {
        // TODO: need to call this only if obj is alive?
        ref_visitor(obj);
        visitor(obj);
      }
    }
  }
  // TODO: LOS. But only refs in LOS are classes.
}

// The following visitors are used to assert the to-space invariant.
class ConcurrentCopyingAssertToSpaceInvariantRefsVisitor {
 public:
  explicit ConcurrentCopyingAssertToSpaceInvariantRefsVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* ref) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
    if (ref == nullptr) {
      // OK.
      return;
    }
    collector_->AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
  }

 private:
  ConcurrentCopying* const collector_;
};

class ConcurrentCopyingAssertToSpaceInvariantFieldVisitor {
 public:
  explicit ConcurrentCopyingAssertToSpaceInvariantFieldVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
    mirror::Object* ref =
        obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
    ConcurrentCopyingAssertToSpaceInvariantRefsVisitor visitor(collector_);
    visitor(ref);
  }
  void operator()(mirror::Class* klass, mirror::Reference* /* ref */) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
    CHECK(klass->IsTypeOfReferenceClass());
  }

 private:
  ConcurrentCopying* const collector_;
};

class ConcurrentCopyingAssertToSpaceInvariantObjectVisitor {
 public:
  explicit ConcurrentCopyingAssertToSpaceInvariantObjectVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}
  void operator()(mirror::Object* obj) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    ObjectCallback(obj, collector_);
  }
  static void ObjectCallback(mirror::Object* obj, void *arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    CHECK(obj != nullptr);
    ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
    space::RegionSpace* region_space = collector->RegionSpace();
    CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space";
    collector->AssertToSpaceInvariant(nullptr, MemberOffset(0), obj);
    ConcurrentCopyingAssertToSpaceInvariantFieldVisitor visitor(collector);
    obj->VisitReferences<true>(visitor, visitor);
  }

 private:
  ConcurrentCopying* const collector_;
};

class RevokeThreadLocalMarkStackCheckpoint : public Closure {
 public:
  explicit RevokeThreadLocalMarkStackCheckpoint(ConcurrentCopying* concurrent_copying,
                                                bool disable_weak_ref_access)
      : concurrent_copying_(concurrent_copying),
        disable_weak_ref_access_(disable_weak_ref_access) {
  }

  virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    // Revoke thread local mark stacks.
    accounting::AtomicStack<mirror::Object>* tl_mark_stack = thread->GetThreadLocalMarkStack();
    if (tl_mark_stack != nullptr) {
      MutexLock mu(self, concurrent_copying_->mark_stack_lock_);
      concurrent_copying_->revoked_mark_stacks_.push_back(tl_mark_stack);
      thread->SetThreadLocalMarkStack(nullptr);
    }
    // Disable weak ref access.
    if (disable_weak_ref_access_) {
      thread->SetWeakRefAccessEnabled(false);
    }
    // If thread is a running mutator, then act on behalf of the garbage collector.
    // See the code in ThreadList::RunCheckpoint.
    if (thread->GetState() == kRunnable) {
      concurrent_copying_->GetBarrier().Pass(self);
    }
  }

 private:
  ConcurrentCopying* const concurrent_copying_;
  const bool disable_weak_ref_access_;
};

void ConcurrentCopying::RevokeThreadLocalMarkStacks(bool disable_weak_ref_access) {
  Thread* self = Thread::Current();
  RevokeThreadLocalMarkStackCheckpoint check_point(this, disable_weak_ref_access);
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  gc_barrier_->Init(self, 0);
  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
  // If there are no threads to wait for, all the checkpoint functions have already finished and
  // there is no need to release the mutator lock.
  if (barrier_count == 0) {
    return;
  }
  Locks::mutator_lock_->SharedUnlock(self);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    gc_barrier_->Increment(self, barrier_count);
  }
  Locks::mutator_lock_->SharedLock(self);
}

void ConcurrentCopying::RevokeThreadLocalMarkStack(Thread* thread) {
  Thread* self = Thread::Current();
  CHECK_EQ(self, thread);
  accounting::AtomicStack<mirror::Object>* tl_mark_stack = thread->GetThreadLocalMarkStack();
  if (tl_mark_stack != nullptr) {
    CHECK(is_marking_);
    MutexLock mu(self, mark_stack_lock_);
    revoked_mark_stacks_.push_back(tl_mark_stack);
    thread->SetThreadLocalMarkStack(nullptr);
  }
}

void ConcurrentCopying::ProcessMarkStack() {
  if (kVerboseMode) {
    LOG(INFO) << "ProcessMarkStack. ";
  }
  bool empty_prev = false;
  while (true) {
    bool empty = ProcessMarkStackOnce();
    if (empty_prev && empty) {
      // Saw empty mark stack for a second time, done.
      break;
    }
    empty_prev = empty;
  }
}
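
// The loop above exits only after two consecutive empty passes: a pass can itself generate
// work (e.g. in the thread-local mode, the checkpoint in ProcessThreadLocalMarkStacks() revokes
// stacks that mutators filled while the previous pass was draining), so a single empty pass is
// a weaker signal that the stacks have drained.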

bool ConcurrentCopying::ProcessMarkStackOnce() {
  Thread* self = Thread::Current();
  CHECK(thread_running_gc_ != nullptr);
  CHECK(self == thread_running_gc_);
  CHECK(self->GetThreadLocalMarkStack() == nullptr);
  size_t count = 0;
  MarkStackMode mark_stack_mode = mark_stack_mode_.LoadRelaxed();
  if (mark_stack_mode == kMarkStackModeThreadLocal) {
    // Process the thread-local mark stacks and the GC mark stack.
    count += ProcessThreadLocalMarkStacks(false);
    while (!gc_mark_stack_->IsEmpty()) {
      mirror::Object* to_ref = gc_mark_stack_->PopBack();
      ProcessMarkStackRef(to_ref);
      ++count;
    }
    gc_mark_stack_->Reset();
  } else if (mark_stack_mode == kMarkStackModeShared) {
    // Process the shared GC mark stack with a lock.
    {
      MutexLock mu(self, mark_stack_lock_);
      CHECK(revoked_mark_stacks_.empty());
    }
    while (true) {
      std::vector<mirror::Object*> refs;
      {
        // Copy refs with lock. Note the number of refs should be small.
        MutexLock mu(self, mark_stack_lock_);
        if (gc_mark_stack_->IsEmpty()) {
          break;
        }
        for (StackReference<mirror::Object>* p = gc_mark_stack_->Begin();
             p != gc_mark_stack_->End(); ++p) {
          refs.push_back(p->AsMirrorPtr());
        }
        gc_mark_stack_->Reset();
      }
      for (mirror::Object* ref : refs) {
        ProcessMarkStackRef(ref);
        ++count;
      }
    }
  } else {
    CHECK_EQ(static_cast<uint32_t>(mark_stack_mode),
             static_cast<uint32_t>(kMarkStackModeGcExclusive));
    {
      MutexLock mu(self, mark_stack_lock_);
      CHECK(revoked_mark_stacks_.empty());
    }
    // Process the GC mark stack in the exclusive mode. No need to take the lock.
    while (!gc_mark_stack_->IsEmpty()) {
      mirror::Object* to_ref = gc_mark_stack_->PopBack();
      ProcessMarkStackRef(to_ref);
      ++count;
    }
    gc_mark_stack_->Reset();
  }

  // Return true if the stack was empty.
  return count == 0;
}

size_t ConcurrentCopying::ProcessThreadLocalMarkStacks(bool disable_weak_ref_access) {
  // Run a checkpoint to collect all thread local mark stacks and iterate over them all.
  RevokeThreadLocalMarkStacks(disable_weak_ref_access);
  size_t count = 0;
  std::vector<accounting::AtomicStack<mirror::Object>*> mark_stacks;
  {
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    // Make a copy of the mark stack vector.
    mark_stacks = revoked_mark_stacks_;
    revoked_mark_stacks_.clear();
  }
  for (accounting::AtomicStack<mirror::Object>* mark_stack : mark_stacks) {
    for (StackReference<mirror::Object>* p = mark_stack->Begin(); p != mark_stack->End(); ++p) {
      mirror::Object* to_ref = p->AsMirrorPtr();
      ProcessMarkStackRef(to_ref);
      ++count;
    }
    {
      MutexLock mu(Thread::Current(), mark_stack_lock_);
      if (pooled_mark_stacks_.size() >= kMarkStackPoolSize) {
        // The pool has enough. Delete it.
        delete mark_stack;
      } else {
        // Otherwise, put it into the pool for later reuse.
        mark_stack->Reset();
        pooled_mark_stacks_.push_back(mark_stack);
      }
    }
  }
  return count;
}

void ConcurrentCopying::ProcessMarkStackRef(mirror::Object* to_ref) {
  DCHECK(!region_space_->IsInFromSpace(to_ref));
  if (kUseBakerReadBarrier) {
    DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr())
        << " " << to_ref << " " << to_ref->GetReadBarrierPointer()
        << " is_marked=" << IsMarked(to_ref);
  }
  // Scan ref fields.
  Scan(to_ref);
  // Mark the gray ref as white or black.
  if (kUseBakerReadBarrier) {
    DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr())
        << " " << to_ref << " " << to_ref->GetReadBarrierPointer()
        << " is_marked=" << IsMarked(to_ref);
  }
  if (to_ref->GetClass<kVerifyNone, kWithoutReadBarrier>()->IsTypeOfReferenceClass() &&
      to_ref->AsReference()->GetReferent<kWithoutReadBarrier>() != nullptr &&
      !IsInToSpace(to_ref->AsReference()->GetReferent<kWithoutReadBarrier>())) {
    // Leave References gray so that GetReferent() will trigger RB.
    CHECK(to_ref->AsReference()->IsEnqueued()) << "Left unenqueued ref gray " << to_ref;
  } else {
#ifdef USE_BAKER_OR_BROOKS_READ_BARRIER
    if (kUseBakerReadBarrier) {
      if (region_space_->IsInToSpace(to_ref)) {
        // If to-space, change from gray to white.
        bool success = to_ref->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(),
                                                           ReadBarrier::WhitePtr());
        CHECK(success) << "Must succeed as we won the race.";
        CHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr());
      } else {
        // If non-moving space/unevac from space, change from gray
        // to black. We can't change gray to white because it's not
        // safe to use CAS if two threads change values in opposite
        // directions (A->B and B->A). So, we change it to black to
        // indicate non-moving objects that have been marked
        // through. Note we'd need to change from black to white
        // later (concurrently).
        bool success = to_ref->AtomicSetReadBarrierPointer(ReadBarrier::GrayPtr(),
                                                           ReadBarrier::BlackPtr());
        CHECK(success) << "Must succeed as we won the race.";
        CHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr());
      }
    }
#else
    DCHECK(!kUseBakerReadBarrier);
#endif
  }
  if (ReadBarrier::kEnableToSpaceInvariantChecks || kIsDebugBuild) {
    ConcurrentCopyingAssertToSpaceInvariantObjectVisitor visitor(this);
    visitor(to_ref);
  }
}
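
// Read barrier pointer transitions for an object marked through above (summary):
//   to-space object:           gray -> white (here)
//   non-moving/unevac object:  gray -> black (here) -> white (later, in ClearBlackPtrs())
//   reference object with an unmarked referent: left gray so that Reference::GetReferent()
//                              still takes the read barrier slow path until it is processed.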

void ConcurrentCopying::SwitchToSharedMarkStackMode() {
  Thread* self = Thread::Current();
  CHECK(thread_running_gc_ != nullptr);
  CHECK_EQ(self, thread_running_gc_);
  CHECK(self->GetThreadLocalMarkStack() == nullptr);
  MarkStackMode before_mark_stack_mode = mark_stack_mode_.LoadRelaxed();
  CHECK_EQ(static_cast<uint32_t>(before_mark_stack_mode),
           static_cast<uint32_t>(kMarkStackModeThreadLocal));
  mark_stack_mode_.StoreRelaxed(kMarkStackModeShared);
  CHECK(weak_ref_access_enabled_.LoadRelaxed());
  weak_ref_access_enabled_.StoreRelaxed(false);
  QuasiAtomic::ThreadFenceForConstructor();
  // Process the thread-local mark stacks one last time after switching to the shared mark stack
  // mode and disabling weak ref accesses.
  ProcessThreadLocalMarkStacks(true);
  if (kVerboseMode) {
    LOG(INFO) << "Switched to shared mark stack mode and disabled weak ref access";
  }
}

void ConcurrentCopying::SwitchToGcExclusiveMarkStackMode() {
  Thread* self = Thread::Current();
  CHECK(thread_running_gc_ != nullptr);
  CHECK_EQ(self, thread_running_gc_);
  CHECK(self->GetThreadLocalMarkStack() == nullptr);
  MarkStackMode before_mark_stack_mode = mark_stack_mode_.LoadRelaxed();
  CHECK_EQ(static_cast<uint32_t>(before_mark_stack_mode),
           static_cast<uint32_t>(kMarkStackModeShared));
  mark_stack_mode_.StoreRelaxed(kMarkStackModeGcExclusive);
  QuasiAtomic::ThreadFenceForConstructor();
  if (kVerboseMode) {
    LOG(INFO) << "Switched to GC exclusive mark stack mode";
  }
}

void ConcurrentCopying::CheckEmptyMarkStack() {
  Thread* self = Thread::Current();
  CHECK(thread_running_gc_ != nullptr);
  CHECK_EQ(self, thread_running_gc_);
  CHECK(self->GetThreadLocalMarkStack() == nullptr);
  MarkStackMode mark_stack_mode = mark_stack_mode_.LoadRelaxed();
  if (mark_stack_mode == kMarkStackModeThreadLocal) {
    // Thread-local mark stack mode.
    RevokeThreadLocalMarkStacks(false);
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    if (!revoked_mark_stacks_.empty()) {
      for (accounting::AtomicStack<mirror::Object>* mark_stack : revoked_mark_stacks_) {
        while (!mark_stack->IsEmpty()) {
          mirror::Object* obj = mark_stack->PopBack();
          if (kUseBakerReadBarrier) {
            mirror::Object* rb_ptr = obj->GetReadBarrierPointer();
            LOG(INFO) << "On mark queue : " << obj << " " << PrettyTypeOf(obj) << " rb_ptr="
                      << rb_ptr << " is_marked=" << IsMarked(obj);
          } else {
            LOG(INFO) << "On mark queue : " << obj << " " << PrettyTypeOf(obj)
                      << " is_marked=" << IsMarked(obj);
          }
        }
      }
      LOG(FATAL) << "mark stack is not empty";
    }
  } else {
    // Shared, GC-exclusive, or off.
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    CHECK(gc_mark_stack_->IsEmpty());
    CHECK(revoked_mark_stacks_.empty());
  }
}

void ConcurrentCopying::SweepSystemWeaks(Thread* self) {
  TimingLogger::ScopedTiming split("SweepSystemWeaks", GetTimings());
  ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
  Runtime::Current()->SweepSystemWeaks(this);
}

void ConcurrentCopying::Sweep(bool swap_bitmaps) {
  {
    TimingLogger::ScopedTiming t("MarkStackAsLive", GetTimings());
    accounting::ObjectStack* live_stack = heap_->GetLiveStack();
    if (kEnableFromSpaceAccountingCheck) {
      CHECK_GE(live_stack_freeze_size_, live_stack->Size());
    }
    heap_->MarkAllocStackAsLive(live_stack);
    live_stack->Reset();
  }
  CheckEmptyMarkStack();
  TimingLogger::ScopedTiming split("Sweep", GetTimings());
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsContinuousMemMapAllocSpace()) {
      space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
      if (space == region_space_ || immune_region_.ContainsSpace(space)) {
        continue;
      }
      TimingLogger::ScopedTiming split2(
          alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepAllocSpace", GetTimings());
      RecordFree(alloc_space->Sweep(swap_bitmaps));
    }
  }
  SweepLargeObjects(swap_bitmaps);
}

void ConcurrentCopying::SweepLargeObjects(bool swap_bitmaps) {
  TimingLogger::ScopedTiming split("SweepLargeObjects", GetTimings());
  RecordFreeLOS(heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps));
}

class ConcurrentCopyingClearBlackPtrsVisitor {
 public:
  explicit ConcurrentCopyingClearBlackPtrsVisitor(ConcurrentCopying* cc)
      : collector_(cc) {}
#ifndef USE_BAKER_OR_BROOKS_READ_BARRIER
  NO_RETURN
#endif
  void operator()(mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    DCHECK(obj != nullptr);
    DCHECK(collector_->heap_->GetMarkBitmap()->Test(obj)) << obj;
    DCHECK_EQ(obj->GetReadBarrierPointer(), ReadBarrier::BlackPtr()) << obj;
    obj->AtomicSetReadBarrierPointer(ReadBarrier::BlackPtr(), ReadBarrier::WhitePtr());
    DCHECK_EQ(obj->GetReadBarrierPointer(), ReadBarrier::WhitePtr()) << obj;
  }

 private:
  ConcurrentCopying* const collector_;
};

// Clear the black ptrs in non-moving objects back to white.
void ConcurrentCopying::ClearBlackPtrs() {
  CHECK(kUseBakerReadBarrier);
  TimingLogger::ScopedTiming split("ClearBlackPtrs", GetTimings());
  ConcurrentCopyingClearBlackPtrsVisitor visitor(this);
  for (auto& space : heap_->GetContinuousSpaces()) {
    if (space == region_space_) {
      continue;
    }
    accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
    if (kVerboseMode) {
      LOG(INFO) << "ClearBlackPtrs: " << *space << " bitmap: " << *mark_bitmap;
    }
    mark_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                  reinterpret_cast<uintptr_t>(space->Limit()),
                                  visitor);
  }
  space::LargeObjectSpace* large_object_space = heap_->GetLargeObjectsSpace();
  large_object_space->GetMarkBitmap()->VisitMarkedRange(
      reinterpret_cast<uintptr_t>(large_object_space->Begin()),
      reinterpret_cast<uintptr_t>(large_object_space->End()),
      visitor);
  // Objects on the allocation stack?
  if (ReadBarrier::kEnableReadBarrierInvariantChecks || kIsDebugBuild) {
    size_t count = GetAllocationStack()->Size();
    auto* it = GetAllocationStack()->Begin();
    auto* end = GetAllocationStack()->End();
    for (size_t i = 0; i < count; ++i, ++it) {
      CHECK_LT(it, end);
      mirror::Object* obj = it->AsMirrorPtr();
      if (obj != nullptr) {
        // Must have been cleared above.
        CHECK_EQ(obj->GetReadBarrierPointer(), ReadBarrier::WhitePtr()) << obj;
      }
    }
  }
}

void ConcurrentCopying::ReclaimPhase() {
  TimingLogger::ScopedTiming split("ReclaimPhase", GetTimings());
  if (kVerboseMode) {
    LOG(INFO) << "GC ReclaimPhase";
  }
  Thread* self = Thread::Current();

  {
    // Double-check that the mark stack is empty.
    // Note: need to set this after VerifyNoFromSpaceRef().
    is_asserting_to_space_invariant_ = false;
    QuasiAtomic::ThreadFenceForConstructor();
    if (kVerboseMode) {
      LOG(INFO) << "Issue an empty check point. ";
    }
    IssueEmptyCheckpoint();
    // Disable the check.
    is_mark_stack_push_disallowed_.StoreSequentiallyConsistent(0);
    CheckEmptyMarkStack();
  }

  {
    // Record freed objects.
    TimingLogger::ScopedTiming split2("RecordFree", GetTimings());
    // Don't include thread-locals that are in the to-space.
    uint64_t from_bytes = region_space_->GetBytesAllocatedInFromSpace();
    uint64_t from_objects = region_space_->GetObjectsAllocatedInFromSpace();
    uint64_t unevac_from_bytes = region_space_->GetBytesAllocatedInUnevacFromSpace();
    uint64_t unevac_from_objects = region_space_->GetObjectsAllocatedInUnevacFromSpace();
    uint64_t to_bytes = bytes_moved_.LoadSequentiallyConsistent();
    uint64_t to_objects = objects_moved_.LoadSequentiallyConsistent();
    if (kEnableFromSpaceAccountingCheck) {
      CHECK_EQ(from_space_num_objects_at_first_pause_, from_objects + unevac_from_objects);
      CHECK_EQ(from_space_num_bytes_at_first_pause_, from_bytes + unevac_from_bytes);
    }
    CHECK_LE(to_objects, from_objects);
    CHECK_LE(to_bytes, from_bytes);
    int64_t freed_bytes = from_bytes - to_bytes;
    int64_t freed_objects = from_objects - to_objects;
    if (kVerboseMode) {
      LOG(INFO) << "RecordFree:"
                << " from_bytes=" << from_bytes << " from_objects=" << from_objects
                << " unevac_from_bytes=" << unevac_from_bytes
                << " unevac_from_objects=" << unevac_from_objects
                << " to_bytes=" << to_bytes << " to_objects=" << to_objects
                << " freed_bytes=" << freed_bytes << " freed_objects=" << freed_objects
                << " from_space size=" << region_space_->FromSpaceSize()
                << " unevac_from_space size=" << region_space_->UnevacFromSpaceSize()
                << " to_space size=" << region_space_->ToSpaceSize();
      LOG(INFO) << "(before) num_bytes_allocated="
                << heap_->num_bytes_allocated_.LoadSequentiallyConsistent();
    }
    RecordFree(ObjectBytePair(freed_objects, freed_bytes));
    if (kVerboseMode) {
      LOG(INFO) << "(after) num_bytes_allocated="
                << heap_->num_bytes_allocated_.LoadSequentiallyConsistent();
    }
  }

  {
    TimingLogger::ScopedTiming split3("ComputeUnevacFromSpaceLiveRatio", GetTimings());
    ComputeUnevacFromSpaceLiveRatio();
  }

  {
    TimingLogger::ScopedTiming split4("ClearFromSpace", GetTimings());
    region_space_->ClearFromSpace();
  }

  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    if (kUseBakerReadBarrier) {
      ClearBlackPtrs();
    }
    Sweep(false);
    SwapBitmaps();
    heap_->UnBindBitmaps();

    // Remove bitmaps for the immune spaces.
    while (!cc_bitmaps_.empty()) {
      accounting::ContinuousSpaceBitmap* cc_bitmap = cc_bitmaps_.back();
      cc_heap_bitmap_->RemoveContinuousSpaceBitmap(cc_bitmap);
      delete cc_bitmap;
      cc_bitmaps_.pop_back();
    }
    region_space_bitmap_ = nullptr;
  }

  CheckEmptyMarkStack();

  if (kVerboseMode) {
    LOG(INFO) << "GC end of ReclaimPhase";
  }
}
1273
class ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor {
 public:
  explicit ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor(ConcurrentCopying* cc)
      : collector_(cc) {}
  void operator()(mirror::Object* ref) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    DCHECK(ref != nullptr);
    DCHECK(collector_->region_space_bitmap_->Test(ref)) << ref;
    DCHECK(collector_->region_space_->IsInUnevacFromSpace(ref)) << ref;
    if (kUseBakerReadBarrier) {
      DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::BlackPtr()) << ref;
      // Clear the black ptr.
      ref->AtomicSetReadBarrierPointer(ReadBarrier::BlackPtr(), ReadBarrier::WhitePtr());
      DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::WhitePtr()) << ref;
    }
    size_t obj_size = ref->SizeOf();
    size_t alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment);
    collector_->region_space_->AddLiveBytes(ref, alloc_size);
  }

 private:
  ConcurrentCopying* const collector_;
};

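// A worked example of the live-bytes accounting done by the visitor above
// (a sketch, assuming space::RegionSpace::kAlignment is 8 bytes): a 20-byte
// object in an unevacuated from-space region contributes
// RoundUp(20, 8) == 24 bytes to that region's live total, matching the
// rounded-up size it was charged at allocation time.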
// Compute how many bytes of live objects are left in each unevac from-space region.
void ConcurrentCopying::ComputeUnevacFromSpaceLiveRatio() {
  region_space_->AssertAllRegionLiveBytesZeroOrCleared();
  ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor visitor(this);
  region_space_bitmap_->VisitMarkedRange(reinterpret_cast<uintptr_t>(region_space_->Begin()),
                                         reinterpret_cast<uintptr_t>(region_space_->Limit()),
                                         visitor);
}

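// The to-space invariant: once marking is active, every reference that a
// mutator can observe must point to a to-space object, to a marked object in
// the unevac from-space, or to a (marked or immune) non-moving-space object;
// a raw from-space reference must never escape. The AssertToSpaceInvariant()
// overloads below verify this and dump extra diagnostics on a violation.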
// Assert the to-space invariant.
void ConcurrentCopying::AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset,
                                               mirror::Object* ref) {
  CHECK(heap_->collector_type_ == kCollectorTypeCC) << static_cast<size_t>(heap_->collector_type_);
  if (is_asserting_to_space_invariant_) {
    if (region_space_->IsInToSpace(ref)) {
      // OK.
      return;
    } else if (region_space_->IsInUnevacFromSpace(ref)) {
      CHECK(region_space_bitmap_->Test(ref)) << ref;
    } else if (region_space_->IsInFromSpace(ref)) {
      // Not OK. Do extra logging.
      if (obj != nullptr) {
        LogFromSpaceRefHolder(obj, offset);
      }
      ref->GetLockWord(false).Dump(LOG(INTERNAL_FATAL));
      CHECK(false) << "Found from-space ref " << ref << " " << PrettyTypeOf(ref);
    } else {
      AssertToSpaceInvariantInNonMovingSpace(obj, ref);
    }
  }
}

class RootPrinter {
 public:
  RootPrinter() { }

  template <class MirrorType>
  ALWAYS_INLINE void VisitRootIfNonNull(mirror::CompressedReference<MirrorType>* root)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    if (!root->IsNull()) {
      VisitRoot(root);
    }
  }

  template <class MirrorType>
  void VisitRoot(mirror::Object** root)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    LOG(INTERNAL_FATAL) << "root=" << root << " ref=" << *root;
  }

  template <class MirrorType>
  void VisitRoot(mirror::CompressedReference<MirrorType>* root)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    LOG(INTERNAL_FATAL) << "root=" << root << " ref=" << root->AsMirrorPtr();
  }
};

void ConcurrentCopying::AssertToSpaceInvariant(GcRootSource* gc_root_source,
                                               mirror::Object* ref) {
  CHECK(heap_->collector_type_ == kCollectorTypeCC) << static_cast<size_t>(heap_->collector_type_);
  if (is_asserting_to_space_invariant_) {
    if (region_space_->IsInToSpace(ref)) {
      // OK.
      return;
    } else if (region_space_->IsInUnevacFromSpace(ref)) {
      CHECK(region_space_bitmap_->Test(ref)) << ref;
    } else if (region_space_->IsInFromSpace(ref)) {
      // Not OK. Do extra logging.
      if (gc_root_source == nullptr) {
        // No info.
      } else if (gc_root_source->HasArtField()) {
        ArtField* field = gc_root_source->GetArtField();
        LOG(INTERNAL_FATAL) << "gc root in field " << field << " " << PrettyField(field);
        RootPrinter root_printer;
        field->VisitRoots(root_printer);
      } else if (gc_root_source->HasArtMethod()) {
        ArtMethod* method = gc_root_source->GetArtMethod();
        LOG(INTERNAL_FATAL) << "gc root in method " << method << " " << PrettyMethod(method);
        RootPrinter root_printer;
        method->VisitRoots(root_printer);
      }
      ref->GetLockWord(false).Dump(LOG(INTERNAL_FATAL));
      region_space_->DumpNonFreeRegions(LOG(INTERNAL_FATAL));
      PrintFileToLog("/proc/self/maps", LogSeverity::INTERNAL_FATAL);
      MemMap::DumpMaps(LOG(INTERNAL_FATAL), true);
      CHECK(false) << "Found from-space ref " << ref << " " << PrettyTypeOf(ref);
    } else {
      AssertToSpaceInvariantInNonMovingSpace(nullptr, ref);
    }
  }
}

void ConcurrentCopying::LogFromSpaceRefHolder(mirror::Object* obj, MemberOffset offset) {
  if (kUseBakerReadBarrier) {
    LOG(INFO) << "holder=" << obj << " " << PrettyTypeOf(obj)
              << " holder rb_ptr=" << obj->GetReadBarrierPointer();
  } else {
    LOG(INFO) << "holder=" << obj << " " << PrettyTypeOf(obj);
  }
  if (region_space_->IsInFromSpace(obj)) {
    LOG(INFO) << "holder is in the from-space.";
  } else if (region_space_->IsInToSpace(obj)) {
    LOG(INFO) << "holder is in the to-space.";
  } else if (region_space_->IsInUnevacFromSpace(obj)) {
    LOG(INFO) << "holder is in the unevac from-space.";
    if (region_space_bitmap_->Test(obj)) {
      LOG(INFO) << "holder is marked in the region space bitmap.";
    } else {
      LOG(INFO) << "holder is not marked in the region space bitmap.";
    }
  } else {
    // In a non-moving space.
    if (immune_region_.ContainsObject(obj)) {
      LOG(INFO) << "holder is in the image or the zygote space.";
      accounting::ContinuousSpaceBitmap* cc_bitmap =
          cc_heap_bitmap_->GetContinuousSpaceBitmap(obj);
      CHECK(cc_bitmap != nullptr)
          << "An immune space object must have a bitmap.";
      if (cc_bitmap->Test(obj)) {
        LOG(INFO) << "holder is marked in the bitmap.";
      } else {
        LOG(INFO) << "holder is NOT marked in the bitmap.";
      }
    } else {
      LOG(INFO) << "holder is in a non-moving (or main) space.";
      accounting::ContinuousSpaceBitmap* mark_bitmap =
          heap_mark_bitmap_->GetContinuousSpaceBitmap(obj);
      accounting::LargeObjectBitmap* los_bitmap =
          heap_mark_bitmap_->GetLargeObjectBitmap(obj);
      CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
      bool is_los = mark_bitmap == nullptr;
      if (!is_los && mark_bitmap->Test(obj)) {
        LOG(INFO) << "holder is marked in the mark bitmap.";
      } else if (is_los && los_bitmap->Test(obj)) {
        LOG(INFO) << "holder is marked in the los bitmap.";
      } else {
        // If obj is on the allocation stack, it is considered marked/alive
        // (but not necessarily on the live stack).
        if (IsOnAllocStack(obj)) {
          LOG(INFO) << "holder is on the alloc stack.";
        } else {
          LOG(INFO) << "holder is not marked or on the alloc stack.";
        }
      }
    }
  }
  LOG(INFO) << "offset=" << offset.SizeValue();
}

void ConcurrentCopying::AssertToSpaceInvariantInNonMovingSpace(mirror::Object* obj,
                                                               mirror::Object* ref) {
  // In a non-moving space. Check that the ref is marked.
  if (immune_region_.ContainsObject(ref)) {
    accounting::ContinuousSpaceBitmap* cc_bitmap =
        cc_heap_bitmap_->GetContinuousSpaceBitmap(ref);
    CHECK(cc_bitmap != nullptr)
        << "An immune space ref must have a bitmap. " << ref;
    if (kUseBakerReadBarrier) {
      CHECK(cc_bitmap->Test(ref))
          << "Unmarked immune space ref. obj=" << obj << " rb_ptr="
          << obj->GetReadBarrierPointer() << " ref=" << ref;
    } else {
      CHECK(cc_bitmap->Test(ref))
          << "Unmarked immune space ref. obj=" << obj << " ref=" << ref;
    }
  } else {
    accounting::ContinuousSpaceBitmap* mark_bitmap =
        heap_mark_bitmap_->GetContinuousSpaceBitmap(ref);
    accounting::LargeObjectBitmap* los_bitmap =
        heap_mark_bitmap_->GetLargeObjectBitmap(ref);
    CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
    bool is_los = mark_bitmap == nullptr;
    if ((!is_los && mark_bitmap->Test(ref)) ||
        (is_los && los_bitmap->Test(ref))) {
      // OK.
    } else {
      // If ref is on the allocation stack, it may not be marked live, but it
      // is still considered marked/alive (though not necessarily on the live
      // stack).
      CHECK(IsOnAllocStack(ref)) << "Unmarked ref that's not on the allocation stack. "
                                 << "obj=" << obj << " ref=" << ref;
    }
  }
}

// Used to scan ref fields of an object.
class ConcurrentCopyingRefFieldsVisitor {
 public:
  explicit ConcurrentCopyingRefFieldsVisitor(ConcurrentCopying* collector)
      : collector_(collector) {}

  void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */)
      const ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    collector_->Process(obj, offset);
  }

  void operator()(mirror::Class* klass, mirror::Reference* ref) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
    CHECK(klass->IsTypeOfReferenceClass());
    collector_->DelayReferenceReferent(klass, ref);
  }

 private:
  ConcurrentCopying* const collector_;
};

// Scan ref fields of an object.
void ConcurrentCopying::Scan(mirror::Object* to_ref) {
  DCHECK(!region_space_->IsInFromSpace(to_ref));
  ConcurrentCopyingRefFieldsVisitor visitor(this);
  to_ref->VisitReferences<true>(visitor, visitor);
}

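// Fields are updated with a lost-update-tolerant CAS: the forwarded
// reference is installed only while the field still holds the value that was
// forwarded, so a concurrent mutator write always wins. A minimal sketch of
// the idiom (illustrative only; 'field' stands for the atomic location
// updated below):
//
//   mirror::Object* expected_ref = ref;  // The value we forwarded.
//   do {
//     if (<current field value> != expected_ref) {
//       break;  // Updated by the mutator; keep the newer value.
//     }
//   } while (<weak CAS of field from expected_ref to to_ref> fails);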
// Process a field.
inline void ConcurrentCopying::Process(mirror::Object* obj, MemberOffset offset) {
  mirror::Object* ref = obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset);
  if (ref == nullptr || region_space_->IsInToSpace(ref)) {
    return;
  }
  mirror::Object* to_ref = Mark(ref);
  if (to_ref == ref) {
    return;
  }
  // This may fail if the mutator writes to the field concurrently, which is OK.
  mirror::Object* expected_ref = ref;
  mirror::Object* new_ref = to_ref;
  do {
    if (expected_ref !=
        obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset)) {
      // It was updated by the mutator.
      break;
    }
  } while (!obj->CasFieldWeakSequentiallyConsistentObjectWithoutWriteBarrier<false, false, kVerifyNone>(
      offset, expected_ref, new_ref));
}

// Process some roots.
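// Roots are updated with the same lost-update-tolerant CAS idiom as
// Process() above. The second overload differs only in that roots are stored
// as compressed references, so the expected/new values are re-packed with
// mirror::CompressedReference<>::FromMirrorPtr() before the compare-and-swap.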
void ConcurrentCopying::VisitRoots(
    mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED) {
  for (size_t i = 0; i < count; ++i) {
    mirror::Object** root = roots[i];
    mirror::Object* ref = *root;
    if (ref == nullptr || region_space_->IsInToSpace(ref)) {
      continue;
    }
    mirror::Object* to_ref = Mark(ref);
    if (to_ref == ref) {
      continue;
    }
    Atomic<mirror::Object*>* addr = reinterpret_cast<Atomic<mirror::Object*>*>(root);
    mirror::Object* expected_ref = ref;
    mirror::Object* new_ref = to_ref;
    do {
      if (expected_ref != addr->LoadRelaxed()) {
        // It was updated by the mutator.
        break;
      }
    } while (!addr->CompareExchangeWeakSequentiallyConsistent(expected_ref, new_ref));
  }
}

void ConcurrentCopying::VisitRoots(
    mirror::CompressedReference<mirror::Object>** roots, size_t count,
    const RootInfo& info ATTRIBUTE_UNUSED) {
  for (size_t i = 0; i < count; ++i) {
    mirror::CompressedReference<mirror::Object>* root = roots[i];
    mirror::Object* ref = root->AsMirrorPtr();
    if (ref == nullptr || region_space_->IsInToSpace(ref)) {
      continue;
    }
    mirror::Object* to_ref = Mark(ref);
    if (to_ref == ref) {
      continue;
    }
    auto* addr = reinterpret_cast<Atomic<mirror::CompressedReference<mirror::Object>>*>(root);
    auto expected_ref = mirror::CompressedReference<mirror::Object>::FromMirrorPtr(ref);
    auto new_ref = mirror::CompressedReference<mirror::Object>::FromMirrorPtr(to_ref);
    do {
      if (ref != addr->LoadRelaxed().AsMirrorPtr()) {
        // It was updated by the mutator.
        break;
      }
    } while (!addr->CompareExchangeWeakSequentiallyConsistent(expected_ref, new_ref));
  }
}

// Fill the given memory block with a dummy object. Used to fill in copies
// of objects that were lost in the race to install a forwarding pointer.
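// As a sketch of the size math below (assuming 4-byte int components and,
// say, a 12-byte int-array data offset): a 32-byte hole becomes an int array
// of length (32 - 12) / 4 == 5, while a hole smaller than the data offset
// can only be filled with a java.lang.Object of exactly that size.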
void ConcurrentCopying::FillWithDummyObject(mirror::Object* dummy_obj, size_t byte_size) {
  CHECK(IsAligned<kObjectAlignment>(byte_size));
  memset(dummy_obj, 0, byte_size);
  mirror::Class* int_array_class = mirror::IntArray::GetArrayClass();
  CHECK(int_array_class != nullptr);
  AssertToSpaceInvariant(nullptr, MemberOffset(0), int_array_class);
  size_t component_size = int_array_class->GetComponentSize();
  CHECK_EQ(component_size, sizeof(int32_t));
  size_t data_offset = mirror::Array::DataOffset(component_size).SizeValue();
  if (data_offset > byte_size) {
    // An int array is too big. Use java.lang.Object.
    mirror::Class* java_lang_Object = WellKnownClasses::ToClass(WellKnownClasses::java_lang_Object);
    AssertToSpaceInvariant(nullptr, MemberOffset(0), java_lang_Object);
    CHECK_EQ(byte_size, java_lang_Object->GetObjectSize());
    dummy_obj->SetClass(java_lang_Object);
    CHECK_EQ(byte_size, dummy_obj->SizeOf());
  } else {
    // Use an int array.
    dummy_obj->SetClass(int_array_class);
    CHECK(dummy_obj->IsArrayInstance());
    int32_t length = (byte_size - data_offset) / component_size;
    dummy_obj->AsArray()->SetLength(length);
    CHECK_EQ(dummy_obj->AsArray()->GetLength(), length)
        << "byte_size=" << byte_size << " length=" << length
        << " component_size=" << component_size << " data_offset=" << data_offset;
    CHECK_EQ(byte_size, dummy_obj->SizeOf())
        << "byte_size=" << byte_size << " length=" << length
        << " component_size=" << component_size << " data_offset=" << data_offset;
  }
}

// Reuse memory blocks that were copies of objects lost in the race to
// install a forwarding pointer.
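// This is a best-fit lookup in a map keyed by block size:
// lower_bound(alloc_size) returns the smallest block that fits, and if the
// leftover tail would be smaller than min_object_size (too small to fill
// with a dummy object), the lookup is retried at alloc_size +
// min_object_size. E.g. (a sketch, assuming a 16-byte min_object_size): a
// 24-byte request that finds a 32-byte block would leave an unusable 8-byte
// tail, so a block of at least 40 bytes is used instead.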
mirror::Object* ConcurrentCopying::AllocateInSkippedBlock(size_t alloc_size) {
  // Try to reuse the blocks that were unused due to CAS failures.
  CHECK(IsAligned<space::RegionSpace::kAlignment>(alloc_size));
  Thread* self = Thread::Current();
  size_t min_object_size = RoundUp(sizeof(mirror::Object), space::RegionSpace::kAlignment);
  MutexLock mu(self, skipped_blocks_lock_);
  auto it = skipped_blocks_map_.lower_bound(alloc_size);
  if (it == skipped_blocks_map_.end()) {
    // Not found.
    return nullptr;
  }
  {
    size_t byte_size = it->first;
    CHECK_GE(byte_size, alloc_size);
    if (byte_size > alloc_size && byte_size - alloc_size < min_object_size) {
      // If remainder would be too small for a dummy object, retry with a larger request size.
      it = skipped_blocks_map_.lower_bound(alloc_size + min_object_size);
      if (it == skipped_blocks_map_.end()) {
        // Not found.
        return nullptr;
      }
      CHECK(IsAligned<space::RegionSpace::kAlignment>(it->first - alloc_size));
      CHECK_GE(it->first - alloc_size, min_object_size)
          << "byte_size=" << byte_size << " it->first=" << it->first
          << " alloc_size=" << alloc_size;
    }
  }
  // Found a block.
  CHECK(it != skipped_blocks_map_.end());
  size_t byte_size = it->first;
  uint8_t* addr = it->second;
  CHECK_GE(byte_size, alloc_size);
  CHECK(region_space_->IsInToSpace(reinterpret_cast<mirror::Object*>(addr)));
  CHECK(IsAligned<space::RegionSpace::kAlignment>(byte_size));
  if (kVerboseMode) {
    LOG(INFO) << "Reusing skipped bytes : " << reinterpret_cast<void*>(addr) << ", " << byte_size;
  }
  skipped_blocks_map_.erase(it);
  memset(addr, 0, byte_size);
  if (byte_size > alloc_size) {
    // Return the remainder to the map.
    CHECK(IsAligned<space::RegionSpace::kAlignment>(byte_size - alloc_size));
    CHECK_GE(byte_size - alloc_size, min_object_size);
    FillWithDummyObject(reinterpret_cast<mirror::Object*>(addr + alloc_size),
                        byte_size - alloc_size);
    CHECK(region_space_->IsInToSpace(reinterpret_cast<mirror::Object*>(addr + alloc_size)));
    skipped_blocks_map_.insert(std::make_pair(byte_size - alloc_size, addr + alloc_size));
  }
  return reinterpret_cast<mirror::Object*>(addr);
}

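// Copy from_ref into the to-space (falling back to a skipped block or the
// non-moving space when the region space is full) and race to publish the
// copy by CAS-ing a forwarding address into from_ref's lock word. Exactly
// one thread wins the race; a loser converts its private copy into a dummy
// object for later reuse and adopts the winner's to_ref.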
mirror::Object* ConcurrentCopying::Copy(mirror::Object* from_ref) {
  DCHECK(region_space_->IsInFromSpace(from_ref));
  // No read barrier to avoid nested RB that might violate the to-space
  // invariant. Note that from_ref is a from-space ref so the SizeOf()
  // call will access the from-space meta objects, but it's OK and necessary.
  size_t obj_size = from_ref->SizeOf<kDefaultVerifyFlags, kWithoutReadBarrier>();
  size_t region_space_alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment);
  size_t region_space_bytes_allocated = 0U;
  size_t non_moving_space_bytes_allocated = 0U;
  size_t bytes_allocated = 0U;
  size_t dummy;
  mirror::Object* to_ref = region_space_->AllocNonvirtual<true>(
      region_space_alloc_size, &region_space_bytes_allocated, nullptr, &dummy);
  bytes_allocated = region_space_bytes_allocated;
  if (to_ref != nullptr) {
    DCHECK_EQ(region_space_alloc_size, region_space_bytes_allocated);
  }
  bool fall_back_to_non_moving = false;
  if (UNLIKELY(to_ref == nullptr)) {
    // Failed to allocate in the region space. Try the skipped blocks.
    to_ref = AllocateInSkippedBlock(region_space_alloc_size);
    if (to_ref != nullptr) {
      // Succeeded in allocating from a skipped block.
      if (heap_->use_tlab_) {
        // This is necessary for the tlab case as it's not accounted in the space.
        region_space_->RecordAlloc(to_ref);
      }
      bytes_allocated = region_space_alloc_size;
    } else {
      // Fall back to the non-moving space.
      fall_back_to_non_moving = true;
      if (kVerboseMode) {
        LOG(INFO) << "Out of memory in the to-space. Fall back to non-moving. skipped_bytes="
                  << to_space_bytes_skipped_.LoadSequentiallyConsistent()
                  << " skipped_objects=" << to_space_objects_skipped_.LoadSequentiallyConsistent();
      }
      to_ref = heap_->non_moving_space_->Alloc(Thread::Current(), obj_size,
                                               &non_moving_space_bytes_allocated, nullptr, &dummy);
      CHECK(to_ref != nullptr) << "Fall-back non-moving space allocation failed";
      bytes_allocated = non_moving_space_bytes_allocated;
      // Mark it in the mark bitmap.
      accounting::ContinuousSpaceBitmap* mark_bitmap =
          heap_mark_bitmap_->GetContinuousSpaceBitmap(to_ref);
      CHECK(mark_bitmap != nullptr);
      CHECK(!mark_bitmap->AtomicTestAndSet(to_ref));
    }
  }
  DCHECK(to_ref != nullptr);

  // Attempt to install the forward pointer. This is in a loop as the
  // lock word atomic write can fail.
  while (true) {
    // Copy the object. TODO: copy only the lockword in the second iteration and on?
    memcpy(to_ref, from_ref, obj_size);

    LockWord old_lock_word = to_ref->GetLockWord(false);

    if (old_lock_word.GetState() == LockWord::kForwardingAddress) {
      // Lost the race. Another thread (either GC or mutator) stored
      // the forwarding pointer first. Make the lost copy (to_ref)
      // look like a valid but dead (dummy) object and keep it for
      // future reuse.
      FillWithDummyObject(to_ref, bytes_allocated);
      if (!fall_back_to_non_moving) {
        DCHECK(region_space_->IsInToSpace(to_ref));
        if (bytes_allocated > space::RegionSpace::kRegionSize) {
          // Free the large alloc.
          region_space_->FreeLarge(to_ref, bytes_allocated);
        } else {
          // Record the lost copy for later reuse.
          heap_->num_bytes_allocated_.FetchAndAddSequentiallyConsistent(bytes_allocated);
          to_space_bytes_skipped_.FetchAndAddSequentiallyConsistent(bytes_allocated);
          to_space_objects_skipped_.FetchAndAddSequentiallyConsistent(1);
          MutexLock mu(Thread::Current(), skipped_blocks_lock_);
          skipped_blocks_map_.insert(std::make_pair(bytes_allocated,
                                                    reinterpret_cast<uint8_t*>(to_ref)));
        }
      } else {
        DCHECK(heap_->non_moving_space_->HasAddress(to_ref));
        DCHECK_EQ(bytes_allocated, non_moving_space_bytes_allocated);
        // Free the non-moving-space chunk.
        accounting::ContinuousSpaceBitmap* mark_bitmap =
            heap_mark_bitmap_->GetContinuousSpaceBitmap(to_ref);
        CHECK(mark_bitmap != nullptr);
        CHECK(mark_bitmap->Clear(to_ref));
        heap_->non_moving_space_->Free(Thread::Current(), to_ref);
      }

      // Get the winner's forward ptr.
      mirror::Object* lost_fwd_ptr = to_ref;
      to_ref = reinterpret_cast<mirror::Object*>(old_lock_word.ForwardingAddress());
      CHECK(to_ref != nullptr);
      CHECK_NE(to_ref, lost_fwd_ptr);
      CHECK(region_space_->IsInToSpace(to_ref) || heap_->non_moving_space_->HasAddress(to_ref));
      CHECK_NE(to_ref->GetLockWord(false).GetState(), LockWord::kForwardingAddress);
      return to_ref;
    }

    // Set the gray ptr.
    if (kUseBakerReadBarrier) {
      to_ref->SetReadBarrierPointer(ReadBarrier::GrayPtr());
    }

    LockWord new_lock_word = LockWord::FromForwardingAddress(reinterpret_cast<size_t>(to_ref));

    // Try to atomically write the fwd ptr.
    bool success = from_ref->CasLockWordWeakSequentiallyConsistent(old_lock_word, new_lock_word);
    if (LIKELY(success)) {
      // The CAS succeeded.
      objects_moved_.FetchAndAddSequentiallyConsistent(1);
      bytes_moved_.FetchAndAddSequentiallyConsistent(region_space_alloc_size);
      if (LIKELY(!fall_back_to_non_moving)) {
        DCHECK(region_space_->IsInToSpace(to_ref));
      } else {
        DCHECK(heap_->non_moving_space_->HasAddress(to_ref));
        DCHECK_EQ(bytes_allocated, non_moving_space_bytes_allocated);
      }
      if (kUseBakerReadBarrier) {
        DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
      }
      DCHECK(GetFwdPtr(from_ref) == to_ref);
      CHECK_NE(to_ref->GetLockWord(false).GetState(), LockWord::kForwardingAddress);
      PushOntoMarkStack(to_ref);
      return to_ref;
    } else {
      // The CAS failed. It may have lost the race or may have failed
      // due to monitor/hashcode ops. Either way, retry.
    }
  }
}

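// Query-only counterpart of Mark(): returns the to-space reference if
// from_ref is already marked (or is treated as live, e.g. because it is on
// the allocation stack) and nullptr otherwise, without marking or copying
// anything.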
mirror::Object* ConcurrentCopying::IsMarked(mirror::Object* from_ref) {
  DCHECK(from_ref != nullptr);
  space::RegionSpace::RegionType rtype = region_space_->GetRegionType(from_ref);
  if (rtype == space::RegionSpace::RegionType::kRegionTypeToSpace) {
    // It's already marked.
    return from_ref;
  }
  mirror::Object* to_ref;
  if (rtype == space::RegionSpace::RegionType::kRegionTypeFromSpace) {
    to_ref = GetFwdPtr(from_ref);
    DCHECK(to_ref == nullptr || region_space_->IsInToSpace(to_ref) ||
           heap_->non_moving_space_->HasAddress(to_ref))
        << "from_ref=" << from_ref << " to_ref=" << to_ref;
  } else if (rtype == space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace) {
    if (region_space_bitmap_->Test(from_ref)) {
      to_ref = from_ref;
    } else {
      to_ref = nullptr;
    }
  } else {
    // from_ref is in a non-moving space.
    if (immune_region_.ContainsObject(from_ref)) {
      accounting::ContinuousSpaceBitmap* cc_bitmap =
          cc_heap_bitmap_->GetContinuousSpaceBitmap(from_ref);
      DCHECK(cc_bitmap != nullptr)
          << "An immune space object must have a bitmap";
      if (kIsDebugBuild) {
        DCHECK(heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref)->Test(from_ref))
            << "Immune space object must be already marked";
      }
      if (cc_bitmap->Test(from_ref)) {
        // Already marked.
        to_ref = from_ref;
      } else {
        // Not marked (this query never marks anything).
        to_ref = nullptr;
      }
    } else {
      // Non-immune non-moving space. Use the mark bitmap.
      accounting::ContinuousSpaceBitmap* mark_bitmap =
          heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref);
      accounting::LargeObjectBitmap* los_bitmap =
          heap_mark_bitmap_->GetLargeObjectBitmap(from_ref);
      CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
      bool is_los = mark_bitmap == nullptr;
      if (!is_los && mark_bitmap->Test(from_ref)) {
        // Already marked.
        to_ref = from_ref;
      } else if (is_los && los_bitmap->Test(from_ref)) {
        // Already marked in LOS.
        to_ref = from_ref;
      } else {
        // Not marked.
        if (IsOnAllocStack(from_ref)) {
          // If on the allocation stack, it's considered marked.
          to_ref = from_ref;
        } else {
          // Not marked.
          to_ref = nullptr;
        }
      }
    }
  }
  return to_ref;
}

bool ConcurrentCopying::IsOnAllocStack(mirror::Object* ref) {
  QuasiAtomic::ThreadFenceAcquire();
  accounting::ObjectStack* alloc_stack = GetAllocationStack();
  return alloc_stack->Contains(ref);
}

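// Mark from_ref and return its to-space reference. The action depends on
// where from_ref lives: to-space refs are already marked; from-space refs
// are forwarded (copying via Copy() if no forwarding pointer exists yet);
// unevac from-space and non-moving-space refs are marked in place via their
// bitmaps. With the Baker read barrier the ptr roughly tracks: white =
// unmarked, gray = marked but not yet scanned (on the mark stack), black =
// scanned.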
mirror::Object* ConcurrentCopying::Mark(mirror::Object* from_ref) {
  if (from_ref == nullptr) {
    return nullptr;
  }
  DCHECK(from_ref != nullptr);
  DCHECK(heap_->collector_type_ == kCollectorTypeCC);
  if (kUseBakerReadBarrier && !is_active_) {
    // In the lock word forwarding address state, the read barrier bits
    // in the lock word are part of the stored forwarding address and
    // invalid. This is usually OK as the from-space copies of objects
    // aren't accessed by mutators due to the to-space
    // invariant. However, during the dex2oat image writing relocation
    // and the zygote compaction, objects can be in the forwarding
    // address state (to store the forward/relocation addresses) and
    // they can still be accessed and the invalid read barrier bits
    // are consulted. If they look gray but aren't really, the
    // read barrier slow path can trigger when it shouldn't. To guard
    // against this, return here if the CC collector isn't running.
    return from_ref;
  }
  DCHECK(region_space_ != nullptr) << "Read barrier slow path taken when CC isn't running?";
  space::RegionSpace::RegionType rtype = region_space_->GetRegionType(from_ref);
  if (rtype == space::RegionSpace::RegionType::kRegionTypeToSpace) {
    // It's already marked.
    return from_ref;
  }
  mirror::Object* to_ref;
  if (rtype == space::RegionSpace::RegionType::kRegionTypeFromSpace) {
    to_ref = GetFwdPtr(from_ref);
    if (kUseBakerReadBarrier) {
      DCHECK(to_ref != ReadBarrier::GrayPtr()) << "from_ref=" << from_ref << " to_ref=" << to_ref;
    }
    if (to_ref == nullptr) {
      // It isn't marked yet. Mark it by copying it to the to-space.
      to_ref = Copy(from_ref);
    }
    DCHECK(region_space_->IsInToSpace(to_ref) || heap_->non_moving_space_->HasAddress(to_ref))
        << "from_ref=" << from_ref << " to_ref=" << to_ref;
  } else if (rtype == space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace) {
    // This may or may not succeed, which is OK.
    if (kUseBakerReadBarrier) {
      from_ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
    }
    if (region_space_bitmap_->AtomicTestAndSet(from_ref)) {
      // Already marked.
      to_ref = from_ref;
    } else {
      // Newly marked.
      to_ref = from_ref;
      if (kUseBakerReadBarrier) {
        DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
      }
      PushOntoMarkStack(to_ref);
    }
  } else {
    // from_ref is in a non-moving space.
    DCHECK(!region_space_->HasAddress(from_ref)) << from_ref;
    if (immune_region_.ContainsObject(from_ref)) {
      accounting::ContinuousSpaceBitmap* cc_bitmap =
          cc_heap_bitmap_->GetContinuousSpaceBitmap(from_ref);
      DCHECK(cc_bitmap != nullptr)
          << "An immune space object must have a bitmap";
      if (kIsDebugBuild) {
        DCHECK(heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref)->Test(from_ref))
            << "Immune space object must be already marked";
      }
      // This may or may not succeed, which is OK.
      if (kUseBakerReadBarrier) {
        from_ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
      }
      if (cc_bitmap->AtomicTestAndSet(from_ref)) {
        // Already marked.
        to_ref = from_ref;
      } else {
        // Newly marked.
        to_ref = from_ref;
        if (kUseBakerReadBarrier) {
          DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
        }
        PushOntoMarkStack(to_ref);
      }
    } else {
      // Use the mark bitmap.
      accounting::ContinuousSpaceBitmap* mark_bitmap =
          heap_mark_bitmap_->GetContinuousSpaceBitmap(from_ref);
      accounting::LargeObjectBitmap* los_bitmap =
          heap_mark_bitmap_->GetLargeObjectBitmap(from_ref);
      CHECK(los_bitmap != nullptr) << "LOS bitmap covers the entire address range";
      bool is_los = mark_bitmap == nullptr;
      if (!is_los && mark_bitmap->Test(from_ref)) {
        // Already marked.
        to_ref = from_ref;
        if (kUseBakerReadBarrier) {
          DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr() ||
                 to_ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr());
        }
      } else if (is_los && los_bitmap->Test(from_ref)) {
        // Already marked in LOS.
        to_ref = from_ref;
        if (kUseBakerReadBarrier) {
          DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr() ||
                 to_ref->GetReadBarrierPointer() == ReadBarrier::BlackPtr());
        }
      } else {
        // Not marked.
        if (IsOnAllocStack(from_ref)) {
          // If it's on the allocation stack, it's considered marked. Keep it white.
          to_ref = from_ref;
          // Objects on the allocation stack need not be marked.
          if (!is_los) {
            DCHECK(!mark_bitmap->Test(to_ref));
          } else {
            DCHECK(!los_bitmap->Test(to_ref));
          }
          if (kUseBakerReadBarrier) {
            DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::WhitePtr());
          }
        } else {
          // Not marked or on the allocation stack. Try to mark it.
          // This may or may not succeed, which is OK.
          if (kUseBakerReadBarrier) {
            from_ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
          }
          if (!is_los && mark_bitmap->AtomicTestAndSet(from_ref)) {
            // Already marked.
            to_ref = from_ref;
          } else if (is_los && los_bitmap->AtomicTestAndSet(from_ref)) {
            // Already marked in LOS.
            to_ref = from_ref;
          } else {
            // Newly marked.
            to_ref = from_ref;
            if (kUseBakerReadBarrier) {
              DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr());
            }
            PushOntoMarkStack(to_ref);
          }
        }
      }
    }
  }
  return to_ref;
}

void ConcurrentCopying::FinishPhase() {
  {
    MutexLock mu(Thread::Current(), mark_stack_lock_);
    CHECK_EQ(pooled_mark_stacks_.size(), kMarkStackPoolSize);
  }
  region_space_ = nullptr;
  {
    MutexLock mu(Thread::Current(), skipped_blocks_lock_);
    skipped_blocks_map_.clear();
  }
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  heap_->ClearMarkedObjects();
}

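// If the referent of the given field is marked (live), update the field to
// point at the to-space copy. The release fence before Assign() and the
// sequentially consistent fence after it are intended to keep the copy's
// contents visible to threads that subsequently load the updated reference
// (a conservative pairing with the reader side).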
bool ConcurrentCopying::IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* field) {
  mirror::Object* from_ref = field->AsMirrorPtr();
  mirror::Object* to_ref = IsMarked(from_ref);
  if (to_ref == nullptr) {
    return false;
  }
  if (from_ref != to_ref) {
    QuasiAtomic::ThreadFenceRelease();
    field->Assign(to_ref);
    QuasiAtomic::ThreadFenceSequentiallyConsistent();
  }
  return true;
}

mirror::Object* ConcurrentCopying::MarkObject(mirror::Object* from_ref) {
  return Mark(from_ref);
}

void ConcurrentCopying::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) {
  heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, reference, this);
}

void ConcurrentCopying::ProcessReferences(Thread* self) {
  TimingLogger::ScopedTiming split("ProcessReferences", GetTimings());
  // We don't really need to lock the heap bitmap lock as we use CAS to mark in bitmaps.
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetHeap()->GetReferenceProcessor()->ProcessReferences(
      true /*concurrent*/, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(), this);
}

void ConcurrentCopying::RevokeAllThreadLocalBuffers() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  region_space_->RevokeAllThreadLocalBuffers();
}

}  // namespace collector
}  // namespace gc
}  // namespace art