Refactor sweeping logic into malloc space.

Removes the sweep code duplicated between MarkSweep and SemiSpace;
both collectors now delegate to MallocSpace::Sweep.

Deleted VerifyImageRoots since it had race conditions, and the
property it checked is already covered by pre/post GC heap
verification.
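In sketch form (a simplified illustration of the new call pattern,
not the exact code), each collector's sweep loop now reduces to:

    for (const auto& space : GetHeap()->GetContinuousSpaces()) {
      if (space->IsMallocSpace()) {
        size_t freed_objects = 0;
        size_t freed_bytes = 0;
        space->AsMallocSpace()->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
        GetHeap()->RecordFree(freed_objects, freed_bytes);
      }
    }

MallocSpace::Sweep hides the SweepWalk plumbing and picks the zygote
or regular sweep callback internally.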
Change-Id: I9636359ff6adb3e93d56ce77a3e15299ed23dfd5
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index cae2a54..a6fb35d 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -333,12 +333,6 @@
}
}
- // Before freeing anything, lets verify the heap.
- if (kIsDebugBuild) {
- ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
- VerifyImageRoots();
- }
-
{
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
@@ -595,23 +589,6 @@
timings_.EndSplit();
}
-void MarkSweep::CheckObject(const Object* obj) {
- DCHECK(obj != NULL);
- VisitObjectReferences(const_cast<Object*>(obj), [this](const Object* obj, const Object* ref,
- MemberOffset offset, bool is_static) NO_THREAD_SAFETY_ANALYSIS {
- Locks::heap_bitmap_lock_->AssertSharedHeld(Thread::Current());
- CheckReference(obj, ref, offset, is_static);
- }, true);
-}
-
-void MarkSweep::VerifyImageRootVisitor(Object* root, void* arg) {
- DCHECK(root != NULL);
- DCHECK(arg != NULL);
- MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
- DCHECK(mark_sweep->heap_->GetMarkBitmap()->Test(root));
- mark_sweep->CheckObject(root);
-}
-
void MarkSweep::BindLiveToMarkBitmap(space::ContinuousSpace* space) {
CHECK(space->IsMallocSpace());
space::MallocSpace* alloc_space = space->AsMallocSpace();
@@ -884,30 +861,6 @@
}
}
-void MarkSweep::VerifyImageRoots() {
- // Verify roots ensures that all the references inside the image space point
- // objects which are either in the image space or marked objects in the alloc
- // space
- timings_.StartSplit("VerifyImageRoots");
- for (const auto& space : GetHeap()->GetContinuousSpaces()) {
- if (space->IsImageSpace()) {
- space::ImageSpace* image_space = space->AsImageSpace();
- uintptr_t begin = reinterpret_cast<uintptr_t>(image_space->Begin());
- uintptr_t end = reinterpret_cast<uintptr_t>(image_space->End());
- accounting::SpaceBitmap* live_bitmap = image_space->GetLiveBitmap();
- DCHECK(live_bitmap != NULL);
- live_bitmap->VisitMarkedRange(begin, end, [this](const Object* obj) {
- if (kCheckLocks) {
- Locks::heap_bitmap_lock_->AssertSharedHeld(Thread::Current());
- }
- DCHECK(obj != NULL);
- CheckObject(obj);
- });
- }
- }
- timings_.EndSplit();
-}
-
class RecursiveMarkTask : public MarkStackTask<false> {
public:
RecursiveMarkTask(ThreadPool* thread_pool, MarkSweep* mark_sweep,
@@ -1050,12 +1003,6 @@
Runtime::Current()->SweepSystemWeaks(VerifySystemWeakIsLiveCallback, this);
}
-struct SweepCallbackContext {
- MarkSweep* mark_sweep;
- space::AllocSpace* space;
- Thread* self;
-};
-
class CheckpointMarkThreadRoots : public Closure {
public:
explicit CheckpointMarkThreadRoots(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {}
@@ -1095,36 +1042,6 @@
timings_.EndSplit();
}
-void MarkSweep::SweepCallback(size_t num_ptrs, Object** ptrs, void* arg) {
- SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
- MarkSweep* mark_sweep = context->mark_sweep;
- Heap* heap = mark_sweep->GetHeap();
- space::AllocSpace* space = context->space;
- Thread* self = context->self;
- Locks::heap_bitmap_lock_->AssertExclusiveHeld(self);
- // Use a bulk free, that merges consecutive objects before freeing or free per object?
- // Documentation suggests better free performance with merging, but this may be at the expensive
- // of allocation.
- size_t freed_objects = num_ptrs;
- // AllocSpace::FreeList clears the value in ptrs, so perform after clearing the live bit
- size_t freed_bytes = space->FreeList(self, num_ptrs, ptrs);
- heap->RecordFree(freed_objects, freed_bytes);
- mark_sweep->freed_objects_.FetchAndAdd(freed_objects);
- mark_sweep->freed_bytes_.FetchAndAdd(freed_bytes);
-}
-
-void MarkSweep::ZygoteSweepCallback(size_t num_ptrs, Object** ptrs, void* arg) {
- SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
- Locks::heap_bitmap_lock_->AssertExclusiveHeld(context->self);
- Heap* heap = context->mark_sweep->GetHeap();
- // We don't free any actual memory to avoid dirtying the shared zygote pages.
- for (size_t i = 0; i < num_ptrs; ++i) {
- Object* obj = static_cast<Object*>(ptrs[i]);
- heap->GetLiveBitmap()->Clear(obj);
- heap->GetCardTable()->MarkCard(obj);
- }
-}
-
void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitmaps) {
space::MallocSpace* space = heap_->GetNonMovingSpace();
timings_.StartSplit("SweepArray");
@@ -1206,45 +1123,19 @@
void MarkSweep::Sweep(bool swap_bitmaps) {
DCHECK(mark_stack_->IsEmpty());
TimingLogger::ScopedSplit("Sweep", &timings_);
-
- const bool partial = (GetGcType() == kGcTypePartial);
- SweepCallbackContext scc;
- scc.mark_sweep = this;
- scc.self = Thread::Current();
for (const auto& space : GetHeap()->GetContinuousSpaces()) {
- if (!space->IsMallocSpace()) {
- continue;
- }
- // We always sweep always collect spaces.
- bool sweep_space = space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect;
- if (!partial && !sweep_space) {
- // We sweep full collect spaces when the GC isn't a partial GC (ie its full).
- sweep_space = (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect);
- }
- if (sweep_space) {
- uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
- uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
- scc.space = space->AsMallocSpace();
- accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
- accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
- if (swap_bitmaps) {
- std::swap(live_bitmap, mark_bitmap);
- }
- if (!space->IsZygoteSpace()) {
- TimingLogger::ScopedSplit split("SweepAllocSpace", &timings_);
- // Bitmaps are pre-swapped for optimization which enables sweeping with the heap unlocked.
- accounting::SpaceBitmap::SweepWalk(*live_bitmap, *mark_bitmap, begin, end,
- &SweepCallback, reinterpret_cast<void*>(&scc));
- } else {
- TimingLogger::ScopedSplit split("SweepZygote", &timings_);
- // Zygote sweep takes care of dirtying cards and clearing live bits, does not free actual
- // memory.
- accounting::SpaceBitmap::SweepWalk(*live_bitmap, *mark_bitmap, begin, end,
- &ZygoteSweepCallback, reinterpret_cast<void*>(&scc));
- }
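+ // Sweeping now lives in MallocSpace::Sweep; invoke it and record what it freed.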
+ if (space->IsMallocSpace()) {
+ space::MallocSpace* malloc_space = space->AsMallocSpace();
+ TimingLogger::ScopedSplit split(
+ malloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepAllocSpace", &timings_);
+ size_t freed_objects = 0;
+ size_t freed_bytes = 0;
+ malloc_space->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
+ heap_->RecordFree(freed_objects, freed_bytes);
+ freed_objects_.FetchAndAdd(freed_objects);
+ freed_bytes_.FetchAndAdd(freed_bytes);
}
}
-
SweepLargeObjects(swap_bitmaps);
}
@@ -1272,48 +1163,6 @@
GetHeap()->RecordFree(freed_objects, freed_bytes);
}
-void MarkSweep::CheckReference(const Object* obj, const Object* ref, MemberOffset offset, bool is_static) {
- for (const auto& space : GetHeap()->GetContinuousSpaces()) {
- if (space->IsMallocSpace() && space->Contains(ref)) {
- DCHECK(IsMarked(obj));
-
- bool is_marked = IsMarked(ref);
- if (!is_marked) {
- LOG(INFO) << *space;
- LOG(WARNING) << (is_static ? "Static ref'" : "Instance ref'") << PrettyTypeOf(ref)
- << "' (" << reinterpret_cast<const void*>(ref) << ") in '" << PrettyTypeOf(obj)
- << "' (" << reinterpret_cast<const void*>(obj) << ") at offset "
- << reinterpret_cast<void*>(offset.Int32Value()) << " wasn't marked";
-
- const Class* klass = is_static ? obj->AsClass() : obj->GetClass();
- DCHECK(klass != NULL);
- const ObjectArray<ArtField>* fields = is_static ? klass->GetSFields() : klass->GetIFields();
- DCHECK(fields != NULL);
- bool found = false;
- for (int32_t i = 0; i < fields->GetLength(); ++i) {
- const ArtField* cur = fields->Get(i);
- if (cur->GetOffset().Int32Value() == offset.Int32Value()) {
- LOG(WARNING) << "Field referencing the alloc space was " << PrettyField(cur);
- found = true;
- break;
- }
- }
- if (!found) {
- LOG(WARNING) << "Could not find field in object alloc space with offset " << offset.Int32Value();
- }
-
- bool obj_marked = heap_->GetCardTable()->IsDirty(obj);
- if (!obj_marked) {
- LOG(WARNING) << "Object '" << PrettyTypeOf(obj) << "' "
- << "(" << reinterpret_cast<const void*>(obj) << ") contains references to "
- << "the alloc space, but wasn't card marked";
- }
- }
- }
- break;
- }
-}
-
// Process the "referent" field in a java.lang.ref.Reference. If the
// referent has not yet been marked, put it on the appropriate list in
// the heap for later processing.
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index 62991bb..e2eafb5 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -100,11 +100,6 @@
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Verify that image roots point to only marked objects within the alloc space.
- void VerifyImageRoots()
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
// Builds a mark stack and recursively mark until it empties.
void RecursiveMark()
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
@@ -251,20 +246,6 @@
// Returns true if we need to add obj to a mark stack.
bool MarkObjectParallel(const mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS;
- static void SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
-
- // Special sweep for zygote that just marks objects / dirties cards.
- static void ZygoteSweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
-
- void CheckReference(const mirror::Object* obj, const mirror::Object* ref, MemberOffset offset,
- bool is_static)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
-
- void CheckObject(const mirror::Object* obj)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
-
// Verify the roots of the heap and print out information related to any invalid roots.
// Called in MarkObject, so we may not hold the mutator lock.
void VerifyRoots()
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index a4f7121..0dd8792 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -465,45 +465,19 @@
void SemiSpace::Sweep(bool swap_bitmaps) {
DCHECK(mark_stack_->IsEmpty());
TimingLogger::ScopedSplit("Sweep", &timings_);
-
- const bool partial = (GetGcType() == kGcTypePartial);
- SweepCallbackContext scc;
- scc.mark_sweep = this;
- scc.self = Thread::Current();
for (const auto& space : GetHeap()->GetContinuousSpaces()) {
- if (!space->IsMallocSpace()) {
- continue;
- }
- // We always sweep always collect spaces.
- bool sweep_space = (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect);
- if (!partial && !sweep_space) {
- // We sweep full collect spaces when the GC isn't a partial GC (ie its full).
- sweep_space = (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect);
- }
- if (sweep_space && space->IsMallocSpace()) {
- uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
- uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
- scc.space = space->AsMallocSpace();
- accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
- accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
- if (swap_bitmaps) {
- std::swap(live_bitmap, mark_bitmap);
- }
- if (!space->IsZygoteSpace()) {
- TimingLogger::ScopedSplit split("SweepAllocSpace", &timings_);
- // Bitmaps are pre-swapped for optimization which enables sweeping with the heap unlocked.
- accounting::SpaceBitmap::SweepWalk(*live_bitmap, *mark_bitmap, begin, end,
- &SweepCallback, reinterpret_cast<void*>(&scc));
- } else {
- TimingLogger::ScopedSplit split("SweepZygote", &timings_);
- // Zygote sweep takes care of dirtying cards and clearing live bits, does not free actual
- // memory.
- accounting::SpaceBitmap::SweepWalk(*live_bitmap, *mark_bitmap, begin, end,
- &ZygoteSweepCallback, reinterpret_cast<void*>(&scc));
- }
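+ // Never sweep the semi-spaces themselves: from-space is reclaimed wholesale by the copy,
+ // and to-space holds the copied survivors.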
+ if (space->IsMallocSpace() && space != from_space_ && space != to_space_) {
+ space::MallocSpace* malloc_space = space->AsMallocSpace();
+ TimingLogger::ScopedSplit split(
+ malloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepAllocSpace", &timings_);
+ size_t freed_objects = 0;
+ size_t freed_bytes = 0;
+ malloc_space->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
+ heap_->RecordFree(freed_objects, freed_bytes);
+ freed_objects_.FetchAndAdd(freed_objects);
+ freed_bytes_.FetchAndAdd(freed_bytes);
}
}
-
SweepLargeObjects(swap_bitmaps);
}
diff --git a/runtime/gc/space/malloc_space.cc b/runtime/gc/space/malloc_space.cc
index 46df0a1..2727431 100644
--- a/runtime/gc/space/malloc_space.cc
+++ b/runtime/gc/space/malloc_space.cc
@@ -16,7 +16,8 @@
#include "malloc_space.h"
-#include "gc/accounting/card_table.h"
+#include "gc/accounting/card_table-inl.h"
+#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
@@ -238,6 +239,83 @@
<< ",name=\"" << GetName() << "\"]";
}
+struct SweepCallbackContext {
+ bool swap_bitmaps;
+ Heap* heap;
+ space::MallocSpace* space;
+ Thread* self;
+ size_t freed_objects;
+ size_t freed_bytes;
+};
+
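+// Invoked by SweepWalk with each batch of dead objects; bulk-frees them back to the space.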
+static void SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg) {
+ SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
+ space::AllocSpace* space = context->space;
+ Thread* self = context->self;
+ Locks::heap_bitmap_lock_->AssertExclusiveHeld(self);
+ // If the bitmaps weren't swapped, we must clear the live bits ourselves, since the GC
+ // skips re-swapping the bitmaps as an optimization.
+ if (!context->swap_bitmaps) {
+ accounting::SpaceBitmap* bitmap = context->space->GetLiveBitmap();
+ for (size_t i = 0; i < num_ptrs; ++i) {
+ bitmap->Clear(ptrs[i]);
+ }
+ }
+ // Use a bulk free that merges consecutive objects before freeing, or free per object?
+ // Documentation suggests better free performance with merging, but this may be at the expense
+ // of allocation.
+ context->freed_objects += num_ptrs;
+ context->freed_bytes += space->FreeList(self, num_ptrs, ptrs);
+}
+
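+// Zygote variant: dirties cards for the dead objects instead of freeing them, so the shared
+// zygote heap pages stay untouched.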
+static void ZygoteSweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg) {
+ SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
+ Locks::heap_bitmap_lock_->AssertExclusiveHeld(context->self);
+ accounting::CardTable* card_table = context->heap->GetCardTable();
+ // If the bitmaps weren't swapped, we must clear the live bits ourselves, since the GC
+ // skips re-swapping the bitmaps as an optimization.
+ if (!context->swap_bitmaps) {
+ accounting::SpaceBitmap* bitmap = context->space->GetLiveBitmap();
+ for (size_t i = 0; i < num_ptrs; ++i) {
+ bitmap->Clear(ptrs[i]);
+ }
+ }
+ // We don't free any actual memory to avoid dirtying the shared zygote pages.
+ for (size_t i = 0; i < num_ptrs; ++i) {
+ // Need to mark the card since this will update the mod-union table next GC cycle.
+ card_table->MarkCard(ptrs[i]);
+ }
+}
+
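+// Walks the difference between the live and mark bitmaps over [Begin, End) and hands each
+// run of dead objects to the appropriate callback above.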
+void MallocSpace::Sweep(bool swap_bitmaps, size_t* freed_objects, size_t* freed_bytes) {
+ DCHECK(freed_objects != nullptr);
+ DCHECK(freed_bytes != nullptr);
+ accounting::SpaceBitmap* live_bitmap = GetLiveBitmap();
+ accounting::SpaceBitmap* mark_bitmap = GetMarkBitmap();
+ // If the live and mark bitmaps are bound (the same bitmap), there is nothing to sweep.
+ if (live_bitmap == mark_bitmap) {
+ return;
+ }
+ SweepCallbackContext scc;
+ scc.swap_bitmaps = swap_bitmaps;
+ scc.heap = Runtime::Current()->GetHeap();
+ scc.self = Thread::Current();
+ scc.space = this;
+ scc.freed_objects = 0;
+ scc.freed_bytes = 0;
+ if (swap_bitmaps) {
+ std::swap(live_bitmap, mark_bitmap);
+ }
+ // Bitmaps are pre-swapped as an optimization, which enables sweeping with the heap unlocked.
+ accounting::SpaceBitmap::SweepWalk(*live_bitmap, *mark_bitmap,
+ reinterpret_cast<uintptr_t>(Begin()),
+ reinterpret_cast<uintptr_t>(End()),
+ IsZygoteSpace() ? &ZygoteSweepCallback : &SweepCallback,
+ reinterpret_cast<void*>(&scc));
+ *freed_objects += scc.freed_objects;
+ *freed_bytes += scc.freed_bytes;
+}
+
} // namespace space
} // namespace gc
} // namespace art
diff --git a/runtime/gc/space/malloc_space.h b/runtime/gc/space/malloc_space.h
index d25f9cb..7681b6d 100644
--- a/runtime/gc/space/malloc_space.h
+++ b/runtime/gc/space/malloc_space.h
@@ -148,6 +148,9 @@
// don't do this we may get heap corruption instead of a segfault at null.
virtual void InvalidateAllocator() = 0;
+ // Sweep the unmarked objects in this space, adding the freed object and byte counts to the
+ // out-parameters.
+ void Sweep(bool swap_bitmaps, size_t* freed_objects, size_t* freed_bytes);
+
protected:
MallocSpace(const std::string& name, MemMap* mem_map, byte* begin, byte* end,
byte* limit, size_t growth_limit);