Bump pointer space only collection.
Add a collection mode that collects only the bump pointer spaces, as
opposed to the whole heap. It is disabled by default behind a
compile-time flag.
TODO: It still scans the entire non-moving space to look for
inter-space references. Use a card-table-like technique to limit the
scope of the scanning and speed it up.
Bug: 11650816
Change-Id: I56cb11e78e47a578bff644e6e6c63d978cfedf39
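For context on the TODO above, a minimal sketch of the card-table technique it refers to: a write barrier dirties one byte per fixed-size card on every reference store, and the collector then scans only the dirty cards of the non-moving space instead of all of it. The card size, names, and layout here are illustrative assumptions, not ART's actual card table.

    #include <cstddef>
    #include <cstdint>

    static constexpr size_t kCardShift = 7;            // 128-byte cards (assumed)
    static constexpr size_t kSpaceSize = 1 << 20;      // 1 MiB space (assumed)
    static constexpr size_t kNumCards = kSpaceSize >> kCardShift;

    uint8_t space[kSpaceSize];   // stand-in for the non-moving space
    uint8_t cards[kNumCards];    // one "dirty" byte per card

    // Write barrier: run on every reference store into the space.
    void MarkCard(const void* slot) {
      size_t offset = static_cast<const uint8_t*>(slot) - space;
      cards[offset >> kCardShift] = 1;
    }

    // At GC time, visit only the dirty cards, looking for references
    // from the non-moving space into the bump pointer spaces.
    template <typename Visitor>
    void ScanDirtyCards(Visitor&& visit) {
      for (size_t i = 0; i < kNumCards; ++i) {
        if (cards[i] != 0) {
          visit(space + (i << kCardShift), space + ((i + 1) << kCardShift));
          cards[i] = 0;  // clear the card after scanning it
        }
      }
    }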
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index aed260c..113139b 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -62,8 +62,12 @@
static constexpr bool kProtectFromSpace = true;
static constexpr bool kResetFromSpace = true;
-// TODO: move this to a new file as a new garbage collector?
+// TODO: move these to a new file as a new garbage collector?
+// If true, 'promote' some objects from the bump pointer spaces to the non-moving space.
static constexpr bool kEnableSimplePromo = false;
+// If true, some collections collect only the bump pointer spaces, as
+// opposed to the whole heap.
+static constexpr bool kEnableBumpPointerSpacesOnlyCollection = false;
// TODO: Unduplicate logic.
void SemiSpace::ImmuneSpace(space::ContinuousSpace* space) {
@@ -89,7 +93,11 @@
// being sorted by Heap::AddContinuousSpace.
if (prev_space != nullptr && IsImmuneSpace(prev_space)) {
immune_begin_ = std::min(reinterpret_cast<Object*>(space->Begin()), immune_begin_);
- immune_end_ = std::max(reinterpret_cast<Object*>(space->End()), immune_end_);
+      // Use Limit() instead of End() because otherwise, if
+      // kEnableBumpPointerSpacesOnlyCollection is true, the alloc
+      // space might expand due to promotion and the sense of immunity
+      // could change in the middle of a GC.
+ immune_end_ = std::max(reinterpret_cast<Object*>(space->Limit()), immune_end_);
}
}
}
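A standalone sketch of why the hunk above snapshots the immune bound from Limit() rather than End(). The Space struct is a hypothetical stand-in, assuming End() is the allocation high-water mark that promotion can grow mid-GC while Limit() is the fixed end of the reserved mapping:

    #include <cassert>
    #include <cstdint>

    struct Space {           // hypothetical stand-in, not ART's type
      uint8_t* begin;
      uint8_t* end;          // grows when promotion allocates into the space
      uint8_t* limit;        // fixed capacity bound for the GC's duration
    };

    int main() {
      uint8_t backing[256];
      Space s{backing, backing + 64, backing + 256};

      // Immune upper bound snapshotted at the start of the GC:
      uint8_t* immune_end_from_end = s.end;      // fragile
      uint8_t* immune_end_from_limit = s.limit;  // stable

      // A mid-GC promotion bumps End() past the snapshot...
      uint8_t* promoted = s.end;
      s.end += 32;

      // ...so the End()-based range no longer covers the promoted object,
      // while the Limit()-based range still does.
      assert(!(promoted < immune_end_from_end));
      assert(promoted < immune_end_from_limit);
      return 0;
    }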
@@ -103,11 +111,22 @@
if (space == to_space_) {
BindLiveToMarkBitmap(to_space_);
} else if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect
- || space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect) {
+ || space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect
+               // Add the main free list space and the non-moving
+               // space to the immune space if this is a bump pointer
+               // space only collection.
+ || (kEnableBumpPointerSpacesOnlyCollection &&
+ !whole_heap_collection_ && (space == GetHeap()->GetNonMovingSpace() ||
+ space == GetHeap()->GetPrimaryFreeListSpace()))) {
ImmuneSpace(space);
}
}
}
+ if (kEnableBumpPointerSpacesOnlyCollection && !whole_heap_collection_) {
+    // We won't collect the large object space if this is a bump pointer space only collection.
+ is_large_object_space_immune_ = true;
+ GetHeap()->GetLargeObjectsSpace()->CopyLiveToMarked();
+ }
timings_.EndSplit();
}
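A sketch of what CopyLiveToMarked() accomplishes for the immune large object space, with std::set as a hypothetical stand-in for accounting::ObjectSet: every live large object is pre-marked up front, so mark tests against it succeed without the collector ever tracing into the space.

    #include <set>

    using ObjectSet = std::set<const void*>;  // stand-in, not ART's ObjectSet

    struct LargeObjectSpaceModel {
      ObjectSet live;
      ObjectSet marked;

      // Make every live large object count as marked, since a bump pointer
      // space only collection never traces into this space.
      void CopyLiveToMarked() { marked = live; }

      bool IsMarked(const void* obj) const { return marked.count(obj) != 0; }
    };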
@@ -117,11 +136,14 @@
mark_stack_(nullptr),
immune_begin_(nullptr),
immune_end_(nullptr),
+ is_large_object_space_immune_(false),
to_space_(nullptr),
from_space_(nullptr),
self_(nullptr),
last_gc_to_space_end_(nullptr),
- bytes_promoted_(0) {
+ bytes_promoted_(0),
+ whole_heap_collection_(true),
+ whole_heap_collection_interval_counter_(0) {
}
void SemiSpace::InitializePhase() {
@@ -131,6 +153,7 @@
DCHECK(mark_stack_ != nullptr);
immune_begin_ = nullptr;
immune_end_ = nullptr;
+ is_large_object_space_immune_ = false;
self_ = Thread::Current();
// Do any pre GC verification.
timings_.NewSplit("PreGcVerification");
@@ -147,6 +170,19 @@
}
void SemiSpace::MarkingPhase() {
+ if (kEnableBumpPointerSpacesOnlyCollection) {
+ if (clear_soft_references_) {
+      // If we want to collect as much as possible, collect the whole
+      // heap (and reset the interval counter to be consistent).
+ whole_heap_collection_ = true;
+ whole_heap_collection_interval_counter_ = 0;
+ }
+ if (whole_heap_collection_) {
+ VLOG(heap) << "Whole heap collection";
+ } else {
+ VLOG(heap) << "Bump pointer space only collection";
+ }
+ }
Thread* self = Thread::Current();
Locks::mutator_lock_->AssertExclusiveHeld(self);
TimingLogger::ScopedSplit split("MarkingPhase", &timings_);
@@ -195,23 +231,77 @@
// If the space is immune then we need to mark the references to other spaces.
if (IsImmuneSpace(space)) {
accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
- CHECK(table != nullptr);
- // TODO: Improve naming.
- TimingLogger::ScopedSplit split(
- space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
- "UpdateAndMarkImageModUnionTable",
- &timings_);
- table->UpdateAndMarkReferences(MarkRootCallback, this);
+ if (table != nullptr) {
+ // TODO: Improve naming.
+ TimingLogger::ScopedSplit split(
+ space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
+ "UpdateAndMarkImageModUnionTable",
+ &timings_);
+ table->UpdateAndMarkReferences(MarkRootCallback, this);
+ } else {
+        // If this is a bump pointer space only collection, the
+        // non-moving space is added to the immune space. But the
+        // non-moving space doesn't have a mod union table. Instead,
+        // its live bitmap will be scanned later in MarkReachableObjects().
+ DCHECK(kEnableBumpPointerSpacesOnlyCollection && !whole_heap_collection_ &&
+ (space == heap_->GetNonMovingSpace() || space == heap_->GetPrimaryFreeListSpace()));
+ }
}
}
}
+class SemiSpaceScanObjectVisitor {
+ public:
+ explicit SemiSpaceScanObjectVisitor(SemiSpace* ss) : semi_space_(ss) {}
+ void operator()(Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
+ // TODO: fix NO_THREAD_SAFETY_ANALYSIS. ScanObject() requires an
+ // exclusive lock on the mutator lock, but
+ // SpaceBitmap::VisitMarkedRange() only requires the shared lock.
+ DCHECK(obj != nullptr);
+ semi_space_->ScanObject(obj);
+ }
+ private:
+ SemiSpace* semi_space_;
+};
+
void SemiSpace::MarkReachableObjects() {
timings_.StartSplit("MarkStackAsLive");
accounting::ObjectStack* live_stack = heap_->GetLiveStack();
heap_->MarkAllocStackAsLive(live_stack);
live_stack->Reset();
timings_.EndSplit();
+
+ for (auto& space : heap_->GetContinuousSpaces()) {
+    // If the space is immune and has no mod union table (the
+    // non-moving space when the bump pointer space only collection is
+    // enabled), then we need to scan its live bitmap as roots
+    // (including the objects on the live stack, which have just been
+    // marked in the live bitmap above by MarkAllocStackAsLive()).
+ if (IsImmuneSpace(space) && heap_->FindModUnionTableFromSpace(space) == nullptr) {
+ DCHECK(kEnableBumpPointerSpacesOnlyCollection && !whole_heap_collection_ &&
+ (space == GetHeap()->GetNonMovingSpace() || space == GetHeap()->GetPrimaryFreeListSpace()));
+ accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
+ SemiSpaceScanObjectVisitor visitor(this);
+ live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
+ reinterpret_cast<uintptr_t>(space->End()),
+ visitor);
+ }
+ }
+
+ if (is_large_object_space_immune_) {
+ DCHECK(kEnableBumpPointerSpacesOnlyCollection && !whole_heap_collection_);
+    // When the large object space is immune, we need to scan the
+    // live large objects as roots, as they contain references to
+    // their classes (primitive array classes) which could move, even
+    // though the objects don't contain any other references.
+ space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
+ accounting::ObjectSet* large_live_objects = large_object_space->GetLiveObjects();
+ SemiSpaceScanObjectVisitor visitor(this);
+ for (const Object* obj : large_live_objects->GetObjects()) {
+ visitor(const_cast<Object*>(obj));
+ }
+ }
+
// Recursively process the mark stack.
ProcessMarkStack(true);
}
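The root scan above hinges on walking only the set bits of a live bitmap. A self-contained sketch in the spirit of SpaceBitmap::VisitMarkedRange(), assuming (purely for illustration) one bit per 8-byte-aligned address packed into 64-bit words; ART's real encoding may differ:

    #include <cstdint>
    #include <vector>

    struct SimpleBitmap {  // hypothetical layout, not ART's SpaceBitmap
      uintptr_t heap_begin;
      std::vector<uint64_t> words;  // bit i => object at heap_begin + i * 8

      template <typename Visitor>
      void VisitMarkedRange(uintptr_t begin, uintptr_t end, Visitor&& visit) const {
        for (size_t i = 0; i < words.size(); ++i) {
          uint64_t w = words[i];
          while (w != 0) {
            int bit = __builtin_ctzll(w);  // index of lowest set bit
            w &= w - 1;                    // clear that bit
            uintptr_t addr = heap_begin + (i * 64 + bit) * 8;
            if (begin <= addr && addr < end) {
              visit(addr);  // e.g. forward to a scan-object visitor
            }
          }
        }
      }
    };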
@@ -298,6 +388,7 @@
bool SemiSpace::MarkLargeObject(const Object* obj) {
// TODO: support >1 discontinuous space.
space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
+ DCHECK(large_object_space->Contains(obj));
accounting::ObjectSet* large_objects = large_object_space->GetMarkObjects();
if (UNLIKELY(!large_objects->Test(obj))) {
large_objects->Set(obj);
@@ -311,27 +402,53 @@
size_t bytes_allocated;
mirror::Object* forward_address = nullptr;
if (kEnableSimplePromo && reinterpret_cast<byte*>(obj) < last_gc_to_space_end_) {
- // If it's allocated before the last GC (older), move (pseudo-promote) it to
- // the non-moving space (as sort of an old generation).
+    // If it's allocated before the last GC (older), move
+    // (pseudo-promote) it to the main free list space (as a sort
+    // of old generation).
size_t bytes_promoted;
- space::MallocSpace* non_moving_space = GetHeap()->GetNonMovingSpace();
- forward_address = non_moving_space->Alloc(self_, object_size, &bytes_promoted);
+ space::MallocSpace* promo_dest_space = GetHeap()->GetPrimaryFreeListSpace();
+ forward_address = promo_dest_space->Alloc(self_, object_size, &bytes_promoted);
if (forward_address == nullptr) {
// If out of space, fall back to the to-space.
forward_address = to_space_->Alloc(self_, object_size, &bytes_allocated);
} else {
GetHeap()->num_bytes_allocated_.FetchAndAdd(bytes_promoted);
bytes_promoted_ += bytes_promoted;
- // Mark forward_address on the live bit map.
- accounting::SpaceBitmap* live_bitmap = non_moving_space->GetLiveBitmap();
+ // Handle the bitmaps marking.
+ accounting::SpaceBitmap* live_bitmap = promo_dest_space->GetLiveBitmap();
DCHECK(live_bitmap != nullptr);
- DCHECK(!live_bitmap->Test(forward_address));
- live_bitmap->Set(forward_address);
- // Mark forward_address on the mark bit map.
- accounting::SpaceBitmap* mark_bitmap = non_moving_space->GetMarkBitmap();
+ accounting::SpaceBitmap* mark_bitmap = promo_dest_space->GetMarkBitmap();
DCHECK(mark_bitmap != nullptr);
- DCHECK(!mark_bitmap->Test(forward_address));
- mark_bitmap->Set(forward_address);
+ DCHECK(!live_bitmap->Test(forward_address));
+ if (kEnableBumpPointerSpacesOnlyCollection && !whole_heap_collection_) {
+ // If collecting the bump pointer spaces only, live_bitmap == mark_bitmap.
+ DCHECK_EQ(live_bitmap, mark_bitmap);
+
+        // If this is a bump pointer space only collection (and
+        // promotion is enabled), delay the live bitmap marking
+ // of the promoted object until it's popped off the mark
+ // stack (ProcessMarkStack()). The rationale: we may be
+ // in the middle of scanning the objects in the
+ // promo destination space for
+ // non-moving-space-to-bump-pointer-space references by
+ // iterating over the marked bits of the live bitmap
+ // (MarkReachableObjects()). If we don't delay it (and
+ // instead mark the promoted object here), the above
+ // promo destination space scan could encounter the
+ // just-promoted object and forward the references in
+        // the promoted object's fields even though it has been
+ // pushed onto the mark stack. If this happens, the
+ // promoted object would be in an inconsistent state,
+ // that is, it's on the mark stack (gray) but its fields
+ // are already forwarded (black), which would cause a
+ // DCHECK(!to_space_->HasAddress(obj)) failure below.
+ } else {
+ // Mark forward_address on the live bit map.
+ live_bitmap->Set(forward_address);
+ // Mark forward_address on the mark bit map.
+ DCHECK(!mark_bitmap->Test(forward_address));
+ mark_bitmap->Set(forward_address);
+ }
}
DCHECK(forward_address != nullptr);
} else {
@@ -345,7 +462,7 @@
to_space_live_bitmap_->Set(forward_address);
}
DCHECK(to_space_->HasAddress(forward_address) ||
- (kEnableSimplePromo && GetHeap()->GetNonMovingSpace()->HasAddress(forward_address)));
+ (kEnableSimplePromo && GetHeap()->GetPrimaryFreeListSpace()->HasAddress(forward_address)));
return forward_address;
}
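A toy model of the delayed marking the comment above describes (hypothetical types, not ART's): a promoted object goes onto the mark stack with its live bit unset (gray), and the bit is only set when the object is popped and scanned, so a live-bitmap root scan can never observe an object whose fields are still unforwarded.

    #include <set>
    #include <vector>

    struct Obj { bool fields_forwarded = false; };

    std::vector<Obj*> mark_stack;
    std::set<Obj*> live_bits;  // stand-in for the promo space live bitmap

    Obj* Promote(const Obj& old) {
      Obj* forward = new Obj(old);   // copy into the promo destination space
      mark_stack.push_back(forward); // gray: queued, live bit deliberately unset
      return forward;
    }

    void ProcessMarkStack() {
      while (!mark_stack.empty()) {
        Obj* obj = mark_stack.back();
        mark_stack.pop_back();
        live_bits.insert(obj);         // the delayed live bitmap marking
        obj->fields_forwarded = true;  // ScanObject(): object is now black
      }
    }

    // Invariant: live_bits only ever contains scanned (black) objects, so a
    // concurrent walk over live_bits cannot forward a gray object's fields.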
@@ -372,6 +489,13 @@
} else {
accounting::SpaceBitmap* object_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
if (LIKELY(object_bitmap != nullptr)) {
+ if (kEnableBumpPointerSpacesOnlyCollection) {
+        // If this is a bump pointer space only collection, we should
+        // not reach here, as we don't/won't mark the objects in the
+        // non-moving space (except for the promoted objects). Note
+        // that the non-moving space is added to the immune space.
+ DCHECK(whole_heap_collection_);
+ }
// This object was not previously marked.
if (!object_bitmap->Test(obj)) {
object_bitmap->Set(obj);
@@ -430,7 +554,7 @@
}
bool SemiSpace::ShouldSweepSpace(space::MallocSpace* space) const {
- return space != from_space_ && space != to_space_;
+ return space != from_space_ && space != to_space_ && !IsImmuneSpace(space);
}
void SemiSpace::Sweep(bool swap_bitmaps) {
@@ -452,10 +576,13 @@
freed_bytes_.FetchAndAdd(freed_bytes);
}
}
- SweepLargeObjects(swap_bitmaps);
+ if (!is_large_object_space_immune_) {
+ SweepLargeObjects(swap_bitmaps);
+ }
}
void SemiSpace::SweepLargeObjects(bool swap_bitmaps) {
+ DCHECK(!is_large_object_space_immune_);
TimingLogger::ScopedSplit("SweepLargeObjects", &timings_);
size_t freed_objects = 0;
size_t freed_bytes = 0;
@@ -494,9 +621,30 @@
// Scan anything that's on the mark stack.
void SemiSpace::ProcessMarkStack(bool paused) {
+  space::MallocSpace* promo_dest_space = nullptr;
+  accounting::SpaceBitmap* live_bitmap = nullptr;
+ if (kEnableSimplePromo && kEnableBumpPointerSpacesOnlyCollection && !whole_heap_collection_) {
+    // If this is a bump pointer space only collection (and promotion
+    // is enabled), we delay the live bitmap marking of promoted
+    // objects from MarkObject() until this function.
+ promo_dest_space = GetHeap()->GetPrimaryFreeListSpace();
+ live_bitmap = promo_dest_space->GetLiveBitmap();
+ DCHECK(live_bitmap != nullptr);
+ accounting::SpaceBitmap* mark_bitmap = promo_dest_space->GetMarkBitmap();
+ DCHECK(mark_bitmap != nullptr);
+ DCHECK_EQ(live_bitmap, mark_bitmap);
+ }
timings_.StartSplit(paused ? "(paused)ProcessMarkStack" : "ProcessMarkStack");
while (!mark_stack_->IsEmpty()) {
- ScanObject(mark_stack_->PopBack());
+ Object* obj = mark_stack_->PopBack();
+ if (kEnableSimplePromo && kEnableBumpPointerSpacesOnlyCollection && !whole_heap_collection_ &&
+ promo_dest_space->HasAddress(obj)) {
+      // obj has just been promoted. Set its live bit, which was
+      // delayed from MarkObject().
+ DCHECK(!live_bitmap->Test(obj));
+ live_bitmap->Set(obj);
+ }
+ ScanObject(obj);
}
timings_.EndSplit();
}
@@ -579,6 +727,24 @@
// Reset the marked large objects.
space::LargeObjectSpace* large_objects = GetHeap()->GetLargeObjectsSpace();
large_objects->GetMarkObjects()->Clear();
+
+ if (kEnableBumpPointerSpacesOnlyCollection) {
+    // Decide whether to do a whole heap collection or a bump pointer
+    // space only collection at the next collection by updating
+    // whole_heap_collection_. Enable whole_heap_collection_ once every
+    // kDefaultWholeHeapCollectionInterval collections.
+ if (!whole_heap_collection_) {
+ --whole_heap_collection_interval_counter_;
+ DCHECK_GE(whole_heap_collection_interval_counter_, 0);
+ if (whole_heap_collection_interval_counter_ == 0) {
+ whole_heap_collection_ = true;
+ }
+ } else {
+ DCHECK_EQ(whole_heap_collection_interval_counter_, 0);
+ whole_heap_collection_interval_counter_ = kDefaultWholeHeapCollectionInterval;
+ whole_heap_collection_ = false;
+ }
+ }
}
} // namespace collector
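The scheduling added in the final hunk yields a fixed cadence: one whole heap collection followed by kDefaultWholeHeapCollectionInterval bump pointer space only collections. A standalone sketch of that cadence, assuming an interval of 5 (the constant's real value is not shown in this diff):

    #include <cstdio>

    static constexpr int kDefaultWholeHeapCollectionInterval = 5;  // assumed value

    int main() {
      bool whole_heap_collection = true;
      int counter = 0;
      for (int gc = 0; gc < 12; ++gc) {
        printf("GC %2d: %s\n", gc,
               whole_heap_collection ? "whole heap" : "bump pointer spaces only");
        // Same update rule as the hunk above:
        if (!whole_heap_collection) {
          if (--counter == 0) {
            whole_heap_collection = true;
          }
        } else {
          counter = kDefaultWholeHeapCollectionInterval;
          whole_heap_collection = false;
        }
      }
      // Prints one "whole heap" GC followed by five "bump pointer spaces
      // only" GCs, repeating.
      return 0;
    }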