Improve the generational mode.
- Turn the compile-time flags for generational mode into a command
line flag.
- In the generational mode, always collect the whole heap, as opposed
to the bump pointer space only, if a collection is explicit,
native-allocation-triggered, or a last-attempt one.
Change-Id: I7a14a707cc47e6e3aa4a3292db62533409f17563
diff --git a/runtime/gc/collector/garbage_collector.cc b/runtime/gc/collector/garbage_collector.cc
index 28428cc..25e8966 100644
--- a/runtime/gc/collector/garbage_collector.cc
+++ b/runtime/gc/collector/garbage_collector.cc
@@ -37,6 +37,7 @@
GarbageCollector::GarbageCollector(Heap* heap, const std::string& name)
: heap_(heap),
name_(name),
+ gc_cause_(kGcCauseForAlloc),
clear_soft_references_(false),
verbose_(VLOG_IS_ON(heap)),
duration_ns_(0),
@@ -63,13 +64,14 @@
total_freed_bytes_ = 0;
}
-void GarbageCollector::Run(bool clear_soft_references) {
+void GarbageCollector::Run(GcCause gc_cause, bool clear_soft_references) {
ThreadList* thread_list = Runtime::Current()->GetThreadList();
Thread* self = Thread::Current();
uint64_t start_time = NanoTime();
pause_times_.clear();
duration_ns_ = 0;
clear_soft_references_ = clear_soft_references;
+ gc_cause_ = gc_cause;
// Reset stats.
freed_bytes_ = 0;
diff --git a/runtime/gc/collector/garbage_collector.h b/runtime/gc/collector/garbage_collector.h
index 1779339..088f1d4 100644
--- a/runtime/gc/collector/garbage_collector.h
+++ b/runtime/gc/collector/garbage_collector.h
@@ -19,6 +19,7 @@
#include "base/histogram.h"
#include "base/timing_logger.h"
+#include "gc/gc_cause.h"
#include "gc_type.h"
#include "locks.h"
#include <stdint.h>
@@ -46,7 +47,7 @@
virtual GcType GetGcType() const = 0;
// Run the garbage collector.
- void Run(bool clear_soft_references);
+ void Run(GcCause gc_cause, bool clear_soft_references);
Heap* GetHeap() const {
return heap_;
@@ -133,6 +134,7 @@
std::string name_;
+ GcCause gc_cause_;
bool clear_soft_references_;
const bool verbose_;
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index 113139b..99c726d 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -62,12 +62,6 @@
static constexpr bool kProtectFromSpace = true;
static constexpr bool kResetFromSpace = true;
-// TODO: move these to a new file as a new garbage collector?
-// If true, 'promote' some objects from the bump pointer spaces to the non-moving space.
-static constexpr bool kEnableSimplePromo = false;
-// If true, collect the bump pointer spaces only, as opposed to the
-// whole heap in some collections.
-static constexpr bool kEnableBumpPointerSpacesOnlyCollection = false;
// TODO: Unduplicate logic.
void SemiSpace::ImmuneSpace(space::ContinuousSpace* space) {
@@ -93,10 +87,10 @@
// being sorted by Heap::AddContinuousSpace.
if (prev_space != nullptr && IsImmuneSpace(prev_space)) {
immune_begin_ = std::min(reinterpret_cast<Object*>(space->Begin()), immune_begin_);
- // Use Limit() instead of End() because otherwise if
- // kEnableBumpPointerSpacesOnlyCollection is true, the alloc
- // space might expand due to promotion and the sense of immunity
- // may change in the middle of a GC.
+ // Use Limit() instead of End() because otherwise if the
+ // generational mode is enabled, the alloc space might expand
+ // due to promotion and the sense of immunity may change in the
+ // middle of a GC.
immune_end_ = std::max(reinterpret_cast<Object*>(space->Limit()), immune_end_);
}
}
@@ -115,14 +109,14 @@
// Add the main free list space and the non-moving
// space to the immune space if a bump pointer space
// only collection.
- || (kEnableBumpPointerSpacesOnlyCollection &&
- !whole_heap_collection_ && (space == GetHeap()->GetNonMovingSpace() ||
- space == GetHeap()->GetPrimaryFreeListSpace()))) {
+ || (generational_ && !whole_heap_collection_ &&
+ (space == GetHeap()->GetNonMovingSpace() ||
+ space == GetHeap()->GetPrimaryFreeListSpace()))) {
ImmuneSpace(space);
}
}
}
- if (kEnableBumpPointerSpacesOnlyCollection && !whole_heap_collection_) {
+ if (generational_ && !whole_heap_collection_) {
// We won't collect the large object space if a bump pointer space only collection.
is_large_object_space_immune_ = true;
GetHeap()->GetLargeObjectsSpace()->CopyLiveToMarked();
@@ -130,7 +124,7 @@
timings_.EndSplit();
}
-SemiSpace::SemiSpace(Heap* heap, const std::string& name_prefix)
+SemiSpace::SemiSpace(Heap* heap, bool generational, const std::string& name_prefix)
: GarbageCollector(heap,
name_prefix + (name_prefix.empty() ? "" : " ") + "marksweep + semispace"),
mark_stack_(nullptr),
@@ -140,6 +134,7 @@
to_space_(nullptr),
from_space_(nullptr),
self_(nullptr),
+ generational_(generational),
last_gc_to_space_end_(nullptr),
bytes_promoted_(0),
whole_heap_collection_(true),
@@ -170,10 +165,12 @@
}
void SemiSpace::MarkingPhase() {
- if (kEnableBumpPointerSpacesOnlyCollection) {
- if (clear_soft_references_) {
- // If we want to collect as much as possible, collect the whole
- // heap (and reset the interval counter to be consistent.)
+ if (generational_) {
+ if (gc_cause_ == kGcCauseExplicit || gc_cause_ == kGcCauseForNativeAlloc ||
+ clear_soft_references_) {
+ // If an explicit, native allocation-triggered, or last attempt
+ // collection, collect the whole heap (and reset the interval
+ // counter to be consistent.)
whole_heap_collection_ = true;
whole_heap_collection_interval_counter_ = 0;
}
@@ -189,7 +186,7 @@
// Need to do this with mutators paused so that somebody doesn't accidentally allocate into the
// wrong space.
heap_->SwapSemiSpaces();
- if (kEnableSimplePromo) {
+ if (generational_) {
// If last_gc_to_space_end_ is out of the bounds of the from-space
// (the to-space from last GC), then point it to the beginning of
// the from-space. For example, the very first GC or the
@@ -243,7 +240,7 @@
// space is added to the immune space. But the non-moving
// space doesn't have a mod union table. Instead, its live
// bitmap will be scanned later in MarkReachableObjects().
- DCHECK(kEnableBumpPointerSpacesOnlyCollection && !whole_heap_collection_ &&
+ DCHECK(generational_ && !whole_heap_collection_ &&
(space == heap_->GetNonMovingSpace() || space == heap_->GetPrimaryFreeListSpace()));
}
}
@@ -278,7 +275,7 @@
// (including the objects on the live stack which have just marked
// in the live bitmap above in MarkAllocStackAsLive().)
if (IsImmuneSpace(space) && heap_->FindModUnionTableFromSpace(space) == nullptr) {
- DCHECK(kEnableBumpPointerSpacesOnlyCollection && !whole_heap_collection_ &&
+ DCHECK(generational_ && !whole_heap_collection_ &&
(space == GetHeap()->GetNonMovingSpace() || space == GetHeap()->GetPrimaryFreeListSpace()));
accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
SemiSpaceScanObjectVisitor visitor(this);
@@ -289,7 +286,7 @@
}
if (is_large_object_space_immune_) {
- DCHECK(kEnableBumpPointerSpacesOnlyCollection && !whole_heap_collection_);
+ DCHECK(generational_ && !whole_heap_collection_);
// When the large object space is immune, we need to scan the
// large object space as roots as they contain references to their
// classes (primitive array classes) that could move though they
@@ -359,7 +356,7 @@
mprotect(from_space_->Begin(), from_space_->Capacity(), PROT_READ);
}
- if (kEnableSimplePromo) {
+ if (generational_) {
// Record the end (top) of the to space so we can distinguish
// between objects that were allocated since the last GC and the
// older objects.
@@ -401,7 +398,7 @@
size_t object_size = obj->SizeOf();
size_t bytes_allocated;
mirror::Object* forward_address = nullptr;
- if (kEnableSimplePromo && reinterpret_cast<byte*>(obj) < last_gc_to_space_end_) {
+ if (generational_ && reinterpret_cast<byte*>(obj) < last_gc_to_space_end_) {
// If it's allocated before the last GC (older), move
// (pseudo-promote) it to the main free list space (as sort
// of an old generation.)
@@ -420,27 +417,25 @@
accounting::SpaceBitmap* mark_bitmap = promo_dest_space->GetMarkBitmap();
DCHECK(mark_bitmap != nullptr);
DCHECK(!live_bitmap->Test(forward_address));
- if (kEnableBumpPointerSpacesOnlyCollection && !whole_heap_collection_) {
+ if (!whole_heap_collection_) {
// If collecting the bump pointer spaces only, live_bitmap == mark_bitmap.
DCHECK_EQ(live_bitmap, mark_bitmap);
- // If a bump pointer space only collection (and the
- // promotion is enabled,) delay the live bitmap marking
- // of the promoted object until it's popped off the mark
- // stack (ProcessMarkStack()). The rationale: we may be
- // in the middle of scanning the objects in the
- // promo destination space for
+ // If a bump pointer space only collection, delay the live
+ // bitmap marking of the promoted object until it's popped off
+ // the mark stack (ProcessMarkStack()). The rationale: we may
+ // be in the middle of scanning the objects in the promo
+ // destination space for
// non-moving-space-to-bump-pointer-space references by
// iterating over the marked bits of the live bitmap
- // (MarkReachableObjects()). If we don't delay it (and
- // instead mark the promoted object here), the above
- // promo destination space scan could encounter the
- // just-promoted object and forward the references in
- // the promoted object's fields even through it is
- // pushed onto the mark stack. If this happens, the
- // promoted object would be in an inconsistent state,
- // that is, it's on the mark stack (gray) but its fields
- // are already forwarded (black), which would cause a
+ // (MarkReachableObjects()). If we don't delay it (and instead
+ // mark the promoted object here), the above promo destination
+ // space scan could encounter the just-promoted object and
+ // forward the references in the promoted object's fields even
+ // through it is pushed onto the mark stack. If this happens,
+ // the promoted object would be in an inconsistent state, that
+ // is, it's on the mark stack (gray) but its fields are
+ // already forwarded (black), which would cause a
// DCHECK(!to_space_->HasAddress(obj)) failure below.
} else {
// Mark forward_address on the live bit map.
@@ -462,7 +457,7 @@
to_space_live_bitmap_->Set(forward_address);
}
DCHECK(to_space_->HasAddress(forward_address) ||
- (kEnableSimplePromo && GetHeap()->GetPrimaryFreeListSpace()->HasAddress(forward_address)));
+ (generational_ && GetHeap()->GetPrimaryFreeListSpace()->HasAddress(forward_address)));
return forward_address;
}
@@ -489,7 +484,7 @@
} else {
accounting::SpaceBitmap* object_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
if (LIKELY(object_bitmap != nullptr)) {
- if (kEnableBumpPointerSpacesOnlyCollection) {
+ if (generational_) {
// If a bump pointer space only collection, we should not
// reach here as we don't/won't mark the objects in the
// non-moving space (except for the promoted objects.) Note
@@ -623,7 +618,7 @@
void SemiSpace::ProcessMarkStack(bool paused) {
space::MallocSpace* promo_dest_space = NULL;
accounting::SpaceBitmap* live_bitmap = NULL;
- if (kEnableSimplePromo && kEnableBumpPointerSpacesOnlyCollection && !whole_heap_collection_) {
+ if (generational_ && !whole_heap_collection_) {
// If a bump pointer space only collection (and the promotion is
// enabled,) we delay the live-bitmap marking of promoted objects
// from MarkObject() until this function.
@@ -637,8 +632,7 @@
timings_.StartSplit(paused ? "(paused)ProcessMarkStack" : "ProcessMarkStack");
while (!mark_stack_->IsEmpty()) {
Object* obj = mark_stack_->PopBack();
- if (kEnableSimplePromo && kEnableBumpPointerSpacesOnlyCollection && !whole_heap_collection_ &&
- promo_dest_space->HasAddress(obj)) {
+ if (generational_ && !whole_heap_collection_ && promo_dest_space->HasAddress(obj)) {
// obj has just been promoted. Mark the live bitmap for it,
// which is delayed from MarkObject().
DCHECK(!live_bitmap->Test(obj));
@@ -728,7 +722,7 @@
space::LargeObjectSpace* large_objects = GetHeap()->GetLargeObjectsSpace();
large_objects->GetMarkObjects()->Clear();
- if (kEnableBumpPointerSpacesOnlyCollection) {
+ if (generational_) {
// Decide whether to do a whole heap collection or a bump pointer
// only space collection at the next collection by updating
// whole_heap_collection. Enable whole_heap_collection once every
diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h
index ba9f0f6..bf129a3 100644
--- a/runtime/gc/collector/semi_space.h
+++ b/runtime/gc/collector/semi_space.h
@@ -63,7 +63,8 @@
class SemiSpace : public GarbageCollector {
public:
- explicit SemiSpace(Heap* heap, const std::string& name_prefix = "");
+ explicit SemiSpace(Heap* heap, bool generational = false,
+ const std::string& name_prefix = "");
~SemiSpace() {}
@@ -274,25 +275,31 @@
Thread* self_;
- // Used for kEnableSimplePromo. The end/top of the bump pointer
- // space at the end of the last collection.
+ // When true, the generational mode (promotion and the bump pointer
+ // space only collection) is enabled. TODO: move these to a new file
+ // as a new garbage collector?
+ bool generational_;
+
+ // Used for the generational mode. The end/top of the bump
+ // pointer space at the end of the last collection.
byte* last_gc_to_space_end_;
- // Used for kEnableSimplePromo. During a collection, keeps track of
- // how many bytes of objects have been copied so far from the bump
- // pointer space to the non-moving space.
+ // Used for the generational mode. During a collection, keeps track
+ // of how many bytes of objects have been copied so far from the
+ // bump pointer space to the non-moving space.
uint64_t bytes_promoted_;
- // When true, collect the whole heap. When false, collect only the
- // bump pointer spaces.
+ // Used for the generational mode. When true, collect the whole
+ // heap. When false, collect only the bump pointer spaces.
bool whole_heap_collection_;
- // A counter used to enable whole_heap_collection_ once per
- // interval.
+ // Used for the generational mode. A counter used to enable
+ // whole_heap_collection_ once per interval.
int whole_heap_collection_interval_counter_;
- // The default interval of the whole heap collection. If N, the
- // whole heap collection occurs every N collections.
+ // Used for the generational mode. The default interval of the whole
+ // heap collection. If N, the whole heap collection occurs every N
+ // collections.
static constexpr int kDefaultWholeHeapCollectionInterval = 5;
private:
diff --git a/runtime/gc/collector_type.h b/runtime/gc/collector_type.h
index 06395cf..4bc9ad2 100644
--- a/runtime/gc/collector_type.h
+++ b/runtime/gc/collector_type.h
@@ -32,6 +32,8 @@
kCollectorTypeCMS,
// Semi-space / mark-sweep hybrid, enables compaction.
kCollectorTypeSS,
+ // A generational variant of kCollectorTypeSS.
+ kCollectorTypeGSS,
};
std::ostream& operator<<(std::ostream& os, const CollectorType& collector_type);
diff --git a/runtime/gc/gc_cause.cc b/runtime/gc/gc_cause.cc
new file mode 100644
index 0000000..b25f7ff
--- /dev/null
+++ b/runtime/gc/gc_cause.cc
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gc_cause.h"
+#include "globals.h"
+#include "base/logging.h"
+
+#include <ostream>
+
+namespace art {
+namespace gc {
+
+const char* PrettyCause(GcCause cause) {
+ switch (cause) {
+ case kGcCauseForAlloc: return "Alloc";
+ case kGcCauseBackground: return "Background";
+ case kGcCauseExplicit: return "Explicit";
+ case kGcCauseForNativeAlloc: return "NativeAlloc";
+ case kGcCauseCollectorTransition: return "CollectorTransition";
+ default:
+ LOG(FATAL) << "Unreachable";
+ }
+ return "";
+}
+
+std::ostream& operator<<(std::ostream& os, const GcCause& gc_cause) {
+ os << PrettyCause(gc_cause);
+ return os;
+}
+
+} // namespace gc
+} // namespace art
diff --git a/runtime/gc/gc_cause.h b/runtime/gc/gc_cause.h
new file mode 100644
index 0000000..7499b9e
--- /dev/null
+++ b/runtime/gc/gc_cause.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_GC_GC_CAUSE_H_
+#define ART_RUNTIME_GC_GC_CAUSE_H_
+
+#include <ostream>
+
+namespace art {
+namespace gc {
+
+// What caused the GC?
+enum GcCause {
+ // GC triggered by a failed allocation. Thread doing allocation is blocked waiting for GC before
+ // retrying allocation.
+ kGcCauseForAlloc,
+ // A background GC trying to ensure there is free memory ahead of allocations.
+ kGcCauseBackground,
+ // An explicit System.gc() call.
+ kGcCauseExplicit,
+ // GC triggered for a native allocation.
+ kGcCauseForNativeAlloc,
+ // GC triggered for a collector transition.
+ kGcCauseCollectorTransition,
+};
+
+const char* PrettyCause(GcCause cause);
+std::ostream& operator<<(std::ostream& os, const GcCause& gc_cause);
+
+} // namespace gc
+} // namespace art
+
+#endif // ART_RUNTIME_GC_GC_CAUSE_H_
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 56e3e00..6d30e1c 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -273,7 +273,8 @@
}
if (kMovingCollector) {
// TODO: Clean this up.
- semi_space_collector_ = new collector::SemiSpace(this);
+ bool generational = post_zygote_collector_type_ == kCollectorTypeGSS;
+ semi_space_collector_ = new collector::SemiSpace(this, generational);
garbage_collectors_.push_back(semi_space_collector_);
}
@@ -1165,7 +1166,8 @@
}
tl->SuspendAll();
switch (collector_type) {
- case kCollectorTypeSS: {
+ case kCollectorTypeSS:
+ case kCollectorTypeGSS: {
mprotect(temp_space_->Begin(), temp_space_->Capacity(), PROT_READ | PROT_WRITE);
CHECK(main_space_ != nullptr);
Compact(temp_space_, main_space_);
@@ -1179,7 +1181,7 @@
case kCollectorTypeMS:
// Fall through.
case kCollectorTypeCMS: {
- if (collector_type_ == kCollectorTypeSS) {
+ if (collector_type_ == kCollectorTypeSS || collector_type_ == kCollectorTypeGSS) {
// TODO: Use mem-map from temp space?
MemMap* mem_map = allocator_mem_map_.release();
CHECK(mem_map != nullptr);
@@ -1233,7 +1235,8 @@
collector_type_ = collector_type;
gc_plan_.clear();
switch (collector_type_) {
- case kCollectorTypeSS: {
+ case kCollectorTypeSS:
+ case kCollectorTypeGSS: {
concurrent_gc_ = false;
gc_plan_.push_back(collector::kGcTypeFull);
if (use_tlab_) {
@@ -1388,7 +1391,7 @@
temp_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
zygote_collector.SetFromSpace(bump_pointer_space_);
zygote_collector.SetToSpace(&target_space);
- zygote_collector.Run(false);
+ zygote_collector.Run(kGcCauseCollectorTransition, false);
CHECK(temp_space_->IsEmpty());
total_objects_freed_ever_ += semi_space_collector_->GetFreedObjects();
total_bytes_freed_ever_ += semi_space_collector_->GetFreedBytes();
@@ -1469,17 +1472,6 @@
}
}
-const char* PrettyCause(GcCause cause) {
- switch (cause) {
- case kGcCauseForAlloc: return "Alloc";
- case kGcCauseBackground: return "Background";
- case kGcCauseExplicit: return "Explicit";
- default:
- LOG(FATAL) << "Unreachable";
- }
- return "";
-}
-
void Heap::SwapSemiSpaces() {
// Swap the spaces so we allocate into the space which we just evacuated.
std::swap(bump_pointer_space_, temp_space_);
@@ -1492,7 +1484,7 @@
if (target_space != source_space) {
semi_space_collector_->SetFromSpace(source_space);
semi_space_collector_->SetToSpace(target_space);
- semi_space_collector_->Run(false);
+ semi_space_collector_->Run(kGcCauseCollectorTransition, false);
}
}
@@ -1541,7 +1533,7 @@
collector::GarbageCollector* collector = nullptr;
// TODO: Clean this up.
- if (collector_type_ == kCollectorTypeSS) {
+ if (collector_type_ == kCollectorTypeSS || collector_type_ == kCollectorTypeGSS) {
DCHECK(current_allocator_ == kAllocatorTypeBumpPointer ||
current_allocator_ == kAllocatorTypeTLAB);
gc_type = semi_space_collector_->GetGcType();
@@ -1569,7 +1561,7 @@
ATRACE_BEGIN(StringPrintf("%s %s GC", PrettyCause(gc_cause), collector->GetName()).c_str());
- collector->Run(clear_soft_references);
+ collector->Run(gc_cause, clear_soft_references);
total_objects_freed_ever_ += collector->GetFreedObjects();
total_bytes_freed_ever_ += collector->GetFreedBytes();
@@ -2383,7 +2375,7 @@
}
// If we still are over the watermark, attempt a GC for alloc and run finalizers.
if (static_cast<size_t>(native_bytes_allocated_) > native_footprint_limit_) {
- CollectGarbageInternal(gc_type, kGcCauseForAlloc, false);
+ CollectGarbageInternal(gc_type, kGcCauseForNativeAlloc, false);
RunFinalization(env);
native_need_to_run_finalization_ = false;
CHECK(!env->ExceptionCheck());
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 465ee4c..0c3db86 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -25,6 +25,7 @@
#include "base/timing_logger.h"
#include "gc/accounting/atomic_stack.h"
#include "gc/accounting/card_table.h"
+#include "gc/gc_cause.h"
#include "gc/collector/gc_type.h"
#include "gc/collector_type.h"
#include "globals.h"
@@ -98,18 +99,6 @@
kAllocatorTypeLOS, // Large object space, also doesn't have entrypoints.
};
-// What caused the GC?
-enum GcCause {
- // GC triggered by a failed allocation. Thread doing allocation is blocked waiting for GC before
- // retrying allocation.
- kGcCauseForAlloc,
- // A background GC trying to ensure there is free memory ahead of allocations.
- kGcCauseBackground,
- // An explicit System.gc() call.
- kGcCauseExplicit,
-};
-std::ostream& operator<<(std::ostream& os, const GcCause& policy);
-
// How we want to sanity check the heap's correctness.
enum HeapVerificationMode {
kHeapVerificationNotPermitted, // Too early in runtime start-up for heap to be verified.