Improve OOME log message when throwing OOME before the heap is full
We throw OOME when <1% of the heap is free after GC. We should improve
the corresponding log message to clarify why we throw OOME even when the
allocation size is smaller than the largest contiguous free chunk.
Test: art/test/testrunner/testrunner.py
Bug: 188465700
Change-Id: I71bf744a3966271ba31f336f7ceb58c848105df7
Merged-In: I71bf744a3966271ba31f336f7ceb58c848105df7
(cherry picked from commit 26e9e75dc4ef6fbe863651e29aaaeff00edcdf92)
diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc
index f1572cd..2698874 100644
--- a/runtime/gc/allocator/rosalloc.cc
+++ b/runtime/gc/allocator/rosalloc.cc
@@ -2069,7 +2069,7 @@
return reclaimed_bytes;
}
-void RosAlloc::LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) {
+bool RosAlloc::LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) {
Thread* self = Thread::Current();
size_t largest_continuous_free_pages = 0;
WriterMutexLock wmu(self, bulk_free_lock_);
@@ -2098,7 +2098,9 @@
<< ", space footprint " << footprint_ << " bytes"
<< ", space max capacity " << max_capacity_ << " bytes"
<< ")" << std::endl;
+ return true;
}
+ return false;
}
void RosAlloc::DumpStats(std::ostream& os) {
diff --git a/runtime/gc/allocator/rosalloc.h b/runtime/gc/allocator/rosalloc.h
index 48e3576..a5bfd8f 100644
--- a/runtime/gc/allocator/rosalloc.h
+++ b/runtime/gc/allocator/rosalloc.h
@@ -929,7 +929,7 @@
void Verify() REQUIRES(Locks::mutator_lock_, !Locks::thread_list_lock_, !bulk_free_lock_,
!lock_);
- void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes)
+ bool LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes)
REQUIRES(!bulk_free_lock_, !lock_);
void DumpStats(std::ostream& os)
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 5397d62..d9cd1f5 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -171,6 +171,10 @@
static constexpr size_t kDefaultAllocationStackSize = 8 * MB /
sizeof(mirror::HeapReference<mirror::Object>);
+// After a GC (due to allocation failure) we should recover at least this
+// fraction of the current max heap size. Otherwise throw OOME.
+static constexpr double kMinFreeHeapAfterGcForAlloc = 0.01;
+
// For deterministic compilation, we need the heap to be at a well-known address.
static constexpr uint32_t kAllocSpaceBeginForDeterministicAoT = 0x40000000;
// Dump the rosalloc stats on SIGQUIT.
@@ -1446,7 +1450,16 @@
CHECK(space != nullptr) << "allocator_type:" << allocator_type
<< " byte_count:" << byte_count
<< " total_bytes_free:" << total_bytes_free;
- space->LogFragmentationAllocFailure(oss, byte_count);
+ // LogFragmentationAllocFailure returns true if byte_count is greater than
+ // the largest free contiguous chunk in the space. A false return value
+ // means that we are throwing OOME because the amount of free heap after
+ // GC is less than the fraction kMinFreeHeapAfterGcForAlloc of the heap
+ // size. Log an appropriate message in that case.
+ if (!space->LogFragmentationAllocFailure(oss, byte_count)) {
+ oss << "; giving up on allocation because <"
+ << kMinFreeHeapAfterGcForAlloc * 100
+ << "% of heap free after GC.";
+ }
}
}
self->ThrowOutOfMemoryError(oss.str().c_str());
@@ -1788,9 +1801,6 @@
size_t* usable_size,
size_t* bytes_tl_bulk_allocated,
ObjPtr<mirror::Class>* klass) {
- // After a GC (due to allocation failure) we should retrieve at least this
- // fraction of the current max heap size. Otherwise throw OOME.
- constexpr double kMinFreeHeapAfterGcForAlloc = 0.01;
bool was_default_allocator = allocator == GetCurrentAllocator();
// Make sure there is no pending exception since we may need to throw an OOME.
self->AssertNoPendingException();
diff --git a/runtime/gc/space/bump_pointer_space.cc b/runtime/gc/space/bump_pointer_space.cc
index c4fda14..3a0155a 100644
--- a/runtime/gc/space/bump_pointer_space.cc
+++ b/runtime/gc/space/bump_pointer_space.cc
@@ -220,12 +220,16 @@
return true;
}
-void BumpPointerSpace::LogFragmentationAllocFailure(std::ostream& os,
- size_t /* failed_alloc_bytes */) {
+bool BumpPointerSpace::LogFragmentationAllocFailure(std::ostream& os,
+ size_t failed_alloc_bytes) {
size_t max_contiguous_allocation = Limit() - End();
- os << "; failed due to fragmentation (largest possible contiguous allocation "
- << max_contiguous_allocation << " bytes)";
+ if (failed_alloc_bytes > max_contiguous_allocation) {
+ os << "; failed due to fragmentation (largest possible contiguous allocation "
+ << max_contiguous_allocation << " bytes)";
+ return true;
+ }
// Caller's job to print failed_alloc_bytes.
+ return false;
}
size_t BumpPointerSpace::AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size) {
diff --git a/runtime/gc/space/bump_pointer_space.h b/runtime/gc/space/bump_pointer_space.h
index 559fae8..08ed503 100644
--- a/runtime/gc/space/bump_pointer_space.h
+++ b/runtime/gc/space/bump_pointer_space.h
@@ -161,7 +161,7 @@
bytes_allocated_.fetch_sub(bytes, std::memory_order_relaxed);
}
- void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) override
+ bool LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) override
REQUIRES_SHARED(Locks::mutator_lock_);
// Object alignment within the space.
diff --git a/runtime/gc/space/dlmalloc_space.cc b/runtime/gc/space/dlmalloc_space.cc
index f3fccbb..7564c89 100644
--- a/runtime/gc/space/dlmalloc_space.cc
+++ b/runtime/gc/space/dlmalloc_space.cc
@@ -359,8 +359,8 @@
}
}
-void DlMallocSpace::LogFragmentationAllocFailure(std::ostream& os,
- size_t failed_alloc_bytes ATTRIBUTE_UNUSED) {
+bool DlMallocSpace::LogFragmentationAllocFailure(std::ostream& os,
+ size_t failed_alloc_bytes) {
Thread* const self = Thread::Current();
size_t max_contiguous_allocation = 0;
// To allow the Walk/InspectAll() to exclusively-lock the mutator
@@ -369,8 +369,12 @@
Locks::mutator_lock_->AssertSharedHeld(self);
ScopedThreadSuspension sts(self, kSuspended);
Walk(MSpaceChunkCallback, &max_contiguous_allocation);
- os << "; failed due to fragmentation (largest possible contiguous allocation "
- << max_contiguous_allocation << " bytes)";
+ if (failed_alloc_bytes > max_contiguous_allocation) {
+ os << "; failed due to fragmentation (largest possible contiguous allocation "
+ << max_contiguous_allocation << " bytes)";
+ return true;
+ }
+ return false;
}
} // namespace space
diff --git a/runtime/gc/space/dlmalloc_space.h b/runtime/gc/space/dlmalloc_space.h
index 930f557..429b4d0 100644
--- a/runtime/gc/space/dlmalloc_space.h
+++ b/runtime/gc/space/dlmalloc_space.h
@@ -149,7 +149,7 @@
return this;
}
- void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) override
+ bool LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) override
REQUIRES_SHARED(Locks::mutator_lock_);
protected:
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index d1b4d7c..2d17a18 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -641,9 +641,10 @@
return scc.freed;
}
-void LargeObjectSpace::LogFragmentationAllocFailure(std::ostream& /*os*/,
+bool LargeObjectSpace::LogFragmentationAllocFailure(std::ostream& /*os*/,
size_t /*failed_alloc_bytes*/) {
UNIMPLEMENTED(FATAL);
+ UNREACHABLE();
}
std::pair<uint8_t*, uint8_t*> LargeObjectMapSpace::GetBeginEndAtomic() const {
diff --git a/runtime/gc/space/large_object_space.h b/runtime/gc/space/large_object_space.h
index 13251d6..8b3115c 100644
--- a/runtime/gc/space/large_object_space.h
+++ b/runtime/gc/space/large_object_space.h
@@ -101,7 +101,7 @@
const uint8_t* byte_obj = reinterpret_cast<const uint8_t*>(obj);
return Begin() <= byte_obj && byte_obj < End();
}
- void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) override
+ bool LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) override
REQUIRES_SHARED(Locks::mutator_lock_);
// Return true if the large object is a zygote large object. Potentially slow.
diff --git a/runtime/gc/space/region_space.cc b/runtime/gc/space/region_space.cc
index 2b008f3..091dc4e 100644
--- a/runtime/gc/space/region_space.cc
+++ b/runtime/gc/space/region_space.cc
@@ -722,8 +722,8 @@
}
}
-void RegionSpace::LogFragmentationAllocFailure(std::ostream& os,
- size_t /* failed_alloc_bytes */) {
+bool RegionSpace::LogFragmentationAllocFailure(std::ostream& os,
+ size_t failed_alloc_bytes) {
size_t max_contiguous_allocation = 0;
MutexLock mu(Thread::Current(), region_lock_);
@@ -759,12 +759,15 @@
max_contiguous_allocation = std::min(max_contiguous_allocation,
regions_free_for_alloc * kRegionSize);
-
- os << "; failed due to fragmentation (largest possible contiguous allocation "
- << max_contiguous_allocation << " bytes). Number of "
- << PrettySize(kRegionSize)
- << " sized free regions are: " << regions_free_for_alloc;
+ if (failed_alloc_bytes > max_contiguous_allocation) {
+ os << "; failed due to fragmentation (largest possible contiguous allocation "
+ << max_contiguous_allocation << " bytes). Number of "
+ << PrettySize(kRegionSize)
+ << " sized free regions are: " << regions_free_for_alloc;
+ return true;
+ }
// Caller's job to print failed_alloc_bytes.
+ return false;
}
void RegionSpace::Clear() {
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h
index afb42b0..c3b272d 100644
--- a/runtime/gc/space/region_space.h
+++ b/runtime/gc/space/region_space.h
@@ -227,7 +227,7 @@
accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() override {
return nullptr;
}
- void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) override
+ bool LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) override
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!region_lock_);
// Object alignment within the space.
diff --git a/runtime/gc/space/rosalloc_space.h b/runtime/gc/space/rosalloc_space.h
index 00f5ab2..7becea0 100644
--- a/runtime/gc/space/rosalloc_space.h
+++ b/runtime/gc/space/rosalloc_space.h
@@ -152,8 +152,8 @@
virtual ~RosAllocSpace();
- void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) override {
- rosalloc_->LogFragmentationAllocFailure(os, failed_alloc_bytes);
+ bool LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) override {
+ return rosalloc_->LogFragmentationAllocFailure(os, failed_alloc_bytes);
}
void DumpStats(std::ostream& os);
diff --git a/runtime/gc/space/space.h b/runtime/gc/space/space.h
index 5ea97eb..160c730 100644
--- a/runtime/gc/space/space.h
+++ b/runtime/gc/space/space.h
@@ -243,7 +243,10 @@
// from Heap::num_bytes_allocated_ or zero if unnecessary.
virtual size_t RevokeAllThreadLocalBuffers() = 0;
- virtual void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) = 0;
+ // Compute the largest free contiguous chunk of memory in the space. If
+ // it is smaller than failed_alloc_bytes, then log the chunk size and
+ // return true. Otherwise leave os untouched and return false.
+ virtual bool LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) = 0;
protected:
struct SweepCallbackContext {
diff --git a/runtime/gc/space/zygote_space.cc b/runtime/gc/space/zygote_space.cc
index 66427a7..c5e3a70 100644
--- a/runtime/gc/space/zygote_space.cc
+++ b/runtime/gc/space/zygote_space.cc
@@ -111,7 +111,7 @@
UNREACHABLE();
}
-void ZygoteSpace::LogFragmentationAllocFailure(std::ostream&, size_t) {
+bool ZygoteSpace::LogFragmentationAllocFailure(std::ostream&, size_t) {
UNIMPLEMENTED(FATAL);
UNREACHABLE();
}
diff --git a/runtime/gc/space/zygote_space.h b/runtime/gc/space/zygote_space.h
index 631691d..3ebc943 100644
--- a/runtime/gc/space/zygote_space.h
+++ b/runtime/gc/space/zygote_space.h
@@ -79,7 +79,7 @@
return false;
}
- void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) override
+ bool LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) override
REQUIRES_SHARED(Locks::mutator_lock_);
protected: