Native allocation accounting
Added two new functions: registerNativeAllocation and registerNativeFree.
These functions should be used to let the GC know about native allocations
which are held live by Java objects and released in finalizers. A GC is performed
or requested from within registerNativeAllocation when the total number of native
bytes accounted for exceeds a certain threshold. After a GC occurs in
registerNativeAllocation, finalizers are run so that the native memory is freed.
Added a test which shows how to use these functions.
Change-Id: I40f3c79e1c02d5008dec7d58d61c5bb97ec2fc1b
diff --git a/src/gc/heap.cc b/src/gc/heap.cc
index a68cc02..eae1520 100644
--- a/src/gc/heap.cc
+++ b/src/gc/heap.cc
@@ -25,6 +25,7 @@
#include <vector>
#include "base/stl_util.h"
+#include "common_throws.h"
#include "cutils/sched_policy.h"
#include "debugger.h"
#include "gc/accounting/atomic_stack.h"
@@ -170,12 +171,15 @@
capacity_(capacity),
growth_limit_(growth_limit),
max_allowed_footprint_(initial_size),
+ native_footprint_gc_watermark_(initial_size),
+ native_footprint_limit_(2 * initial_size),
concurrent_start_bytes_(concurrent_gc ? initial_size - (kMinConcurrentRemainingBytes)
: std::numeric_limits<size_t>::max()),
total_bytes_freed_ever_(0),
total_objects_freed_ever_(0),
large_object_threshold_(3 * kPageSize),
num_bytes_allocated_(0),
+ native_bytes_allocated_(0),
verify_missing_card_marks_(false),
verify_system_weaks_(false),
verify_pre_gc_heap_(false),
@@ -569,9 +573,6 @@
Dbg::RecordAllocation(c, byte_count);
}
if (static_cast<size_t>(num_bytes_allocated_) >= concurrent_start_bytes_) {
- // We already have a request pending, no reason to start more until we update
- // concurrent_start_bytes_.
- concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
// The SirtRef is necessary since the calls in RequestConcurrentGC are a safepoint.
SirtRef<mirror::Object> ref(self, obj);
RequestConcurrentGC(self);
@@ -1690,6 +1691,19 @@
max_allowed_footprint_ = max_allowed_footprint;
}
+void Heap::UpdateMaxNativeFootprint() {
+ size_t native_size = native_bytes_allocated_;
+ // TODO: Tune the native heap utilization to be a value other than the java heap utilization.
+ size_t target_size = native_size / GetTargetHeapUtilization();
+ if (target_size > native_size + max_free_) {
+ target_size = native_size + max_free_;
+ } else if (target_size < native_size + min_free_) {
+ target_size = native_size + min_free_;
+ }
+ native_footprint_gc_watermark_ = target_size;
+ native_footprint_limit_ = 2 * target_size - native_size;
+}
+
void Heap::GrowForUtilization(collector::GcType gc_type, uint64_t gc_duration) {
// We know what our utilization is at this moment.
// This doesn't actually resize any memory. It just lets the heap grow more when necessary.
@@ -1746,6 +1760,8 @@
DCHECK_LE(concurrent_start_bytes_, max_allowed_footprint_);
DCHECK_LE(max_allowed_footprint_, growth_limit_);
}
+
+ UpdateMaxNativeFootprint();
}
void Heap::ClearGrowthLimit() {
@@ -1881,6 +1897,10 @@
return;
}
+ // We already have a request pending, no reason to start more until we update
+ // concurrent_start_bytes_.
+ concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
+
JNIEnv* env = self->GetJniEnv();
DCHECK(WellKnownClasses::java_lang_Daemons != NULL);
DCHECK(WellKnownClasses::java_lang_Daemons_requestGC != NULL);
@@ -1958,5 +1978,64 @@
return alloc_space_->Trim();
}
+bool Heap::IsGCRequestPending() const {
+ return concurrent_start_bytes_ != std::numeric_limits<size_t>::max();
+}
+
+void Heap::RegisterNativeAllocation(int bytes) {
+ // Total number of native bytes allocated.
+ native_bytes_allocated_ += bytes;
+ Thread* self = Thread::Current();
+ if (static_cast<size_t>(native_bytes_allocated_) > native_footprint_gc_watermark_) {
+ // The second watermark is higher than the gc watermark. If you hit this it means you are
+ // allocating native objects faster than the GC can keep up with.
+ if (static_cast<size_t>(native_bytes_allocated_) > native_footprint_limit_) {
+ JNIEnv* env = self->GetJniEnv();
+ // Can't do this in WellKnownClasses::Init since System is not properly set up at that
+ // point.
+ if (WellKnownClasses::java_lang_System_runFinalization == NULL) {
+ DCHECK(WellKnownClasses::java_lang_System != NULL);
+ WellKnownClasses::java_lang_System_runFinalization =
+ CacheMethod(env, WellKnownClasses::java_lang_System, true, "runFinalization", "()V");
+ assert(WellKnownClasses::java_lang_System_runFinalization != NULL);
+ }
+ if (WaitForConcurrentGcToComplete(self) != collector::kGcTypeNone) {
+ // Just finished a GC, attempt to run finalizers.
+ env->CallStaticVoidMethod(WellKnownClasses::java_lang_System,
+ WellKnownClasses::java_lang_System_runFinalization);
+ CHECK(!env->ExceptionCheck());
+ }
+
+ // If we still are over the watermark, attempt a GC for alloc and run finalizers.
+ if (static_cast<size_t>(native_bytes_allocated_) > native_footprint_limit_) {
+ CollectGarbageInternal(collector::kGcTypePartial, kGcCauseForAlloc, false);
+ env->CallStaticVoidMethod(WellKnownClasses::java_lang_System,
+ WellKnownClasses::java_lang_System_runFinalization);
+ CHECK(!env->ExceptionCheck());
+ }
+ // We have just run finalizers, update the native watermark since it is very likely that
+ // finalizers released native managed allocations.
+ UpdateMaxNativeFootprint();
+ } else {
+ if (!IsGCRequestPending()) {
+ RequestConcurrentGC(self);
+ }
+ }
+ }
+}
+
+void Heap::RegisterNativeFree(int bytes) {
+ int expected_size, new_size;
+ do {
+ expected_size = native_bytes_allocated_.get();
+ new_size = expected_size - bytes;
+ if (new_size < 0) {
+ ThrowRuntimeException("attempted to free %d native bytes with only %d native bytes registered as allocated",
+ bytes, expected_size);
+ break;
+ }
+ } while (!native_bytes_allocated_.CompareAndSwap(expected_size, new_size));
+}
+
} // namespace gc
} // namespace art
diff --git a/src/gc/heap.h b/src/gc/heap.h
index 790ab02..980f3bc 100644
--- a/src/gc/heap.h
+++ b/src/gc/heap.h
@@ -126,6 +126,10 @@
mirror::Object* AllocObject(Thread* self, mirror::Class* klass, size_t num_bytes)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void RegisterNativeAllocation(int bytes)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void RegisterNativeFree(int bytes) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
// The given reference is believed to be to an object in the Java heap, check the soundness of it.
void VerifyObjectImpl(const mirror::Object* o);
void VerifyObject(const mirror::Object* o) {
@@ -403,6 +407,7 @@
void RequestHeapTrim() LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);
void RequestConcurrentGC(Thread* self) LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);
+ bool IsGCRequestPending() const;
void RecordAllocation(size_t size, mirror::Object* object)
LOCKS_EXCLUDED(GlobalSynchronization::heap_bitmap_lock_)
@@ -421,6 +426,10 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void PostGcVerification(collector::GarbageCollector* gc);
+ // Update the watermark for the native allocated bytes based on the current number of native
+ // bytes allocated and the target utilization ratio.
+ void UpdateMaxNativeFootprint();
+
// Given the current contents of the alloc space, increase the allowed heap footprint to match
// the target utilization ratio. This should only be called immediately after a full garbage
// collection.
@@ -498,6 +507,10 @@
// When the number of bytes allocated exceeds the footprint TryAllocate returns NULL indicating
// a GC should be triggered.
size_t max_allowed_footprint_;
+ // The watermark at which a concurrent GC is requested by registerNativeAllocation.
+ size_t native_footprint_gc_watermark_;
+ // The watermark at which a GC is performed inside of registerNativeAllocation.
+ size_t native_footprint_limit_;
// When num_bytes_allocated_ exceeds this amount then a concurrent GC should be requested so that
// it completes ahead of an allocation failing.
@@ -515,6 +528,9 @@
// Number of bytes allocated. Adjusted after each allocation and free.
AtomicInteger num_bytes_allocated_;
+ // Bytes which are allocated and managed by native code but still need to be accounted for.
+ AtomicInteger native_bytes_allocated_;
+
// Heap verification flags.
const bool verify_missing_card_marks_;
const bool verify_system_weaks_;