Revert "Revert "Prevent overflows for concurrent_start_bytes_ sum""
This reverts commit 83ba9b979d932a5b6430c1affd171429b70b6c3f.
It also fixes a bug exposed by the original CL, and cleans up a
couple of related issues:
- CheckConcurrentGCForNative was renamed to CheckGCForNative to reflect
  the fact that it does not just deal with concurrent GC.
- In the non-concurrent case, concurrent_start_bytes_ is not
meaningful; use target_footprint_ instead.
- UnsignedSum should use >= instead of >; with >, the y == 0 case
  (where x + y == x) would be wrongly treated as overflow. See the
  sketch below.
The second of these issues caused the test failures observed with the
previous CL.
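
For reference, a minimal standalone sketch of the saturating add, not
part of this CL; main() and the asserts are illustrative only:

  #include <cassert>
  #include <cstddef>
  #include <limits>

  // Saturating size_t addition, mirroring Heap::UnsignedSum. Unsigned
  // addition wraps on overflow, so x + y < x exactly when the sum
  // overflowed.
  static size_t UnsignedSum(size_t x, size_t y) {
    return x + y >= x ? x + y : std::numeric_limits<size_t>::max();
  }

  int main() {
    constexpr size_t kMax = std::numeric_limits<size_t>::max();
    assert(UnsignedSum(1, 2) == 3);        // Normal case.
    assert(UnsignedSum(5, 0) == 5);        // With >, this would wrongly be kMax.
    assert(UnsignedSum(kMax, 1) == kMax);  // Overflow saturates.
    return 0;
  }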
Test: Built without read barrier and ran with --runtime-option=-Xgc:SS
Change-Id: Iae004c453bf2cae2739df66b6797af4a792886fc
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 6bdba12..18dfbf5 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -917,9 +917,13 @@
return main_space_backup_ != nullptr;
}
+ // size_t saturating arithmetic.
static ALWAYS_INLINE size_t UnsignedDifference(size_t x, size_t y) {
return x > y ? x - y : 0;
}
+ static ALWAYS_INLINE size_t UnsignedSum(size_t x, size_t y) {
+ return x + y >= x ? x + y : std::numeric_limits<size_t>::max();
+ }
static ALWAYS_INLINE bool AllocatorHasAllocationStack(AllocatorType allocator_type) {
return
@@ -950,13 +954,13 @@
// Checks whether we should garbage collect:
ALWAYS_INLINE bool ShouldConcurrentGCForJava(size_t new_num_bytes_allocated);
- float NativeMemoryOverTarget(size_t current_native_bytes);
+ float NativeMemoryOverTarget(size_t current_native_bytes, bool is_gc_concurrent);
ALWAYS_INLINE void CheckConcurrentGCForJava(Thread* self,
size_t new_num_bytes_allocated,
ObjPtr<mirror::Object>* obj)
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!*pending_task_lock_, !*gc_complete_lock_);
- void CheckConcurrentGCForNative(Thread* self)
+ void CheckGCForNative(Thread* self)
REQUIRES(!*pending_task_lock_, !*gc_complete_lock_);
accounting::ObjectStack* GetMarkStack() {
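
Note: the hunks above show only the signature changes. As a hedged
sketch of what the new is_gc_concurrent parameter presumably selects
inside NativeMemoryOverTarget (the local name "baseline" is invented
for illustration, not taken from the CL):

  // Hypothetical sketch, not the CL's actual body: choose the Java-heap
  // baseline that native allocation headroom is measured against.
  size_t baseline = is_gc_concurrent
      ? concurrent_start_bytes_  // Meaningful only when GC is concurrent.
      : target_footprint_;       // Non-concurrent: fall back to the target footprint.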