Merge "Fix regression in target run-test 069. ." into dalvik-dev
diff --git a/src/class_linker.cc b/src/class_linker.cc
index 243d9a5..40ca635 100644
--- a/src/class_linker.cc
+++ b/src/class_linker.cc
@@ -2784,7 +2784,7 @@
}
bool ClassLinker::EnsureInitialized(Class* c, bool can_run_clinit, bool can_init_fields) {
- CHECK(c != NULL);
+ DCHECK(c != NULL);
if (c->IsInitialized()) {
return true;
}
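The CHECK to DCHECK change above removes the null test from release builds; a null c would still fault promptly at the IsInitialized() call on the next line, so the assertion only needs to fire in debug builds. For context, the usual distinction between the two macros can be sketched as follows (a minimal illustration of the convention, not ART's actual macro definitions):

    #include <cstdio>
    #include <cstdlib>

    // CHECK: evaluated in every build; aborts on failure.
    #define CHECK(cond)                                        \
      do {                                                     \
        if (!(cond)) {                                         \
          std::fprintf(stderr, "Check failed: %s\n", #cond);   \
          std::abort();                                        \
        }                                                      \
      } while (0)

    #ifdef NDEBUG
    #define DCHECK(cond) do {} while (0)  // release: compiled out entirely
    #else
    #define DCHECK(cond) CHECK(cond)      // debug: identical to CHECK
    #endif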
diff --git a/src/gc/space.cc b/src/gc/space.cc
index 9c8819b..274d777 100644
--- a/src/gc/space.cc
+++ b/src/gc/space.cc
@@ -73,8 +73,8 @@
AllocSpace::AllocSpace(const std::string& name, MemMap* mem_map, void* mspace, byte* begin,
byte* end, size_t growth_limit)
: MemMapSpace(name, mem_map, end - begin, kGcRetentionPolicyAlwaysCollect),
- num_bytes_allocated_(0), num_objects_allocated_(0),
- lock_("allocation space lock", kAllocSpaceLock), mspace_(mspace),
+ num_bytes_allocated_(0), num_objects_allocated_(0), total_bytes_allocated_(0),
+ total_objects_allocated_(0), lock_("allocation space lock", kAllocSpaceLock), mspace_(mspace),
growth_limit_(growth_limit) {
CHECK(mspace != NULL);
@@ -202,7 +202,10 @@
*reinterpret_cast<word*>(reinterpret_cast<byte*>(result) + AllocationSize(result)
- sizeof(word) - kChunkOverhead) = kPaddingValue;
}
- num_bytes_allocated_ += AllocationSize(result);
+ size_t allocation_size = AllocationSize(result);
+ num_bytes_allocated_ += allocation_size;
+ total_bytes_allocated_ += allocation_size;
+ ++total_objects_allocated_;
++num_objects_allocated_;
return result;
}
@@ -552,8 +555,8 @@
LargeObjectSpace::LargeObjectSpace(const std::string& name)
: DiscontinuousSpace(name, kGcRetentionPolicyAlwaysCollect),
- num_bytes_allocated_(0),
- num_objects_allocated_(0) {
+ num_bytes_allocated_(0), num_objects_allocated_(0), total_bytes_allocated_(0),
+ total_objects_allocated_(0) {
live_objects_.reset(new SpaceSetMap("large live objects"));
mark_objects_.reset(new SpaceSetMap("large marked objects"));
}
@@ -583,8 +586,11 @@
Object* obj = reinterpret_cast<Object*>(mem_map->Begin());
large_objects_.push_back(obj);
mem_maps_.Put(obj, mem_map);
- num_bytes_allocated_ += mem_map->Size();
+ size_t allocation_size = mem_map->Size();
+ num_bytes_allocated_ += allocation_size;
+ total_bytes_allocated_ += allocation_size;
++num_objects_allocated_;
+ ++total_objects_allocated_;
return obj;
}
@@ -757,7 +763,9 @@
}
num_objects_allocated_++;
+ total_objects_allocated_++;
num_bytes_allocated_ += num_bytes;
+ total_bytes_allocated_ += num_bytes;
return reinterpret_cast<Object*>(addr);
}
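The space.cc changes pair each existing counter with a monotonic twin: num_bytes_allocated_ and num_objects_allocated_ continue to track the current footprint (they are decremented on the free paths, which these hunks do not show), while the new total_* counters only ever grow and feed the heap's lifetime statistics. A stripped-down illustration of that invariant (hypothetical struct, not ART code):

    #include <cstddef>

    struct AllocCounters {
      size_t num_bytes_allocated = 0;    // current footprint; shrinks on free
      size_t total_bytes_allocated = 0;  // lifetime total; never decremented

      void RecordAlloc(size_t bytes) {
        num_bytes_allocated += bytes;
        total_bytes_allocated += bytes;
      }
      void RecordFree(size_t bytes) {
        num_bytes_allocated -= bytes;    // total_bytes_allocated is untouched
      }
    };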
diff --git a/src/gc/space.h b/src/gc/space.h
index a543500..ab29582 100644
--- a/src/gc/space.h
+++ b/src/gc/space.h
@@ -314,6 +314,14 @@
return num_objects_allocated_;
}
+ size_t GetTotalBytesAllocated() const {
+ return total_bytes_allocated_;
+ }
+
+ size_t GetTotalObjectsAllocated() const {
+ return total_objects_allocated_;
+ }
+
private:
Object* AllocWithoutGrowthLocked(size_t num_bytes) EXCLUSIVE_LOCKS_REQUIRED(lock_);
@@ -324,6 +332,8 @@
// Approximate number of bytes which have been allocated into the space.
size_t num_bytes_allocated_;
size_t num_objects_allocated_;
+ size_t total_bytes_allocated_;
+ size_t total_objects_allocated_;
static size_t bitmap_index_;
@@ -453,6 +463,14 @@
return num_objects_allocated_;
}
+ size_t GetTotalBytesAllocated() const {
+ return total_bytes_allocated_;
+ }
+
+ size_t GetTotalObjectsAllocated() const {
+ return total_objects_allocated_;
+ }
+
protected:
LargeObjectSpace(const std::string& name);
@@ -460,6 +478,8 @@
// Approximate number of bytes which have been allocated into the space.
size_t num_bytes_allocated_;
size_t num_objects_allocated_;
+ size_t total_bytes_allocated_;
+ size_t total_objects_allocated_;
UniquePtr<SpaceSetMap> live_objects_;
UniquePtr<SpaceSetMap> mark_objects_;
diff --git a/src/heap.cc b/src/heap.cc
index 3989a24..d3de603 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -139,8 +139,9 @@
concurrent_start_bytes_(std::numeric_limits<size_t>::max()),
concurrent_start_size_(128 * KB),
concurrent_min_free_(256 * KB),
- concurrent_gc_start_rate_(3 * MB / 2),
sticky_gc_count_(0),
+ total_bytes_freed_(0),
+ total_objects_freed_(0),
large_object_threshold_(3 * kPageSize),
num_bytes_allocated_(0),
verify_missing_card_marks_(false),
@@ -152,7 +153,6 @@
min_alloc_space_size_for_sticky_gc_(2 * MB),
min_remaining_space_for_sticky_gc_(1 * MB),
last_trim_time_(0),
- try_running_gc_(false),
requesting_gc_(false),
max_allocation_stack_size_(MB),
reference_referent_offset_(0),
@@ -161,6 +161,10 @@
reference_pendingNext_offset_(0),
finalizer_reference_zombie_offset_(0),
target_utilization_(0.5),
+ total_paused_time_(0),
+ total_wait_time_(0),
+ measure_allocation_time_(false),
+ total_allocation_time_(0),
verify_objects_(false) {
if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
LOG(INFO) << "Heap() entering";
@@ -320,6 +324,39 @@
}
}
+void Heap::DumpGcPerformanceInfo() {
+ // Dump cumulative timings.
+ LOG(INFO) << "Dumping cumulative Gc timings";
+ uint64_t total_duration = 0;
+ for (CumulativeTimings::iterator it = cumulative_timings_.begin();
+ it != cumulative_timings_.end(); ++it) {
+ CumulativeLogger* logger = it->second;
+ if (logger->GetTotalNs() != 0) {
+ logger->Dump();
+ total_duration += logger->GetTotalNs();
+ }
+ }
+ uint64_t allocation_time = static_cast<uint64_t>(total_allocation_time_) * kTimeAdjust;
+ size_t total_objects_allocated = GetTotalObjectsAllocated();
+ size_t total_bytes_allocated = GetTotalBytesAllocated();
+ if (total_duration != 0) {
+ const double total_seconds = double(total_duration / 1000) / 1000000.0;
+ LOG(INFO) << "Total time spent in GC: " << PrettyDuration(total_duration);
+ LOG(INFO) << "Mean GC size throughput: "
+ << PrettySize(GetTotalBytesFreed() / total_seconds) << "/s";
+ LOG(INFO) << "Mean GC object throughput: " << GetTotalObjectsFreed() / total_seconds << "/s";
+ }
+ LOG(INFO) << "Total number of allocations: " << total_objects_allocated;
+ LOG(INFO) << "Total bytes allocated " << PrettySize(total_bytes_allocated);
+ if (measure_allocation_time_) {
+ LOG(INFO) << "Total time spent allocating: " << PrettyDuration(allocation_time);
+ LOG(INFO) << "Mean allocation time: "
+ << PrettyDuration(allocation_time / total_objects_allocated);
+ }
+ LOG(INFO) << "Total mutator paused time: " << PrettyDuration(total_paused_time_);
+ LOG(INFO) << "Total waiting for Gc to complete time: " << PrettyDuration(total_wait_time_);
+}
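Running DumpGcPerformanceInfo's arithmetic once with made-up numbers shows what the log lines report; note that the integer division by 1000 happens before the floating-point conversion, so sub-microsecond precision is dropped deliberately:

    // Hypothetical figures, not measured data:
    //   total_duration         = 2,500,000,000 ns  (2.5 s of cumulative GC)
    //   GetTotalBytesFreed()   = 52,428,800        (50 MB)
    //   GetTotalObjectsFreed() = 1,000,000
    //
    // total_seconds     = double(2500000000 / 1000) / 1000000.0 = 2.5
    // size throughput   = 50 MB / 2.5 s   = 20 MB/s
    // object throughput = 1,000,000 / 2.5 = 400,000 objects/s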
+
Heap::~Heap() {
// If we don't reset then the mark stack complains in its destructor.
allocation_stack_->Reset();
@@ -377,6 +414,10 @@
Object* obj = NULL;
size_t size = 0;
+ uint64_t allocation_start = 0;
+ if (measure_allocation_time_) {
+ allocation_start = NanoTime();
+ }
// We need to have a zygote space or else our newly allocated large object can end up in the
// Zygote resulting in it being prematurely freed.
@@ -385,21 +426,17 @@
if (byte_count >= large_object_threshold_ && have_zygote_space_ && c->IsPrimitiveArray()) {
size = RoundUp(byte_count, kPageSize);
obj = Allocate(self, NULL, size);
-
- if (obj != NULL) {
- // Make sure that our large object didn't get placed anywhere within the space interval or else
- // it breaks the immune range.
- DCHECK(reinterpret_cast<byte*>(obj) < spaces_.front()->Begin() ||
- reinterpret_cast<byte*>(obj) >= spaces_.back()->End());
- }
+ // Make sure that our large object didn't get placed anywhere within the space interval or else
+ // it breaks the immune range.
+ DCHECK(obj == NULL ||
+ reinterpret_cast<byte*>(obj) < spaces_.front()->Begin() ||
+ reinterpret_cast<byte*>(obj) >= spaces_.back()->End());
} else {
obj = Allocate(self, alloc_space_, byte_count);
- size = alloc_space_->AllocationSize(obj);
- if (obj != NULL) {
- // Additional verification to ensure that we did not allocate into a zygote space.
- DCHECK(!have_zygote_space_ || !FindSpaceFromObject(obj)->IsZygoteSpace());
- }
+ // Ensure that we did not allocate into a zygote space.
+ DCHECK(obj == NULL || !have_zygote_space_ || !FindSpaceFromObject(obj)->IsZygoteSpace());
+ size = alloc_space_->AllocationSize(obj);
}
if (LIKELY(obj != NULL)) {
@@ -422,6 +459,10 @@
}
VerifyObject(obj);
+ if (measure_allocation_time_) {
+ total_allocation_time_ += (NanoTime() - allocation_start) / kTimeAdjust;
+ }
+
return obj;
}
int64_t total_bytes_free = GetFreeMemory();
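The bookkeeping above samples NanoTime() around the allocation and accumulates the delta divided by kTimeAdjust (1024, defined in heap.h). Per the heap.h comment, the divide exists because total_allocation_time_ is a 32-bit atomic integer: 2^32 nanoseconds is only about 4.3 seconds, whereas 2^32 * 1024 ns is roughly 73 minutes of accumulated allocation time. A self-contained sketch of the arithmetic, with a plain uint32_t standing in for ART's AtomicInteger:

    #include <cstdint>

    static const uint64_t kTimeAdjust = 1024;  // value from the patch
    static uint32_t total_allocation_time = 0;

    void RecordAllocationTime(uint64_t start_ns, uint64_t end_ns) {
      // Store the delta in ~1 us units so the 32-bit counter gains headroom:
      // raw ns would saturate after ~4.3 s; divided by 1024, after ~73 min.
      total_allocation_time +=
          static_cast<uint32_t>((end_ns - start_ns) / kTimeAdjust);
    }

    // Reporting converts back, as DumpGcPerformanceInfo does:
    //   ns ~= static_cast<uint64_t>(total_allocation_time) * kTimeAdjust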
@@ -566,8 +607,6 @@
}
void Heap::RecordFree(size_t freed_objects, size_t freed_bytes) {
- COMPILE_ASSERT(sizeof(size_t) == sizeof(int32_t),
- int32_t_must_be_same_size_as_size_t_for_used_atomic_operations);
DCHECK_LE(freed_bytes, static_cast<size_t>(num_bytes_allocated_));
num_bytes_allocated_ -= freed_bytes;
@@ -688,18 +727,58 @@
return TryToAllocate(self, space, alloc_size, true);
}
-int64_t Heap::GetMaxMemory() {
+float Heap::GetTargetHeapUtilization() const {
+ return target_utilization_;
+}
+
+void Heap::SetTargetHeapUtilization(float target) {
+ DCHECK_GT(target, 0.0f); // asserted in Java code
+ DCHECK_LT(target, 1.0f);
+ target_utilization_ = target;
+}
+
+int64_t Heap::GetMaxMemory() const {
return growth_limit_;
}
-int64_t Heap::GetTotalMemory() {
+int64_t Heap::GetTotalMemory() const {
return GetMaxMemory();
}
-int64_t Heap::GetFreeMemory() {
+int64_t Heap::GetFreeMemory() const {
return GetMaxMemory() - num_bytes_allocated_;
}
+size_t Heap::GetTotalBytesFreed() const {
+ return total_bytes_freed_;
+}
+
+size_t Heap::GetTotalObjectsFreed() const {
+ return total_objects_freed_;
+}
+
+size_t Heap::GetTotalObjectsAllocated() const {
+ size_t total = large_object_space_->GetTotalObjectsAllocated();
+ for (Spaces::const_iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
+ Space* space = *it;
+ if (space->IsAllocSpace()) {
+ total += space->AsAllocSpace()->GetTotalObjectsAllocated();
+ }
+ }
+ return total;
+}
+
+size_t Heap::GetTotalBytesAllocated() const {
+ size_t total = large_object_space_->GetTotalBytesAllocated();
+ for (Spaces::const_iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
+ Space* space = *it;
+ if (space->IsAllocSpace()) {
+ total += space->AsAllocSpace()->GetTotalBytesAllocated();
+ }
+ }
+ return total;
+}
+
class InstanceCounter {
public:
InstanceCounter(Class* c, bool count_assignable, size_t* const count)
@@ -1027,6 +1106,8 @@
cleared_references = mark_sweep.GetClearedReferences();
bytes_freed = mark_sweep.GetFreedBytes();
+ total_bytes_freed_ += bytes_freed;
+ total_objects_freed_ += mark_sweep.GetFreedObjects();
}
if (verify_post_gc_heap_) {
@@ -1049,6 +1130,7 @@
// If the GC was slow, then print timings in the log.
uint64_t duration = (NanoTime() - start_time) / 1000 * 1000;
+ total_paused_time_ += duration;
if (duration > MsToNs(50)) {
const size_t percent_free = GetPercentFree();
const size_t current_heap_size = GetUsedMemorySize();
@@ -1586,15 +1668,12 @@
// Make sure everything in the live stack isn't something we unmarked.
std::sort(allocation_stack_->Begin(), allocation_stack_->End());
for (Object** it = live_stack_->Begin(); it != live_stack_->End(); ++it) {
- if (std::binary_search(allocation_stack_->Begin(), allocation_stack_->End(), *it)) {
- LOG(FATAL) << "Unmarked object " << *it << " in the live stack";
- }
+ DCHECK(!std::binary_search(allocation_stack_->Begin(), allocation_stack_->End(), *it))
+ << "Unmarked object " << *it << " in the live stack";
}
} else {
for (Object** it = allocation_stack_->Begin(); it != allocation_stack_->End(); ++it) {
- if (GetLiveBitmap()->Test(*it)) {
- LOG(FATAL) << "Object " << *it << " is marked as live";
- }
+ DCHECK(!GetLiveBitmap()->Test(*it)) << "Object " << *it << " is marked as live";
}
}
#endif
@@ -1662,12 +1741,16 @@
cleared_references = mark_sweep.GetClearedReferences();
bytes_freed = mark_sweep.GetFreedBytes();
+ total_bytes_freed_ += bytes_freed;
+ total_objects_freed_ += mark_sweep.GetFreedObjects();
}
GrowForUtilization();
timings.AddSplit("GrowForUtilization");
EnqueueClearedReferences(&cleared_references);
+ timings.AddSplit("EnqueueClearedReferences");
+
RequestHeapTrim();
timings.AddSplit("Finish");
@@ -1675,6 +1758,7 @@
uint64_t pause_roots = (root_end - root_begin) / 1000 * 1000;
uint64_t pause_dirty = (dirty_end - dirty_begin) / 1000 * 1000;
uint64_t duration = (NanoTime() - root_begin) / 1000 * 1000;
+ total_paused_time_ += pause_roots + pause_dirty;
if (pause_roots > MsToNs(5) || pause_dirty > MsToNs(5)) {
const size_t percent_free = GetPercentFree();
const size_t current_heap_size = GetUsedMemorySize();
@@ -1706,6 +1790,7 @@
do_wait = is_gc_running_;
}
if (do_wait) {
+ uint64_t wait_time;
// We must wait; change thread state, then sleep on gc_complete_cond_.
ScopedThreadStateChange tsc(Thread::Current(), kWaitingForGcToComplete);
{
@@ -1714,8 +1799,9 @@
gc_complete_cond_->Wait(self, *gc_complete_lock_);
}
last_gc_type = last_gc_type_;
+ wait_time = NanoTime() - wait_start;
+ total_wait_time_ += wait_time;
}
- uint64_t wait_time = NanoTime() - wait_start;
if (wait_time > MsToNs(5)) {
LOG(INFO) << "WaitForConcurrentGcToComplete blocked for " << PrettyDuration(wait_time);
}
@@ -1727,12 +1813,7 @@
void Heap::DumpForSigQuit(std::ostream& os) {
os << "Heap: " << GetPercentFree() << "% free, " << PrettySize(GetUsedMemorySize()) << "/"
<< PrettySize(GetTotalMemory()) << "; " << GetObjectsAllocated() << " objects\n";
- // Dump cumulative timings.
- LOG(INFO) << "Dumping cumulative Gc timings";
- for (CumulativeTimings::iterator it = cumulative_timings_.begin();
- it != cumulative_timings_.end(); ++it) {
- it->second->Dump();
- }
+ DumpGcPerformanceInfo();
}
size_t Heap::GetPercentFree() {
diff --git a/src/heap.h b/src/heap.h
index 209d631..3d49e30 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -82,8 +82,9 @@
class Heap {
public:
static const size_t kInitialSize = 2 * MB;
-
static const size_t kMaximumSize = 32 * MB;
+ // Used so that we don't overflow the allocation time atomic integer.
+ static const size_t kTimeAdjust = 1024;
typedef void (RootVisitor)(const Object* root, void* arg);
typedef bool (IsMarkedTester)(const Object* object, void* arg);
@@ -136,11 +137,11 @@
void ConcurrentGC(Thread* self) LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);
// Implements java.lang.Runtime.maxMemory.
- int64_t GetMaxMemory();
+ int64_t GetMaxMemory() const;
// Implements java.lang.Runtime.totalMemory.
- int64_t GetTotalMemory();
+ int64_t GetTotalMemory() const;
// Implements java.lang.Runtime.freeMemory.
- int64_t GetFreeMemory();
+ int64_t GetFreeMemory() const;
// Implements VMDebug.countInstancesOfClass.
int64_t CountInstances(Class* c, bool count_assignable)
@@ -153,16 +154,11 @@
// Target ideal heap utilization ratio, implements
// dalvik.system.VMRuntime.getTargetHeapUtilization.
- float GetTargetHeapUtilization() {
- return target_utilization_;
- }
+ float GetTargetHeapUtilization() const;
+
// Set target ideal heap utilization ratio, implements
// dalvik.system.VMRuntime.setTargetHeapUtilization.
- void SetTargetHeapUtilization(float target) {
- DCHECK_GT(target, 0.0f); // asserted in Java code
- DCHECK_LT(target, 1.0f);
- target_utilization_ = target;
- }
+ void SetTargetHeapUtilization(float target);
// For the alloc space, sets the maximum number of bytes that the heap is allowed to allocate
// from the system. Doesn't allow the space to exceed its growth limit.
@@ -252,6 +248,18 @@
size_t GetConcurrentMinFree() const;
size_t GetUsedMemorySize() const;
+ // Returns the total number of objects allocated since the heap was created.
+ size_t GetTotalObjectsAllocated() const;
+
+ // Returns the total number of bytes allocated since the heap was created.
+ size_t GetTotalBytesAllocated() const;
+
+ // Returns the total number of objects freed since the heap was created.
+ size_t GetTotalObjectsFreed() const;
+
+ // Returns the total number of bytes freed since the heap was created.
+ size_t GetTotalBytesFreed() const;
+
// Functions for getting the bitmap which corresponds to an object's address.
// This is probably slow; TODO: use a better data structure such as a binary tree.
ContinuousSpace* FindSpaceFromObject(const Object*) const;
@@ -298,6 +306,9 @@
// UnReserve the address range where the oat file will be placed.
void UnReserveOatFileAddressRange();
+ // Dumps cumulative GC timing and throughput statistics.
+ void DumpGcPerformanceInfo();
+
private:
// Allocates uninitialized storage. Passing in a null space tries to place the object in the
// large object space.
@@ -413,10 +424,11 @@
size_t concurrent_min_free_;
// Number of bytes allocated since the last GC; used to help determine when to schedule concurrent GCs.
size_t bytes_since_last_gc_;
- // Start a concurrent GC if we have allocated concurrent_gc_start_rate_ bytes and not done a GCs.
- size_t concurrent_gc_start_rate_;
size_t sticky_gc_count_;
+ size_t total_bytes_freed_;
+ size_t total_objects_freed_;
+
// Primitive objects larger than this size are put in the large object space.
size_t large_object_threshold_;
@@ -450,11 +462,6 @@
UniquePtr<HeapBitmap> live_bitmap_ GUARDED_BY(Locks::heap_bitmap_lock_);
UniquePtr<HeapBitmap> mark_bitmap_ GUARDED_BY(Locks::heap_bitmap_lock_);
- // True while the garbage collector is trying to signal the GC daemon thread.
- // This flag is needed to prevent recursion from occurring when the JNI calls
- // allocate memory and request another GC.
- bool try_running_gc_;
-
// Used to ensure that we don't ever recursively request GC.
volatile bool requesting_gc_;
@@ -487,6 +494,14 @@
// Target ideal heap utilization ratio
float target_utilization_;
+ // Total time for which mutators have been paused or have waited for the GC to complete, in nanoseconds.
+ uint64_t total_paused_time_;
+ uint64_t total_wait_time_;
+
+ // Whether to measure allocation time, and the total time spent allocating (in kTimeAdjust units).
+ const bool measure_allocation_time_;
+ AtomicInteger total_allocation_time_;
+
bool verify_objects_;
friend class MarkSweep;
diff --git a/src/runtime_support.h b/src/runtime_support.h
index 0d24e48..5a5cdcd 100644
--- a/src/runtime_support.h
+++ b/src/runtime_support.h
@@ -74,7 +74,8 @@
return NULL; // Failure
}
}
- if (!runtime->GetClassLinker()->EnsureInitialized(klass, true, true)) {
+ if (!klass->IsInitialized() &&
+ !runtime->GetClassLinker()->EnsureInitialized(klass, true, true)) {
DCHECK(self->IsExceptionPending());
return NULL; // Failure
}
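The added guard is an inline fast path: IsInitialized() is a cheap flag test, so the common already-initialized case no longer pays for a call into ClassLinker::EnsureInitialized (which performs the same test internally, as the class_linker.cc hunk shows). The general shape of the pattern, as a generic sketch with hypothetical type parameters and a hypothetical helper name:

    // Not part of the patch; illustrates the double-test idiom only.
    template <typename Linker, typename Klass>
    inline bool EnsureInitializedFast(Linker* linker, Klass* klass) {
      if (klass->IsInitialized()) {
        return true;  // common case: one flag test, no out-of-line call
      }
      return linker->EnsureInitialized(klass, /*can_run_clinit=*/true,
                                       /*can_init_fields=*/true);
    }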
diff --git a/src/timing_logger.h b/src/timing_logger.h
index 5bc9d71..7dc2671 100644
--- a/src/timing_logger.h
+++ b/src/timing_logger.h
@@ -170,7 +170,7 @@
<< FormatDuration(std_dev * kAdjust, tu) << " " << labels_[i] << "\n";
}
uint64_t total_mean_x2 = total_time_squared_;
- uint64_t mean_total_ns = GetTotalNs();
+ uint64_t mean_total_ns = GetTotalTime();
if (iterations_ != 0) {
total_mean_x2 /= iterations_;
mean_total_ns /= iterations_;
@@ -183,6 +183,12 @@
}
uint64_t GetTotalNs() const {
+ return GetTotalTime() * kAdjust;
+ }
+
+ private:
+
+ uint64_t GetTotalTime() const {
uint64_t total = 0;
for (size_t i = 0; i < times_.size(); ++i) {
total += times_[i];
@@ -190,7 +196,6 @@
return total;
}
- private:
static const uint64_t kAdjust = 1000;
std::string name_;
bool precise_;
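The timing_logger.h refactoring separates the raw accumulator from its public, scaled view: samples in times_ are stored pre-divided by kAdjust (1000), the now-private GetTotalTime() sums them unscaled, and GetTotalNs() multiplies back to nanoseconds. Switching Dump() from GetTotalNs() to GetTotalTime() keeps its mean in the same scaled units as total_time_squared_, which is why the standard deviation is multiplied by kAdjust only at formatting time (the FormatDuration(std_dev * kAdjust, tu) call above); the mean_total_ns name is now slightly stale, since the variable holds scaled units rather than nanoseconds. In sketch form:

    // Units (assuming each recorded sample is stored as duration_ns / kAdjust):
    //   times_[i]      : scaled sample              (~ microseconds)
    //   GetTotalTime() : sum of scaled samples      (private, unscaled view)
    //   GetTotalNs()   : GetTotalTime() * kAdjust   (public, nanoseconds)
    // Dump() averages in the scaled domain (consistent with total_time_squared_)
    // and scales up by kAdjust only when formatting durations for output.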