Merge "Track cumulative objects and bytes copied for CC"
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 70faf4b..7afe6f9 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -1508,7 +1508,9 @@
uint64_t unevac_from_bytes = region_space_->GetBytesAllocatedInUnevacFromSpace();
uint64_t unevac_from_objects = region_space_->GetObjectsAllocatedInUnevacFromSpace();
uint64_t to_bytes = bytes_moved_.LoadSequentiallyConsistent();
+ cumulative_bytes_moved_.FetchAndAddRelaxed(to_bytes);
uint64_t to_objects = objects_moved_.LoadSequentiallyConsistent();
+ cumulative_objects_moved_.FetchAndAddRelaxed(to_objects);
if (kEnableFromSpaceAccountingCheck) {
CHECK_EQ(from_space_num_objects_at_first_pause_, from_objects + unevac_from_objects);
CHECK_EQ(from_space_num_bytes_at_first_pause_, from_bytes + unevac_from_bytes);
@@ -2360,6 +2362,8 @@
if (rb_slow_path_count_gc_total_ > 0) {
os << "GC slow path count " << rb_slow_path_count_gc_total_ << "\n";
}
+ os << "Cumulative bytes moved " << cumulative_bytes_moved_.LoadRelaxed() << "\n";
+ os << "Cumulative objects moved " << cumulative_objects_moved_.LoadRelaxed() << "\n";
}

} // namespace collector
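
Taken together, the two hunks above implement a simple pattern: the per-cycle counters are folded into monotonically growing totals with relaxed atomic adds, since the totals are statistics rather than synchronization points. Below is a minimal standalone sketch of that pattern in portable C++, using std::atomic in place of ART's Atomic<T> wrapper (whose LoadRelaxed, LoadSequentiallyConsistent, and FetchAndAddRelaxed map onto the corresponding std::memory_order operations); the class and method names here are illustrative, not taken from the ART sources.

#include <atomic>
#include <cstddef>
#include <cstdint>
#include <ostream>

class CopyStats {  // hypothetical name, not an ART class
 public:
  // GC-side hook: called for each object successfully copied during a
  // cycle. The memory orders ART uses at its actual update sites are
  // not shown in this diff; relaxed is used here for illustration.
  void RecordCopy(size_t bytes) {
    bytes_moved_.fetch_add(bytes, std::memory_order_relaxed);
    objects_moved_.fetch_add(1, std::memory_order_relaxed);
  }

  // End-of-cycle accumulation, mirroring the first hunk: each per-cycle
  // counter is read once (seq_cst, matching LoadSequentiallyConsistent)
  // and added into the running total with a relaxed fetch_add, matching
  // FetchAndAddRelaxed. Resetting the per-cycle counters at the start
  // of the next cycle happens elsewhere and is omitted here.
  void AccumulateCycle() {
    uint64_t to_bytes = bytes_moved_.load(std::memory_order_seq_cst);
    cumulative_bytes_moved_.fetch_add(to_bytes, std::memory_order_relaxed);
    uint64_t to_objects = objects_moved_.load(std::memory_order_seq_cst);
    cumulative_objects_moved_.fetch_add(to_objects, std::memory_order_relaxed);
  }

  // Mirrors the DumpPerformanceInfo hunk: relaxed loads suffice because
  // the totals are purely informational.
  void Dump(std::ostream& os) const {
    os << "Cumulative bytes moved "
       << cumulative_bytes_moved_.load(std::memory_order_relaxed) << "\n";
    os << "Cumulative objects moved "
       << cumulative_objects_moved_.load(std::memory_order_relaxed) << "\n";
  }

 private:
  std::atomic<size_t> bytes_moved_{0};
  std::atomic<size_t> objects_moved_{0};
  // 64-bit totals, as in the header hunk below, presumably so the
  // running sums cannot wrap on 32-bit targets where size_t is 32 bits.
  std::atomic<uint64_t> cumulative_bytes_moved_{0};
  std::atomic<uint64_t> cumulative_objects_moved_{0};
};
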
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index 55c4570..5b0e2d6 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -272,6 +272,8 @@
// How many objects and bytes we moved. Used for accounting.
Atomic<size_t> bytes_moved_;
Atomic<size_t> objects_moved_;
+ Atomic<uint64_t> cumulative_bytes_moved_;
+ Atomic<uint64_t> cumulative_objects_moved_;

// The skipped blocks are memory blocks/chunks that were copies of
// objects that were unused due to lost races (CAS failures) at
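
With these fields in place, the collector's DumpPerformanceInfo output (surfaced through ART's GC performance dump, e.g. when the runtime is configured to log GC performance at shutdown) gains two lines of the following shape; the values here are invented for illustration:

Cumulative bytes moved 16777216
Cumulative objects moved 524288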