Handle zero-size allocations properly in heap profiler
Zero-size allocations are now ignored by the heap profiler.
The percentage calculation was originally changed to avoid division
by zero, but it also fixes an integer overflow with allocations
larger than 32768 KiB on platforms with a 32-bit size_t.
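
For illustration (a hypothetical sketch assuming a 32-bit size_t; not
part of the patch): the old expression |buckets.front().size * 125|
wraps once the bucket size exceeds 2^32 / 125 bytes, about 33 MiB:

    // Hypothetical repro with a 32-bit size_t.
    size_t size = 40 * 1024 * 1024;  // 40 MiB bucket.
    size_t product = size * 125;     // 5242880000 mod 2^32 == 947912704.
    // The wrapped product makes the computed contribution spuriously
    // small, so a large bucket could be cut off early. Comparing
    // |size < accounted_for / 125| avoids the multiplication entirely.
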
Review URL: https://codereview.chromium.org/1859143003
Cr-Commit-Position: refs/heads/master@{#385426}
CrOS-Libchrome-Original-Commit: a29434e19fa91646e9c40afa5078e64e0ae73903
diff --git a/base/trace_event/heap_profiler_allocation_register.cc b/base/trace_event/heap_profiler_allocation_register.cc
index 9a846c5..a0fc4be 100644
--- a/base/trace_event/heap_profiler_allocation_register.cc
+++ b/base/trace_event/heap_profiler_allocation_register.cc
@@ -36,6 +36,8 @@
size_t size,
AllocationContext context) {
DCHECK(address != nullptr);
+ if (size == 0)
+ return;
CellIndex* idx_ptr = Lookup(address);
diff --git a/base/trace_event/heap_profiler_allocation_register_unittest.cc b/base/trace_event/heap_profiler_allocation_register_unittest.cc
index 3ec4580..ceaff6e 100644
--- a/base/trace_event/heap_profiler_allocation_register_unittest.cc
+++ b/base/trace_event/heap_profiler_allocation_register_unittest.cc
@@ -59,17 +59,20 @@
AllocationRegister reg;
AllocationContext ctx = AllocationContext::Empty();
+ // Zero-sized allocations should be discarded.
+ reg.Insert(reinterpret_cast<void*>(1), 0, ctx);
+
EXPECT_EQ(0u, OrAllAddresses(reg));
- reg.Insert(reinterpret_cast<void*>(1), 0, ctx);
+ reg.Insert(reinterpret_cast<void*>(1), 1, ctx);
EXPECT_EQ(1u, OrAllAddresses(reg));
- reg.Insert(reinterpret_cast<void*>(2), 0, ctx);
+ reg.Insert(reinterpret_cast<void*>(2), 1, ctx);
EXPECT_EQ(3u, OrAllAddresses(reg));
- reg.Insert(reinterpret_cast<void*>(4), 0, ctx);
+ reg.Insert(reinterpret_cast<void*>(4), 1, ctx);
EXPECT_EQ(7u, OrAllAddresses(reg));
@@ -90,8 +93,8 @@
AllocationRegister reg;
AllocationContext ctx = AllocationContext::Empty();
- reg.Insert(reinterpret_cast<void*>(1), 0, ctx);
- reg.Insert(reinterpret_cast<void*>(2), 0, ctx);
+ reg.Insert(reinterpret_cast<void*>(1), 1, ctx);
+ reg.Insert(reinterpret_cast<void*>(2), 1, ctx);
reg.Remove(reinterpret_cast<void*>(1));
reg.Remove(reinterpret_cast<void*>(1)); // Remove for the second time.
reg.Remove(reinterpret_cast<void*>(4)); // Remove never inserted address.
@@ -179,7 +182,7 @@
uint32_t initial_water_mark = GetHighWaterMark(reg);
for (uintptr_t i = 2; i < prime; i++) {
- size_t size = i % 31;
+ size_t size = i % 31 + 1;
expected_sum += size;
reg.Insert(reinterpret_cast<void*>(i), size, ctx);
}
@@ -189,7 +192,7 @@
// Iterate the numbers 2, 3, ..., prime - 1 in pseudorandom order.
for (uintptr_t i = generator; i != 1; i = (i * generator) % prime) {
- size_t size = i % 31;
+ size_t size = i % 31 + 1;
expected_sum -= size;
reg.Remove(reinterpret_cast<void*>(i));
EXPECT_EQ(expected_sum, SumAllSizes(reg));
@@ -200,12 +203,12 @@
// Insert |prime - 2| entries again. This should use cells from the free list,
// so the |next_unused_cell_| index should not change.
for (uintptr_t i = 2; i < prime; i++)
- reg.Insert(reinterpret_cast<void*>(i), 0, ctx);
+ reg.Insert(reinterpret_cast<void*>(i), 1, ctx);
ASSERT_EQ(prime - 2, GetHighWaterMark(reg) - initial_water_mark);
// Inserting one more entry should use a fresh cell again.
- reg.Insert(reinterpret_cast<void*>(prime), 0, ctx);
+ reg.Insert(reinterpret_cast<void*>(prime), 1, ctx);
ASSERT_EQ(prime - 1, GetHighWaterMark(reg) - initial_water_mark);
}
@@ -268,7 +271,7 @@
// minus 1 elements are inserted, because cell 0 is unused, so this should
// fill up the available cells exactly.
for (i = 1; i < GetNumCells(reg); i++) {
- reg.Insert(reinterpret_cast<void*>(i), 0, ctx);
+ reg.Insert(reinterpret_cast<void*>(i), 1, ctx);
}
// Adding just one extra element might still work because the allocated memory
@@ -277,7 +280,7 @@
const size_t cells_per_page = GetNumCellsPerPage();
ASSERT_DEATH(for (size_t j = 0; j < cells_per_page; j++) {
- reg.Insert(reinterpret_cast<void*>(i + j), 0, ctx);
+ reg.Insert(reinterpret_cast<void*>(i + j), 1, ctx);
}, "");
}
#endif
diff --git a/base/trace_event/heap_profiler_heap_dump_writer.cc b/base/trace_event/heap_profiler_heap_dump_writer.cc
index fe99e48..7ad401b 100644
--- a/base/trace_event/heap_profiler_heap_dump_writer.cc
+++ b/base/trace_event/heap_profiler_heap_dump_writer.cc
@@ -129,20 +129,14 @@
std::make_heap(buckets.begin(), buckets.end());
// Keep including buckets until adding one would increase the number of
- // bytes accounted for by less than 0.8 percent. This simple heuristic works
+ // bytes accounted for by less than 0.8% (1/125). This simple heuristic works
// quite well. The large buckets end up in [it, end()), [begin(), it) is the
// part that contains the max-heap of small buckets.
size_t accounted_for = 0;
std::vector<Bucket>::iterator it;
for (it = buckets.end(); it != buckets.begin(); --it) {
- // Compute the contribution to the number of bytes accounted for as a
- // fraction of 125 (in increments of 0.8 percent). Anything less than 1/125
- // is rounded down to 0 due to integer division. Buckets are iterated by
- // descending size, so later buckets cannot have a larger contribution than
- // this one.
accounted_for += buckets.front().size;
- size_t contribution = buckets.front().size * 125 / accounted_for;
- if (contribution == 0)
+ if (buckets.front().size < (accounted_for / 125))
break;
// Put the largest bucket in [begin, it) at |it - 1| and max-heapify
@@ -234,6 +228,7 @@
// contexts stored in |bytes_by_context|.
Bucket root_bucket;
for (const auto& context_and_size : bytes_by_context) {
+ DCHECK_GT(context_and_size.second, 0u);
const AllocationContext* context = &context_and_size.first;
const size_t size = context_and_size.second;
root_bucket.bytes_by_context.push_back(std::make_pair(context, size));
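
For context, a minimal standalone sketch of the selection heuristic in
heap_profiler_heap_dump_writer.cc (illustrative only, not part of the
patch; CountSignificantBuckets is a hypothetical name): buckets are
visited in descending size order, and iteration stops once a bucket
contributes less than 1/125 (0.8%) of the bytes accounted for so far.

    #include <cstddef>
    #include <vector>

    // Standalone model of the 0.8% cutoff: given bucket sizes in
    // descending order, count how many buckets are kept before one
    // falls below 1/125 of the running total.
    size_t CountSignificantBuckets(const std::vector<size_t>& sizes_desc) {
      size_t accounted_for = 0;
      size_t kept = 0;
      for (size_t size : sizes_desc) {
        accounted_for += size;
        if (size < accounted_for / 125)  // Same comparison as the new code.
          break;
        ++kept;
      }
      return kept;
    }

E.g. for sizes {512, 256, 4, 2, 1} the third bucket is dropped because
4 < (512 + 256 + 4) / 125 == 6, so only the first two buckets are kept.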