Add native memory accounting through custom allocator.
Added a custom allocator that lets you pass in a special tag which
specifies where the allocation came from. This is used when dumping
the measurements. The performance overhead is low since each
allocation/free only does an atomic add/sub.
The measurements are dumped to traces.txt during SIGQUIT.
Example output:
I/art (27274): AllocatorTagHeap active=120 max=120 total=168
I/art (27274): AllocatorTagMonitorList active=1572 max=6240 total=11724
I/art (27274): AllocatorTagClassTable active=185208 max=185208 total=268608
I/art (27274): AllocatorTagInternTable active=430368 max=430368 total=436080
I/art (27274): AllocatorTagMaps active=5616 max=6168 total=34392
I/art (27274): AllocatorTagLOS active=1024 max=1536 total=2044
I/art (27274): AllocatorTagSafeMap active=0 max=51936 total=533688
I/art (27274): AllocatorTagLOSMaps active=144 max=1248 total=5760
I/art (27274): AllocatorTagReferenceTable active=10944 max=11840 total=19136
I/art (27274): AllocatorTagHeapBitmap active=32 max=40 total=56
I/art (27274): AllocatorTagHeapBitmapLOS active=8 max=8 total=8
I/art (27274): AllocatorTagVerifier active=0 max=18844 total=1073156
I/art (27274): AllocatorTagModUnionCardSet active=5300 max=5920 total=56020
I/art (27274): AllocatorTagModUnionReferenceArray active=24864 max=24864 total=24864
I/art (27274): AllocatorTagJNILibrarires active=320 max=320 total=320
I/art (27274): AllocatorTagOatFile active=1400 max=1400 total=5852
Change-Id: Ibb470ef2e9c9a24563bb46422d46a55799704d82
(cherry picked from commit 5369c40f75fdcb1be7a7c06db212ce965c83a164)
diff --git a/runtime/verifier/register_line.h b/runtime/verifier/register_line.h
index b0018d2..06b7cca 100644
--- a/runtime/verifier/register_line.h
+++ b/runtime/verifier/register_line.h
@@ -293,14 +293,14 @@
private:
void CopyRegToLockDepth(size_t dst, size_t src) {
- SafeMap<uint32_t, uint32_t>::iterator it = reg_to_lock_depths_.find(src);
+ auto it = reg_to_lock_depths_.find(src);
if (it != reg_to_lock_depths_.end()) {
reg_to_lock_depths_.Put(dst, it->second);
}
}
bool IsSetLockDepth(size_t reg, size_t depth) {
- SafeMap<uint32_t, uint32_t>::iterator it = reg_to_lock_depths_.find(reg);
+ auto it = reg_to_lock_depths_.find(reg);
if (it != reg_to_lock_depths_.end()) {
return (it->second & (1 << depth)) != 0;
} else {
@@ -311,7 +311,7 @@
void SetRegToLockDepth(size_t reg, size_t depth) {
CHECK_LT(depth, 32u);
DCHECK(!IsSetLockDepth(reg, depth));
- SafeMap<uint32_t, uint32_t>::iterator it = reg_to_lock_depths_.find(reg);
+ auto it = reg_to_lock_depths_.find(reg);
if (it == reg_to_lock_depths_.end()) {
reg_to_lock_depths_.Put(reg, 1 << depth);
} else {
@@ -322,7 +322,7 @@
void ClearRegToLockDepth(size_t reg, size_t depth) {
CHECK_LT(depth, 32u);
DCHECK(IsSetLockDepth(reg, depth));
- SafeMap<uint32_t, uint32_t>::iterator it = reg_to_lock_depths_.find(reg);
+ auto it = reg_to_lock_depths_.find(reg);
DCHECK(it != reg_to_lock_depths_.end());
uint32_t depths = it->second ^ (1 << depth);
if (depths != 0) {
@@ -337,8 +337,7 @@
}
RegisterLine(size_t num_regs, MethodVerifier* verifier)
- : verifier_(verifier),
- num_regs_(num_regs) {
+ : verifier_(verifier), num_regs_(num_regs) {
memset(&line_, 0, num_regs_ * sizeof(uint16_t));
SetResultTypeToUnknown();
}
@@ -352,11 +351,11 @@
// Length of reg_types_
const uint32_t num_regs_;
// A stack of monitor enter locations
- std::vector<uint32_t> monitors_;
+ std::vector<uint32_t, TrackingAllocator<uint32_t, kAllocatorTagVerifier>> monitors_;
// A map from register to a bit vector of indices into the monitors_ stack. As we pop the monitor
// stack we verify that monitor-enter/exit are correctly nested. That is, if there was a
// monitor-enter on v5 and then on v6, we expect the monitor-exit to be on v6 then on v5
- SafeMap<uint32_t, uint32_t> reg_to_lock_depths_;
+ AllocationTrackingSafeMap<uint32_t, uint32_t, kAllocatorTagVerifier> reg_to_lock_depths_;
// An array of RegType Ids associated with each dex register.
uint16_t line_[0];