Fix numerous issues with DdmVmInternal allocation tracking
Issues addressed:
- Using without JDWP attached caused native crash.
- When buffer is full (64k entries), number of entries reported was 0.
- Disabling tracking after disabling tracking caused native crash.
- Asking for allocations after disabled caused native crash.
- Lock ordering issues between mutator lock and alloc tracker lock.
Adding 098-ddmc test to cover these cases.
Bug: 17392248
(cherry picked from commit a5815065ac0877add9c0db3605d27b4d6c426e61)
Change-Id: Ib0bc18dfcdafcc050ab9dceed3d167dd878d1d7a
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index 23caefc..f01ea0c 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -30,22 +30,24 @@
namespace art {
Mutex* Locks::abort_lock_ = nullptr;
+Mutex* Locks::alloc_tracker_lock_ = nullptr;
Mutex* Locks::allocated_monitor_ids_lock_ = nullptr;
Mutex* Locks::allocated_thread_ids_lock_ = nullptr;
ReaderWriterMutex* Locks::breakpoint_lock_ = nullptr;
ReaderWriterMutex* Locks::classlinker_classes_lock_ = nullptr;
+Mutex* Locks::deoptimization_lock_ = nullptr;
ReaderWriterMutex* Locks::heap_bitmap_lock_ = nullptr;
Mutex* Locks::jni_libraries_lock_ = nullptr;
Mutex* Locks::logging_lock_ = nullptr;
Mutex* Locks::mem_maps_lock_ = nullptr;
Mutex* Locks::modify_ldt_lock_ = nullptr;
ReaderWriterMutex* Locks::mutator_lock_ = nullptr;
+Mutex* Locks::profiler_lock_ = nullptr;
Mutex* Locks::runtime_shutdown_lock_ = nullptr;
Mutex* Locks::thread_list_lock_ = nullptr;
Mutex* Locks::thread_list_suspend_thread_lock_ = nullptr;
Mutex* Locks::thread_suspend_count_lock_ = nullptr;
Mutex* Locks::trace_lock_ = nullptr;
-Mutex* Locks::profiler_lock_ = nullptr;
Mutex* Locks::unexpected_signal_lock_ = nullptr;
Mutex* Locks::intern_table_lock_ = nullptr;
@@ -830,21 +832,23 @@
DCHECK(modify_ldt_lock_ == nullptr);
}
DCHECK(abort_lock_ != nullptr);
+ DCHECK(alloc_tracker_lock_ != nullptr);
DCHECK(allocated_monitor_ids_lock_ != nullptr);
DCHECK(allocated_thread_ids_lock_ != nullptr);
DCHECK(breakpoint_lock_ != nullptr);
DCHECK(classlinker_classes_lock_ != nullptr);
+ DCHECK(deoptimization_lock_ != nullptr);
DCHECK(heap_bitmap_lock_ != nullptr);
+ DCHECK(intern_table_lock_ != nullptr);
DCHECK(jni_libraries_lock_ != nullptr);
DCHECK(logging_lock_ != nullptr);
DCHECK(mutator_lock_ != nullptr);
+ DCHECK(profiler_lock_ != nullptr);
DCHECK(thread_list_lock_ != nullptr);
DCHECK(thread_list_suspend_thread_lock_ != nullptr);
DCHECK(thread_suspend_count_lock_ != nullptr);
DCHECK(trace_lock_ != nullptr);
- DCHECK(profiler_lock_ != nullptr);
DCHECK(unexpected_signal_lock_ != nullptr);
- DCHECK(intern_table_lock_ != nullptr);
} else {
// Create global locks in level order from highest lock level to lowest.
LockLevel current_lock_level = kThreadListSuspendThreadLock;
@@ -853,7 +857,12 @@
new Mutex("thread list suspend thread by .. lock", current_lock_level);
#define UPDATE_CURRENT_LOCK_LEVEL(new_level) \
- DCHECK_LT(new_level, current_lock_level); \
+ if (new_level >= current_lock_level) { \
+ /* Do not use CHECKs or FATAL here, abort_lock_ is not setup yet. */ \
+ fprintf(stderr, "New lock level %d is not less than current level %d\n", \
+ new_level, current_lock_level); \
+ exit(1); \
+ } \
current_lock_level = new_level;
UPDATE_CURRENT_LOCK_LEVEL(kMutatorLock);
@@ -876,6 +885,14 @@
DCHECK(trace_lock_ == nullptr);
trace_lock_ = new Mutex("trace lock", current_lock_level);
+ UPDATE_CURRENT_LOCK_LEVEL(kDeoptimizationLock);
+ DCHECK(deoptimization_lock_ == nullptr);
+ deoptimization_lock_ = new Mutex("Deoptimization lock", current_lock_level);
+
+ UPDATE_CURRENT_LOCK_LEVEL(kAllocTrackerLock);
+ DCHECK(alloc_tracker_lock_ == nullptr);
+ alloc_tracker_lock_ = new Mutex("AllocTracker lock", current_lock_level);
+
UPDATE_CURRENT_LOCK_LEVEL(kThreadListLock);
DCHECK(thread_list_lock_ == nullptr);
thread_list_lock_ = new Mutex("thread list lock", current_lock_level);
@@ -911,7 +928,6 @@
DCHECK(intern_table_lock_ == nullptr);
intern_table_lock_ = new Mutex("InternTable lock", current_lock_level);
-
UPDATE_CURRENT_LOCK_LEVEL(kAbortLock);
DCHECK(abort_lock_ == nullptr);
abort_lock_ = new Mutex("abort lock", current_lock_level, true);
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index 2a623fd..6642b1e 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -85,6 +85,7 @@
kJniLoadLibraryLock,
kThreadListLock,
kBreakpointInvokeLock,
+ kAllocTrackerLock,
kDeoptimizationLock,
kTraceLock,
kProfilerLock,
@@ -557,9 +558,17 @@
// Guards trace (ie traceview) requests.
static Mutex* trace_lock_ ACQUIRED_AFTER(profiler_lock_);
+ // Guards debugger recent allocation records.
+ static Mutex* alloc_tracker_lock_ ACQUIRED_AFTER(trace_lock_);
+
+ // Guards updates to instrumentation to ensure mutual exclusion of
+ // events like deoptimization requests.
+ // TODO: improve name, perhaps instrumentation_update_lock_.
+ static Mutex* deoptimization_lock_ ACQUIRED_AFTER(alloc_tracker_lock_);
+
// The thread_list_lock_ guards ThreadList::list_. It is also commonly held to stop threads
// attaching and detaching.
- static Mutex* thread_list_lock_ ACQUIRED_AFTER(trace_lock_);
+ static Mutex* thread_list_lock_ ACQUIRED_AFTER(deoptimization_lock_);
// Guards maintaining loading library data structures.
static Mutex* jni_libraries_lock_ ACQUIRED_AFTER(thread_list_lock_);
@@ -586,7 +595,7 @@
static Mutex* intern_table_lock_ ACQUIRED_AFTER(modify_ldt_lock_);
// Have an exclusive aborting thread.
- static Mutex* abort_lock_ ACQUIRED_AFTER(classlinker_classes_lock_);
+ static Mutex* abort_lock_ ACQUIRED_AFTER(intern_table_lock_);
// Allow mutual exclusion when manipulating Thread::suspend_count_.
// TODO: Does the trade-off of a per-thread lock make sense?
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index b3c887e..488e6e7 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -61,7 +61,15 @@
namespace art {
static const size_t kMaxAllocRecordStackDepth = 16; // Max 255.
-static const size_t kDefaultNumAllocRecords = 64*1024; // Must be a power of 2.
+static const size_t kDefaultNumAllocRecords = 64*1024; // Must be a power of 2. 2BE can hold 64k-1.
+
+// Limit alloc_record_count to the 2BE value that is the limit of the current protocol.
+static uint16_t CappedAllocRecordCount(size_t alloc_record_count) {
+ if (alloc_record_count > 0xffff) {
+ return 0xffff;
+ }
+ return alloc_record_count;
+}
class AllocRecordStackTraceElement {
public:
@@ -116,9 +124,10 @@
}
void Dbg::TypeCache::Clear() {
- ScopedObjectAccess soa(Thread::Current());
+ JavaVMExt* vm = Runtime::Current()->GetJavaVM();
+ Thread* self = Thread::Current();
for (const auto& p : objects_) {
- soa.Vm()->DeleteWeakGlobalRef(soa.Self(), p.second);
+ vm->DeleteWeakGlobalRef(self, p.second);
}
objects_.clear();
}
@@ -131,8 +140,9 @@
return down_cast<mirror::Class*>(Thread::Current()->DecodeJObject(type_));
}
- void SetType(mirror::Class* t) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- type_ = Dbg::GetTypeCache()->Add(t);
+ void SetType(mirror::Class* t) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_,
+ Locks::alloc_tracker_lock_) {
+ type_ = Dbg::type_cache_.Add(t);
}
size_t GetDepth() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -304,7 +314,6 @@
static ObjectRegistry* gRegistry = nullptr;
// Recent allocation tracking.
-Mutex* Dbg::alloc_tracker_lock_ = nullptr;
AllocRecord* Dbg::recent_allocation_records_ = nullptr; // TODO: CircularBuffer<AllocRecord>
size_t Dbg::alloc_record_max_ = 0;
size_t Dbg::alloc_record_head_ = 0;
@@ -312,7 +321,6 @@
Dbg::TypeCache Dbg::type_cache_;
// Deoptimization support.
-Mutex* Dbg::deoptimization_lock_ = nullptr;
std::vector<DeoptimizationRequest> Dbg::deoptimization_requests_;
size_t Dbg::full_deoptimization_event_count_ = 0;
size_t Dbg::delayed_full_undeoptimization_count_ = 0;
@@ -642,8 +650,6 @@
CHECK(gRegistry == nullptr);
gRegistry = new ObjectRegistry;
- alloc_tracker_lock_ = new Mutex("AllocTracker lock");
- deoptimization_lock_ = new Mutex("deoptimization lock", kDeoptimizationLock);
// Init JDWP if the debugger is enabled. This may connect out to a
// debugger, passively listen for a debugger, or block waiting for a
// debugger.
@@ -677,10 +683,6 @@
gJdwpState = nullptr;
delete gRegistry;
gRegistry = nullptr;
- delete alloc_tracker_lock_;
- alloc_tracker_lock_ = nullptr;
- delete deoptimization_lock_;
- deoptimization_lock_ = nullptr;
}
void Dbg::GcDidFinish() {
@@ -747,7 +749,7 @@
}
{
- MutexLock mu(Thread::Current(), *deoptimization_lock_);
+ MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
CHECK_EQ(deoptimization_requests_.size(), 0U);
CHECK_EQ(full_deoptimization_event_count_, 0U);
CHECK_EQ(delayed_full_undeoptimization_count_, 0U);
@@ -792,7 +794,7 @@
// Since we're going to disable deoptimization, we clear the deoptimization requests queue.
// This prevents us from having any pending deoptimization request when the debugger attaches
// to us again while no event has been requested yet.
- MutexLock mu(Thread::Current(), *deoptimization_lock_);
+ MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
deoptimization_requests_.clear();
full_deoptimization_event_count_ = 0U;
delayed_full_undeoptimization_count_ = 0U;
@@ -2922,7 +2924,7 @@
}
void Dbg::DelayFullUndeoptimization() {
- MutexLock mu(Thread::Current(), *deoptimization_lock_);
+ MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
++delayed_full_undeoptimization_count_;
DCHECK_LE(delayed_full_undeoptimization_count_, full_deoptimization_event_count_);
}
@@ -2930,7 +2932,7 @@
void Dbg::ProcessDelayedFullUndeoptimizations() {
// TODO: avoid taking the lock twice (once here and once in ManageDeoptimization).
{
- MutexLock mu(Thread::Current(), *deoptimization_lock_);
+ MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
while (delayed_full_undeoptimization_count_ > 0) {
DeoptimizationRequest req;
req.SetKind(DeoptimizationRequest::kFullUndeoptimization);
@@ -2947,7 +2949,7 @@
// Nothing to do.
return;
}
- MutexLock mu(Thread::Current(), *deoptimization_lock_);
+ MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
RequestDeoptimizationLocked(req);
}
@@ -3025,7 +3027,7 @@
Thread* const self = Thread::Current();
{
// Avoid suspend/resume if there is no pending request.
- MutexLock mu(self, *deoptimization_lock_);
+ MutexLock mu(self, *Locks::deoptimization_lock_);
if (deoptimization_requests_.empty()) {
return;
}
@@ -3037,7 +3039,7 @@
runtime->GetThreadList()->SuspendAll();
const ThreadState old_state = self->SetStateUnsafe(kRunnable);
{
- MutexLock mu(self, *deoptimization_lock_);
+ MutexLock mu(self, *Locks::deoptimization_lock_);
size_t req_index = 0;
for (DeoptimizationRequest& request : deoptimization_requests_) {
VLOG(jdwp) << "Process deoptimization request #" << req_index++;
@@ -4318,30 +4320,40 @@
return kDefaultNumAllocRecords;
}
-void Dbg::SetAllocTrackingEnabled(bool enabled) {
- if (enabled) {
+void Dbg::SetAllocTrackingEnabled(bool enable) {
+ Thread* self = Thread::Current();
+ if (enable) {
{
- MutexLock mu(Thread::Current(), *alloc_tracker_lock_);
- if (recent_allocation_records_ == nullptr) {
- alloc_record_max_ = GetAllocTrackerMax();
- LOG(INFO) << "Enabling alloc tracker (" << alloc_record_max_ << " entries of "
- << kMaxAllocRecordStackDepth << " frames, taking "
- << PrettySize(sizeof(AllocRecord) * alloc_record_max_) << ")";
- alloc_record_head_ = alloc_record_count_ = 0;
- recent_allocation_records_ = new AllocRecord[alloc_record_max_];
- CHECK(recent_allocation_records_ != nullptr);
+ MutexLock mu(self, *Locks::alloc_tracker_lock_);
+ if (recent_allocation_records_ != nullptr) {
+ return; // Already enabled, bail.
}
+ alloc_record_max_ = GetAllocTrackerMax();
+ LOG(INFO) << "Enabling alloc tracker (" << alloc_record_max_ << " entries of "
+ << kMaxAllocRecordStackDepth << " frames, taking "
+ << PrettySize(sizeof(AllocRecord) * alloc_record_max_) << ")";
+ DCHECK_EQ(alloc_record_head_, 0U);
+ DCHECK_EQ(alloc_record_count_, 0U);
+ recent_allocation_records_ = new AllocRecord[alloc_record_max_];
+ CHECK(recent_allocation_records_ != nullptr);
}
Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints();
} else {
- Runtime::Current()->GetInstrumentation()->UninstrumentQuickAllocEntryPoints();
{
- MutexLock mu(Thread::Current(), *alloc_tracker_lock_);
+ ScopedObjectAccess soa(self); // For type_cache_.Clear();
+ MutexLock mu(self, *Locks::alloc_tracker_lock_);
+ if (recent_allocation_records_ == nullptr) {
+ return; // Already disabled, bail.
+ }
LOG(INFO) << "Disabling alloc tracker";
delete[] recent_allocation_records_;
recent_allocation_records_ = nullptr;
+ alloc_record_head_ = 0;
+ alloc_record_count_ = 0;
type_cache_.Clear();
}
+ // If an allocation comes in before we uninstrument, we will safely drop it on the floor.
+ Runtime::Current()->GetInstrumentation()->UninstrumentQuickAllocEntryPoints();
}
}
@@ -4381,8 +4393,9 @@
Thread* self = Thread::Current();
CHECK(self != nullptr);
- MutexLock mu(self, *alloc_tracker_lock_);
+ MutexLock mu(self, *Locks::alloc_tracker_lock_);
if (recent_allocation_records_ == nullptr) {
+ // In the process of shutting down recording, bail.
return;
}
@@ -4408,12 +4421,12 @@
// Returns the index of the head element.
//
-// We point at the most-recently-written record, so if gAllocRecordCount is 1
+// We point at the most-recently-written record, so if alloc_record_count_ is 1
// we want to use the current element. Take "head+1" and subtract count
// from it.
//
// We need to handle underflow in our circular buffer, so we add
-// gAllocRecordMax and then mask it back down.
+// alloc_record_max_ and then mask it back down.
size_t Dbg::HeadIndex() {
return (Dbg::alloc_record_head_ + 1 + Dbg::alloc_record_max_ - Dbg::alloc_record_count_) &
(Dbg::alloc_record_max_ - 1);
@@ -4421,7 +4434,7 @@
void Dbg::DumpRecentAllocations() {
ScopedObjectAccess soa(Thread::Current());
- MutexLock mu(soa.Self(), *alloc_tracker_lock_);
+ MutexLock mu(soa.Self(), *Locks::alloc_tracker_lock_);
if (recent_allocation_records_ == nullptr) {
LOG(INFO) << "Not recording tracked allocations";
return;
@@ -4430,7 +4443,8 @@
// "i" is the head of the list. We want to start at the end of the
// list and move forward to the tail.
size_t i = HeadIndex();
- size_t count = alloc_record_count_;
+ const uint16_t capped_count = CappedAllocRecordCount(Dbg::alloc_record_count_);
+ uint16_t count = capped_count;
LOG(INFO) << "Tracked allocations, (head=" << alloc_record_head_ << " count=" << count << ")";
while (count--) {
@@ -4534,7 +4548,7 @@
* followed by UTF-16 data.
*
* We send up 16-bit unsigned indexes into string tables. In theory there
- * can be (kMaxAllocRecordStackDepth * gAllocRecordMax) unique strings in
+ * can be (kMaxAllocRecordStackDepth * alloc_record_max_) unique strings in
* each table, but in practice there should be far fewer.
*
* The chief reason for using a string table here is to keep the size of
@@ -4554,7 +4568,7 @@
Thread* self = Thread::Current();
std::vector<uint8_t> bytes;
{
- MutexLock mu(self, *alloc_tracker_lock_);
+ MutexLock mu(self, *Locks::alloc_tracker_lock_);
//
// Part 1: generate string tables.
//
@@ -4562,8 +4576,9 @@
StringTable method_names;
StringTable filenames;
- int count = alloc_record_count_;
- int idx = HeadIndex();
+ const uint16_t capped_count = CappedAllocRecordCount(Dbg::alloc_record_count_);
+ uint16_t count = capped_count;
+ size_t idx = HeadIndex();
while (count--) {
AllocRecord* record = &recent_allocation_records_[idx];
std::string temp;
@@ -4580,7 +4595,7 @@
idx = (idx + 1) & (alloc_record_max_ - 1);
}
- LOG(INFO) << "allocation records: " << alloc_record_count_;
+ LOG(INFO) << "allocation records: " << capped_count;
//
// Part 2: Generate the output and store it in the buffer.
@@ -4601,7 +4616,7 @@
// (2b) number of class name strings
// (2b) number of method name strings
// (2b) number of source file name strings
- JDWP::Append2BE(bytes, alloc_record_count_);
+ JDWP::Append2BE(bytes, capped_count);
size_t string_table_offset = bytes.size();
JDWP::Append4BE(bytes, 0); // We'll patch this later...
JDWP::Append2BE(bytes, class_names.Size());
@@ -4610,7 +4625,7 @@
idx = HeadIndex();
std::string temp;
- for (count = alloc_record_count_; count != 0; --count) {
+ for (count = capped_count; count != 0; --count) {
// For each entry:
// (4b) total allocation size
// (2b) thread id
diff --git a/runtime/debugger.h b/runtime/debugger.h
index 52ae7a9..3e16288 100644
--- a/runtime/debugger.h
+++ b/runtime/debugger.h
@@ -192,9 +192,11 @@
class TypeCache {
public:
// Returns a weak global for the input type. Deduplicates.
- jobject Add(mirror::Class* t) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ jobject Add(mirror::Class* t) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_,
+ Locks::alloc_tracker_lock_);
// Clears the type cache and deletes all the weak global refs.
- void Clear();
+ void Clear() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_,
+ Locks::alloc_tracker_lock_);
private:
std::multimap<int32_t, jobject> objects_;
@@ -221,8 +223,8 @@
*/
static void Connected();
static void GoActive()
- LOCKS_EXCLUDED(Locks::breakpoint_lock_, deoptimization_lock_, Locks::mutator_lock_);
- static void Disconnected() LOCKS_EXCLUDED(deoptimization_lock_, Locks::mutator_lock_);
+ LOCKS_EXCLUDED(Locks::breakpoint_lock_, Locks::deoptimization_lock_, Locks::mutator_lock_);
+ static void Disconnected() LOCKS_EXCLUDED(Locks::deoptimization_lock_, Locks::mutator_lock_);
static void Disposed();
// Returns true if we're actually debugging with a real debugger, false if it's
@@ -493,20 +495,20 @@
// Records deoptimization request in the queue.
static void RequestDeoptimization(const DeoptimizationRequest& req)
- LOCKS_EXCLUDED(deoptimization_lock_)
+ LOCKS_EXCLUDED(Locks::deoptimization_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Support delayed full undeoptimization requests. This is currently only used for single-step
// events.
- static void DelayFullUndeoptimization() LOCKS_EXCLUDED(deoptimization_lock_);
+ static void DelayFullUndeoptimization() LOCKS_EXCLUDED(Locks::deoptimization_lock_);
static void ProcessDelayedFullUndeoptimizations()
- LOCKS_EXCLUDED(deoptimization_lock_)
+ LOCKS_EXCLUDED(Locks::deoptimization_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Manage deoptimization after updating JDWP events list. Suspends all threads, processes each
// request and finally resumes all threads.
static void ManageDeoptimization()
- LOCKS_EXCLUDED(deoptimization_lock_)
+ LOCKS_EXCLUDED(Locks::deoptimization_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Breakpoints.
@@ -560,17 +562,17 @@
* Recent allocation tracking support.
*/
static void RecordAllocation(mirror::Class* type, size_t byte_count)
- LOCKS_EXCLUDED(alloc_tracker_lock_)
+ LOCKS_EXCLUDED(Locks::alloc_tracker_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static void SetAllocTrackingEnabled(bool enabled) LOCKS_EXCLUDED(alloc_tracker_lock_);
+ static void SetAllocTrackingEnabled(bool enabled) LOCKS_EXCLUDED(Locks::alloc_tracker_lock_);
static bool IsAllocTrackingEnabled() {
return recent_allocation_records_ != nullptr;
}
static jbyteArray GetRecentAllocations()
- LOCKS_EXCLUDED(alloc_tracker_lock_)
+ LOCKS_EXCLUDED(Locks::alloc_tracker_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static size_t HeadIndex() EXCLUSIVE_LOCKS_REQUIRED(alloc_tracker_lock_);
- static void DumpRecentAllocations() LOCKS_EXCLUDED(alloc_tracker_lock_);
+ static size_t HeadIndex() EXCLUSIVE_LOCKS_REQUIRED(Locks::alloc_tracker_lock_);
+ static void DumpRecentAllocations() LOCKS_EXCLUDED(Locks::alloc_tracker_lock_);
enum HpifWhen {
HPIF_WHEN_NEVER = 0,
@@ -596,10 +598,6 @@
static void DdmSendHeapSegments(bool native)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static TypeCache* GetTypeCache() {
- return &type_cache_;
- }
-
private:
static void DdmBroadcast(bool connect) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static void PostThreadStartOrStop(Thread*, uint32_t)
@@ -617,52 +615,47 @@
EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
static void RequestDeoptimizationLocked(const DeoptimizationRequest& req)
- EXCLUSIVE_LOCKS_REQUIRED(deoptimization_lock_)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::deoptimization_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static Mutex* alloc_tracker_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
-
- static AllocRecord* recent_allocation_records_ PT_GUARDED_BY(alloc_tracker_lock_);
- static size_t alloc_record_max_ GUARDED_BY(alloc_tracker_lock_);
- static size_t alloc_record_head_ GUARDED_BY(alloc_tracker_lock_);
- static size_t alloc_record_count_ GUARDED_BY(alloc_tracker_lock_);
-
- // Guards deoptimization requests.
- // TODO rename to instrumentation_update_lock.
- static Mutex* deoptimization_lock_ ACQUIRED_AFTER(Locks::breakpoint_lock_);
+ static AllocRecord* recent_allocation_records_ PT_GUARDED_BY(Locks::alloc_tracker_lock_);
+ static size_t alloc_record_max_ GUARDED_BY(Locks::alloc_tracker_lock_);
+ static size_t alloc_record_head_ GUARDED_BY(Locks::alloc_tracker_lock_);
+ static size_t alloc_record_count_ GUARDED_BY(Locks::alloc_tracker_lock_);
// Deoptimization requests to be processed each time the event list is updated. This is used when
// registering and unregistering events so we do not deoptimize while holding the event list
// lock.
// TODO rename to instrumentation_requests.
- static std::vector<DeoptimizationRequest> deoptimization_requests_ GUARDED_BY(deoptimization_lock_);
+ static std::vector<DeoptimizationRequest> deoptimization_requests_ GUARDED_BY(Locks::deoptimization_lock_);
// Count the number of events requiring full deoptimization. When the counter is > 0, everything
// is deoptimized, otherwise everything is undeoptimized.
// Note: we fully deoptimize on the first event only (when the counter is set to 1). We fully
// undeoptimize when the last event is unregistered (when the counter is set to 0).
- static size_t full_deoptimization_event_count_ GUARDED_BY(deoptimization_lock_);
+ static size_t full_deoptimization_event_count_ GUARDED_BY(Locks::deoptimization_lock_);
// Count the number of full undeoptimization requests delayed to next resume or end of debug
// session.
- static size_t delayed_full_undeoptimization_count_ GUARDED_BY(deoptimization_lock_);
+ static size_t delayed_full_undeoptimization_count_ GUARDED_BY(Locks::deoptimization_lock_);
static size_t* GetReferenceCounterForEvent(uint32_t instrumentation_event);
// Weak global type cache, TODO improve this.
- static TypeCache type_cache_;
+ static TypeCache type_cache_ GUARDED_BY(Locks::alloc_tracker_lock_);
// Instrumentation event reference counters.
// TODO we could use an array instead of having all these dedicated counters. Instrumentation
// events are bits of a mask so we could convert them to array index.
- static size_t dex_pc_change_event_ref_count_ GUARDED_BY(deoptimization_lock_);
- static size_t method_enter_event_ref_count_ GUARDED_BY(deoptimization_lock_);
- static size_t method_exit_event_ref_count_ GUARDED_BY(deoptimization_lock_);
- static size_t field_read_event_ref_count_ GUARDED_BY(deoptimization_lock_);
- static size_t field_write_event_ref_count_ GUARDED_BY(deoptimization_lock_);
- static size_t exception_catch_event_ref_count_ GUARDED_BY(deoptimization_lock_);
+ static size_t dex_pc_change_event_ref_count_ GUARDED_BY(Locks::deoptimization_lock_);
+ static size_t method_enter_event_ref_count_ GUARDED_BY(Locks::deoptimization_lock_);
+ static size_t method_exit_event_ref_count_ GUARDED_BY(Locks::deoptimization_lock_);
+ static size_t field_read_event_ref_count_ GUARDED_BY(Locks::deoptimization_lock_);
+ static size_t field_write_event_ref_count_ GUARDED_BY(Locks::deoptimization_lock_);
+ static size_t exception_catch_event_ref_count_ GUARDED_BY(Locks::deoptimization_lock_);
static uint32_t instrumentation_events_ GUARDED_BY(Locks::mutator_lock_);
+ friend class AllocRecord; // For type_cache_ with proper annotalysis.
DISALLOW_COPY_AND_ASSIGN(Dbg);
};
diff --git a/test/098-ddmc/expected.txt b/test/098-ddmc/expected.txt
new file mode 100644
index 0000000..f8cda4c
--- /dev/null
+++ b/test/098-ddmc/expected.txt
@@ -0,0 +1,23 @@
+Confirm empty
+empty=Allocations[message header len: 15 entry header len: 9 stack frame len: 8 number of entries: 0 offset to string table from start of message: 15 number of class name strings: 0 number of method name strings: 0 number of source file name strings: 0]
+Confirm enable
+status=false
+status=true
+Capture some allocations (note just this causes allocations)
+before > 0=true
+Confirm when we overflow, we don't roll over to zero. b/17392248
+before < overflowAllocations=true
+after > before=true
+after.numberOfEntries=65535
+Disable and confirm back to empty
+status=false
+reset=Allocations[message header len: 15 entry header len: 9 stack frame len: 8 number of entries: 0 offset to string table from start of message: 15 number of class name strings: 0 number of method name strings: 0 number of source file name strings: 0]
+Confirm we can disable twice in a row
+status=false
+status=false
+Confirm we can reenable twice in a row without losing allocations
+status=true
+status=true
+second > first =true
+Goodbye
+goodbye=Allocations[message header len: 15 entry header len: 9 stack frame len: 8 number of entries: 0 offset to string table from start of message: 15 number of class name strings: 0 number of method name strings: 0 number of source file name strings: 0]
diff --git a/test/098-ddmc/info.txt b/test/098-ddmc/info.txt
new file mode 100644
index 0000000..39d26db
--- /dev/null
+++ b/test/098-ddmc/info.txt
@@ -0,0 +1 @@
+Tests of private org.apache.harmony.dalvik.ddmc.* APIs used for ddms support.
diff --git a/test/098-ddmc/src/Main.java b/test/098-ddmc/src/Main.java
new file mode 100644
index 0000000..962bd7f
--- /dev/null
+++ b/test/098-ddmc/src/Main.java
@@ -0,0 +1,154 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Method;
+import java.nio.ByteBuffer;
+
+public class Main {
+ public static void main(String[] args) throws Exception {
+ String name = System.getProperty("java.vm.name");
+ if (!"Dalvik".equals(name)) {
+ System.out.println("This test is not supported on " + name);
+ return;
+ }
+ testRecentAllocationTracking();
+ }
+
+ private static void testRecentAllocationTracking() throws Exception {
+ System.out.println("Confirm empty");
+ Allocations empty = new Allocations(DdmVmInternal.getRecentAllocations());
+ System.out.println("empty=" + empty);
+
+ System.out.println("Confirm enable");
+ System.out.println("status=" + DdmVmInternal.getRecentAllocationStatus());
+ DdmVmInternal.enableRecentAllocations(true);
+ System.out.println("status=" + DdmVmInternal.getRecentAllocationStatus());
+
+ System.out.println("Capture some allocations (note just this causes allocations)");
+ Allocations before = new Allocations(DdmVmInternal.getRecentAllocations());
+ System.out.println("before > 0=" + (before.numberOfEntries > 0));
+
+ System.out.println("Confirm when we overflow, we don't roll over to zero. b/17392248");
+ final int overflowAllocations = 64 * 1024; // Won't fit in unsigned 16-bit value.
+ for (int i = 0; i < overflowAllocations; i++) {
+ new String("fnord");
+ }
+ Allocations after = new Allocations(DdmVmInternal.getRecentAllocations());
+ System.out.println("before < overflowAllocations=" + (before.numberOfEntries < overflowAllocations));
+ System.out.println("after > before=" + (after.numberOfEntries > before.numberOfEntries));
+ System.out.println("after.numberOfEntries=" + after.numberOfEntries);
+
+ System.out.println("Disable and confirm back to empty");
+ DdmVmInternal.enableRecentAllocations(false);
+ System.out.println("status=" + DdmVmInternal.getRecentAllocationStatus());
+ Allocations reset = new Allocations(DdmVmInternal.getRecentAllocations());
+ System.out.println("reset=" + reset);
+
+ System.out.println("Confirm we can disable twice in a row");
+ DdmVmInternal.enableRecentAllocations(false);
+ System.out.println("status=" + DdmVmInternal.getRecentAllocationStatus());
+ DdmVmInternal.enableRecentAllocations(false);
+ System.out.println("status=" + DdmVmInternal.getRecentAllocationStatus());
+
+ System.out.println("Confirm we can reenable twice in a row without losing allocations");
+ DdmVmInternal.enableRecentAllocations(true);
+ System.out.println("status=" + DdmVmInternal.getRecentAllocationStatus());
+ for (int i = 0; i < 16 * 1024; i++) {
+ new String("fnord");
+ }
+ Allocations first = new Allocations(DdmVmInternal.getRecentAllocations());
+ DdmVmInternal.enableRecentAllocations(true);
+ System.out.println("status=" + DdmVmInternal.getRecentAllocationStatus());
+ Allocations second = new Allocations(DdmVmInternal.getRecentAllocations());
+ System.out.println("second > first =" + (second.numberOfEntries > first.numberOfEntries));
+
+ System.out.println("Goodbye");
+ DdmVmInternal.enableRecentAllocations(false);
+ Allocations goodbye = new Allocations(DdmVmInternal.getRecentAllocations());
+ System.out.println("goodbye=" + goodbye);
+ }
+
+ private static class Allocations {
+ final int messageHeaderLen;
+ final int entryHeaderLen;
+ final int stackFrameLen;
+ final int numberOfEntries;
+ final int offsetToStringTableFromStartOfMessage;
+ final int numberOfClassNameStrings;
+ final int numberOfMethodNameStrings;
+ final int numberOfSourceFileNameStrings;
+
+ Allocations(byte[] allocations) {
+ ByteBuffer b = ByteBuffer.wrap(allocations);
+ messageHeaderLen = b.get() & 0xff;
+ if (messageHeaderLen != 15) {
+ throw new IllegalArgumentException("Unexpected messageHeaderLen " + messageHeaderLen);
+ }
+ entryHeaderLen = b.get() & 0xff;
+ if (entryHeaderLen != 9) {
+ throw new IllegalArgumentException("Unexpected entryHeaderLen " + entryHeaderLen);
+ }
+ stackFrameLen = b.get() & 0xff;
+ if (stackFrameLen != 8) {
+ throw new IllegalArgumentException("Unexpected stackFrameLen " + stackFrameLen);
+ }
+ numberOfEntries = b.getShort() & 0xffff;
+ offsetToStringTableFromStartOfMessage = b.getInt();
+ numberOfClassNameStrings = b.getShort() & 0xffff;
+ numberOfMethodNameStrings = b.getShort() & 0xffff;
+ numberOfSourceFileNameStrings = b.getShort() & 0xffff;
+ }
+
+ public String toString() {
+ return ("Allocations[message header len: " + messageHeaderLen +
+ " entry header len: " + entryHeaderLen +
+ " stack frame len: " + stackFrameLen +
+ " number of entries: " + numberOfEntries +
+ " offset to string table from start of message: " + offsetToStringTableFromStartOfMessage +
+ " number of class name strings: " + numberOfClassNameStrings +
+ " number of method name strings: " + numberOfMethodNameStrings +
+ " number of source file name strings: " + numberOfSourceFileNameStrings +
+ "]");
+ }
+ }
+
+ private static class DdmVmInternal {
+ private static final Method enableRecentAllocationsMethod;
+ private static final Method getRecentAllocationStatusMethod;
+ private static final Method getRecentAllocationsMethod;
+ static {
+ try {
+ Class c = Class.forName("org.apache.harmony.dalvik.ddmc.DdmVmInternal");
+ enableRecentAllocationsMethod = c.getDeclaredMethod("enableRecentAllocations",
+ Boolean.TYPE);
+ getRecentAllocationStatusMethod = c.getDeclaredMethod("getRecentAllocationStatus");
+ getRecentAllocationsMethod = c.getDeclaredMethod("getRecentAllocations");
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ public static void enableRecentAllocations(boolean enable) throws Exception {
+ enableRecentAllocationsMethod.invoke(null, enable);
+ }
+ public static boolean getRecentAllocationStatus() throws Exception {
+ return (boolean) getRecentAllocationStatusMethod.invoke(null);
+ }
+ public static byte[] getRecentAllocations() throws Exception {
+ return (byte[]) getRecentAllocationsMethod.invoke(null);
+ }
+ }
+}