Hold mutator lock in DdmSendHeapSegments for all spaces

Previously we were releasing the mutator lock in DdmSendHeapSegments
and only reacquiring it for RosAlloc spaces. This was causing problems
since HeapChunkCallback accesses object fields through mirror::Object,
which requires the mutator lock to be held.
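
To make the locking dance easier to follow, below is a minimal
standalone sketch of the shared -> exclusive -> shared transition this
patch performs for RosAlloc spaces. std::shared_mutex stands in for
ART's mutator lock and heap bitmap lock; the names, and the reduction
of SuspendAll to an exclusive lock acquisition, are simplifications
for illustration, not ART APIs:

  #include <shared_mutex>

  std::shared_mutex mutator_lock;     // shared = runnable mutators,
                                      // exclusive = world stopped
  std::shared_mutex heap_bitmap_lock;

  void WalkRosAllocSpace() { /* inspect chunks; world is stopped */ }

  // Caller holds mutator_lock shared, as DdmSendHeapSegments does.
  void WalkWithWorldStopped() {
    mutator_lock.unlock_shared();  // TransitionFromRunnableToSuspended
    mutator_lock.lock();           // roughly what ThreadList::SuspendAll
                                   // amounts to: exclusive mutator lock
    {
      // The heap bitmap lock is taken only after the mutator lock,
      // matching the lock ordering the new comment describes.
      std::shared_lock<std::shared_mutex> mu(heap_bitmap_lock);
      WalkRosAllocSpace();
    }
    mutator_lock.unlock();         // ThreadList::ResumeAll
    mutator_lock.lock_shared();    // TransitionFromSuspendedToRunnable
  }

  int main() {
    mutator_lock.lock_shared();    // enter the "runnable" state
    WalkWithWorldStopped();
    mutator_lock.unlock_shared();
  }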

Bug: 17950534

(cherry picked from commit d6527cf8e824d9057f32755f2ff4bdcf46c7095b)

Change-Id: Idb307fd4c01450a07e3c9621e04d2aabf2c6a0b9
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 96b44bf..9d2ca7c 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -4364,11 +4364,7 @@
 
   Thread* self = Thread::Current();
 
-  // To allow the Walk/InspectAll() below to exclusively-lock the
-  // mutator lock, temporarily release the shared access to the
-  // mutator lock here by transitioning to the suspended state.
   Locks::mutator_lock_->AssertSharedHeld(self);
-  self->TransitionFromRunnableToSuspended(kSuspended);
 
   // Send a series of heap segment chunks.
   HeapChunkContext context((what == HPSG_WHAT_MERGED_OBJECTS), native);
@@ -4382,32 +4378,39 @@
     gc::Heap* heap = Runtime::Current()->GetHeap();
     for (const auto& space : heap->GetContinuousSpaces()) {
       if (space->IsDlMallocSpace()) {
+        ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
         // dlmalloc's chunk header is 2 * sizeof(size_t), but if the previous chunk is in use for an
         // allocation then the first sizeof(size_t) may belong to it.
         context.SetChunkOverhead(sizeof(size_t));
         space->AsDlMallocSpace()->Walk(HeapChunkContext::HeapChunkCallback, &context);
       } else if (space->IsRosAllocSpace()) {
         context.SetChunkOverhead(0);
-        space->AsRosAllocSpace()->Walk(HeapChunkContext::HeapChunkCallback, &context);
+        // Need to acquire the mutator lock before the heap bitmap lock with exclusive access since
+        // RosAlloc's internal logic doesn't know to release and reacquire the heap bitmap lock.
+        self->TransitionFromRunnableToSuspended(kSuspended);
+        ThreadList* tl = Runtime::Current()->GetThreadList();
+        tl->SuspendAll();
+        {
+          ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
+          space->AsRosAllocSpace()->Walk(HeapChunkContext::HeapChunkCallback, &context);
+        }
+        tl->ResumeAll();
+        self->TransitionFromSuspendedToRunnable();
       } else if (space->IsBumpPointerSpace()) {
+        ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
         context.SetChunkOverhead(0);
-        ReaderMutexLock mu(self, *Locks::mutator_lock_);
-        WriterMutexLock mu2(self, *Locks::heap_bitmap_lock_);
         space->AsBumpPointerSpace()->Walk(BumpPointerSpaceCallback, &context);
       } else {
         UNIMPLEMENTED(WARNING) << "Not counting objects in space " << *space;
       }
       context.ResetStartOfNextChunk();
     }
+    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
     // Walk the large objects, these are not in the AllocSpace.
     context.SetChunkOverhead(0);
     heap->GetLargeObjectsSpace()->Walk(HeapChunkContext::HeapChunkCallback, &context);
   }
 
-  // Shared-lock the mutator lock back.
-  self->TransitionFromSuspendedToRunnable();
-  Locks::mutator_lock_->AssertSharedHeld(self);
-
   // Finally, send a heap end chunk.
   Dbg::DdmSendChunk(native ? CHUNK_TYPE("NHEN") : CHUNK_TYPE("HPEN"), sizeof(heap_id), heap_id);
 }
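
The resulting locking scheme: with the top-level transition to
kSuspended gone, DdmSendHeapSegments keeps its shared hold on the
mutator lock for the entire walk, so HeapChunkCallback can always read
mirror::Object fields safely. The DlMalloc, bump pointer, and large
object walks additionally take heap_bitmap_lock_ for reading. RosAlloc
is the one exception: the exclusive mutator lock must be acquired
before the heap bitmap lock, and RosAlloc's walk will not release and
reacquire the bitmap lock on its own, so the thread suspends itself
and stops the world with SuspendAll before walking that space.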