Assert no thread-to-mark-stack mappings after switching to shared mode
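
Once the mark-stack mode has switched from thread-local to shared, no mutator
should need a thread-local mark stack, so the thread -> mark-stack map must be
empty as soon as the thread-local mark stacks have been revoked. Assert this,
and factor the existing emptiness CHECKs into AssertEmptyThreadMarkStackMap(),
which dumps the offending mappings before aborting.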

Bug: 144408451
Test: art/test/testrunner/testrunner.py --host -t 004-ThreadStress
Change-Id: I6e7eddb138ba90a2c4bef7a39e48036ff5184e38
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 7260231..67383c4 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -953,6 +953,19 @@
   thread_mark_stack_map_.erase(it);
 }
 
+// Dump the thread -> mark-stack mappings and abort if the map is non-empty.
+void ConcurrentCopying::AssertEmptyThreadMarkStackMap() {
+  if (!thread_mark_stack_map_.empty()) {
+    LOG(FATAL_WITHOUT_ABORT) << "thread_mark_stack_map not empty. size:"
+                             << thread_mark_stack_map_.size()
+                             << " Mappings:";
+    for (const auto& mapping : thread_mark_stack_map_) {
+      LOG(FATAL_WITHOUT_ABORT) << "thread:" << mapping.first << " mark-stack:" << mapping.second;
+    }
+    LOG(FATAL) << "pooled_mark_stacks size:" << pooled_mark_stacks_.size();
+  }
+}
+
 void ConcurrentCopying::AssertNoThreadMarkStackMapping(Thread* thread) {
   MutexLock mu(Thread::Current(), mark_stack_lock_);
   CHECK(thread_mark_stack_map_.find(thread) == thread_mark_stack_map_.end());
@@ -1248,10 +1261,7 @@
   {
     MutexLock mu(thread_running_gc_, mark_stack_lock_);
     CHECK(revoked_mark_stacks_.empty());
-    CHECK(thread_mark_stack_map_.empty()) << " size:"
-                                          << thread_mark_stack_map_.size()
-                                          << " pooled_mark_stacks size:"
-                                          << pooled_mark_stacks_.size();
+    AssertEmptyThreadMarkStackMap();
     CHECK_EQ(pooled_mark_stacks_.size(), kMarkStackPoolSize);
   }
 
@@ -2093,10 +2103,7 @@
     {
       MutexLock mu(thread_running_gc_, mark_stack_lock_);
       CHECK(revoked_mark_stacks_.empty());
-      CHECK(thread_mark_stack_map_.empty()) << " size:"
-                                            << thread_mark_stack_map_.size()
-                                            << " pooled_mark_stacks size:"
-                                            << pooled_mark_stacks_.size();
+      AssertEmptyThreadMarkStackMap();
       CHECK_EQ(pooled_mark_stacks_.size(), kMarkStackPoolSize);
     }
     while (true) {
@@ -2124,10 +2131,7 @@
     {
       MutexLock mu(thread_running_gc_, mark_stack_lock_);
       CHECK(revoked_mark_stacks_.empty());
-      CHECK(thread_mark_stack_map_.empty()) << " size:"
-                                            << thread_mark_stack_map_.size()
-                                            << " pooled_mark_stacks size:"
-                                            << pooled_mark_stacks_.size();
+      AssertEmptyThreadMarkStackMap();
       CHECK_EQ(pooled_mark_stacks_.size(), kMarkStackPoolSize);
     }
     // Process the GC mark stack in the exclusive mode. No need to take the lock.
@@ -2149,6 +2153,16 @@
                                                        const Processor& processor) {
   // Run a checkpoint to collect all thread local mark stacks and iterate over them all.
   RevokeThreadLocalMarkStacks(disable_weak_ref_access, checkpoint_callback);
+  if (disable_weak_ref_access) {
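+    // Thread-local mark stacks are only handed out in thread-local mode, so by
+    // this point the mode must already have been switched to shared.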
+    CHECK_EQ(static_cast<uint32_t>(mark_stack_mode_.load(std::memory_order_relaxed)),
+             static_cast<uint32_t>(kMarkStackModeShared));
+    // From this point onwards no mutator should require a thread-local mark
+    // stack.
+    MutexLock mu(thread_running_gc_, mark_stack_lock_);
+    AssertEmptyThreadMarkStackMap();
+  }
   size_t count = 0;
   std::vector<accounting::AtomicStack<mirror::Object>*> mark_stacks;
   {
@@ -2175,6 +2189,12 @@
       }
     }
   }
+  if (disable_weak_ref_access) {
+    MutexLock mu(thread_running_gc_, mark_stack_lock_);
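+    // Every revoked mark stack must have been processed and returned to the pool.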
+    CHECK(revoked_mark_stacks_.empty());
+    CHECK_EQ(pooled_mark_stacks_.size(), kMarkStackPoolSize);
+  }
   return count;
 }
 
@@ -2415,10 +2435,7 @@
     MutexLock mu(thread_running_gc_, mark_stack_lock_);
     CHECK(gc_mark_stack_->IsEmpty());
     CHECK(revoked_mark_stacks_.empty());
-    CHECK(thread_mark_stack_map_.empty()) << " size:"
-                                          << thread_mark_stack_map_.size()
-                                          << " pooled_mark_stacks size:"
-                                          << pooled_mark_stacks_.size();
+    AssertEmptyThreadMarkStackMap();
     CHECK_EQ(pooled_mark_stacks_.size(), kMarkStackPoolSize);
   }
 }
@@ -3682,10 +3699,7 @@
   {
     MutexLock mu(self, mark_stack_lock_);
     CHECK(revoked_mark_stacks_.empty());
-    CHECK(thread_mark_stack_map_.empty()) << " size:"
-                                          << thread_mark_stack_map_.size()
-                                          << " pooled_mark_stacks size:"
-                                          << pooled_mark_stacks_.size();
+    AssertEmptyThreadMarkStackMap();
     CHECK_EQ(pooled_mark_stacks_.size(), kMarkStackPoolSize);
   }
   // kVerifyNoMissingCardMarks relies on the region space cards not being cleared to avoid false
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index bc6d7b5..dd295d1 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -329,6 +329,9 @@
       REQUIRES(mark_stack_lock_);
   void AddThreadMarkStackMapping(Thread* thread, accounting::ObjectStack* tl_mark_stack)
       REQUIRES(mark_stack_lock_);
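+  // If thread_mark_stack_map_ is not empty, dump its contents and the size of
+  // the mark-stack pool before aborting.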
+  void AssertEmptyThreadMarkStackMap() REQUIRES(mark_stack_lock_);
 
   space::RegionSpace* region_space_;      // The underlying region space.
   std::unique_ptr<Barrier> gc_barrier_;