Change MemMap::maps_ to not be a global variable

Runtime.exit() was causing globals to be destructed while another thread
was still using maps_ to allocate a new mem map.
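
A minimal sketch of the failure mode (hypothetical names, not ART code): a
global container with a non-trivial destructor is torn down during exit()
while another thread may still be inserting into it, which is the race this
change avoids by keeping maps_ behind a pointer managed via Init()/Shutdown().

  #include <map>
  #include <thread>

  // Global with a non-trivial destructor; destroyed during static destruction.
  static std::multimap<void*, int> g_maps;

  void Worker() {
    for (;;) {
      g_maps.emplace(nullptr, 0);  // May run after g_maps has been destructed.
    }
  }

  int main() {
    std::thread worker(Worker);
    worker.detach();
    return 0;  // exit() destroys g_maps while Worker may still be using it.
  }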

Bug: 17962201
Change-Id: I400cb7b8141d858f3c08a6fe59a02838c04c6962
diff --git a/runtime/base/allocator.cc b/runtime/base/allocator.cc
index f67616e..994e235 100644
--- a/runtime/base/allocator.cc
+++ b/runtime/base/allocator.cc
@@ -74,6 +74,7 @@
 
 namespace TrackedAllocators {
 
+// These globals are safe since they don't have any non-trivial destructors.
 Atomic<size_t> g_bytes_used[kAllocatorTagCount];
 volatile size_t g_max_bytes_used[kAllocatorTagCount];
 Atomic<uint64_t> g_total_bytes_used[kAllocatorTagCount];
diff --git a/runtime/common_runtime_test.cc b/runtime/common_runtime_test.cc
index eed6f71..ea3da64 100644
--- a/runtime/common_runtime_test.cc
+++ b/runtime/common_runtime_test.cc
@@ -185,6 +185,8 @@
   int mkdir_result = mkdir(dalvik_cache_.c_str(), 0700);
   ASSERT_EQ(mkdir_result, 0);
 
+  MemMap::Init();  // For LoadExpectSingleDexFile
+
   std::string error_msg;
   java_lang_dex_file_ = LoadExpectSingleDexFile(GetLibCoreDexFileName().c_str());
   boot_class_path_.push_back(java_lang_dex_file_);
diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc
index 231e9e5..3144ce1 100644
--- a/runtime/mem_map.cc
+++ b/runtime/mem_map.cc
@@ -70,7 +70,7 @@
   return os;
 }
 
-MemMap::Maps MemMap::maps_;
+MemMap::Maps* MemMap::maps_ = nullptr;
 
 #if USE_ART_LOW_4G_ALLOCATOR
 // Handling mem_map in 32b address range for 64b architectures that do not support MAP_32BIT.
@@ -457,11 +457,12 @@
   // Remove it from maps_.
   MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
   bool found = false;
-  for (auto it = maps_.lower_bound(base_begin_), end = maps_.end();
+  DCHECK(maps_ != nullptr);
+  for (auto it = maps_->lower_bound(base_begin_), end = maps_->end();
        it != end && it->first == base_begin_; ++it) {
     if (it->second == this) {
       found = true;
-      maps_.erase(it);
+      maps_->erase(it);
       break;
     }
   }
@@ -483,7 +484,8 @@
 
     // Add it to maps_.
     MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
-    maps_.insert(std::pair<void*, MemMap*>(base_begin_, this));
+    DCHECK(maps_ != nullptr);
+    maps_->insert(std::make_pair(base_begin_, this));
   }
 }
 
@@ -614,7 +616,7 @@
 
 bool MemMap::HasMemMap(MemMap* map) {
   void* base_begin = map->BaseBegin();
-  for (auto it = maps_.lower_bound(base_begin), end = maps_.end();
+  for (auto it = maps_->lower_bound(base_begin), end = maps_->end();
        it != end && it->first == base_begin; ++it) {
     if (it->second == map) {
       return true;
@@ -626,7 +628,8 @@
 MemMap* MemMap::GetLargestMemMapAt(void* address) {
   size_t largest_size = 0;
   MemMap* largest_map = nullptr;
-  for (auto it = maps_.lower_bound(address), end = maps_.end();
+  DCHECK(maps_ != nullptr);
+  for (auto it = maps_->lower_bound(address), end = maps_->end();
        it != end && it->first == address; ++it) {
     MemMap* map = it->second;
     CHECK(map != nullptr);
@@ -638,6 +641,20 @@
   return largest_map;
 }
 
+void MemMap::Init() {
+  MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
+  if (maps_ == nullptr) {
+    // dex2oat calls MemMap::Init twice since it's needed before the runtime is created.
+    maps_ = new Maps;
+  }
+}
+
+void MemMap::Shutdown() {
+  MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
+  delete maps_;
+  maps_ = nullptr;
+}
+
 std::ostream& operator<<(std::ostream& os, const MemMap& mem_map) {
   os << StringPrintf("[MemMap: %p-%p prot=0x%x %s]",
                      mem_map.BaseBegin(), mem_map.BaseEnd(), mem_map.GetProtect(),
diff --git a/runtime/mem_map.h b/runtime/mem_map.h
index 314bf8d..df1222c 100644
--- a/runtime/mem_map.h
+++ b/runtime/mem_map.h
@@ -138,6 +138,9 @@
 
   typedef AllocationTrackingMultiMap<void*, MemMap*, kAllocatorTagMaps> Maps;
 
+  static void Init() LOCKS_EXCLUDED(Locks::mem_maps_lock_);
+  static void Shutdown() LOCKS_EXCLUDED(Locks::mem_maps_lock_);
+
  private:
   MemMap(const std::string& name, uint8_t* begin, size_t size, void* base_begin, size_t base_size,
          int prot, bool reuse) LOCKS_EXCLUDED(Locks::mem_maps_lock_);
@@ -167,7 +170,7 @@
 #endif
 
   // All the non-empty MemMaps. Use a multimap as we do a reserve-and-divide (eg ElfMap::Load()).
-  static Maps maps_ GUARDED_BY(Locks::mem_maps_lock_);
+  static Maps* maps_ GUARDED_BY(Locks::mem_maps_lock_);
 
   friend class MemMapTest;  // To allow access to base_begin_ and base_size_.
 };
diff --git a/runtime/mem_map_test.cc b/runtime/mem_map_test.cc
index a78f463..14a72b9 100644
--- a/runtime/mem_map_test.cc
+++ b/runtime/mem_map_test.cc
@@ -87,6 +87,10 @@
     delete m1;
   }
 
+  void CommonInit() {
+    MemMap::Init();
+  }
+
 #if defined(__LP64__) && !defined(__x86_64__)
   static uintptr_t GetLinearScanPos() {
     return MemMap::next_mem_pos_;
@@ -101,10 +105,10 @@
 #endif
 
 TEST_F(MemMapTest, Start) {
+  CommonInit();
   uintptr_t start = GetLinearScanPos();
   EXPECT_LE(64 * KB, start);
   EXPECT_LT(start, static_cast<uintptr_t>(ART_BASE_ADDRESS));
-
 #ifdef __BIONIC__
   // Test a couple of values. Make sure they are different.
   uintptr_t last = 0;
@@ -122,6 +126,7 @@
 #endif
 
 TEST_F(MemMapTest, MapAnonymousEmpty) {
+  CommonInit();
   std::string error_msg;
   std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousEmpty",
                                              nullptr,
@@ -143,6 +148,7 @@
 
 #ifdef __LP64__
 TEST_F(MemMapTest, MapAnonymousEmpty32bit) {
+  CommonInit();
   std::string error_msg;
   std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousEmpty",
                                              nullptr,
@@ -157,6 +163,7 @@
 #endif
 
 TEST_F(MemMapTest, MapAnonymousExactAddr) {
+  CommonInit();
   std::string error_msg;
   // Map at an address that should work, which should succeed.
   std::unique_ptr<MemMap> map0(MemMap::MapAnonymous("MapAnonymous0",
@@ -200,6 +207,7 @@
 #endif
 
 TEST_F(MemMapTest, MapAnonymousExactAddr32bitHighAddr) {
+  CommonInit();
   // This test may not work under valgrind.
   if (RUNNING_ON_VALGRIND == 0) {
     uintptr_t start_addr = ART_BASE_ADDRESS + 0x1000000;
@@ -217,6 +225,7 @@
 }
 
 TEST_F(MemMapTest, MapAnonymousOverflow) {
+  CommonInit();
   std::string error_msg;
   uintptr_t ptr = 0;
   ptr -= kPageSize;  // Now it's close to the top.
@@ -232,6 +241,7 @@
 
 #ifdef __LP64__
 TEST_F(MemMapTest, MapAnonymousLow4GBExpectedTooHigh) {
+  CommonInit();
   std::string error_msg;
   std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousLow4GBExpectedTooHigh",
                                              reinterpret_cast<uint8_t*>(UINT64_C(0x100000000)),
@@ -244,6 +254,7 @@
 }
 
 TEST_F(MemMapTest, MapAnonymousLow4GBRangeTooHigh) {
+  CommonInit();
   std::string error_msg;
   std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousLow4GBRangeTooHigh",
                                              reinterpret_cast<uint8_t*>(0xF0000000),
@@ -257,6 +268,7 @@
 #endif
 
 TEST_F(MemMapTest, CheckNoGaps) {
+  CommonInit();
   std::string error_msg;
   constexpr size_t kNumPages = 3;
   // Map a 3-page mem map.
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 42d05a9..3bd825b 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -193,6 +193,7 @@
   Thread::Shutdown();
   QuasiAtomic::Shutdown();
   verifier::MethodVerifier::Shutdown();
+  MemMap::Shutdown();
   // TODO: acquire a static mutex on Runtime to avoid racing.
   CHECK(instance_ == nullptr || instance_ == this);
   instance_ = nullptr;
@@ -645,6 +646,8 @@
 bool Runtime::Init(const RuntimeOptions& raw_options, bool ignore_unrecognized) {
   CHECK_EQ(sysconf(_SC_PAGE_SIZE), kPageSize);
 
+  MemMap::Init();
+
   std::unique_ptr<ParsedOptions> options(ParsedOptions::Create(raw_options, ignore_unrecognized));
   if (options.get() == nullptr) {
     LOG(ERROR) << "Failed to parse options";