Change MemMap::maps_ to not be a global variable

Runtime.exit() was causing globals to be destructed at the same time
that another thread was using them to allocate a new mem map.
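
The fix follows the usual pattern for this problem: replace a static object
that has a non-trivial destructor with a raw pointer that is allocated in an
explicit Init() and freed in an explicit Shutdown(), so nothing is torn down
behind a running thread's back during exit(). A minimal sketch of the idea
(simplified, hypothetical names; locking omitted here, while the real code
guards maps_ with Locks::mem_maps_lock_):

  #include <map>

  class MemMap;  // mapped regions; opaque in this sketch

  class Registry {  // hypothetical stand-in for the MemMap statics
   public:
    // Called explicitly during startup (Runtime::Init, dex2oat, tests).
    static void Init() {
      if (maps_ == nullptr) {
        maps_ = new std::multimap<void*, MemMap*>;
      }
    }
    // Called explicitly from runtime shutdown; no static destructor runs.
    static void Shutdown() {
      delete maps_;
      maps_ = nullptr;
    }
   private:
    // A raw pointer on purpose: a static std::multimap would be destructed
    // during exit() while another thread may still be registering a map.
    static std::multimap<void*, MemMap*>* maps_;
  };

  std::multimap<void*, MemMap*>* Registry::maps_ = nullptr;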

Bug: 17962201
Change-Id: I400cb7b8141d858f3c08a6fe59a02838c04c6962
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index e3eb9e9..d1d47fb 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -397,6 +397,7 @@
   // Reserve where the image will be loaded up front so that other parts of test set up don't
   // accidentally end up colliding with the fixed memory address when we need to load the image.
   std::string error_msg;
+  MemMap::Init();
   image_reservation_.reset(MemMap::MapAnonymous("image reservation",
                                                 reinterpret_cast<uint8_t*>(ART_BASE_ADDRESS),
                                                 (size_t)100 * 1024 * 1024,  // 100MB
diff --git a/compiler/image_test.cc b/compiler/image_test.cc
index cf4259f..d5d487f 100644
--- a/compiler/image_test.cc
+++ b/compiler/image_test.cc
@@ -63,7 +63,7 @@
   ScratchFile oat_file(OS::CreateEmptyFile(oat_filename.c_str()));
 
   const uintptr_t requested_image_base = ART_BASE_ADDRESS;
-  ImageWriter writer(*compiler_driver_, requested_image_base);
+  std::unique_ptr<ImageWriter> writer(new ImageWriter(*compiler_driver_, requested_image_base));
   {
     {
       jobject class_loader = NULL;
@@ -83,8 +83,8 @@
       t.NewTiming("WriteElf");
       SafeMap<std::string, std::string> key_value_store;
       OatWriter oat_writer(class_linker->GetBootClassPath(), 0, 0, 0, compiler_driver_.get(),
-                           &writer, &timings, &key_value_store);
-      bool success = writer.PrepareImageAddressSpace() &&
+                           writer.get(), &timings, &key_value_store);
+      bool success = writer->PrepareImageAddressSpace() &&
           compiler_driver_->WriteElf(GetTestAndroidRoot(),
                                      !kIsTargetBuild,
                                      class_linker->GetBootClassPath(),
@@ -99,9 +99,9 @@
 
   {
     bool success_image =
-        writer.Write(image_file.GetFilename(), dup_oat->GetPath(), dup_oat->GetPath());
+        writer->Write(image_file.GetFilename(), dup_oat->GetPath(), dup_oat->GetPath());
     ASSERT_TRUE(success_image);
-    bool success_fixup = ElfWriter::Fixup(dup_oat.get(), writer.GetOatDataBegin());
+    bool success_fixup = ElfWriter::Fixup(dup_oat.get(), writer->GetOatDataBegin());
     ASSERT_TRUE(success_fixup);
   }
 
@@ -130,14 +130,18 @@
   compiler_driver_.reset();
 
   // Tear down old runtime before making a new one, clearing out misc state.
+
+  // Remove the reservation of the memory used to load the image.
+  // This needs to happen before we reset the runtime.
+  UnreserveImageSpace();
+  writer.reset(nullptr);
+
   runtime_.reset();
   java_lang_dex_file_ = NULL;
 
+  MemMap::Init();
   std::unique_ptr<const DexFile> dex(LoadExpectSingleDexFile(GetLibCoreDexFileName().c_str()));
 
-  // Remove the reservation of the memory for use to load the image.
-  UnreserveImageSpace();
-
   RuntimeOptions options;
   std::string image("-Ximage:");
   image.append(image_location.GetFilename());
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 7be4349..2a7d998 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -1262,6 +1262,7 @@
 
   RuntimeOptions runtime_options;
   std::vector<const DexFile*> boot_class_path;
+  art::MemMap::Init();  // For ZipEntry::ExtractToMemMap.
   if (boot_image_option.empty()) {
     size_t failure_count = OpenDexFiles(dex_filenames, dex_locations, boot_class_path);
     if (failure_count > 0) {
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index 4ed428c..504addc 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -758,6 +758,7 @@
 
 static int patchoat(int argc, char **argv) {
   InitLogging(argv);
+  MemMap::Init();
   const bool debug = kIsDebugBuild;
   orig_argc = argc;
   orig_argv = argv;
diff --git a/runtime/base/allocator.cc b/runtime/base/allocator.cc
index f67616e..994e235 100644
--- a/runtime/base/allocator.cc
+++ b/runtime/base/allocator.cc
@@ -74,6 +74,7 @@
 
 namespace TrackedAllocators {
 
+// These globals are safe since they don't have any non-trivial destructors.
 Atomic<size_t> g_bytes_used[kAllocatorTagCount];
 volatile size_t g_max_bytes_used[kAllocatorTagCount];
 Atomic<uint64_t> g_total_bytes_used[kAllocatorTagCount];
diff --git a/runtime/common_runtime_test.cc b/runtime/common_runtime_test.cc
index eed6f71..ea3da64 100644
--- a/runtime/common_runtime_test.cc
+++ b/runtime/common_runtime_test.cc
@@ -185,6 +185,8 @@
   int mkdir_result = mkdir(dalvik_cache_.c_str(), 0700);
   ASSERT_EQ(mkdir_result, 0);
 
+  MemMap::Init();  // For LoadExpectSingleDexFile
+
   std::string error_msg;
   java_lang_dex_file_ = LoadExpectSingleDexFile(GetLibCoreDexFileName().c_str());
   boot_class_path_.push_back(java_lang_dex_file_);
diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc
index 231e9e5..3144ce1 100644
--- a/runtime/mem_map.cc
+++ b/runtime/mem_map.cc
@@ -70,7 +70,7 @@
   return os;
 }
 
-MemMap::Maps MemMap::maps_;
+MemMap::Maps* MemMap::maps_ = nullptr;
 
 #if USE_ART_LOW_4G_ALLOCATOR
 // Handling mem_map in 32b address range for 64b architectures that do not support MAP_32BIT.
@@ -457,11 +457,12 @@
   // Remove it from maps_.
   MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
   bool found = false;
-  for (auto it = maps_.lower_bound(base_begin_), end = maps_.end();
+  DCHECK(maps_ != nullptr);
+  for (auto it = maps_->lower_bound(base_begin_), end = maps_->end();
        it != end && it->first == base_begin_; ++it) {
     if (it->second == this) {
       found = true;
-      maps_.erase(it);
+      maps_->erase(it);
       break;
     }
   }
@@ -483,7 +484,8 @@
 
     // Add it to maps_.
     MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
-    maps_.insert(std::pair<void*, MemMap*>(base_begin_, this));
+    DCHECK(maps_ != nullptr);
+    maps_->insert(std::make_pair(base_begin_, this));
   }
 }
 
@@ -614,7 +616,7 @@
 
 bool MemMap::HasMemMap(MemMap* map) {
   void* base_begin = map->BaseBegin();
-  for (auto it = maps_.lower_bound(base_begin), end = maps_.end();
+  for (auto it = maps_->lower_bound(base_begin), end = maps_->end();
        it != end && it->first == base_begin; ++it) {
     if (it->second == map) {
       return true;
@@ -626,7 +628,8 @@
 MemMap* MemMap::GetLargestMemMapAt(void* address) {
   size_t largest_size = 0;
   MemMap* largest_map = nullptr;
-  for (auto it = maps_.lower_bound(address), end = maps_.end();
+  DCHECK(maps_ != nullptr);
+  for (auto it = maps_->lower_bound(address), end = maps_->end();
        it != end && it->first == address; ++it) {
     MemMap* map = it->second;
     CHECK(map != nullptr);
@@ -638,6 +641,20 @@
   return largest_map;
 }
 
+void MemMap::Init() {
+  MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
+  if (maps_ == nullptr) {
+    // dex2oat calls MemMap::Init twice since it's needed before the runtime is created.
+    maps_ = new Maps;
+  }
+}
+
+void MemMap::Shutdown() {
+  MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
+  delete maps_;
+  maps_ = nullptr;
+}
+
 std::ostream& operator<<(std::ostream& os, const MemMap& mem_map) {
   os << StringPrintf("[MemMap: %p-%p prot=0x%x %s]",
                      mem_map.BaseBegin(), mem_map.BaseEnd(), mem_map.GetProtect(),
diff --git a/runtime/mem_map.h b/runtime/mem_map.h
index 314bf8d..df1222c 100644
--- a/runtime/mem_map.h
+++ b/runtime/mem_map.h
@@ -138,6 +138,9 @@
 
   typedef AllocationTrackingMultiMap<void*, MemMap*, kAllocatorTagMaps> Maps;
 
+  static void Init() LOCKS_EXCLUDED(Locks::mem_maps_lock_);
+  static void Shutdown() LOCKS_EXCLUDED(Locks::mem_maps_lock_);
+
  private:
   MemMap(const std::string& name, uint8_t* begin, size_t size, void* base_begin, size_t base_size,
          int prot, bool reuse) LOCKS_EXCLUDED(Locks::mem_maps_lock_);
@@ -167,7 +170,7 @@
 #endif
 
   // All the non-empty MemMaps. Use a multimap as we do a reserve-and-divide (eg ElfMap::Load()).
-  static Maps maps_ GUARDED_BY(Locks::mem_maps_lock_);
+  static Maps* maps_ GUARDED_BY(Locks::mem_maps_lock_);
 
   friend class MemMapTest;  // To allow access to base_begin_ and base_size_.
 };
diff --git a/runtime/mem_map_test.cc b/runtime/mem_map_test.cc
index a78f463..14a72b9 100644
--- a/runtime/mem_map_test.cc
+++ b/runtime/mem_map_test.cc
@@ -87,6 +87,10 @@
     delete m1;
   }
 
+  void CommonInit() {
+    MemMap::Init();
+  }
+
 #if defined(__LP64__) && !defined(__x86_64__)
   static uintptr_t GetLinearScanPos() {
     return MemMap::next_mem_pos_;
@@ -101,10 +105,10 @@
 #endif
 
 TEST_F(MemMapTest, Start) {
+  CommonInit();
   uintptr_t start = GetLinearScanPos();
   EXPECT_LE(64 * KB, start);
   EXPECT_LT(start, static_cast<uintptr_t>(ART_BASE_ADDRESS));
-
 #ifdef __BIONIC__
   // Test a couple of values. Make sure they are different.
   uintptr_t last = 0;
@@ -122,6 +126,7 @@
 #endif
 
 TEST_F(MemMapTest, MapAnonymousEmpty) {
+  CommonInit();
   std::string error_msg;
   std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousEmpty",
                                              nullptr,
@@ -143,6 +148,7 @@
 
 #ifdef __LP64__
 TEST_F(MemMapTest, MapAnonymousEmpty32bit) {
+  CommonInit();
   std::string error_msg;
   std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousEmpty",
                                              nullptr,
@@ -157,6 +163,7 @@
 #endif
 
 TEST_F(MemMapTest, MapAnonymousExactAddr) {
+  CommonInit();
   std::string error_msg;
   // Map at an address that should work, which should succeed.
   std::unique_ptr<MemMap> map0(MemMap::MapAnonymous("MapAnonymous0",
@@ -200,6 +207,7 @@
 #endif
 
 TEST_F(MemMapTest, MapAnonymousExactAddr32bitHighAddr) {
+  CommonInit();
   // This test may not work under valgrind.
   if (RUNNING_ON_VALGRIND == 0) {
     uintptr_t start_addr = ART_BASE_ADDRESS + 0x1000000;
@@ -217,6 +225,7 @@
 }
 
 TEST_F(MemMapTest, MapAnonymousOverflow) {
+  CommonInit();
   std::string error_msg;
   uintptr_t ptr = 0;
   ptr -= kPageSize;  // Now it's close to the top.
@@ -232,6 +241,7 @@
 
 #ifdef __LP64__
 TEST_F(MemMapTest, MapAnonymousLow4GBExpectedTooHigh) {
+  CommonInit();
   std::string error_msg;
   std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousLow4GBExpectedTooHigh",
                                              reinterpret_cast<uint8_t*>(UINT64_C(0x100000000)),
@@ -244,6 +254,7 @@
 }
 
 TEST_F(MemMapTest, MapAnonymousLow4GBRangeTooHigh) {
+  CommonInit();
   std::string error_msg;
   std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousLow4GBRangeTooHigh",
                                              reinterpret_cast<uint8_t*>(0xF0000000),
@@ -257,6 +268,7 @@
 #endif
 
 TEST_F(MemMapTest, CheckNoGaps) {
+  CommonInit();
   std::string error_msg;
   constexpr size_t kNumPages = 3;
   // Map a 3-page mem map.
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 42d05a9..3bd825b 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -193,6 +193,7 @@
   Thread::Shutdown();
   QuasiAtomic::Shutdown();
   verifier::MethodVerifier::Shutdown();
+  MemMap::Shutdown();
   // TODO: acquire a static mutex on Runtime to avoid racing.
   CHECK(instance_ == nullptr || instance_ == this);
   instance_ = nullptr;
@@ -645,6 +646,8 @@
 bool Runtime::Init(const RuntimeOptions& raw_options, bool ignore_unrecognized) {
   CHECK_EQ(sysconf(_SC_PAGE_SIZE), kPageSize);
 
+  MemMap::Init();
+
   std::unique_ptr<ParsedOptions> options(ParsedOptions::Create(raw_options, ignore_unrecognized));
   if (options.get() == nullptr) {
     LOG(ERROR) << "Failed to parse options";