Add an overload for MemMap::MapAnonymous().

Add an overload that omits the `reuse` and `use_ashmem`
parameters, using their default values (`reuse = false` and
`use_ashmem = true`), to simplify the common case for callers.
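
For illustration, a typical call site now reads as follows
(sketch only; the map name and flags are illustrative, not part
of this change):

    std::string error_msg;
    // Illustrative; relies on the defaults reuse = false and
    // use_ashmem = true supplied by the new overload.
    MemMap map = MemMap::MapAnonymous("example-map",
                                      /* addr */ nullptr,
                                      kPageSize,
                                      PROT_READ | PROT_WRITE,
                                      /* low_4gb */ false,
                                      &error_msg);
    CHECK(map.IsValid()) << error_msg;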

Test: m test-art-host-gtest
Test: testrunner.py --host --optimizing
Change-Id: Iaa44f770dee7e043c3a1d6867dfb0416dec83b25
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index 2f01766..d603d96 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -328,7 +328,6 @@
                                             (size_t)120 * 1024 * 1024,  // 120MB
                                             PROT_NONE,
                                             false /* no need for 4gb flag with fixed mmap */,
-                                            false /* not reusing existing reservation */,
                                             &error_msg);
   CHECK(image_reservation_.IsValid()) << error_msg;
 }
diff --git a/dex2oat/linker/image_writer.cc b/dex2oat/linker/image_writer.cc
index 27e7974..67ded32 100644
--- a/dex2oat/linker/image_writer.cc
+++ b/dex2oat/linker/image_writer.cc
@@ -735,7 +735,6 @@
                                              length,
                                              PROT_READ | PROT_WRITE,
                                              /* low_4gb */ false,
-                                             /* reuse */ false,
                                              &error_msg);
     if (UNLIKELY(!image_info.image_.IsValid())) {
       LOG(ERROR) << "Failed to allocate memory for image file generation: " << error_msg;
diff --git a/libartbase/base/mem_map.h b/libartbase/base/mem_map.h
index 525fade..cd7d502 100644
--- a/libartbase/base/mem_map.h
+++ b/libartbase/base/mem_map.h
@@ -131,6 +131,14 @@
                              bool reuse,
                              std::string* error_msg,
                              bool use_ashmem = true);
+  static MemMap MapAnonymous(const char* name,
+                             uint8_t* addr,
+                             size_t byte_count,
+                             int prot,
+                             bool low_4gb,
+                             std::string* error_msg) {
+    return MapAnonymous(name, addr, byte_count, prot, low_4gb, /* reuse */ false, error_msg);
+  }
 
   // Create placeholder for a region allocated by direct call to mmap.
   // This is useful when we do not have control over the code calling mmap,
diff --git a/libartbase/base/mem_map_test.cc b/libartbase/base/mem_map_test.cc
index b2f5c72..396f12b 100644
--- a/libartbase/base/mem_map_test.cc
+++ b/libartbase/base/mem_map_test.cc
@@ -57,7 +57,6 @@
                                       size,
                                       PROT_READ,
                                       low_4gb,
-                                      /* reuse */ false,
                                       &error_msg);
     CHECK(map.IsValid());
     return map.Begin();
@@ -73,7 +72,6 @@
                                      2 * page_size,
                                      PROT_READ | PROT_WRITE,
                                      low_4gb,
-                                     /* reuse */ false,
                                      &error_msg);
     // Check its state and write to it.
     ASSERT_TRUE(m0.IsValid());
@@ -171,7 +169,6 @@
                                      kPageSize,
                                      PROT_READ,
                                      /* low_4gb */ false,
-                                     /* reuse */ false,
                                      &error_msg);
   ASSERT_TRUE(dest.IsValid());
   MemMap source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
@@ -179,7 +176,6 @@
                                        kPageSize,
                                        PROT_WRITE | PROT_READ,
                                        /* low_4gb */ false,
-                                       /* reuse */ false,
                                        &error_msg);
   ASSERT_TRUE(source.IsValid());
   void* source_addr = source.Begin();
@@ -212,7 +208,6 @@
                                                      // source.
                                      PROT_READ,
                                      /* low_4gb */ false,
-                                     /* reuse */ false,
                                      &error_msg);
   ASSERT_TRUE(dest.IsValid());
   MemMap source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
@@ -220,7 +215,6 @@
                                        3 * kPageSize,
                                        PROT_WRITE | PROT_READ,
                                        /* low_4gb */ false,
-                                       /* reuse */ false,
                                        &error_msg);
   ASSERT_TRUE(source.IsValid());
   uint8_t* source_addr = source.Begin();
@@ -256,7 +250,6 @@
                                      3 * kPageSize,
                                      PROT_READ,
                                      /* low_4gb */ false,
-                                     /* reuse */ false,
                                      &error_msg);
   ASSERT_TRUE(dest.IsValid());
   MemMap source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
@@ -264,7 +257,6 @@
                                        kPageSize,
                                        PROT_WRITE | PROT_READ,
                                        /* low_4gb */ false,
-                                       /* reuse */ false,
                                        &error_msg);
   ASSERT_TRUE(source.IsValid());
   uint8_t* source_addr = source.Begin();
@@ -298,7 +290,6 @@
                          // the way we move source.
           PROT_READ | PROT_WRITE,
           /* low_4gb */ false,
-          /* reuse */ false,
           &error_msg);
   ASSERT_TRUE(dest.IsValid());
   // Resize down to 1 page so we can remap the rest.
@@ -309,7 +300,6 @@
                                        2 * kPageSize,
                                        PROT_WRITE | PROT_READ,
                                        /* low_4gb */ false,
-                                       /* reuse */ false,
                                        &error_msg);
   ASSERT_TRUE(source.IsValid());
   ASSERT_EQ(dest.Begin() + kPageSize, source.Begin());
@@ -346,7 +336,6 @@
                                     0,
                                     PROT_READ,
                                     /* low_4gb */ false,
-                                    /* reuse */ false,
                                     &error_msg);
   ASSERT_FALSE(map.IsValid()) << error_msg;
   ASSERT_FALSE(error_msg.empty());
@@ -357,7 +346,6 @@
                              kPageSize,
                              PROT_READ | PROT_WRITE,
                              /* low_4gb */ false,
-                             /* reuse */ false,
                              &error_msg);
   ASSERT_TRUE(map.IsValid()) << error_msg;
   ASSERT_TRUE(error_msg.empty());
@@ -371,7 +359,6 @@
                                     0x20000,
                                     PROT_READ | PROT_WRITE,
                                     /* low_4gb */ false,
-                                    /* reuse */ false,
                                     nullptr);
   ASSERT_FALSE(map.IsValid());
 }
@@ -385,7 +372,6 @@
                                     0,
                                     PROT_READ,
                                     /* low_4gb */ true,
-                                    /* reuse */ false,
                                     &error_msg);
   ASSERT_FALSE(map.IsValid()) << error_msg;
   ASSERT_FALSE(error_msg.empty());
@@ -396,7 +382,6 @@
                              kPageSize,
                              PROT_READ | PROT_WRITE,
                              /* low_4gb */ true,
-                             /* reuse */ false,
                              &error_msg);
   ASSERT_TRUE(map.IsValid()) << error_msg;
   ASSERT_TRUE(error_msg.empty());
@@ -435,7 +420,6 @@
                                      kPageSize,
                                      PROT_READ | PROT_WRITE,
                                      /* low_4gb */ false,
-                                     /* reuse */ false,
                                      &error_msg);
   ASSERT_TRUE(map0.IsValid()) << error_msg;
   ASSERT_TRUE(error_msg.empty());
@@ -446,7 +430,6 @@
                                      kPageSize,
                                      PROT_READ | PROT_WRITE,
                                      /* low_4gb */ false,
-                                     /* reuse */ false,
                                      &error_msg);
   ASSERT_TRUE(map1.IsValid()) << error_msg;
   ASSERT_TRUE(error_msg.empty());
@@ -457,7 +440,6 @@
                                      kPageSize,
                                      PROT_READ | PROT_WRITE,
                                      /* low_4gb */ false,
-                                     /* reuse */ false,
                                      &error_msg);
   ASSERT_FALSE(map2.IsValid()) << error_msg;
   ASSERT_TRUE(!error_msg.empty());
@@ -494,7 +476,6 @@
                                size,
                                PROT_READ | PROT_WRITE,
                                /*low_4gb*/ true,
-                               /* reuse */ false,
                                &error_msg);
     if (map.IsValid()) {
       break;
@@ -516,7 +497,6 @@
                                     2 * kPageSize,  // brings it over the top.
                                     PROT_READ | PROT_WRITE,
                                     /* low_4gb */ false,
-                                    /* reuse */ false,
                                     &error_msg);
   ASSERT_FALSE(map.IsValid());
   ASSERT_FALSE(error_msg.empty());
@@ -532,7 +512,6 @@
                            kPageSize,
                            PROT_READ | PROT_WRITE,
                            /* low_4gb */ true,
-                           /* reuse */ false,
                            &error_msg);
   ASSERT_FALSE(map.IsValid());
   ASSERT_FALSE(error_msg.empty());
@@ -546,7 +525,6 @@
                                     0x20000000,
                                     PROT_READ | PROT_WRITE,
                                     /* low_4gb */ true,
-                                    /* reuse */ false,
                                     &error_msg);
   ASSERT_FALSE(map.IsValid());
   ASSERT_FALSE(error_msg.empty());
@@ -586,7 +564,6 @@
                                     kPageSize * kNumPages,
                                     PROT_READ | PROT_WRITE,
                                     /* low_4gb */ false,
-                                    /* reuse */ false,
                                     &error_msg);
   ASSERT_TRUE(map.IsValid()) << error_msg;
   ASSERT_TRUE(error_msg.empty());
@@ -602,7 +579,6 @@
                                      kPageSize,
                                      PROT_READ | PROT_WRITE,
                                      /* low_4gb */ false,
-                                     /* reuse */ false,
                                      &error_msg);
   ASSERT_TRUE(map0.IsValid()) << error_msg;
   ASSERT_TRUE(error_msg.empty());
@@ -611,7 +587,6 @@
                                      kPageSize,
                                      PROT_READ | PROT_WRITE,
                                      /* low_4gb */ false,
-                                     /* reuse */ false,
                                      &error_msg);
   ASSERT_TRUE(map1.IsValid()) << error_msg;
   ASSERT_TRUE(error_msg.empty());
@@ -620,7 +595,6 @@
                                      kPageSize,
                                      PROT_READ | PROT_WRITE,
                                      /* low_4gb */ false,
-                                     /* reuse */ false,
                                      &error_msg);
   ASSERT_TRUE(map2.IsValid()) << error_msg;
   ASSERT_TRUE(error_msg.empty());
@@ -653,7 +627,6 @@
                                    14 * page_size,
                                    PROT_READ | PROT_WRITE,
                                    /* low_4gb */ false,
-                                   /* reuse */ false,
                                    &error_msg);
   ASSERT_TRUE(m0.IsValid());
   uint8_t* base0 = m0.Begin();
diff --git a/libartbase/base/zip_archive.cc b/libartbase/base/zip_archive.cc
index 3c68ca1..a841bae 100644
--- a/libartbase/base/zip_archive.cc
+++ b/libartbase/base/zip_archive.cc
@@ -79,7 +79,6 @@
                                     GetUncompressedLength(),
                                     PROT_READ | PROT_WRITE,
                                     /* low_4gb */ false,
-                                    /* reuse */ false,
                                     error_msg);
   if (!map.IsValid()) {
     DCHECK(!error_msg->empty());
diff --git a/openjdkjvmti/ti_class_definition.cc b/openjdkjvmti/ti_class_definition.cc
index 030ad98..895e734 100644
--- a/openjdkjvmti/ti_class_definition.cc
+++ b/openjdkjvmti/ti_class_definition.cc
@@ -250,7 +250,6 @@
                                                dequick_size,
                                                PROT_NONE,
                                                /*low_4gb*/ false,
-                                               /*reuse*/ false,
                                                &error);
     mmap_name += "-TEMP";
     temp_mmap_ = art::MemMap::MapAnonymous(mmap_name.c_str(),
@@ -258,7 +257,6 @@
                                            dequick_size,
                                            PROT_READ | PROT_WRITE,
                                            /*low_4gb*/ false,
-                                           /*reuse*/ false,
                                            &error);
     if (UNLIKELY(dex_data_mmap_.IsValid() && temp_mmap_.IsValid())) {
       // Need to save the initial dexfile so we don't need to search for it in the fault-handler.
diff --git a/openjdkjvmti/ti_redefine.cc b/openjdkjvmti/ti_redefine.cc
index 6cba48a..8707e27 100644
--- a/openjdkjvmti/ti_redefine.cc
+++ b/openjdkjvmti/ti_redefine.cc
@@ -309,7 +309,6 @@
       data.size(),
       PROT_READ|PROT_WRITE,
       /*low_4gb*/ false,
-      /*reuse*/ false,
       error_msg);
   if (LIKELY(map.IsValid())) {
     memcpy(map.Begin(), data.data(), data.size());
diff --git a/runtime/base/mem_map_arena_pool.cc b/runtime/base/mem_map_arena_pool.cc
index 0f472e2..a9fbafe 100644
--- a/runtime/base/mem_map_arena_pool.cc
+++ b/runtime/base/mem_map_arena_pool.cc
@@ -62,7 +62,6 @@
                                     size,
                                     PROT_READ | PROT_WRITE,
                                     low_4gb,
-                                    /* reuse */ false,
                                     &error_msg);
   CHECK(map.IsValid()) << error_msg;
   return map;
diff --git a/runtime/dexopt_test.cc b/runtime/dexopt_test.cc
index b0eef00..9e3159d 100644
--- a/runtime/dexopt_test.cc
+++ b/runtime/dexopt_test.cc
@@ -254,7 +254,6 @@
                                                       end - start,
                                                       PROT_NONE,
                                                       /* low_4gb*/ false,
-                                                      /* reuse */ false,
                                                       &error_msg));
     ASSERT_TRUE(image_reservation_.back().IsValid()) << error_msg;
     LOG(INFO) << "Reserved space for image " <<
diff --git a/runtime/elf_file.cc b/runtime/elf_file.cc
index 4ae7362..d45a689 100644
--- a/runtime/elf_file.cc
+++ b/runtime/elf_file.cc
@@ -1168,7 +1168,6 @@
                                             loaded_size,
                                             PROT_NONE,
                                             low_4gb,
-                                            /* reuse */ false,
                                             error_msg);
       if (!reserve.IsValid()) {
         *error_msg = StringPrintf("Failed to allocate %s: %s",
diff --git a/runtime/gc/accounting/atomic_stack.h b/runtime/gc/accounting/atomic_stack.h
index 2a71dec..10af10d 100644
--- a/runtime/gc/accounting/atomic_stack.h
+++ b/runtime/gc/accounting/atomic_stack.h
@@ -257,7 +257,6 @@
                                     capacity_ * sizeof(begin_[0]),
                                     PROT_READ | PROT_WRITE,
                                     /* low_4gb */ false,
-                                    /* reuse */ false,
                                     &error_msg);
     CHECK(mem_map_.IsValid()) << "couldn't allocate mark stack.\n" << error_msg;
     uint8_t* addr = mem_map_.Begin();
diff --git a/runtime/gc/accounting/bitmap.cc b/runtime/gc/accounting/bitmap.cc
index e157e5e..bb2beaa 100644
--- a/runtime/gc/accounting/bitmap.cc
+++ b/runtime/gc/accounting/bitmap.cc
@@ -53,7 +53,6 @@
                                         bitmap_size,
                                         PROT_READ | PROT_WRITE,
                                         /* low_4gb */ false,
-                                        /* reuse */ false,
                                         &error_msg);
   if (UNLIKELY(!mem_map.IsValid())) {
     LOG(ERROR) << "Failed to allocate bitmap " << name << ": " << error_msg;
diff --git a/runtime/gc/accounting/card_table.cc b/runtime/gc/accounting/card_table.cc
index 89645e0..7cddec6 100644
--- a/runtime/gc/accounting/card_table.cc
+++ b/runtime/gc/accounting/card_table.cc
@@ -69,7 +69,6 @@
                                         capacity + 256,
                                         PROT_READ | PROT_WRITE,
                                         /* low_4gb */ false,
-                                        /* reuse */ false,
                                         &error_msg);
   CHECK(mem_map.IsValid()) << "couldn't allocate card table: " << error_msg;
   // All zeros is the correct initial value; all clean. Anonymous mmaps are initialized to zero, we
diff --git a/runtime/gc/accounting/read_barrier_table.h b/runtime/gc/accounting/read_barrier_table.h
index d8b1bb2..8bdf6da 100644
--- a/runtime/gc/accounting/read_barrier_table.h
+++ b/runtime/gc/accounting/read_barrier_table.h
@@ -44,7 +44,6 @@
                                     capacity,
                                     PROT_READ | PROT_WRITE,
                                     /* low_4gb */ false,
-                                    /* reuse */ false,
                                     &error_msg);
     CHECK(mem_map_.IsValid() && mem_map_.Begin() != nullptr)
         << "couldn't allocate read barrier table: " << error_msg;
diff --git a/runtime/gc/accounting/space_bitmap.cc b/runtime/gc/accounting/space_bitmap.cc
index f87a67e..2946486 100644
--- a/runtime/gc/accounting/space_bitmap.cc
+++ b/runtime/gc/accounting/space_bitmap.cc
@@ -89,7 +89,6 @@
                                         bitmap_size,
                                         PROT_READ | PROT_WRITE,
                                         /* low_4gb */ false,
-                                        /* reuse */ false,
                                         &error_msg);
   if (UNLIKELY(!mem_map.IsValid())) {
     LOG(ERROR) << "Failed to allocate bitmap " << name << ": " << error_msg;
diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc
index 1639a82..0dbafde 100644
--- a/runtime/gc/allocator/rosalloc.cc
+++ b/runtime/gc/allocator/rosalloc.cc
@@ -96,7 +96,6 @@
                                            RoundUp(max_num_of_pages, kPageSize),
                                            PROT_READ | PROT_WRITE,
                                            /* low_4gb */ false,
-                                           /* reuse */ false,
                                            &error_msg);
   CHECK(page_map_mem_map_.IsValid()) << "Couldn't allocate the page map : " << error_msg;
   page_map_ = page_map_mem_map_.Begin();
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 53fd1f4..fdd0b62 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -139,7 +139,6 @@
         RoundUp(kSweepArrayChunkFreeSize * sizeof(mirror::Object*), kPageSize),
         PROT_READ | PROT_WRITE,
         /* low_4gb */ false,
-        /* reuse */ false,
         &error_msg);
     CHECK(sweep_array_free_buffer_mem_map_.IsValid())
         << "Couldn't allocate sweep array free buffer: " << error_msg;
diff --git a/runtime/gc/collector/immune_spaces_test.cc b/runtime/gc/collector/immune_spaces_test.cc
index 558a4a7..145bd02 100644
--- a/runtime/gc/collector/immune_spaces_test.cc
+++ b/runtime/gc/collector/immune_spaces_test.cc
@@ -88,7 +88,6 @@
                                       image_size,
                                       PROT_READ | PROT_WRITE,
                                       /*low_4gb*/true,
-                                      /*reuse*/false,
                                       &error_str);
     if (!map.IsValid()) {
       LOG(ERROR) << error_str;
@@ -102,7 +101,6 @@
                                           oat_size,
                                           PROT_READ | PROT_WRITE,
                                           /*low_4gb*/true,
-                                          /*reuse*/false,
                                           &error_str);
     if (!oat_map.IsValid()) {
       LOG(ERROR) << error_str;
@@ -146,7 +144,6 @@
                                       size,
                                       PROT_READ | PROT_WRITE,
                                       /*low_4gb*/ true,
-                                      /*reuse*/ false,
                                       &error_str);
     if (!map.IsValid()) {
       LOG(ERROR) << "Failed to allocate memory region " << error_str;
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 334c7a0..997d3b6 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -109,7 +109,6 @@
       RoundUp(kSweepArrayChunkFreeSize * sizeof(mirror::Object*), kPageSize),
       PROT_READ | PROT_WRITE,
       /* low_4gb */ false,
-      /* reuse */ false,
       &error_msg);
   CHECK(sweep_array_free_buffer_mem_map_.IsValid())
       << "Couldn't allocate sweep array free buffer: " << error_msg;
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 1578db5..7913354 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -409,7 +409,6 @@
                                             capacity_,
                                             PROT_READ | PROT_WRITE,
                                             /* low_4gb */ true,
-                                            /* reuse */ false,
                                             &error_str);
     }
     CHECK(main_mem_map_1.IsValid()) << error_str;
@@ -669,7 +668,6 @@
                                       capacity,
                                       PROT_READ | PROT_WRITE,
                                       /* low_4gb*/ true,
-                                      /* reuse */ false,
                                       out_error_str);
     if (map.IsValid() || request_begin == nullptr) {
       return map;
diff --git a/runtime/gc/heap_test.cc b/runtime/gc/heap_test.cc
index d35ae38..8720a3e 100644
--- a/runtime/gc/heap_test.cc
+++ b/runtime/gc/heap_test.cc
@@ -38,7 +38,6 @@
                                      16 * KB,
                                      PROT_READ,
                                      /*low_4gb*/ true,
-                                     /*reuse*/ false,
                                      &error_msg);
     ASSERT_TRUE(reserved_.IsValid()) << error_msg;
     CommonRuntimeTest::SetUp();
diff --git a/runtime/gc/space/bump_pointer_space.cc b/runtime/gc/space/bump_pointer_space.cc
index 2712ec2..42453f5 100644
--- a/runtime/gc/space/bump_pointer_space.cc
+++ b/runtime/gc/space/bump_pointer_space.cc
@@ -33,7 +33,6 @@
                                         capacity,
                                         PROT_READ | PROT_WRITE,
                                         /* low_4gb */ true,
-                                        /* reuse */ false,
                                         &error_msg);
   if (!mem_map.IsValid()) {
     LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 2a4803a..7178627 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -818,7 +818,6 @@
                                       image_header.GetImageSize(),
                                       PROT_READ | PROT_WRITE,
                                       /*low_4gb*/ true,
-                                      /*reuse*/ false,
                                       error_msg);
     if (map.IsValid()) {
       const size_t stored_size = image_header.GetDataSize();
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index ada59b3..76ea9fd 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -140,7 +140,6 @@
                                         num_bytes,
                                         PROT_READ | PROT_WRITE,
                                         /* low_4gb */ true,
-                                        /* reuse */ false,
                                         &error_msg);
   if (UNLIKELY(!mem_map.IsValid())) {
     LOG(WARNING) << "Large object allocation failed: " << error_msg;
@@ -354,7 +353,6 @@
                                         size,
                                         PROT_READ | PROT_WRITE,
                                         /* low_4gb */ true,
-                                        /* reuse */ false,
                                         &error_msg);
   CHECK(mem_map.IsValid()) << "Failed to allocate large object space mem map: " << error_msg;
   return new FreeListSpace(name, std::move(mem_map), mem_map.Begin(), mem_map.End());
@@ -378,7 +376,6 @@
                            alloc_info_size,
                            PROT_READ | PROT_WRITE,
                            /* low_4gb */ false,
-                           /* reuse */ false,
                            &error_msg);
   CHECK(allocation_info_map_.IsValid()) << "Failed to allocate allocation info map" << error_msg;
   allocation_info_ = reinterpret_cast<AllocationInfo*>(allocation_info_map_.Begin());
diff --git a/runtime/gc/space/malloc_space.cc b/runtime/gc/space/malloc_space.cc
index 91e0ce8..445560a 100644
--- a/runtime/gc/space/malloc_space.cc
+++ b/runtime/gc/space/malloc_space.cc
@@ -110,7 +110,6 @@
                                         *capacity,
                                         PROT_READ | PROT_WRITE,
                                         /* low_4gb */ true,
-                                        /* reuse */ false,
                                         &error_msg);
   if (!mem_map.IsValid()) {
     LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
diff --git a/runtime/gc/space/region_space.cc b/runtime/gc/space/region_space.cc
index a2e2e95..f74fa86 100644
--- a/runtime/gc/space/region_space.cc
+++ b/runtime/gc/space/region_space.cc
@@ -59,7 +59,6 @@
                                    capacity + kRegionSize,
                                    PROT_READ | PROT_WRITE,
                                    /* low_4gb */ true,
-                                   /* reuse */ false,
                                    &error_msg);
     if (mem_map.IsValid() || requested_begin == nullptr) {
       break;
diff --git a/runtime/indirect_reference_table.cc b/runtime/indirect_reference_table.cc
index 098db9f..8ab4a9b 100644
--- a/runtime/indirect_reference_table.cc
+++ b/runtime/indirect_reference_table.cc
@@ -83,7 +83,6 @@
                                         table_bytes,
                                         PROT_READ | PROT_WRITE,
                                         /* low_4gb */ false,
-                                        /* reuse */ false,
                                         error_msg);
   if (!table_mem_map_.IsValid() && error_msg->empty()) {
     *error_msg = "Unable to map memory for indirect ref table";
@@ -227,7 +226,6 @@
                                         table_bytes,
                                         PROT_READ | PROT_WRITE,
                                         /* is_low_4gb */ false,
-                                        /* reuse */ false,
                                         error_msg);
   if (!new_map.IsValid()) {
     return false;
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index d49ebd1..71fabd0 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -177,7 +177,6 @@
                                             length,
                                             PROT_READ | PROT_WRITE,
                                             /* low_4gb */ false,
-                                            /* reuse */ false,
                                             &error_message);
   if (!dex_mem_map.IsValid()) {
     ScopedObjectAccess soa(env);
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 9248bb9..30d4587 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -1167,7 +1167,6 @@
                                                  kPageSize,
                                                  PROT_NONE,
                                                 /* low_4gb */ true,
-                                                 /* reuse */ false,
                                                  /* error_msg */ nullptr);
     if (!protected_fault_page_.IsValid()) {
       LOG(WARNING) << "Could not reserve sentinel fault page";
diff --git a/runtime/runtime_callbacks_test.cc b/runtime/runtime_callbacks_test.cc
index 4c4dcd8..ed0472f 100644
--- a/runtime/runtime_callbacks_test.cc
+++ b/runtime/runtime_callbacks_test.cc
@@ -194,8 +194,7 @@
                                       /* addr */ nullptr,
                                       128 * kPageSize,  // Just some small stack.
                                       PROT_READ | PROT_WRITE,
-                                      false,
-                                      false,
+                                      /* low_4gb */ false,
                                       &error_msg);
   ASSERT_TRUE(stack.IsValid()) << error_msg;
 
diff --git a/runtime/thread_pool.cc b/runtime/thread_pool.cc
index 2a69bc6..28fc59c 100644
--- a/runtime/thread_pool.cc
+++ b/runtime/thread_pool.cc
@@ -51,7 +51,6 @@
                                 stack_size,
                                 PROT_READ | PROT_WRITE,
                                 /* low_4gb */ false,
-                                /* reuse */ false,
                                 &error_msg);
   CHECK(stack_.IsValid()) << error_msg;
   CHECK_ALIGNED(stack_.Begin(), kPageSize);