Separate Malloc and MemMap ArenaPools

Make ArenaPool an abstract base class and split the implementations
into MallocArenaPool and MemMapArenaPool, each in its own file.  This
frees arena_allocator of MemMap, Mutex, etc., in preparation for moving
the remaining collections out of runtime/base to libartbase/base.
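
For illustration only (not part of this change): a call site now names
the concrete pool type directly.  Minimal sketch using the constructors
declared in the new headers; MakePool, the use_malloc flag, and the
"Example" pool name are hypothetical stand-ins (cf. runtime.cc below):

    #include <memory>

    #include "base/arena_allocator.h"
    #include "base/malloc_arena_pool.h"
    #include "base/mem_map_arena_pool.h"

    // Hypothetical helper: choose the pool implementation up front.
    std::unique_ptr<ArenaPool> MakePool(bool use_malloc) {
      if (use_malloc) {
        // Malloc-backed arenas are faster to allocate; TrimMaps() is a nop.
        return std::unique_ptr<ArenaPool>(new MallocArenaPool());
      }
      // MemMap-backed arenas can be trimmed via madvise and support low_4gb.
      return std::unique_ptr<ArenaPool>(new MemMapArenaPool(/* low_4gb */ false, "Example"));
    }

    std::unique_ptr<ArenaPool> pool = MakePool(/* use_malloc */ true);
    ArenaAllocator allocator(pool.get());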

Bug: 22322814
Test: make -j 50 test-art-host
      build and boot

Change-Id: Ief84dcbfb749165d9bc82000c6b8f96f93052422
diff --git a/compiler/exception_test.cc b/compiler/exception_test.cc
index f582341..c139fcf 100644
--- a/compiler/exception_test.cc
+++ b/compiler/exception_test.cc
@@ -20,6 +20,7 @@
 #include "base/callee_save_type.h"
 #include "base/enums.h"
 #include "base/leb128.h"
+#include "base/malloc_arena_pool.h"
 #include "class_linker.h"
 #include "common_runtime_test.h"
 #include "dex/code_item_accessors-inl.h"
@@ -67,7 +68,7 @@
       fake_code_.push_back(0x70 | i);
     }
 
-    ArenaPool pool;
+    MallocArenaPool pool;
     ArenaStack arena_stack(&pool);
     ScopedArenaAllocator allocator(&arena_stack);
     StackMapStream stack_maps(&allocator, kRuntimeISA);
diff --git a/compiler/jni/jni_cfi_test.cc b/compiler/jni/jni_cfi_test.cc
index 236b5c0..11b0e2b 100644
--- a/compiler/jni/jni_cfi_test.cc
+++ b/compiler/jni/jni_cfi_test.cc
@@ -20,6 +20,7 @@
 #include "arch/instruction_set.h"
 #include "base/arena_allocator.h"
 #include "base/enums.h"
+#include "base/malloc_arena_pool.h"
 #include "cfi_test.h"
 #include "gtest/gtest.h"
 #include "jni/quick/calling_convention.h"
@@ -61,7 +62,7 @@
     const bool is_synchronized = false;
     const char* shorty = "IIFII";
 
-    ArenaPool pool;
+    MallocArenaPool pool;
     ArenaAllocator allocator(&pool);
 
     std::unique_ptr<JniCallingConvention> jni_conv(
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index d001cfe..9ccdff3 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -27,6 +27,7 @@
 #include "base/enums.h"
 #include "base/logging.h"  // For VLOG.
 #include "base/macros.h"
+#include "base/malloc_arena_pool.h"
 #include "base/utils.h"
 #include "calling_convention.h"
 #include "class_linker.h"
@@ -174,7 +175,7 @@
     }
   }
 
-  ArenaPool pool;
+  MallocArenaPool pool;
   ArenaAllocator allocator(&pool);
 
   // Calling conventions used to iterate over parameters to method
diff --git a/compiler/linker/arm/relative_patcher_thumb2.cc b/compiler/linker/arm/relative_patcher_thumb2.cc
index 7875517..0056c50 100644
--- a/compiler/linker/arm/relative_patcher_thumb2.cc
+++ b/compiler/linker/arm/relative_patcher_thumb2.cc
@@ -21,6 +21,7 @@
 #include "arch/arm/asm_support_arm.h"
 #include "art_method.h"
 #include "base/bit_utils.h"
+#include "base/malloc_arena_pool.h"
 #include "compiled_method.h"
 #include "entrypoints/quick/quick_entrypoints_enum.h"
 #include "linker/linker_patch.h"
@@ -355,7 +356,7 @@
 }
 
 std::vector<uint8_t> Thumb2RelativePatcher::CompileThunk(const ThunkKey& key) {
-  ArenaPool pool;
+  MallocArenaPool pool;
   ArenaAllocator allocator(&pool);
   arm::ArmVIXLAssembler assembler(&allocator);
 
diff --git a/compiler/linker/arm64/relative_patcher_arm64.cc b/compiler/linker/arm64/relative_patcher_arm64.cc
index b268204..4bfe99b 100644
--- a/compiler/linker/arm64/relative_patcher_arm64.cc
+++ b/compiler/linker/arm64/relative_patcher_arm64.cc
@@ -20,6 +20,7 @@
 #include "arch/arm64/instruction_set_features_arm64.h"
 #include "art_method.h"
 #include "base/bit_utils.h"
+#include "base/malloc_arena_pool.h"
 #include "compiled_method-inl.h"
 #include "driver/compiler_driver.h"
 #include "entrypoints/quick/quick_entrypoints_enum.h"
@@ -511,7 +512,7 @@
 }
 
 std::vector<uint8_t> Arm64RelativePatcher::CompileThunk(const ThunkKey& key) {
-  ArenaPool pool;
+  MallocArenaPool pool;
   ArenaAllocator allocator(&pool);
   arm64::Arm64Assembler assembler(&allocator);
 
diff --git a/compiler/optimizing/optimizing_unit_test.h b/compiler/optimizing/optimizing_unit_test.h
index 6dcbadb..a9bc566 100644
--- a/compiler/optimizing/optimizing_unit_test.h
+++ b/compiler/optimizing/optimizing_unit_test.h
@@ -20,6 +20,7 @@
 #include <memory>
 #include <vector>
 
+#include "base/malloc_arena_pool.h"
 #include "base/scoped_arena_allocator.h"
 #include "builder.h"
 #include "common_compiler_test.h"
@@ -97,7 +98,7 @@
   ScopedArenaAllocator* GetScopedAllocator() { return &scoped_allocator_; }
 
  private:
-  ArenaPool pool_;
+  MallocArenaPool pool_;
   ArenaAllocator allocator_;
   ArenaStack arena_stack_;
   ScopedArenaAllocator scoped_allocator_;
diff --git a/compiler/optimizing/parallel_move_test.cc b/compiler/optimizing/parallel_move_test.cc
index cb87cab..be35201 100644
--- a/compiler/optimizing/parallel_move_test.cc
+++ b/compiler/optimizing/parallel_move_test.cc
@@ -15,6 +15,7 @@
  */
 
 #include "base/arena_allocator.h"
+#include "base/malloc_arena_pool.h"
 #include "nodes.h"
 #include "parallel_move_resolver.h"
 
@@ -180,7 +181,7 @@
 
 
 TYPED_TEST(ParallelMoveTest, Dependency) {
-  ArenaPool pool;
+  MallocArenaPool pool;
   ArenaAllocator allocator(&pool);
 
   {
@@ -207,7 +208,7 @@
 }
 
 TYPED_TEST(ParallelMoveTest, Cycle) {
-  ArenaPool pool;
+  MallocArenaPool pool;
   ArenaAllocator allocator(&pool);
 
   {
@@ -257,7 +258,7 @@
 }
 
 TYPED_TEST(ParallelMoveTest, ConstantLast) {
-  ArenaPool pool;
+  MallocArenaPool pool;
   ArenaAllocator allocator(&pool);
   TypeParam resolver(&allocator);
   HParallelMove* moves = new (&allocator) HParallelMove(&allocator);
@@ -276,7 +277,7 @@
 }
 
 TYPED_TEST(ParallelMoveTest, Pairs) {
-  ArenaPool pool;
+  MallocArenaPool pool;
   ArenaAllocator allocator(&pool);
 
   {
@@ -453,7 +454,7 @@
 }
 
 TYPED_TEST(ParallelMoveTest, MultiCycles) {
-  ArenaPool pool;
+  MallocArenaPool pool;
   ArenaAllocator allocator(&pool);
 
   {
@@ -551,7 +552,7 @@
 
 // Test that we do 64bits moves before 32bits moves.
 TYPED_TEST(ParallelMoveTest, CyclesWith64BitsMoves) {
-  ArenaPool pool;
+  MallocArenaPool pool;
   ArenaAllocator allocator(&pool);
 
   {
@@ -610,7 +611,7 @@
 }
 
 TYPED_TEST(ParallelMoveTest, CyclesWith64BitsMoves2) {
-  ArenaPool pool;
+  MallocArenaPool pool;
   ArenaAllocator allocator(&pool);
 
   {
diff --git a/compiler/optimizing/stack_map_test.cc b/compiler/optimizing/stack_map_test.cc
index 7e517f3..e36c592 100644
--- a/compiler/optimizing/stack_map_test.cc
+++ b/compiler/optimizing/stack_map_test.cc
@@ -18,6 +18,7 @@
 
 #include "art_method.h"
 #include "base/arena_bit_vector.h"
+#include "base/malloc_arena_pool.h"
 #include "stack_map_stream.h"
 
 #include "gtest/gtest.h"
@@ -46,7 +47,7 @@
 using Kind = DexRegisterLocation::Kind;
 
 TEST(StackMapTest, Test1) {
-  ArenaPool pool;
+  MallocArenaPool pool;
   ArenaStack arena_stack(&pool);
   ScopedArenaAllocator allocator(&arena_stack);
   StackMapStream stream(&allocator, kRuntimeISA);
@@ -128,7 +129,7 @@
 }
 
 TEST(StackMapTest, Test2) {
-  ArenaPool pool;
+  MallocArenaPool pool;
   ArenaStack arena_stack(&pool);
   ScopedArenaAllocator allocator(&arena_stack);
   StackMapStream stream(&allocator, kRuntimeISA);
@@ -413,7 +414,7 @@
 }
 
 TEST(StackMapTest, TestDeduplicateInlineInfoDexRegisterMap) {
-  ArenaPool pool;
+  MallocArenaPool pool;
   ArenaStack arena_stack(&pool);
   ScopedArenaAllocator allocator(&arena_stack);
   StackMapStream stream(&allocator, kRuntimeISA);
@@ -508,7 +509,7 @@
 }
 
 TEST(StackMapTest, TestNonLiveDexRegisters) {
-  ArenaPool pool;
+  MallocArenaPool pool;
   ArenaStack arena_stack(&pool);
   ScopedArenaAllocator allocator(&arena_stack);
   StackMapStream stream(&allocator, kRuntimeISA);
@@ -588,7 +589,7 @@
 // StackMap::kNoDexRegisterMapSmallEncoding, and ensure we do
 // not treat it as kNoDexRegisterMap.
 TEST(StackMapTest, DexRegisterMapOffsetOverflow) {
-  ArenaPool pool;
+  MallocArenaPool pool;
   ArenaStack arena_stack(&pool);
   ScopedArenaAllocator allocator(&arena_stack);
   StackMapStream stream(&allocator, kRuntimeISA);
@@ -652,7 +653,7 @@
 }
 
 TEST(StackMapTest, TestShareDexRegisterMap) {
-  ArenaPool pool;
+  MallocArenaPool pool;
   ArenaStack arena_stack(&pool);
   ScopedArenaAllocator allocator(&arena_stack);
   StackMapStream stream(&allocator, kRuntimeISA);
@@ -711,7 +712,7 @@
 }
 
 TEST(StackMapTest, TestNoDexRegisterMap) {
-  ArenaPool pool;
+  MallocArenaPool pool;
   ArenaStack arena_stack(&pool);
   ScopedArenaAllocator allocator(&arena_stack);
   StackMapStream stream(&allocator, kRuntimeISA);
@@ -761,7 +762,7 @@
 }
 
 TEST(StackMapTest, InlineTest) {
-  ArenaPool pool;
+  MallocArenaPool pool;
   ArenaStack arena_stack(&pool);
   ScopedArenaAllocator allocator(&arena_stack);
   StackMapStream stream(&allocator, kRuntimeISA);
@@ -949,7 +950,7 @@
 }
 
 TEST(StackMapTest, TestDeduplicateStackMask) {
-  ArenaPool pool;
+  MallocArenaPool pool;
   ArenaStack arena_stack(&pool);
   ScopedArenaAllocator allocator(&arena_stack);
   StackMapStream stream(&allocator, kRuntimeISA);
@@ -978,7 +979,7 @@
 }
 
 TEST(StackMapTest, TestInvokeInfo) {
-  ArenaPool pool;
+  MallocArenaPool pool;
   ArenaStack arena_stack(&pool);
   ScopedArenaAllocator allocator(&arena_stack);
   StackMapStream stream(&allocator, kRuntimeISA);
diff --git a/compiler/trampolines/trampoline_compiler.cc b/compiler/trampolines/trampoline_compiler.cc
index 921d401..57360e7 100644
--- a/compiler/trampolines/trampoline_compiler.cc
+++ b/compiler/trampolines/trampoline_compiler.cc
@@ -17,6 +17,7 @@
 #include "trampoline_compiler.h"
 
 #include "base/arena_allocator.h"
+#include "base/malloc_arena_pool.h"
 #include "jni_env_ext.h"
 
 #ifdef ART_ENABLE_CODEGEN_arm
@@ -243,7 +244,7 @@
 std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline64(InstructionSet isa,
                                                                EntryPointCallingConvention abi,
                                                                ThreadOffset64 offset) {
-  ArenaPool pool;
+  MallocArenaPool pool;
   ArenaAllocator allocator(&pool);
   switch (isa) {
 #ifdef ART_ENABLE_CODEGEN_arm64
@@ -269,7 +270,7 @@
 std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline32(InstructionSet isa,
                                                                EntryPointCallingConvention abi,
                                                                ThreadOffset32 offset) {
-  ArenaPool pool;
+  MallocArenaPool pool;
   ArenaAllocator allocator(&pool);
   switch (isa) {
 #ifdef ART_ENABLE_CODEGEN_arm
diff --git a/compiler/utils/assembler_test.h b/compiler/utils/assembler_test.h
index 0cb8bbb..7c800b3 100644
--- a/compiler/utils/assembler_test.h
+++ b/compiler/utils/assembler_test.h
@@ -26,6 +26,7 @@
 #include <fstream>
 #include <iterator>
 
+#include "base/malloc_arena_pool.h"
 #include "assembler_test_base.h"
 #include "common_runtime_test.h"  // For ScratchFile
 
@@ -1606,7 +1607,7 @@
 
   static constexpr size_t kWarnManyCombinationsThreshold = 500;
 
-  ArenaPool pool_;
+  MallocArenaPool pool_;
   std::unique_ptr<ArenaAllocator> allocator_;
   std::unique_ptr<Ass> assembler_;
   std::unique_ptr<AssemblerTestInfrastructure> test_helper_;
diff --git a/compiler/utils/assembler_thumb_test.cc b/compiler/utils/assembler_thumb_test.cc
index 655d17d..053e202 100644
--- a/compiler/utils/assembler_thumb_test.cc
+++ b/compiler/utils/assembler_thumb_test.cc
@@ -27,6 +27,7 @@
 #include "utils/arm/jni_macro_assembler_arm_vixl.h"
 
 #include "base/hex_dump.h"
+#include "base/malloc_arena_pool.h"
 #include "common_runtime_test.h"
 
 namespace art {
@@ -169,7 +170,7 @@
  public:
   ArmVIXLAssemblerTest() : pool(), allocator(&pool), assembler(&allocator) { }
 
-  ArenaPool pool;
+  MallocArenaPool pool;
   ArenaAllocator allocator;
   ArmVIXLJNIMacroAssembler assembler;
 };
diff --git a/compiler/utils/jni_macro_assembler_test.h b/compiler/utils/jni_macro_assembler_test.h
index 1aefc84..b70c18b 100644
--- a/compiler/utils/jni_macro_assembler_test.h
+++ b/compiler/utils/jni_macro_assembler_test.h
@@ -20,6 +20,7 @@
 #include "jni_macro_assembler.h"
 
 #include "assembler_test_base.h"
+#include "base/malloc_arena_pool.h"
 #include "common_runtime_test.h"  // For ScratchFile
 
 #include <sys/stat.h>
@@ -139,7 +140,7 @@
     test_helper_->Driver(*data, assembly_text, test_name);
   }
 
-  ArenaPool pool_;
+  MallocArenaPool pool_;
   std::unique_ptr<ArenaAllocator> allocator_;
   std::unique_ptr<Ass> assembler_;
   std::unique_ptr<AssemblerTestInfrastructure> test_helper_;
diff --git a/compiler/utils/x86/assembler_x86_test.cc b/compiler/utils/x86/assembler_x86_test.cc
index 8f72db7..cd007b3 100644
--- a/compiler/utils/x86/assembler_x86_test.cc
+++ b/compiler/utils/x86/assembler_x86_test.cc
@@ -17,13 +17,14 @@
 #include "assembler_x86.h"
 
 #include "base/arena_allocator.h"
+#include "base/malloc_arena_pool.h"
 #include "base/stl_util.h"
 #include "utils/assembler_test.h"
 
 namespace art {
 
 TEST(AssemblerX86, CreateBuffer) {
-  ArenaPool pool;
+  MallocArenaPool pool;
   ArenaAllocator allocator(&pool);
   AssemblerBuffer buffer(&allocator);
   AssemblerBuffer::EnsureCapacity ensured(&buffer);
diff --git a/compiler/utils/x86_64/assembler_x86_64_test.cc b/compiler/utils/x86_64/assembler_x86_64_test.cc
index 104e215..0589df5 100644
--- a/compiler/utils/x86_64/assembler_x86_64_test.cc
+++ b/compiler/utils/x86_64/assembler_x86_64_test.cc
@@ -21,6 +21,7 @@
 #include <random>
 
 #include "base/bit_utils.h"
+#include "base/malloc_arena_pool.h"
 #include "base/stl_util.h"
 #include "jni_macro_assembler_x86_64.h"
 #include "utils/assembler_test.h"
@@ -29,7 +30,7 @@
 namespace art {
 
 TEST(AssemblerX86_64, CreateBuffer) {
-  ArenaPool pool;
+  MallocArenaPool pool;
   ArenaAllocator allocator(&pool);
   AssemblerBuffer buffer(&allocator);
   AssemblerBuffer::EnsureCapacity ensured(&buffer);
diff --git a/runtime/Android.bp b/runtime/Android.bp
index c0f1c36..b380dab 100644
--- a/runtime/Android.bp
+++ b/runtime/Android.bp
@@ -35,6 +35,8 @@
         "base/arena_allocator.cc",
         "base/arena_bit_vector.cc",
         "base/file_utils.cc",
+        "base/malloc_arena_pool.cc",
+        "base/mem_map_arena_pool.cc",
         "base/mutex.cc",
         "base/quasi_atomic.cc",
         "base/scoped_arena_allocator.cc",
diff --git a/runtime/base/arena_allocator.cc b/runtime/base/arena_allocator.cc
index fe0f876..348a812 100644
--- a/runtime/base/arena_allocator.cc
+++ b/runtime/base/arena_allocator.cc
@@ -25,11 +25,6 @@
 
 #include <android-base/logging.h>
 
-#include "base/systrace.h"
-#include "mem_map.h"
-#include "mutex.h"
-#include "thread-current-inl.h"
-
 namespace art {
 
 constexpr size_t kMemoryToolRedZoneBytes = 8;
@@ -190,194 +185,6 @@
 Arena::Arena() : bytes_allocated_(0), memory_(nullptr), size_(0), next_(nullptr) {
 }
 
-class MallocArena FINAL : public Arena {
- public:
-  explicit MallocArena(size_t size = arena_allocator::kArenaDefaultSize);
-  virtual ~MallocArena();
- private:
-  static constexpr size_t RequiredOverallocation() {
-    return (alignof(std::max_align_t) < ArenaAllocator::kArenaAlignment)
-        ? ArenaAllocator::kArenaAlignment - alignof(std::max_align_t)
-        : 0u;
-  }
-
-  uint8_t* unaligned_memory_;
-};
-
-MallocArena::MallocArena(size_t size) {
-  // We need to guarantee kArenaAlignment aligned allocation for the new arena.
-  // TODO: Use std::aligned_alloc() when it becomes available with C++17.
-  constexpr size_t overallocation = RequiredOverallocation();
-  unaligned_memory_ = reinterpret_cast<uint8_t*>(calloc(1, size + overallocation));
-  CHECK(unaligned_memory_ != nullptr);  // Abort on OOM.
-  DCHECK_ALIGNED(unaligned_memory_, alignof(std::max_align_t));
-  if (overallocation == 0u) {
-    memory_ = unaligned_memory_;
-  } else {
-    memory_ = AlignUp(unaligned_memory_, ArenaAllocator::kArenaAlignment);
-    if (UNLIKELY(RUNNING_ON_MEMORY_TOOL > 0)) {
-      size_t head = memory_ - unaligned_memory_;
-      size_t tail = overallocation - head;
-      MEMORY_TOOL_MAKE_NOACCESS(unaligned_memory_, head);
-      MEMORY_TOOL_MAKE_NOACCESS(memory_ + size, tail);
-    }
-  }
-  DCHECK_ALIGNED(memory_, ArenaAllocator::kArenaAlignment);
-  size_ = size;
-}
-
-MallocArena::~MallocArena() {
-  constexpr size_t overallocation = RequiredOverallocation();
-  if (overallocation != 0u && UNLIKELY(RUNNING_ON_MEMORY_TOOL > 0)) {
-    size_t head = memory_ - unaligned_memory_;
-    size_t tail = overallocation - head;
-    MEMORY_TOOL_MAKE_UNDEFINED(unaligned_memory_, head);
-    MEMORY_TOOL_MAKE_UNDEFINED(memory_ + size_, tail);
-  }
-  free(reinterpret_cast<void*>(unaligned_memory_));
-}
-
-class MemMapArena FINAL : public Arena {
- public:
-  MemMapArena(size_t size, bool low_4gb, const char* name);
-  virtual ~MemMapArena();
-  void Release() OVERRIDE;
-
- private:
-  std::unique_ptr<MemMap> map_;
-};
-
-MemMapArena::MemMapArena(size_t size, bool low_4gb, const char* name) {
-  // Round up to a full page as that's the smallest unit of allocation for mmap()
-  // and we want to be able to use all memory that we actually allocate.
-  size = RoundUp(size, kPageSize);
-  std::string error_msg;
-  map_.reset(MemMap::MapAnonymous(
-      name, nullptr, size, PROT_READ | PROT_WRITE, low_4gb, false, &error_msg));
-  CHECK(map_.get() != nullptr) << error_msg;
-  memory_ = map_->Begin();
-  static_assert(ArenaAllocator::kArenaAlignment <= kPageSize,
-                "Arena should not need stronger alignment than kPageSize.");
-  DCHECK_ALIGNED(memory_, ArenaAllocator::kArenaAlignment);
-  size_ = map_->Size();
-}
-
-MemMapArena::~MemMapArena() {
-  // Destroys MemMap via std::unique_ptr<>.
-}
-
-void MemMapArena::Release() {
-  if (bytes_allocated_ > 0) {
-    map_->MadviseDontNeedAndZero();
-    bytes_allocated_ = 0;
-  }
-}
-
-void Arena::Reset() {
-  if (bytes_allocated_ > 0) {
-    memset(Begin(), 0, bytes_allocated_);
-    bytes_allocated_ = 0;
-  }
-}
-
-ArenaPool::ArenaPool(bool use_malloc, bool low_4gb, const char* name)
-    : use_malloc_(use_malloc),
-      lock_("Arena pool lock", kArenaPoolLock),
-      free_arenas_(nullptr),
-      low_4gb_(low_4gb),
-      name_(name) {
-  if (low_4gb) {
-    CHECK(!use_malloc) << "low4gb must use map implementation";
-  }
-  if (!use_malloc) {
-    MemMap::Init();
-  }
-}
-
-ArenaPool::~ArenaPool() {
-  ReclaimMemory();
-}
-
-void ArenaPool::ReclaimMemory() {
-  while (free_arenas_ != nullptr) {
-    Arena* arena = free_arenas_;
-    free_arenas_ = free_arenas_->next_;
-    delete arena;
-  }
-}
-
-void ArenaPool::LockReclaimMemory() {
-  MutexLock lock(Thread::Current(), lock_);
-  ReclaimMemory();
-}
-
-Arena* ArenaPool::AllocArena(size_t size) {
-  Thread* self = Thread::Current();
-  Arena* ret = nullptr;
-  {
-    MutexLock lock(self, lock_);
-    if (free_arenas_ != nullptr && LIKELY(free_arenas_->Size() >= size)) {
-      ret = free_arenas_;
-      free_arenas_ = free_arenas_->next_;
-    }
-  }
-  if (ret == nullptr) {
-    ret = use_malloc_ ? static_cast<Arena*>(new MallocArena(size)) :
-        new MemMapArena(size, low_4gb_, name_);
-  }
-  ret->Reset();
-  return ret;
-}
-
-void ArenaPool::TrimMaps() {
-  if (!use_malloc_) {
-    ScopedTrace trace(__PRETTY_FUNCTION__);
-    // Doesn't work for malloc.
-    MutexLock lock(Thread::Current(), lock_);
-    for (Arena* arena = free_arenas_; arena != nullptr; arena = arena->next_) {
-      arena->Release();
-    }
-  }
-}
-
-size_t ArenaPool::GetBytesAllocated() const {
-  size_t total = 0;
-  MutexLock lock(Thread::Current(), lock_);
-  for (Arena* arena = free_arenas_; arena != nullptr; arena = arena->next_) {
-    total += arena->GetBytesAllocated();
-  }
-  return total;
-}
-
-void ArenaPool::FreeArenaChain(Arena* first) {
-  if (UNLIKELY(RUNNING_ON_MEMORY_TOOL > 0)) {
-    for (Arena* arena = first; arena != nullptr; arena = arena->next_) {
-      MEMORY_TOOL_MAKE_UNDEFINED(arena->memory_, arena->bytes_allocated_);
-    }
-  }
-
-  if (arena_allocator::kArenaAllocatorPreciseTracking) {
-    // Do not reuse arenas when tracking.
-    while (first != nullptr) {
-      Arena* next = first->next_;
-      delete first;
-      first = next;
-    }
-    return;
-  }
-
-  if (first != nullptr) {
-    Arena* last = first;
-    while (last->next_ != nullptr) {
-      last = last->next_;
-    }
-    Thread* self = Thread::Current();
-    MutexLock lock(self, lock_);
-    last->next_ = free_arenas_;
-    free_arenas_ = first;
-  }
-}
-
 size_t ArenaAllocator::BytesAllocated() const {
   return ArenaAllocatorStats::BytesAllocated();
 }
diff --git a/runtime/base/arena_allocator.h b/runtime/base/arena_allocator.h
index 688f01b..f59cfdd 100644
--- a/runtime/base/arena_allocator.h
+++ b/runtime/base/arena_allocator.h
@@ -25,7 +25,6 @@
 #include "base/dchecked_vector.h"
 #include "base/macros.h"
 #include "base/memory_tool.h"
-#include "mutex.h"
 
 namespace art {
 
@@ -236,7 +235,8 @@
   uint8_t* memory_;
   size_t size_;
   Arena* next_;
-  friend class ArenaPool;
+  friend class MallocArenaPool;
+  friend class MemMapArenaPool;
   friend class ArenaAllocator;
   friend class ArenaStack;
   friend class ScopedArenaAllocator;
@@ -250,25 +250,20 @@
 
 class ArenaPool {
  public:
-  explicit ArenaPool(bool use_malloc = true,
-                     bool low_4gb = false,
-                     const char* name = "LinearAlloc");
-  ~ArenaPool();
-  Arena* AllocArena(size_t size) REQUIRES(!lock_);
-  void FreeArenaChain(Arena* first) REQUIRES(!lock_);
-  size_t GetBytesAllocated() const REQUIRES(!lock_);
-  void ReclaimMemory() NO_THREAD_SAFETY_ANALYSIS;
-  void LockReclaimMemory() REQUIRES(!lock_);
-  // Trim the maps in arenas by madvising, used by JIT to reduce memory usage. This only works
-  // use_malloc is false.
-  void TrimMaps() REQUIRES(!lock_);
+  virtual ~ArenaPool() = default;
+
+  virtual Arena* AllocArena(size_t size) = 0;
+  virtual void FreeArenaChain(Arena* first) = 0;
+  virtual size_t GetBytesAllocated() const = 0;
+  virtual void ReclaimMemory() = 0;
+  virtual void LockReclaimMemory() = 0;
+  // Trim the maps in arenas by madvising, used by JIT to reduce memory usage.
+  virtual void TrimMaps() = 0;
+
+ protected:
+  ArenaPool() = default;
 
  private:
-  const bool use_malloc_;
-  mutable Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
-  Arena* free_arenas_ GUARDED_BY(lock_);
-  const bool low_4gb_;
-  const char* name_;
   DISALLOW_COPY_AND_ASSIGN(ArenaPool);
 };
 
diff --git a/runtime/base/arena_allocator_test.cc b/runtime/base/arena_allocator_test.cc
index 68e26af..68e99f4 100644
--- a/runtime/base/arena_allocator_test.cc
+++ b/runtime/base/arena_allocator_test.cc
@@ -16,6 +16,7 @@
 
 #include "base/arena_allocator-inl.h"
 #include "base/arena_bit_vector.h"
+#include "base/malloc_arena_pool.h"
 #include "base/memory_tool.h"
 #include "gtest/gtest.h"
 
@@ -33,7 +34,7 @@
 };
 
 TEST_F(ArenaAllocatorTest, Test) {
-  ArenaPool pool;
+  MallocArenaPool pool;
   ArenaAllocator allocator(&pool);
   ArenaBitVector bv(&allocator, 10, true);
   bv.SetBit(5);
@@ -44,7 +45,7 @@
 
 TEST_F(ArenaAllocatorTest, MakeDefined) {
   // Regression test to make sure we mark the allocated area defined.
-  ArenaPool pool;
+  MallocArenaPool pool;
   static constexpr size_t kSmallArraySize = 10;
   static constexpr size_t kLargeArraySize = 50;
   uint32_t* small_array;
@@ -71,7 +72,7 @@
   }
 
   {
-    ArenaPool pool;
+    MallocArenaPool pool;
     ArenaAllocator allocator(&pool);
     // Note: Leaving some space for memory tool red zones.
     void* alloc1 = allocator.Alloc(arena_allocator::kArenaDefaultSize * 5 / 8);
@@ -80,7 +81,7 @@
     ASSERT_EQ(1u, NumberOfArenas(&allocator));
   }
   {
-    ArenaPool pool;
+    MallocArenaPool pool;
     ArenaAllocator allocator(&pool);
     void* alloc1 = allocator.Alloc(arena_allocator::kArenaDefaultSize * 13 / 16);
     void* alloc2 = allocator.Alloc(arena_allocator::kArenaDefaultSize * 11 / 16);
@@ -92,7 +93,7 @@
     ASSERT_EQ(3u, NumberOfArenas(&allocator));
   }
   {
-    ArenaPool pool;
+    MallocArenaPool pool;
     ArenaAllocator allocator(&pool);
     void* alloc1 = allocator.Alloc(arena_allocator::kArenaDefaultSize * 13 / 16);
     void* alloc2 = allocator.Alloc(arena_allocator::kArenaDefaultSize * 9 / 16);
@@ -105,7 +106,7 @@
     ASSERT_EQ(2u, NumberOfArenas(&allocator));
   }
   {
-    ArenaPool pool;
+    MallocArenaPool pool;
     ArenaAllocator allocator(&pool);
     void* alloc1 = allocator.Alloc(arena_allocator::kArenaDefaultSize * 9 / 16);
     void* alloc2 = allocator.Alloc(arena_allocator::kArenaDefaultSize * 13 / 16);
@@ -118,7 +119,7 @@
     ASSERT_EQ(2u, NumberOfArenas(&allocator));
   }
   {
-    ArenaPool pool;
+    MallocArenaPool pool;
     ArenaAllocator allocator(&pool);
     // Note: Leaving some space for memory tool red zones.
     for (size_t i = 0; i != 15; ++i) {
@@ -133,7 +134,7 @@
 }
 
 TEST_F(ArenaAllocatorTest, AllocAlignment) {
-  ArenaPool pool;
+  MallocArenaPool pool;
   ArenaAllocator allocator(&pool);
   for (size_t iterations = 0; iterations <= 10; ++iterations) {
     for (size_t size = 1; size <= ArenaAllocator::kAlignment + 1; ++size) {
@@ -153,7 +154,7 @@
 
   {
     // Case 1: small aligned allocation, aligned extend inside arena.
-    ArenaPool pool;
+    MallocArenaPool pool;
     ArenaAllocator allocator(&pool);
 
     const size_t original_size = ArenaAllocator::kAlignment * 2;
@@ -166,7 +167,7 @@
 
   {
     // Case 2: small aligned allocation, non-aligned extend inside arena.
-    ArenaPool pool;
+    MallocArenaPool pool;
     ArenaAllocator allocator(&pool);
 
     const size_t original_size = ArenaAllocator::kAlignment * 2;
@@ -179,7 +180,7 @@
 
   {
     // Case 3: small non-aligned allocation, aligned extend inside arena.
-    ArenaPool pool;
+    MallocArenaPool pool;
     ArenaAllocator allocator(&pool);
 
     const size_t original_size = ArenaAllocator::kAlignment * 2 + (ArenaAllocator::kAlignment / 2);
@@ -192,7 +193,7 @@
 
   {
     // Case 4: small non-aligned allocation, aligned non-extend inside arena.
-    ArenaPool pool;
+    MallocArenaPool pool;
     ArenaAllocator allocator(&pool);
 
     const size_t original_size = ArenaAllocator::kAlignment * 2 + (ArenaAllocator::kAlignment / 2);
@@ -208,7 +209,7 @@
 
   {
     // Case 5: large allocation, aligned extend into next arena.
-    ArenaPool pool;
+    MallocArenaPool pool;
     ArenaAllocator allocator(&pool);
 
     const size_t original_size = arena_allocator::kArenaDefaultSize -
@@ -222,7 +223,7 @@
 
   {
     // Case 6: large allocation, non-aligned extend into next arena.
-    ArenaPool pool;
+    MallocArenaPool pool;
     ArenaAllocator allocator(&pool);
 
     const size_t original_size = arena_allocator::kArenaDefaultSize -
@@ -241,7 +242,7 @@
 TEST_F(ArenaAllocatorTest, ReallocAlignment) {
   {
     // Case 1: small aligned allocation, aligned extend inside arena.
-    ArenaPool pool;
+    MallocArenaPool pool;
     ArenaAllocator allocator(&pool);
 
     const size_t original_size = ArenaAllocator::kAlignment * 2;
@@ -258,7 +259,7 @@
 
   {
     // Case 2: small aligned allocation, non-aligned extend inside arena.
-    ArenaPool pool;
+    MallocArenaPool pool;
     ArenaAllocator allocator(&pool);
 
     const size_t original_size = ArenaAllocator::kAlignment * 2;
@@ -275,7 +276,7 @@
 
   {
     // Case 3: small non-aligned allocation, aligned extend inside arena.
-    ArenaPool pool;
+    MallocArenaPool pool;
     ArenaAllocator allocator(&pool);
 
     const size_t original_size = ArenaAllocator::kAlignment * 2 + (ArenaAllocator::kAlignment / 2);
@@ -292,7 +293,7 @@
 
   {
     // Case 4: small non-aligned allocation, aligned non-extend inside arena.
-    ArenaPool pool;
+    MallocArenaPool pool;
     ArenaAllocator allocator(&pool);
 
     const size_t original_size = ArenaAllocator::kAlignment * 2 + (ArenaAllocator::kAlignment / 2);
@@ -312,7 +313,7 @@
 
   {
     // Case 5: large allocation, aligned extend into next arena.
-    ArenaPool pool;
+    MallocArenaPool pool;
     ArenaAllocator allocator(&pool);
 
     const size_t original_size = arena_allocator::kArenaDefaultSize -
@@ -330,7 +331,7 @@
 
   {
     // Case 6: large allocation, non-aligned extend into next arena.
-    ArenaPool pool;
+    MallocArenaPool pool;
     ArenaAllocator allocator(&pool);
 
     const size_t original_size = arena_allocator::kArenaDefaultSize -
diff --git a/runtime/base/malloc_arena_pool.cc b/runtime/base/malloc_arena_pool.cc
new file mode 100644
index 0000000..7df4aef
--- /dev/null
+++ b/runtime/base/malloc_arena_pool.cc
@@ -0,0 +1,162 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "malloc_arena_pool.h"
+
+#include <sys/mman.h>
+
+#include <algorithm>
+#include <cstddef>
+#include <iomanip>
+#include <numeric>
+
+#include <android-base/logging.h>
+#include "base/arena_allocator-inl.h"
+
+namespace art {
+
+class MallocArena FINAL : public Arena {
+ public:
+  explicit MallocArena(size_t size = arena_allocator::kArenaDefaultSize);
+  virtual ~MallocArena();
+ private:
+  static constexpr size_t RequiredOverallocation() {
+    return (alignof(std::max_align_t) < ArenaAllocator::kArenaAlignment)
+        ? ArenaAllocator::kArenaAlignment - alignof(std::max_align_t)
+        : 0u;
+  }
+
+  uint8_t* unaligned_memory_;
+};
+
+MallocArena::MallocArena(size_t size) {
+  // We need to guarantee kArenaAlignment aligned allocation for the new arena.
+  // TODO: Use std::aligned_alloc() when it becomes available with C++17.
+  constexpr size_t overallocation = RequiredOverallocation();
+  unaligned_memory_ = reinterpret_cast<uint8_t*>(calloc(1, size + overallocation));
+  CHECK(unaligned_memory_ != nullptr);  // Abort on OOM.
+  DCHECK_ALIGNED(unaligned_memory_, alignof(std::max_align_t));
+  if (overallocation == 0u) {
+    memory_ = unaligned_memory_;
+  } else {
+    memory_ = AlignUp(unaligned_memory_, ArenaAllocator::kArenaAlignment);
+    if (UNLIKELY(RUNNING_ON_MEMORY_TOOL > 0)) {
+      size_t head = memory_ - unaligned_memory_;
+      size_t tail = overallocation - head;
+      MEMORY_TOOL_MAKE_NOACCESS(unaligned_memory_, head);
+      MEMORY_TOOL_MAKE_NOACCESS(memory_ + size, tail);
+    }
+  }
+  DCHECK_ALIGNED(memory_, ArenaAllocator::kArenaAlignment);
+  size_ = size;
+}
+
+MallocArena::~MallocArena() {
+  constexpr size_t overallocation = RequiredOverallocation();
+  if (overallocation != 0u && UNLIKELY(RUNNING_ON_MEMORY_TOOL > 0)) {
+    size_t head = memory_ - unaligned_memory_;
+    size_t tail = overallocation - head;
+    MEMORY_TOOL_MAKE_UNDEFINED(unaligned_memory_, head);
+    MEMORY_TOOL_MAKE_UNDEFINED(memory_ + size_, tail);
+  }
+  free(reinterpret_cast<void*>(unaligned_memory_));
+}
+
+void Arena::Reset() {
+  if (bytes_allocated_ > 0) {
+    memset(Begin(), 0, bytes_allocated_);
+    bytes_allocated_ = 0;
+  }
+}
+
+MallocArenaPool::MallocArenaPool() : free_arenas_(nullptr) {
+}
+
+MallocArenaPool::~MallocArenaPool() {
+  ReclaimMemory();
+}
+
+void MallocArenaPool::ReclaimMemory() {
+  while (free_arenas_ != nullptr) {
+    Arena* arena = free_arenas_;
+    free_arenas_ = free_arenas_->next_;
+    delete arena;
+  }
+}
+
+void MallocArenaPool::LockReclaimMemory() {
+  std::lock_guard<std::mutex> lock(lock_);
+  ReclaimMemory();
+}
+
+Arena* MallocArenaPool::AllocArena(size_t size) {
+  Arena* ret = nullptr;
+  {
+    std::lock_guard<std::mutex> lock(lock_);
+    if (free_arenas_ != nullptr && LIKELY(free_arenas_->Size() >= size)) {
+      ret = free_arenas_;
+      free_arenas_ = free_arenas_->next_;
+    }
+  }
+  if (ret == nullptr) {
+    ret = new MallocArena(size);
+  }
+  ret->Reset();
+  return ret;
+}
+
+void MallocArenaPool::TrimMaps() {
+  // Nop: madvise cannot be used to trim malloc-backed arenas.
+}
+
+size_t MallocArenaPool::GetBytesAllocated() const {
+  size_t total = 0;
+  std::lock_guard<std::mutex> lock(lock_);
+  for (Arena* arena = free_arenas_; arena != nullptr; arena = arena->next_) {
+    total += arena->GetBytesAllocated();
+  }
+  return total;
+}
+
+void MallocArenaPool::FreeArenaChain(Arena* first) {
+  if (UNLIKELY(RUNNING_ON_MEMORY_TOOL > 0)) {
+    for (Arena* arena = first; arena != nullptr; arena = arena->next_) {
+      MEMORY_TOOL_MAKE_UNDEFINED(arena->memory_, arena->bytes_allocated_);
+    }
+  }
+
+  if (arena_allocator::kArenaAllocatorPreciseTracking) {
+    // Do not reuse arenas when tracking.
+    while (first != nullptr) {
+      Arena* next = first->next_;
+      delete first;
+      first = next;
+    }
+    return;
+  }
+
+  if (first != nullptr) {
+    Arena* last = first;
+    while (last->next_ != nullptr) {
+      last = last->next_;
+    }
+    std::lock_guard<std::mutex> lock(lock_);
+    last->next_ = free_arenas_;
+    free_arenas_ = first;
+  }
+}
+
+}  // namespace art
diff --git a/runtime/base/malloc_arena_pool.h b/runtime/base/malloc_arena_pool.h
new file mode 100644
index 0000000..9cd2572
--- /dev/null
+++ b/runtime/base/malloc_arena_pool.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_BASE_MALLOC_ARENA_POOL_H_
+#define ART_RUNTIME_BASE_MALLOC_ARENA_POOL_H_
+
+#include <mutex>
+
+#include "base/arena_allocator.h"
+
+namespace art {
+
+class MallocArenaPool FINAL : public ArenaPool {
+ public:
+  MallocArenaPool();
+  ~MallocArenaPool();
+  Arena* AllocArena(size_t size) OVERRIDE;
+  void FreeArenaChain(Arena* first) OVERRIDE;
+  size_t GetBytesAllocated() const OVERRIDE;
+  void ReclaimMemory() OVERRIDE;
+  void LockReclaimMemory() OVERRIDE;
+  // Is a nop for malloc pools.
+  void TrimMaps() OVERRIDE;
+
+ private:
+  Arena* free_arenas_;
+  // Use a std::mutex here as Arenas are at the bottom of the lock hierarchy when malloc is used.
+  mutable std::mutex lock_;
+
+  DISALLOW_COPY_AND_ASSIGN(MallocArenaPool);
+};
+
+}  // namespace art
+
+#endif  // ART_RUNTIME_BASE_MALLOC_ARENA_POOL_H_
diff --git a/runtime/base/mem_map_arena_pool.cc b/runtime/base/mem_map_arena_pool.cc
new file mode 100644
index 0000000..d5ea19b
--- /dev/null
+++ b/runtime/base/mem_map_arena_pool.cc
@@ -0,0 +1,155 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "mem_map_arena_pool.h"
+
+#include <sys/mman.h>
+
+#include <algorithm>
+#include <cstddef>
+#include <iomanip>
+#include <numeric>
+
+#include <android-base/logging.h>
+
+#include "base/arena_allocator-inl.h"
+#include "base/systrace.h"
+#include "mem_map.h"
+
+namespace art {
+
+class MemMapArena FINAL : public Arena {
+ public:
+  MemMapArena(size_t size, bool low_4gb, const char* name);
+  virtual ~MemMapArena();
+  void Release() OVERRIDE;
+
+ private:
+  std::unique_ptr<MemMap> map_;
+};
+
+MemMapArena::MemMapArena(size_t size, bool low_4gb, const char* name) {
+  // Round up to a full page as that's the smallest unit of allocation for mmap()
+  // and we want to be able to use all memory that we actually allocate.
+  size = RoundUp(size, kPageSize);
+  std::string error_msg;
+  map_.reset(MemMap::MapAnonymous(
+      name, nullptr, size, PROT_READ | PROT_WRITE, low_4gb, false, &error_msg));
+  CHECK(map_.get() != nullptr) << error_msg;
+  memory_ = map_->Begin();
+  static_assert(ArenaAllocator::kArenaAlignment <= kPageSize,
+                "Arena should not need stronger alignment than kPageSize.");
+  DCHECK_ALIGNED(memory_, ArenaAllocator::kArenaAlignment);
+  size_ = map_->Size();
+}
+
+MemMapArena::~MemMapArena() {
+  // Destroys MemMap via std::unique_ptr<>.
+}
+
+void MemMapArena::Release() {
+  if (bytes_allocated_ > 0) {
+    map_->MadviseDontNeedAndZero();
+    bytes_allocated_ = 0;
+  }
+}
+
+MemMapArenaPool::MemMapArenaPool(bool low_4gb, const char* name)
+    : low_4gb_(low_4gb),
+      name_(name),
+      free_arenas_(nullptr) {
+  MemMap::Init();
+}
+
+MemMapArenaPool::~MemMapArenaPool() {
+  ReclaimMemory();
+}
+
+void MemMapArenaPool::ReclaimMemory() {
+  while (free_arenas_ != nullptr) {
+    Arena* arena = free_arenas_;
+    free_arenas_ = free_arenas_->next_;
+    delete arena;
+  }
+}
+
+void MemMapArenaPool::LockReclaimMemory() {
+  std::lock_guard<std::mutex> lock(lock_);
+  ReclaimMemory();
+}
+
+Arena* MemMapArenaPool::AllocArena(size_t size) {
+  Arena* ret = nullptr;
+  {
+    std::lock_guard<std::mutex> lock(lock_);
+    if (free_arenas_ != nullptr && LIKELY(free_arenas_->Size() >= size)) {
+      ret = free_arenas_;
+      free_arenas_ = free_arenas_->next_;
+    }
+  }
+  if (ret == nullptr) {
+    ret = new MemMapArena(size, low_4gb_, name_);
+  }
+  ret->Reset();
+  return ret;
+}
+
+void MemMapArenaPool::TrimMaps() {
+  ScopedTrace trace(__PRETTY_FUNCTION__);
+  std::lock_guard<std::mutex> lock(lock_);
+  for (Arena* arena = free_arenas_; arena != nullptr; arena = arena->next_) {
+    arena->Release();
+  }
+}
+
+size_t MemMapArenaPool::GetBytesAllocated() const {
+  size_t total = 0;
+  std::lock_guard<std::mutex> lock(lock_);
+  for (Arena* arena = free_arenas_; arena != nullptr; arena = arena->next_) {
+    total += arena->GetBytesAllocated();
+  }
+  return total;
+}
+
+void MemMapArenaPool::FreeArenaChain(Arena* first) {
+  if (UNLIKELY(RUNNING_ON_MEMORY_TOOL > 0)) {
+    for (Arena* arena = first; arena != nullptr; arena = arena->next_) {
+      MEMORY_TOOL_MAKE_UNDEFINED(arena->memory_, arena->bytes_allocated_);
+    }
+  }
+
+  if (arena_allocator::kArenaAllocatorPreciseTracking) {
+    // Do not reuse arenas when tracking.
+    while (first != nullptr) {
+      Arena* next = first->next_;
+      delete first;
+      first = next;
+    }
+    return;
+  }
+
+  if (first != nullptr) {
+    Arena* last = first;
+    while (last->next_ != nullptr) {
+      last = last->next_;
+    }
+    std::lock_guard<std::mutex> lock(lock_);
+    last->next_ = free_arenas_;
+    free_arenas_ = first;
+  }
+}
+
+}  // namespace art
diff --git a/runtime/base/mem_map_arena_pool.h b/runtime/base/mem_map_arena_pool.h
new file mode 100644
index 0000000..24e150e
--- /dev/null
+++ b/runtime/base/mem_map_arena_pool.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_BASE_MEM_MAP_ARENA_POOL_H_
+#define ART_RUNTIME_BASE_MEM_MAP_ARENA_POOL_H_
+
+#include "base/arena_allocator.h"
+
+namespace art {
+
+class MemMapArenaPool FINAL : public ArenaPool {
+ public:
+  explicit MemMapArenaPool(bool low_4gb = false, const char* name = "LinearAlloc");
+  virtual ~MemMapArenaPool();
+  Arena* AllocArena(size_t size) OVERRIDE;
+  void FreeArenaChain(Arena* first) OVERRIDE;
+  size_t GetBytesAllocated() const OVERRIDE;
+  void ReclaimMemory() OVERRIDE;
+  void LockReclaimMemory() OVERRIDE;
+  // Trim the maps in arenas by madvising, used by JIT to reduce memory usage.
+  void TrimMaps() OVERRIDE;
+
+ private:
+  const bool low_4gb_;
+  const char* name_;
+  Arena* free_arenas_;
+  // Use a std::mutex here as Arenas are second-from-the-bottom of the lock hierarchy when
+  // using MemMaps, and MemMap itself uses a std::mutex scoped to a single allocate/free only.
+  mutable std::mutex lock_;
+
+  DISALLOW_COPY_AND_ASSIGN(MemMapArenaPool);
+};
+
+}  // namespace art
+
+#endif  // ART_RUNTIME_BASE_MEM_MAP_ARENA_POOL_H_
diff --git a/runtime/jit/profile_compilation_info.cc b/runtime/jit/profile_compilation_info.cc
index 1bbce4f..f4132c2 100644
--- a/runtime/jit/profile_compilation_info.cc
+++ b/runtime/jit/profile_compilation_info.cc
@@ -36,6 +36,7 @@
 #include "base/dumpable.h"
 #include "base/file_utils.h"
 #include "base/logging.h"  // For VLOG.
+#include "base/malloc_arena_pool.h"
 #include "base/mutex.h"
 #include "base/os.h"
 #include "base/safe_map.h"
@@ -90,7 +91,7 @@
 }
 
 ProfileCompilationInfo::ProfileCompilationInfo()
-    : default_arena_pool_(/*use_malloc*/true, /*low_4gb*/false, "ProfileCompilationInfo"),
+    : default_arena_pool_(),
       allocator_(&default_arena_pool_),
       info_(allocator_.Adapter(kArenaAllocProfile)),
       profile_key_map_(std::less<const std::string>(), allocator_.Adapter(kArenaAllocProfile)) {
diff --git a/runtime/jit/profile_compilation_info.h b/runtime/jit/profile_compilation_info.h
index 6c56db9..4ac8c61 100644
--- a/runtime/jit/profile_compilation_info.h
+++ b/runtime/jit/profile_compilation_info.h
@@ -23,6 +23,7 @@
 #include "base/arena_containers.h"
 #include "base/arena_object.h"
 #include "base/atomic.h"
+#include "base/malloc_arena_pool.h"
 #include "base/safe_map.h"
 #include "bit_memory_region.h"
 #include "dex/dex_cache_resolved_classes.h"
@@ -792,7 +793,7 @@
   friend class ProfileAssistantTest;
   friend class Dex2oatLayoutTest;
 
-  ArenaPool default_arena_pool_;
+  MallocArenaPool default_arena_pool_;
   ArenaAllocator allocator_;
 
   // Vector containing the actual profile info.
diff --git a/runtime/linear_alloc.h b/runtime/linear_alloc.h
index 384b2e3..87086f2 100644
--- a/runtime/linear_alloc.h
+++ b/runtime/linear_alloc.h
@@ -18,6 +18,7 @@
 #define ART_RUNTIME_LINEAR_ALLOC_H_
 
 #include "base/arena_allocator.h"
+#include "base/mutex.h"
 
 namespace art {
 
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index b80ce7d..4068158 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -62,6 +62,8 @@
 #include "base/dumpable.h"
 #include "base/enums.h"
 #include "base/file_utils.h"
+#include "base/malloc_arena_pool.h"
+#include "base/mem_map_arena_pool.h"
 #include "base/memory_tool.h"
 #include "base/mutex.h"
 #include "base/os.h"
@@ -1332,13 +1334,17 @@
   // Use MemMap arena pool for jit, malloc otherwise. Malloc arenas are faster to allocate but
   // can't be trimmed as easily.
   const bool use_malloc = IsAotCompiler();
-  arena_pool_.reset(new ArenaPool(use_malloc, /* low_4gb */ false));
-  jit_arena_pool_.reset(
-      new ArenaPool(/* use_malloc */ false, /* low_4gb */ false, "CompilerMetadata"));
+  if (use_malloc) {
+    arena_pool_.reset(new MallocArenaPool());
+    jit_arena_pool_.reset(new MallocArenaPool());
+  } else {
+    arena_pool_.reset(new MemMapArenaPool(/* low_4gb */ false));
+    jit_arena_pool_.reset(new MemMapArenaPool(/* low_4gb */ false, "CompilerMetadata"));
+  }
 
   if (IsAotCompiler() && Is64BitInstructionSet(kRuntimeISA)) {
     // 4gb, no malloc. Explanation in header.
-    low_4gb_arena_pool_.reset(new ArenaPool(/* use_malloc */ false, /* low_4gb */ true));
+    low_4gb_arena_pool_.reset(new MemMapArenaPool(/* low_4gb */ true));
   }
   linear_alloc_.reset(CreateLinearAlloc());
 
diff --git a/runtime/verifier/register_line.h b/runtime/verifier/register_line.h
index 18ad6b5..168eb7b 100644
--- a/runtime/verifier/register_line.h
+++ b/runtime/verifier/register_line.h
@@ -22,6 +22,7 @@
 
 #include <android-base/logging.h>
 
+#include "base/mutex.h"
 #include "base/safe_map.h"
 #include "base/scoped_arena_containers.h"