New arena memory allocator.

Previously we created a new arena for each method. The problem with
that is that every memory allocation then needs its own memset. This
can be avoided by starting out with arenas that contain all zeroed
memory and recycling them across methods. When the memory is given
back to the arena pool, a single memset zeroes out all of the memory
that was used.
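
Roughly, the intended lifecycle (a sketch using the types from the
diff below; 'n' is a hypothetical size):

  ArenaPool pool;                   // shared; owned by the CompilerDriver
  {
    ArenaAllocator arena(&pool);    // one allocator per compiled method
    void* p = arena.Alloc(n, ArenaAllocator::kAllocMisc);  // already zeroed
  }  // ~ArenaAllocator() hands its arenas back to the pool
  // The next AllocArena() Reset()s a recycled arena, zeroing only the
  // bytes the previous method actually used.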

Always inlined the fast path of the allocation code.
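
The fast path is just a bounds check plus a pointer bump (simplified
from Alloc() in arena_allocator.h below; stats and the OOM check are
omitted):

  bytes = (bytes + 3) & ~3;              // round up to a 4-byte boundary
  if (UNLIKELY(ptr_ + bytes > end_)) {
    ObtainNewArenaForAllocation(bytes);  // slow path stays out of line
  }
  uint8_t* ret = ptr_;
  ptr_ += bytes;
  return ret;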

Removed the "zero" parameter from NewMem (now renamed to Alloc) since
the new arena allocator always returns zeroed memory.
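
Call sites change mechanically, e.g.:

  before: arena_->NewMem(size, true, ArenaAllocator::kAllocMisc)
  after:  arena_->Alloc(size, ArenaAllocator::kAllocMisc)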

Host dex2oat time on target oat apks (2 samples each):
Before:
real	1m11.958s
user	4m34.020s
sys	1m28.570s

After:
real	1m9.690s
user	4m17.670s
sys	1m23.960s

Target device dex2oat samples (Mako, Thinkfree.apk):
Without new arena allocator:
0m26.47s real     0m54.60s user     0m25.85s system
0m25.91s real     0m54.39s user     0m26.69s system
0m26.61s real     0m53.77s user     0m27.35s system
0m26.33s real     0m54.90s user     0m25.30s system
0m26.34s real     0m53.94s user     0m27.23s system

With new arena allocator:
0m25.02s real     0m54.46s user     0m19.94s system
0m25.17s real     0m55.06s user     0m20.72s system
0m24.85s real     0m55.14s user     0m19.30s system
0m24.59s real     0m54.02s user     0m20.07s system
0m25.06s real     0m55.00s user     0m20.42s system

Correctness of Thinkfree.apk.oat verified by diffing the oat files
produced before and after the change.

Change-Id: I5ff7b85ffe86c57d3434294ca7a621a695bf57a9
diff --git a/compiler/dex/arena_allocator.cc b/compiler/dex/arena_allocator.cc
index 3a3e385..36393e7 100644
--- a/compiler/dex/arena_allocator.cc
+++ b/compiler/dex/arena_allocator.cc
@@ -18,9 +18,14 @@
 #include "dex_file-inl.h"
 #include "arena_allocator.h"
 #include "base/logging.h"
+#include "base/mutex.h"
 
 namespace art {
 
+// MemMap is a bit slower than malloc according to my measurements.
+static constexpr bool kUseMemMap = false;
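+// Only relevant when kUseMemMap is true: Reset() then memsets instead
+// of madvising the used pages away.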
+static constexpr bool kUseMemSet = true && kUseMemMap;
+
 static const char* alloc_names[ArenaAllocator::kNumAllocKinds] = {
   "Misc       ",
   "BasicBlock ",
@@ -37,108 +42,144 @@
   "Preds      ",
 };
 
-ArenaAllocator::ArenaAllocator(size_t default_size)
-  : default_size_(default_size),
-    block_size_(default_size - sizeof(ArenaMemBlock)),
-    arena_head_(NULL),
-    current_block_(NULL),
-    num_arena_blocks_(0),
-    malloc_bytes_(0),
-    lost_bytes_(0),
-    num_allocations_(0) {
-  memset(&alloc_stats_[0], 0, sizeof(alloc_stats_));
-  // Start with an empty arena.
-  arena_head_ = current_block_ = EmptyArenaBlock();
-  num_arena_blocks_++;
-}
-
-ArenaAllocator::~ArenaAllocator() {
-  // Reclaim all the arena blocks allocated so far.
-  ArenaMemBlock* head = arena_head_;
-  while (head != NULL) {
-    ArenaMemBlock* p = head;
-    head = head->next;
-    free(p);
+Arena::Arena(size_t size)
+    : bytes_allocated_(0),
+      map_(nullptr),
+      next_(nullptr) {
+  if (kUseMemMap) {
+    map_ = MemMap::MapAnonymous("dalvik-arena", NULL, size, PROT_READ | PROT_WRITE);
+    memory_ = map_->Begin();
+    size_ = map_->Size();
+  } else {
+    memory_ = reinterpret_cast<uint8_t*>(calloc(1, size));
+    size_ = size;
   }
-  arena_head_ = NULL;
-  num_arena_blocks_ = 0;
 }
 
-// Return an arena with no storage for use as a sentinal.
-ArenaAllocator::ArenaMemBlock* ArenaAllocator::EmptyArenaBlock() {
-  ArenaMemBlock* res = static_cast<ArenaMemBlock*>(malloc(sizeof(ArenaMemBlock)));
-  malloc_bytes_ += sizeof(ArenaMemBlock);
-  res->block_size = 0;
-  res->bytes_allocated = 0;
-  res->next = NULL;
-  return res;
+Arena::~Arena() {
+  if (kUseMemMap) {
+    delete map_;
+  } else {
+    free(reinterpret_cast<void*>(memory_));
+  }
 }
 
-// Arena-based malloc for compilation tasks.
-void* ArenaAllocator::NewMem(size_t size, bool zero, ArenaAllocKind kind) {
-  DCHECK(current_block_ != NULL);
-  DCHECK(arena_head_ != NULL);
-  size = (size + 3) & ~3;
-  alloc_stats_[kind] += size;
-  ArenaMemBlock* allocation_block = current_block_;  // Assume we'll fit.
-  size_t remaining_space = current_block_->block_size - current_block_->bytes_allocated;
-  if (remaining_space < size) {
-    /*
-     * Time to allocate a new block.  If this is a large allocation or we have
-     * significant space remaining in the current block then fulfill the allocation
-     * request with a custom-sized malloc() - otherwise grab a new standard block.
-     */
-    size_t allocation_size = sizeof(ArenaMemBlock);
-    if ((remaining_space >= ARENA_HIGH_WATER) || (size > block_size_)) {
-      allocation_size += size;
+void Arena::Reset() {
+  if (bytes_allocated_) {
+    if (kUseMemSet || !kUseMemMap) {
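+      // Zero only the bytes the previous user touched; the rest is still zero.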
+      memset(Begin(), 0, bytes_allocated_);
     } else {
-      allocation_size += block_size_;
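+      // madvise'd pages come back zero-filled from the kernel on the next touch.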
+      madvise(Begin(), bytes_allocated_, MADV_DONTNEED);
     }
-    ArenaMemBlock *new_block = static_cast<ArenaMemBlock*>(malloc(allocation_size));
-    if (new_block == NULL) {
-      LOG(FATAL) << "Arena allocation failure";
-    }
-    malloc_bytes_ += allocation_size;
-    new_block->block_size = allocation_size - sizeof(ArenaMemBlock);
-    new_block->bytes_allocated = 0;
-    new_block->next = NULL;
-    num_arena_blocks_++;
-    /*
-     * If the new block is completely full, insert it into the head of the list so we don't
-     * bother trying to fit more and won't hide the potentially allocatable space on the
-     * last (current_block_) block.  TUNING: if we move to a mark scheme, revisit
-     * this code to keep allocation order intact.
-     */
-    if (new_block->block_size == size) {
-      new_block->next = arena_head_;
-      arena_head_ = new_block;
-    } else {
-      int lost = (current_block_->block_size - current_block_->bytes_allocated);
-      lost_bytes_ += lost;
-      current_block_->next = new_block;
-      current_block_ = new_block;
-    }
-    allocation_block = new_block;
+    bytes_allocated_ = 0;
   }
-  void* ptr = &allocation_block->ptr[allocation_block->bytes_allocated];
-  allocation_block->bytes_allocated += size;
-  if (zero) {
-    memset(ptr, 0, size);
-  }
-  num_allocations_++;
-  return ptr;
 }
 
-// Dump memory usage stats.
-void ArenaAllocator::DumpMemStats(std::ostream& os) const {
+ArenaPool::ArenaPool()
+    : lock_("Arena pool lock"),
+      free_arenas_(nullptr) {
+}
+
+ArenaPool::~ArenaPool() {
+  while (free_arenas_ != nullptr) {
+    auto* arena = free_arenas_;
+    free_arenas_ = free_arenas_->next_;
+    delete arena;
+  }
+}
+
+Arena* ArenaPool::AllocArena(size_t size) {
+  Thread* self = Thread::Current();
+  Arena* ret = nullptr;
+  {
+    MutexLock lock(self, lock_);
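+    // Reuse the arena at the head of the free list only if it is large enough.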
+    if (free_arenas_ != nullptr && LIKELY(free_arenas_->Size() >= size)) {
+      ret = free_arenas_;
+      free_arenas_ = free_arenas_->next_;
+    }
+  }
+  if (ret == nullptr) {
+    ret = new Arena(size);
+  }
+  ret->Reset();
+  return ret;
+}
+
+void ArenaPool::FreeArena(Arena* arena) {
+  Thread* self = Thread::Current();
+  {
+    MutexLock lock(self, lock_);
+    arena->next_ = free_arenas_;
+    free_arenas_ = arena;
+  }
+}
+
+size_t ArenaAllocator::BytesAllocated() const {
   size_t total = 0;
   for (int i = 0; i < kNumAllocKinds; i++) {
     total += alloc_stats_[i];
   }
-  os << " MEM: used: " << total << ", allocated: " << malloc_bytes_
-     << ", lost: " << lost_bytes_ << "\n";
-  os << "Number of blocks allocated: " << num_arena_blocks_ << ", Number of allocations: "
-     << num_allocations_ << ", avg: " << total / num_allocations_ << "\n";
+  return total;
+}
+
+ArenaAllocator::ArenaAllocator(ArenaPool* pool)
+  : pool_(pool),
+    begin_(nullptr),
+    end_(nullptr),
+    ptr_(nullptr),
+    arena_head_(nullptr),
+    num_allocations_(0) {
+  memset(&alloc_stats_[0], 0, sizeof(alloc_stats_));
+}
+
+void ArenaAllocator::UpdateBytesAllocated() {
+  if (arena_head_ != nullptr) {
+    // Update how many bytes we have allocated into the arena so that the arena pool knows how
+    // much memory to zero out.
+    arena_head_->bytes_allocated_ = ptr_ - begin_;
+  }
+}
+
+ArenaAllocator::~ArenaAllocator() {
+  // Reclaim all the arenas by giving them back to the arena pool.
+  UpdateBytesAllocated();
+  while (arena_head_ != nullptr) {
+    Arena* arena = arena_head_;
+    arena_head_ = arena_head_->next_;
+    pool_->FreeArena(arena);
+  }
+}
+
+void ArenaAllocator::ObtainNewArenaForAllocation(size_t allocation_size) {
+  UpdateBytesAllocated();
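+  // Grab an arena of at least kDefaultSize so small requests share arenas.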
+  Arena* new_arena = pool_->AllocArena(std::max(Arena::kDefaultSize, allocation_size));
+  new_arena->next_ = arena_head_;
+  arena_head_ = new_arena;
+  // Update our internal data structures.
+  ptr_ = begin_ = new_arena->Begin();
+  end_ = new_arena->End();
+}
+
+// Dump memory usage stats.
+void ArenaAllocator::DumpMemStats(std::ostream& os) const {
+  size_t malloc_bytes = 0;
+  // Start out with how many lost bytes we have in the arena we are currently allocating into.
+  size_t lost_bytes = end_ - ptr_;
+  size_t num_arenas = 0;
+  for (Arena* arena = arena_head_; arena != nullptr; arena = arena->next_) {
+    malloc_bytes += arena->Size();
+    if (arena != arena_head_) {
+      lost_bytes += arena->RemainingSpace();
+    }
+    ++num_arenas;
+  }
+  const size_t bytes_allocated = BytesAllocated();
+  os << " MEM: used: " << bytes_allocated << ", allocated: " << malloc_bytes
+     << ", lost: " << lost_bytes << "\n";
+  if (num_allocations_ != 0) {
+    os << "Number of arenas allocated: " << num_arenas << ", Number of allocations: "
+       << num_allocations_ << ", avg size: " << bytes_allocated / num_allocations_ << "\n";
+  }
   os << "===== Allocation by kind\n";
   for (int i = 0; i < kNumAllocKinds; i++) {
       os << alloc_names[i] << std::setw(10) << alloc_stats_[i] << "\n";
diff --git a/compiler/dex/arena_allocator.h b/compiler/dex/arena_allocator.h
index e8e2c02..dda52a2 100644
--- a/compiler/dex/arena_allocator.h
+++ b/compiler/dex/arena_allocator.h
@@ -19,65 +19,127 @@
 
 #include <stdint.h>
 #include <stddef.h>
+
+#include "base/mutex.h"
 #include "compiler_enums.h"
+#include "mem_map.h"
 
 namespace art {
 
-#define ARENA_DEFAULT_BLOCK_SIZE (256 * 1024)
-#define ARENA_HIGH_WATER (16 * 1024)
+class Arena;
+class ArenaPool;
+class ArenaAllocator;
 
-class ArenaAllocator {
-  public:
-    // Type of allocation for memory tuning.
-    enum ArenaAllocKind {
-      kAllocMisc,
-      kAllocBB,
-      kAllocLIR,
-      kAllocMIR,
-      kAllocDFInfo,
-      kAllocGrowableArray,
-      kAllocGrowableBitMap,
-      kAllocDalvikToSSAMap,
-      kAllocDebugInfo,
-      kAllocSuccessor,
-      kAllocRegAlloc,
-      kAllocData,
-      kAllocPredecessors,
-      kNumAllocKinds
-    };
-
-  explicit ArenaAllocator(size_t default_size = ARENA_DEFAULT_BLOCK_SIZE);
-  ~ArenaAllocator();
-  void* NewMem(size_t size, bool zero, ArenaAllocKind kind);
-  size_t BytesAllocated() {
-    return malloc_bytes_;
+class Arena {
+ public:
+  static constexpr size_t kDefaultSize = 128 * KB;
+  explicit Arena(size_t size = kDefaultSize);
+  ~Arena();
+  void Reset();
+  uint8_t* Begin() {
+    return memory_;
   }
 
+  uint8_t* End() {
+    return memory_ + size_;
+  }
+
+  size_t Size() const {
+    return size_;
+  }
+
+  size_t RemainingSpace() const {
+    return Size() - bytes_allocated_;
+  }
+
+ private:
+  size_t bytes_allocated_;
+  uint8_t* memory_;
+  size_t size_;
+  MemMap* map_;
+  Arena* next_;
+  friend class ArenaPool;
+  friend class ArenaAllocator;
+  DISALLOW_COPY_AND_ASSIGN(Arena);
+};
+
+class ArenaPool {
+ public:
+  ArenaPool();
+  ~ArenaPool();
+  Arena* AllocArena(size_t size);
+  void FreeArena(Arena* arena);
+
+ private:
+  Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+  Arena* free_arenas_ GUARDED_BY(lock_);
+  DISALLOW_COPY_AND_ASSIGN(ArenaPool);
+};
+
+class ArenaAllocator {
+ public:
+  // Type of allocation for memory tuning.
+  enum ArenaAllocKind {
+    kAllocMisc,
+    kAllocBB,
+    kAllocLIR,
+    kAllocMIR,
+    kAllocDFInfo,
+    kAllocGrowableArray,
+    kAllocGrowableBitMap,
+    kAllocDalvikToSSAMap,
+    kAllocDebugInfo,
+    kAllocSuccessor,
+    kAllocRegAlloc,
+    kAllocData,
+    kAllocPredecessors,
+    kNumAllocKinds
+  };
+
+  static constexpr bool kCountAllocations = false;
+
+  explicit ArenaAllocator(ArenaPool* pool);
+  ~ArenaAllocator();
+
+  // Returns zeroed memory.
+  void* Alloc(size_t bytes, ArenaAllocKind kind) ALWAYS_INLINE {
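+    // Round the request up to a 4-byte boundary.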
+    bytes = (bytes + 3) & ~3;
+    if (UNLIKELY(ptr_ + bytes > end_)) {
+      // Obtain a new block.
+      ObtainNewArenaForAllocation(bytes);
+      if (UNLIKELY(ptr_ == nullptr)) {
+        return nullptr;
+      }
+    }
+    if (kCountAllocations) {
+      alloc_stats_[kind] += bytes;
+      ++num_allocations_;
+    }
+    uint8_t* ret = ptr_;
+    ptr_ += bytes;
+    return ret;
+  }
+
+  void ObtainNewArenaForAllocation(size_t allocation_size);
+  size_t BytesAllocated() const;
   void DumpMemStats(std::ostream& os) const;
 
-  private:
-    // Variable-length allocation block.
-    struct ArenaMemBlock {
-      size_t block_size;
-      size_t bytes_allocated;
-      ArenaMemBlock *next;
-      char ptr[0];
-    };
+ private:
+  void UpdateBytesAllocated();
 
-    ArenaMemBlock* EmptyArenaBlock();
+  ArenaPool* pool_;
+  uint8_t* begin_;
+  uint8_t* end_;
+  uint8_t* ptr_;
+  Arena* arena_head_;
 
-    size_t default_size_;                    // Smallest size of new allocation block.
-    size_t block_size_;                      // Amount of allocatable bytes on a default block.
-    ArenaMemBlock* arena_head_;              // Head of linked list of allocation blocks.
-    ArenaMemBlock* current_block_;           // NOTE: code assumes there's always at least 1 block.
-    int num_arena_blocks_;
-    uint32_t malloc_bytes_;                  // Number of actual bytes malloc'd
-    uint32_t alloc_stats_[kNumAllocKinds];   // Bytes used by various allocation kinds.
-    uint32_t lost_bytes_;                    // Lost memory at end of too-small region
-    uint32_t num_allocations_;
+  // Statistics.
+  size_t num_allocations_;
+  size_t alloc_stats_[kNumAllocKinds];   // Bytes used by various allocation kinds.
+
+  DISALLOW_COPY_AND_ASSIGN(ArenaAllocator);
 };  // ArenaAllocator
 
-
 struct MemStats {
    public:
      void Dump(std::ostream& os) const {
diff --git a/compiler/dex/arena_bit_vector.cc b/compiler/dex/arena_bit_vector.cc
index 724fdf8..3fa9295 100644
--- a/compiler/dex/arena_bit_vector.cc
+++ b/compiler/dex/arena_bit_vector.cc
@@ -35,8 +35,8 @@
      expandable_(expandable),
      kind_(kind),
      storage_size_((start_bits + 31) >> 5),
-     storage_(static_cast<uint32_t*>(arena_->NewMem(storage_size_ * sizeof(uint32_t), true,
-                                                    ArenaAllocator::kAllocGrowableBitMap))) {
+     storage_(static_cast<uint32_t*>(arena_->Alloc(storage_size_ * sizeof(uint32_t),
+                                                   ArenaAllocator::kAllocGrowableBitMap))) {
   DCHECK_EQ(sizeof(storage_[0]), 4U);    // Assuming 32-bit units.
 }
 
@@ -68,8 +68,8 @@
     unsigned int new_size = (num + 1 + 31) >> 5;
     DCHECK_GT(new_size, storage_size_);
     uint32_t *new_storage =
-        static_cast<uint32_t*>(arena_->NewMem(new_size * sizeof(uint32_t), false,
-                                              ArenaAllocator::kAllocGrowableBitMap));
+        static_cast<uint32_t*>(arena_->Alloc(new_size * sizeof(uint32_t),
+                                             ArenaAllocator::kAllocGrowableBitMap));
     memcpy(new_storage, storage_, storage_size_ * sizeof(uint32_t));
     // Zero out the new storage words.
     memset(&new_storage[storage_size_], 0, (new_size - storage_size_) * sizeof(uint32_t));
diff --git a/compiler/dex/arena_bit_vector.h b/compiler/dex/arena_bit_vector.h
index 4ec8c88..8bcd628 100644
--- a/compiler/dex/arena_bit_vector.h
+++ b/compiler/dex/arena_bit_vector.h
@@ -67,8 +67,8 @@
         }
 
         static void* operator new(size_t size, ArenaAllocator* arena) {
-          return arena->NewMem(sizeof(ArenaBitVector::Iterator), true,
-                               ArenaAllocator::kAllocGrowableBitMap);
+          return arena->Alloc(sizeof(ArenaBitVector::Iterator),
+                              ArenaAllocator::kAllocGrowableBitMap);
         };
         static void operator delete(void* p) {}  // Nop.
 
@@ -84,7 +84,7 @@
     ~ArenaBitVector() {}
 
     static void* operator new(size_t size, ArenaAllocator* arena) {
-      return arena->NewMem(sizeof(ArenaBitVector), true, ArenaAllocator::kAllocGrowableBitMap);
+      return arena->Alloc(sizeof(ArenaBitVector), ArenaAllocator::kAllocGrowableBitMap);
     }
     static void operator delete(void* p) {}  // Nop.
 
diff --git a/compiler/dex/compiler_ir.h b/compiler/dex/compiler_ir.h
index a9b5bf6..26d0923 100644
--- a/compiler/dex/compiler_ir.h
+++ b/compiler/dex/compiler_ir.h
@@ -43,7 +43,7 @@
 class Mir2Lir;
 
 struct CompilationUnit {
-  CompilationUnit()
+  explicit CompilationUnit(ArenaPool* pool)
     : compiler_driver(NULL),
       class_linker(NULL),
       dex_file(NULL),
@@ -66,6 +66,7 @@
       num_regs(0),
       num_compiler_temps(0),
       compiler_flip_match(false),
+      arena(pool),
       mir_graph(NULL),
       cg(NULL) {}
   /*
diff --git a/compiler/dex/frontend.cc b/compiler/dex/frontend.cc
index d1f7f3e..2303649 100644
--- a/compiler/dex/frontend.cc
+++ b/compiler/dex/frontend.cc
@@ -119,30 +119,30 @@
   VLOG(compiler) << "Compiling " << PrettyMethod(method_idx, dex_file) << "...";
 
   ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
-  UniquePtr<CompilationUnit> cu(new CompilationUnit);
+  CompilationUnit cu(&compiler.GetArenaPool());
 
-  cu->compiler_driver = &compiler;
-  cu->class_linker = class_linker;
-  cu->instruction_set = compiler.GetInstructionSet();
-  cu->compiler_backend = compiler_backend;
-  DCHECK((cu->instruction_set == kThumb2) ||
-         (cu->instruction_set == kX86) ||
-         (cu->instruction_set == kMips));
+  cu.compiler_driver = &compiler;
+  cu.class_linker = class_linker;
+  cu.instruction_set = compiler.GetInstructionSet();
+  cu.compiler_backend = compiler_backend;
+  DCHECK((cu.instruction_set == kThumb2) ||
+         (cu.instruction_set == kX86) ||
+         (cu.instruction_set == kMips));
 
 
   /* Adjust this value accordingly once inlining is performed */
-  cu->num_dalvik_registers = code_item->registers_size_;
+  cu.num_dalvik_registers = code_item->registers_size_;
   // TODO: set this from command line
-  cu->compiler_flip_match = false;
-  bool use_match = !cu->compiler_method_match.empty();
-  bool match = use_match && (cu->compiler_flip_match ^
-      (PrettyMethod(method_idx, dex_file).find(cu->compiler_method_match) !=
+  cu.compiler_flip_match = false;
+  bool use_match = !cu.compiler_method_match.empty();
+  bool match = use_match && (cu.compiler_flip_match ^
+      (PrettyMethod(method_idx, dex_file).find(cu.compiler_method_match) !=
        std::string::npos));
   if (!use_match || match) {
-    cu->disable_opt = kCompilerOptimizerDisableFlags;
-    cu->enable_debug = kCompilerDebugFlags;
-    cu->verbose = VLOG_IS_ON(compiler) ||
-        (cu->enable_debug & (1 << kDebugVerbose));
+    cu.disable_opt = kCompilerOptimizerDisableFlags;
+    cu.enable_debug = kCompilerDebugFlags;
+    cu.verbose = VLOG_IS_ON(compiler) ||
+        (cu.enable_debug & (1 << kDebugVerbose));
   }
 
   /*
@@ -152,12 +152,12 @@
 
   if (compiler_backend == kPortable) {
     // Fused long branches not currently usseful in bitcode.
-    cu->disable_opt |= (1 << kBranchFusing);
+    cu.disable_opt |= (1 << kBranchFusing);
   }
 
-  if (cu->instruction_set == kMips) {
+  if (cu.instruction_set == kMips) {
     // Disable some optimizations for mips for now
-    cu->disable_opt |= (
+    cu.disable_opt |= (
         (1 << kLoadStoreElimination) |
         (1 << kLoadHoisting) |
         (1 << kSuppressLoads) |
@@ -170,72 +170,71 @@
         (1 << kPromoteCompilerTemps));
   }
 
-  cu->mir_graph.reset(new MIRGraph(cu.get(), &cu->arena));
+  cu.mir_graph.reset(new MIRGraph(&cu, &cu.arena));
 
   /* Gathering opcode stats? */
   if (kCompilerDebugFlags & (1 << kDebugCountOpcodes)) {
-    cu->mir_graph->EnableOpcodeCounting();
+    cu.mir_graph->EnableOpcodeCounting();
   }
 
   /* Build the raw MIR graph */
-  cu->mir_graph->InlineMethod(code_item, access_flags, invoke_type, class_def_idx, method_idx,
+  cu.mir_graph->InlineMethod(code_item, access_flags, invoke_type, class_def_idx, method_idx,
                               class_loader, dex_file);
 
 #if !defined(ART_USE_PORTABLE_COMPILER)
-  if (cu->mir_graph->SkipCompilation(Runtime::Current()->GetCompilerFilter())) {
+  if (cu.mir_graph->SkipCompilation(Runtime::Current()->GetCompilerFilter())) {
     return NULL;
   }
 #endif
 
   /* Do a code layout pass */
-  cu->mir_graph->CodeLayout();
+  cu.mir_graph->CodeLayout();
 
   /* Perform SSA transformation for the whole method */
-  cu->mir_graph->SSATransformation();
+  cu.mir_graph->SSATransformation();
 
   /* Do constant propagation */
-  cu->mir_graph->PropagateConstants();
+  cu.mir_graph->PropagateConstants();
 
   /* Count uses */
-  cu->mir_graph->MethodUseCount();
+  cu.mir_graph->MethodUseCount();
 
   /* Perform null check elimination */
-  cu->mir_graph->NullCheckElimination();
+  cu.mir_graph->NullCheckElimination();
 
   /* Combine basic blocks where possible */
-  cu->mir_graph->BasicBlockCombine();
+  cu.mir_graph->BasicBlockCombine();
 
   /* Do some basic block optimizations */
-  cu->mir_graph->BasicBlockOptimization();
+  cu.mir_graph->BasicBlockOptimization();
 
-  if (cu->enable_debug & (1 << kDebugDumpCheckStats)) {
-    cu->mir_graph->DumpCheckStats();
+  if (cu.enable_debug & (1 << kDebugDumpCheckStats)) {
+    cu.mir_graph->DumpCheckStats();
   }
 
   if (kCompilerDebugFlags & (1 << kDebugCountOpcodes)) {
-    cu->mir_graph->ShowOpcodeStats();
+    cu.mir_graph->ShowOpcodeStats();
   }
 
   /* Set up regLocation[] array to describe values - one for each ssa_name. */
-  cu->mir_graph->BuildRegLocations();
+  cu.mir_graph->BuildRegLocations();
 
   CompiledMethod* result = NULL;
 
 #if defined(ART_USE_PORTABLE_COMPILER)
   if (compiler_backend == kPortable) {
-    cu->cg.reset(PortableCodeGenerator(cu.get(), cu->mir_graph.get(), &cu->arena,
-                                       llvm_compilation_unit));
+    cu.cg.reset(PortableCodeGenerator(&cu, cu.mir_graph.get(), &cu.arena, llvm_compilation_unit));
   } else {
 #endif
     switch (compiler.GetInstructionSet()) {
       case kThumb2:
-        cu->cg.reset(ArmCodeGenerator(cu.get(), cu->mir_graph.get(), &cu->arena));
+        cu.cg.reset(ArmCodeGenerator(&cu, cu.mir_graph.get(), &cu.arena));
         break;
       case kMips:
-        cu->cg.reset(MipsCodeGenerator(cu.get(), cu->mir_graph.get(), &cu->arena));
+        cu.cg.reset(MipsCodeGenerator(&cu, cu.mir_graph.get(), &cu.arena));
         break;
       case kX86:
-        cu->cg.reset(X86CodeGenerator(cu.get(), cu->mir_graph.get(), &cu->arena));
+        cu.cg.reset(X86CodeGenerator(&cu, cu.mir_graph.get(), &cu.arena));
         break;
       default:
         LOG(FATAL) << "Unexpected instruction set: " << compiler.GetInstructionSet();
@@ -244,9 +243,9 @@
   }
 #endif
 
-  cu->cg->Materialize();
+  cu.cg->Materialize();
 
-  result = cu->cg->GetCompiledMethod();
+  result = cu.cg->GetCompiledMethod();
 
   if (result) {
     VLOG(compiler) << "Compiled " << PrettyMethod(method_idx, dex_file);
@@ -254,15 +253,15 @@
     VLOG(compiler) << "Deferred " << PrettyMethod(method_idx, dex_file);
   }
 
-  if (cu->enable_debug & (1 << kDebugShowMemoryUsage)) {
-    if (cu->arena.BytesAllocated() > (5 * 1024 *1024)) {
-      MemStats mem_stats(cu->arena);
+  if (cu.enable_debug & (1 << kDebugShowMemoryUsage)) {
+    if (cu.arena.BytesAllocated() > (5 * 1024 *1024)) {
+      MemStats mem_stats(cu.arena);
       LOG(INFO) << PrettyMethod(method_idx, dex_file) << " " << Dumpable<MemStats>(mem_stats);
     }
   }
 
-  if (cu->enable_debug & (1 << kDebugShowSummaryMemoryUsage)) {
-    LOG(INFO) << "MEMINFO " << cu->arena.BytesAllocated() << " " << cu->mir_graph->GetNumBlocks()
+  if (cu.enable_debug & (1 << kDebugShowSummaryMemoryUsage)) {
+    LOG(INFO) << "MEMINFO " << cu.arena.BytesAllocated() << " " << cu.mir_graph->GetNumBlocks()
               << " " << PrettyMethod(method_idx, dex_file);
   }
 
diff --git a/compiler/dex/growable_array.h b/compiler/dex/growable_array.h
index 08a6478..8e2abfb 100644
--- a/compiler/dex/growable_array.h
+++ b/compiler/dex/growable_array.h
@@ -67,7 +67,7 @@
         }
 
         static void* operator new(size_t size, ArenaAllocator* arena) {
-          return arena->NewMem(sizeof(GrowableArray::Iterator), true, ArenaAllocator::kAllocGrowableArray);
+          return arena->Alloc(sizeof(GrowableArray::Iterator), ArenaAllocator::kAllocGrowableArray);
         };
         static void operator delete(void* p) {}  // Nop.
 
@@ -81,8 +81,8 @@
         num_allocated_(init_length),
         num_used_(0),
         kind_(kind) {
-      elem_list_ = static_cast<T*>(arena_->NewMem(sizeof(T) * init_length, true,
-                                                  ArenaAllocator::kAllocGrowableArray));
+      elem_list_ = static_cast<T*>(arena_->Alloc(sizeof(T) * init_length,
+                                                 ArenaAllocator::kAllocGrowableArray));
     };
 
 
@@ -95,8 +95,8 @@
       if (new_length > target_length) {
          target_length = new_length;
       }
-      T* new_array = static_cast<T*>(arena_->NewMem(sizeof(T) * target_length, true,
-                                                    ArenaAllocator::kAllocGrowableArray));
+      T* new_array = static_cast<T*>(arena_->Alloc(sizeof(T) * target_length,
+                                                   ArenaAllocator::kAllocGrowableArray));
       memcpy(new_array, elem_list_, sizeof(T) * num_allocated_);
       num_allocated_ = target_length;
       elem_list_ = new_array;
@@ -153,7 +153,7 @@
     T* GetRawStorage() const { return elem_list_; }
 
     static void* operator new(size_t size, ArenaAllocator* arena) {
-      return arena->NewMem(sizeof(GrowableArray<T>), true, ArenaAllocator::kAllocGrowableArray);
+      return arena->Alloc(sizeof(GrowableArray<T>), ArenaAllocator::kAllocGrowableArray);
     };
     static void operator delete(void* p) {}  // Nop.
 
diff --git a/compiler/dex/mir_dataflow.cc b/compiler/dex/mir_dataflow.cc
index c3a33ff..3a73717 100644
--- a/compiler/dex/mir_dataflow.cc
+++ b/compiler/dex/mir_dataflow.cc
@@ -954,11 +954,11 @@
   int i;
 
   mir->ssa_rep->num_uses = num_uses;
-  mir->ssa_rep->uses = static_cast<int*>(arena_->NewMem(sizeof(int) * num_uses, true,
-                                                        ArenaAllocator::kAllocDFInfo));
+  mir->ssa_rep->uses = static_cast<int*>(arena_->Alloc(sizeof(int) * num_uses,
+                                                       ArenaAllocator::kAllocDFInfo));
   // NOTE: will be filled in during type & size inference pass
-  mir->ssa_rep->fp_use = static_cast<bool*>(arena_->NewMem(sizeof(bool) * num_uses, true,
-                                                           ArenaAllocator::kAllocDFInfo));
+  mir->ssa_rep->fp_use = static_cast<bool*>(arena_->Alloc(sizeof(bool) * num_uses,
+                                                          ArenaAllocator::kAllocDFInfo));
 
   for (i = 0; i < num_uses; i++) {
     HandleSSAUse(mir->ssa_rep->uses, d_insn->arg[i], i);
@@ -972,11 +972,11 @@
   int i;
 
   mir->ssa_rep->num_uses = num_uses;
-  mir->ssa_rep->uses = static_cast<int*>(arena_->NewMem(sizeof(int) * num_uses, true,
-                                                        ArenaAllocator::kAllocDFInfo));
+  mir->ssa_rep->uses = static_cast<int*>(arena_->Alloc(sizeof(int) * num_uses,
+                                                       ArenaAllocator::kAllocDFInfo));
   // NOTE: will be filled in during type & size inference pass
-  mir->ssa_rep->fp_use = static_cast<bool*>(arena_->NewMem(sizeof(bool) * num_uses, true,
-                                                           ArenaAllocator::kAllocDFInfo));
+  mir->ssa_rep->fp_use = static_cast<bool*>(arena_->Alloc(sizeof(bool) * num_uses,
+                                                          ArenaAllocator::kAllocDFInfo));
 
   for (i = 0; i < num_uses; i++) {
     HandleSSAUse(mir->ssa_rep->uses, d_insn->vC+i, i);
@@ -991,8 +991,8 @@
 
   for (mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
     mir->ssa_rep =
-        static_cast<struct SSARepresentation *>(arena_->NewMem(sizeof(SSARepresentation), true,
-                                                               ArenaAllocator::kAllocDFInfo));
+        static_cast<struct SSARepresentation *>(arena_->Alloc(sizeof(SSARepresentation),
+                                                              ArenaAllocator::kAllocDFInfo));
 
     int df_attributes = oat_data_flow_attributes_[mir->dalvikInsn.opcode];
 
@@ -1041,10 +1041,10 @@
 
     if (num_uses) {
       mir->ssa_rep->num_uses = num_uses;
-      mir->ssa_rep->uses = static_cast<int*>(arena_->NewMem(sizeof(int) * num_uses, false,
-                                                            ArenaAllocator::kAllocDFInfo));
-      mir->ssa_rep->fp_use = static_cast<bool*>(arena_->NewMem(sizeof(bool) * num_uses, false,
-                                                               ArenaAllocator::kAllocDFInfo));
+      mir->ssa_rep->uses = static_cast<int*>(arena_->Alloc(sizeof(int) * num_uses,
+                                                           ArenaAllocator::kAllocDFInfo));
+      mir->ssa_rep->fp_use = static_cast<bool*>(arena_->Alloc(sizeof(bool) * num_uses,
+                                                              ArenaAllocator::kAllocDFInfo));
     }
 
     int num_defs = 0;
@@ -1058,10 +1058,10 @@
 
     if (num_defs) {
       mir->ssa_rep->num_defs = num_defs;
-      mir->ssa_rep->defs = static_cast<int*>(arena_->NewMem(sizeof(int) * num_defs, false,
-                                                            ArenaAllocator::kAllocDFInfo));
-      mir->ssa_rep->fp_def = static_cast<bool*>(arena_->NewMem(sizeof(bool) * num_defs, false,
-                                                               ArenaAllocator::kAllocDFInfo));
+      mir->ssa_rep->defs = static_cast<int*>(arena_->Alloc(sizeof(int) * num_defs,
+                                                           ArenaAllocator::kAllocDFInfo));
+      mir->ssa_rep->fp_def = static_cast<bool*>(arena_->Alloc(sizeof(bool) * num_defs,
+                                                              ArenaAllocator::kAllocDFInfo));
     }
 
     DecodedInstruction *d_insn = &mir->dalvikInsn;
@@ -1109,8 +1109,8 @@
    * predecessor blocks.
    */
   bb->data_flow_info->vreg_to_ssa_map =
-      static_cast<int*>(arena_->NewMem(sizeof(int) * cu_->num_dalvik_registers, false,
-                                       ArenaAllocator::kAllocDFInfo));
+      static_cast<int*>(arena_->Alloc(sizeof(int) * cu_->num_dalvik_registers,
+                                      ArenaAllocator::kAllocDFInfo));
 
   memcpy(bb->data_flow_info->vreg_to_ssa_map, vreg_to_ssa_map_,
          sizeof(int) * cu_->num_dalvik_registers);
@@ -1146,12 +1146,12 @@
    * Dalvik register, and the SSA names for those are the same.
    */
   vreg_to_ssa_map_ =
-      static_cast<int*>(arena_->NewMem(sizeof(int) * num_dalvik_reg, false,
-                                       ArenaAllocator::kAllocDFInfo));
+      static_cast<int*>(arena_->Alloc(sizeof(int) * num_dalvik_reg,
+                                      ArenaAllocator::kAllocDFInfo));
   /* Keep track of the higest def for each dalvik reg */
   ssa_last_defs_ =
-      static_cast<int*>(arena_->NewMem(sizeof(int) * num_dalvik_reg, false,
-                                       ArenaAllocator::kAllocDFInfo));
+      static_cast<int*>(arena_->Alloc(sizeof(int) * num_dalvik_reg,
+                                      ArenaAllocator::kAllocDFInfo));
 
   for (unsigned int i = 0; i < num_dalvik_reg; i++) {
     vreg_to_ssa_map_[i] = i;
@@ -1174,8 +1174,8 @@
       bb->block_type == kEntryBlock ||
       bb->block_type == kExitBlock) {
       bb->data_flow_info =
-          static_cast<BasicBlockDataFlow*>(arena_->NewMem(sizeof(BasicBlockDataFlow), true,
-                                                          ArenaAllocator::kAllocDFInfo));
+          static_cast<BasicBlockDataFlow*>(arena_->Alloc(sizeof(BasicBlockDataFlow),
+                                                         ArenaAllocator::kAllocDFInfo));
       }
   }
 }
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index 86f6ee5..81702e3 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -399,8 +399,8 @@
     BasicBlock *case_block = FindBlock(cur_offset + target_table[i], /* split */ true,
                                       /* create */ true, /* immed_pred_block_p */ &cur_block);
     SuccessorBlockInfo *successor_block_info =
-        static_cast<SuccessorBlockInfo*>(arena_->NewMem(sizeof(SuccessorBlockInfo), false,
-                                                        ArenaAllocator::kAllocSuccessor));
+        static_cast<SuccessorBlockInfo*>(arena_->Alloc(sizeof(SuccessorBlockInfo),
+                                                       ArenaAllocator::kAllocSuccessor));
     successor_block_info->block = case_block;
     successor_block_info->key =
         (insn->dalvikInsn.opcode == Instruction::PACKED_SWITCH) ?
@@ -444,7 +444,7 @@
         catches_.insert(catch_block->start_offset);
       }
       SuccessorBlockInfo *successor_block_info = reinterpret_cast<SuccessorBlockInfo*>
-          (arena_->NewMem(sizeof(SuccessorBlockInfo), false, ArenaAllocator::kAllocSuccessor));
+          (arena_->Alloc(sizeof(SuccessorBlockInfo), ArenaAllocator::kAllocSuccessor));
       successor_block_info->block = catch_block;
       successor_block_info->key = iterator.GetHandlerTypeIndex();
       cur_block->successor_block_list.blocks->Insert(successor_block_info);
@@ -490,7 +490,7 @@
   new_block->start_offset = insn->offset;
   cur_block->fall_through = new_block;
   new_block->predecessors->Insert(cur_block);
-  MIR* new_insn = static_cast<MIR*>(arena_->NewMem(sizeof(MIR), true, ArenaAllocator::kAllocMIR));
+  MIR* new_insn = static_cast<MIR*>(arena_->Alloc(sizeof(MIR), ArenaAllocator::kAllocMIR));
   *new_insn = *insn;
   insn->dalvikInsn.opcode =
       static_cast<Instruction::Code>(kMirOpCheck);
@@ -571,13 +571,12 @@
   int num_patterns = sizeof(special_patterns)/sizeof(special_patterns[0]);
   bool live_pattern = (num_patterns > 0) && !(cu_->disable_opt & (1 << kMatch));
   bool* dead_pattern =
-      static_cast<bool*>(arena_->NewMem(sizeof(bool) * num_patterns, true,
-                                        ArenaAllocator::kAllocMisc));
+      static_cast<bool*>(arena_->Alloc(sizeof(bool) * num_patterns, ArenaAllocator::kAllocMisc));
   int pattern_pos = 0;
 
   /* Parse all instructions and put them into containing basic blocks */
   while (code_ptr < code_end) {
-    MIR *insn = static_cast<MIR *>(arena_->NewMem(sizeof(MIR), true, ArenaAllocator::kAllocMIR));
+    MIR *insn = static_cast<MIR *>(arena_->Alloc(sizeof(MIR), ArenaAllocator::kAllocMIR));
     insn->offset = current_offset_;
     insn->m_unit_index = current_method_;
     int width = ParseInsn(code_ptr, &insn->dalvikInsn);
@@ -1002,7 +1001,7 @@
     str.append("]--optimized away");
   }
   int length = str.length() + 1;
-  ret = static_cast<char*>(arena_->NewMem(length, false, ArenaAllocator::kAllocDFInfo));
+  ret = static_cast<char*>(arena_->Alloc(length, ArenaAllocator::kAllocDFInfo));
   strncpy(ret, str.c_str(), length);
   return ret;
 }
@@ -1115,8 +1114,8 @@
  */
 CallInfo* MIRGraph::NewMemCallInfo(BasicBlock* bb, MIR* mir, InvokeType type,
                                   bool is_range) {
-  CallInfo* info = static_cast<CallInfo*>(arena_->NewMem(sizeof(CallInfo), true,
-                                                         ArenaAllocator::kAllocMisc));
+  CallInfo* info = static_cast<CallInfo*>(arena_->Alloc(sizeof(CallInfo),
+                                                        ArenaAllocator::kAllocMisc));
   MIR* move_result_mir = FindMoveResult(bb, mir);
   if (move_result_mir == NULL) {
     info->result.location = kLocInvalid;
@@ -1127,8 +1126,7 @@
   }
   info->num_arg_words = mir->ssa_rep->num_uses;
   info->args = (info->num_arg_words == 0) ? NULL : static_cast<RegLocation*>
-      (arena_->NewMem(sizeof(RegLocation) * info->num_arg_words, false,
-                      ArenaAllocator::kAllocMisc));
+      (arena_->Alloc(sizeof(RegLocation) * info->num_arg_words, ArenaAllocator::kAllocMisc));
   for (int i = 0; i < info->num_arg_words; i++) {
     info->args[i] = GetRawSrc(mir, i);
   }
@@ -1142,8 +1140,8 @@
 
 // Allocate a new basic block.
 BasicBlock* MIRGraph::NewMemBB(BBType block_type, int block_id) {
-  BasicBlock* bb = static_cast<BasicBlock*>(arena_->NewMem(sizeof(BasicBlock), true,
-                                                           ArenaAllocator::kAllocBB));
+  BasicBlock* bb = static_cast<BasicBlock*>(arena_->Alloc(sizeof(BasicBlock),
+                                                          ArenaAllocator::kAllocBB));
   bb->block_type = block_type;
   bb->id = block_id;
   // TUNING: better estimate of the exit block predecessors?
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index c02deab..28ab283 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -426,8 +426,8 @@
   }
 
   void EnableOpcodeCounting() {
-    opcode_count_ = static_cast<int*>(arena_->NewMem(kNumPackedOpcodes * sizeof(int), true,
-                                      ArenaAllocator::kAllocMisc));
+    opcode_count_ = static_cast<int*>(arena_->Alloc(kNumPackedOpcodes * sizeof(int),
+                                                    ArenaAllocator::kAllocMisc));
   }
 
   void ShowOpcodeStats();
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index 9f694de..b7611f8 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -94,8 +94,8 @@
 
 void MIRGraph::PropagateConstants() {
   is_constant_v_ = new (arena_) ArenaBitVector(arena_, GetNumSSARegs(), false);
-  constant_values_ = static_cast<int*>(arena_->NewMem(sizeof(int) * GetNumSSARegs(), true,
-                                                      ArenaAllocator::kAllocDFInfo));
+  constant_values_ = static_cast<int*>(arena_->Alloc(sizeof(int) * GetNumSSARegs(),
+                                                     ArenaAllocator::kAllocDFInfo));
   AllNodesIterator iter(this, false /* not iterative */);
   for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
     DoConstantPropogation(bb);
@@ -399,8 +399,7 @@
                 DCHECK_EQ(SelectKind(if_true), kSelectMove);
                 DCHECK_EQ(SelectKind(if_false), kSelectMove);
                 int* src_ssa =
-                    static_cast<int*>(arena_->NewMem(sizeof(int) * 3, false,
-                                                     ArenaAllocator::kAllocDFInfo));
+                    static_cast<int*>(arena_->Alloc(sizeof(int) * 3, ArenaAllocator::kAllocDFInfo));
                 src_ssa[0] = mir->ssa_rep->uses[0];
                 src_ssa[1] = if_true->ssa_rep->uses[0];
                 src_ssa[2] = if_false->ssa_rep->uses[0];
@@ -409,16 +408,14 @@
               }
               mir->ssa_rep->num_defs = 1;
               mir->ssa_rep->defs =
-                  static_cast<int*>(arena_->NewMem(sizeof(int) * 1, false,
-                                                   ArenaAllocator::kAllocDFInfo));
+                  static_cast<int*>(arena_->Alloc(sizeof(int) * 1, ArenaAllocator::kAllocDFInfo));
               mir->ssa_rep->fp_def =
-                  static_cast<bool*>(arena_->NewMem(sizeof(bool) * 1, false,
-                                                    ArenaAllocator::kAllocDFInfo));
+                  static_cast<bool*>(arena_->Alloc(sizeof(bool) * 1, ArenaAllocator::kAllocDFInfo));
               mir->ssa_rep->fp_def[0] = if_true->ssa_rep->fp_def[0];
               // Match type of uses to def.
               mir->ssa_rep->fp_use =
-                  static_cast<bool*>(arena_->NewMem(sizeof(bool) * mir->ssa_rep->num_uses, false,
-                                                    ArenaAllocator::kAllocDFInfo));
+                  static_cast<bool*>(arena_->Alloc(sizeof(bool) * mir->ssa_rep->num_uses,
+                                                   ArenaAllocator::kAllocDFInfo));
               for (int i = 0; i < mir->ssa_rep->num_uses; i++) {
                 mir->ssa_rep->fp_use[i] = mir->ssa_rep->fp_def[0];
               }
@@ -805,8 +802,7 @@
 
 void MIRGraph::DumpCheckStats() {
   Checkstats* stats =
-      static_cast<Checkstats*>(arena_->NewMem(sizeof(Checkstats), true,
-                                              ArenaAllocator::kAllocDFInfo));
+      static_cast<Checkstats*>(arena_->Alloc(sizeof(Checkstats), ArenaAllocator::kAllocDFInfo));
   checkstats_ = stats;
   AllNodesIterator iter(this, false /* not iterative */);
   for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc
index 2d8e24f..2dbe5f5 100644
--- a/compiler/dex/quick/arm/call_arm.cc
+++ b/compiler/dex/quick/arm/call_arm.cc
@@ -316,13 +316,12 @@
   }
   // Add the table to the list - we'll process it later
   SwitchTable *tab_rec =
-      static_cast<SwitchTable*>(arena_->NewMem(sizeof(SwitchTable), true,
-                                               ArenaAllocator::kAllocData));
+      static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), ArenaAllocator::kAllocData));
   tab_rec->table = table;
   tab_rec->vaddr = current_dalvik_offset_;
   int size = table[1];
-  tab_rec->targets = static_cast<LIR**>(arena_->NewMem(size * sizeof(LIR*), true,
-                                                       ArenaAllocator::kAllocLIR));
+  tab_rec->targets = static_cast<LIR**>(arena_->Alloc(size * sizeof(LIR*),
+                                                      ArenaAllocator::kAllocLIR));
   switch_tables_.Insert(tab_rec);
 
   // Get the switch value
@@ -365,13 +364,12 @@
   }
   // Add the table to the list - we'll process it later
   SwitchTable *tab_rec =
-      static_cast<SwitchTable*>(arena_->NewMem(sizeof(SwitchTable), true,
-                                               ArenaAllocator::kAllocData));
+      static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), ArenaAllocator::kAllocData));
   tab_rec->table = table;
   tab_rec->vaddr = current_dalvik_offset_;
   int size = table[1];
   tab_rec->targets =
-      static_cast<LIR**>(arena_->NewMem(size * sizeof(LIR*), true, ArenaAllocator::kAllocLIR));
+      static_cast<LIR**>(arena_->Alloc(size * sizeof(LIR*), ArenaAllocator::kAllocLIR));
   switch_tables_.Insert(tab_rec);
 
   // Get the switch value
@@ -419,8 +417,7 @@
   const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
   // Add the table to the list - we'll process it later
   FillArrayData *tab_rec =
-      static_cast<FillArrayData*>(arena_->NewMem(sizeof(FillArrayData), true,
-                                                 ArenaAllocator::kAllocData));
+      static_cast<FillArrayData*>(arena_->Alloc(sizeof(FillArrayData), ArenaAllocator::kAllocData));
   tab_rec->table = table;
   tab_rec->vaddr = current_dalvik_offset_;
   uint16_t width = tab_rec->table[1];
diff --git a/compiler/dex/quick/arm/target_arm.cc b/compiler/dex/quick/arm/target_arm.cc
index 47d3d97..6cc3052 100644
--- a/compiler/dex/quick/arm/target_arm.cc
+++ b/compiler/dex/quick/arm/target_arm.cc
@@ -538,16 +538,14 @@
   int num_temps = sizeof(core_temps)/sizeof(*core_temps);
   int num_fp_regs = sizeof(FpRegs)/sizeof(*FpRegs);
   int num_fp_temps = sizeof(fp_temps)/sizeof(*fp_temps);
-  reg_pool_ = static_cast<RegisterPool*>(arena_->NewMem(sizeof(*reg_pool_), true,
-                                                        ArenaAllocator::kAllocRegAlloc));
+  reg_pool_ = static_cast<RegisterPool*>(arena_->Alloc(sizeof(*reg_pool_),
+                                                       ArenaAllocator::kAllocRegAlloc));
   reg_pool_->num_core_regs = num_regs;
   reg_pool_->core_regs = reinterpret_cast<RegisterInfo*>
-      (arena_->NewMem(num_regs * sizeof(*reg_pool_->core_regs), true,
-                     ArenaAllocator::kAllocRegAlloc));
+      (arena_->Alloc(num_regs * sizeof(*reg_pool_->core_regs), ArenaAllocator::kAllocRegAlloc));
   reg_pool_->num_fp_regs = num_fp_regs;
   reg_pool_->FPRegs = static_cast<RegisterInfo*>
-      (arena_->NewMem(num_fp_regs * sizeof(*reg_pool_->FPRegs), true,
-                      ArenaAllocator::kAllocRegAlloc));
+      (arena_->Alloc(num_fp_regs * sizeof(*reg_pool_->FPRegs), ArenaAllocator::kAllocRegAlloc));
   CompilerInitPool(reg_pool_->core_regs, core_regs, reg_pool_->num_core_regs);
   CompilerInitPool(reg_pool_->FPRegs, FpRegs, reg_pool_->num_fp_regs);
   // Keep special registers from being allocated
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index 5f6f3d5..d89f1ed 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -321,7 +321,7 @@
 LIR* Mir2Lir::AddWordData(LIR* *constant_list_p, int value) {
   /* Add the constant to the literal pool */
   if (constant_list_p) {
-    LIR* new_value = static_cast<LIR*>(arena_->NewMem(sizeof(LIR), true, ArenaAllocator::kAllocData));
+    LIR* new_value = static_cast<LIR*>(arena_->Alloc(sizeof(LIR), ArenaAllocator::kAllocData));
     new_value->operands[0] = value;
     new_value->next = *constant_list_p;
     *constant_list_p = new_value;
@@ -793,7 +793,7 @@
   if (it == boundary_map_.end()) {
     LOG(FATAL) << "Error: didn't find vaddr 0x" << std::hex << vaddr;
   }
-  LIR* new_label = static_cast<LIR*>(arena_->NewMem(sizeof(LIR), true, ArenaAllocator::kAllocLIR));
+  LIR* new_label = static_cast<LIR*>(arena_->Alloc(sizeof(LIR), ArenaAllocator::kAllocLIR));
   new_label->dalvik_offset = vaddr;
   new_label->opcode = kPseudoCaseLabel;
   new_label->operands[0] = keyVal;
@@ -961,8 +961,8 @@
       first_lir_insn_(NULL),
       last_lir_insn_(NULL) {
   promotion_map_ = static_cast<PromotionMap*>
-      (arena_->NewMem((cu_->num_dalvik_registers  + cu_->num_compiler_temps + 1) *
-                      sizeof(promotion_map_[0]), true, ArenaAllocator::kAllocRegAlloc));
+      (arena_->Alloc((cu_->num_dalvik_registers  + cu_->num_compiler_temps + 1) *
+                      sizeof(promotion_map_[0]), ArenaAllocator::kAllocRegAlloc));
 }
 
 void Mir2Lir::Materialize() {
diff --git a/compiler/dex/quick/local_optimizations.cc b/compiler/dex/quick/local_optimizations.cc
index 2e9c845..630e990 100644
--- a/compiler/dex/quick/local_optimizations.cc
+++ b/compiler/dex/quick/local_optimizations.cc
@@ -249,7 +249,7 @@
         /* Only sink store instructions */
         if (sink_distance && !is_this_lir_load) {
           LIR* new_store_lir =
-              static_cast<LIR*>(arena_->NewMem(sizeof(LIR), true, ArenaAllocator::kAllocLIR));
+              static_cast<LIR*>(arena_->Alloc(sizeof(LIR), ArenaAllocator::kAllocLIR));
           *new_store_lir = *this_lir;
           /*
            * Stop point found - insert *before* the check_lir
@@ -446,7 +446,7 @@
       if (slot >= 0) {
         LIR* cur_lir = prev_inst_list[slot];
         LIR* new_load_lir =
-          static_cast<LIR*>(arena_->NewMem(sizeof(LIR), true, ArenaAllocator::kAllocLIR));
+          static_cast<LIR*>(arena_->Alloc(sizeof(LIR), ArenaAllocator::kAllocLIR));
         *new_load_lir = *this_lir;
         /*
          * Insertion is guaranteed to succeed since check_lir
diff --git a/compiler/dex/quick/mips/call_mips.cc b/compiler/dex/quick/mips/call_mips.cc
index eaae0e1..d53c012 100644
--- a/compiler/dex/quick/mips/call_mips.cc
+++ b/compiler/dex/quick/mips/call_mips.cc
@@ -67,13 +67,12 @@
   }
   // Add the table to the list - we'll process it later
   SwitchTable *tab_rec =
-      static_cast<SwitchTable*>(arena_->NewMem(sizeof(SwitchTable), true,
-                                               ArenaAllocator::kAllocData));
+      static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), ArenaAllocator::kAllocData));
   tab_rec->table = table;
   tab_rec->vaddr = current_dalvik_offset_;
   int elements = table[1];
   tab_rec->targets =
-      static_cast<LIR**>(arena_->NewMem(elements * sizeof(LIR*), true, ArenaAllocator::kAllocLIR));
+      static_cast<LIR**>(arena_->Alloc(elements * sizeof(LIR*), ArenaAllocator::kAllocLIR));
   switch_tables_.Insert(tab_rec);
 
   // The table is composed of 8-byte key/disp pairs
@@ -147,12 +146,11 @@
   }
   // Add the table to the list - we'll process it later
   SwitchTable *tab_rec =
-      static_cast<SwitchTable*>(arena_->NewMem(sizeof(SwitchTable), true,
-                                               ArenaAllocator::kAllocData));
+      static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), ArenaAllocator::kAllocData));
   tab_rec->table = table;
   tab_rec->vaddr = current_dalvik_offset_;
   int size = table[1];
-  tab_rec->targets = static_cast<LIR**>(arena_->NewMem(size * sizeof(LIR*), true,
+  tab_rec->targets = static_cast<LIR**>(arena_->Alloc(size * sizeof(LIR*),
                                                        ArenaAllocator::kAllocLIR));
   switch_tables_.Insert(tab_rec);
 
@@ -228,8 +226,8 @@
   const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
   // Add the table to the list - we'll process it later
   FillArrayData *tab_rec =
-      reinterpret_cast<FillArrayData*>(arena_->NewMem(sizeof(FillArrayData), true,
-                                                      ArenaAllocator::kAllocData));
+      reinterpret_cast<FillArrayData*>(arena_->Alloc(sizeof(FillArrayData),
+                                                     ArenaAllocator::kAllocData));
   tab_rec->table = table;
   tab_rec->vaddr = current_dalvik_offset_;
   uint16_t width = tab_rec->table[1];
diff --git a/compiler/dex/quick/mips/target_mips.cc b/compiler/dex/quick/mips/target_mips.cc
index 7a9e91a..4ee5b23 100644
--- a/compiler/dex/quick/mips/target_mips.cc
+++ b/compiler/dex/quick/mips/target_mips.cc
@@ -462,16 +462,14 @@
   int num_temps = sizeof(core_temps)/sizeof(*core_temps);
   int num_fp_regs = sizeof(FpRegs)/sizeof(*FpRegs);
   int num_fp_temps = sizeof(fp_temps)/sizeof(*fp_temps);
-  reg_pool_ = static_cast<RegisterPool*>(arena_->NewMem(sizeof(*reg_pool_), true,
-                                                        ArenaAllocator::kAllocRegAlloc));
+  reg_pool_ = static_cast<RegisterPool*>(arena_->Alloc(sizeof(*reg_pool_),
+                                                       ArenaAllocator::kAllocRegAlloc));
   reg_pool_->num_core_regs = num_regs;
   reg_pool_->core_regs = static_cast<RegisterInfo*>
-     (arena_->NewMem(num_regs * sizeof(*reg_pool_->core_regs), true,
-                     ArenaAllocator::kAllocRegAlloc));
+     (arena_->Alloc(num_regs * sizeof(*reg_pool_->core_regs), ArenaAllocator::kAllocRegAlloc));
   reg_pool_->num_fp_regs = num_fp_regs;
   reg_pool_->FPRegs = static_cast<RegisterInfo*>
-      (arena_->NewMem(num_fp_regs * sizeof(*reg_pool_->FPRegs), true,
-                      ArenaAllocator::kAllocRegAlloc));
+      (arena_->Alloc(num_fp_regs * sizeof(*reg_pool_->FPRegs), ArenaAllocator::kAllocRegAlloc));
   CompilerInitPool(reg_pool_->core_regs, core_regs, reg_pool_->num_core_regs);
   CompilerInitPool(reg_pool_->FPRegs, FpRegs, reg_pool_->num_fp_regs);
   // Keep special registers from being allocated
diff --git a/compiler/dex/quick/mir_to_lir-inl.h b/compiler/dex/quick/mir_to_lir-inl.h
index d9aef5d..440df2a 100644
--- a/compiler/dex/quick/mir_to_lir-inl.h
+++ b/compiler/dex/quick/mir_to_lir-inl.h
@@ -40,7 +40,7 @@
 
 inline LIR* Mir2Lir::RawLIR(int dalvik_offset, int opcode, int op0,
                             int op1, int op2, int op3, int op4, LIR* target) {
-  LIR* insn = static_cast<LIR*>(arena_->NewMem(sizeof(LIR), true, ArenaAllocator::kAllocLIR));
+  LIR* insn = static_cast<LIR*>(arena_->Alloc(sizeof(LIR), ArenaAllocator::kAllocLIR));
   insn->dalvik_offset = dalvik_offset;
   insn->opcode = opcode;
   insn->operands[0] = op0;
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index 862c1d7..c41feb1 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -812,8 +812,8 @@
 void Mir2Lir::MethodMIR2LIR() {
   // Hold the labels of each block.
   block_label_list_ =
-      static_cast<LIR*>(arena_->NewMem(sizeof(LIR) * mir_graph_->GetNumBlocks(), true,
-                                       ArenaAllocator::kAllocLIR));
+      static_cast<LIR*>(arena_->Alloc(sizeof(LIR) * mir_graph_->GetNumBlocks(),
+                                      ArenaAllocator::kAllocLIR));
 
   PreOrderDfsIterator iter(mir_graph_, false /* not iterative */);
   for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
diff --git a/compiler/dex/quick/ralloc_util.cc b/compiler/dex/quick/ralloc_util.cc
index a0b98dd..71b74a4 100644
--- a/compiler/dex/quick/ralloc_util.cc
+++ b/compiler/dex/quick/ralloc_util.cc
@@ -971,11 +971,11 @@
    * to describe register live ranges for GC.
    */
   RefCounts *core_regs =
-      static_cast<RefCounts*>(arena_->NewMem(sizeof(RefCounts) * num_regs, true,
-                                             ArenaAllocator::kAllocRegAlloc));
+      static_cast<RefCounts*>(arena_->Alloc(sizeof(RefCounts) * num_regs,
+                                            ArenaAllocator::kAllocRegAlloc));
   RefCounts *FpRegs =
-      static_cast<RefCounts *>(arena_->NewMem(sizeof(RefCounts) * num_regs, true,
-                                              ArenaAllocator::kAllocRegAlloc));
+      static_cast<RefCounts *>(arena_->Alloc(sizeof(RefCounts) * num_regs,
+                                             ArenaAllocator::kAllocRegAlloc));
   // Set ssa names for original Dalvik registers
   for (int i = 0; i < dalvik_regs; i++) {
     core_regs[i].s_reg = FpRegs[i].s_reg = i;
diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc
index 6e3e55f..2be2aa9 100644
--- a/compiler/dex/quick/x86/call_x86.cc
+++ b/compiler/dex/quick/x86/call_x86.cc
@@ -74,13 +74,12 @@
   }
   // Add the table to the list - we'll process it later
   SwitchTable *tab_rec =
-      static_cast<SwitchTable *>(arena_->NewMem(sizeof(SwitchTable), true,
-                                                ArenaAllocator::kAllocData));
+      static_cast<SwitchTable *>(arena_->Alloc(sizeof(SwitchTable), ArenaAllocator::kAllocData));
   tab_rec->table = table;
   tab_rec->vaddr = current_dalvik_offset_;
   int size = table[1];
-  tab_rec->targets = static_cast<LIR**>(arena_->NewMem(size * sizeof(LIR*), true,
-                                                       ArenaAllocator::kAllocLIR));
+  tab_rec->targets = static_cast<LIR**>(arena_->Alloc(size * sizeof(LIR*),
+                                                      ArenaAllocator::kAllocLIR));
   switch_tables_.Insert(tab_rec);
 
   // Get the switch value
@@ -131,8 +130,7 @@
   const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
   // Add the table to the list - we'll process it later
   FillArrayData *tab_rec =
-      static_cast<FillArrayData*>(arena_->NewMem(sizeof(FillArrayData), true,
-                                                 ArenaAllocator::kAllocData));
+      static_cast<FillArrayData*>(arena_->Alloc(sizeof(FillArrayData), ArenaAllocator::kAllocData));
   tab_rec->table = table;
   tab_rec->vaddr = current_dalvik_offset_;
   uint16_t width = tab_rec->table[1];
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index 699f3ae..26accab 100644
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -438,16 +438,16 @@
   int num_temps = sizeof(core_temps)/sizeof(*core_temps);
   int num_fp_regs = sizeof(FpRegs)/sizeof(*FpRegs);
   int num_fp_temps = sizeof(fp_temps)/sizeof(*fp_temps);
-  reg_pool_ = static_cast<RegisterPool*>(arena_->NewMem(sizeof(*reg_pool_), true,
-                                                        ArenaAllocator::kAllocRegAlloc));
+  reg_pool_ = static_cast<RegisterPool*>(arena_->Alloc(sizeof(*reg_pool_),
+                                                       ArenaAllocator::kAllocRegAlloc));
   reg_pool_->num_core_regs = num_regs;
   reg_pool_->core_regs =
-      static_cast<RegisterInfo*>(arena_->NewMem(num_regs * sizeof(*reg_pool_->core_regs), true,
-                                                ArenaAllocator::kAllocRegAlloc));
+      static_cast<RegisterInfo*>(arena_->Alloc(num_regs * sizeof(*reg_pool_->core_regs),
+                                               ArenaAllocator::kAllocRegAlloc));
   reg_pool_->num_fp_regs = num_fp_regs;
   reg_pool_->FPRegs =
-      static_cast<RegisterInfo *>(arena_->NewMem(num_fp_regs * sizeof(*reg_pool_->FPRegs), true,
-                                                 ArenaAllocator::kAllocRegAlloc));
+      static_cast<RegisterInfo *>(arena_->Alloc(num_fp_regs * sizeof(*reg_pool_->FPRegs),
+                                                ArenaAllocator::kAllocRegAlloc));
   CompilerInitPool(reg_pool_->core_regs, core_regs, reg_pool_->num_core_regs);
   CompilerInitPool(reg_pool_->FPRegs, FpRegs, reg_pool_->num_fp_regs);
   // Keep special registers from being allocated
diff --git a/compiler/dex/ssa_transformation.cc b/compiler/dex/ssa_transformation.cc
index 18d8e93..cd1602f 100644
--- a/compiler/dex/ssa_transformation.cc
+++ b/compiler/dex/ssa_transformation.cc
@@ -136,8 +136,8 @@
   int num_registers = cu_->num_dalvik_registers;
   /* Allocate num_dalvik_registers bit vector pointers */
   def_block_matrix_ = static_cast<ArenaBitVector**>
-      (arena_->NewMem(sizeof(ArenaBitVector *) * num_registers, true,
-                      ArenaAllocator::kAllocDFInfo));
+      (arena_->Alloc(sizeof(ArenaBitVector *) * num_registers,
+                     ArenaAllocator::kAllocDFInfo));
   int i;
 
   /* Initialize num_register vectors with num_blocks bits each */
@@ -384,8 +384,8 @@
 
   /* Initalize & Clear i_dom_list */
   if (i_dom_list_ == NULL) {
-    i_dom_list_ = static_cast<int*>(arena_->NewMem(sizeof(int) * num_reachable_blocks, false,
-                                                   ArenaAllocator::kAllocDFInfo));
+    i_dom_list_ = static_cast<int*>(arena_->Alloc(sizeof(int) * num_reachable_blocks,
+                                                  ArenaAllocator::kAllocDFInfo));
   }
   for (int i = 0; i < num_reachable_blocks; i++) {
     i_dom_list_[i] = NOTVISITED;
@@ -564,7 +564,7 @@
         continue;
       }
       MIR *phi =
-          static_cast<MIR*>(arena_->NewMem(sizeof(MIR), true, ArenaAllocator::kAllocDFInfo));
+          static_cast<MIR*>(arena_->Alloc(sizeof(MIR), ArenaAllocator::kAllocDFInfo));
       phi->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpPhi);
       phi->dalvikInsn.vA = dalvik_reg;
       phi->offset = phi_bb->start_offset;
@@ -610,14 +610,11 @@
     int num_uses = uses.size();
     mir->ssa_rep->num_uses = num_uses;
     mir->ssa_rep->uses =
-        static_cast<int*>(arena_->NewMem(sizeof(int) * num_uses, false,
-                                         ArenaAllocator::kAllocDFInfo));
+        static_cast<int*>(arena_->Alloc(sizeof(int) * num_uses, ArenaAllocator::kAllocDFInfo));
     mir->ssa_rep->fp_use =
-        static_cast<bool*>(arena_->NewMem(sizeof(bool) * num_uses, true,
-                                          ArenaAllocator::kAllocDFInfo));
+        static_cast<bool*>(arena_->Alloc(sizeof(bool) * num_uses, ArenaAllocator::kAllocDFInfo));
     int* incoming =
-        static_cast<int*>(arena_->NewMem(sizeof(int) * num_uses, false,
-                                         ArenaAllocator::kAllocDFInfo));
+        static_cast<int*>(arena_->Alloc(sizeof(int) * num_uses, ArenaAllocator::kAllocDFInfo));
     // TODO: Ugly, rework (but don't burden each MIR/LIR for Phi-only needs)
     mir->dalvikInsn.vB = reinterpret_cast<uintptr_t>(incoming);
 
@@ -644,7 +641,7 @@
 
   /* Save SSA map snapshot */
   int* saved_ssa_map =
-      static_cast<int*>(arena_->NewMem(map_size, false, ArenaAllocator::kAllocDalvikToSSAMap));
+      static_cast<int*>(arena_->Alloc(map_size, ArenaAllocator::kAllocDalvikToSSAMap));
   memcpy(saved_ssa_map, vreg_to_ssa_map_, map_size);
 
   if (block->fall_through) {
diff --git a/compiler/dex/vreg_analysis.cc b/compiler/dex/vreg_analysis.cc
index 5ee6753..07f37bb 100644
--- a/compiler/dex/vreg_analysis.cc
+++ b/compiler/dex/vreg_analysis.cc
@@ -374,8 +374,8 @@
  */
 void MIRGraph::BuildRegLocations() {
   /* Allocate the location map */
-  RegLocation* loc = static_cast<RegLocation*>(arena_->NewMem(GetNumSSARegs() * sizeof(*loc), true,
-                                                              ArenaAllocator::kAllocRegAlloc));
+  RegLocation* loc = static_cast<RegLocation*>(arena_->Alloc(GetNumSSARegs() * sizeof(*loc),
+                                                             ArenaAllocator::kAllocRegAlloc));
   for (int i = 0; i < GetNumSSARegs(); i++) {
     loc[i] = fresh_loc;
     loc[i].s_reg_low = i;
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 22a510b..fa1b8f9 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -26,6 +26,7 @@
 #include "compiled_class.h"
 #include "compiled_method.h"
 #include "dex_file.h"
+#include "dex/arena_allocator.h"
 #include "instruction_set.h"
 #include "invoke_type.h"
 #include "method_reference.h"
@@ -213,6 +214,9 @@
     support_boot_image_fixup_ = support_boot_image_fixup;
   }
 
+  ArenaPool& GetArenaPool() {
+    return arena_pool_;
+  }
 
   bool WriteElf(const std::string& android_root,
                 bool is_host,
@@ -423,6 +427,9 @@
 
   pthread_key_t tls_key_;
 
+  // Arena pool used by the compiler.
+  ArenaPool arena_pool_;
+
   typedef void (*CompilerEnableAutoElfLoadingFn)(CompilerDriver& driver);
   CompilerEnableAutoElfLoadingFn compiler_enable_auto_elf_loading_;