Compiler: continuing refactoring

Moving the arena memory allocation mechanism into its own class as
a prelude to cleaning up the MIR and LIR data structures.
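
As a rough sketch (names below are illustrative, not part of the
change), allocation now goes through an allocator instance instead of
the old free functions:

  ArenaAllocator arena;  // typically one per CompilationUnit
  void* p = arena.NewMem(size, true /* zero */, ArenaAllocator::kAllocMisc);
  arena.ArenaReset();    // bulk-free everything at end of compilation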

Reworked the bit vector as a class, using placement new with the arena
allocator.
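
For example (sketch; "arena" is an ArenaAllocator* and num_bits is a
placeholder):

  ArenaBitVector* bv =
      new (arena) ArenaBitVector(arena, num_bits, true /* expandable */);
  bv->SetBit(5);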

Reworked GrowableList as a class template using the new arena
allocator, and renamed it to GrowableArray.
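
Sketch of the new usage ("arena" and "bb" are placeholders; the kind
tag is one of the OatListKind values):

  GrowableArray<BasicBlock*>* blocks = new (arena)
      GrowableArray<BasicBlock*>(arena, 16, kGrowableArrayBlockList);
  blocks->Insert(bb);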

Change-Id: I639c4c08abe068094cae2649e04f58c8addd0015
diff --git a/build/Android.libart-compiler.mk b/build/Android.libart-compiler.mk
index 95c5ea0..515aaed 100644
--- a/build/Android.libart-compiler.mk
+++ b/build/Android.libart-compiler.mk
@@ -16,6 +16,8 @@
 
 LIBART_COMPILER_SRC_FILES := \
 	src/compiler/dex/local_value_numbering.cc \
+	src/compiler/dex/arena_allocator.cc \
+	src/compiler/dex/arena_bit_vector.cc \
 	src/compiler/dex/quick/arm/assemble_arm.cc \
 	src/compiler/dex/quick/arm/call_arm.cc \
 	src/compiler/dex/quick/arm/fp_arm.cc \
@@ -42,7 +44,6 @@
 	src/compiler/dex/quick/x86/target_x86.cc \
 	src/compiler/dex/quick/x86/utility_x86.cc \
 	src/compiler/dex/portable/mir_to_gbc.cc \
-	src/compiler/dex/compiler_utility.cc \
 	src/compiler/dex/mir_dataflow.cc \
 	src/compiler/dex/dataflow_iterator.cc \
 	src/compiler/dex/mir_optimization.cc \
diff --git a/src/compiler/dex/arena_allocator.cc b/src/compiler/dex/arena_allocator.cc
new file mode 100644
index 0000000..2db8445
--- /dev/null
+++ b/src/compiler/dex/arena_allocator.cc
@@ -0,0 +1,122 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "compiler_internals.h"
+#include "dex_file-inl.h"
+#include "arena_allocator.h"
+#include "base/logging.h"
+
+namespace art {
+
+static const char* alloc_names[ArenaAllocator::kNumAllocKinds] = {
+  "Misc       ",
+  "BasicBlock ",
+  "LIR        ",
+  "MIR        ",
+  "DataFlow   ",
+  "GrowList   ",
+  "GrowBitMap ",
+  "Dalvik2SSA ",
+  "DebugInfo  ",
+  "Successor  ",
+  "RegAlloc   ",
+  "Data       ",
+  "Preds      ",
+};
+
+ArenaAllocator::ArenaAllocator(size_t default_size)
+  : default_size_(default_size),
+    block_size_(default_size - sizeof(ArenaMemBlock)),
+    arena_head_(NULL),
+    current_arena_(NULL),
+    num_arena_blocks_(0),
+    malloc_bytes_(0) {
+  memset(&alloc_stats_[0], 0, sizeof(alloc_stats_));
+  // Start with an empty arena.
+  arena_head_ = current_arena_ = EmptyArena();
+  num_arena_blocks_++;
+}
+
+// Return an arena with no storage for use as a sentinel.
+ArenaAllocator::ArenaMemBlock* ArenaAllocator::EmptyArena() {
+  ArenaMemBlock* res = static_cast<ArenaMemBlock*>(malloc(sizeof(ArenaMemBlock)));
+  malloc_bytes_ += sizeof(ArenaMemBlock);
+  res->block_size = 0;
+  res->bytes_allocated = 0;
+  res->next = NULL;
+  return res;
+}
+
+// Arena-based malloc for compilation tasks.
+void* ArenaAllocator::NewMem(size_t size, bool zero, ArenaAllocKind kind) {
+  DCHECK(current_arena_ != NULL);
+  DCHECK(arena_head_ != NULL);
+  size = (size + 3) & ~3;  // Round up to a 4-byte boundary.
+  alloc_stats_[kind] += size;
+  if (size + current_arena_->bytes_allocated > current_arena_->block_size) {
+    size_t block_size = (size <= block_size_) ? block_size_ : size;
+    /* Time to allocate a new block */
+    size_t allocation_size = sizeof(ArenaMemBlock) + block_size;
+    ArenaMemBlock *new_arena =
+        static_cast<ArenaMemBlock*>(malloc(allocation_size));
+    if (new_arena == NULL) {
+      LOG(FATAL) << "Arena allocation failure";
+    }
+    malloc_bytes_ += allocation_size;
+    new_arena->block_size = block_size;
+    new_arena->bytes_allocated = 0;
+    new_arena->next = NULL;
+    current_arena_->next = new_arena;
+    current_arena_ = new_arena;
+    num_arena_blocks_++;
+  }
+  void* ptr = &current_arena_->ptr[current_arena_->bytes_allocated];
+  current_arena_->bytes_allocated += size;
+  if (zero) {
+    memset(ptr, 0, size);
+  }
+  return ptr;
+}
+
+// Reclaim all the arena blocks allocated so far.
+void ArenaAllocator::ArenaReset() {
+  ArenaMemBlock* head = arena_head_;
+  while (head != NULL) {
+    ArenaMemBlock* p = head;
+    head = head->next;
+    free(p);
+  }
+  // We must always have an arena.  Create a zero-length one.
+  arena_head_ = current_arena_ = EmptyArena();
+  num_arena_blocks_ = 1;
+}
+
+// Dump memory usage stats.
+void ArenaAllocator::DumpMemStats(std::ostream& os) const {
+  size_t total = 0;
+  for (int i = 0; i < kNumAllocKinds; i++) {
+    total += alloc_stats_[i];
+  }
+  os << " MEM: used: " << total << ", allocated: " << malloc_bytes_ << ", wasted: "
+     << malloc_bytes_ - (total + (num_arena_blocks_ * sizeof(ArenaMemBlock))) << "\n";
+  os << "Number of arenas allocated: " << num_arena_blocks_ << "\n";
+  os << "===== Allocation by kind\n";
+  for (int i = 0; i < kNumAllocKinds; i++) {
+    os << alloc_names[i] << std::setw(10) << alloc_stats_[i] << "\n";
+  }
+}
+
+}  // namespace art
diff --git a/src/compiler/dex/arena_allocator.h b/src/compiler/dex/arena_allocator.h
new file mode 100644
index 0000000..53d1a1b
--- /dev/null
+++ b/src/compiler/dex/arena_allocator.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_COMPILER_DEX_COMPILER_ARENA_ALLOCATOR_H_
+#define ART_SRC_COMPILER_DEX_COMPILER_ARENA_ALLOCATOR_H_
+
+#include <stdint.h>
+#include <stddef.h>
+#include "compiler_enums.h"
+
+namespace art {
+
+#define ARENA_DEFAULT_BLOCK_SIZE (256 * 1024)
+
+class ArenaAllocator {
+  public:
+
+    // Type of allocation for memory tuning.
+    enum ArenaAllocKind {
+      kAllocMisc,
+      kAllocBB,
+      kAllocLIR,
+      kAllocMIR,
+      kAllocDFInfo,
+      kAllocGrowableArray,
+      kAllocGrowableBitMap,
+      kAllocDalvikToSSAMap,
+      kAllocDebugInfo,
+      kAllocSuccessor,
+      kAllocRegAlloc,
+      kAllocData,
+      kAllocPredecessors,
+      kNumAllocKinds
+    };
+
+    explicit ArenaAllocator(size_t default_size = ARENA_DEFAULT_BLOCK_SIZE);
+    void* NewMem(size_t size, bool zero, ArenaAllocKind kind);
+    void ArenaReset();
+    size_t BytesAllocated() const {
+      return malloc_bytes_;
+    }
+
+    void DumpMemStats(std::ostream& os) const;
+
+  private:
+
+    // Variable-length allocation block.
+    struct ArenaMemBlock {
+      size_t block_size;
+      size_t bytes_allocated;
+      ArenaMemBlock* next;
+      char ptr[0];            // Start of the variable-length allocation area.
+    };
+
+    ArenaMemBlock* EmptyArena();
+
+    size_t default_size_;                    // Smallest size of new allocation block.
+    size_t block_size_;                      // Amount of allocatable bytes on a default block.
+    ArenaMemBlock* arena_head_;              // Head of linked list of allocation blocks.
+    ArenaMemBlock* current_arena_;           // NOTE: code assumes there's always at least 1 block.
+    int num_arena_blocks_;
+    uint32_t malloc_bytes_;                 // Number of bytes actually malloc'd.
+    uint32_t alloc_stats_[kNumAllocKinds];  // Bytes used by various allocation kinds.
+
+};  // ArenaAllocator
+
+
+struct MemStats {
+  public:
+    explicit MemStats(const ArenaAllocator& arena) : arena_(arena) {}
+    void Dump(std::ostream& os) const {
+      arena_.DumpMemStats(os);
+    }
+  private:
+    const ArenaAllocator& arena_;
+};  // MemStats
+
+}  // namespace art
+
+#endif  // ART_SRC_COMPILER_DEX_COMPILER_ARENA_ALLOCATOR_H_
diff --git a/src/compiler/dex/arena_bit_vector.cc b/src/compiler/dex/arena_bit_vector.cc
new file mode 100644
index 0000000..6b0fe8f
--- /dev/null
+++ b/src/compiler/dex/arena_bit_vector.cc
@@ -0,0 +1,195 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "compiler_internals.h"
+#include "dex_file-inl.h"
+
+namespace art {
+
+// TODO: profile to make sure this is still a win relative to just using shifted masks.
+static uint32_t check_masks[32] = {
+  0x00000001, 0x00000002, 0x00000004, 0x00000008, 0x00000010,
+  0x00000020, 0x00000040, 0x00000080, 0x00000100, 0x00000200,
+  0x00000400, 0x00000800, 0x00001000, 0x00002000, 0x00004000,
+  0x00008000, 0x00010000, 0x00020000, 0x00040000, 0x00080000,
+  0x00100000, 0x00200000, 0x00400000, 0x00800000, 0x01000000,
+  0x02000000, 0x04000000, 0x08000000, 0x10000000, 0x20000000,
+  0x40000000, 0x80000000 };
+
+ArenaBitVector::ArenaBitVector(ArenaAllocator* arena, unsigned int start_bits,
+                               bool expandable, OatBitMapKind kind)
+  :  arena_(arena),
+     expandable_(expandable),
+     kind_(kind),
+     storage_size_((start_bits + 31) >> 5),
+     storage_(static_cast<uint32_t*>(arena_->NewMem(storage_size_ * sizeof(uint32_t), true,
+                                                    ArenaAllocator::kAllocGrowableBitMap))) {
+  DCHECK_EQ(sizeof(storage_[0]), 4U);    // Assuming 32-bit units.
+  // TODO: collect detailed memory usage stats by bit vector kind.
+}
+
+/*
+ * Determine whether or not the specified bit is set.
+ */
+bool ArenaBitVector::IsBitSet(unsigned int num) {
+  DCHECK_LT(num, storage_size_ * sizeof(uint32_t) * 8);
+
+  unsigned int val = storage_[num >> 5] & check_masks[num & 0x1f];
+  return (val != 0);
+}
+
+// Mark all bits as "clear".
+void ArenaBitVector::ClearAllBits() {
+  memset(storage_, 0, storage_size_ * sizeof(uint32_t));
+}
+
+// Mark the specified bit as "set".
+/*
+ * TUNING: this could have pathologically bad growth/expand behavior.  Make sure we're
+ * not using it badly or change resize mechanism.
+ */
+void ArenaBitVector::SetBit(unsigned int num) {
+  if (num >= storage_size_ * sizeof(uint32_t) * 8) {
+    DCHECK(expandable_) << "Attempted to expand a non-expandable bitmap to position " << num;
+
+    /* Round up to word boundaries for "num+1" bits */
+    unsigned int new_size = (num + 1 + 31) >> 5;
+    DCHECK_GT(new_size, storage_size_);
+    uint32_t *new_storage =
+        static_cast<uint32_t*>(arena_->NewMem(new_size * sizeof(uint32_t), false,
+                                              ArenaAllocator::kAllocGrowableBitMap));
+    memcpy(new_storage, storage_, storage_size_ * sizeof(uint32_t));
+    // Zero out the new storage words.
+    memset(&new_storage[storage_size_], 0, (new_size - storage_size_) * sizeof(uint32_t));
+    // TODO: collect stats on space wasted because of resize.
+    storage_ = new_storage;
+    storage_size_ = new_size;
+  }
+
+  storage_[num >> 5] |= check_masks[num & 0x1f];
+}
+
+// Mark the specified bit as "unset".
+void ArenaBitVector::ClearBit(unsigned int num) {
+  DCHECK_LT(num, storage_size_ * sizeof(uint32_t) * 8);
+  storage_[num >> 5] &= ~check_masks[num & 0x1f];
+}
+
+// Copy a whole vector to the other. Sizes must match.
+void ArenaBitVector::Copy(ArenaBitVector* src) {
+  DCHECK_EQ(storage_size_, src->GetStorageSize());
+  memcpy(storage_, src->GetRawStorage(), sizeof(uint32_t) * storage_size_);
+}
+
+// Intersect with another bit vector.  Sizes and expandability must be the same.
+void ArenaBitVector::Intersect(const ArenaBitVector* src) {
+  DCHECK_EQ(storage_size_, src->GetStorageSize());
+  DCHECK_EQ(expandable_, src->IsExpandable());
+  for (unsigned int idx = 0; idx < storage_size_; idx++) {
+    storage_[idx] &= src->GetRawStorageWord(idx);
+  }
+}
+
+/*
+ * Union with another bit vector.  Sizes and expandability must be the same.
+ */
+void ArenaBitVector::Union(const ArenaBitVector* src) {
+  DCHECK_EQ(storage_size_, src->GetStorageSize());
+  DCHECK_EQ(expandable_, src->IsExpandable());
+  for (unsigned int idx = 0; idx < storage_size_; idx++) {
+    storage_[idx] |= src->GetRawStorageWord(idx);
+  }
+}
+
+// Are we equal to another bit vector?  Note: expandability attributes must also match.
+bool ArenaBitVector::Equal(const ArenaBitVector* src) {
+  if (storage_size_ != src->GetStorageSize() ||
+      expandable_ != src->IsExpandable())
+    return false;
+
+  for (unsigned int idx = 0; idx < storage_size_; idx++) {
+    if (storage_[idx] != src->GetRawStorageWord(idx)) return false;
+  }
+  return true;
+}
+
+// Count the number of bits that are set.
+int ArenaBitVector::NumSetBits()
+{
+  unsigned int count = 0;
+
+  for (unsigned int word = 0; word < storage_size_; word++) {
+    count += __builtin_popcount(storage_[word]);
+  }
+  return count;
+}
+
+// Return the position of the next set bit.  -1 means end-of-element reached.
+// TUNING: Hot function.
+int ArenaBitVector::Iterator::Next()
+{
+  // Did anything obviously change since we started?
+  DCHECK_EQ(bit_size_, p_bits_->GetStorageSize() * sizeof(uint32_t) * 8);
+  DCHECK_EQ(bit_storage_, p_bits_->GetRawStorage());
+
+  if (bit_index_ >= bit_size_) return -1;
+
+  uint32_t word_index = bit_index_ >> 5;
+  uint32_t end_word_index = bit_size_ >> 5;
+  uint32_t word = bit_storage_[word_index++];
+
+  // Mask out any bits in the first word we've already considered.
+  word &= ~((1 << (bit_index_ & 0x1f))-1);
+
+  for (; word_index <= end_word_index;) {
+    uint32_t bit_pos = bit_index_ & 0x1f;
+    if (word == 0) {
+      bit_index_ += (32 - bit_pos);
+      word = bit_storage_[word_index++];
+      continue;
+    }
+    for (; bit_pos < 32; bit_pos++) {
+      if (word & (1 << bit_pos)) {
+        bit_index_++;
+        return bit_index_ - 1;
+      }
+      bit_index_++;
+    }
+    word = bit_storage_[word_index++];
+  }
+  bit_index_ = bit_size_;
+  return -1;
+}
+
+/*
+ * Mark the first num_bits bits as "set". Unlike ClearAllBits, this cannot
+ * simply blanket the whole storage, since setting the unused trailing bits
+ * to one would confuse the iterator.
+ */
+void ArenaBitVector::SetInitialBits(unsigned int num_bits)
+{
+  DCHECK_LE(((num_bits + 31) >> 5), storage_size_);
+  unsigned int idx;
+  for (idx = 0; idx < (num_bits >> 5); idx++) {
+    storage_[idx] = -1;
+  }
+  unsigned int rem_num_bits = num_bits & 0x1f;
+  if (rem_num_bits) {
+    storage_[idx] = (1 << rem_num_bits) - 1;
+  }
+}
+
+}  // namespace art
diff --git a/src/compiler/dex/arena_bit_vector.h b/src/compiler/dex/arena_bit_vector.h
new file mode 100644
index 0000000..ffc2160
--- /dev/null
+++ b/src/compiler/dex/arena_bit_vector.h
@@ -0,0 +1,116 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_COMPILER_DEX_COMPILER_ARENA_BIT_VECTOR_H_
+#define ART_SRC_COMPILER_DEX_COMPILER_ARENA_BIT_VECTOR_H_
+
+#include <stdint.h>
+#include <stddef.h>
+#include "compiler_enums.h"
+#include "arena_allocator.h"
+
+namespace art {
+
+// Type of growable bitmap for memory tuning.
+enum OatBitMapKind {
+  kBitMapMisc = 0,
+  kBitMapUse,
+  kBitMapDef,
+  kBitMapLiveIn,
+  kBitMapBMatrix,
+  kBitMapDominators,
+  kBitMapIDominated,
+  kBitMapDomFrontier,
+  kBitMapPhi,
+  kBitMapTmpBlocks,
+  kBitMapInputBlocks,
+  kBitMapRegisterV,
+  kBitMapTempSSARegisterV,
+  kBitMapNullCheck,
+  kBitMapTmpBlockV,
+  kBitMapPredecessors,
+  kNumBitMapKinds
+};
+
+/*
+ * Expanding bitmap, used for tracking resources.  Bits are numbered starting
+ * from zero.  All operations on a BitVector are unsynchronized.
+ */
+class ArenaBitVector {
+  public:
+
+    class Iterator {
+      public:
+        Iterator(ArenaBitVector* bit_vector)
+          : p_bits_(bit_vector),
+            bit_storage_(bit_vector->GetRawStorage()),
+            bit_index_(0),
+            bit_size_(p_bits_->storage_size_ * sizeof(uint32_t) * 8) {};
+
+        int Next();  // Returns -1 when no next.
+
+        static void* operator new(size_t size, ArenaAllocator* arena) {
+          return arena->NewMem(sizeof(ArenaBitVector::Iterator), true,
+                               ArenaAllocator::kAllocGrowableBitMap);
+        };
+        static void operator delete(void* p) {};  // Nop.
+
+      private:
+        ArenaBitVector* const p_bits_;
+        uint32_t* const bit_storage_;
+        uint32_t bit_index_;            // Current bit position.
+        const uint32_t bit_size_;       // Size of vector in bits.
+    };
+
+    ArenaBitVector(ArenaAllocator* arena, unsigned int start_bits, bool expandable,
+                   OatBitMapKind kind = kBitMapMisc);
+    ~ArenaBitVector() {};
+
+    static void* operator new(size_t size, ArenaAllocator* arena) {
+      return arena->NewMem(sizeof(ArenaBitVector), true, ArenaAllocator::kAllocGrowableBitMap);
+    }
+    static void operator delete(void* p) {};  // Nop.
+
+    void SetBit(unsigned int num);
+    void ClearBit(unsigned int num);
+    void MarkAllBits(bool set);
+    void DebugBitVector(char* msg, int length);
+    bool IsBitSet(unsigned int num);
+    void ClearAllBits();
+    void SetInitialBits(unsigned int num_bits);
+    void Copy(ArenaBitVector* src);
+    void Intersect(const ArenaBitVector* src);
+    void Union(const ArenaBitVector* src);
+    bool Equal(const ArenaBitVector* src);
+    int NumSetBits();
+
+    uint32_t GetStorageSize() const { return storage_size_; }
+    bool IsExpandable() const { return expandable_; }
+    uint32_t GetRawStorageWord(size_t idx) const { return storage_[idx]; }
+    uint32_t* GetRawStorage() { return storage_; }
+
+  private:
+    ArenaAllocator* const arena_;
+    const bool expandable_;         // expand bitmap if we run out?
+    const OatBitMapKind kind_;      // for memory use tuning.
+    uint32_t   storage_size_;       // current size, in 32-bit words.
+    uint32_t*  storage_;
+};
+
+
+}  // namespace art
+
+#endif  // ART_SRC_COMPILER_DEX_COMPILER_ARENA_BIT_VECTOR_H_
diff --git a/src/compiler/dex/backend.h b/src/compiler/dex/backend.h
index 804bc36..1f0c5d8 100644
--- a/src/compiler/dex/backend.h
+++ b/src/compiler/dex/backend.h
@@ -18,6 +18,7 @@
 #define ART_SRC_COMPILER_DEX_BACKEND_H_
 
 #include "compiled_method.h"
+#include "arena_allocator.h"
 
 namespace art {
 
@@ -29,7 +30,8 @@
     virtual CompiledMethod* GetCompiledMethod() = 0;
 
   protected:
-    Backend() {};
+    Backend() : arena_(NULL) {};
+    ArenaAllocator* arena_;
 
 };  // Class Backend
 
diff --git a/src/compiler/dex/compiler_internals.h b/src/compiler/dex/compiler_internals.h
index 8ef8a71..70e81d0 100644
--- a/src/compiler/dex/compiler_internals.h
+++ b/src/compiler/dex/compiler_internals.h
@@ -28,7 +28,6 @@
 #include "compiler/driver/compiler_driver.h"
 #include "mir_graph.h"
 #include "compiler_ir.h"
-#include "compiler_utility.h"
 #include "frontend.h"
 #include "gc/card_table.h"
 #include "mirror/dex_cache.h"
diff --git a/src/compiler/dex/compiler_ir.h b/src/compiler/dex/compiler_ir.h
index d4bf3da..eb1aec1 100644
--- a/src/compiler/dex/compiler_ir.h
+++ b/src/compiler/dex/compiler_ir.h
@@ -26,13 +26,12 @@
 #include "compiler/llvm/intrinsic_helper.h"
 #include "compiler/llvm/ir_builder.h"
 #include "compiler_enums.h"
-#include "compiler_utility.h"
 #include "dex_instruction.h"
 #include "safe_map.h"
+#include "arena_allocator.h"
 
 namespace art {
 
-struct ArenaBitVector;
 class LLVMInfo;
 namespace llvm {
 class LlvmCompilationUnit;
@@ -67,10 +66,6 @@
       num_regs(0),
       num_compiler_temps(0),
       compiler_flip_match(false),
-      arena_head(NULL),
-      current_arena(NULL),
-      num_arena_blocks(0),
-      mstats(NULL),
       mir_graph(NULL),
       cg(NULL) {}
   /*
@@ -109,10 +104,7 @@
   bool compiler_flip_match;
 
   // TODO: move memory management to mir_graph, or just switch to using standard containers.
-  ArenaMemBlock* arena_head;
-  ArenaMemBlock* current_arena;
-  int num_arena_blocks;
-  Memstats* mstats;
+  ArenaAllocator arena;
 
   UniquePtr<MIRGraph> mir_graph;   // MIR container.
   UniquePtr<Backend> cg;           // Target-specific codegen.
diff --git a/src/compiler/dex/compiler_utility.cc b/src/compiler/dex/compiler_utility.cc
deleted file mode 100644
index 71c9713..0000000
--- a/src/compiler/dex/compiler_utility.cc
+++ /dev/null
@@ -1,630 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "compiler_internals.h"
-#include "dex_file-inl.h"
-
-namespace art {
-
-#ifdef WITH_MEMSTATS
-struct Memstats {
-  uint32_t alloc_stats[kNumAllocKinds];
-  int list_sizes[kNumListKinds];
-  int list_wasted[kNumListKinds];
-  int list_grows[kNumListKinds];
-  int list_max_elems[kNumListKinds];
-  int bit_map_sizes[kNumBitMapKinds];
-  int bit_map_wasted[kNumBitMapKinds];
-  int bit_map_grows[kNumBitMapKinds];
-};
-
-const char* alloc_names[kNumAllocKinds] = {
-  "Misc       ",
-  "BasicBlock ",
-  "LIR        ",
-  "MIR        ",
-  "DataFlow   ",
-  "GrowList   ",
-  "GrowBitMap ",
-  "Dalvik2SSA ",
-  "DebugInfo  ",
-  "Successor  ",
-  "RegAlloc   ",
-  "Data       ",
-  "Preds      ",
-};
-
-const char* list_names[kNumListKinds] = {
-  "Misc                  ",
-  "block_list             ",
-  "SSAtoDalvik           ",
-  "dfs_order              ",
-  "dfs_post_order          ",
-  "dom_post_order_traversal ",
-  "throw_launch_pads       ",
-  "suspend_launch_pads     ",
-  "switch_tables          ",
-  "fill_array_data         ",
-  "SuccessorBlocks       ",
-  "Predecessors          ",
-};
-
-const char* bit_map_names[kNumBitMapKinds] = {
-  "Misc                  ",
-  "Use                   ",
-  "Def                   ",
-  "LiveIn                ",
-  "BlockMatrix           ",
-  "Dominators            ",
-  "IDominated            ",
-  "DomFrontier           ",
-  "Phi                   ",
-  "TmpBlocks             ",
-  "InputBlocks           ",
-  "RegisterV             ",
-  "TempSSARegisterV      ",
-  "Null Check            ",
-  "TmpBlockV             ",
-  "Predecessors          ",
-};
-#endif
-
-#define kArenaBitVectorGrowth    4   /* increase by 4 uint32_ts when limit hit */
-
-/* Allocate the initial memory block for arena-based allocation */
-bool HeapInit(CompilationUnit* cu)
-{
-  DCHECK(cu->arena_head == NULL);
-  cu->arena_head =
-      static_cast<ArenaMemBlock*>(malloc(sizeof(ArenaMemBlock) + ARENA_DEFAULT_SIZE));
-  if (cu->arena_head == NULL) {
-    LOG(FATAL) << "No memory left to create compiler heap memory";
-  }
-  cu->arena_head->block_size = ARENA_DEFAULT_SIZE;
-  cu->current_arena = cu->arena_head;
-  cu->current_arena->bytes_allocated = 0;
-  cu->current_arena->next = NULL;
-  cu->num_arena_blocks = 1;
-#ifdef WITH_MEMSTATS
-  cu->mstats = (Memstats*) NewMem(cu, sizeof(Memstats), true,
-                   kAllocDebugInfo);
-#endif
-  return true;
-}
-
-/* Arena-based malloc for compilation tasks */
-void* NewMem(CompilationUnit* cu, size_t size, bool zero, oat_alloc_kind kind)
-{
-  size = (size + 3) & ~3;
-#ifdef WITH_MEMSTATS
-  if (cu->mstats != NULL) {
-    cu->mstats->alloc_stats[kind] += size;
-  }
-#endif
-retry:
-  /* Normal case - space is available in the current page */
-  if (size + cu->current_arena->bytes_allocated <=
-    cu->current_arena->block_size) {
-    void *ptr;
-    ptr = &cu->current_arena->ptr[cu->current_arena->bytes_allocated];
-    cu->current_arena->bytes_allocated += size;
-    if (zero) {
-      memset(ptr, 0, size);
-    }
-    return ptr;
-  } else {
-    /*
-     * See if there are previously allocated arena blocks before the last
-     * reset
-     */
-    if (cu->current_arena->next) {
-        cu->current_arena = cu->current_arena->next;
-        cu->current_arena->bytes_allocated = 0;
-        goto retry;
-    }
-
-    size_t block_size = (size < ARENA_DEFAULT_SIZE) ?  ARENA_DEFAULT_SIZE : size;
-    /* Time to allocate a new arena */
-    ArenaMemBlock *new_arena =
-        static_cast<ArenaMemBlock*>(malloc(sizeof(ArenaMemBlock) + block_size));
-    if (new_arena == NULL) {
-      LOG(FATAL) << "Arena allocation failure";
-    }
-    new_arena->block_size = block_size;
-    new_arena->bytes_allocated = 0;
-    new_arena->next = NULL;
-    cu->current_arena->next = new_arena;
-    cu->current_arena = new_arena;
-    cu->num_arena_blocks++;
-    if (cu->num_arena_blocks > 20000) {
-      LOG(INFO) << "Total arena pages: " << cu->num_arena_blocks;
-    }
-    goto retry;
-  }
-}
-
-/* Reclaim all the arena blocks allocated so far */
-void ArenaReset(CompilationUnit* cu)
-{
-  ArenaMemBlock* head = cu->arena_head;
-  while (head != NULL) {
-    ArenaMemBlock* p = head;
-    head = head->next;
-    free(p);
-  }
-  cu->arena_head = NULL;
-  cu->current_arena = NULL;
-}
-
-/* Growable List initialization */
-void CompilerInitGrowableList(CompilationUnit* cu, GrowableList* g_list,
-                       size_t init_length, oat_list_kind kind)
-{
-  g_list->num_allocated = init_length;
-  g_list->num_used = 0;
-  g_list->elem_list = static_cast<uintptr_t *>(NewMem(cu, sizeof(intptr_t) * init_length,
-                                             true, kAllocGrowableList));
-#ifdef WITH_MEMSTATS
-  cu->mstats->list_sizes[kind] += sizeof(uintptr_t) * init_length;
-  g_list->kind = kind;
-  if (static_cast<int>(init_length) > cu->mstats->list_max_elems[kind]) {
-    cu->mstats->list_max_elems[kind] = init_length;
-  }
-#endif
-}
-
-void ReallocGrowableList(CompilationUnit* cu, GrowableList* g_list, size_t new_length)
-{
-  if (new_length > g_list->num_allocated) {
-    uintptr_t *new_array =
-        static_cast<uintptr_t*>(NewMem(cu, sizeof(uintptr_t) * new_length, true,
-                                       kAllocGrowableList));
-    memcpy(new_array, g_list->elem_list, sizeof(uintptr_t) * g_list->num_allocated);
-    g_list->num_allocated = new_length;
-    g_list->elem_list = new_array;
-  }
-}
-
-/* Expand the capacity of a growable list */
-static void ExpandGrowableList(CompilationUnit* cu, GrowableList* g_list)
-{
-  int new_length = g_list->num_allocated;
-  if (new_length < 128) {
-    new_length <<= 1;
-  } else {
-    new_length += 128;
-  }
-  uintptr_t *new_array =
-      static_cast<uintptr_t*>(NewMem(cu, sizeof(uintptr_t) * new_length, true,
-                                     kAllocGrowableList));
-  memcpy(new_array, g_list->elem_list, sizeof(uintptr_t) * g_list->num_allocated);
-#ifdef WITH_MEMSTATS
-  cu->mstats->list_sizes[g_list->kind] += sizeof(uintptr_t) * new_length;
-  cu->mstats->list_wasted[g_list->kind] +=
-      sizeof(uintptr_t) * g_list->num_allocated;
-  cu->mstats->list_grows[g_list->kind]++;
-  if (new_length > cu->mstats->list_max_elems[g_list->kind]) {
-    cu->mstats->list_max_elems[g_list->kind] = new_length;
-  }
-#endif
-  g_list->num_allocated = new_length;
-  g_list->elem_list = new_array;
-}
-
-/* Insert a new element into the growable list */
-void InsertGrowableList(CompilationUnit* cu, GrowableList* g_list,
-                           uintptr_t elem)
-{
-  DCHECK_NE(g_list->num_allocated, 0U);
-  if (g_list->num_used == g_list->num_allocated) {
-    ExpandGrowableList(cu, g_list);
-  }
-  g_list->elem_list[g_list->num_used++] = elem;
-}
-
-/* Delete an element from a growable list. Element must be present */
-void DeleteGrowableList(GrowableList* g_list, uintptr_t elem)
-{
-  bool found = false;
-  for (unsigned int i = 0; i < g_list->num_used; i++) {
-    if (!found && g_list->elem_list[i] == elem) {
-      found = true;
-    }
-    if (found) {
-      g_list->elem_list[i] = g_list->elem_list[i+1];
-    }
-  }
-  DCHECK_EQ(found, true);
-  g_list->num_used--;
-}
-
-void GrowableListIteratorInit(GrowableList* g_list,
-                               GrowableListIterator* iterator)
-{
-  iterator->list = g_list;
-  iterator->idx = 0;
-  iterator->size = g_list->num_used;
-}
-
-uintptr_t GrowableListIteratorNext(GrowableListIterator* iterator)
-{
-  DCHECK_EQ(iterator->size, iterator->list->num_used);
-  if (iterator->idx == iterator->size) return 0;
-  return iterator->list->elem_list[iterator->idx++];
-}
-
-uintptr_t GrowableListGetElement(const GrowableList* g_list, size_t idx)
-{
-  DCHECK_LT(idx, g_list->num_used);
-  return g_list->elem_list[idx];
-}
-
-#ifdef WITH_MEMSTATS
-/* Dump memory usage stats */
-void DumpMemStats(CompilationUnit* cu)
-{
-  uint32_t total = 0;
-  for (int i = 0; i < kNumAllocKinds; i++) {
-    total += cu->mstats->alloc_stats[i];
-  }
-  if (total > (10 * 1024 * 1024)) {
-    LOG(INFO) << "MEMUSAGE: " << total << " : "
-        << PrettyMethod(cu->method_idx, *cu->dex_file);
-    LOG(INFO) << "insns_size: " << cu->code_item->insns_size_in_code_units_;
-    LOG(INFO) << "===== Overall allocations";
-    for (int i = 0; i < kNumAllocKinds; i++) {
-        LOG(INFO) << alloc_names[i] << std::setw(10) <<
-        cu->mstats->alloc_stats[i];
-    }
-    LOG(INFO) << "===== GrowableList allocations";
-    for (int i = 0; i < kNumListKinds; i++) {
-      LOG(INFO) << list_names[i]
-                << " S:" << cu->mstats->list_sizes[i]
-                << ", W:" << cu->mstats->list_wasted[i]
-                << ", G:" << cu->mstats->list_grows[i]
-                << ", E:" << cu->mstats->list_max_elems[i];
-    }
-    LOG(INFO) << "===== GrowableBitMap allocations";
-    for (int i = 0; i < kNumBitMapKinds; i++) {
-      LOG(INFO) << bit_map_names[i]
-                << " S:" << cu->mstats->bit_map_sizes[i]
-                << ", W:" << cu->mstats->bit_map_wasted[i]
-                << ", G:" << cu->mstats->bit_map_grows[i];
-    }
-  }
-}
-#endif
-
-static uint32_t check_masks[32] = {
-  0x00000001, 0x00000002, 0x00000004, 0x00000008, 0x00000010,
-  0x00000020, 0x00000040, 0x00000080, 0x00000100, 0x00000200,
-  0x00000400, 0x00000800, 0x00001000, 0x00002000, 0x00004000,
-  0x00008000, 0x00010000, 0x00020000, 0x00040000, 0x00080000,
-  0x00100000, 0x00200000, 0x00400000, 0x00800000, 0x01000000,
-  0x02000000, 0x04000000, 0x08000000, 0x10000000, 0x20000000,
-  0x40000000, 0x80000000 };
-
-/*
- * Allocate a bit vector with enough space to hold at least the specified
- * number of bits.
- *
- * NOTE: memory is allocated from the compiler arena.
- */
-ArenaBitVector* AllocBitVector(CompilationUnit* cu,
-                                unsigned int start_bits, bool expandable,
-                                oat_bit_map_kind kind)
-{
-  ArenaBitVector* bv;
-  unsigned int count;
-
-  DCHECK_EQ(sizeof(bv->storage[0]), 4U);        /* assuming 32-bit units */
-
-  bv = static_cast<ArenaBitVector*>(NewMem(cu, sizeof(ArenaBitVector), false,
-                                                kAllocGrowableBitMap));
-
-  count = (start_bits + 31) >> 5;
-
-  bv->storage_size = count;
-  bv->expandable = expandable;
-  bv->storage = static_cast<uint32_t*>(NewMem(cu, count * sizeof(uint32_t), true,
-                                              kAllocGrowableBitMap));
-#ifdef WITH_MEMSTATS
-  bv->kind = kind;
-  cu->mstats->bit_map_sizes[kind] += count * sizeof(uint32_t);
-#endif
-  return bv;
-}
-
-/*
- * Determine whether or not the specified bit is set.
- */
-bool IsBitSet(const ArenaBitVector* p_bits, unsigned int num)
-{
-  DCHECK_LT(num, p_bits->storage_size * sizeof(uint32_t) * 8);
-
-  unsigned int val = p_bits->storage[num >> 5] & check_masks[num & 0x1f];
-  return (val != 0);
-}
-
-/*
- * Mark all bits bit as "clear".
- */
-void ClearAllBits(ArenaBitVector* p_bits)
-{
-  unsigned int count = p_bits->storage_size;
-  memset(p_bits->storage, 0, count * sizeof(uint32_t));
-}
-
-/*
- * Mark the specified bit as "set".
- *
- * Returns "false" if the bit is outside the range of the vector and we're
- * not allowed to expand.
- *
- * NOTE: memory is allocated from the compiler arena.
- */
-bool SetBit(CompilationUnit* cu, ArenaBitVector* p_bits, unsigned int num)
-{
-  if (num >= p_bits->storage_size * sizeof(uint32_t) * 8) {
-    if (!p_bits->expandable) {
-      LOG(FATAL) << "Can't expand";
-    }
-
-    /* Round up to word boundaries for "num+1" bits */
-    unsigned int new_size = (num + 1 + 31) >> 5;
-    DCHECK_GT(new_size, p_bits->storage_size);
-    uint32_t *new_storage = static_cast<uint32_t*>(NewMem(cu, new_size * sizeof(uint32_t), false,
-                                                         kAllocGrowableBitMap));
-    memcpy(new_storage, p_bits->storage, p_bits->storage_size * sizeof(uint32_t));
-    memset(&new_storage[p_bits->storage_size], 0,
-           (new_size - p_bits->storage_size) * sizeof(uint32_t));
-#ifdef WITH_MEMSTATS
-    cu->mstats->bit_map_wasted[p_bits->kind] +=
-        p_bits->storage_size * sizeof(uint32_t);
-    cu->mstats->bit_map_sizes[p_bits->kind] += new_size * sizeof(uint32_t);
-    cu->mstats->bit_map_grows[p_bits->kind]++;
-#endif
-    p_bits->storage = new_storage;
-    p_bits->storage_size = new_size;
-  }
-
-  p_bits->storage[num >> 5] |= check_masks[num & 0x1f];
-  return true;
-}
-
-/*
- * Mark the specified bit as "unset".
- *
- * Returns "false" if the bit is outside the range of the vector and we're
- * not allowed to expand.
- *
- * NOTE: memory is allocated from the compiler arena.
- */
-bool ClearBit(ArenaBitVector* p_bits, unsigned int num)
-{
-  if (num >= p_bits->storage_size * sizeof(uint32_t) * 8) {
-    LOG(FATAL) << "Attempt to clear a bit not set in the vector yet";;
-  }
-
-  p_bits->storage[num >> 5] &= ~check_masks[num & 0x1f];
-  return true;
-}
-
-/* Initialize the iterator structure */
-void BitVectorIteratorInit(ArenaBitVector* p_bits,
-                            ArenaBitVectorIterator* iterator)
-{
-  iterator->p_bits = p_bits;
-  iterator->bit_size = p_bits->storage_size * sizeof(uint32_t) * 8;
-  iterator->idx = 0;
-}
-
-/*
- * If the vector sizes don't match, log an error and abort.
- */
-static void CheckSizes(const ArenaBitVector* bv1, const ArenaBitVector* bv2)
-{
-  if (bv1->storage_size != bv2->storage_size) {
-    LOG(FATAL) << "Mismatched vector sizes (" << bv1->storage_size
-               << ", " << bv2->storage_size << ")";
-  }
-}
-
-/*
- * Copy a whole vector to the other. Only do that when the both vectors have
- * the same size.
- */
-void CopyBitVector(ArenaBitVector* dest, const ArenaBitVector* src)
-{
-  /* if dest is expandable and < src, we could expand dest to match */
-  CheckSizes(dest, src);
-
-  memcpy(dest->storage, src->storage, sizeof(uint32_t) * dest->storage_size);
-}
-
-/*
- * Intersect two bit vectors and store the result to the dest vector.
- */
-
-bool IntersectBitVectors(ArenaBitVector* dest, const ArenaBitVector* src1,
-                          const ArenaBitVector* src2)
-{
-  DCHECK(src1 != NULL);
-  DCHECK(src2 != NULL);
-  if (dest->storage_size != src1->storage_size ||
-      dest->storage_size != src2->storage_size ||
-      dest->expandable != src1->expandable ||
-      dest->expandable != src2->expandable)
-    return false;
-
-  unsigned int idx;
-  for (idx = 0; idx < dest->storage_size; idx++) {
-    dest->storage[idx] = src1->storage[idx] & src2->storage[idx];
-  }
-  return true;
-}
-
-/*
- * Unify two bit vectors and store the result to the dest vector.
- */
-bool UnifyBitVetors(ArenaBitVector* dest, const ArenaBitVector* src1,
-                      const ArenaBitVector* src2)
-{
-  DCHECK(src1 != NULL);
-  DCHECK(src2 != NULL);
-  if (dest->storage_size != src1->storage_size ||
-      dest->storage_size != src2->storage_size ||
-      dest->expandable != src1->expandable ||
-      dest->expandable != src2->expandable)
-    return false;
-
-  unsigned int idx;
-  for (idx = 0; idx < dest->storage_size; idx++) {
-    dest->storage[idx] = src1->storage[idx] | src2->storage[idx];
-  }
-  return true;
-}
-
-/*
- * Return true if any bits collide.  Vectors must be same size.
- */
-bool TestBitVectors(const ArenaBitVector* src1,
-                     const ArenaBitVector* src2)
-{
-  DCHECK_EQ(src1->storage_size, src2->storage_size);
-  for (uint32_t idx = 0; idx < src1->storage_size; idx++) {
-    if (src1->storage[idx] & src2->storage[idx]) return true;
-  }
-  return false;
-}
-
-/*
- * Compare two bit vectors and return true if difference is seen.
- */
-bool CompareBitVectors(const ArenaBitVector* src1,
-                        const ArenaBitVector* src2)
-{
-  if (src1->storage_size != src2->storage_size ||
-      src1->expandable != src2->expandable)
-    return true;
-
-  unsigned int idx;
-  for (idx = 0; idx < src1->storage_size; idx++) {
-    if (src1->storage[idx] != src2->storage[idx]) return true;
-  }
-  return false;
-}
-
-/*
- * Count the number of bits that are set.
- */
-int CountSetBits(const ArenaBitVector* p_bits)
-{
-  unsigned int word;
-  unsigned int count = 0;
-
-  for (word = 0; word < p_bits->storage_size; word++) {
-    uint32_t val = p_bits->storage[word];
-
-    if (val != 0) {
-      if (val == 0xffffffff) {
-        count += 32;
-      } else {
-        /* count the number of '1' bits */
-        while (val != 0) {
-          val &= val - 1;
-          count++;
-        }
-      }
-    }
-  }
-
-  return count;
-}
-
-/* Return the next position set to 1. -1 means end-of-element reached */
-int BitVectorIteratorNext(ArenaBitVectorIterator* iterator)
-{
-  ArenaBitVector* p_bits = iterator->p_bits;
-  uint32_t bit_index = iterator->idx;
-  uint32_t bit_size = iterator->bit_size;
-
-  DCHECK_EQ(bit_size, p_bits->storage_size * sizeof(uint32_t) * 8);
-
-  if (bit_index >= bit_size) return -1;
-
-  uint32_t word_index = bit_index >> 5;
-  uint32_t end_word_index = bit_size >> 5;
-  uint32_t* storage = p_bits->storage;
-  uint32_t word = storage[word_index++];
-
-  // Mask out any bits in the first word we've already considered
-  word &= ~((1 << (bit_index & 0x1f))-1);
-
-  for (; word_index <= end_word_index;) {
-    uint32_t bit_pos = bit_index & 0x1f;
-    if (word == 0) {
-      bit_index += (32 - bit_pos);
-      word = storage[word_index++];
-      continue;
-    }
-    for (; bit_pos < 32; bit_pos++) {
-      if (word & (1 << bit_pos)) {
-        iterator->idx = bit_index + 1;
-        return bit_index;
-      }
-      bit_index++;
-    }
-    word = storage[word_index++];
-  }
-  iterator->idx = iterator->bit_size;
-  return -1;
-}
-
-/*
- * Mark specified number of bits as "set". Cannot set all bits like ClearAll
- * since there might be unused bits - setting those to one will confuse the
- * iterator.
- */
-void SetInitialBits(ArenaBitVector* p_bits, unsigned int num_bits)
-{
-  unsigned int idx;
-  DCHECK_LE(((num_bits + 31) >> 5), p_bits->storage_size);
-  for (idx = 0; idx < (num_bits >> 5); idx++) {
-    p_bits->storage[idx] = -1;
-  }
-  unsigned int rem_num_bits = num_bits & 0x1f;
-  if (rem_num_bits) {
-    p_bits->storage[idx] = (1 << rem_num_bits) - 1;
-  }
-}
-
-/* Allocate a new basic block */
-BasicBlock* NewMemBB(CompilationUnit* cu, BBType block_type, int block_id)
-{
-  BasicBlock* bb = static_cast<BasicBlock*>(NewMem(cu, sizeof(BasicBlock), true, kAllocBB));
-  bb->block_type = block_type;
-  bb->id = block_id;
-  bb->predecessors = static_cast<GrowableList*>
-      (NewMem(cu, sizeof(GrowableList), false, kAllocPredecessors));
-  CompilerInitGrowableList(cu, bb->predecessors,
-                      (block_type == kExitBlock) ? 2048 : 2,
-                      kListPredecessors);
-  cu->mir_graph->block_id_map_.Put(block_id, block_id);
-  return bb;
-}
-
-}  // namespace art
diff --git a/src/compiler/dex/compiler_utility.h b/src/compiler/dex/compiler_utility.h
deleted file mode 100644
index b2dcd44..0000000
--- a/src/compiler/dex/compiler_utility.h
+++ /dev/null
@@ -1,188 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_SRC_COMPILER_DEX_COMPILER_UTILITY_H_
-#define ART_SRC_COMPILER_DEX_COMPILER_UTILITY_H_
-
-#include <stdint.h>
-#include <stddef.h>
-#include "compiler_enums.h"
-
-namespace art {
-
-struct CompilationUnit;
-
-// Each arena page has some overhead, so take a few bytes off.
-#define ARENA_DEFAULT_SIZE ((2 * 1024 * 1024) - 256)
-
-// Type of allocation for memory tuning.
-enum oat_alloc_kind {
-  kAllocMisc,
-  kAllocBB,
-  kAllocLIR,
-  kAllocMIR,
-  kAllocDFInfo,
-  kAllocGrowableList,
-  kAllocGrowableBitMap,
-  kAllocDalvikToSSAMap,
-  kAllocDebugInfo,
-  kAllocSuccessor,
-  kAllocRegAlloc,
-  kAllocData,
-  kAllocPredecessors,
-  kNumAllocKinds
-};
-
-// Type of growable list for memory tuning.
-enum oat_list_kind {
-  kListMisc = 0,
-  kListBlockList,
-  kListSSAtoDalvikMap,
-  kListDfsOrder,
-  kListDfsPostOrder,
-  kListDomPostOrderTraversal,
-  kListThrowLaunchPads,
-  kListSuspendLaunchPads,
-  kListSwitchTables,
-  kListFillArrayData,
-  kListSuccessorBlocks,
-  kListPredecessors,
-  kNumListKinds
-};
-
-// Type of growable bitmap for memory tuning.
-enum oat_bit_map_kind {
-  kBitMapMisc = 0,
-  kBitMapUse,
-  kBitMapDef,
-  kBitMapLiveIn,
-  kBitMapBMatrix,
-  kBitMapDominators,
-  kBitMapIDominated,
-  kBitMapDomFrontier,
-  kBitMapPhi,
-  kBitMapTmpBlocks,
-  kBitMapInputBlocks,
-  kBitMapRegisterV,
-  kBitMapTempSSARegisterV,
-  kBitMapNullCheck,
-  kBitMapTmpBlockV,
-  kBitMapPredecessors,
-  kNumBitMapKinds
-};
-
-
-// Uncomment to collect memory usage statistics.
-//#define WITH_MEMSTATS
-
-struct ArenaMemBlock {
-  size_t block_size;
-  size_t bytes_allocated;
-  ArenaMemBlock *next;
-  char ptr[0];
-};
-
-struct GrowableList {
-  GrowableList() : num_allocated(0), num_used(0), elem_list(NULL) {
-  }
-
-  size_t num_allocated;
-  size_t num_used;
-  uintptr_t* elem_list;
-#ifdef WITH_MEMSTATS
-  oat_list_kind kind;
-#endif
-};
-
-struct GrowableListIterator {
-  GrowableList* list;
-  size_t idx;
-  size_t size;
-};
-
-/*
- * Expanding bitmap, used for tracking resources.  Bits are numbered starting
- * from zero.  All operations on a BitVector are unsynchronized.
- */
-struct ArenaBitVector {
-  bool       expandable;   // expand bitmap if we run out?
-  uint32_t   storage_size; // current size, in 32-bit words.
-  uint32_t*  storage;
-#ifdef WITH_MEMSTATS
-  oat_bit_map_kind kind;   // for memory use tuning.
-#endif
-};
-
-// Handy iterator to walk through the bit positions set to 1.
-struct ArenaBitVectorIterator {
-  ArenaBitVector* p_bits;
-  uint32_t idx;
-  uint32_t bit_size;
-};
-
-#define GET_ELEM_N(LIST, TYPE, N) ((reinterpret_cast<TYPE*>(LIST->elem_list)[N]))
-
-#define BLOCK_NAME_LEN 80
-
-// Forward declarations
-struct BasicBlock;
-struct CompilationUnit;
-enum BBType;
-
-// Allocate the initial memory block for arena-based allocation.
-bool HeapInit(CompilationUnit* cu);
-void* NewMem(CompilationUnit* cu, size_t size, bool zero, oat_alloc_kind kind);
-BasicBlock* NewMemBB(CompilationUnit* cu, BBType block_type, int block_id);
-void ArenaReset(CompilationUnit *cu);
-void CompilerInitGrowableList(CompilationUnit* cu, GrowableList* g_list,
-                              size_t init_length, oat_list_kind kind = kListMisc);
-void ReallocGrowableList(CompilationUnit* cu, GrowableList* g_list, size_t new_length);
-void InsertGrowableList(CompilationUnit* cu, GrowableList* g_list, uintptr_t elem);
-void DeleteGrowableList(GrowableList* g_list, uintptr_t elem);
-void GrowableListIteratorInit(GrowableList* g_list, GrowableListIterator* iterator);
-uintptr_t GrowableListIteratorNext(GrowableListIterator* iterator);
-uintptr_t GrowableListGetElement(const GrowableList* g_list, size_t idx);
-ArenaBitVector* AllocBitVector(CompilationUnit* cu, unsigned int start_bits, bool expandable,
-                               oat_bit_map_kind = kBitMapMisc);
-void BitVectorIteratorInit(ArenaBitVector* p_bits, ArenaBitVectorIterator* iterator);
-int BitVectorIteratorNext(ArenaBitVectorIterator* iterator);
-bool SetBit(CompilationUnit *cu, ArenaBitVector* p_bits, unsigned int num);
-bool ClearBit(ArenaBitVector* p_bits, unsigned int num);
-void MarkAllBits(ArenaBitVector* p_bits, bool set);
-void DebugBitVector(char* msg, const ArenaBitVector* bv, int length);
-bool IsBitSet(const ArenaBitVector* p_bits, unsigned int num);
-void ClearAllBits(ArenaBitVector* p_bits);
-void SetInitialBits(ArenaBitVector* p_bits, unsigned int num_bits);
-void CopyBitVector(ArenaBitVector* dest, const ArenaBitVector* src);
-bool IntersectBitVectors(ArenaBitVector* dest, const ArenaBitVector* src1,
-                         const ArenaBitVector* src2);
-bool UnifyBitVetors(ArenaBitVector* dest, const ArenaBitVector* src1, const ArenaBitVector* src2);
-bool CompareBitVectors(const ArenaBitVector* src1, const ArenaBitVector* src2);
-bool TestBitVectors(const ArenaBitVector* src1, const ArenaBitVector* src2);
-int CountSetBits(const ArenaBitVector* p_bits);
-void DumpBlockBitVector(const GrowableList* blocks, char* msg, const ArenaBitVector* bv,
-                        int length);
-void DumpMemStats(CompilationUnit* cu);
-
-void ReplaceSpecialChars(std::string& str);
-std::string GetSSAName(const CompilationUnit* cu, int ssa_reg);
-std::string GetSSANameWithConst(const CompilationUnit* cu, int ssa_reg, bool singles_only);
-void GetBlockName(BasicBlock* bb, char* name);
-const char* GetShortyFromTargetIdx(CompilationUnit*, int);
-
-}  // namespace art
-
-#endif  // ART_SRC_COMPILER_DEX_COMPILER_UTILITY_H_
diff --git a/src/compiler/dex/dataflow_iterator.cc b/src/compiler/dex/dataflow_iterator.cc
index 514eeba..bb5b969 100644
--- a/src/compiler/dex/dataflow_iterator.cc
+++ b/src/compiler/dex/dataflow_iterator.cc
@@ -27,7 +27,7 @@
         changed_ = false;
       }
       if (idx_ >= 0) {
-        int bb_id = block_id_list_->elem_list[idx_--];
+        int bb_id = block_id_list_->Get(idx_--);
         res = mir_graph_->GetBasicBlock(bb_id);
       }
     } else {
@@ -36,22 +36,22 @@
         changed_ = false;
       }
       if (idx_ < end_idx_) {
-        int bb_id = block_id_list_->elem_list[idx_++];
+        int bb_id = block_id_list_->Get(idx_++);
         res = mir_graph_->GetBasicBlock(bb_id);
       }
     }
     return res;
   }
 
-  // AllNodes uses the existing GrowableList iterator, so use different NextBody().
+  // AllNodes uses the existing GrowableArray iterator, so it needs a different NextBody().
   BasicBlock* AllNodesIterator::NextBody(bool had_change) {
     changed_ |= had_change;
     BasicBlock* res = NULL;
     bool keep_looking = true;
     while (keep_looking) {
-      res = reinterpret_cast<BasicBlock*>(GrowableListIteratorNext(&all_nodes_iterator_));
+      res = all_nodes_iterator_->Next();
       if (is_iterative_ && changed_ && (res == NULL)) {
-        GrowableListIteratorInit(mir_graph_->GetBlockList(), &all_nodes_iterator_);
+        all_nodes_iterator_->Reset();
         changed_ = false;
       } else if ((res == NULL) || (!res->hidden)) {
         keep_looking = false;
diff --git a/src/compiler/dex/dataflow_iterator.h b/src/compiler/dex/dataflow_iterator.h
index f3a2fca..7d32dfc 100644
--- a/src/compiler/dex/dataflow_iterator.h
+++ b/src/compiler/dex/dataflow_iterator.h
@@ -75,8 +75,7 @@
       const int start_idx_;
       const int end_idx_;
       const bool reverse_;
-      GrowableList* block_id_list_;
-      GrowableListIterator all_nodes_iterator_;
+      GrowableArray<int>* block_id_list_;
       int idx_;
       bool changed_;
 
@@ -142,10 +141,18 @@
 
       AllNodesIterator(MIRGraph* mir_graph, bool is_iterative)
           : DataflowIterator(mir_graph, is_iterative, 0, 0, false) {
-        GrowableListIteratorInit(mir_graph->GetBlockList(), &all_nodes_iterator_);
+        all_nodes_iterator_ =
+            new (mir_graph->GetArena()) GrowableArray<BasicBlock*>::Iterator(mir_graph->GetBlockList());
+      }
+
+      virtual void Reset() {
+        all_nodes_iterator_->Reset();
       }
 
       virtual BasicBlock* NextBody(bool had_change);
+
+    private:
+      GrowableArray<BasicBlock*>::Iterator* all_nodes_iterator_;
   };
 
 }  // namespace art
diff --git a/src/compiler/dex/frontend.cc b/src/compiler/dex/frontend.cc
index e99c196..6f4ee6c 100644
--- a/src/compiler/dex/frontend.cc
+++ b/src/compiler/dex/frontend.cc
@@ -27,6 +27,7 @@
 #include "mirror/object.h"
 #include "runtime.h"
 #include "backend.h"
+#include "base/logging.h"
 
 namespace {
 #if !defined(ART_USE_PORTABLE_COMPILER)
@@ -118,10 +119,6 @@
   ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
   UniquePtr<CompilationUnit> cu(new CompilationUnit);
 
-  if (!HeapInit(cu.get())) {
-    LOG(FATAL) << "Failed to initialize compiler heap";
-  }
-
   cu->compiler_driver = &compiler;
   cu->class_linker = class_linker;
   cu->instruction_set = compiler.GetInstructionSet();
@@ -171,7 +168,7 @@
         (1 << kPromoteCompilerTemps));
   }
 
-  cu->mir_graph.reset(new MIRGraph(cu.get()));
+  cu->mir_graph.reset(new MIRGraph(cu.get(), &cu->arena));
 
   /* Gathering opcode stats? */
   if (kCompilerDebugFlags & (1 << kDebugCountOpcodes)) {
@@ -218,17 +215,18 @@
 
 #if defined(ART_USE_PORTABLE_COMPILER)
   if (compiler_backend == kPortable) {
-    cu->cg.reset(PortableCodeGenerator(cu.get(), cu->mir_graph.get(), llvm_compilation_unit));
+    cu->cg.reset(PortableCodeGenerator(cu.get(), cu->mir_graph.get(), &cu->arena,
+                                       llvm_compilation_unit));
   } else
 #endif
   {
     switch (compiler.GetInstructionSet()) {
       case kThumb2:
-        cu->cg.reset(ArmCodeGenerator(cu.get(), cu->mir_graph.get())); break;
+        cu->cg.reset(ArmCodeGenerator(cu.get(), cu->mir_graph.get(), &cu->arena)); break;
       case kMips:
-        cu->cg.reset(MipsCodeGenerator(cu.get(), cu->mir_graph.get())); break;
+        cu->cg.reset(MipsCodeGenerator(cu.get(), cu->mir_graph.get(), &cu->arena)); break;
       case kX86:
-        cu->cg.reset(X86CodeGenerator(cu.get(), cu->mir_graph.get())); break;
+        cu->cg.reset(X86CodeGenerator(cu.get(), cu->mir_graph.get(), &cu->arena)); break;
       default:
         LOG(FATAL) << "Unexpected instruction set: " << compiler.GetInstructionSet();
     }
@@ -244,13 +242,14 @@
     VLOG(compiler) << "Deferred " << PrettyMethod(method_idx, dex_file);
   }
 
-#ifdef WITH_MEMSTATS
   if (cu->enable_debug & (1 << kDebugShowMemoryUsage)) {
-    DumpMemStats(cu.get());
+    if (cu->arena.BytesAllocated() > (5 * 1024 * 1024)) {
+      MemStats mem_stats(cu->arena);
+      LOG(INFO) << PrettyMethod(method_idx, dex_file) << " " << Dumpable<MemStats>(mem_stats);
+    }
   }
-#endif
 
-  ArenaReset(cu.get());
+  cu->arena.ArenaReset();
 
   return result;
 }
diff --git a/src/compiler/dex/growable_array.h b/src/compiler/dex/growable_array.h
new file mode 100644
index 0000000..eb865d5
--- /dev/null
+++ b/src/compiler/dex/growable_array.h
@@ -0,0 +1,171 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_COMPILER_DEX_GROWABLE_ARRAY_H_
+#define ART_SRC_COMPILER_DEX_GROWABLE_ARRAY_H_
+
+#include <stdint.h>
+#include <stddef.h>
+#include "compiler_enums.h"
+#include "arena_allocator.h"
+
+namespace art {
+
+struct CompilationUnit;
+
+// Type of growable array for memory tuning.
+enum OatListKind {
+  kGrowableArrayMisc = 0,
+  kGrowableArrayBlockList,
+  kGrowableArraySSAtoDalvikMap,
+  kGrowableArrayDfsOrder,
+  kGrowableArrayDfsPostOrder,
+  kGrowableArrayDomPostOrderTraversal,
+  kGrowableArrayThrowLaunchPads,
+  kGrowableArraySuspendLaunchPads,
+  kGrowableArraySwitchTables,
+  kGrowableArrayFillArrayData,
+  kGrowableArraySuccessorBlocks,
+  kGrowableArrayPredecessors,
+  kGNumListKinds
+};
+
+template<typename T>
+class GrowableArray {
+  public:
+
+    class Iterator {
+      public:
+        Iterator(GrowableArray* g_list)
+          : idx_(0),
+            g_list_(g_list) {};
+
+        // NOTE: returns 0/NULL when no next.
+        // TODO: redo to make usage consistent with other iterators.
+        T Next() {
+          if (idx_ >= g_list_->Size()) {
+            return 0;
+          } else {
+            return g_list_->Get(idx_++);
+          }
+        }
+
+        void Reset() {
+          idx_ = 0;
+        }
+
+        static void* operator new(size_t size, ArenaAllocator* arena) {
+          return arena->NewMem(sizeof(GrowableArray::Iterator), true, ArenaAllocator::kAllocGrowableArray);
+        };
+        static void operator delete(void* p) {};  // Nop.
+
+      private:
+        size_t idx_;
+        GrowableArray* const g_list_;
+    };
+
+    GrowableArray(ArenaAllocator* arena, size_t init_length, OatListKind kind = kGrowableArrayMisc)
+      : arena_(arena),
+        num_allocated_(init_length),
+        num_used_(0),
+        kind_(kind) {
+      elem_list_ = static_cast<T*>(arena_->NewMem(sizeof(T) * init_length, true,
+                                                 ArenaAllocator::kAllocGrowableArray));
+      // TODO: Collect detailed memory usage stats by list kind.
+    };
+
+
+    // Expand the list size to at least new length.
+    void Resize(size_t new_length) {
+      if (new_length <= num_allocated_) return;
+      size_t target_length = (num_allocated_ < 128) ? num_allocated_ << 1 : num_allocated_ + 128;
+      if (new_length > target_length) {
+        target_length = new_length;
+      }
+      T* new_array = static_cast<T*>(arena_->NewMem(sizeof(T) * target_length, true,
+                                                    ArenaAllocator::kAllocGrowableArray));
+      memcpy(new_array, elem_list_, sizeof(T) * num_allocated_);
+      // TODO: Collect stats on wasted resize memory.
+      num_allocated_ = target_length;
+      elem_list_ = new_array;
+    };
+
+    // NOTE: does not return storage, just resets use count.
+    void Reset() {
+      num_used_ = 0;
+    }
+
+    // Insert an element to the end of a list, resizing if necessary.
+    void Insert(T elem) {
+      if (num_used_ == num_allocated_) {
+        Resize(num_used_ + 1);
+      }
+      elem_list_[num_used_++] = elem;
+    };
+
+    T Get(size_t index) const {
+      DCHECK_LT(index, num_used_);
+      return elem_list_[index];
+    }
+
+    // Overwrite existing element at position index.  List must be large enough.
+    void Put(size_t index, T elem) {
+      DCHECK_LT(index, num_used_);
+      elem_list_[index] = elem;
+    }
+
+    void Increment(size_t index) {
+      DCHECK_LT(index, num_used_);
+      elem_list_[index]++;
+    }
+
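+    // Remove the first occurrence of |element|, shifting any later elements
+    // down one slot (an O(n) scan).  The element must be present in the list.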
+    void Delete(T element) {
+      DCHECK_NE(num_used_, 0U);
+      bool found = false;
+      for (size_t i = 0; i < num_used_ - 1; i++) {
+        if (!found && elem_list_[i] == element) {
+          found = true;
+        }
+        if (found) {
+          elem_list_[i] = elem_list_[i + 1];
+        }
+      }
+      // We should either have found the element, or it was the last (unscanned) element.
+      DCHECK(found || (element == elem_list_[num_used_ - 1]));
+      num_used_--;
+    }
+
+    size_t GetNumAllocated() const { return num_allocated_; }
+
+    size_t Size() const { return num_used_; }
+
+    T* GetRawStorage() const { return elem_list_; }
+
+    static void* operator new(size_t size, ArenaAllocator* arena) {
+      return arena->NewMem(sizeof(GrowableArray<T>), true, ArenaAllocator::kAllocGrowableArray);
+    }
+    static void operator delete(void* p) {}  // Nop.
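+
+    // Arena placement-new usage (a sketch mirroring the call sites in
+    // mir_graph.cc; the names here are illustrative):
+    //   GrowableArray<int>* list =
+    //       new (arena) GrowableArray<int>(arena, 32, kGrowableArrayMisc);
+    //   // No explicit delete: the arena reclaims all blocks at once.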
+
+  private:
+    ArenaAllocator* const arena_;
+    size_t num_allocated_;
+    size_t num_used_;
+    OatListKind kind_;
+    T* elem_list_;
+};
+
+}  // namespace art
+
+#endif  // ART_SRC_COMPILER_DEX_GROWABLE_ARRAY_H_
diff --git a/src/compiler/dex/mir_dataflow.cc b/src/compiler/dex/mir_dataflow.cc
index 5e6eb82..23bf248 100644
--- a/src/compiler/dex/mir_dataflow.cc
+++ b/src/compiler/dex/mir_dataflow.cc
@@ -844,24 +844,23 @@
 
 /* Return the base virtual register for a SSA name */
 int MIRGraph::SRegToVReg(int ssa_reg) const {
-  DCHECK_LT(ssa_reg, static_cast<int>(ssa_base_vregs_->num_used));
-  return GET_ELEM_N(ssa_base_vregs_, int, ssa_reg);
+  return ssa_base_vregs_->Get(ssa_reg);
 }
 
 /* Any register that is used before being defined is considered live-in */
 void MIRGraph::HandleLiveInUse(ArenaBitVector* use_v, ArenaBitVector* def_v,
                             ArenaBitVector* live_in_v, int dalvik_reg_id)
 {
-  SetBit(cu_, use_v, dalvik_reg_id);
-  if (!IsBitSet(def_v, dalvik_reg_id)) {
-    SetBit(cu_, live_in_v, dalvik_reg_id);
+  use_v->SetBit(dalvik_reg_id);
+  if (!def_v->IsBitSet(dalvik_reg_id)) {
+    live_in_v->SetBit(dalvik_reg_id);
   }
 }
 
 /* Mark a reg as being defined */
 void MIRGraph::HandleDef(ArenaBitVector* def_v, int dalvik_reg_id)
 {
-  SetBit(cu_, def_v, dalvik_reg_id);
+  def_v->SetBit(dalvik_reg_id);
 }
 
 /*
@@ -876,11 +875,11 @@
   if (bb->data_flow_info == NULL) return false;
 
   use_v = bb->data_flow_info->use_v =
-      AllocBitVector(cu_, cu_->num_dalvik_registers, false, kBitMapUse);
+      new (arena_) ArenaBitVector(arena_, cu_->num_dalvik_registers, false, kBitMapUse);
   def_v = bb->data_flow_info->def_v =
-      AllocBitVector(cu_, cu_->num_dalvik_registers, false, kBitMapDef);
+      new (arena_) ArenaBitVector(arena_, cu_->num_dalvik_registers, false, kBitMapDef);
   live_in_v = bb->data_flow_info->live_in_v =
-      AllocBitVector(cu_, cu_->num_dalvik_registers, false, kBitMapLiveIn);
+      new (arena_) ArenaBitVector(arena_, cu_->num_dalvik_registers, false, kBitMapLiveIn);
 
   for (mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
     int df_attributes = oat_data_flow_attributes_[mir->dalvikInsn.opcode];
@@ -932,13 +931,14 @@
   int subscript = (v_reg < 0) ? 0 : ++ssa_last_defs_[v_reg];
   int ssa_reg = GetNumSSARegs();
   SetNumSSARegs(ssa_reg + 1);
-  InsertGrowableList(cu_, ssa_base_vregs_, v_reg);
-  InsertGrowableList(cu_, ssa_subscripts_, subscript);
+  ssa_base_vregs_->Insert(v_reg);
+  ssa_subscripts_->Insert(subscript);
   std::string ssa_name = GetSSAName(ssa_reg);
-  char* name = static_cast<char*>(NewMem(cu_, ssa_name.length() + 1, false, kAllocDFInfo));
+  char* name = static_cast<char*>(arena_->NewMem(ssa_name.length() + 1, false,
+                                                 ArenaAllocator::kAllocDFInfo));
   strncpy(name, ssa_name.c_str(), ssa_name.length() + 1);
-  InsertGrowableList(cu_, ssa_strings_, reinterpret_cast<uintptr_t>(name));
-  DCHECK_EQ(ssa_base_vregs_->num_used, ssa_subscripts_->num_used);
+  ssa_strings_->Insert(name);
+  DCHECK_EQ(ssa_base_vregs_->Size(), ssa_subscripts_->Size());
   return ssa_reg;
 }
 
@@ -966,10 +966,11 @@
   int i;
 
   mir->ssa_rep->num_uses = num_uses;
-  mir->ssa_rep->uses = static_cast<int*>(NewMem(cu_, sizeof(int) * num_uses, true, kAllocDFInfo));
+  mir->ssa_rep->uses = static_cast<int*>(arena_->NewMem(sizeof(int) * num_uses, true,
+                                                        ArenaAllocator::kAllocDFInfo));
   // NOTE: will be filled in during type & size inference pass
-  mir->ssa_rep->fp_use = static_cast<bool*>(NewMem(cu_, sizeof(bool) * num_uses, true,
-                                                 kAllocDFInfo));
+  mir->ssa_rep->fp_use = static_cast<bool*>(arena_->NewMem(sizeof(bool) * num_uses, true,
+                                                           ArenaAllocator::kAllocDFInfo));
 
   for (i = 0; i < num_uses; i++) {
     HandleSSAUse(mir->ssa_rep->uses, d_insn->arg[i], i);
@@ -984,10 +985,11 @@
   int i;
 
   mir->ssa_rep->num_uses = num_uses;
-  mir->ssa_rep->uses = static_cast<int*>(NewMem(cu_, sizeof(int) * num_uses, true, kAllocDFInfo));
+  mir->ssa_rep->uses = static_cast<int*>(arena_->NewMem(sizeof(int) * num_uses, true,
+                                                        ArenaAllocator::kAllocDFInfo));
   // NOTE: will be filled in during type & size inference pass
-  mir->ssa_rep->fp_use = static_cast<bool*>(NewMem(cu_, sizeof(bool) * num_uses, true,
-                                                 kAllocDFInfo));
+  mir->ssa_rep->fp_use = static_cast<bool*>(arena_->NewMem(sizeof(bool) * num_uses, true,
+                                                           ArenaAllocator::kAllocDFInfo));
 
   for (i = 0; i < num_uses; i++) {
     HandleSSAUse(mir->ssa_rep->uses, d_insn->vC+i, i);
@@ -1002,8 +1004,9 @@
   if (bb->data_flow_info == NULL) return false;
 
   for (mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
-    mir->ssa_rep = static_cast<struct SSARepresentation *>(NewMem(cu_, sizeof(SSARepresentation),
-                                                                 true, kAllocDFInfo));
+    mir->ssa_rep =
+        static_cast<struct SSARepresentation *>(arena_->NewMem(sizeof(SSARepresentation), true,
+                                                               ArenaAllocator::kAllocDFInfo));
 
     int df_attributes = oat_data_flow_attributes_[mir->dalvikInsn.opcode];
 
@@ -1052,10 +1055,10 @@
 
     if (num_uses) {
       mir->ssa_rep->num_uses = num_uses;
-      mir->ssa_rep->uses = static_cast<int*>(NewMem(cu_, sizeof(int) * num_uses, false,
-                                                   kAllocDFInfo));
-      mir->ssa_rep->fp_use = static_cast<bool*>(NewMem(cu_, sizeof(bool) * num_uses, false,
-                                                     kAllocDFInfo));
+      mir->ssa_rep->uses = static_cast<int*>(arena_->NewMem(sizeof(int) * num_uses, false,
+                                                            ArenaAllocator::kAllocDFInfo));
+      mir->ssa_rep->fp_use = static_cast<bool*>(arena_->NewMem(sizeof(bool) * num_uses, false,
+                                                               ArenaAllocator::kAllocDFInfo));
     }
 
     int num_defs = 0;
@@ -1069,10 +1072,10 @@
 
     if (num_defs) {
       mir->ssa_rep->num_defs = num_defs;
-      mir->ssa_rep->defs = static_cast<int*>(NewMem(cu_, sizeof(int) * num_defs, false,
-                                                   kAllocDFInfo));
-      mir->ssa_rep->fp_def = static_cast<bool*>(NewMem(cu_, sizeof(bool) * num_defs, false,
-                                                     kAllocDFInfo));
+      mir->ssa_rep->defs = static_cast<int*>(arena_->NewMem(sizeof(int) * num_defs, false,
+                                                            ArenaAllocator::kAllocDFInfo));
+      mir->ssa_rep->fp_def = static_cast<bool*>(arena_->NewMem(sizeof(bool) * num_defs, false,
+                                                               ArenaAllocator::kAllocDFInfo));
     }
 
     DecodedInstruction *d_insn = &mir->dalvikInsn;
@@ -1120,8 +1123,8 @@
    * predecessor blocks.
    */
   bb->data_flow_info->vreg_to_ssa_map =
-      static_cast<int*>(NewMem(cu_, sizeof(int) * cu_->num_dalvik_registers, false,
-                               kAllocDFInfo));
+      static_cast<int*>(arena_->NewMem(sizeof(int) * cu_->num_dalvik_registers, false,
+                                       ArenaAllocator::kAllocDFInfo));
 
   memcpy(bb->data_flow_info->vreg_to_ssa_map, vreg_to_ssa_map_,
          sizeof(int) * cu_->num_dalvik_registers);
@@ -1131,22 +1134,14 @@
 /* Setup the basic data structures for SSA conversion */
 void MIRGraph::CompilerInitializeSSAConversion()
 {
-  int i;
-  int num_dalvik_reg = cu_->num_dalvik_registers;
+  size_t num_dalvik_reg = cu_->num_dalvik_registers;
 
-  ssa_base_vregs_ =
-      static_cast<GrowableList*>(NewMem(cu_, sizeof(GrowableList), false, kAllocDFInfo));
-  ssa_subscripts_ =
-      static_cast<GrowableList*>(NewMem(cu_, sizeof(GrowableList), false, kAllocDFInfo));
-  ssa_strings_ =
-      static_cast<GrowableList*>(NewMem(cu_, sizeof(GrowableList), false, kAllocDFInfo));
-  // Create the ssa mappings, estimating the max size
-  CompilerInitGrowableList(cu_, ssa_base_vregs_, num_dalvik_reg + GetDefCount() + 128,
-                           kListSSAtoDalvikMap);
-  CompilerInitGrowableList(cu_, ssa_subscripts_, num_dalvik_reg + GetDefCount() + 128,
-                           kListSSAtoDalvikMap);
-  CompilerInitGrowableList(cu_, ssa_strings_, num_dalvik_reg + GetDefCount() + 128,
-                           kListSSAtoDalvikMap);
+  ssa_base_vregs_ = new (arena_) GrowableArray<int>(arena_, num_dalvik_reg + GetDefCount() + 128,
+                                                    kGrowableArraySSAtoDalvikMap);
+  ssa_subscripts_ = new (arena_) GrowableArray<int>(arena_, num_dalvik_reg + GetDefCount() + 128,
+                                                    kGrowableArraySSAtoDalvikMap);
+  ssa_strings_ = new (arena_) GrowableArray<char*>(arena_, num_dalvik_reg + GetDefCount() + 128,
+                                                   kGrowableArraySSAtoDalvikMap);
   /*
    * Initial number of SSA registers is equal to the number of Dalvik
    * registers.
@@ -1158,13 +1153,14 @@
    * the subscript is 0 so we use the ENCODE_REG_SUB macro to encode the value
    * into "(0 << 16) | i"
    */
-  for (i = 0; i < num_dalvik_reg; i++) {
-    InsertGrowableList(cu_, ssa_base_vregs_, i);
-    InsertGrowableList(cu_, ssa_subscripts_, 0);
+  for (unsigned int i = 0; i < num_dalvik_reg; i++) {
+    ssa_base_vregs_->Insert(i);
+    ssa_subscripts_->Insert(0);
     std::string ssa_name = GetSSAName(i);
-    char* name = static_cast<char*>(NewMem(cu_, ssa_name.length() + 1, true, kAllocDFInfo));
+    char* name = static_cast<char*>(arena_->NewMem(ssa_name.length() + 1, true,
+                                                   ArenaAllocator::kAllocDFInfo));
     strncpy(name, ssa_name.c_str(), ssa_name.length() + 1);
-    InsertGrowableList(cu_, ssa_strings_, reinterpret_cast<uintptr_t>(name));
+    ssa_strings_->Insert(name);
   }
 
   /*
@@ -1172,12 +1168,14 @@
    * Dalvik register, and the SSA names for those are the same.
    */
   vreg_to_ssa_map_ =
-      static_cast<int*>(NewMem(cu_, sizeof(int) * num_dalvik_reg, false, kAllocDFInfo));
+      static_cast<int*>(arena_->NewMem(sizeof(int) * num_dalvik_reg, false,
+                                       ArenaAllocator::kAllocDFInfo));
   /* Keep track of the highest def for each dalvik reg */
   ssa_last_defs_ =
-      static_cast<int*>(NewMem(cu_, sizeof(int) * num_dalvik_reg, false, kAllocDFInfo));
+      static_cast<int*>(arena_->NewMem(sizeof(int) * num_dalvik_reg, false,
+                                       ArenaAllocator::kAllocDFInfo));
 
-  for (i = 0; i < num_dalvik_reg; i++) {
+  for (unsigned int i = 0; i < num_dalvik_reg; i++) {
     vreg_to_ssa_map_[i] = i;
     ssa_last_defs_[i] = 0;
   }
@@ -1188,17 +1186,18 @@
   /*
    * Allocate the BasicBlockDataFlow structure for the entry and code blocks
    */
-  GrowableListIterator iterator = GetBasicBlockIterator();
+  GrowableArray<BasicBlock*>::Iterator iterator(&block_list_);
 
   while (true) {
-    BasicBlock* bb = reinterpret_cast<BasicBlock*>(GrowableListIteratorNext(&iterator));
+    BasicBlock* bb = iterator.Next();
     if (bb == NULL) break;
     if (bb->hidden == true) continue;
     if (bb->block_type == kDalvikByteCode ||
       bb->block_type == kEntryBlock ||
       bb->block_type == kExitBlock) {
-      bb->data_flow_info = static_cast<BasicBlockDataFlow*>(NewMem(cu_, sizeof(BasicBlockDataFlow),
-                                                                 true, kAllocDFInfo));
+      bb->data_flow_info =
+          static_cast<BasicBlockDataFlow*>(arena_->NewMem(sizeof(BasicBlockDataFlow), true,
+                                                          ArenaAllocator::kAllocDFInfo));
       }
   }
 }
@@ -1276,9 +1275,8 @@
     uint32_t weight = std::min(16U, static_cast<uint32_t>(bb->nesting_depth));
     for (int i = 0; i < mir->ssa_rep->num_uses; i++) {
       int s_reg = mir->ssa_rep->uses[i];
-      DCHECK_LT(s_reg, static_cast<int>(use_counts_.num_used));
-      raw_use_counts_.elem_list[s_reg]++;
-      use_counts_.elem_list[s_reg] += (1 << weight);
+      raw_use_counts_.Increment(s_reg);
+      use_counts_.Put(s_reg, use_counts_.Get(s_reg) + (1 << weight));
     }
     if (!(cu_->disable_opt & (1 << kPromoteCompilerTemps))) {
       int df_attributes = oat_data_flow_attributes_[mir->dalvikInsn.opcode];
@@ -1296,8 +1294,8 @@
           uses_method_star &= InvokeUsesMethodStar(mir);
         }
         if (uses_method_star) {
-          raw_use_counts_.elem_list[method_sreg_]++;
-          use_counts_.elem_list[method_sreg_] += (1 << weight);
+          raw_use_counts_.Increment(method_sreg_);
+          use_counts_.Put(method_sreg_, use_counts_.Get(method_sreg_) + (1 << weight));
         }
       }
     }
@@ -1307,13 +1305,14 @@
 
 void MIRGraph::MethodUseCount()
 {
+  // Now that we know the number of SSA registers, resize the use count lists.
   int num_ssa_regs = GetNumSSARegs();
-  CompilerInitGrowableList(cu_, &use_counts_, num_ssa_regs + 32, kListMisc);
-  CompilerInitGrowableList(cu_, &raw_use_counts_, num_ssa_regs + 32, kListMisc);
+  use_counts_.Resize(num_ssa_regs + 32);
+  raw_use_counts_.Resize(num_ssa_regs + 32);
   // Initialize list
   for (int i = 0; i < num_ssa_regs; i++) {
-    InsertGrowableList(cu_, &use_counts_, 0);
-    InsertGrowableList(cu_, &raw_use_counts_, 0);
+    use_counts_.Insert(0);
+    raw_use_counts_.Insert(0);
   }
   if (cu_->disable_opt & (1 << kPromoteRegs)) {
     return;
@@ -1327,11 +1326,10 @@
 /* Verify that all predecessors claimed by a block are actually connected to it */
 bool MIRGraph::VerifyPredInfo(BasicBlock* bb)
 {
-  GrowableListIterator iter;
+  GrowableArray<BasicBlock*>::Iterator iter(bb->predecessors);
 
-  GrowableListIteratorInit(bb->predecessors, &iter);
   while (true) {
-    BasicBlock *pred_bb = reinterpret_cast<BasicBlock*>(GrowableListIteratorNext(&iter));
+    BasicBlock *pred_bb = iter.Next();
     if (!pred_bb) break;
     bool found = false;
     if (pred_bb->taken == bb) {
@@ -1339,12 +1337,9 @@
     } else if (pred_bb->fall_through == bb) {
         found = true;
     } else if (pred_bb->successor_block_list.block_list_type != kNotUsed) {
-      GrowableListIterator iterator;
-      GrowableListIteratorInit(&pred_bb->successor_block_list.blocks,
-                                  &iterator);
+      GrowableArray<SuccessorBlockInfo*>::Iterator iterator(pred_bb->successor_block_list.blocks);
       while (true) {
-        SuccessorBlockInfo *successor_block_info =
-            reinterpret_cast<SuccessorBlockInfo*>(GrowableListIteratorNext(&iterator));
+        SuccessorBlockInfo *successor_block_info = iterator.Next();
         if (successor_block_info == NULL) break;
         BasicBlock *succ_bb = successor_block_info->block;
         if (succ_bb == bb) {
diff --git a/src/compiler/dex/mir_graph.cc b/src/compiler/dex/mir_graph.cc
index a6e9556..37600fc 100644
--- a/src/compiler/dex/mir_graph.cc
+++ b/src/compiler/dex/mir_graph.cc
@@ -70,8 +70,9 @@
   "Select",
 };
 
-MIRGraph::MIRGraph(CompilationUnit* cu)
+MIRGraph::MIRGraph(CompilationUnit* cu, ArenaAllocator* arena)
     : reg_location_(NULL),
+      compiler_temps_(arena, 6, kGrowableArrayMisc),
       cu_(cu),
       ssa_base_vregs_(NULL),
       ssa_subscripts_(NULL),
@@ -80,12 +81,18 @@
       ssa_last_defs_(NULL),
       is_constant_v_(NULL),
       constant_values_(NULL),
+      use_counts_(arena, 256, kGrowableArrayMisc),
+      raw_use_counts_(arena, 256, kGrowableArrayMisc),
       num_reachable_blocks_(0),
+      dfs_order_(NULL),
+      dfs_post_order_(NULL),
+      dom_post_order_traversal_(NULL),
       i_dom_list_(NULL),
       def_block_matrix_(NULL),
       temp_block_v_(NULL),
       temp_dalvik_register_v_(NULL),
       temp_ssa_register_v_(NULL),
+      block_list_(arena, 100, kGrowableArrayBlockList),
       try_block_addr_(NULL),
       entry_block_(NULL),
       exit_block_(NULL),
@@ -98,10 +105,11 @@
       opcode_count_(NULL),
       num_ssa_regs_(0),
       method_sreg_(0),
-      attributes_(METHOD_IS_LEAF)  // Start with leaf assumption, change on encountering invoke.
+      attributes_(METHOD_IS_LEAF),  // Start with leaf assumption, change on encountering invoke.
+      checkstats_(NULL),
+      arena_(arena)
       {
-  CompilerInitGrowableList(cu, &block_list_, 0, kListBlockList);
-  try_block_addr_ = AllocBitVector(cu, 0, true /* expandable */);
+  try_block_addr_ = new (arena_) ArenaBitVector(arena_, 0, true /* expandable */);
 }
 
 bool MIRGraph::ContentIsInsn(const uint16_t* code_ptr) {
@@ -143,8 +151,8 @@
   if (insn == NULL) {
     LOG(FATAL) << "Break split failed";
   }
-  BasicBlock *bottom_block = NewMemBB(cu_, kDalvikByteCode, num_blocks_++);
-  InsertGrowableList(cu_, &block_list_, reinterpret_cast<uintptr_t>(bottom_block));
+  BasicBlock *bottom_block = NewMemBB(kDalvikByteCode, num_blocks_++);
+  block_list_.Insert(bottom_block);
 
   bottom_block->start_offset = code_offset;
   bottom_block->first_mir_insn = insn;
@@ -161,38 +169,30 @@
   bottom_block->taken = orig_block->taken;
   if (bottom_block->taken) {
     orig_block->taken = NULL;
-    DeleteGrowableList(bottom_block->taken->predecessors, reinterpret_cast<uintptr_t>(orig_block));
-    InsertGrowableList(cu_, bottom_block->taken->predecessors,
-                          reinterpret_cast<uintptr_t>(bottom_block));
+    bottom_block->taken->predecessors->Delete(orig_block);
+    bottom_block->taken->predecessors->Insert(bottom_block);
   }
 
   /* Handle the fallthrough path */
   bottom_block->fall_through = orig_block->fall_through;
   orig_block->fall_through = bottom_block;
-  InsertGrowableList(cu_, bottom_block->predecessors,
-                        reinterpret_cast<uintptr_t>(orig_block));
+  bottom_block->predecessors->Insert(orig_block);
   if (bottom_block->fall_through) {
-    DeleteGrowableList(bottom_block->fall_through->predecessors,
-                          reinterpret_cast<uintptr_t>(orig_block));
-    InsertGrowableList(cu_, bottom_block->fall_through->predecessors,
-                          reinterpret_cast<uintptr_t>(bottom_block));
+    bottom_block->fall_through->predecessors->Delete(orig_block);
+    bottom_block->fall_through->predecessors->Insert(bottom_block);
   }
 
   /* Handle the successor list */
   if (orig_block->successor_block_list.block_list_type != kNotUsed) {
     bottom_block->successor_block_list = orig_block->successor_block_list;
     orig_block->successor_block_list.block_list_type = kNotUsed;
-    GrowableListIterator iterator;
-
-    GrowableListIteratorInit(&bottom_block->successor_block_list.blocks,
-                                &iterator);
+    GrowableArray<SuccessorBlockInfo*>::Iterator iterator(bottom_block->successor_block_list.blocks);
     while (true) {
-      SuccessorBlockInfo *successor_block_info =
-          reinterpret_cast<SuccessorBlockInfo*>(GrowableListIteratorNext(&iterator));
+      SuccessorBlockInfo *successor_block_info = iterator.Next();
       if (successor_block_info == NULL) break;
       BasicBlock *bb = successor_block_info->block;
-      DeleteGrowableList(bb->predecessors, reinterpret_cast<uintptr_t>(orig_block));
-      InsertGrowableList(cu_, bb->predecessors, reinterpret_cast<uintptr_t>(bottom_block));
+      bb->predecessors->Delete(orig_block);
+      bb->predecessors->Insert(bottom_block);
     }
   }
 
@@ -234,8 +234,8 @@
   }
 
   if (split) {
-    for (i = 0; i < block_list_.num_used; i++) {
-      bb = reinterpret_cast<BasicBlock*>(block_list_.elem_list[i]);
+    for (i = 0; i < block_list_.Size(); i++) {
+      bb = block_list_.Get(i);
       if (bb->block_type != kDalvikByteCode) continue;
       /* Check if a branch jumps into the middle of an existing block */
       if ((code_offset > bb->start_offset) && (bb->last_mir_insn != NULL) &&
@@ -248,8 +248,8 @@
   }
 
   /* Create a new one */
-  bb = NewMemBB(cu_, kDalvikByteCode, num_blocks_++);
-  InsertGrowableList(cu_, &block_list_, reinterpret_cast<uintptr_t>(bb));
+  bb = NewMemBB(kDalvikByteCode, num_blocks_++);
+  block_list_.Insert(bb);
   bb->start_offset = code_offset;
   block_map_.Put(bb->start_offset, bb);
   return bb;
@@ -271,7 +271,7 @@
     int start_offset = pTry->start_addr_;
     int end_offset = start_offset + pTry->insn_count_;
     for (offset = start_offset; offset < end_offset; offset++) {
-      SetBit(cu_, try_block_addr_, offset);
+      try_block_addr_->SetBit(offset);
     }
   }
 
@@ -325,7 +325,7 @@
   BasicBlock *taken_block = FindBlock(target, /* split */ true, /* create */ true,
                                       /* immed_pred_block_p */ &cur_block);
   cur_block->taken = taken_block;
-  InsertGrowableList(cu_, taken_block->predecessors, reinterpret_cast<uintptr_t>(cur_block));
+  taken_block->predecessors->Insert(cur_block);
 
   /* Always terminate the current block for conditional branches */
   if (flags & Instruction::kContinue) {
@@ -348,8 +348,7 @@
                                              /* immed_pred_block_p */
                                              &cur_block);
     cur_block->fall_through = fallthrough_block;
-    InsertGrowableList(cu_, fallthrough_block->predecessors,
-                          reinterpret_cast<uintptr_t>(cur_block));
+    fallthrough_block->predecessors->Insert(cur_block);
   } else if (code_ptr < code_end) {
     /* Create a fallthrough block for real instructions (incl. NOP) */
     if (ContentIsInsn(code_ptr)) {
@@ -413,31 +412,28 @@
   cur_block->successor_block_list.block_list_type =
       (insn->dalvikInsn.opcode == Instruction::PACKED_SWITCH) ?
       kPackedSwitch : kSparseSwitch;
-  CompilerInitGrowableList(cu_, &cur_block->successor_block_list.blocks, size,
-                      kListSuccessorBlocks);
+  cur_block->successor_block_list.blocks =
+      new (arena_) GrowableArray<SuccessorBlockInfo*>(arena_, size, kGrowableArraySuccessorBlocks);
 
   for (i = 0; i < size; i++) {
     BasicBlock *case_block = FindBlock(cur_offset + target_table[i], /* split */ true,
                                       /* create */ true, /* immed_pred_block_p */ &cur_block);
     SuccessorBlockInfo *successor_block_info =
-        static_cast<SuccessorBlockInfo*>(NewMem(cu_, sizeof(SuccessorBlockInfo),
-                                         false, kAllocSuccessor));
+        static_cast<SuccessorBlockInfo*>(arena_->NewMem(sizeof(SuccessorBlockInfo), false,
+                                                        ArenaAllocator::kAllocSuccessor));
     successor_block_info->block = case_block;
     successor_block_info->key =
         (insn->dalvikInsn.opcode == Instruction::PACKED_SWITCH) ?
         first_key + i : keyTable[i];
-    InsertGrowableList(cu_, &cur_block->successor_block_list.blocks,
-                          reinterpret_cast<uintptr_t>(successor_block_info));
-    InsertGrowableList(cu_, case_block->predecessors,
-                          reinterpret_cast<uintptr_t>(cur_block));
+    cur_block->successor_block_list.blocks->Insert(successor_block_info);
+    case_block->predecessors->Insert(cur_block);
   }
 
   /* Fall-through case */
   BasicBlock* fallthrough_block = FindBlock( cur_offset +  width, /* split */ false,
                                            /* create */ true, /* immed_pred_block_p */ NULL);
   cur_block->fall_through = fallthrough_block;
-  InsertGrowableList(cu_, fallthrough_block->predecessors,
-                        reinterpret_cast<uintptr_t>(cur_block));
+  fallthrough_block->predecessors->Insert(cur_block);
 }
 
 /* Process instructions with the kThrow flag */
@@ -445,7 +441,7 @@
                                       int flags, ArenaBitVector* try_block_addr,
                                       const uint16_t* code_ptr, const uint16_t* code_end)
 {
-  bool in_try_block = IsBitSet(try_block_addr, cur_offset);
+  bool in_try_block = try_block_addr->IsBitSet(cur_offset);
 
   /* In try block */
   if (in_try_block) {
@@ -458,8 +454,8 @@
     }
 
     cur_block->successor_block_list.block_list_type = kCatch;
-    CompilerInitGrowableList(cu_, &cur_block->successor_block_list.blocks, 2,
-                        kListSuccessorBlocks);
+    cur_block->successor_block_list.blocks =
+        new (arena_) GrowableArray<SuccessorBlockInfo*>(arena_, 2, kGrowableArraySuccessorBlocks);
 
     for (;iterator.HasNext(); iterator.Next()) {
       BasicBlock *catch_block = FindBlock(iterator.GetHandlerAddress(), false /* split*/,
@@ -469,20 +465,18 @@
         catches_.insert(catch_block->start_offset);
       }
       SuccessorBlockInfo *successor_block_info = reinterpret_cast<SuccessorBlockInfo*>
-          (NewMem(cu_, sizeof(SuccessorBlockInfo), false, kAllocSuccessor));
+          (arena_->NewMem(sizeof(SuccessorBlockInfo), false, ArenaAllocator::kAllocSuccessor));
       successor_block_info->block = catch_block;
       successor_block_info->key = iterator.GetHandlerTypeIndex();
-      InsertGrowableList(cu_, &cur_block->successor_block_list.blocks,
-                            reinterpret_cast<uintptr_t>(successor_block_info));
-      InsertGrowableList(cu_, catch_block->predecessors,
-                            reinterpret_cast<uintptr_t>(cur_block));
+      cur_block->successor_block_list.blocks->Insert(successor_block_info);
+      catch_block->predecessors->Insert(cur_block);
     }
   } else {
-    BasicBlock *eh_block = NewMemBB(cu_, kExceptionHandling, num_blocks_++);
+    BasicBlock *eh_block = NewMemBB(kExceptionHandling, num_blocks_++);
     cur_block->taken = eh_block;
-    InsertGrowableList(cu_, &block_list_, reinterpret_cast<uintptr_t>(eh_block));
+    block_list_.Insert(eh_block);
     eh_block->start_offset = cur_offset;
-    InsertGrowableList(cu_, eh_block->predecessors, reinterpret_cast<uintptr_t>(cur_block));
+    eh_block->predecessors->Insert(cur_block);
   }
 
   if (insn->dalvikInsn.opcode == Instruction::THROW){
@@ -512,12 +506,12 @@
    * not automatically terminated after the work portion, and may
    * contain following instructions.
    */
-  BasicBlock *new_block = NewMemBB(cu_, kDalvikByteCode, num_blocks_++);
-  InsertGrowableList(cu_, &block_list_, reinterpret_cast<uintptr_t>(new_block));
+  BasicBlock *new_block = NewMemBB(kDalvikByteCode, num_blocks_++);
+  block_list_.Insert(new_block);
   new_block->start_offset = insn->offset;
   cur_block->fall_through = new_block;
-  InsertGrowableList(cu_, new_block->predecessors, reinterpret_cast<uintptr_t>(cur_block));
-  MIR* new_insn = static_cast<MIR*>(NewMem(cu_, sizeof(MIR), true, kAllocMIR));
+  new_block->predecessors->Insert(cur_block);
+  MIR* new_insn = static_cast<MIR*>(arena_->NewMem(sizeof(MIR), true, ArenaAllocator::kAllocMIR));
   *new_insn = *insn;
   insn->dalvikInsn.opcode =
       static_cast<Instruction::Code>(kMirOpCheck);
@@ -545,21 +539,20 @@
       current_code_item_->insns_ + current_code_item_->insns_size_in_code_units_;
 
   // TODO: need to rework expansion of block list & try_block_addr when inlining activated.
-  ReallocGrowableList(cu_, &block_list_, block_list_.num_used +
-                      current_code_item_->insns_size_in_code_units_);
+  block_list_.Resize(block_list_.Size() + current_code_item_->insns_size_in_code_units_);
   // TODO: replace with explicit resize routine.  Using automatic extension side effect for now.
-  SetBit(cu_, try_block_addr_, current_code_item_->insns_size_in_code_units_);
-  ClearBit(try_block_addr_, current_code_item_->insns_size_in_code_units_);
+  try_block_addr_->SetBit(current_code_item_->insns_size_in_code_units_);
+  try_block_addr_->ClearBit(current_code_item_->insns_size_in_code_units_);
 
   // If this is the first method, set up default entry and exit blocks.
   if (current_method_ == 0) {
     DCHECK(entry_block_ == NULL);
     DCHECK(exit_block_ == NULL);
     DCHECK(num_blocks_ == 0);
-    entry_block_ = NewMemBB(cu_, kEntryBlock, num_blocks_++);
-    exit_block_ = NewMemBB(cu_, kExitBlock, num_blocks_++);
-    InsertGrowableList(cu_, &block_list_, reinterpret_cast<uintptr_t>(entry_block_));
-    InsertGrowableList(cu_, &block_list_, reinterpret_cast<uintptr_t>(exit_block_));
+    entry_block_ = NewMemBB(kEntryBlock, num_blocks_++);
+    exit_block_ = NewMemBB(kExitBlock, num_blocks_++);
+    block_list_.Insert(entry_block_);
+    block_list_.Insert(exit_block_);
     // TODO: deprecate all "cu->" fields; move what's left to wherever CompilationUnit is allocated.
     cu_->dex_file = &dex_file;
     cu_->class_def_idx = class_def_idx;
@@ -582,16 +575,16 @@
   }
 
   /* Current block to record parsed instructions */
-  BasicBlock *cur_block = NewMemBB(cu_, kDalvikByteCode, num_blocks_++);
+  BasicBlock *cur_block = NewMemBB(kDalvikByteCode, num_blocks_++);
   DCHECK_EQ(current_offset_, 0);
   cur_block->start_offset = current_offset_;
-  InsertGrowableList(cu_, &block_list_, reinterpret_cast<uintptr_t>(cur_block));
+  block_list_.Insert(cur_block);
   /* Add first block to the fast lookup cache */
 // FIXME: block map needs association with offset/method pair rather than just offset
   block_map_.Put(cur_block->start_offset, cur_block);
 // FIXME: this needs to insert at the insert point rather than entry block.
   entry_block_->fall_through = cur_block;
-  InsertGrowableList(cu_, cur_block->predecessors, reinterpret_cast<uintptr_t>(entry_block_));
+  cur_block->predecessors->Insert(entry_block_);
 
     /* Identify code range in try blocks and set up the empty catch blocks */
   ProcessTryCatchBlocks();
@@ -600,7 +593,8 @@
   int num_patterns = sizeof(special_patterns)/sizeof(special_patterns[0]);
   bool live_pattern = (num_patterns > 0) && !(cu_->disable_opt & (1 << kMatch));
   bool* dead_pattern =
-      static_cast<bool*>(NewMem(cu_, sizeof(bool) * num_patterns, true, kAllocMisc));
+      static_cast<bool*>(arena_->NewMem(sizeof(bool) * num_patterns, true,
+                                        ArenaAllocator::kAllocMisc));
   SpecialCaseHandler special_case = kNoHandler;
   // FIXME - wire this up
   (void)special_case;
@@ -608,7 +602,7 @@
 
   /* Parse all instructions and put them into containing basic blocks */
   while (code_ptr < code_end) {
-    MIR *insn = static_cast<MIR *>(NewMem(cu_, sizeof(MIR), true, kAllocMIR));
+    MIR *insn = static_cast<MIR *>(arena_->NewMem(sizeof(MIR), true, ArenaAllocator::kAllocMIR));
     insn->offset = current_offset_;
     insn->m_unit_index = current_method_;
     int width = ParseInsn(code_ptr, &insn->dalvikInsn);
@@ -656,8 +650,7 @@
     } else if (flags & Instruction::kReturn) {
       cur_block->terminated_by_return = true;
       cur_block->fall_through = exit_block_;
-      InsertGrowableList(cu_, exit_block_->predecessors,
-                            reinterpret_cast<uintptr_t>(cur_block));
+      exit_block_->predecessors->Insert(cur_block);
       /*
        * Terminate the current block if there are instructions
        * afterwards.
@@ -694,8 +687,7 @@
 
       if ((cur_block->fall_through == NULL) && (flags & Instruction::kContinue)) {
         cur_block->fall_through = next_block;
-        InsertGrowableList(cu_, next_block->predecessors,
-                              reinterpret_cast<uintptr_t>(cur_block));
+        next_block->predecessors->Insert(cur_block);
       }
       cur_block = next_block;
     }
@@ -742,7 +734,7 @@
   int idx;
 
   for (idx = 0; idx < num_blocks; idx++) {
-    int block_idx = all_blocks ? idx : dfs_order_.elem_list[idx];
+    int block_idx = all_blocks ? idx : dfs_order_->Get(idx);
     BasicBlock *bb = GetBasicBlock(block_idx);
     if (bb == NULL) break;
     if (bb->block_type == kDead) continue;
@@ -793,19 +785,15 @@
               bb->start_offset, bb->id,
               (bb->successor_block_list.block_list_type == kCatch) ?
                "Mrecord" : "record");
-      GrowableListIterator iterator;
-      GrowableListIteratorInit(&bb->successor_block_list.blocks,
-                                  &iterator);
-      SuccessorBlockInfo *successor_block_info =
-          reinterpret_cast<SuccessorBlockInfo*>(GrowableListIteratorNext(&iterator));
+      GrowableArray<SuccessorBlockInfo*>::Iterator iterator(bb->successor_block_list.blocks);
+      SuccessorBlockInfo *successor_block_info = iterator.Next();
 
       int succ_id = 0;
       while (true) {
         if (successor_block_info == NULL) break;
 
         BasicBlock *dest_block = successor_block_info->block;
-        SuccessorBlockInfo *next_successor_block_info =
-            reinterpret_cast<SuccessorBlockInfo*>(GrowableListIteratorNext(&iterator));
+        SuccessorBlockInfo *next_successor_block_info = iterator.Next();
 
         fprintf(file, "    {<f%d> %04x: %04x\\l}%s\\\n",
                 succ_id++,
@@ -824,13 +812,11 @@
       if (bb->successor_block_list.block_list_type == kPackedSwitch ||
           bb->successor_block_list.block_list_type == kSparseSwitch) {
 
-        GrowableListIteratorInit(&bb->successor_block_list.blocks,
-                                    &iterator);
+        GrowableArray<SuccessorBlockInfo*>::Iterator iter(bb->successor_block_list.blocks);
 
         succ_id = 0;
         while (true) {
-          SuccessorBlockInfo *successor_block_info =
-              reinterpret_cast<SuccessorBlockInfo*>( GrowableListIteratorNext(&iterator));
+          SuccessorBlockInfo *successor_block_info = iter.Next();
           if (successor_block_info == NULL) break;
 
           BasicBlock *dest_block = successor_block_info->block;
@@ -1028,7 +1014,7 @@
     str.append("]--optimized away");
   }
   int length = str.length() + 1;
-  ret = static_cast<char*>(NewMem(cu_, length, false, kAllocDFInfo));
+  ret = static_cast<char*>(arena_->NewMem(length, false, ArenaAllocator::kAllocDFInfo));
   strncpy(ret, str.c_str(), length);
   return ret;
 }
@@ -1113,10 +1099,10 @@
   LOG(INFO) << "Compiling " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
   LOG(INFO) << cu_->insns << " insns";
   LOG(INFO) << GetNumBlocks() << " blocks in total";
-  GrowableListIterator iterator = GetBasicBlockIterator();
+  GrowableArray<BasicBlock*>::Iterator iterator(&block_list_);
 
   while (true) {
-    bb = reinterpret_cast<BasicBlock*>(GrowableListIteratorNext(&iterator));
+    bb = iterator.Next();
     if (bb == NULL) break;
     LOG(INFO) << StringPrintf("Block %d (%s) (insn %04x - %04x%s)",
         bb->id,
@@ -1144,7 +1130,8 @@
 CallInfo* MIRGraph::NewMemCallInfo(BasicBlock* bb, MIR* mir, InvokeType type,
                                   bool is_range)
 {
-  CallInfo* info = static_cast<CallInfo*>(NewMem(cu_, sizeof(CallInfo), true, kAllocMisc));
+  CallInfo* info = static_cast<CallInfo*>(arena_->NewMem(sizeof(CallInfo), true,
+                                                         ArenaAllocator::kAllocMisc));
   MIR* move_result_mir = FindMoveResult(bb, mir);
   if (move_result_mir == NULL) {
     info->result.location = kLocInvalid;
@@ -1155,7 +1142,8 @@
   }
   info->num_arg_words = mir->ssa_rep->num_uses;
   info->args = (info->num_arg_words == 0) ? NULL : static_cast<RegLocation*>
-      (NewMem(cu_, sizeof(RegLocation) * info->num_arg_words, false, kAllocMisc));
+      (arena_->NewMem(sizeof(RegLocation) * info->num_arg_words, false,
+                      ArenaAllocator::kAllocMisc));
   for (int i = 0; i < info->num_arg_words; i++) {
     info->args[i] = GetRawSrc(mir, i);
   }
@@ -1167,6 +1155,19 @@
   return info;
 }
 
-
+// Allocate a new basic block.
+BasicBlock* MIRGraph::NewMemBB(BBType block_type, int block_id)
+{
+  BasicBlock* bb = static_cast<BasicBlock*>(arena_->NewMem(sizeof(BasicBlock), true,
+                                                           ArenaAllocator::kAllocBB));
+  bb->block_type = block_type;
+  bb->id = block_id;
+  // TUNING: better estimate of the exit block predecessors?
+  bb->predecessors = new (arena_)
+      GrowableArray<BasicBlock*>(arena_, (block_type == kExitBlock) ? 2048 : 2, kGrowableArrayPredecessors);
+  bb->successor_block_list.block_list_type = kNotUsed;
+  block_id_map_.Put(block_id, block_id);
+  return bb;
+}
 
 } // namespace art
diff --git a/src/compiler/dex/mir_graph.h b/src/compiler/dex/mir_graph.h
index 514205d..6a45ac0 100644
--- a/src/compiler/dex/mir_graph.h
+++ b/src/compiler/dex/mir_graph.h
@@ -20,6 +20,8 @@
 #include "dex_file.h"
 #include "dex_instruction.h"
 #include "compiler_ir.h"
+#include "arena_bit_vector.h"
+#include "growable_array.h"
 
 namespace art {
 
@@ -145,6 +147,8 @@
 #define MIR_IGNORE_SUSPEND_CHECK        (1 << kMIRIgnoreSuspendCheck)
 #define MIR_DUP                         (1 << kMIRDup)
 
+#define BLOCK_NAME_LEN 80
+
 /*
  * In general, vreg/sreg describe Dalvik registers that originated with dx.  However,
  * it is useful to have compiler-generated temporary registers and have them treated
@@ -211,6 +215,8 @@
   } meta;
 };
 
+struct SuccessorBlockInfo;
+
 struct BasicBlock {
   int id;
   int dfs_id;
@@ -230,13 +236,13 @@
   BasicBlock* taken;
   BasicBlock* i_dom;                // Immediate dominator.
   BasicBlockDataFlow* data_flow_info;
-  GrowableList* predecessors;
+  GrowableArray<BasicBlock*>* predecessors;
   ArenaBitVector* dominators;
   ArenaBitVector* i_dominated;      // Set nodes being immediately dominated.
   ArenaBitVector* dom_frontier;     // Dominance frontier.
   struct {                          // For one-to-many successors like.
     BlockListType block_list_type;  // switch and exception handling.
-    GrowableList blocks;
+    GrowableArray<SuccessorBlockInfo*>* blocks;
   } successor_block_list;
 };
 
@@ -245,6 +251,7 @@
  * "SuccessorBlockInfo".  For catch blocks, key is type index for the exception.  For swtich
  * blocks, key is the case value.
  */
+// TODO: make class with placement new.
 struct SuccessorBlockInfo {
   BasicBlock* block;
   int key;
@@ -302,7 +309,7 @@
 
 class MIRGraph {
  public:
-  MIRGraph(CompilationUnit* cu);
+  MIRGraph(CompilationUnit* cu, ArenaAllocator* arena);
   ~MIRGraph() {}
 
   /*
@@ -342,47 +349,41 @@
     return exit_block_;
   }
 
-  GrowableListIterator GetBasicBlockIterator() {
-    GrowableListIterator iterator;
-    GrowableListIteratorInit(&block_list_, &iterator);
-    return iterator;
-  }
-
   BasicBlock* GetBasicBlock(int block_id) const {
-    return reinterpret_cast<BasicBlock*>(GrowableListGetElement(&block_list_, block_id));
+    return block_list_.Get(block_id);
   }
 
   size_t GetBasicBlockListCount() const {
-    return block_list_.num_used;
+    return block_list_.Size();
   }
 
-  GrowableList* GetBlockList() {
+  GrowableArray<BasicBlock*>* GetBlockList() {
     return &block_list_;
   }
 
-  GrowableList* GetDfsOrder() {
-    return &dfs_order_;
+  GrowableArray<int>* GetDfsOrder() {
+    return dfs_order_;
   }
 
-  GrowableList* GetDfsPostOrder() {
-    return &dfs_post_order_;
+  GrowableArray<int>* GetDfsPostOrder() {
+    return dfs_post_order_;
   }
 
-  GrowableList* GetDomPostOrder() {
-    return &dom_post_order_traversal_;
-  }
-
-  GrowableList* GetSSASubscripts() {
-    return ssa_subscripts_;
+  GrowableArray<int>* GetDomPostOrder() {
+    return dom_post_order_traversal_;
   }
 
   int GetDefCount() const {
     return def_count_;
   }
 
+  ArenaAllocator* GetArena() {
+    return arena_;
+  }
+
   void EnableOpcodeCounting() {
-    opcode_count_ = static_cast<int*>(NewMem(cu_, kNumPackedOpcodes * sizeof(int), true,
-                                             kAllocMisc));
+    opcode_count_ = static_cast<int*>(arena_->NewMem(kNumPackedOpcodes * sizeof(int), true,
+                                                     ArenaAllocator::kAllocMisc));
   }
 
   void ShowOpcodeStats();
@@ -400,7 +401,7 @@
   void BasicBlockOptimization();
 
   bool IsConst(int32_t s_reg) const {
-    return (IsBitSet(is_constant_v_, s_reg));
+    return is_constant_v_->IsBitSet(s_reg);
   }
 
   bool IsConst(RegLocation loc) const {
@@ -435,24 +436,24 @@
     num_ssa_regs_ = new_num;
   }
 
-  int GetNumReachableBlocks() const {
+  unsigned int GetNumReachableBlocks() const {
     return num_reachable_blocks_;
   }
 
   int GetUseCount(int vreg) const {
-    return GrowableListGetElement(&use_counts_, vreg);
+    return use_counts_.Get(vreg);
   }
 
   int GetRawUseCount(int vreg) const {
-    return GrowableListGetElement(&raw_use_counts_, vreg);
+    return raw_use_counts_.Get(vreg);
   }
 
   int GetSSASubscript(int ssa_reg) const {
-    return GrowableListGetElement(ssa_subscripts_, ssa_reg);
+    return ssa_subscripts_->Get(ssa_reg);
   }
 
   const char* GetSSAString(int ssa_reg) const {
-    return GET_ELEM_N(ssa_strings_, char*, ssa_reg);
+    return ssa_strings_->Get(ssa_reg);
   }
 
   RegLocation GetRawSrc(MIR* mir, int num)
@@ -538,7 +539,6 @@
   void PrependMIR(BasicBlock* bb, MIR* mir);
   void InsertMIRAfter(BasicBlock* bb, MIR* current_mir, MIR* new_mir);
   char* GetDalvikDisassembly(const MIR* mir);
-
   void ReplaceSpecialChars(std::string& str);
   std::string GetSSAName(int ssa_reg);
   std::string GetSSANameWithConst(int ssa_reg, bool singles_only);
@@ -546,6 +546,7 @@
   const char* GetShortyFromTargetIdx(int);
   void DumpMIRGraph();
   CallInfo* NewMemCallInfo(BasicBlock* bb, MIR* mir, InvokeType type, bool is_range);
+  BasicBlock* NewMemBB(BBType block_type, int block_id);
 
   /*
    * IsDebugBuild sanity check: keep track of the Dex PCs for catch entries so that later on
@@ -555,7 +556,7 @@
 
    // TODO: make these private.
    RegLocation* reg_location_;                         // Map SSA names to location.
-   GrowableList compiler_temps_;
+   GrowableArray<CompilerTemp*> compiler_temps_;
    SafeMap<unsigned int, unsigned int> block_id_map_;  // Block collapse lookup cache.
 
    static const int oat_data_flow_attributes_[kMirOpLast];
@@ -610,10 +611,10 @@
    int GetSSAUseCount(int s_reg);
    bool BasicBlockOpt(BasicBlock* bb);
    bool EliminateNullChecks(BasicBlock* bb);
-   bool NullCheckEliminationInit(BasicBlock* bb);
+   void NullCheckEliminationInit(BasicBlock* bb);
    bool BuildExtendedBBList(struct BasicBlock* bb);
    bool FillDefBlockMatrix(BasicBlock* bb);
-   bool InitializeDominationInfo(BasicBlock* bb);
+   void InitializeDominationInfo(BasicBlock* bb);
    bool ComputeblockIDom(BasicBlock* bb);
    bool ComputeBlockDominators(BasicBlock* bb);
    bool SetDominators(BasicBlock* bb);
@@ -621,32 +622,32 @@
    bool InsertPhiNodeOperands(BasicBlock* bb);
    bool ComputeDominanceFrontier(BasicBlock* bb);
    bool DoConstantPropogation(BasicBlock* bb);
-   bool CountChecks(BasicBlock* bb);
+   void CountChecks(BasicBlock* bb);
    bool CombineBlocks(BasicBlock* bb);
 
    CompilationUnit* const cu_;
-   GrowableList* ssa_base_vregs_;
-   GrowableList* ssa_subscripts_;
-   GrowableList* ssa_strings_;
+   GrowableArray<int>* ssa_base_vregs_;
+   GrowableArray<int>* ssa_subscripts_;
+   GrowableArray<char*>* ssa_strings_;
    // Map original Dalvik virtual reg i to the current SSA name.
    int* vreg_to_ssa_map_;            // length == method->registers_size
    int* ssa_last_defs_;              // length == method->registers_size
    ArenaBitVector* is_constant_v_;   // length == num_ssa_reg
    int* constant_values_;            // length == num_ssa_reg
    // Use counts of ssa names.
-   GrowableList use_counts_;         // Weighted by nesting depth
-   GrowableList raw_use_counts_;     // Not weighted
-   int num_reachable_blocks_;
-   GrowableList dfs_order_;
-   GrowableList dfs_post_order_;
-   GrowableList dom_post_order_traversal_;
+   GrowableArray<uint32_t> use_counts_;      // Weighted by nesting depth
+   GrowableArray<uint32_t> raw_use_counts_;  // Not weighted
+   unsigned int num_reachable_blocks_;
+   GrowableArray<int>* dfs_order_;
+   GrowableArray<int>* dfs_post_order_;
+   GrowableArray<int>* dom_post_order_traversal_;
    int* i_dom_list_;
    ArenaBitVector** def_block_matrix_;    // num_dalvik_register x num_blocks.
    ArenaBitVector* temp_block_v_;
    ArenaBitVector* temp_dalvik_register_v_;
    ArenaBitVector* temp_ssa_register_v_;  // num_ssa_regs.
    static const int kInvalidEntry = -1;
-   GrowableList block_list_;
+   GrowableArray<BasicBlock*> block_list_;
    ArenaBitVector* try_block_addr_;
    BasicBlock* entry_block_;
    BasicBlock* exit_block_;
@@ -666,6 +667,7 @@
    int method_sreg_;
    unsigned int attributes_;
    Checkstats* checkstats_;
+   ArenaAllocator* arena_;
 };
 
 }  // namespace art
diff --git a/src/compiler/dex/mir_optimization.cc b/src/compiler/dex/mir_optimization.cc
index 51b9d9d..54a9a83 100644
--- a/src/compiler/dex/mir_optimization.cc
+++ b/src/compiler/dex/mir_optimization.cc
@@ -22,19 +22,19 @@
 
 static unsigned int Predecessors(BasicBlock* bb)
 {
-  return bb->predecessors->num_used;
+  return bb->predecessors->Size();
 }
 
 /* Set up a constant value for opcodes that have the DF_SETS_CONST attribute */
 void MIRGraph::SetConstant(int32_t ssa_reg, int value)
 {
-  SetBit(cu_, is_constant_v_, ssa_reg);
+  is_constant_v_->SetBit(ssa_reg);
   constant_values_[ssa_reg] = value;
 }
 
 void MIRGraph::SetConstantWide(int ssa_reg, int64_t value)
 {
-  SetBit(cu_, is_constant_v_, ssa_reg);
+  is_constant_v_->SetBit(ssa_reg);
   constant_values_[ssa_reg] = Low32Bits(value);
   constant_values_[ssa_reg + 1] = High32Bits(value);
 }
@@ -42,7 +42,6 @@
 bool MIRGraph::DoConstantPropogation(BasicBlock* bb)
 {
   MIR* mir;
-  ArenaBitVector *is_constant_v = is_constant_v_;
 
   for (mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
     int df_attributes = oat_data_flow_attributes_[mir->dalvikInsn.opcode];
@@ -83,7 +82,7 @@
       int i;
 
       for (i = 0; i < mir->ssa_rep->num_uses; i++) {
-        if (!IsBitSet(is_constant_v, mir->ssa_rep->uses[i])) break;
+        if (!is_constant_v_->IsBitSet(mir->ssa_rep->uses[i])) break;
       }
       /* Move a register holding a constant to another register */
       if (i == mir->ssa_rep->num_uses) {
@@ -100,9 +99,9 @@
 
 void MIRGraph::PropagateConstants()
 {
-  is_constant_v_ = AllocBitVector(cu_, GetNumSSARegs(), false);
-  constant_values_ = static_cast<int*>(NewMem(cu_, sizeof(int) * GetNumSSARegs(), true,
-                                       kAllocDFInfo));
+  is_constant_v_ = new (arena_) ArenaBitVector(arena_, GetNumSSARegs(), false);
+  constant_values_ = static_cast<int*>(arena_->NewMem(sizeof(int) * GetNumSSARegs(), true,
+                                                      ArenaAllocator::kAllocDFInfo));
   AllNodesIterator iter(this, false /* not iterative */);
   for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
     DoConstantPropogation(bb);
@@ -210,8 +209,7 @@
 
 int MIRGraph::GetSSAUseCount(int s_reg)
 {
-  DCHECK(s_reg < static_cast<int>(raw_use_counts_.num_used));
-  return raw_use_counts_.elem_list[s_reg];
+  return raw_use_counts_.Get(s_reg);
 }
 
 
@@ -404,8 +402,9 @@
               } else {
                 DCHECK_EQ(SelectKind(if_true), kSelectMove);
                 DCHECK_EQ(SelectKind(if_false), kSelectMove);
-                int* src_ssa = static_cast<int*>(NewMem(cu_, sizeof(int) * 3, false,
-                                                 kAllocDFInfo));
+                int* src_ssa =
+                    static_cast<int*>(arena_->NewMem(sizeof(int) * 3, false,
+                                                     ArenaAllocator::kAllocDFInfo));
                 src_ssa[0] = mir->ssa_rep->uses[0];
                 src_ssa[1] = if_true->ssa_rep->uses[0];
                 src_ssa[2] = if_false->ssa_rep->uses[0];
@@ -413,10 +412,12 @@
                 mir->ssa_rep->num_uses = 3;
               }
               mir->ssa_rep->num_defs = 1;
-              mir->ssa_rep->defs = static_cast<int*>(NewMem(cu_, sizeof(int) * 1, false,
-                                                     kAllocDFInfo));
-              mir->ssa_rep->fp_def = static_cast<bool*>(NewMem(cu_, sizeof(bool) * 1, false,
-                                                     kAllocDFInfo));
+              mir->ssa_rep->defs =
+                  static_cast<int*>(arena_->NewMem(sizeof(int) * 1, false,
+                                                   ArenaAllocator::kAllocDFInfo));
+              mir->ssa_rep->fp_def =
+                  static_cast<bool*>(arena_->NewMem(sizeof(bool) * 1, false,
+                                                    ArenaAllocator::kAllocDFInfo));
               mir->ssa_rep->fp_def[0] = if_true->ssa_rep->fp_def[0];
               /*
                * There is usually a Phi node in the join block for our two cases.  If the
@@ -467,38 +468,37 @@
   return true;
 }
 
-bool MIRGraph::NullCheckEliminationInit(struct BasicBlock* bb)
+void MIRGraph::NullCheckEliminationInit(struct BasicBlock* bb)
 {
-  if (bb->data_flow_info == NULL) return false;
-  bb->data_flow_info->ending_null_check_v =
-      AllocBitVector(cu_, GetNumSSARegs(), false, kBitMapNullCheck);
-  ClearAllBits(bb->data_flow_info->ending_null_check_v);
-  return true;
+  if (bb->data_flow_info != NULL) {
+    bb->data_flow_info->ending_null_check_v =
+        new (arena_) ArenaBitVector(arena_, GetNumSSARegs(), false, kBitMapNullCheck);
+  }
 }
 
 /* Collect stats on number of checks removed */
-bool MIRGraph::CountChecks(struct BasicBlock* bb)
+void MIRGraph::CountChecks(struct BasicBlock* bb)
 {
-  if (bb->data_flow_info == NULL) return false;
-  for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
-    if (mir->ssa_rep == NULL) {
-      continue;
-    }
-    int df_attributes = oat_data_flow_attributes_[mir->dalvikInsn.opcode];
-    if (df_attributes & DF_HAS_NULL_CHKS) {
-      checkstats_->null_checks++;
-      if (mir->optimization_flags & MIR_IGNORE_NULL_CHECK) {
-        checkstats_->null_checks_eliminated++;
+  if (bb->data_flow_info != NULL) {
+    for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+      if (mir->ssa_rep == NULL) {
+        continue;
       }
-    }
-    if (df_attributes & DF_HAS_RANGE_CHKS) {
-      checkstats_->range_checks++;
-      if (mir->optimization_flags & MIR_IGNORE_RANGE_CHECK) {
-        checkstats_->range_checks_eliminated++;
+      int df_attributes = oat_data_flow_attributes_[mir->dalvikInsn.opcode];
+      if (df_attributes & DF_HAS_NULL_CHKS) {
+        checkstats_->null_checks++;
+        if (mir->optimization_flags & MIR_IGNORE_NULL_CHECK) {
+          checkstats_->null_checks_eliminated++;
+        }
+      }
+      if (df_attributes & DF_HAS_RANGE_CHKS) {
+        checkstats_->range_checks++;
+        if (mir->optimization_flags & MIR_IGNORE_RANGE_CHECK) {
+          checkstats_->range_checks_eliminated++;
+        }
       }
     }
   }
-  return false;
 }
 
 /* Try to make common case the fallthrough path */
@@ -514,7 +514,7 @@
     if ((walker->block_type == kEntryBlock) || (Predecessors(walker) != 1)) {
       break;
     }
-    BasicBlock* prev = GET_ELEM_N(walker->predecessors, BasicBlock*, 0);
+    BasicBlock* prev = walker->predecessors->Get(0);
     if (prev->conditional_branch) {
       if (prev->fall_through == walker) {
         // Already done - return
@@ -629,28 +629,26 @@
    * status (except for "this").
    */
   if ((bb->block_type == kEntryBlock) | bb->catch_entry) {
-    ClearAllBits(temp_ssa_register_v_);
+    temp_ssa_register_v_->ClearAllBits();
     if ((cu_->access_flags & kAccStatic) == 0) {
       // If non-static method, mark "this" as non-null
       int this_reg = cu_->num_dalvik_registers - cu_->num_ins;
-      SetBit(cu_, temp_ssa_register_v_, this_reg);
+      temp_ssa_register_v_->SetBit(this_reg);
     }
   } else {
     // Starting state is the intersection of all incoming arcs
-    GrowableListIterator iter;
-    GrowableListIteratorInit(bb->predecessors, &iter);
-    BasicBlock* pred_bb = reinterpret_cast<BasicBlock*>(GrowableListIteratorNext(&iter));
+    GrowableArray<BasicBlock*>::Iterator iter(bb->predecessors);
+    BasicBlock* pred_bb = iter.Next();
     DCHECK(pred_bb != NULL);
-    CopyBitVector(temp_ssa_register_v_, pred_bb->data_flow_info->ending_null_check_v);
+    temp_ssa_register_v_->Copy(pred_bb->data_flow_info->ending_null_check_v);
     while (true) {
-      pred_bb = reinterpret_cast<BasicBlock*>(GrowableListIteratorNext(&iter));
+      pred_bb = iter.Next();
       if (!pred_bb) break;
       if ((pred_bb->data_flow_info == NULL) ||
           (pred_bb->data_flow_info->ending_null_check_v == NULL)) {
         continue;
       }
-      IntersectBitVectors(temp_ssa_register_v_, temp_ssa_register_v_,
-                          pred_bb->data_flow_info->ending_null_check_v);
+      temp_ssa_register_v_->Intersect(pred_bb->data_flow_info->ending_null_check_v);
     }
   }
 
@@ -663,7 +661,7 @@
 
     // Mark target of NEW* as non-null
     if (df_attributes & DF_NON_NULL_DST) {
-      SetBit(cu_, temp_ssa_register_v_, mir->ssa_rep->defs[0]);
+      temp_ssa_register_v_->SetBit(mir->ssa_rep->defs[0]);
     }
 
     // Mark non-null returns from invoke-style NEW*
@@ -673,7 +671,7 @@
       if (next_mir &&
           next_mir->dalvikInsn.opcode == Instruction::MOVE_RESULT_OBJECT) {
         // Mark as null checked
-        SetBit(cu_, temp_ssa_register_v_, next_mir->ssa_rep->defs[0]);
+        temp_ssa_register_v_->SetBit(next_mir->ssa_rep->defs[0]);
       } else {
         if (next_mir) {
           LOG(WARNING) << "Unexpected opcode following new: " << next_mir->dalvikInsn.opcode;
@@ -688,7 +686,7 @@
             // First non-pseudo should be MOVE_RESULT_OBJECT
             if (tmir->dalvikInsn.opcode == Instruction::MOVE_RESULT_OBJECT) {
               // Mark as null checked
-              SetBit(cu_, temp_ssa_register_v_, tmir->ssa_rep->defs[0]);
+              temp_ssa_register_v_->SetBit(tmir->ssa_rep->defs[0]);
             } else {
               LOG(WARNING) << "Unexpected op after new: " << tmir->dalvikInsn.opcode;
             }
@@ -709,11 +707,10 @@
           mir->ssa_rep->num_uses;
       bool null_checked = true;
       for (int i = 0; i < operands; i++) {
-        null_checked &= IsBitSet(temp_ssa_register_v_,
-        mir->ssa_rep->uses[i]);
+        null_checked &= temp_ssa_register_v_->IsBitSet(mir->ssa_rep->uses[i]);
       }
       if (null_checked) {
-        SetBit(cu_, temp_ssa_register_v_, tgt_sreg);
+        temp_ssa_register_v_->SetBit(tgt_sreg);
       }
     }
 
@@ -728,24 +725,22 @@
         src_idx = 0;
       }
       int src_sreg = mir->ssa_rep->uses[src_idx];
-        if (IsBitSet(temp_ssa_register_v_, src_sreg)) {
+        if (temp_ssa_register_v_->IsBitSet(src_sreg)) {
           // Eliminate the null check
           mir->optimization_flags |= MIR_IGNORE_NULL_CHECK;
         } else {
           // Mark s_reg as null-checked
-          SetBit(cu_, temp_ssa_register_v_, src_sreg);
+          temp_ssa_register_v_->SetBit(src_sreg);
         }
      }
   }
 
   // Did anything change?
-  bool res = CompareBitVectors(bb->data_flow_info->ending_null_check_v,
-                                  temp_ssa_register_v_);
-  if (res) {
-    CopyBitVector(bb->data_flow_info->ending_null_check_v,
-                     temp_ssa_register_v_);
+  bool changed = !temp_ssa_register_v_->Equal(bb->data_flow_info->ending_null_check_v);
+  if (changed) {
+    bb->data_flow_info->ending_null_check_v->Copy(temp_ssa_register_v_);
   }
-  return res;
+  return changed;
 }
 
 void MIRGraph::NullCheckElimination()
@@ -795,7 +790,8 @@
 void MIRGraph::DumpCheckStats()
 {
   Checkstats* stats =
-      static_cast<Checkstats*>(NewMem(cu_, sizeof(Checkstats), true, kAllocDFInfo));
+      static_cast<Checkstats*>(arena_->NewMem(sizeof(Checkstats), true,
+                                              ArenaAllocator::kAllocDFInfo));
   checkstats_ = stats;
   AllNodesIterator iter(this, false /* not iterative */);
   for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
@@ -851,7 +847,6 @@
 void MIRGraph::BasicBlockOptimization()
 {
   if (!(cu_->disable_opt & (1 << kBBOpt))) {
-    CompilerInitGrowableList(cu_, &compiler_temps_, 6, kListMisc);
     DCHECK_EQ(cu_->num_compiler_temps, 0);
     // Mark all blocks as not visited
     AllNodesIterator iter(this, false /* not iterative */);
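
With this hunk, null-check elimination speaks entirely in ArenaBitVector methods (Copy, Intersect, SetBit, IsBitSet, Equal). A minimal sketch of the convergence idiom the hunk ends with; the helper name is illustrative, not the actual code:

    // Illustrative only. 'temp' accumulates facts within the block;
    // 'ending' is the block's recorded out-set from the previous pass.
    static bool PublishOutSet(ArenaBitVector* temp, ArenaBitVector* ending) {
      bool changed = !temp->Equal(ending);  // Converged when the sets match.
      if (changed) {
        ending->Copy(temp);                 // Publish and request another pass.
      }
      return changed;
    }
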
diff --git a/src/compiler/dex/portable/mir_to_gbc.cc b/src/compiler/dex/portable/mir_to_gbc.cc
index 6dcdfcf..6fccb47 100644
--- a/src/compiler/dex/portable/mir_to_gbc.cc
+++ b/src/compiler/dex/portable/mir_to_gbc.cc
@@ -49,7 +49,7 @@
 
 ::llvm::Value* MirConverter::GetLLVMValue(int s_reg)
 {
-  return reinterpret_cast< ::llvm::Value*>(GrowableListGetElement(&llvm_values_, s_reg));
+  return llvm_values_.Get(s_reg);
 }
 
 void MirConverter::SetVregOnValue(::llvm::Value* val, int s_reg)
@@ -74,7 +74,7 @@
   }
   placeholder->replaceAllUsesWith(val);
   val->takeName(placeholder);
-  llvm_values_.elem_list[s_reg] = reinterpret_cast<uintptr_t>(val);
+  llvm_values_.Put(s_reg, val);
   ::llvm::Instruction* inst = ::llvm::dyn_cast< ::llvm::Instruction>(placeholder);
   DCHECK(inst != NULL);
   inst->eraseFromParent();
@@ -1786,17 +1786,15 @@
             art::llvm::IntrinsicHelper::CatchTargets);
         ::llvm::Value* switch_key =
             irb_->CreateCall(intr, irb_->getInt32(mir->offset));
-        GrowableListIterator iter;
-        GrowableListIteratorInit(&bb->successor_block_list.blocks, &iter);
+        GrowableArray<SuccessorBlockInfo*>::Iterator iter(bb->successor_block_list.blocks);
         // New basic block to use for work half
         ::llvm::BasicBlock* work_bb =
             ::llvm::BasicBlock::Create(*context_, "", func_);
         ::llvm::SwitchInst* sw =
             irb_->CreateSwitch(switch_key, work_bb,
-                                     bb->successor_block_list.blocks.num_used);
+                                     bb->successor_block_list.blocks->Size());
         while (true) {
-          SuccessorBlockInfo *successor_block_info =
-              reinterpret_cast<SuccessorBlockInfo*>(GrowableListIteratorNext(&iter));
+          SuccessorBlockInfo *successor_block_info = iter.Next();
           if (successor_block_info == NULL) break;
           ::llvm::BasicBlock *target =
               GetLLVMBlock(successor_block_info->block->id);
@@ -1938,7 +1936,6 @@
 void MirConverter::MethodMIR2Bitcode()
 {
   InitIR();
-  CompilerInitGrowableList(cu_, &llvm_values_, mir_graph_->GetNumSSARegs());
 
   // Create the function
   CreateFunction();
@@ -1961,18 +1958,18 @@
     ::llvm::Value* val;
     RegLocation rl_temp = mir_graph_->reg_location_[i];
     if ((mir_graph_->SRegToVReg(i) < 0) || rl_temp.high_word) {
-      InsertGrowableList(cu_, &llvm_values_, 0);
+      llvm_values_.Insert(0);
     } else if ((i < cu_->num_regs) ||
                (i >= (cu_->num_regs + cu_->num_ins))) {
       ::llvm::Constant* imm_value = mir_graph_->reg_location_[i].wide ?
          irb_->getJLong(0) : irb_->getJInt(0);
       val = EmitConst(imm_value, mir_graph_->reg_location_[i]);
       val->setName(mir_graph_->GetSSAString(i));
-      InsertGrowableList(cu_, &llvm_values_, reinterpret_cast<uintptr_t>(val));
+      llvm_values_.Insert(val);
     } else {
       // Recover previously-created argument values
       ::llvm::Value* arg_val = arg_iter++;
-      InsertGrowableList(cu_, &llvm_values_, reinterpret_cast<uintptr_t>(arg_val));
+      llvm_values_.Insert(arg_val);
     }
   }
 
@@ -2051,8 +2048,9 @@
 }
 
 Backend* PortableCodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
+                               ArenaAllocator* const arena,
                                llvm::LlvmCompilationUnit* const llvm_compilation_unit) {
-  return new MirConverter(cu, mir_graph, llvm_compilation_unit);
+  return new MirConverter(cu, mir_graph, arena, llvm_compilation_unit);
 }
 
 }  // namespace art
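
With llvm_values_ typed as GrowableArray< ::llvm::Value*>, the uintptr_t round-trips above disappear. A minimal usage sketch under the interface this patch relies on (Insert appends, Get/Put index by position); 'val' is a placeholder:

    // Illustrative sketch of the typed container replacing GrowableList.
    GrowableArray< ::llvm::Value*> values(arena, mir_graph->GetNumSSARegs());
    values.Insert(val);                    // Append; storage grows via the arena.
    ::llvm::Value* first = values.Get(0);  // Typed read, no reinterpret_cast.
    values.Put(0, val);                    // Typed overwrite.
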
diff --git a/src/compiler/dex/portable/mir_to_gbc.h b/src/compiler/dex/portable/mir_to_gbc.h
index eb7069c..5088761 100644
--- a/src/compiler/dex/portable/mir_to_gbc.h
+++ b/src/compiler/dex/portable/mir_to_gbc.h
@@ -21,7 +21,6 @@
 #include "compiled_method.h"
 #include "compiler/dex/compiler_enums.h"
 #include "compiler/dex/compiler_ir.h"
-#include "compiler/dex/compiler_utility.h"
 #include "compiler/dex/backend.h"
 #include "compiler/llvm/llvm_compilation_unit.h"
 #include "safe_map.h"
@@ -38,13 +37,14 @@
 
 // Target-specific initialization.
 Backend* PortableCodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
+                               ArenaAllocator* const arena,
                                llvm::LlvmCompilationUnit* const llvm_compilation_unit);
 
 class MirConverter : public Backend {
 
   public:
     // TODO: flesh out and integrate into new world order.
-    MirConverter(CompilationUnit* cu, MIRGraph* mir_graph,
+    MirConverter(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena,
                  llvm::LlvmCompilationUnit* llvm_compilation_unit)
       : cu_(cu),
         mir_graph_(mir_graph),
@@ -59,8 +59,10 @@
         placeholder_bb_(NULL),
         entry_bb_(NULL),
         entry_target_bb_(NULL),
+        llvm_values_(arena, mir_graph->GetNumSSARegs()),
         temp_name_(0),
         current_dalvik_offset_(0) {
+      arena_ = arena;
       if (kIsDebugBuild) {
         cu->enable_debug |= (1 << kDebugVerifyBitcode);
       }
@@ -182,7 +184,7 @@
     ::llvm::BasicBlock* entry_bb_;
     ::llvm::BasicBlock* entry_target_bb_;
     std::string bitcode_filename_;
-    GrowableList llvm_values_;
+    GrowableArray< ::llvm::Value*> llvm_values_;
     int32_t temp_name_;
     SafeMap<int32_t, ::llvm::BasicBlock*> id_to_block_map_;  // block id -> llvm bb.
     int current_dalvik_offset_;
diff --git a/src/compiler/dex/quick/arm/call_arm.cc b/src/compiler/dex/quick/arm/call_arm.cc
index bb46e1f..32d4ed6 100644
--- a/src/compiler/dex/quick/arm/call_arm.cc
+++ b/src/compiler/dex/quick/arm/call_arm.cc
@@ -326,12 +326,14 @@
   }
   // Add the table to the list - we'll process it later
   SwitchTable *tab_rec =
-      static_cast<SwitchTable*>(NewMem(cu_, sizeof(SwitchTable), true, kAllocData));
+      static_cast<SwitchTable*>(arena_->NewMem(sizeof(SwitchTable), true,
+                                               ArenaAllocator::kAllocData));
   tab_rec->table = table;
   tab_rec->vaddr = current_dalvik_offset_;
   int size = table[1];
-  tab_rec->targets = static_cast<LIR**>(NewMem(cu_, size * sizeof(LIR*), true, kAllocLIR));
-  InsertGrowableList(cu_, &switch_tables_, reinterpret_cast<uintptr_t>(tab_rec));
+  tab_rec->targets = static_cast<LIR**>(arena_->NewMem(size * sizeof(LIR*), true,
+                                                       ArenaAllocator::kAllocLIR));
+  switch_tables_.Insert(tab_rec);
 
   // Get the switch value
   rl_src = LoadValue(rl_src, kCoreReg);
@@ -374,12 +376,14 @@
   }
   // Add the table to the list - we'll process it later
   SwitchTable *tab_rec =
-      static_cast<SwitchTable*>(NewMem(cu_, sizeof(SwitchTable), true, kAllocData));
+      static_cast<SwitchTable*>(arena_->NewMem(sizeof(SwitchTable), true,
+                                               ArenaAllocator::kAllocData));
   tab_rec->table = table;
   tab_rec->vaddr = current_dalvik_offset_;
   int size = table[1];
-  tab_rec->targets = static_cast<LIR**>(NewMem(cu_, size * sizeof(LIR*), true, kAllocLIR));
-  InsertGrowableList(cu_, &switch_tables_, reinterpret_cast<uintptr_t>(tab_rec));
+  tab_rec->targets =
+      static_cast<LIR**>(arena_->NewMem(size * sizeof(LIR*), true, ArenaAllocator::kAllocLIR));
+  switch_tables_.Insert(tab_rec);
 
   // Get the switch value
   rl_src = LoadValue(rl_src, kCoreReg);
@@ -427,14 +431,15 @@
   const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
   // Add the table to the list - we'll process it later
   FillArrayData *tab_rec =
-      static_cast<FillArrayData*>(NewMem(cu_, sizeof(FillArrayData), true, kAllocData));
+      static_cast<FillArrayData*>(arena_->NewMem(sizeof(FillArrayData), true,
+                                                 ArenaAllocator::kAllocData));
   tab_rec->table = table;
   tab_rec->vaddr = current_dalvik_offset_;
   uint16_t width = tab_rec->table[1];
   uint32_t size = tab_rec->table[2] | ((static_cast<uint32_t>(tab_rec->table[3])) << 16);
   tab_rec->size = (size * width) + 8;
 
-  InsertGrowableList(cu_, &fill_array_data_, reinterpret_cast<uintptr_t>(tab_rec));
+  fill_array_data_.Insert(tab_rec);
 
   // Making a call - use explicit registers
   FlushAllRegs();   /* Everything to home location */
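
Every allocation site in this file now follows the same two-step shape: ask the backend's arena for zero-filled storage, tagged with an allocation kind for the statistics table. Sketched here for reference (illustrative; condensed from the calls above):

    // Illustrative: the NewMem idiom used throughout this patch.
    SwitchTable* tab_rec = static_cast<SwitchTable*>(
        arena_->NewMem(sizeof(SwitchTable), true /* zero */,
                       ArenaAllocator::kAllocData));
    tab_rec->targets = static_cast<LIR**>(
        arena_->NewMem(size * sizeof(LIR*), true, ArenaAllocator::kAllocLIR));
    // Nothing is freed individually; arena blocks are reclaimed en masse.
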
diff --git a/src/compiler/dex/quick/arm/codegen_arm.h b/src/compiler/dex/quick/arm/codegen_arm.h
index df9451a..9e409e6 100644
--- a/src/compiler/dex/quick/arm/codegen_arm.h
+++ b/src/compiler/dex/quick/arm/codegen_arm.h
@@ -24,7 +24,7 @@
 class ArmMir2Lir : public Mir2Lir {
   public:
 
-    ArmMir2Lir(CompilationUnit* cu, MIRGraph* mir_graph);
+    ArmMir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena);
 
     // Required for target - codegen helpers.
     virtual bool SmallLiteralDivide(Instruction::Code dalvik_opcode, RegLocation rl_src,
diff --git a/src/compiler/dex/quick/arm/target_arm.cc b/src/compiler/dex/quick/arm/target_arm.cc
index 43bbb69..0a05a3a 100644
--- a/src/compiler/dex/quick/arm/target_arm.cc
+++ b/src/compiler/dex/quick/arm/target_arm.cc
@@ -505,7 +505,8 @@
   return ((lir->opcode == kThumbBUncond) || (lir->opcode == kThumb2BUncond));
 }
 
-ArmMir2Lir::ArmMir2Lir(CompilationUnit* cu, MIRGraph* mir_graph) : Mir2Lir(cu, mir_graph) {
+ArmMir2Lir::ArmMir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena)
+    : Mir2Lir(cu, mir_graph, arena) {
   // Sanity check - make sure encoding map lines up.
   for (int i = 0; i < kArmLast; i++) {
     if (ArmMir2Lir::EncodingMap[i].opcode != i) {
@@ -516,8 +517,9 @@
   }
 }
 
-Mir2Lir* ArmCodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph) {
-  return new ArmMir2Lir(cu, mir_graph);
+Mir2Lir* ArmCodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
+                          ArenaAllocator* const arena) {
+  return new ArmMir2Lir(cu, mir_graph, arena);
 }
 
 /*
@@ -555,13 +557,16 @@
   int num_temps = sizeof(core_temps)/sizeof(*core_temps);
   int num_fp_regs = sizeof(FpRegs)/sizeof(*FpRegs);
   int num_fp_temps = sizeof(fp_temps)/sizeof(*fp_temps);
-  reg_pool_ = static_cast<RegisterPool*>(NewMem(cu_, sizeof(*reg_pool_), true, kAllocRegAlloc));
+  reg_pool_ = static_cast<RegisterPool*>(arena_->NewMem(sizeof(*reg_pool_), true,
+                                                        ArenaAllocator::kAllocRegAlloc));
   reg_pool_->num_core_regs = num_regs;
   reg_pool_->core_regs = reinterpret_cast<RegisterInfo*>
-      (NewMem(cu_, num_regs * sizeof(*reg_pool_->core_regs), true, kAllocRegAlloc));
+      (arena_->NewMem(num_regs * sizeof(*reg_pool_->core_regs), true,
+                     ArenaAllocator::kAllocRegAlloc));
   reg_pool_->num_fp_regs = num_fp_regs;
   reg_pool_->FPRegs = static_cast<RegisterInfo*>
-      (NewMem(cu_, num_fp_regs * sizeof(*reg_pool_->FPRegs), true, kAllocRegAlloc));
+      (arena_->NewMem(num_fp_regs * sizeof(*reg_pool_->FPRegs), true,
+                      ArenaAllocator::kAllocRegAlloc));
   CompilerInitPool(reg_pool_->core_regs, core_regs, reg_pool_->num_core_regs);
   CompilerInitPool(reg_pool_->FPRegs, FpRegs, reg_pool_->num_fp_regs);
   // Keep special registers from being allocated
diff --git a/src/compiler/dex/quick/codegen_util.cc b/src/compiler/dex/quick/codegen_util.cc
index 91422ea..717d7ca 100644
--- a/src/compiler/dex/quick/codegen_util.cc
+++ b/src/compiler/dex/quick/codegen_util.cc
@@ -365,7 +365,7 @@
 LIR* Mir2Lir::RawLIR(int dalvik_offset, int opcode, int op0,
                      int op1, int op2, int op3, int op4, LIR* target)
 {
-  LIR* insn = static_cast<LIR*>(NewMem(cu_, sizeof(LIR), true, kAllocLIR));
+  LIR* insn = static_cast<LIR*>(arena_->NewMem(sizeof(LIR), true, ArenaAllocator::kAllocLIR));
   insn->dalvik_offset = dalvik_offset;
   insn->opcode = opcode;
   insn->operands[0] = op0;
@@ -499,7 +499,7 @@
 {
   /* Add the constant to the literal pool */
   if (constant_list_p) {
-    LIR* new_value = static_cast<LIR*>(NewMem(cu_, sizeof(LIR), true, kAllocData));
+    LIR* new_value =
+        static_cast<LIR*>(arena_->NewMem(sizeof(LIR), true, ArenaAllocator::kAllocData));
     new_value->operands[0] = value;
     new_value->next = *constant_list_p;
     *constant_list_p = new_value;
@@ -573,11 +573,9 @@
 /* Write the switch tables to the output stream */
 void Mir2Lir::InstallSwitchTables()
 {
-  GrowableListIterator iterator;
-  GrowableListIteratorInit(&switch_tables_, &iterator);
+  GrowableArray<SwitchTable*>::Iterator iterator(&switch_tables_);
   while (true) {
-    Mir2Lir::SwitchTable* tab_rec =
-      reinterpret_cast<Mir2Lir::SwitchTable*>(GrowableListIteratorNext( &iterator));
+    Mir2Lir::SwitchTable* tab_rec = iterator.Next();
     if (tab_rec == NULL) break;
     AlignBuffer(code_buffer_, tab_rec->offset);
     /*
@@ -633,11 +631,9 @@
 /* Write the fill array data to the output stream */
 void Mir2Lir::InstallFillArrayData()
 {
-  GrowableListIterator iterator;
-  GrowableListIteratorInit(&fill_array_data_, &iterator);
+  GrowableArray<FillArrayData*>::Iterator iterator(&fill_array_data_);
   while (true) {
-    Mir2Lir::FillArrayData *tab_rec =
-        reinterpret_cast<Mir2Lir::FillArrayData*>(GrowableListIteratorNext( &iterator));
+    Mir2Lir::FillArrayData *tab_rec = iterator.Next();
     if (tab_rec == NULL) break;
     AlignBuffer(code_buffer_, tab_rec->offset);
     for (int i = 0; i < (tab_rec->size + 1) / 2; i++) {
@@ -831,11 +827,9 @@
 
 int Mir2Lir::AssignSwitchTablesOffset(int offset)
 {
-  GrowableListIterator iterator;
-  GrowableListIteratorInit(&switch_tables_, &iterator);
+  GrowableArray<SwitchTable*>::Iterator iterator(&switch_tables_);
   while (true) {
-    Mir2Lir::SwitchTable *tab_rec =
-        reinterpret_cast<Mir2Lir::SwitchTable*>(GrowableListIteratorNext(&iterator));
+    Mir2Lir::SwitchTable *tab_rec = iterator.Next();
     if (tab_rec == NULL) break;
     tab_rec->offset = offset;
     if (tab_rec->table[0] == Instruction::kSparseSwitchSignature) {
@@ -851,11 +845,9 @@
 
 int Mir2Lir::AssignFillArrayDataOffset(int offset)
 {
-  GrowableListIterator iterator;
-  GrowableListIteratorInit(&fill_array_data_, &iterator);
+  GrowableArray<FillArrayData*>::Iterator iterator(&fill_array_data_);
   while (true) {
-    Mir2Lir::FillArrayData *tab_rec =
-        reinterpret_cast<Mir2Lir::FillArrayData*>(GrowableListIteratorNext(&iterator));
+    Mir2Lir::FillArrayData *tab_rec = iterator.Next();
     if (tab_rec == NULL) break;
     tab_rec->offset = offset;
     offset += tab_rec->size;
@@ -973,7 +965,7 @@
   if (it == boundary_map_.end()) {
     LOG(FATAL) << "Error: didn't find vaddr 0x" << std::hex << vaddr;
   }
-  LIR* new_label = static_cast<LIR*>(NewMem(cu_, sizeof(LIR), true, kAllocLIR));
+  LIR* new_label = static_cast<LIR*>(arena_->NewMem(sizeof(LIR), true, ArenaAllocator::kAllocLIR));
   new_label->dalvik_offset = vaddr;
   new_label->opcode = kPseudoCaseLabel;
   new_label->operands[0] = keyVal;
@@ -1007,11 +999,9 @@
 
 void Mir2Lir::ProcessSwitchTables()
 {
-  GrowableListIterator iterator;
-  GrowableListIteratorInit(&switch_tables_, &iterator);
+  GrowableArray<SwitchTable*>::Iterator iterator(&switch_tables_);
   while (true) {
-    Mir2Lir::SwitchTable *tab_rec =
-        reinterpret_cast<Mir2Lir::SwitchTable*>(GrowableListIteratorNext(&iterator));
+    Mir2Lir::SwitchTable *tab_rec = iterator.Next();
     if (tab_rec == NULL) break;
     if (tab_rec->table[0] == Instruction::kPackedSwitchSignature) {
       MarkPackedCaseLabels(tab_rec);
@@ -1123,15 +1113,23 @@
   return res;
 }
 
-Mir2Lir::Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph)
+// TODO: move to mir_to_lir.cc
+Mir2Lir::Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena)
     : literal_list_(NULL),
       method_literal_list_(NULL),
       code_literal_list_(NULL),
       cu_(cu),
       mir_graph_(mir_graph),
+      switch_tables_(arena, 4, kGrowableArraySwitchTables),
+      fill_array_data_(arena, 4, kGrowableArrayFillArrayData),
+      throw_launchpads_(arena, 2048, kGrowableArrayThrowLaunchPads),
+      suspend_launchpads_(arena, 4, kGrowableArraySuspendLaunchPads),
+      intrinsic_launchpads_(arena, 2048, kGrowableArrayMisc),
       data_offset_(0),
       total_size_(0),
       block_label_list_(NULL),
+      current_dalvik_offset_(0),
+      reg_pool_(NULL),
       live_sreg_(0),
       num_core_spills_(0),
       num_fp_spills_(0),
@@ -1141,15 +1139,10 @@
       first_lir_insn_(NULL),
       last_lir_insn_(NULL)
  {
-
-  CompilerInitGrowableList(cu_, &switch_tables_, 4, kListSwitchTables);
-  CompilerInitGrowableList(cu_, &fill_array_data_, 4, kListFillArrayData);
-  CompilerInitGrowableList(cu_, &throw_launchpads_, 2048, kListThrowLaunchPads);
-  CompilerInitGrowableList(cu_, &intrinsic_launchpads_, 4, kListMisc);
-  CompilerInitGrowableList(cu_, &suspend_launchpads_, 2048, kListSuspendLaunchPads);
+  arena_ = arena;
   promotion_map_ = static_cast<PromotionMap*>
-      (NewMem(cu_, (cu_->num_dalvik_registers  + cu_->num_compiler_temps + 1) *
-              sizeof(promotion_map_[0]), true, kAllocRegAlloc));
+      (arena_->NewMem((cu_->num_dalvik_registers  + cu_->num_compiler_temps + 1) *
+                      sizeof(promotion_map_[0]), true, ArenaAllocator::kAllocRegAlloc));
 }
 
 void Mir2Lir::Materialize() {
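
codegen_util.cc converts five hand-rolled GrowableListIterator loops to the nested Iterator type; all of them share one idiom, sketched below (illustrative; Next() yields NULL once the array is exhausted, which is what the NULL checks above rely on):

    // Illustrative: the iteration idiom after this patch.
    GrowableArray<SwitchTable*>::Iterator it(&switch_tables_);
    while (true) {
      SwitchTable* tab_rec = it.Next();  // NULL when all elements are consumed.
      if (tab_rec == NULL) break;
      // ... process tab_rec ...
    }
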
diff --git a/src/compiler/dex/quick/gen_common.cc b/src/compiler/dex/quick/gen_common.cc
index c13e797..d27f5c6 100644
--- a/src/compiler/dex/quick/gen_common.cc
+++ b/src/compiler/dex/quick/gen_common.cc
@@ -45,7 +45,7 @@
   LIR* tgt = RawLIR(0, kPseudoThrowTarget, kind, current_dalvik_offset_);
   LIR* branch = OpCondBranch(c_code, tgt);
   // Remember branch target - will process later
-  InsertGrowableList(cu_, &throw_launchpads_, reinterpret_cast<uintptr_t>(tgt));
+  throw_launchpads_.Insert(tgt);
   return branch;
 }
 
@@ -59,7 +59,7 @@
     branch = OpCmpImmBranch(c_code, reg, imm_val, tgt);
   }
   // Remember branch target - will process later
-  InsertGrowableList(cu_, &throw_launchpads_, reinterpret_cast<uintptr_t>(tgt));
+  throw_launchpads_.Insert(tgt);
   return branch;
 }
 
@@ -80,7 +80,7 @@
   LIR* tgt = RawLIR(0, kPseudoThrowTarget, kind, current_dalvik_offset_, reg1, reg2);
   LIR* branch = OpCmpBranch(c_code, reg1, reg2, tgt);
   // Remember branch target - will process later
-  InsertGrowableList(cu_, &throw_launchpads_, reinterpret_cast<uintptr_t>(tgt));
+  throw_launchpads_.Insert(tgt);
   return branch;
 }
 
@@ -520,13 +520,12 @@
 
 void Mir2Lir::HandleSuspendLaunchPads()
 {
-  LIR** suspend_label = reinterpret_cast<LIR**>(suspend_launchpads_.elem_list);
-  int num_elems = suspend_launchpads_.num_used;
+  int num_elems = suspend_launchpads_.Size();
   int helper_offset = ENTRYPOINT_OFFSET(pTestSuspendFromCode);
   for (int i = 0; i < num_elems; i++) {
     ResetRegPool();
     ResetDefTracking();
-    LIR* lab = suspend_label[i];
+    LIR* lab = suspend_launchpads_.Get(i);
     LIR* resume_lab = reinterpret_cast<LIR*>(lab->operands[0]);
     current_dalvik_offset_ = lab->operands[1];
     AppendLIR(lab);
@@ -538,12 +537,11 @@
 
 void Mir2Lir::HandleIntrinsicLaunchPads()
 {
-  LIR** intrinsic_label = reinterpret_cast<LIR**>(intrinsic_launchpads_.elem_list);
-  int num_elems = intrinsic_launchpads_.num_used;
+  int num_elems = intrinsic_launchpads_.Size();
   for (int i = 0; i < num_elems; i++) {
     ResetRegPool();
     ResetDefTracking();
-    LIR* lab = intrinsic_label[i];
+    LIR* lab = intrinsic_launchpads_.Get(i);
     CallInfo* info = reinterpret_cast<CallInfo*>(lab->operands[0]);
     current_dalvik_offset_ = info->offset;
     AppendLIR(lab);
@@ -558,12 +556,11 @@
 
 void Mir2Lir::HandleThrowLaunchPads()
 {
-  LIR** throw_label = reinterpret_cast<LIR**>(throw_launchpads_.elem_list);
-  int num_elems = throw_launchpads_.num_used;
+  int num_elems = throw_launchpads_.Size();
   for (int i = 0; i < num_elems; i++) {
     ResetRegPool();
     ResetDefTracking();
-    LIR* lab = throw_label[i];
+    LIR* lab = throw_launchpads_.Get(i);
     current_dalvik_offset_ = lab->operands[1];
     AppendLIR(lab);
     int func_offset = 0;
@@ -1685,7 +1682,7 @@
   LIR* target = RawLIR(current_dalvik_offset_, kPseudoSuspendTarget,
                        reinterpret_cast<uintptr_t>(ret_lab), current_dalvik_offset_);
   branch->target = target;
-  InsertGrowableList(cu_, &suspend_launchpads_, reinterpret_cast<uintptr_t>(target));
+  suspend_launchpads_.Insert(target);
 }
 
 /* Check if we need to check for pending suspend request */
@@ -1701,7 +1698,7 @@
              reinterpret_cast<uintptr_t>(target), current_dalvik_offset_);
   FlushAllRegs();
   OpUnconditionalBranch(launch_pad);
-  InsertGrowableList(cu_, &suspend_launchpads_, reinterpret_cast<uintptr_t>(launch_pad));
+  suspend_launchpads_.Insert(launch_pad);
 }
 
 }  // namespace art
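
The launchpad mechanics are unchanged by this patch, only the container: targets are recorded during the main pass and emitted afterwards. The pattern, sketched (illustrative; condensed from the code above):

    // Illustrative: deferred launchpad processing with the typed array.
    LIR* tgt = RawLIR(0, kPseudoThrowTarget, kind, current_dalvik_offset_);
    throw_launchpads_.Insert(tgt);            // Remember branch target.
    // ... later, after the main code-generation pass ...
    for (int i = 0; i < throw_launchpads_.Size(); i++) {
      LIR* lab = throw_launchpads_.Get(i);    // Typed retrieval, no casts.
      // ... emit the out-of-line throw sequence for lab ...
    }
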
diff --git a/src/compiler/dex/quick/gen_invoke.cc b/src/compiler/dex/quick/gen_invoke.cc
index 3e946f8..8003d9a 100644
--- a/src/compiler/dex/quick/gen_invoke.cc
+++ b/src/compiler/dex/quick/gen_invoke.cc
@@ -861,8 +861,7 @@
     if (range_check) {
       // Set up a launch pad to allow retry in case of bounds violation
       launch_pad = RawLIR(0, kPseudoIntrinsicRetry, reinterpret_cast<uintptr_t>(info));
-      InsertGrowableList(cu_, &intrinsic_launchpads_,
-                            reinterpret_cast<uintptr_t>(launch_pad));
+      intrinsic_launchpads_.Insert(launch_pad);
       OpRegReg(kOpCmp, rl_idx.low_reg, reg_max);
       FreeTemp(reg_max);
       OpCondBranch(kCondCs, launch_pad);
@@ -873,8 +872,7 @@
       LoadWordDisp(rl_obj.low_reg, count_offset, reg_max);
       // Set up a launch pad to allow retry in case of bounds violation
       launch_pad = RawLIR(0, kPseudoIntrinsicRetry, reinterpret_cast<uintptr_t>(info));
-      InsertGrowableList(cu_, &intrinsic_launchpads_,
-                            reinterpret_cast<uintptr_t>(launch_pad));
+      intrinsic_launchpads_.Insert(launch_pad);
       OpRegReg(kOpCmp, rl_idx.low_reg, reg_max);
       FreeTemp(reg_max);
       OpCondBranch(kCondCc, launch_pad);
@@ -1046,7 +1044,7 @@
   int r_tgt = (cu_->instruction_set != kX86) ? LoadHelper(ENTRYPOINT_OFFSET(pIndexOf)) : 0;
   GenNullCheck(rl_obj.s_reg_low, reg_ptr, info->opt_flags);
   LIR* launch_pad = RawLIR(0, kPseudoIntrinsicRetry, reinterpret_cast<uintptr_t>(info));
-  InsertGrowableList(cu_, &intrinsic_launchpads_, reinterpret_cast<uintptr_t>(launch_pad));
+  intrinsic_launchpads_.Insert(launch_pad);
   OpCmpImmBranch(kCondGt, reg_char, 0xFFFF, launch_pad);
   // NOTE: not a safepoint
   if (cu_->instruction_set != kX86) {
@@ -1085,7 +1083,7 @@
   GenNullCheck(rl_this.s_reg_low, reg_this, info->opt_flags);
   //TUNING: check if rl_cmp.s_reg_low is already null checked
   LIR* launch_pad = RawLIR(0, kPseudoIntrinsicRetry, reinterpret_cast<uintptr_t>(info));
-  InsertGrowableList(cu_, &intrinsic_launchpads_, reinterpret_cast<uintptr_t>(launch_pad));
+  intrinsic_launchpads_.Insert(launch_pad);
   OpCmpImmBranch(kCondEq, reg_cmp, 0, launch_pad);
   // NOTE: not a safepoint
   if (cu_->instruction_set != kX86) {
diff --git a/src/compiler/dex/quick/local_optimizations.cc b/src/compiler/dex/quick/local_optimizations.cc
index 695b12c..1cafce4 100644
--- a/src/compiler/dex/quick/local_optimizations.cc
+++ b/src/compiler/dex/quick/local_optimizations.cc
@@ -245,7 +245,8 @@
         DEBUG_OPT(dump_dependent_insn_pair(this_lir, check_lir, "REG CLOBBERED"));
         /* Only sink store instructions */
         if (sink_distance && !is_this_lir_load) {
-          LIR* new_store_lir = static_cast<LIR*>(NewMem(cu_, sizeof(LIR), true, kAllocLIR));
+          LIR* new_store_lir =
+              static_cast<LIR*>(arena_->NewMem(sizeof(LIR), true, ArenaAllocator::kAllocLIR));
           *new_store_lir = *this_lir;
           /*
            * Stop point found - insert *before* the check_lir
@@ -432,7 +433,8 @@
       /* Found a slot to hoist to */
       if (slot >= 0) {
         LIR* cur_lir = prev_inst_list[slot];
-        LIR* new_load_lir = static_cast<LIR*>(NewMem(cu_, sizeof(LIR), true, kAllocLIR));
+        LIR* new_load_lir =
+          static_cast<LIR*>(arena_->NewMem(sizeof(LIR), true, ArenaAllocator::kAllocLIR));
         *new_load_lir = *this_lir;
         /*
          * Insertion is guaranteed to succeed since check_lir
diff --git a/src/compiler/dex/quick/mips/call_mips.cc b/src/compiler/dex/quick/mips/call_mips.cc
index f73e602..b53d1e3 100644
--- a/src/compiler/dex/quick/mips/call_mips.cc
+++ b/src/compiler/dex/quick/mips/call_mips.cc
@@ -68,13 +68,14 @@
   }
   // Add the table to the list - we'll process it later
   SwitchTable *tab_rec =
-      static_cast<SwitchTable*>(NewMem(cu_, sizeof(SwitchTable), true, kAllocData));
+      static_cast<SwitchTable*>(arena_->NewMem(sizeof(SwitchTable), true,
+                                               ArenaAllocator::kAllocData));
   tab_rec->table = table;
   tab_rec->vaddr = current_dalvik_offset_;
   int elements = table[1];
   tab_rec->targets =
-      static_cast<LIR**>(NewMem(cu_, elements * sizeof(LIR*), true, kAllocLIR));
-  InsertGrowableList(cu_, &switch_tables_, reinterpret_cast<uintptr_t>(tab_rec));
+      static_cast<LIR**>(arena_->NewMem(elements * sizeof(LIR*), true, ArenaAllocator::kAllocLIR));
+  switch_tables_.Insert(tab_rec);
 
   // The table is composed of 8-byte key/disp pairs
   int byte_size = elements * 8;
@@ -148,12 +149,14 @@
   }
   // Add the table to the list - we'll process it later
   SwitchTable *tab_rec =
-      static_cast<SwitchTable*>(NewMem(cu_, sizeof(SwitchTable), true, kAllocData));
+      static_cast<SwitchTable*>(arena_->NewMem(sizeof(SwitchTable), true,
+                                               ArenaAllocator::kAllocData));
   tab_rec->table = table;
   tab_rec->vaddr = current_dalvik_offset_;
   int size = table[1];
-  tab_rec->targets = static_cast<LIR**>(NewMem(cu_, size * sizeof(LIR*), true, kAllocLIR));
-  InsertGrowableList(cu_, &switch_tables_, reinterpret_cast<uintptr_t>(tab_rec));
+  tab_rec->targets = static_cast<LIR**>(arena_->NewMem(size * sizeof(LIR*), true,
+                                                       ArenaAllocator::kAllocLIR));
+  switch_tables_.Insert(tab_rec);
 
   // Get the switch value
   rl_src = LoadValue(rl_src, kCoreReg);
@@ -228,14 +231,15 @@
   const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
   // Add the table to the list - we'll process it later
   FillArrayData *tab_rec =
-      reinterpret_cast<FillArrayData*>(NewMem(cu_, sizeof(FillArrayData), true, kAllocData));
+      static_cast<FillArrayData*>(arena_->NewMem(sizeof(FillArrayData), true,
+                                                 ArenaAllocator::kAllocData));
   tab_rec->table = table;
   tab_rec->vaddr = current_dalvik_offset_;
   uint16_t width = tab_rec->table[1];
   uint32_t size = tab_rec->table[2] | ((static_cast<uint32_t>(tab_rec->table[3])) << 16);
   tab_rec->size = (size * width) + 8;
 
-  InsertGrowableList(cu_, &fill_array_data_, reinterpret_cast<uintptr_t>(tab_rec));
+  fill_array_data_.Insert(tab_rec);
 
   // Making a call - use explicit registers
   FlushAllRegs();   /* Everything to home location */
diff --git a/src/compiler/dex/quick/mips/codegen_mips.h b/src/compiler/dex/quick/mips/codegen_mips.h
index f681eda..db262a8 100644
--- a/src/compiler/dex/quick/mips/codegen_mips.h
+++ b/src/compiler/dex/quick/mips/codegen_mips.h
@@ -25,7 +25,7 @@
 class MipsMir2Lir : public Mir2Lir {
   public:
 
-    MipsMir2Lir(CompilationUnit* cu, MIRGraph* mir_graph);
+    MipsMir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena);
 
     // Required for target - codegen utilities.
     virtual bool SmallLiteralDivide(Instruction::Code dalvik_opcode, RegLocation rl_src,
diff --git a/src/compiler/dex/quick/mips/target_mips.cc b/src/compiler/dex/quick/mips/target_mips.cc
index 8d342af..46a625e 100644
--- a/src/compiler/dex/quick/mips/target_mips.cc
+++ b/src/compiler/dex/quick/mips/target_mips.cc
@@ -488,13 +488,16 @@
   int num_temps = sizeof(core_temps)/sizeof(*core_temps);
   int num_fp_regs = sizeof(FpRegs)/sizeof(*FpRegs);
   int num_fp_temps = sizeof(fp_temps)/sizeof(*fp_temps);
-  reg_pool_ = static_cast<RegisterPool*>(NewMem(cu_, sizeof(*reg_pool_), true, kAllocRegAlloc));
+  reg_pool_ = static_cast<RegisterPool*>(arena_->NewMem(sizeof(*reg_pool_), true,
+                                                        ArenaAllocator::kAllocRegAlloc));
   reg_pool_->num_core_regs = num_regs;
   reg_pool_->core_regs = static_cast<RegisterInfo*>
-     (NewMem(cu_, num_regs * sizeof(*reg_pool_->core_regs), true, kAllocRegAlloc));
+     (arena_->NewMem(num_regs * sizeof(*reg_pool_->core_regs), true,
+                     ArenaAllocator::kAllocRegAlloc));
   reg_pool_->num_fp_regs = num_fp_regs;
   reg_pool_->FPRegs = static_cast<RegisterInfo*>
-      (NewMem(cu_, num_fp_regs * sizeof(*reg_pool_->FPRegs), true, kAllocRegAlloc));
+      (arena_->NewMem(num_fp_regs * sizeof(*reg_pool_->FPRegs), true,
+                      ArenaAllocator::kAllocRegAlloc));
   CompilerInitPool(reg_pool_->core_regs, core_regs, reg_pool_->num_core_regs);
   CompilerInitPool(reg_pool_->FPRegs, FpRegs, reg_pool_->num_fp_regs);
   // Keep special registers from being allocated
@@ -572,7 +575,8 @@
   return (lir->opcode == kMipsB);
 }
 
-MipsMir2Lir::MipsMir2Lir(CompilationUnit* cu, MIRGraph* mir_graph) : Mir2Lir(cu, mir_graph) {
+MipsMir2Lir::MipsMir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena)
+    : Mir2Lir(cu, mir_graph, arena) {
   for (int i = 0; i < kMipsLast; i++) {
     if (MipsMir2Lir::EncodingMap[i].opcode != i) {
       LOG(FATAL) << "Encoding order for " << MipsMir2Lir::EncodingMap[i].name
@@ -582,8 +586,9 @@
   }
 }
 
-Mir2Lir* MipsCodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph) {
-  return new MipsMir2Lir(cu, mir_graph);
+Mir2Lir* MipsCodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
+                           ArenaAllocator* const arena) {
+  return new MipsMir2Lir(cu, mir_graph, arena);
 }
 
 uint64_t MipsMir2Lir::GetTargetInstFlags(int opcode)
diff --git a/src/compiler/dex/quick/mir_to_lir.cc b/src/compiler/dex/quick/mir_to_lir.cc
index 1f50914..481078d 100644
--- a/src/compiler/dex/quick/mir_to_lir.cc
+++ b/src/compiler/dex/quick/mir_to_lir.cc
@@ -794,7 +794,7 @@
   BasicBlock* bb = NULL;
   for (int idx = 0; idx < num_reachable_blocks; idx++) {
-    // TODO: no direct access of growable lists.
-    int dfs_index = mir_graph_->GetDfsOrder()->elem_list[idx];
+    int dfs_index = mir_graph_->GetDfsOrder()->Get(idx);
     bb = mir_graph_->GetBasicBlock(dfs_index);
     if (bb->block_type == kDalvikByteCode) {
       break;
@@ -821,7 +821,8 @@
 {
   // Hold the labels of each block.
   block_label_list_ =
-      static_cast<LIR*>(NewMem(cu_, sizeof(LIR) * mir_graph_->GetNumBlocks(), true, kAllocLIR));
+      static_cast<LIR*>(arena_->NewMem(sizeof(LIR) * mir_graph_->GetNumBlocks(), true,
+                                       ArenaAllocator::kAllocLIR));
 
   PreOrderDfsIterator iter(mir_graph_, false /* not iterative */);
   for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
diff --git a/src/compiler/dex/quick/mir_to_lir.h b/src/compiler/dex/quick/mir_to_lir.h
index 69ebc7e..270c883 100644
--- a/src/compiler/dex/quick/mir_to_lir.h
+++ b/src/compiler/dex/quick/mir_to_lir.h
@@ -21,8 +21,9 @@
 #include "compiled_method.h"
 #include "compiler/dex/compiler_enums.h"
 #include "compiler/dex/compiler_ir.h"
-#include "compiler/dex/compiler_utility.h"
 #include "compiler/dex/backend.h"
+#include "compiler/dex/growable_array.h"
+#include "compiler/dex/arena_allocator.h"
 #include "safe_map.h"
 
 namespace art {
@@ -124,9 +125,12 @@
 };
 
 // Target-specific initialization.
-Mir2Lir* ArmCodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph);
-Mir2Lir* MipsCodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph);
-Mir2Lir* X86CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph);
+Mir2Lir* ArmCodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
+                          ArenaAllocator* const arena);
+Mir2Lir* MipsCodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
+                           ArenaAllocator* const arena);
+Mir2Lir* X86CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
+                          ArenaAllocator* const arena);
 
 // Utility macros to traverse the LIR list.
 #define NEXT_LIR(lir) (lir->next)
@@ -684,7 +688,7 @@
     LIR* code_literal_list_;                   // Code literals requiring patching.
 
   protected:
-    Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph);
+    Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena);
 
     CompilationUnit* GetCompilationUnit() {
       return cu_;
@@ -692,11 +696,11 @@
 
     CompilationUnit* const cu_;
     MIRGraph* const mir_graph_;
-    GrowableList switch_tables_;
-    GrowableList fill_array_data_;
-    GrowableList throw_launchpads_;
-    GrowableList suspend_launchpads_;
-    GrowableList intrinsic_launchpads_;
+    GrowableArray<SwitchTable*> switch_tables_;
+    GrowableArray<FillArrayData*> fill_array_data_;
+    GrowableArray<LIR*> throw_launchpads_;
+    GrowableArray<LIR*> suspend_launchpads_;
+    GrowableArray<LIR*> intrinsic_launchpads_;
     SafeMap<unsigned int, LIR*> boundary_map_; // boundary lookup cache.
     /*
      * Holds mapping from native PC to dex PC for safepoints where we may deoptimize.
@@ -742,6 +746,7 @@
     unsigned int fp_spill_mask_;
     LIR* first_lir_insn_;
     LIR* last_lir_insn_;
+    ArenaAllocator* arena_;
 
 };  // Class Mir2Lir
 
diff --git a/src/compiler/dex/quick/ralloc_util.cc b/src/compiler/dex/quick/ralloc_util.cc
index a6b8793..dd38914 100644
--- a/src/compiler/dex/quick/ralloc_util.cc
+++ b/src/compiler/dex/quick/ralloc_util.cc
@@ -18,7 +18,6 @@
 
 #include "compiler/dex/compiler_ir.h"
 #include "compiler/dex/compiler_internals.h"
-#include "compiler/dex/compiler_utility.h"
 
 namespace art {
 
@@ -1056,10 +1055,12 @@
    * TUNING: replace with linear scan once we have the ability
    * to describe register live ranges for GC.
    */
-  RefCounts *core_regs = static_cast<RefCounts*>(NewMem(cu_, sizeof(RefCounts) * num_regs,
-                                                       true, kAllocRegAlloc));
-  RefCounts *FpRegs = static_cast<RefCounts *>(NewMem(cu_, sizeof(RefCounts) * num_regs,
-                                                      true, kAllocRegAlloc));
+  RefCounts *core_regs =
+      static_cast<RefCounts*>(arena_->NewMem(sizeof(RefCounts) * num_regs, true,
+                                             ArenaAllocator::kAllocRegAlloc));
+  RefCounts *FpRegs =
+      static_cast<RefCounts *>(arena_->NewMem(sizeof(RefCounts) * num_regs, true,
+                                              ArenaAllocator::kAllocRegAlloc));
   // Set ssa names for original Dalvik registers
   for (int i = 0; i < dalvik_regs; i++) {
     core_regs[i].s_reg = FpRegs[i].s_reg = i;
@@ -1069,7 +1070,7 @@
   FpRegs[dalvik_regs].s_reg = mir_graph_->GetMethodSReg();  // For consistency
   // Set ssa names for compiler_temps
   for (int i = 1; i <= cu_->num_compiler_temps; i++) {
-    CompilerTemp* ct = reinterpret_cast<CompilerTemp*>(mir_graph_->compiler_temps_.elem_list[i]);
+    CompilerTemp* ct = mir_graph_->compiler_temps_.Get(i);
     core_regs[dalvik_regs + i].s_reg = ct->s_reg;
     FpRegs[dalvik_regs + i].s_reg = ct->s_reg;
   }
diff --git a/src/compiler/dex/quick/x86/call_x86.cc b/src/compiler/dex/quick/x86/call_x86.cc
index 6b215f2..614a72d 100644
--- a/src/compiler/dex/quick/x86/call_x86.cc
+++ b/src/compiler/dex/quick/x86/call_x86.cc
@@ -76,12 +76,14 @@
   }
   // Add the table to the list - we'll process it later
   SwitchTable *tab_rec =
-      static_cast<SwitchTable *>(NewMem(cu_, sizeof(SwitchTable), true, kAllocData));
+      static_cast<SwitchTable *>(arena_->NewMem(sizeof(SwitchTable), true,
+                                                ArenaAllocator::kAllocData));
   tab_rec->table = table;
   tab_rec->vaddr = current_dalvik_offset_;
   int size = table[1];
-  tab_rec->targets = static_cast<LIR**>(NewMem(cu_, size * sizeof(LIR*), true, kAllocLIR));
-  InsertGrowableList(cu_, &switch_tables_, reinterpret_cast<uintptr_t>(tab_rec));
+  tab_rec->targets = static_cast<LIR**>(arena_->NewMem(size * sizeof(LIR*), true,
+                                                       ArenaAllocator::kAllocLIR));
+  switch_tables_.Insert(tab_rec);
 
   // Get the switch value
   rl_src = LoadValue(rl_src, kCoreReg);
@@ -132,14 +134,15 @@
   const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
   // Add the table to the list - we'll process it later
   FillArrayData *tab_rec =
-      static_cast<FillArrayData*>(NewMem(cu_, sizeof(FillArrayData), true, kAllocData));
+      static_cast<FillArrayData*>(arena_->NewMem(sizeof(FillArrayData), true,
+                                                 ArenaAllocator::kAllocData));
   tab_rec->table = table;
   tab_rec->vaddr = current_dalvik_offset_;
   uint16_t width = tab_rec->table[1];
   uint32_t size = tab_rec->table[2] | ((static_cast<uint32_t>(tab_rec->table[3])) << 16);
   tab_rec->size = (size * width) + 8;
 
-  InsertGrowableList(cu_, &fill_array_data_, reinterpret_cast<uintptr_t>(tab_rec));
+  fill_array_data_.Insert(tab_rec);
 
   // Making a call - use explicit registers
   FlushAllRegs();   /* Everything to home location */
@@ -251,7 +254,7 @@
     OpRegThreadMem(kOpCmp, rX86_SP, Thread::StackEndOffset().Int32Value());
     OpCondBranch(kCondUlt, tgt);
     // Remember branch target - will process later
-    InsertGrowableList(cu_, &throw_launchpads_, reinterpret_cast<uintptr_t>(tgt));
+    throw_launchpads_.Insert(tgt);
   }
 
   FlushIns(ArgLocs, rl_method);
diff --git a/src/compiler/dex/quick/x86/codegen_x86.h b/src/compiler/dex/quick/x86/codegen_x86.h
index 93b6839..99e5148 100644
--- a/src/compiler/dex/quick/x86/codegen_x86.h
+++ b/src/compiler/dex/quick/x86/codegen_x86.h
@@ -25,7 +25,7 @@
 class X86Mir2Lir : public Mir2Lir {
   public:
 
-    X86Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph);
+    X86Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena);
 
     // Required for target - codegen helpers.
     virtual bool SmallLiteralDivide(Instruction::Code dalvik_opcode, RegLocation rl_src,
diff --git a/src/compiler/dex/quick/x86/int_x86.cc b/src/compiler/dex/quick/x86/int_x86.cc
index 9c72ad9..0430778 100644
--- a/src/compiler/dex/quick/x86/int_x86.cc
+++ b/src/compiler/dex/quick/x86/int_x86.cc
@@ -32,7 +32,7 @@
   OpRegMem(kOpCmp, reg1, base, offset);
   LIR* branch = OpCondBranch(c_code, tgt);
   // Remember branch target - will process later
-  InsertGrowableList(cu_, &throw_launchpads_, reinterpret_cast<uintptr_t>(tgt));
+  throw_launchpads_.Insert(tgt);
   return branch;
 }
 
diff --git a/src/compiler/dex/quick/x86/target_x86.cc b/src/compiler/dex/quick/x86/target_x86.cc
index 20074f1..e6a49f8 100644
--- a/src/compiler/dex/quick/x86/target_x86.cc
+++ b/src/compiler/dex/quick/x86/target_x86.cc
@@ -458,15 +458,16 @@
   int num_temps = sizeof(core_temps)/sizeof(*core_temps);
   int num_fp_regs = sizeof(FpRegs)/sizeof(*FpRegs);
   int num_fp_temps = sizeof(fp_temps)/sizeof(*fp_temps);
-  reg_pool_ = static_cast<RegisterPool*>(NewMem(cu_, sizeof(*reg_pool_), true, kAllocRegAlloc));
+  reg_pool_ = static_cast<RegisterPool*>(arena_->NewMem(sizeof(*reg_pool_), true,
+                                                        ArenaAllocator::kAllocRegAlloc));
   reg_pool_->num_core_regs = num_regs;
   reg_pool_->core_regs =
-      static_cast<RegisterInfo*>(NewMem(cu_, num_regs * sizeof(*reg_pool_->core_regs),
-                                             true, kAllocRegAlloc));
+      static_cast<RegisterInfo*>(arena_->NewMem(num_regs * sizeof(*reg_pool_->core_regs), true,
+                                                ArenaAllocator::kAllocRegAlloc));
   reg_pool_->num_fp_regs = num_fp_regs;
   reg_pool_->FPRegs =
-      static_cast<RegisterInfo *>(NewMem(cu_, num_fp_regs * sizeof(*reg_pool_->FPRegs),
-                                              true, kAllocRegAlloc));
+      static_cast<RegisterInfo *>(arena_->NewMem(num_fp_regs * sizeof(*reg_pool_->FPRegs), true,
+                                                 ArenaAllocator::kAllocRegAlloc));
   CompilerInitPool(reg_pool_->core_regs, core_regs, reg_pool_->num_core_regs);
   CompilerInitPool(reg_pool_->FPRegs, FpRegs, reg_pool_->num_fp_regs);
   // Keep special registers from being allocated
@@ -528,7 +529,8 @@
   return (lir->opcode == kX86Jmp8 || lir->opcode == kX86Jmp32);
 }
 
-X86Mir2Lir::X86Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph) : Mir2Lir(cu, mir_graph) {
+X86Mir2Lir::X86Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena)
+    : Mir2Lir(cu, mir_graph, arena) {
   for (int i = 0; i < kX86Last; i++) {
     if (X86Mir2Lir::EncodingMap[i].opcode != i) {
       LOG(FATAL) << "Encoding order for " << X86Mir2Lir::EncodingMap[i].name
@@ -538,8 +540,9 @@
   }
 }
 
-Mir2Lir* X86CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph) {
-  return new X86Mir2Lir(cu, mir_graph);
+Mir2Lir* X86CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
+                          ArenaAllocator* const arena) {
+  return new X86Mir2Lir(cu, mir_graph, arena);
 }
 
 // Not used in x86
diff --git a/src/compiler/dex/ssa_transformation.cc b/src/compiler/dex/ssa_transformation.cc
index d8341e3..02982e5 100644
--- a/src/compiler/dex/ssa_transformation.cc
+++ b/src/compiler/dex/ssa_transformation.cc
@@ -37,12 +37,9 @@
     res = NeedsVisit(bb->taken);
     if (res == NULL) {
       if (bb->successor_block_list.block_list_type != kNotUsed) {
-        GrowableListIterator iterator;
-        GrowableListIteratorInit(&bb->successor_block_list.blocks,
-                                    &iterator);
+        GrowableArray<SuccessorBlockInfo*>::Iterator iterator(bb->successor_block_list.blocks);
         while (true) {
-          SuccessorBlockInfo *sbi = reinterpret_cast<SuccessorBlockInfo*>
-              (GrowableListIteratorNext(&iterator));
+          SuccessorBlockInfo *sbi = iterator.Next();
           if (sbi == NULL) break;
           res = NeedsVisit(sbi->block);
           if (res != NULL) break;
@@ -57,7 +54,7 @@
 {
   block->visited = true;
   /* Enqueue the pre_order block id */
-  InsertGrowableList(cu_, &dfs_order_, block->id);
+  dfs_order_->Insert(block->id);
 }
 
 void MIRGraph::RecordDFSOrders(BasicBlock* block)
@@ -73,8 +70,8 @@
       succ.push_back(next_successor);
       continue;
     }
-    curr->dfs_id = dfs_post_order_.num_used;
-    InsertGrowableList(cu_, &dfs_post_order_, curr->id);
+    curr->dfs_id = dfs_post_order_->Size();
+    dfs_post_order_->Insert(curr->id);
     succ.pop_back();
   }
 }
@@ -83,19 +80,19 @@
 void MIRGraph::ComputeDFSOrders()
 {
   /* Initialize or reset the DFS pre_order list */
-  if (dfs_order_.elem_list == NULL) {
-    CompilerInitGrowableList(cu_, &dfs_order_, GetNumBlocks(), kListDfsOrder);
+  if (dfs_order_ == NULL) {
+    dfs_order_ = new (arena_) GrowableArray<int>(arena_, GetNumBlocks(), kGrowableArrayDfsOrder);
   } else {
     /* Just reset the used length on the counter */
-    dfs_order_.num_used = 0;
+    dfs_order_->Reset();
   }
 
   /* Initialize or reset the DFS post_order list */
-  if (dfs_post_order_.elem_list == NULL) {
-    CompilerInitGrowableList(cu_, &dfs_post_order_, GetNumBlocks(), kListDfsPostOrder);
+  if (dfs_post_order_ == NULL) {
+    dfs_post_order_ =
+        new (arena_) GrowableArray<int>(arena_, GetNumBlocks(), kGrowableArrayDfsPostOrder);
   } else {
     /* Just reset the used length on the counter */
-    dfs_post_order_.num_used = 0;
+    dfs_post_order_->Reset();
   }
 
   // Reset visited flags from all nodes
@@ -106,7 +103,7 @@
   // Record dfs orders
   RecordDFSOrders(GetEntryBlock());
 
-  num_reachable_blocks_ = dfs_order_.num_used;
+  num_reachable_blocks_ = dfs_order_->Size();
 }
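
The `new (arena_) GrowableArray<int>(...)` form presumes a placement operator new that draws from the arena; its declaration is not part of this excerpt. A plausible shape, stated as an assumption rather than the actual code (struct name and kind tag are guesses):

    // Assumption, for illustration only: a hook like this makes
    // `new (arena_) T(...)` allocate from the arena. The real declarations
    // live with GrowableArray/ArenaBitVector, outside this excerpt.
    struct ArenaObject {  // hypothetical name
      static void* operator new(size_t size, ArenaAllocator* arena) {
        return arena->NewMem(size, true, ArenaAllocator::kAllocMisc);  // kind assumed
      }
      static void operator delete(void*) {}  // arena memory is reclaimed in bulk
    };
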
 
 /*
@@ -117,14 +114,12 @@
 {
   if (bb->data_flow_info == NULL) return false;
 
-  ArenaBitVectorIterator iterator;
-
-  BitVectorIteratorInit(bb->data_flow_info->def_v, &iterator);
+  ArenaBitVector::Iterator iterator(bb->data_flow_info->def_v);
   while (true) {
-    int idx = BitVectorIteratorNext(&iterator);
+    int idx = iterator.Next();
     if (idx == -1) break;
     /* Block bb defines register idx */
-    SetBit(cu_, def_block_matrix_[idx], bb->id);
+    def_block_matrix_[idx]->SetBit(bb->id);
   }
   return true;
 }
@@ -134,12 +129,14 @@
   int num_registers = cu_->num_dalvik_registers;
   /* Allocate num_dalvik_registers bit vector pointers */
   def_block_matrix_ = static_cast<ArenaBitVector**>
-      (NewMem(cu_, sizeof(ArenaBitVector *) * num_registers, true, kAllocDFInfo));
+      (arena_->NewMem(sizeof(ArenaBitVector *) * num_registers, true,
+                      ArenaAllocator::kAllocDFInfo));
   int i;
 
   /* Initialize num_register vectors with num_blocks bits each */
   for (i = 0; i < num_registers; i++) {
-    def_block_matrix_[i] = AllocBitVector(cu_, GetNumBlocks(), false, kBitMapBMatrix);
+    def_block_matrix_[i] =
+        new (arena_) ArenaBitVector(arena_, GetNumBlocks(), false, kBitMapBMatrix);
   }
   AllNodesIterator iter(this, false /* not iterative */);
   for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
@@ -157,30 +154,29 @@
   int num_regs = cu_->num_dalvik_registers;
   int in_reg = num_regs - cu_->num_ins;
   for (; in_reg < num_regs; in_reg++) {
-    SetBit(cu_, def_block_matrix_[in_reg], GetEntryBlock()->id);
+    def_block_matrix_[in_reg]->SetBit(GetEntryBlock()->id);
   }
 }
 
 /* Compute the post-order traversal of the CFG */
 void MIRGraph::ComputeDomPostOrderTraversal(BasicBlock* bb)
 {
-  ArenaBitVectorIterator bv_iterator;
-  BitVectorIteratorInit(bb->i_dominated, &bv_iterator);
+  ArenaBitVector::Iterator bv_iterator(bb->i_dominated);
 
   /* Iterate through the dominated blocks first */
   while (true) {
-    //TUNING: hot call to BitVectorIteratorNext
+    //TUNING: hot call to Next().
-    int bb_idx = BitVectorIteratorNext(&bv_iterator);
+    int bb_idx = bv_iterator.Next();
     if (bb_idx == -1) break;
     BasicBlock* dominated_bb = GetBasicBlock(bb_idx);
     ComputeDomPostOrderTraversal(dominated_bb);
   }
 
   /* Enter the current block id */
-  InsertGrowableList(cu_, &dom_post_order_traversal_, bb->id);
+  dom_post_order_traversal_->Insert(bb->id);
 
   /* hacky loop detection */
-  if (bb->taken && IsBitSet(bb->dominators, bb->taken->id)) {
+  if (bb->taken && bb->dominators->IsBitSet(bb->taken->id)) {
     attributes_ |= METHOD_HAS_LOOP;
   }
 }
@@ -195,7 +191,7 @@
   if (succ_bb->i_dom != dom_bb &&
     succ_bb->block_type == kDalvikByteCode &&
     succ_bb->hidden == false) {
-    SetBit(cu_, dom_bb->dom_frontier, succ_bb->id);
+    dom_bb->dom_frontier->SetBit(succ_bb->id);
   }
 }
 
@@ -210,12 +206,9 @@
     CheckForDominanceFrontier(bb, bb->fall_through);
   }
   if (bb->successor_block_list.block_list_type != kNotUsed) {
-    GrowableListIterator iterator;
-    GrowableListIteratorInit(&bb->successor_block_list.blocks,
-                                  &iterator);
+    GrowableArray<SuccessorBlockInfo*>::Iterator iterator(bb->successor_block_list.blocks);
       while (true) {
-        SuccessorBlockInfo *successor_block_info =
-            reinterpret_cast<SuccessorBlockInfo*>(GrowableListIteratorNext(&iterator));
+        SuccessorBlockInfo *successor_block_info = iterator.Next();
         if (successor_block_info == NULL) break;
         BasicBlock* succ_bb = successor_block_info->block;
         CheckForDominanceFrontier(bb, succ_bb);
@@ -223,18 +216,16 @@
   }
 
   /* Calculate DF_up */
-  ArenaBitVectorIterator bv_iterator;
-  BitVectorIteratorInit(bb->i_dominated, &bv_iterator);
+  ArenaBitVector::Iterator bv_iterator(bb->i_dominated);
   while (true) {
-    //TUNING: hot call to BitVectorIteratorNext
+    //TUNING: hot call to Next().
-    int dominated_idx = BitVectorIteratorNext(&bv_iterator);
+    int dominated_idx = bv_iterator.Next();
     if (dominated_idx == -1) break;
     BasicBlock* dominated_bb = GetBasicBlock(dominated_idx);
-    ArenaBitVectorIterator df_iterator;
-    BitVectorIteratorInit(dominated_bb->dom_frontier, &df_iterator);
+    ArenaBitVector::Iterator df_iterator(dominated_bb->dom_frontier);
     while (true) {
-      //TUNING: hot call to BitVectorIteratorNext
+      //TUNING: hot call to Next().
-      int df_up_idx = BitVectorIteratorNext(&df_iterator);
+      int df_up_idx = df_iterator.Next();
       if (df_up_idx == -1) break;
       BasicBlock* df_up_block = GetBasicBlock(df_up_idx);
       CheckForDominanceFrontier(bb, df_up_block);
@@ -245,29 +236,26 @@
 }
 
 /* Worker function for initializing domination-related data structures */
-bool MIRGraph::InitializeDominationInfo(BasicBlock* bb)
+void MIRGraph::InitializeDominationInfo(BasicBlock* bb)
 {
   int num_total_blocks = GetBasicBlockListCount();
 
   if (bb->dominators == NULL ) {
-    bb->dominators = AllocBitVector(cu_, num_total_blocks,
-                                       false /* expandable */,
-                                       kBitMapDominators);
-    bb->i_dominated = AllocBitVector(cu_, num_total_blocks,
-                                       false /* expandable */,
-                                       kBitMapIDominated);
-    bb->dom_frontier = AllocBitVector(cu_, num_total_blocks,
-                                        false /* expandable */,
-                                        kBitMapDomFrontier);
+    bb->dominators = new (arena_) ArenaBitVector(arena_, num_total_blocks,
+                                                 false /* expandable */, kBitMapDominators);
+    bb->i_dominated = new (arena_) ArenaBitVector(arena_, num_total_blocks,
+                                                  false /* expandable */, kBitMapIDominated);
+    bb->dom_frontier = new (arena_) ArenaBitVector(arena_, num_total_blocks,
+                                                   false /* expandable */, kBitMapDomFrontier);
   } else {
-    ClearAllBits(bb->dominators);
-    ClearAllBits(bb->i_dominated);
-    ClearAllBits(bb->dom_frontier);
+    bb->dominators->ClearAllBits();
+    bb->i_dominated->ClearAllBits();
+    bb->dom_frontier->ClearAllBits();
   }
   /* Set all bits in the dominator vector */
-  SetInitialBits(bb->dominators, num_total_blocks);
+  bb->dominators->SetInitialBits(num_total_blocks);
 
-  return true;
 }
 
 /*
@@ -293,20 +281,18 @@
 /* Worker function to compute each block's immediate dominator */
 bool MIRGraph::ComputeblockIDom(BasicBlock* bb)
 {
-  GrowableListIterator iter;
-  int idom = -1;
-
   /* Special-case entry block */
   if (bb == GetEntryBlock()) {
     return false;
   }
 
   /* Iterate through the predecessors */
-  GrowableListIteratorInit(bb->predecessors, &iter);
+  GrowableArray<BasicBlock*>::Iterator iter(bb->predecessors);
 
   /* Find the first processed predecessor */
+  int idom = -1;
   while (true) {
-    BasicBlock* pred_bb = reinterpret_cast<BasicBlock*>(GrowableListIteratorNext(&iter));
+    BasicBlock* pred_bb = iter.Next();
     CHECK(pred_bb != NULL);
     if (i_dom_list_[pred_bb->dfs_id] != NOTVISITED) {
       idom = pred_bb->dfs_id;
@@ -316,7 +302,7 @@
 
   /* Scan the rest of the predecessors */
   while (true) {
-      BasicBlock* pred_bb = reinterpret_cast<BasicBlock*>(GrowableListIteratorNext(&iter));
+      BasicBlock* pred_bb = iter.Next();
       if (!pred_bb) break;
       if (i_dom_list_[pred_bb->dfs_id] == NOTVISITED) {
         continue;
@@ -339,11 +325,11 @@
 bool MIRGraph::ComputeBlockDominators(BasicBlock* bb)
 {
   if (bb == GetEntryBlock()) {
-    ClearAllBits(bb->dominators);
+    bb->dominators->ClearAllBits();
   } else {
-    CopyBitVector(bb->dominators, bb->i_dom->dominators);
+    bb->dominators->Copy(bb->i_dom->dominators);
   }
-  SetBit(cu_, bb->dominators, bb->id);
+  bb->dominators->SetBit(bb->id);
   return false;
 }
 
@@ -352,11 +338,11 @@
   if (bb != GetEntryBlock()) {
     int idom_dfs_idx = i_dom_list_[bb->dfs_id];
     DCHECK_NE(idom_dfs_idx, NOTVISITED);
-    int i_dom_idx = dfs_post_order_.elem_list[idom_dfs_idx];
+    int i_dom_idx = dfs_post_order_->Get(idom_dfs_idx);
     BasicBlock* i_dom = GetBasicBlock(i_dom_idx);
     bb->i_dom = i_dom;
     /* Add bb to the i_dominated set of the immediate dominator block */
-    SetBit(cu_, i_dom->i_dominated, bb->id);
+    i_dom->i_dominated->SetBit(bb->id);
   }
   return false;
 }
@@ -375,8 +361,8 @@
 
   /* Initialize & clear i_dom_list */
   if (i_dom_list_ == NULL) {
-    i_dom_list_ = static_cast<int*>(NewMem(cu_, sizeof(int) * num_reachable_blocks,
-                                               false, kAllocDFInfo));
+    i_dom_list_ = static_cast<int*>(arena_->NewMem(sizeof(int) * num_reachable_blocks, false,
+                                                   ArenaAllocator::kAllocDFInfo));
   }
   for (int i = 0; i < num_reachable_blocks; i++) {
     i_dom_list_[i] = NOTVISITED;
@@ -394,13 +380,14 @@
   }
 
   /* Set the dominator for the root node */
-  ClearAllBits(GetEntryBlock()->dominators);
-  SetBit(cu_, GetEntryBlock()->dominators, GetEntryBlock()->id);
+  GetEntryBlock()->dominators->ClearAllBits();
+  GetEntryBlock()->dominators->SetBit(GetEntryBlock()->id);
 
   if (temp_block_v_ == NULL) {
-    temp_block_v_ = AllocBitVector(cu_, num_total_blocks, false /* expandable */, kBitMapTmpBlockV);
+    temp_block_v_ = new (arena_) ArenaBitVector(arena_, num_total_blocks,
+                                                false /* expandable */, kBitMapTmpBlockV);
   } else {
-    ClearAllBits(temp_block_v_);
+    temp_block_v_->ClearAllBits();
   }
   GetEntryBlock()->i_dom = NULL;
 
@@ -418,15 +405,15 @@
    * Now go ahead and compute the post order traversal based on the
    * i_dominated sets.
    */
-  if (dom_post_order_traversal_.elem_list == NULL) {
-    CompilerInitGrowableList(cu_, &dom_post_order_traversal_,
-                        num_reachable_blocks, kListDomPostOrderTraversal);
+  if (dom_post_order_traversal_ == NULL) {
+    dom_post_order_traversal_ =
+        new (arena_) GrowableArray<int>(arena_, num_reachable_blocks,
+                                        kGrowableArrayDomPostOrderTraversal);
   } else {
-    dom_post_order_traversal_.num_used = 0;
+    dom_post_order_traversal_->Reset();
   }
 
   ComputeDomPostOrderTraversal(GetEntryBlock());
-  DCHECK_EQ(dom_post_order_traversal_.num_used, static_cast<unsigned>(num_reachable_blocks_));
+  DCHECK_EQ(dom_post_order_traversal_->Size(), static_cast<unsigned>(num_reachable_blocks_));
 
   /* Now compute the dominance frontier for each block */
   PostOrderDOMIterator iter5(this, false /* not iterative */);
@@ -442,16 +429,16 @@
 void MIRGraph::ComputeSuccLineIn(ArenaBitVector* dest, const ArenaBitVector* src1,
                                  const ArenaBitVector* src2)
 {
-  if (dest->storage_size != src1->storage_size ||
-    dest->storage_size != src2->storage_size ||
-    dest->expandable != src1->expandable ||
-    dest->expandable != src2->expandable) {
+  if (dest->GetStorageSize() != src1->GetStorageSize() ||
+      dest->GetStorageSize() != src2->GetStorageSize() ||
+      dest->IsExpandable() != src1->IsExpandable() ||
+      dest->IsExpandable() != src2->IsExpandable()) {
     LOG(FATAL) << "Incompatible set properties";
   }
 
   unsigned int idx;
-  for (idx = 0; idx < dest->storage_size; idx++) {
-    dest->storage[idx] |= src1->storage[idx] & ~src2->storage[idx];
+  for (idx = 0; idx < dest->GetStorageSize(); idx++) {
+    dest->GetRawStorage()[idx] |= src1->GetRawStorageWord(idx) & ~(src2->GetRawStorageWord(idx));
   }
 }
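
ComputeSuccLineIn is the one routine that still walks raw storage words, so ArenaBitVector gains read accessors instead of exposing fields. The signatures implied by the call sites above, written out as an assumption (the 32-bit word width is inferred, not confirmed by this excerpt):

    // Assumed accessor shapes, inferred from ComputeSuccLineIn's uses:
    uint32_t  GetStorageSize() const;               // storage length, in words
    bool      IsExpandable() const;
    uint32_t* GetRawStorage();                      // mutable word array (dest)
    uint32_t  GetRawStorageWord(size_t idx) const;  // read-only word (sources)
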
 
@@ -465,7 +452,7 @@
   ArenaBitVector* temp_dalvik_register_v = temp_dalvik_register_v_;
 
   if (bb->data_flow_info == NULL) return false;
-  CopyBitVector(temp_dalvik_register_v, bb->data_flow_info->live_in_v);
+  temp_dalvik_register_v->Copy(bb->data_flow_info->live_in_v);
   if (bb->taken && bb->taken->data_flow_info)
     ComputeSuccLineIn(temp_dalvik_register_v, bb->taken->data_flow_info->live_in_v,
                       bb->data_flow_info->def_v);
@@ -473,12 +460,9 @@
     ComputeSuccLineIn(temp_dalvik_register_v, bb->fall_through->data_flow_info->live_in_v,
                       bb->data_flow_info->def_v);
   if (bb->successor_block_list.block_list_type != kNotUsed) {
-    GrowableListIterator iterator;
-    GrowableListIteratorInit(&bb->successor_block_list.blocks,
-                                &iterator);
+    GrowableArray<SuccessorBlockInfo*>::Iterator iterator(bb->successor_block_list.blocks);
     while (true) {
-      SuccessorBlockInfo *successor_block_info =
-          reinterpret_cast<SuccessorBlockInfo*>(GrowableListIteratorNext(&iterator));
+      SuccessorBlockInfo *successor_block_info = iterator.Next();
       if (successor_block_info == NULL) break;
       BasicBlock* succ_bb = successor_block_info->block;
       if (succ_bb->data_flow_info) {
@@ -487,8 +471,8 @@
       }
     }
   }
-  if (CompareBitVectors(temp_dalvik_register_v, bb->data_flow_info->live_in_v)) {
-    CopyBitVector(bb->data_flow_info->live_in_v, temp_dalvik_register_v);
+  if (!temp_dalvik_register_v->Equal(bb->data_flow_info->live_in_v)) {
+    bb->data_flow_info->live_in_v->Copy(temp_dalvik_register_v);
     return true;
   }
   return false;
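
Note the inverted condition: the old CompareBitVectors evidently reported a mismatch with a true result, while the new Equal() reports equality, hence the added `!`. The boolean this function returns feeds the usual iterate-to-fixed-point driver, roughly as below (a hypothetical loop for illustration; the real driver is the iterative dataflow iterator used elsewhere in this file):

    // Sketch only: re-run the transfer function until no block's live-in set
    // changes. ComputeBlockLiveIns() returning true means "not converged yet".
    bool change = true;
    while (change) {
      change = false;
      for (BasicBlock* bb : reachable_blocks) {  // hypothetical container
        change |= ComputeBlockLiveIns(bb);
      }
    }
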
@@ -498,12 +482,15 @@
 void MIRGraph::InsertPhiNodes()
 {
   int dalvik_reg;
-  ArenaBitVector* phi_blocks = AllocBitVector(cu_, GetNumBlocks(), false, kBitMapPhi);
-  ArenaBitVector* tmp_blocks = AllocBitVector(cu_, GetNumBlocks(), false, kBitMapTmpBlocks);
-  ArenaBitVector* input_blocks = AllocBitVector(cu_, GetNumBlocks(), false, kBitMapInputBlocks);
+  ArenaBitVector* phi_blocks =
+      new (arena_) ArenaBitVector(arena_, GetNumBlocks(), false, kBitMapPhi);
+  ArenaBitVector* tmp_blocks =
+      new (arena_) ArenaBitVector(arena_, GetNumBlocks(), false, kBitMapTmpBlocks);
+  ArenaBitVector* input_blocks =
+      new (arena_) ArenaBitVector(arena_, GetNumBlocks(), false, kBitMapInputBlocks);
 
   temp_dalvik_register_v_ =
-      AllocBitVector(cu_, cu_->num_dalvik_registers, false, kBitMapRegisterV);
+      new (arena_) ArenaBitVector(arena_, cu_->num_dalvik_registers, false, kBitMapRegisterV);
 
   PostOrderDfsIterator iter(this, true /* iterative */);
   bool change = false;
@@ -514,38 +501,37 @@
   /* Iterate through each Dalvik register */
   for (dalvik_reg = cu_->num_dalvik_registers - 1; dalvik_reg >= 0; dalvik_reg--) {
     bool change;
-    ArenaBitVectorIterator iterator;
 
-    CopyBitVector(input_blocks, def_block_matrix_[dalvik_reg]);
-    ClearAllBits(phi_blocks);
+    input_blocks->Copy(def_block_matrix_[dalvik_reg]);
+    phi_blocks->ClearAllBits();
 
     /* Calculate the phi blocks for each Dalvik register */
     do {
       change = false;
-      ClearAllBits(tmp_blocks);
-      BitVectorIteratorInit(input_blocks, &iterator);
+      tmp_blocks->ClearAllBits();
+      ArenaBitVector::Iterator iterator(input_blocks);
 
       while (true) {
-        int idx = BitVectorIteratorNext(&iterator);
+        int idx = iterator.Next();
         if (idx == -1) break;
           BasicBlock* def_bb = GetBasicBlock(idx);
 
           /* Merge the dominance frontier to tmp_blocks */
-          //TUNING: hot call to UnifyBitVetors
+          //TUNING: hot call to Union().
           if (def_bb->dom_frontier != NULL) {
-            UnifyBitVetors(tmp_blocks, tmp_blocks, def_bb->dom_frontier);
+            tmp_blocks->Union(def_bb->dom_frontier);
           }
         }
-        if (CompareBitVectors(phi_blocks, tmp_blocks)) {
+        if (!phi_blocks->Equal(tmp_blocks)) {
           change = true;
-          CopyBitVector(phi_blocks, tmp_blocks);
+          phi_blocks->Copy(tmp_blocks);
 
           /*
            * Iterate through the original blocks plus the new ones in
            * the dominance frontier.
            */
-          CopyBitVector(input_blocks, phi_blocks);
-          UnifyBitVetors(input_blocks, input_blocks, def_block_matrix_[dalvik_reg]);
+          input_blocks->Copy(phi_blocks);
+          input_blocks->Union(def_block_matrix_[dalvik_reg]);
       }
     } while (change);
 
@@ -553,14 +539,15 @@
      * Insert a phi node for dalvik_reg in the phi_blocks if the Dalvik
      * register is in the live-in set.
      */
-    BitVectorIteratorInit(phi_blocks, &iterator);
+    ArenaBitVector::Iterator iterator(phi_blocks);
     while (true) {
-      int idx = BitVectorIteratorNext(&iterator);
+      int idx = iterator.Next();
       if (idx == -1) break;
       BasicBlock* phi_bb = GetBasicBlock(idx);
       /* Variable will be clobbered before being used - no need for phi */
-      if (!IsBitSet(phi_bb->data_flow_info->live_in_v, dalvik_reg)) continue;
-      MIR *phi = static_cast<MIR*>(NewMem(cu_, sizeof(MIR), true, kAllocDFInfo));
+      if (!phi_bb->data_flow_info->live_in_v->IsBitSet(dalvik_reg)) continue;
+      MIR *phi =
+          static_cast<MIR*>(arena_->NewMem(sizeof(MIR), true, ArenaAllocator::kAllocDFInfo));
       phi->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpPhi);
       phi->dalvikInsn.vA = dalvik_reg;
       phi->offset = phi_bb->start_offset;
@@ -576,7 +563,6 @@
  */
 bool MIRGraph::InsertPhiNodeOperands(BasicBlock* bb)
 {
-  GrowableListIterator iter;
   MIR *mir;
   std::vector<int> uses;
   std::vector<int> incoming_arc;
@@ -593,10 +579,9 @@
     incoming_arc.clear();
 
     /* Iterate through the predecessors */
-    GrowableListIteratorInit(bb->predecessors, &iter);
+    GrowableArray<BasicBlock*>::Iterator iter(bb->predecessors);
     while (true) {
-      BasicBlock* pred_bb =
-         reinterpret_cast<BasicBlock*>(GrowableListIteratorNext(&iter));
+      BasicBlock* pred_bb = iter.Next();
       if (!pred_bb) break;
       int ssa_reg = pred_bb->data_flow_info->vreg_to_ssa_map[v_reg];
       uses.push_back(ssa_reg);
@@ -607,11 +592,14 @@
     int num_uses = uses.size();
     mir->ssa_rep->num_uses = num_uses;
     mir->ssa_rep->uses =
-        static_cast<int*>(NewMem(cu_, sizeof(int) * num_uses, false, kAllocDFInfo));
+        static_cast<int*>(arena_->NewMem(sizeof(int) * num_uses, false,
+                                         ArenaAllocator::kAllocDFInfo));
     mir->ssa_rep->fp_use =
-        static_cast<bool*>(NewMem(cu_, sizeof(bool) * num_uses, true, kAllocDFInfo));
+        static_cast<bool*>(arena_->NewMem(sizeof(bool) * num_uses, true,
+                                          ArenaAllocator::kAllocDFInfo));
     int* incoming =
-        static_cast<int*>(NewMem(cu_, sizeof(int) * num_uses, false, kAllocDFInfo));
+        static_cast<int*>(arena_->NewMem(sizeof(int) * num_uses, false,
+                                         ArenaAllocator::kAllocDFInfo));
     // TODO: Ugly, rework (but don't burden each MIR/LIR for Phi-only needs)
     mir->dalvikInsn.vB = reinterpret_cast<uintptr_t>(incoming);
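
Each phi thus gets three parallel arena arrays sized by its predecessor count: the SSA names it uses, their FP flags, and the predecessor block ids recorded in `incoming`. As the TODO notes, stashing the `incoming` pointer in dalvikInsn.vB is a deliberate kludge; reading it back is the mirror cast (sketch only, assuming vB is wide enough to hold a pointer, as the reinterpret_cast above already requires):

    // Later consumers recover the predecessor-id array like this (illustrative):
    int* incoming = reinterpret_cast<int*>(static_cast<uintptr_t>(mir->dalvikInsn.vB));
    for (int i = 0; i < mir->ssa_rep->num_uses; i++) {
      // uses[i] is the SSA name flowing in from the block whose id is incoming[i].
    }
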
 
@@ -637,7 +625,8 @@
   int map_size = sizeof(int) * cu_->num_dalvik_registers;
 
   /* Save SSA map snapshot */
-  int* saved_ssa_map = static_cast<int*>(NewMem(cu_, map_size, false, kAllocDalvikToSSAMap));
+  int* saved_ssa_map =
+      static_cast<int*>(arena_->NewMem(map_size, false, ArenaAllocator::kAllocDalvikToSSAMap));
   memcpy(saved_ssa_map, vreg_to_ssa_map_, map_size);
 
   if (block->fall_through) {
@@ -651,11 +640,9 @@
     memcpy(vreg_to_ssa_map_, saved_ssa_map, map_size);
   }
   if (block->successor_block_list.block_list_type != kNotUsed) {
-    GrowableListIterator iterator;
-    GrowableListIteratorInit(&block->successor_block_list.blocks, &iterator);
+    GrowableArray<SuccessorBlockInfo*>::Iterator iterator(block->successor_block_list.blocks);
     while (true) {
-      SuccessorBlockInfo *successor_block_info =
-          reinterpret_cast<SuccessorBlockInfo*>(GrowableListIteratorNext(&iterator));
+      SuccessorBlockInfo *successor_block_info = iterator.Next();
       if (successor_block_info == NULL) break;
       BasicBlock* succ_bb = successor_block_info->block;
       DoDFSPreOrderSSARename(succ_bb);
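
The snapshot/restore pair is what makes the renaming a proper pre-order walk: each outgoing edge is explored with the vreg-to-SSA map as it stood at this block's exit, then rolled back before the next edge, so sibling branches never see each other's renames. Schematically (a restatement of the surrounding function):

    // saved = snapshot(vreg_to_ssa_map_)              // arena copy, map_size bytes
    // rename definitions in `block`                   // updates vreg_to_ssa_map_
    // if (fall_through) { visit(fall_through); restore(saved); }
    // if (taken)        { visit(taken);        restore(saved); }
    // for each successor_block_list edge:
    //   visit(succ); restore(saved);                  // per the pattern above
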
@@ -696,7 +683,8 @@
    * Shared temp bit vector used by each block to count the number of defs
    * from all the predecessor blocks.
    */
-  temp_ssa_register_v_ = AllocBitVector(cu_, GetNumSSARegs(), false, kBitMapTempSSARegisterV);
+  temp_ssa_register_v_ =
+      new (arena_) ArenaBitVector(arena_, GetNumSSARegs(), false, kBitMapTempSSARegisterV);
 
   /* Insert phi-operands with latest SSA names from predecessor blocks */
   ReachableNodesIterator iter2(this, false /* not iterative */);
diff --git a/src/compiler/dex/vreg_analysis.cc b/src/compiler/dex/vreg_analysis.cc
index 36daaea..d4223f1 100644
--- a/src/compiler/dex/vreg_analysis.cc
+++ b/src/compiler/dex/vreg_analysis.cc
@@ -388,19 +388,19 @@
   RegLocation* loc;
 
   /* Allocate the location map */
-  loc = static_cast<RegLocation*>(NewMem(cu_, GetNumSSARegs() * sizeof(*loc),
-                                  true, kAllocRegAlloc));
+  loc = static_cast<RegLocation*>(arena_->NewMem(GetNumSSARegs() * sizeof(*loc), true,
+                                                 ArenaAllocator::kAllocRegAlloc));
   for (i=0; i < GetNumSSARegs(); i++) {
     loc[i] = fresh_loc;
     loc[i].s_reg_low = i;
-    loc[i].is_const = IsBitSet(is_constant_v_, i);
+    loc[i].is_const = is_constant_v_->IsBitSet(i);
   }
 
   /* Patch up the locations for Method* and the compiler temps */
   loc[method_sreg_].location = kLocCompilerTemp;
   loc[method_sreg_].defined = true;
   for (i = 0; i < cu_->num_compiler_temps; i++) {
-    CompilerTemp* ct = reinterpret_cast<CompilerTemp*>(compiler_temps_.elem_list[i]);
+    CompilerTemp* ct = compiler_temps_.Get(i);
     loc[ct->s_reg].location = kLocCompilerTemp;
     loc[ct->s_reg].defined = true;
   }
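
The compiler-temp change is the payoff of the template conversion: GrowableList stored untyped elements, forcing a reinterpret_cast at every read, while GrowableArray<CompilerTemp*> makes Get(i) already typed. With the GrowableArraySketch shown earlier (illustrative, not the real API):

    // Typed access, checked at compile time:
    //   GrowableArraySketch<CompilerTemp*> temps(arena, cu_->num_compiler_temps);
    //   temps.Insert(ct);
    //   CompilerTemp* back = temps.Get(0);  // no cast needed
    //   int oops = temps.Get(0);            // would now be a compile error
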