Compacting collector.

The compacting collector is currently similar to semispace. It works by
copying objects back and forth between two bump pointer spaces. Some
types of objects are "non-movable" due to current runtime limitations:
Classes, Methods, and Fields.

Bump pointer spaces are a new type of continuous alloc space with no
lock in the allocation code path; allocation uses atomic operations to
bump the current allocation pointer. Traversing the objects in a bump
pointer space relies on Object::SizeOf matching the allocated size
exactly.
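
As an illustration only (this is not the actual BumpPointerSpace code,
which lives in gc/space/bump_pointer_space*.h in this change), lock-free
bump allocation can be written as a compare-and-swap loop over the
current end pointer:

  // Illustrative sketch of lock-free bump allocation.
  #include <atomic>
  #include <cstddef>
  #include <cstdint>

  struct BumpRegion {
    uint8_t* limit;                 // End of the backing memory region.
    std::atomic<uint8_t*> end;      // Current allocation position.

    void* Alloc(size_t num_bytes) {
      uint8_t* old_end;
      uint8_t* new_end;
      do {
        old_end = end.load(std::memory_order_relaxed);
        new_end = old_end + num_bytes;
        if (new_end > limit) {
          return nullptr;           // Out of space; caller must fall back.
        }
      } while (!end.compare_exchange_weak(old_end, new_end));
      return old_end;
    }
  };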

Runtime changes:
JNI GetArrayElements returns a copy if you attempt to get the backing
data of a movable array. For GetPrimitiveArrayCritical, we return direct
backing storage for all array types, but temporarily disable the GC
until the critical region is completed.
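
The intent is roughly the sketch below (illustrative only, not the exact
JNI code in this change; Decode() and IsMovableObject() stand in for the
runtime's real helpers):

  // Sketch: the non-critical getter copies movable arrays; the critical
  // variant exposes direct storage while moving GC is disabled.
  jint* GetIntArrayElementsSketch(JNIEnv* env, jintArray java_array,
                                  jboolean* is_copy) {
    mirror::IntArray* array = Decode(java_array);
    if (IsMovableObject(array)) {
      // Movable array: hand back a copy so compaction can't invalidate it.
      if (is_copy != nullptr) {
        *is_copy = JNI_TRUE;
      }
      jint* copy = new jint[array->GetLength()];
      memcpy(copy, array->GetData(), array->GetLength() * sizeof(jint));
      return copy;  // Freed by the matching Release call.
    }
    // Non-movable array: direct backing storage is safe to expose.
    if (is_copy != nullptr) {
      *is_copy = JNI_FALSE;
    }
    return array->GetData();
  }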

Added a new runtime call, VisitObjects, which is used in place of the
old pattern of flushing the allocation stack and walking the live
bitmaps.
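
Assuming the call keeps a simple callback-plus-argument shape and lives
on Heap (both assumptions; the exact signature is in the heap changes,
not shown here), a caller looks roughly like:

  // Hypothetical callback; VisitObjects walks every live object,
  // including ones that are still only on the allocation stack.
  static void CountObjectCallback(mirror::Object* obj, void* arg) {
    ++*reinterpret_cast<size_t*>(arg);
  }

  size_t count = 0;
  Runtime::Current()->GetHeap()->VisitObjects(CountObjectCallback, &count);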

Changed the image writer to be compaction safe and to use the object
monitor word for forwarding addresses.
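
Conceptually the image writer now reuses the lock word as a forwarding
slot while laying out the image, along the lines of this sketch (the
bookkeeping map and helper names are made up for illustration; the
original monitor words are saved and restored around layout):

  // Sketch: stash the image offset in the lock word, the same way the
  // semispace collector stores forwarding addresses.
  void AssignImageOffsetSketch(mirror::Object* obj, size_t image_offset) {
    saved_lock_words_.Put(obj, obj->GetLockWord());  // hypothetical map
    obj->SetLockWord(LockWord::FromForwardingAddress(image_offset));
  }

  size_t GetImageOffsetSketch(mirror::Object* obj) {
    DCHECK_EQ(obj->GetLockWord().GetState(), LockWord::kForwardingAddress);
    return obj->GetLockWord().ForwardingAddress();
  }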

Added a bunch of SIRTs to ClassLinker, MethodLinker, etc.
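
These keep raw mirror pointers reachable by (and updatable by) the GC
across code that may now trigger a moving collection, roughly like the
following (AllocSomething is a stand-in; the real call sites are in the
class linker changes):

  // Protect klass across a point that may allocate and move objects.
  SirtRef<mirror::Class> sirt_klass(self, klass);
  AllocSomething(self);            // hypothetical allocating call
  klass = sirt_klass.get();        // Reload; the object may have moved.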

TODO: Enable switching allocators, background compaction, etc.

Bug: 8981901

Change-Id: I3c886fd322a6eef2b99388d19a765042ec26ab99
diff --git a/runtime/gc/accounting/mod_union_table.cc b/runtime/gc/accounting/mod_union_table.cc
index 7cbe94d..faa198a 100644
--- a/runtime/gc/accounting/mod_union_table.cc
+++ b/runtime/gc/accounting/mod_union_table.cc
@@ -82,7 +82,7 @@
     if (ref != nullptr) {
       Object* new_ref = visitor_(ref, arg_);
       if (new_ref != ref) {
-        obj->SetFieldObject(offset, ref, false, true);
+        obj->SetFieldObject(offset, new_ref, true);
       }
     }
   }
@@ -154,7 +154,7 @@
     // We don't have an early exit since we use the visitor pattern, an early
     // exit should significantly speed this up.
     AddToReferenceArrayVisitor visitor(mod_union_table_, references_);
-    collector::MarkSweep::VisitObjectReferences(obj, visitor);
+    collector::MarkSweep::VisitObjectReferences(obj, visitor, true);
   }
  private:
   ModUnionTableReferenceCache* const mod_union_table_;
@@ -206,7 +206,7 @@
     Locks::heap_bitmap_lock_->AssertSharedHeld(Thread::Current());
     DCHECK(obj != NULL);
     CheckReferenceVisitor visitor(mod_union_table_, references_);
-    collector::MarkSweep::VisitObjectReferences(obj, visitor);
+    collector::MarkSweep::VisitObjectReferences(obj, visitor, true);
   }
 
  private:
@@ -334,7 +334,7 @@
   for (const byte* card_addr : cleared_cards_) {
     auto start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card_addr));
     auto end = start + CardTable::kCardSize;
-    os << reinterpret_cast<void*>(start) << "-" << reinterpret_cast<void*>(end) << ",";
+    os << reinterpret_cast<void*>(start) << "-" << reinterpret_cast<void*>(end) << "\n";
   }
   os << "]";
 }
diff --git a/runtime/gc/collector/garbage_collector.cc b/runtime/gc/collector/garbage_collector.cc
index 6691cad..1789103 100644
--- a/runtime/gc/collector/garbage_collector.cc
+++ b/runtime/gc/collector/garbage_collector.cc
@@ -36,6 +36,7 @@
 GarbageCollector::GarbageCollector(Heap* heap, const std::string& name)
     : heap_(heap),
       name_(name),
+      clear_soft_references_(false),
       verbose_(VLOG_IS_ON(heap)),
       duration_ns_(0),
       timings_(name_.c_str(), true, verbose_),
@@ -60,11 +61,18 @@
   total_freed_bytes_ = 0;
 }
 
-void GarbageCollector::Run() {
+void GarbageCollector::Run(bool clear_soft_references) {
   ThreadList* thread_list = Runtime::Current()->GetThreadList();
   uint64_t start_time = NanoTime();
   pause_times_.clear();
   duration_ns_ = 0;
+  clear_soft_references_ = clear_soft_references;
+
+  // Reset stats.
+  freed_bytes_ = 0;
+  freed_large_object_bytes_ = 0;
+  freed_objects_ = 0;
+  freed_large_objects_ = 0;
 
   InitializePhase();
 
diff --git a/runtime/gc/collector/garbage_collector.h b/runtime/gc/collector/garbage_collector.h
index 0f566c9..6111c2f 100644
--- a/runtime/gc/collector/garbage_collector.h
+++ b/runtime/gc/collector/garbage_collector.h
@@ -46,7 +46,7 @@
   virtual GcType GetGcType() const = 0;
 
   // Run the garbage collector.
-  void Run();
+  void Run(bool clear_soft_references);
 
   Heap* GetHeap() const {
     return heap_;
@@ -78,6 +78,34 @@
   // this is the allocation space, for full GC then we swap the zygote bitmaps too.
   void SwapBitmaps() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
+  size_t GetFreedBytes() const {
+    return freed_bytes_;
+  }
+
+  size_t GetFreedLargeObjectBytes() const {
+    return freed_large_object_bytes_;
+  }
+
+  size_t GetFreedObjects() const {
+    return freed_objects_;
+  }
+
+  size_t GetFreedLargeObjects() const {
+    return freed_large_objects_;
+  }
+
+  uint64_t GetTotalPausedTimeNs() const {
+    return total_paused_time_ns_;
+  }
+
+  uint64_t GetTotalFreedBytes() const {
+    return total_freed_bytes_;
+  }
+
+  uint64_t GetTotalFreedObjects() const {
+    return total_freed_objects_;
+  }
+
  protected:
   // The initial phase. Done without mutators paused.
   virtual void InitializePhase() = 0;
@@ -98,6 +126,8 @@
 
   std::string name_;
 
+  bool clear_soft_references_;
+
   const bool verbose_;
 
   uint64_t duration_ns_;
@@ -109,6 +139,12 @@
   uint64_t total_freed_objects_;
   uint64_t total_freed_bytes_;
 
+  // Single GC statistics.
+  AtomicInteger freed_bytes_;
+  AtomicInteger freed_large_object_bytes_;
+  AtomicInteger freed_objects_;
+  AtomicInteger freed_large_objects_;
+
   CumulativeLogger cumulative_timings_;
 
   std::vector<uint64_t> pause_times_;
diff --git a/runtime/gc/collector/mark_sweep-inl.h b/runtime/gc/collector/mark_sweep-inl.h
index 270c9ef..7a51553 100644
--- a/runtime/gc/collector/mark_sweep-inl.h
+++ b/runtime/gc/collector/mark_sweep-inl.h
@@ -44,8 +44,7 @@
     if (klass->IsObjectArrayClass()) {
       VisitObjectArrayReferences(obj->AsObjectArray<mirror::Object>(), visitor);
     }
-  } else if (UNLIKELY(klass == java_lang_Class_)) {
-    DCHECK_EQ(klass->GetClass(), java_lang_Class_);
+  } else if (UNLIKELY(klass == mirror::Class::GetJavaLangClass())) {
     if (kCountScannedTypes) {
       ++class_count_;
     }
@@ -56,7 +55,7 @@
     }
     VisitOtherReferences(klass, obj, visitor);
     if (UNLIKELY(klass->IsReferenceClass())) {
-      DelayReferenceReferent(klass, const_cast<mirror::Object*>(obj));
+      DelayReferenceReferent(klass, obj);
     }
   }
 }
@@ -68,11 +67,10 @@
                           Locks::mutator_lock_) {
   DCHECK(obj != NULL);
   DCHECK(obj->GetClass() != NULL);
-
   mirror::Class* klass = obj->GetClass();
   DCHECK(klass != NULL);
   if (visit_class) {
-    visitor(obj, klass, MemberOffset(0), false);
+    visitor(obj, klass, mirror::Object::ClassOffset(), false);
   }
   if (klass == mirror::Class::GetJavaLangClass()) {
     DCHECK_EQ(klass->GetClass(), mirror::Class::GetJavaLangClass());
@@ -90,8 +88,7 @@
 }
 
 template <typename Visitor>
-inline void MarkSweep::VisitInstanceFieldsReferences(mirror::Class* klass,
-                                                     mirror::Object* obj,
+inline void MarkSweep::VisitInstanceFieldsReferences(mirror::Class* klass, mirror::Object* obj,
                                                      const Visitor& visitor)
     SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
   DCHECK(obj != NULL);
@@ -119,11 +116,6 @@
                                              bool is_static, const Visitor& visitor) {
   if (LIKELY(ref_offsets != CLASS_WALK_SUPER)) {
     // Found a reference offset bitmap.  Mark the specified offsets.
-#ifndef MOVING_COLLECTOR
-    // Clear the class bit since we mark the class as part of marking the classlinker roots.
-    DCHECK_EQ(mirror::Object::ClassOffset().Uint32Value(), 0U);
-    ref_offsets &= (1U << (sizeof(ref_offsets) * 8 - 1)) - 1;
-#endif
     while (ref_offsets != 0) {
       size_t right_shift = CLZ(ref_offsets);
       MemberOffset field_offset = CLASS_OFFSET_FROM_CLZ(right_shift);
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 2c69c77..11e911c 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -93,6 +93,8 @@
   }
 
   // Add the space to the immune region.
+  // TODO: Use space limits instead of current end_ since the end_ can be changed by dlmalloc
+  // callbacks.
   if (immune_begin_ == NULL) {
     DCHECK(immune_end_ == NULL);
     SetImmuneRange(reinterpret_cast<Object*>(space->Begin()),
@@ -108,14 +110,14 @@
     }
     // If previous space was immune, then extend the immune region. Relies on continuous spaces
     // being sorted by Heap::AddContinuousSpace.
-    if (prev_space != NULL && IsImmuneSpace(prev_space)) {
+    if (prev_space != nullptr && IsImmuneSpace(prev_space)) {
       immune_begin_ = std::min(reinterpret_cast<Object*>(space->Begin()), immune_begin_);
       immune_end_ = std::max(reinterpret_cast<Object*>(space->End()), immune_end_);
     }
   }
 }
 
-bool MarkSweep::IsImmuneSpace(const space::ContinuousSpace* space) {
+bool MarkSweep::IsImmuneSpace(const space::ContinuousSpace* space) const {
   return
       immune_begin_ <= reinterpret_cast<Object*>(space->Begin()) &&
       immune_end_ >= reinterpret_cast<Object*>(space->End());
@@ -135,10 +137,9 @@
 
 MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix)
     : GarbageCollector(heap,
-                       name_prefix + (name_prefix.empty() ? "" : " ") +
+                       name_prefix +
                        (is_concurrent ? "concurrent mark sweep": "mark sweep")),
       current_mark_bitmap_(NULL),
-      java_lang_Class_(NULL),
       mark_stack_(NULL),
       immune_begin_(NULL),
       immune_end_(NULL),
@@ -150,8 +151,7 @@
       gc_barrier_(new Barrier(0)),
       large_object_lock_("mark sweep large object lock", kMarkSweepLargeObjectLock),
       mark_stack_lock_("mark sweep mark stack lock", kMarkSweepMarkStackLock),
-      is_concurrent_(is_concurrent),
-      clear_soft_references_(false) {
+      is_concurrent_(is_concurrent) {
 }
 
 void MarkSweep::InitializePhase() {
@@ -165,10 +165,6 @@
   finalizer_reference_list_ = nullptr;
   phantom_reference_list_ = nullptr;
   cleared_reference_list_ = nullptr;
-  freed_bytes_ = 0;
-  freed_large_object_bytes_ = 0;
-  freed_objects_ = 0;
-  freed_large_objects_ = 0;
   class_count_ = 0;
   array_count_ = 0;
   other_count_ = 0;
@@ -179,8 +175,6 @@
   work_chunks_created_ = 0;
   work_chunks_deleted_ = 0;
   reference_count_ = 0;
-  java_lang_Class_ = Class::GetJavaLangClass();
-  CHECK(java_lang_Class_ != nullptr);
 
   FindDefaultMarkBitmap();
 
@@ -294,8 +288,7 @@
   // knowing that new allocations won't be marked as live.
   timings_.StartSplit("MarkStackAsLive");
   accounting::ObjectStack* live_stack = heap_->GetLiveStack();
-  heap_->MarkAllocStack(heap_->alloc_space_->GetLiveBitmap(),
-                        heap_->large_object_space_->GetLiveObjects(), live_stack);
+  heap_->MarkAllocStackAsLive(live_stack);
   live_stack->Reset();
   timings_.EndSplit();
   // Recursively mark all the non-image bits set in the mark bitmap.
@@ -371,8 +364,10 @@
 void MarkSweep::FindDefaultMarkBitmap() {
   base::TimingLogger::ScopedSplit split("FindDefaultMarkBitmap", &timings_);
   for (const auto& space : GetHeap()->GetContinuousSpaces()) {
-    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) {
-      current_mark_bitmap_ = space->GetMarkBitmap();
+    accounting::SpaceBitmap* bitmap = space->GetMarkBitmap();
+    if (bitmap != nullptr &&
+        space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) {
+      current_mark_bitmap_ = bitmap;
       CHECK(current_mark_bitmap_ != NULL);
       return;
     }
@@ -613,10 +608,8 @@
   CHECK(space->IsDlMallocSpace());
   space::DlMallocSpace* alloc_space = space->AsDlMallocSpace();
   accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
-  accounting::SpaceBitmap* mark_bitmap = alloc_space->mark_bitmap_.release();
+  accounting::SpaceBitmap* mark_bitmap = alloc_space->BindLiveToMarkBitmap();
   GetHeap()->GetMarkBitmap()->ReplaceBitmap(mark_bitmap, live_bitmap);
-  alloc_space->temp_bitmap_.reset(mark_bitmap);
-  alloc_space->mark_bitmap_.reset(live_bitmap);
 }
 
 class ScanObjectVisitor {
@@ -625,7 +618,7 @@
       : mark_sweep_(mark_sweep) {}
 
   // TODO: Fixme when anotatalysis works with visitors.
-  void operator()(const Object* obj) const ALWAYS_INLINE NO_THREAD_SAFETY_ANALYSIS {
+  void operator()(Object* obj) const ALWAYS_INLINE NO_THREAD_SAFETY_ANALYSIS {
     if (kCheckLocks) {
       Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
       Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
@@ -814,6 +807,9 @@
     const size_t mark_stack_delta = std::min(CardScanTask::kMaxSize / 2,
                                              mark_stack_size / mark_stack_tasks + 1);
     for (const auto& space : GetHeap()->GetContinuousSpaces()) {
+      if (space->GetMarkBitmap() == nullptr) {
+        continue;
+      }
       byte* card_begin = space->Begin();
       byte* card_end = space->End();
       // Align up the end address. For example, the image space's end
@@ -856,24 +852,26 @@
     timings_.EndSplit();
   } else {
     for (const auto& space : GetHeap()->GetContinuousSpaces()) {
-      // Image spaces are handled properly since live == marked for them.
-      switch (space->GetGcRetentionPolicy()) {
-        case space::kGcRetentionPolicyNeverCollect:
-          timings_.StartSplit(paused ? "(Paused)ScanGrayImageSpaceObjects" :
-              "ScanGrayImageSpaceObjects");
-          break;
-        case space::kGcRetentionPolicyFullCollect:
-          timings_.StartSplit(paused ? "(Paused)ScanGrayZygoteSpaceObjects" :
-              "ScanGrayZygoteSpaceObjects");
-          break;
-        case space::kGcRetentionPolicyAlwaysCollect:
-          timings_.StartSplit(paused ? "(Paused)ScanGrayAllocSpaceObjects" :
-              "ScanGrayAllocSpaceObjects");
-          break;
-        }
-      ScanObjectVisitor visitor(this);
-      card_table->Scan(space->GetMarkBitmap(), space->Begin(), space->End(), visitor, minimum_age);
-      timings_.EndSplit();
+      if (space->GetMarkBitmap() != nullptr) {
+        // Image spaces are handled properly since live == marked for them.
+        switch (space->GetGcRetentionPolicy()) {
+          case space::kGcRetentionPolicyNeverCollect:
+            timings_.StartSplit(paused ? "(Paused)ScanGrayImageSpaceObjects" :
+                "ScanGrayImageSpaceObjects");
+            break;
+          case space::kGcRetentionPolicyFullCollect:
+            timings_.StartSplit(paused ? "(Paused)ScanGrayZygoteSpaceObjects" :
+                "ScanGrayZygoteSpaceObjects");
+            break;
+          case space::kGcRetentionPolicyAlwaysCollect:
+            timings_.StartSplit(paused ? "(Paused)ScanGrayAllocSpaceObjects" :
+                "ScanGrayAllocSpaceObjects");
+            break;
+          }
+        ScanObjectVisitor visitor(this);
+        card_table->Scan(space->GetMarkBitmap(), space->Begin(), space->End(), visitor, minimum_age);
+        timings_.EndSplit();
+      }
     }
   }
 }
@@ -954,9 +952,8 @@
       if ((space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) ||
           (!partial && space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect)) {
         current_mark_bitmap_ = space->GetMarkBitmap();
-        if (current_mark_bitmap_ == NULL) {
-          GetHeap()->DumpSpaces();
-          LOG(FATAL) << "invalid bitmap";
+        if (current_mark_bitmap_ == nullptr) {
+          continue;
         }
         if (parallel) {
           // We will use the mark stack the future.
@@ -1121,7 +1118,7 @@
 }
 
 void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitmaps) {
-  space::DlMallocSpace* space = heap_->GetAllocSpace();
+  space::DlMallocSpace* space = heap_->GetNonMovingSpace();
   timings_.StartSplit("SweepArray");
   // Newly allocated objects MUST be in the alloc space and those are the only objects which we are
   // going to free.
@@ -1207,8 +1204,11 @@
   scc.mark_sweep = this;
   scc.self = Thread::Current();
   for (const auto& space : GetHeap()->GetContinuousSpaces()) {
+    if (!space->IsDlMallocSpace()) {
+      continue;
+    }
     // We always sweep always collect spaces.
-    bool sweep_space = (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect);
+    bool sweep_space = space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect;
     if (!partial && !sweep_space) {
       // We sweep full collect spaces when the GC isn't a partial GC (ie its full).
       sweep_space = (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect);
@@ -1370,9 +1370,9 @@
 
 // Scans an object reference.  Determines the type of the reference
 // and dispatches to a specialized scanning routine.
-void MarkSweep::ScanObject(const Object* obj) {
+void MarkSweep::ScanObject(Object* obj) {
   MarkObjectVisitor visitor(this);
-  ScanObjectVisit(const_cast<Object*>(obj), visitor);
+  ScanObjectVisit(obj, visitor);
 }
 
 void MarkSweep::ProcessMarkStackParallel(size_t thread_count) {
@@ -1406,12 +1406,12 @@
   } else {
     // TODO: Tune this.
     static const size_t kFifoSize = 4;
-    BoundedFifoPowerOfTwo<const Object*, kFifoSize> prefetch_fifo;
+    BoundedFifoPowerOfTwo<Object*, kFifoSize> prefetch_fifo;
     for (;;) {
-      const Object* obj = NULL;
+      Object* obj = NULL;
       if (kUseMarkStackPrefetch) {
         while (!mark_stack_->IsEmpty() && prefetch_fifo.size() < kFifoSize) {
-          const Object* obj = mark_stack_->PopBack();
+          Object* obj = mark_stack_->PopBack();
           DCHECK(obj != NULL);
           __builtin_prefetch(obj);
           prefetch_fifo.push_back(obj);
@@ -1603,9 +1603,6 @@
   timings_.NewSplit("PostGcVerification");
   heap->PostGcVerification(this);
 
-  timings_.NewSplit("GrowForUtilization");
-  heap->GrowForUtilization(GetGcType(), GetDurationNs());
-
   timings_.NewSplit("RequestHeapTrim");
   heap->RequestHeapTrim();
 
@@ -1651,8 +1648,10 @@
 
   // Clear all of the spaces' mark bitmaps.
   for (const auto& space : GetHeap()->GetContinuousSpaces()) {
-    if (space->GetGcRetentionPolicy() != space::kGcRetentionPolicyNeverCollect) {
-      space->GetMarkBitmap()->Clear();
+    accounting::SpaceBitmap* bitmap = space->GetMarkBitmap();
+    if (bitmap != nullptr &&
+        space->GetGcRetentionPolicy() != space::kGcRetentionPolicyNeverCollect) {
+      bitmap->Clear();
     }
   }
   mark_stack_->Reset();
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index 3bc014a..cc58412 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -114,7 +114,7 @@
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  bool IsImmuneSpace(const space::ContinuousSpace* space)
+  bool IsImmuneSpace(const space::ContinuousSpace* space) const;
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Bind the live bits to the mark bits of bitmaps for spaces that are never collected, ie
@@ -140,6 +140,7 @@
   void ProcessReferences(Thread* self)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
+  // Update and mark references from immune spaces.
   virtual void UpdateAndMarkModUnion()
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
@@ -158,7 +159,7 @@
   }
 
   // Blackens an object.
-  void ScanObject(const mirror::Object* obj)
+  void ScanObject(mirror::Object* obj)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
@@ -167,38 +168,6 @@
   void ScanObjectVisit(mirror::Object* obj, const MarkVisitor& visitor)
       NO_THREAD_SAFETY_ANALYSIS;
 
-  size_t GetFreedBytes() const {
-    return freed_bytes_;
-  }
-
-  size_t GetFreedLargeObjectBytes() const {
-    return freed_large_object_bytes_;
-  }
-
-  size_t GetFreedObjects() const {
-    return freed_objects_;
-  }
-
-  size_t GetFreedLargeObjects() const {
-    return freed_large_objects_;
-  }
-
-  uint64_t GetTotalTimeNs() const {
-    return total_time_ns_;
-  }
-
-  uint64_t GetTotalPausedTimeNs() const {
-    return total_paused_time_ns_;
-  }
-
-  uint64_t GetTotalFreedObjects() const {
-    return total_freed_objects_;
-  }
-
-  uint64_t GetTotalFreedBytes() const {
-    return total_freed_bytes_;
-  }
-
   // Everything inside the immune range is assumed to be marked.
   void SetImmuneRange(mirror::Object* begin, mirror::Object* end);
 
@@ -216,8 +185,7 @@
       SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
   template <typename Visitor>
-  static void VisitObjectReferences(mirror::Object* obj, const Visitor& visitor,
-                                    bool visit_class = false)
+  static void VisitObjectReferences(mirror::Object* obj, const Visitor& visitor, bool visit_class)
       SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
                             Locks::mutator_lock_);
 
@@ -395,9 +363,6 @@
   // object.
   accounting::SpaceBitmap* current_mark_bitmap_;
 
-  // Cache java.lang.Class for optimization.
-  mirror::Class* java_lang_Class_;
-
   accounting::ObjectStack* mark_stack_;
 
   // Immune range, every object inside the immune range is assumed to be marked.
@@ -412,14 +377,6 @@
 
   // Parallel finger.
   AtomicInteger atomic_finger_;
-  // Number of non large object bytes freed in this collection.
-  AtomicInteger freed_bytes_;
-  // Number of large object bytes freed.
-  AtomicInteger freed_large_object_bytes_;
-  // Number of objects freed in this collection.
-  AtomicInteger freed_objects_;
-  // Number of freed large objects.
-  AtomicInteger freed_large_objects_;
   // Number of classes scanned, if kCountScannedTypes.
   AtomicInteger class_count_;
   // Number of arrays scanned, if kCountScannedTypes.
@@ -443,8 +400,6 @@
 
   const bool is_concurrent_;
 
-  bool clear_soft_references_;
-
  private:
   friend class AddIfReachesAllocSpaceVisitor;  // Used by mod-union table.
   friend class CardScanTask;
diff --git a/runtime/gc/collector/partial_mark_sweep.cc b/runtime/gc/collector/partial_mark_sweep.cc
index 29367ce..8ec28f3 100644
--- a/runtime/gc/collector/partial_mark_sweep.cc
+++ b/runtime/gc/collector/partial_mark_sweep.cc
@@ -26,7 +26,7 @@
 namespace collector {
 
 PartialMarkSweep::PartialMarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix)
-    : MarkSweep(heap, is_concurrent, name_prefix + (name_prefix.empty() ? "" : " ") + "partial") {
+    : MarkSweep(heap, is_concurrent, name_prefix.empty() ? "partial " : name_prefix) {
   cumulative_timings_.SetName(GetName());
 }
 
diff --git a/runtime/gc/collector/semi_space-inl.h b/runtime/gc/collector/semi_space-inl.h
new file mode 100644
index 0000000..3b8f7c3
--- /dev/null
+++ b/runtime/gc/collector/semi_space-inl.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_GC_COLLECTOR_SEMI_SPACE_INL_H_
+#define ART_RUNTIME_GC_COLLECTOR_SEMI_SPACE_INL_H_
+
+namespace art {
+namespace gc {
+namespace collector {
+
+inline mirror::Object* SemiSpace::GetForwardingAddressInFromSpace(mirror::Object* obj) const {
+  DCHECK(from_space_->HasAddress(obj));
+  LockWord lock_word = obj->GetLockWord();
+  if (lock_word.GetState() != LockWord::kForwardingAddress) {
+    return nullptr;
+  }
+  return reinterpret_cast<mirror::Object*>(lock_word.ForwardingAddress());
+}
+
+}  // namespace collector
+}  // namespace gc
+}  // namespace art
+
+#endif  // ART_RUNTIME_GC_COLLECTOR_SEMI_SPACE_INL_H_
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
new file mode 100644
index 0000000..d833631
--- /dev/null
+++ b/runtime/gc/collector/semi_space.cc
@@ -0,0 +1,799 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "semi_space.h"
+
+#include <functional>
+#include <numeric>
+#include <climits>
+#include <vector>
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/mutex-inl.h"
+#include "base/timing_logger.h"
+#include "gc/accounting/heap_bitmap.h"
+#include "gc/accounting/mod_union_table.h"
+#include "gc/accounting/space_bitmap-inl.h"
+#include "gc/heap.h"
+#include "gc/space/bump_pointer_space.h"
+#include "gc/space/bump_pointer_space-inl.h"
+#include "gc/space/image_space.h"
+#include "gc/space/large_object_space.h"
+#include "gc/space/space-inl.h"
+#include "indirect_reference_table.h"
+#include "intern_table.h"
+#include "jni_internal.h"
+#include "mark_sweep-inl.h"
+#include "monitor.h"
+#include "mirror/art_field.h"
+#include "mirror/art_field-inl.h"
+#include "mirror/class-inl.h"
+#include "mirror/class_loader.h"
+#include "mirror/dex_cache.h"
+#include "mirror/object-inl.h"
+#include "mirror/object_array.h"
+#include "mirror/object_array-inl.h"
+#include "runtime.h"
+#include "semi_space-inl.h"
+#include "thread-inl.h"
+#include "thread_list.h"
+#include "verifier/method_verifier.h"
+
+using ::art::mirror::Class;
+using ::art::mirror::Object;
+
+namespace art {
+namespace gc {
+namespace collector {
+
+static constexpr bool kProtectFromSpace = true;
+static constexpr bool kResetFromSpace = true;
+
+// TODO: Unduplicate logic.
+void SemiSpace::ImmuneSpace(space::ContinuousSpace* space) {
+  // Bind live to mark bitmap if necessary.
+  if (space->GetLiveBitmap() != space->GetMarkBitmap()) {
+    BindLiveToMarkBitmap(space);
+  }
+  // Add the space to the immune region.
+  if (immune_begin_ == nullptr) {
+    DCHECK(immune_end_ == nullptr);
+    immune_begin_ = reinterpret_cast<Object*>(space->Begin());
+    immune_end_ = reinterpret_cast<Object*>(space->End());
+  } else {
+    const space::ContinuousSpace* prev_space = nullptr;
+    // Find out if the previous space is immune.
+    for (space::ContinuousSpace* cur_space : GetHeap()->GetContinuousSpaces()) {
+      if (cur_space == space) {
+        break;
+      }
+      prev_space = cur_space;
+    }
+    // If previous space was immune, then extend the immune region. Relies on continuous spaces
+    // being sorted by Heap::AddContinuousSpace.
+    if (prev_space != nullptr && IsImmuneSpace(prev_space)) {
+      immune_begin_ = std::min(reinterpret_cast<Object*>(space->Begin()), immune_begin_);
+      immune_end_ = std::max(reinterpret_cast<Object*>(space->End()), immune_end_);
+    }
+  }
+}
+
+void SemiSpace::BindBitmaps() {
+  timings_.StartSplit("BindBitmaps");
+  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
+  // Mark all of the spaces we never collect as immune.
+  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
+    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect
+        || space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect) {
+      ImmuneSpace(space);
+    }
+  }
+  timings_.EndSplit();
+}
+
+SemiSpace::SemiSpace(Heap* heap, const std::string& name_prefix)
+    : GarbageCollector(heap,
+                       name_prefix + (name_prefix.empty() ? "" : " ") + "marksweep + semispace"),
+      mark_stack_(nullptr),
+      immune_begin_(nullptr),
+      immune_end_(nullptr),
+      to_space_(nullptr),
+      from_space_(nullptr),
+      soft_reference_list_(nullptr),
+      weak_reference_list_(nullptr),
+      finalizer_reference_list_(nullptr),
+      phantom_reference_list_(nullptr),
+      cleared_reference_list_(nullptr),
+      self_(nullptr) {
+}
+
+void SemiSpace::InitializePhase() {
+  timings_.Reset();
+  base::TimingLogger::ScopedSplit split("InitializePhase", &timings_);
+  mark_stack_ = heap_->mark_stack_.get();
+  DCHECK(mark_stack_ != nullptr);
+  immune_begin_ = nullptr;
+  immune_end_ = nullptr;
+  soft_reference_list_ = nullptr;
+  weak_reference_list_ = nullptr;
+  finalizer_reference_list_ = nullptr;
+  phantom_reference_list_ = nullptr;
+  cleared_reference_list_ = nullptr;
+  self_ = Thread::Current();
+  // Do any pre GC verification.
+  timings_.NewSplit("PreGcVerification");
+  heap_->PreGcVerification(this);
+}
+
+void SemiSpace::ProcessReferences(Thread* self) {
+  base::TimingLogger::ScopedSplit split("ProcessReferences", &timings_);
+  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
+  ProcessReferences(&soft_reference_list_, clear_soft_references_, &weak_reference_list_,
+                    &finalizer_reference_list_, &phantom_reference_list_);
+}
+
+void SemiSpace::MarkingPhase() {
+  Thread* self = Thread::Current();
+  Locks::mutator_lock_->AssertExclusiveHeld(self);
+  base::TimingLogger::ScopedSplit split("MarkingPhase", &timings_);
+  // Need to do this with mutators paused so that somebody doesn't accidentally allocate into the
+  // wrong space.
+  heap_->SwapSemiSpaces();
+  // Assume the cleared space is already empty.
+  BindBitmaps();
+  // Process dirty cards and add dirty cards to mod-union tables.
+  heap_->ProcessCards(timings_);
+  // Need to do this before the checkpoint since we don't want any threads to add references to
+  // the live stack during the recursive mark.
+  timings_.NewSplit("SwapStacks");
+  heap_->SwapStacks();
+  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
+  MarkRoots();
+  // Mark roots of immune spaces.
+  UpdateAndMarkModUnion();
+  // Recursively mark remaining objects.
+  MarkReachableObjects();
+}
+
+bool SemiSpace::IsImmuneSpace(const space::ContinuousSpace* space) const {
+  return
+    immune_begin_ <= reinterpret_cast<Object*>(space->Begin()) &&
+    immune_end_ >= reinterpret_cast<Object*>(space->End());
+}
+
+void SemiSpace::UpdateAndMarkModUnion() {
+  for (auto& space : heap_->GetContinuousSpaces()) {
+    // If the space is immune then we need to mark the references to other spaces.
+    if (IsImmuneSpace(space)) {
+      accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
+      CHECK(table != nullptr);
+      // TODO: Improve naming.
+      base::TimingLogger::ScopedSplit split(
+          space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
+                                   "UpdateAndMarkImageModUnionTable",
+                                   &timings_);
+      table->UpdateAndMarkReferences(MarkRootCallback, this);
+    }
+  }
+}
+
+void SemiSpace::MarkReachableObjects() {
+  timings_.StartSplit("MarkStackAsLive");
+  accounting::ObjectStack* live_stack = heap_->GetLiveStack();
+  heap_->MarkAllocStackAsLive(live_stack);
+  live_stack->Reset();
+  timings_.EndSplit();
+  // Recursively process the mark stack.
+  ProcessMarkStack(true);
+}
+
+void SemiSpace::ReclaimPhase() {
+  base::TimingLogger::ScopedSplit split("ReclaimPhase", &timings_);
+  Thread* self = Thread::Current();
+  ProcessReferences(self);
+  {
+    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
+    SweepSystemWeaks();
+  }
+  // Record freed memory.
+  int from_bytes = from_space_->GetBytesAllocated();
+  int to_bytes = to_space_->GetBytesAllocated();
+  int from_objects = from_space_->GetObjectsAllocated();
+  int to_objects = to_space_->GetObjectsAllocated();
+  int freed_bytes = from_bytes - to_bytes;
+  int freed_objects = from_objects - to_objects;
+  CHECK_GE(freed_bytes, 0);
+  freed_bytes_.fetch_add(freed_bytes);
+  freed_objects_.fetch_add(freed_objects);
+  heap_->RecordFree(static_cast<size_t>(freed_objects), static_cast<size_t>(freed_bytes));
+
+  timings_.StartSplit("PreSweepingGcVerification");
+  heap_->PreSweepingGcVerification(this);
+  timings_.EndSplit();
+
+  {
+    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
+    // Reclaim unmarked objects.
+    Sweep(false);
+    // Swap the live and mark bitmaps for each space which we modified. This is an
+    // optimization that enables us to not clear live bits inside of the sweep. Only swaps unbound
+    // bitmaps.
+    timings_.StartSplit("SwapBitmaps");
+    SwapBitmaps();
+    timings_.EndSplit();
+    // Unbind the live and mark bitmaps.
+    UnBindBitmaps();
+  }
+  // Release the memory used by the from space.
+  if (kResetFromSpace) {
+    // Clearing from space.
+    from_space_->Clear();
+  }
+  // Protect the from space.
+  VLOG(heap)
+      << "mprotect region " << reinterpret_cast<void*>(from_space_->Begin()) << " - "
+      << reinterpret_cast<void*>(from_space_->Limit());
+  if (kProtectFromSpace) {
+    mprotect(from_space_->Begin(), from_space_->Capacity(), PROT_NONE);
+  } else {
+    mprotect(from_space_->Begin(), from_space_->Capacity(), PROT_READ);
+  }
+}
+
+void SemiSpace::ResizeMarkStack(size_t new_size) {
+  std::vector<Object*> temp(mark_stack_->Begin(), mark_stack_->End());
+  CHECK_LE(mark_stack_->Size(), new_size);
+  mark_stack_->Resize(new_size);
+  for (const auto& obj : temp) {
+    mark_stack_->PushBack(obj);
+  }
+}
+
+inline void SemiSpace::MarkStackPush(Object* obj) {
+  if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
+    ResizeMarkStack(mark_stack_->Capacity() * 2);
+  }
+  // The object must be pushed on to the mark stack.
+  mark_stack_->PushBack(obj);
+}
+
+// Rare case, probably not worth inlining since it will increase instruction cache miss rate.
+bool SemiSpace::MarkLargeObject(const Object* obj) {
+  // TODO: support >1 discontinuous space.
+  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
+  accounting::SpaceSetMap* large_objects = large_object_space->GetMarkObjects();
+  if (UNLIKELY(!large_objects->Test(obj))) {
+    large_objects->Set(obj);
+    return true;
+  }
+  return false;
+}
+
+// Used to mark and copy objects. Any newly-marked objects who are in the from space get moved to
+// the to-space and have their forward address updated. Objects which have been newly marked are
+// pushed on the mark stack.
+Object* SemiSpace::MarkObject(Object* obj) {
+  Object* ret = obj;
+  if (obj != nullptr && !IsImmune(obj)) {
+    if (from_space_->HasAddress(obj)) {
+      mirror::Object* forward_address = GetForwardingAddressInFromSpace(obj);
+      // If the object has already been moved, return the new forward address.
+      if (!to_space_->HasAddress(forward_address)) {
+        // Otherwise, we need to move the object and add it to the markstack for processing.
+        size_t object_size = obj->SizeOf();
+        size_t dummy = 0;
+        forward_address = to_space_->Alloc(self_, object_size, &dummy);
+        // Copy over the object and add it to the mark stack since we still need to update its
+        // references.
+        memcpy(reinterpret_cast<void*>(forward_address), obj, object_size);
+        // Make sure to only update the forwarding address AFTER you copy the object so that the
+        // monitor word doesn't get stomped over.
+        COMPILE_ASSERT(sizeof(uint32_t) == sizeof(mirror::Object*),
+                       monitor_size_must_be_same_as_object);
+        obj->SetLockWord(LockWord::FromForwardingAddress(reinterpret_cast<size_t>(forward_address)));
+        MarkStackPush(forward_address);
+      }
+      ret = forward_address;
+      // TODO: Do we need this if in the else statement?
+    } else {
+      accounting::SpaceBitmap* object_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
+      if (LIKELY(object_bitmap != nullptr)) {
+        // This object was not previously marked.
+        if (!object_bitmap->Test(obj)) {
+          object_bitmap->Set(obj);
+          MarkStackPush(obj);
+        }
+      } else {
+        DCHECK(!to_space_->HasAddress(obj)) << "Marking object in to_space_";
+        if (MarkLargeObject(obj)) {
+          MarkStackPush(obj);
+        }
+      }
+    }
+  }
+  return ret;
+}
+
+Object* SemiSpace::MarkRootCallback(Object* root, void* arg) {
+  DCHECK(root != nullptr);
+  DCHECK(arg != nullptr);
+  return reinterpret_cast<SemiSpace*>(arg)->MarkObject(root);
+}
+
+// Marks all objects in the root set.
+void SemiSpace::MarkRoots() {
+  timings_.StartSplit("MarkRoots");
+  // TODO: Visit up image roots as well?
+  Runtime::Current()->VisitRoots(MarkRootCallback, this, false, true);
+  timings_.EndSplit();
+}
+
+void SemiSpace::BindLiveToMarkBitmap(space::ContinuousSpace* space) {
+  CHECK(space->IsDlMallocSpace());
+  space::DlMallocSpace* alloc_space = space->AsDlMallocSpace();
+  accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
+  accounting::SpaceBitmap* mark_bitmap = alloc_space->BindLiveToMarkBitmap();
+  GetHeap()->GetMarkBitmap()->ReplaceBitmap(mark_bitmap, live_bitmap);
+}
+
+mirror::Object* SemiSpace::GetForwardingAddress(mirror::Object* obj) {
+  if (from_space_->HasAddress(obj)) {
+    LOG(FATAL) << "Shouldn't happen!";
+    return GetForwardingAddressInFromSpace(obj);
+  }
+  return obj;
+}
+
+mirror::Object* SemiSpace::SystemWeakIsMarkedCallback(Object* object, void* arg) {
+  return reinterpret_cast<SemiSpace*>(arg)->GetMarkedForwardAddress(object);
+}
+
+void SemiSpace::SweepSystemWeaks() {
+  timings_.StartSplit("SweepSystemWeaks");
+  Runtime::Current()->SweepSystemWeaks(SystemWeakIsMarkedCallback, this);
+  timings_.EndSplit();
+}
+
+struct SweepCallbackContext {
+  SemiSpace* mark_sweep;
+  space::AllocSpace* space;
+  Thread* self;
+};
+
+void SemiSpace::SweepCallback(size_t num_ptrs, Object** ptrs, void* arg) {
+  SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
+  SemiSpace* gc = context->mark_sweep;
+  Heap* heap = gc->GetHeap();
+  space::AllocSpace* space = context->space;
+  Thread* self = context->self;
+  Locks::heap_bitmap_lock_->AssertExclusiveHeld(self);
+  size_t freed_bytes = space->FreeList(self, num_ptrs, ptrs);
+  heap->RecordFree(num_ptrs, freed_bytes);
+  gc->freed_objects_.fetch_add(num_ptrs);
+  gc->freed_bytes_.fetch_add(freed_bytes);
+}
+
+void SemiSpace::ZygoteSweepCallback(size_t num_ptrs, Object** ptrs, void* arg) {
+  SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
+  Locks::heap_bitmap_lock_->AssertExclusiveHeld(context->self);
+  Heap* heap = context->mark_sweep->GetHeap();
+  // We don't free any actual memory to avoid dirtying the shared zygote pages.
+  for (size_t i = 0; i < num_ptrs; ++i) {
+    Object* obj = static_cast<Object*>(ptrs[i]);
+    heap->GetLiveBitmap()->Clear(obj);
+    heap->GetCardTable()->MarkCard(obj);
+  }
+}
+
+void SemiSpace::Sweep(bool swap_bitmaps) {
+  DCHECK(mark_stack_->IsEmpty());
+  base::TimingLogger::ScopedSplit("Sweep", &timings_);
+
+  const bool partial = (GetGcType() == kGcTypePartial);
+  SweepCallbackContext scc;
+  scc.mark_sweep = this;
+  scc.self = Thread::Current();
+  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
+    if (!space->IsDlMallocSpace()) {
+      continue;
+    }
+    // We always sweep always collect spaces.
+    bool sweep_space = (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect);
+    if (!partial && !sweep_space) {
+      // We sweep full collect spaces when the GC isn't a partial GC (i.e. it's full).
+      sweep_space = (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect);
+    }
+    if (sweep_space && space->IsDlMallocSpace()) {
+      uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
+      uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
+      scc.space = space->AsDlMallocSpace();
+      accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
+      accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
+      if (swap_bitmaps) {
+        std::swap(live_bitmap, mark_bitmap);
+      }
+      if (!space->IsZygoteSpace()) {
+        base::TimingLogger::ScopedSplit split("SweepAllocSpace", &timings_);
+        // Bitmaps are pre-swapped for optimization which enables sweeping with the heap unlocked.
+        accounting::SpaceBitmap::SweepWalk(*live_bitmap, *mark_bitmap, begin, end,
+                                           &SweepCallback, reinterpret_cast<void*>(&scc));
+      } else {
+        base::TimingLogger::ScopedSplit split("SweepZygote", &timings_);
+        // Zygote sweep takes care of dirtying cards and clearing live bits, does not free actual
+        // memory.
+        accounting::SpaceBitmap::SweepWalk(*live_bitmap, *mark_bitmap, begin, end,
+                                           &ZygoteSweepCallback, reinterpret_cast<void*>(&scc));
+      }
+    }
+  }
+
+  SweepLargeObjects(swap_bitmaps);
+}
+
+void SemiSpace::SweepLargeObjects(bool swap_bitmaps) {
+  base::TimingLogger::ScopedSplit("SweepLargeObjects", &timings_);
+  // Sweep large objects
+  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
+  accounting::SpaceSetMap* large_live_objects = large_object_space->GetLiveObjects();
+  accounting::SpaceSetMap* large_mark_objects = large_object_space->GetMarkObjects();
+  if (swap_bitmaps) {
+    std::swap(large_live_objects, large_mark_objects);
+  }
+  // O(n*log(n)) but hopefully there are not too many large objects.
+  size_t freed_objects = 0;
+  size_t freed_bytes = 0;
+  Thread* self = Thread::Current();
+  for (const Object* obj : large_live_objects->GetObjects()) {
+    if (!large_mark_objects->Test(obj)) {
+      freed_bytes += large_object_space->Free(self, const_cast<Object*>(obj));
+      ++freed_objects;
+    }
+  }
+  freed_large_objects_.fetch_add(freed_objects);
+  freed_large_object_bytes_.fetch_add(freed_bytes);
+  GetHeap()->RecordFree(freed_objects, freed_bytes);
+}
+
+// Process the "referent" field in a java.lang.ref.Reference.  If the referent has not yet been
+// marked, put it on the appropriate list in the heap for later processing.
+void SemiSpace::DelayReferenceReferent(mirror::Class* klass, Object* obj) {
+  DCHECK(klass != nullptr);
+  DCHECK(klass->IsReferenceClass());
+  DCHECK(obj != nullptr);
+  Object* referent = heap_->GetReferenceReferent(obj);
+  if (referent != nullptr) {
+    Object* forward_address = GetMarkedForwardAddress(referent);
+    if (forward_address == nullptr) {
+      Thread* self = Thread::Current();
+      // TODO: Remove these locks, and use atomic stacks for storing references?
+      // We need to check that the references haven't already been enqueued since we can end up
+      // scanning the same reference multiple times due to dirty cards.
+      if (klass->IsSoftReferenceClass()) {
+        MutexLock mu(self, *heap_->GetSoftRefQueueLock());
+        if (!heap_->IsEnqueued(obj)) {
+          heap_->EnqueuePendingReference(obj, &soft_reference_list_);
+        }
+      } else if (klass->IsWeakReferenceClass()) {
+        MutexLock mu(self, *heap_->GetWeakRefQueueLock());
+        if (!heap_->IsEnqueued(obj)) {
+          heap_->EnqueuePendingReference(obj, &weak_reference_list_);
+        }
+      } else if (klass->IsFinalizerReferenceClass()) {
+        MutexLock mu(self, *heap_->GetFinalizerRefQueueLock());
+        if (!heap_->IsEnqueued(obj)) {
+          heap_->EnqueuePendingReference(obj, &finalizer_reference_list_);
+        }
+      } else if (klass->IsPhantomReferenceClass()) {
+        MutexLock mu(self, *heap_->GetPhantomRefQueueLock());
+        if (!heap_->IsEnqueued(obj)) {
+          heap_->EnqueuePendingReference(obj, &phantom_reference_list_);
+        }
+      } else {
+        LOG(FATAL) << "Invalid reference type " << PrettyClass(klass) << " " << std::hex
+                   << klass->GetAccessFlags();
+      }
+    } else if (referent != forward_address) {
+      heap_->SetReferenceReferent(obj, forward_address);
+    }
+  }
+}
+
+// Visit all of the references of an object and update.
+void SemiSpace::ScanObject(Object* obj) {
+  DCHECK(obj != NULL);
+  DCHECK(!from_space_->HasAddress(obj)) << "Scanning object " << obj << " in from space";
+  MarkSweep::VisitObjectReferences(obj, [this](Object* obj, Object* ref, const MemberOffset& offset,
+     bool /* is_static */) ALWAYS_INLINE NO_THREAD_SAFETY_ANALYSIS {
+    mirror::Object* new_address = MarkObject(ref);
+    if (new_address != ref) {
+      DCHECK(new_address != nullptr);
+      obj->SetFieldObject(offset, new_address, false);
+    }
+  }, kMovingClasses);
+  mirror::Class* klass = obj->GetClass();
+  if (UNLIKELY(klass->IsReferenceClass())) {
+    DelayReferenceReferent(klass, obj);
+  }
+}
+
+// Scan anything that's on the mark stack.
+void SemiSpace::ProcessMarkStack(bool paused) {
+  timings_.StartSplit(paused ? "(paused)ProcessMarkStack" : "ProcessMarkStack");
+  while (!mark_stack_->IsEmpty()) {
+    ScanObject(mark_stack_->PopBack());
+  }
+  timings_.EndSplit();
+}
+
+// Walks the reference list marking any references subject to the
+// reference clearing policy.  References with a black referent are
+// removed from the list.  References with white referents biased
+// toward saving are blackened and also removed from the list.
+void SemiSpace::PreserveSomeSoftReferences(Object** list) {
+  DCHECK(list != NULL);
+  Object* clear = NULL;
+  size_t counter = 0;
+  DCHECK(mark_stack_->IsEmpty());
+  timings_.StartSplit("PreserveSomeSoftReferences");
+  while (*list != NULL) {
+    Object* ref = heap_->DequeuePendingReference(list);
+    Object* referent = heap_->GetReferenceReferent(ref);
+    if (referent == NULL) {
+      // Referent was cleared by the user during marking.
+      continue;
+    }
+    Object* forward_address = GetMarkedForwardAddress(referent);
+    bool is_marked = forward_address != nullptr;
+    if (!is_marked && ((++counter) & 1)) {
+      // Referent is white and biased toward saving, mark it.
+      forward_address = MarkObject(referent);
+      if (referent != forward_address) {
+        // Update the referent if we moved it.
+        heap_->SetReferenceReferent(ref, forward_address);
+      }
+    } else {
+      if (!is_marked) {
+        // Referent is white, queue it for clearing.
+        heap_->EnqueuePendingReference(ref, &clear);
+      } else if (referent != forward_address) {
+        CHECK(forward_address != nullptr);
+        heap_->SetReferenceReferent(ref, forward_address);
+      }
+    }
+  }
+  *list = clear;
+  timings_.EndSplit();
+  // Restart the mark with the newly black references added to the root set.
+  ProcessMarkStack(true);
+}
+
+inline Object* SemiSpace::GetMarkedForwardAddress(mirror::Object* obj) const
+    SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+  // All immune objects are assumed marked.
+  if (IsImmune(obj)) {
+    return obj;
+  }
+  if (from_space_->HasAddress(obj)) {
+    mirror::Object* forwarding_address = GetForwardingAddressInFromSpace(const_cast<Object*>(obj));
+    // If the object is forwarded then it MUST be marked.
+    if (to_space_->HasAddress(forwarding_address)) {
+      return forwarding_address;
+    }
+    // Must not be marked, return nullptr;
+    return nullptr;
+  } else if (to_space_->HasAddress(obj)) {
+    // Already forwarded, must be marked.
+    return obj;
+  }
+  return heap_->GetMarkBitmap()->Test(obj) ? obj : nullptr;
+}
+
+// Unlink the reference list clearing references objects with white
+// referents.  Cleared references registered to a reference queue are
+// scheduled for appending by the heap worker thread.
+void SemiSpace::ClearWhiteReferences(Object** list) {
+  DCHECK(list != NULL);
+  while (*list != NULL) {
+    Object* ref = heap_->DequeuePendingReference(list);
+    Object* referent = heap_->GetReferenceReferent(ref);
+    if (referent != nullptr) {
+      Object* forward_address = GetMarkedForwardAddress(referent);
+      if (forward_address == nullptr) {
+        // Referent is white, clear it.
+        heap_->ClearReferenceReferent(ref);
+        if (heap_->IsEnqueuable(ref)) {
+          heap_->EnqueueReference(ref, &cleared_reference_list_);
+        }
+      } else if (referent != forward_address) {
+        heap_->SetReferenceReferent(ref, forward_address);
+      }
+    }
+  }
+  DCHECK(*list == NULL);
+}
+
+// Enqueues finalizer references with white referents.  White
+// referents are blackened, moved to the zombie field, and the
+// referent field is cleared.
+void SemiSpace::EnqueueFinalizerReferences(Object** list) {
+  // *list = NULL;
+  // return;
+  DCHECK(list != NULL);
+  timings_.StartSplit("EnqueueFinalizerReferences");
+  MemberOffset zombie_offset = heap_->GetFinalizerReferenceZombieOffset();
+  bool has_enqueued = false;
+  while (*list != NULL) {
+    Object* ref = heap_->DequeuePendingReference(list);
+    Object* referent = heap_->GetReferenceReferent(ref);
+    if (referent != nullptr) {
+      Object* forward_address = GetMarkedForwardAddress(referent);
+      // Not marked.
+      if (forward_address == nullptr) {
+        forward_address = MarkObject(referent);
+        // If the referent is non-null the reference must be enqueuable.
+        DCHECK(heap_->IsEnqueuable(ref));
+        // Move the referent to the zombie field.
+        ref->SetFieldObject(zombie_offset, forward_address, false);
+        heap_->ClearReferenceReferent(ref);
+        heap_->EnqueueReference(ref, &cleared_reference_list_);
+        has_enqueued = true;
+      } else if (referent != forward_address) {
+        heap_->SetReferenceReferent(ref, forward_address);
+      }
+    }
+  }
+  timings_.EndSplit();
+  if (has_enqueued) {
+    ProcessMarkStack(true);
+  }
+  DCHECK(*list == NULL);
+}
+
+// Process reference class instances and schedule finalizations.
+void SemiSpace::ProcessReferences(Object** soft_references, bool clear_soft,
+                                  Object** weak_references,
+                                  Object** finalizer_references,
+                                  Object** phantom_references) {
+  CHECK(soft_references != NULL);
+  CHECK(weak_references != NULL);
+  CHECK(finalizer_references != NULL);
+  CHECK(phantom_references != NULL);
+  CHECK(mark_stack_->IsEmpty());
+
+  // Unless we are in the zygote or required to clear soft references
+  // with white references, preserve some white referents.
+  if (!clear_soft && !Runtime::Current()->IsZygote()) {
+    PreserveSomeSoftReferences(soft_references);
+  }
+
+  timings_.StartSplit("ProcessReferences");
+  // Clear all remaining soft and weak references with white
+  // referents.
+  ClearWhiteReferences(soft_references);
+  ClearWhiteReferences(weak_references);
+  timings_.EndSplit();
+
+  // Preserve all white objects with finalize methods and schedule
+  // them for finalization.
+  EnqueueFinalizerReferences(finalizer_references);
+
+  timings_.StartSplit("ProcessReferences");
+  // Clear all f-reachable soft and weak references with white
+  // referents.
+  ClearWhiteReferences(soft_references);
+  ClearWhiteReferences(weak_references);
+
+  // Clear all phantom references with white referents.
+  ClearWhiteReferences(phantom_references);
+
+  // At this point all reference lists should be empty.
+  DCHECK(*soft_references == NULL);
+  DCHECK(*weak_references == NULL);
+  DCHECK(*finalizer_references == NULL);
+  DCHECK(*phantom_references == NULL);
+  timings_.EndSplit();
+}
+
+void SemiSpace::UnBindBitmaps() {
+  base::TimingLogger::ScopedSplit split("UnBindBitmaps", &timings_);
+  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
+    if (space->IsDlMallocSpace()) {
+      space::DlMallocSpace* alloc_space = space->AsDlMallocSpace();
+      if (alloc_space->HasBoundBitmaps()) {
+        alloc_space->UnBindBitmaps();
+        heap_->GetMarkBitmap()->ReplaceBitmap(alloc_space->GetLiveBitmap(),
+                                              alloc_space->GetMarkBitmap());
+      }
+    }
+  }
+}
+
+void SemiSpace::SetToSpace(space::ContinuousMemMapAllocSpace* to_space) {
+  DCHECK(to_space != nullptr);
+  to_space_ = to_space;
+}
+
+void SemiSpace::SetFromSpace(space::ContinuousMemMapAllocSpace* from_space) {
+  DCHECK(from_space != nullptr);
+  from_space_ = from_space;
+}
+
+void SemiSpace::FinishPhase() {
+  base::TimingLogger::ScopedSplit split("FinishPhase", &timings_);
+  // Can't enqueue references if we hold the mutator lock.
+  Object* cleared_references = GetClearedReferences();
+  Heap* heap = GetHeap();
+  timings_.NewSplit("EnqueueClearedReferences");
+  heap->EnqueueClearedReferences(&cleared_references);
+
+  timings_.NewSplit("PostGcVerification");
+  heap->PostGcVerification(this);
+
+  // Null the "to" and "from" spaces since compacting from one to the other isn't valid until
+  // further action is done by the heap.
+  to_space_ = nullptr;
+  from_space_ = nullptr;
+
+  // Update the cumulative statistics
+  total_time_ns_ += GetDurationNs();
+  total_paused_time_ns_ += std::accumulate(GetPauseTimes().begin(), GetPauseTimes().end(), 0,
+                                           std::plus<uint64_t>());
+  total_freed_objects_ += GetFreedObjects() + GetFreedLargeObjects();
+  total_freed_bytes_ += GetFreedBytes() + GetFreedLargeObjectBytes();
+
+  // Ensure that the mark stack is empty.
+  CHECK(mark_stack_->IsEmpty());
+
+  // Update the cumulative loggers.
+  cumulative_timings_.Start();
+  cumulative_timings_.AddLogger(timings_);
+  cumulative_timings_.End();
+
+  // Clear all of the spaces' mark bitmaps.
+  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
+    accounting::SpaceBitmap* bitmap = space->GetMarkBitmap();
+    if (bitmap != nullptr &&
+        space->GetGcRetentionPolicy() != space::kGcRetentionPolicyNeverCollect) {
+      bitmap->Clear();
+    }
+  }
+  mark_stack_->Reset();
+
+  // Reset the marked large objects.
+  space::LargeObjectSpace* large_objects = GetHeap()->GetLargeObjectsSpace();
+  large_objects->GetMarkObjects()->Clear();
+}
+
+}  // namespace collector
+}  // namespace gc
+}  // namespace art
diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h
new file mode 100644
index 0000000..13d5195
--- /dev/null
+++ b/runtime/gc/collector/semi_space.h
@@ -0,0 +1,289 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_GC_COLLECTOR_SEMI_SPACE_H_
+#define ART_RUNTIME_GC_COLLECTOR_SEMI_SPACE_H_
+
+#include "atomic_integer.h"
+#include "barrier.h"
+#include "base/macros.h"
+#include "base/mutex.h"
+#include "garbage_collector.h"
+#include "offsets.h"
+#include "root_visitor.h"
+#include "UniquePtr.h"
+
+namespace art {
+
+namespace mirror {
+  class Class;
+  class Object;
+  template<class T> class ObjectArray;
+}  // namespace mirror
+
+class StackVisitor;
+class Thread;
+
+namespace gc {
+
+namespace accounting {
+  template <typename T> class AtomicStack;
+  class MarkIfReachesAllocspaceVisitor;
+  class ModUnionClearCardVisitor;
+  class ModUnionVisitor;
+  class ModUnionTableBitmap;
+  class MarkStackChunk;
+  typedef AtomicStack<mirror::Object*> ObjectStack;
+  class SpaceBitmap;
+}  // namespace accounting
+
+namespace space {
+  class BumpPointerSpace;
+  class ContinuousMemMapAllocSpace;
+  class ContinuousSpace;
+}  // namespace space
+
+class Heap;
+
+namespace collector {
+
+class SemiSpace : public GarbageCollector {
+ public:
+  explicit SemiSpace(Heap* heap, const std::string& name_prefix = "");
+
+  ~SemiSpace() {}
+
+  virtual void InitializePhase();
+  virtual bool IsConcurrent() const {
+    return false;
+  }
+  virtual void MarkingPhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+  virtual void ReclaimPhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+  virtual void FinishPhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+  virtual void MarkReachableObjects()
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+  virtual GcType GetGcType() const {
+    return kGcTypePartial;
+  }
+
+  // Sets which space we will be copying objects to.
+  void SetToSpace(space::ContinuousMemMapAllocSpace* to_space);
+
+  // Sets the space where we copy objects from.
+  void SetFromSpace(space::ContinuousMemMapAllocSpace* from_space);
+
+  // Initializes internal structures.
+  void Init();
+
+  // Find the default mark bitmap.
+  void FindDefaultMarkBitmap();
+
+  // Returns the new address of the object.
+  mirror::Object* MarkObject(mirror::Object* object)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+  void ScanObject(mirror::Object* obj)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+  // Marks the root set at the start of a garbage collection.
+  void MarkRoots()
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+  // Make a space immune; immune spaces have all live objects marked, that is, the mark and
+  // live bitmaps are bound together.
+  void ImmuneSpace(space::ContinuousSpace* space)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  // Bind the live bits to the mark bits of bitmaps for spaces that are never collected, i.e.
+  // the image. Mark that portion of the heap as immune.
+  virtual void BindBitmaps() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  void BindLiveToMarkBitmap(space::ContinuousSpace* space)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+  void UnBindBitmaps()
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+  void ProcessReferences(Thread* self)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  // Sweeps unmarked objects to complete the garbage collection.
+  virtual void Sweep(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+  // Sweeps unmarked large objects to complete the garbage collection.
+  void SweepLargeObjects(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+  // Sweep only pointers within an array. WARNING: Trashes objects.
+  void SweepArray(accounting::ObjectStack* allocation_stack_, bool swap_bitmaps)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+  mirror::Object* GetClearedReferences() {
+    return cleared_reference_list_;
+  }
+
+  // TODO: enable thread safety analysis when in use by multiple worker threads.
+  template <typename MarkVisitor>
+  void ScanObjectVisit(const mirror::Object* obj, const MarkVisitor& visitor)
+      NO_THREAD_SAFETY_ANALYSIS;
+
+  void SweepSystemWeaks()
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+  template <typename Visitor>
+  static void VisitObjectReferencesAndClass(mirror::Object* obj, const Visitor& visitor)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+  static mirror::Object* MarkRootCallback(mirror::Object* root, void* arg)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+ protected:
+  // Returns null if the object is not marked, otherwise returns the forwarding address (which is
+  // the object itself for non-movable objects).
+  mirror::Object* GetMarkedForwardAddress(mirror::Object* object) const;
+
+  static mirror::Object* SystemWeakIsMarkedCallback(mirror::Object* object, void* arg)
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+  // Marks a large object, returning true if the object was not already marked.
+  bool MarkLargeObject(const mirror::Object* obj)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+  static void SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+  // Special sweep for zygote that just marks objects / dirties cards.
+  static void ZygoteSweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+  // Expand mark stack to 2x its current size.
+  void ResizeMarkStack(size_t new_size);
+
+  // Returns how many threads we should use for the current GC phase based on whether we are
+  // paused and whether or not we care about pauses.
+  size_t GetThreadCount(bool paused) const;
+
+  // Returns true if an object is inside the immune region (assumed to be marked).
+  bool IsImmune(const mirror::Object* obj) const ALWAYS_INLINE {
+    return obj >= immune_begin_ && obj < immune_end_;
+  }
+
+  bool IsImmuneSpace(const space::ContinuousSpace* space) const;
+
+  static void VerifyRootCallback(const mirror::Object* root, void* arg, size_t vreg,
+                                 const StackVisitor *visitor);
+
+  void VerifyRoot(const mirror::Object* root, size_t vreg, const StackVisitor* visitor)
+      NO_THREAD_SAFETY_ANALYSIS;
+
+  template <typename Visitor>
+  static void VisitInstanceFieldsReferences(const mirror::Class* klass, const mirror::Object* obj,
+                                            const Visitor& visitor)
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+  // Visit the header, static field references, and interface pointers of a class object.
+  template <typename Visitor>
+  static void VisitClassReferences(const mirror::Class* klass, const mirror::Object* obj,
+                                   const Visitor& visitor)
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+  template <typename Visitor>
+  static void VisitStaticFieldsReferences(const mirror::Class* klass, const Visitor& visitor)
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+  template <typename Visitor>
+  static void VisitFieldsReferences(const mirror::Object* obj, uint32_t ref_offsets, bool is_static,
+                                    const Visitor& visitor)
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+  // Visit all of the references in an object array.
+  template <typename Visitor>
+  static void VisitObjectArrayReferences(const mirror::ObjectArray<mirror::Object>* array,
+                                         const Visitor& visitor)
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+  // Visits the header and field references of a data object.
+  template <typename Visitor>
+  static void VisitOtherReferences(const mirror::Class* klass, const mirror::Object* obj,
+                                   const Visitor& visitor)
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
+    return VisitInstanceFieldsReferences(klass, obj, visitor);
+  }
+
+  // Push an object onto the mark stack.
+  inline void MarkStackPush(mirror::Object* obj);
+
+  void UpdateAndMarkModUnion()
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  // Schedules an unmarked object for reference processing.
+  void DelayReferenceReferent(mirror::Class* klass, mirror::Object* reference)
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+  // Recursively blackens objects on the mark stack.
+  void ProcessMarkStack(bool paused)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+
+  void EnqueueFinalizerReferences(mirror::Object** ref)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+
+  void PreserveSomeSoftReferences(mirror::Object** ref)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+
+  void ClearWhiteReferences(mirror::Object** list)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+
+  void ProcessReferences(mirror::Object** soft_references, bool clear_soft_references,
+                         mirror::Object** weak_references,
+                         mirror::Object** finalizer_references,
+                         mirror::Object** phantom_references)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+
+  inline mirror::Object* GetForwardingAddressInFromSpace(mirror::Object* obj) const;
+
+  mirror::Object* GetForwardingAddress(mirror::Object* obj);
+
+  // Mark stack of objects waiting to be scanned; filled by MarkStackPush and drained by
+  // ProcessMarkStack.
+  accounting::ObjectStack* mark_stack_;
+
+  // Immune range; every object inside the immune range is assumed to be marked.
+  mirror::Object* immune_begin_;
+  mirror::Object* immune_end_;
+
+  // Destination and source spaces.
+  space::ContinuousMemMapAllocSpace* to_space_;
+  space::ContinuousMemMapAllocSpace* from_space_;
+
+  mirror::Object* soft_reference_list_;
+  mirror::Object* weak_reference_list_;
+  mirror::Object* finalizer_reference_list_;
+  mirror::Object* phantom_reference_list_;
+  mirror::Object* cleared_reference_list_;
+
+  Thread* self_;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(SemiSpace);
+};
+
+}  // namespace collector
+}  // namespace gc
+}  // namespace art
+
+#endif  // ART_RUNTIME_GC_COLLECTOR_SEMI_SPACE_H_
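
For reference, driving the new collector only requires pointing it at a source and a destination
space and then running it; the Heap::Compact() change later in this patch follows exactly this
pattern. A minimal sketch of that wiring (locking, error handling, and space creation elided;
"CompactOnce" is a hypothetical helper, and the spaces are assumed to already exist):

// Sketch only: mirrors the usage in Heap::Compact() further down in this change.
void CompactOnce(gc::Heap* heap,
                 gc::space::ContinuousMemMapAllocSpace* source_space,
                 gc::space::ContinuousMemMapAllocSpace* target_space) {
  gc::collector::SemiSpace semi_space(heap);
  semi_space.SetFromSpace(source_space);  // Space being evacuated.
  semi_space.SetToSpace(target_space);    // Space that receives the live objects.
  semi_space.Run(false);                  // One copying collection; don't clear soft references.
  // FinishPhase() nulls the collector's space pointers, so SetFromSpace()/SetToSpace() must be
  // called again before the next collection.
}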
diff --git a/runtime/gc/collector/sticky_mark_sweep.cc b/runtime/gc/collector/sticky_mark_sweep.cc
index 9f0bf33..b27b8df 100644
--- a/runtime/gc/collector/sticky_mark_sweep.cc
+++ b/runtime/gc/collector/sticky_mark_sweep.cc
@@ -26,7 +26,7 @@
 
 StickyMarkSweep::StickyMarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix)
     : PartialMarkSweep(heap, is_concurrent,
-                       name_prefix + (name_prefix.empty() ? "" : " ") + "sticky") {
+                       name_prefix.empty() ? "sticky " : name_prefix) {
   cumulative_timings_.SetName(GetName());
 }
 
@@ -38,7 +38,8 @@
   // know what was allocated since the last GC. A side-effect of binding the allocation space mark
   // and live bitmap is that marking the objects will place them in the live bitmap.
   for (const auto& space : GetHeap()->GetContinuousSpaces()) {
-    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) {
+    if (space->IsDlMallocSpace() &&
+        space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) {
       BindLiveToMarkBitmap(space);
     }
   }
diff --git a/runtime/gc/collector/sticky_mark_sweep.h b/runtime/gc/collector/sticky_mark_sweep.h
index 8bee00f..b675877 100644
--- a/runtime/gc/collector/sticky_mark_sweep.h
+++ b/runtime/gc/collector/sticky_mark_sweep.h
@@ -31,10 +31,6 @@
     return kGcTypeSticky;
   }
 
-  // Don't need to do anything special here since we scan all the cards which may have references
-  // to the newly allocated objects.
-  virtual void UpdateAndMarkModUnion() { }
-
   explicit StickyMarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix = "");
   ~StickyMarkSweep() {}
 
@@ -53,6 +49,10 @@
 
   void Sweep(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
+  // Don't need to do anything special here since we scan all the cards which may have references
+  // to the newly allocated objects.
+  virtual void UpdateAndMarkModUnion() { }
+
  private:
   DISALLOW_COPY_AND_ASSIGN(StickyMarkSweep);
 };
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index 873eadc..1d3c0d8 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -20,6 +20,7 @@
 #include "heap.h"
 
 #include "debugger.h"
+#include "gc/space/bump_pointer_space-inl.h"
 #include "gc/space/dlmalloc_space-inl.h"
 #include "gc/space/large_object_space.h"
 #include "object_utils.h"
@@ -30,8 +31,9 @@
 namespace art {
 namespace gc {
 
-inline mirror::Object* Heap::AllocObjectUninstrumented(Thread* self, mirror::Class* c, size_t byte_count) {
-  DebugCheckPreconditionsForAllobObject(c, byte_count);
+inline mirror::Object* Heap::AllocNonMovableObjectUninstrumented(Thread* self, mirror::Class* c,
+                                                                 size_t byte_count) {
+  DebugCheckPreconditionsForAllocObject(c, byte_count);
   mirror::Object* obj;
   size_t bytes_allocated;
   AllocationTimer alloc_timer(this, &obj);
@@ -39,7 +41,7 @@
                                                                    &obj, &bytes_allocated);
   if (LIKELY(!large_object_allocation)) {
     // Non-large object allocation.
-    obj = AllocateUninstrumented(self, alloc_space_, byte_count, &bytes_allocated);
+    obj = AllocateUninstrumented(self, non_moving_space_, byte_count, &bytes_allocated);
     // Ensure that we did not allocate into a zygote space.
     DCHECK(obj == NULL || !have_zygote_space_ || !FindSpaceFromObject(obj, false)->IsZygoteSpace());
   }
@@ -53,10 +55,45 @@
     if (kDesiredHeapVerification > kNoHeapVerification) {
       VerifyObject(obj);
     }
-    return obj;
+  } else {
+    ThrowOutOfMemoryError(self, byte_count, large_object_allocation);
   }
-  ThrowOutOfMemoryError(self, byte_count, large_object_allocation);
-  return NULL;
+  if (kIsDebugBuild) {
+    self->VerifyStack();
+  }
+  return obj;
+}
+
+inline mirror::Object* Heap::AllocMovableObjectUninstrumented(Thread* self, mirror::Class* c,
+                                                              size_t byte_count) {
+  DebugCheckPreconditionsForAllocObject(c, byte_count);
+  mirror::Object* obj;
+  AllocationTimer alloc_timer(this, &obj);
+  byte_count = (byte_count + 7) & ~7;
+  if (UNLIKELY(IsOutOfMemoryOnAllocation(byte_count, false))) {
+    CollectGarbageInternal(collector::kGcTypeFull, kGcCauseForAlloc, false);
+    if (UNLIKELY(IsOutOfMemoryOnAllocation(byte_count, true))) {
+      CollectGarbageInternal(collector::kGcTypeFull, kGcCauseForAlloc, true);
+    }
+  }
+  obj = bump_pointer_space_->AllocNonvirtual(byte_count);
+  if (LIKELY(obj != NULL)) {
+    obj->SetClass(c);
+    DCHECK(!obj->IsClass());
+    // Record allocation after since we want to use the atomic add for the atomic fence to guard
+    // the SetClass since we do not want the class to appear NULL in another thread.
+    num_bytes_allocated_.fetch_add(byte_count);
+    DCHECK(!Dbg::IsAllocTrackingEnabled());
+    if (kDesiredHeapVerification > kNoHeapVerification) {
+      VerifyObject(obj);
+    }
+  } else {
+    ThrowOutOfMemoryError(self, byte_count, false);
+  }
+  if (kIsDebugBuild) {
+    self->VerifyStack();
+  }
+  return obj;
 }
 
 inline size_t Heap::RecordAllocationUninstrumented(size_t size, mirror::Object* obj) {
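
Note that the uninstrumented path above pads byte_count with (byte_count + 7) & ~7 while the
instrumented path further down uses RoundUp(byte_count, 8); both are the standard power-of-two
round-up to the 8-byte bump pointer alignment. A small standalone check of that equivalence
(illustrative only, not ART code):

#include <cstddef>

// The bit trick only works because 8 is a power of two.
constexpr size_t RoundUpTo8(size_t n) { return (n + 7) & ~static_cast<size_t>(7); }
static_assert(RoundUpTo8(1) == 8, "a 1 byte request occupies one 8 byte slot");
static_assert(RoundUpTo8(8) == 8, "already aligned sizes are unchanged");
static_assert(RoundUpTo8(13) == 16, "13 bytes round up to 16");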
@@ -124,7 +161,7 @@
   return large_object_allocation;
 }
 
-inline void Heap::DebugCheckPreconditionsForAllobObject(mirror::Class* c, size_t byte_count) {
+inline void Heap::DebugCheckPreconditionsForAllocObject(mirror::Class* c, size_t byte_count) {
   DCHECK(c == NULL || (c->IsClassClass() && byte_count >= sizeof(mirror::Class)) ||
          (c->IsVariableSize() || c->GetObjectSize() == byte_count) ||
          strlen(ClassHelper(c).GetDescriptor()) == 0);
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index de3ab0e..69ca620 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -30,11 +30,14 @@
 #include "gc/accounting/atomic_stack.h"
 #include "gc/accounting/card_table-inl.h"
 #include "gc/accounting/heap_bitmap-inl.h"
+#include "gc/accounting/mod_union_table.h"
 #include "gc/accounting/mod_union_table-inl.h"
 #include "gc/accounting/space_bitmap-inl.h"
 #include "gc/collector/mark_sweep-inl.h"
 #include "gc/collector/partial_mark_sweep.h"
+#include "gc/collector/semi_space.h"
 #include "gc/collector/sticky_mark_sweep.h"
+#include "gc/space/bump_pointer_space.h"
 #include "gc/space/dlmalloc_space-inl.h"
 #include "gc/space/image_space.h"
 #include "gc/space/large_object_space.h"
@@ -70,9 +73,8 @@
            bool concurrent_gc, size_t parallel_gc_threads, size_t conc_gc_threads,
            bool low_memory_mode, size_t long_pause_log_threshold, size_t long_gc_log_threshold,
            bool ignore_max_footprint)
-    : alloc_space_(NULL),
-      card_table_(NULL),
-      concurrent_gc_(concurrent_gc),
+    : non_moving_space_(NULL),
+      concurrent_gc_(!kMovingCollector && concurrent_gc),
       parallel_gc_threads_(parallel_gc_threads),
       conc_gc_threads_(conc_gc_threads),
       low_memory_mode_(low_memory_mode),
@@ -92,6 +94,7 @@
       max_allowed_footprint_(initial_size),
       native_footprint_gc_watermark_(initial_size),
       native_footprint_limit_(2 * initial_size),
+      native_need_to_run_finalization_(false),
       activity_thread_class_(NULL),
       application_thread_class_(NULL),
       activity_thread_(NULL),
@@ -122,7 +125,9 @@
        * searching.
        */
       max_allocation_stack_size_(kGCALotMode ? kGcAlotInterval
-          : (kDesiredHeapVerification > kNoHeapVerification) ? KB : MB),
+          : (kDesiredHeapVerification > kVerifyAllFast) ? KB : MB),
+      bump_pointer_space_(nullptr),
+      temp_space_(nullptr),
       reference_referent_offset_(0),
       reference_queue_offset_(0),
       reference_queueNext_offset_(0),
@@ -134,6 +139,7 @@
       total_wait_time_(0),
       total_allocation_time_(0),
       verify_object_mode_(kHeapVerificationNotPermitted),
+      gc_disable_count_(0),
       running_on_valgrind_(RUNNING_ON_VALGRIND) {
   if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
     LOG(INFO) << "Heap() entering";
@@ -147,7 +153,7 @@
   if (!image_file_name.empty()) {
     space::ImageSpace* image_space = space::ImageSpace::Create(image_file_name.c_str());
     CHECK(image_space != NULL) << "Failed to create space for " << image_file_name;
-    AddContinuousSpace(image_space);
+    AddSpace(image_space);
     // Oat files referenced by image files immediately follow them in memory, ensure alloc space
     // isn't going to get in the middle
     byte* oat_file_end_addr = image_space->GetImageHeader().GetOatFileEnd();
@@ -159,13 +165,28 @@
     }
   }
 
-  alloc_space_ = space::DlMallocSpace::Create(Runtime::Current()->IsZygote() ? "zygote space" : "alloc space",
-                                              initial_size,
-                                              growth_limit, capacity,
-                                              requested_alloc_space_begin);
-  CHECK(alloc_space_ != NULL) << "Failed to create alloc space";
-  alloc_space_->SetFootprintLimit(alloc_space_->Capacity());
-  AddContinuousSpace(alloc_space_);
+  const char* name = Runtime::Current()->IsZygote() ? "zygote space" : "alloc space";
+  non_moving_space_ = space::DlMallocSpace::Create(name, initial_size, growth_limit, capacity,
+                                                   requested_alloc_space_begin);
+
+  if (kMovingCollector) {
+    // TODO: Place bump-pointer spaces somewhere to minimize size of card table.
+    // TODO: Having 3+ spaces as big as the large heap size can cause virtual memory fragmentation
+    // issues.
+    const size_t bump_pointer_space_size = std::min(non_moving_space_->Capacity(), 128 * MB);
+    bump_pointer_space_ = space::BumpPointerSpace::Create("Bump pointer space",
+                                                          bump_pointer_space_size, nullptr);
+    CHECK(bump_pointer_space_ != nullptr) << "Failed to create bump pointer space";
+    AddSpace(bump_pointer_space_);
+    temp_space_ = space::BumpPointerSpace::Create("Bump pointer space 2", bump_pointer_space_size,
+                                                  nullptr);
+    CHECK(temp_space_ != nullptr) << "Failed to create bump pointer space";
+    AddSpace(temp_space_);
+  }
+
+  CHECK(non_moving_space_ != NULL) << "Failed to create non-moving space";
+  non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
+  AddSpace(non_moving_space_);
 
   // Allocate the large object space.
   const bool kUseFreeListSpaceForLOS = false;
@@ -175,22 +196,23 @@
     large_object_space_ = space::LargeObjectMapSpace::Create("large object space");
   }
   CHECK(large_object_space_ != NULL) << "Failed to create large object space";
-  AddDiscontinuousSpace(large_object_space_);
+  AddSpace(large_object_space_);
 
   // Compute heap capacity. Continuous spaces are sorted in order of Begin().
+  CHECK(!continuous_spaces_.empty());
+  // Relies on the spaces being sorted.
   byte* heap_begin = continuous_spaces_.front()->Begin();
-  size_t heap_capacity = continuous_spaces_.back()->End() - continuous_spaces_.front()->Begin();
-  if (continuous_spaces_.back()->IsDlMallocSpace()) {
-    heap_capacity += continuous_spaces_.back()->AsDlMallocSpace()->NonGrowthLimitCapacity();
-  }
+  byte* heap_end = continuous_spaces_.back()->Limit();
+  size_t heap_capacity = heap_end - heap_begin;
 
   // Allocate the card table.
   card_table_.reset(accounting::CardTable::Create(heap_begin, heap_capacity));
   CHECK(card_table_.get() != NULL) << "Failed to create card table";
 
+  // Use a card cache mod-union table for now since it makes it easier to update references
+  // into the copying spaces.
   accounting::ModUnionTable* mod_union_table =
-      new accounting::ModUnionTableToZygoteAllocspace("Image mod-union table", this,
-                                                      GetImageSpace());
+      new accounting::ModUnionTableCardCache("Image mod-union table", this, GetImageSpace());
   CHECK(mod_union_table != nullptr) << "Failed to create image mod-union table";
   AddModUnionTable(mod_union_table);
 
@@ -223,19 +245,23 @@
 
   if (ignore_max_footprint_) {
     SetIdealFootprint(std::numeric_limits<size_t>::max());
-    concurrent_start_bytes_ = max_allowed_footprint_;
+    concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
   }
+  CHECK_NE(max_allowed_footprint_, 0U);
 
   // Create our garbage collectors.
-  for (size_t i = 0; i < 2; ++i) {
-    const bool concurrent = i != 0;
-    mark_sweep_collectors_.push_back(new collector::MarkSweep(this, concurrent));
-    mark_sweep_collectors_.push_back(new collector::PartialMarkSweep(this, concurrent));
-    mark_sweep_collectors_.push_back(new collector::StickyMarkSweep(this, concurrent));
+  if (!kMovingCollector) {
+    for (size_t i = 0; i < 2; ++i) {
+      const bool concurrent = i != 0;
+      garbage_collectors_.push_back(new collector::MarkSweep(this, concurrent));
+      garbage_collectors_.push_back(new collector::PartialMarkSweep(this, concurrent));
+      garbage_collectors_.push_back(new collector::StickyMarkSweep(this, concurrent));
+    }
+  } else {
+    semi_space_collector_ = new collector::SemiSpace(this);
+    garbage_collectors_.push_back(semi_space_collector_);
   }
 
-  CHECK_NE(max_allowed_footprint_, 0U);
-
   if (running_on_valgrind_) {
     Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints();
   }
@@ -245,6 +271,41 @@
   }
 }
 
+bool Heap::IsCompilingBoot() const {
+  for (const auto& space : continuous_spaces_) {
+    if (space->IsImageSpace()) {
+      return false;
+    } else if (space->IsZygoteSpace()) {
+      return false;
+    }
+  }
+  return true;
+}
+
+bool Heap::HasImageSpace() const {
+  for (const auto& space : continuous_spaces_) {
+    if (space->IsImageSpace()) {
+      return true;
+    }
+  }
+  return false;
+}
+
+void Heap::IncrementDisableGC(Thread* self) {
+  // Need to do this holding the lock to prevent races where the GC is about to run / running when
+  // we attempt to disable it.
+  ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
+  MutexLock mu(self, *gc_complete_lock_);
+  WaitForGcToCompleteLocked(self);
+  ++gc_disable_count_;
+}
+
+void Heap::DecrementDisableGC(Thread* self) {
+  MutexLock mu(self, *gc_complete_lock_);
+  CHECK_GE(gc_disable_count_, 0U);
+  --gc_disable_count_;
+}
+
 void Heap::CreateThreadPool() {
   const size_t num_threads = std::max(parallel_gc_threads_, conc_gc_threads_);
   if (num_threads != 0) {
@@ -252,12 +313,49 @@
   }
 }
 
+void Heap::VisitObjects(ObjectVisitorCallback callback, void* arg) {
+  // Visit objects in bump pointer space.
+  Thread* self = Thread::Current();
+  // TODO: Use reference block.
+  std::vector<SirtRef<mirror::Object>*> saved_refs;
+  if (bump_pointer_space_ != nullptr) {
+    // Need to put all of these in SIRTs since the callback may trigger a GC. TODO: Use a better
+    // data structure.
+    mirror::Object* obj = reinterpret_cast<mirror::Object*>(bump_pointer_space_->Begin());
+    const mirror::Object* end = reinterpret_cast<const mirror::Object*>(
+        bump_pointer_space_->End());
+    while (obj < end) {
+      saved_refs.push_back(new SirtRef<mirror::Object>(self, obj));
+      obj = space::BumpPointerSpace::GetNextObject(obj);
+    }
+  }
+  // TODO: Switch to standard begin and end to use a range-based loop.
+  for (mirror::Object** it = allocation_stack_->Begin(), **end = allocation_stack_->End();
+      it < end; ++it) {
+    mirror::Object* obj = *it;
+    // Objects in the allocation stack might be in a movable space.
+    saved_refs.push_back(new SirtRef<mirror::Object>(self, obj));
+  }
+  GetLiveBitmap()->Walk(callback, arg);
+  for (const auto& ref : saved_refs) {
+    callback(ref->get(), arg);
+  }
+  // Need to free the SIRTs in the reverse order that they were allocated.
+  for (size_t i = saved_refs.size(); i != 0; --i) {
+    delete saved_refs[i - 1];
+  }
+}
+
+void Heap::MarkAllocStackAsLive(accounting::ObjectStack* stack) {
+  MarkAllocStack(non_moving_space_->GetLiveBitmap(), large_object_space_->GetLiveObjects(), stack);
+}
+
 void Heap::DeleteThreadPool() {
   thread_pool_.reset(nullptr);
 }
 
 static bool ReadStaticInt(JNIEnvExt* env, jclass clz, const char* name, int* out_value) {
-  CHECK(out_value != NULL);
+  DCHECK(out_value != NULL);
   jfieldID field = env->GetStaticFieldID(clz, name, "I");
   if (field == NULL) {
     env->ExceptionClear();
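
The new VisitObjects() entry point above visits objects in the bump pointer space, the allocation
stack, and the live bitmaps, so a single callback sees every object no matter which space it lives
in. A minimal sketch of a caller (the callback name and helper are hypothetical; it assumes the
heap is reachable via Runtime::Current()->GetHeap() and that the caller holds whatever locks the
callback body needs):

// Hypothetical example: count every object the heap currently knows about.
static void CountObjectCallback(mirror::Object* obj, void* arg) {
  size_t* const count = reinterpret_cast<size_t*>(arg);
  if (obj != nullptr) {
    ++(*count);
  }
}

size_t CountHeapObjects() {
  size_t count = 0;
  Runtime::Current()->GetHeap()->VisitObjects(CountObjectCallback, &count);
  return count;
}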
@@ -374,62 +472,71 @@
   }
 }
 
-void Heap::AddContinuousSpace(space::ContinuousSpace* space) {
-  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
+void Heap::AddSpace(space::Space* space) {
   DCHECK(space != NULL);
-  DCHECK(space->GetLiveBitmap() != NULL);
-  live_bitmap_->AddContinuousSpaceBitmap(space->GetLiveBitmap());
-  DCHECK(space->GetMarkBitmap() != NULL);
-  mark_bitmap_->AddContinuousSpaceBitmap(space->GetMarkBitmap());
-  continuous_spaces_.push_back(space);
-  if (space->IsDlMallocSpace() && !space->IsLargeObjectSpace()) {
-    alloc_space_ = space->AsDlMallocSpace();
-  }
-
-  // Ensure that spaces remain sorted in increasing order of start address (required for CMS finger)
-  std::sort(continuous_spaces_.begin(), continuous_spaces_.end(),
-            [](const space::ContinuousSpace* a, const space::ContinuousSpace* b) {
-              return a->Begin() < b->Begin();
-            });
-
-  // Ensure that ImageSpaces < ZygoteSpaces < AllocSpaces so that we can do address based checks to
-  // avoid redundant marking.
-  bool seen_zygote = false, seen_alloc = false;
-  for (const auto& space : continuous_spaces_) {
-    if (space->IsImageSpace()) {
-      DCHECK(!seen_zygote);
-      DCHECK(!seen_alloc);
-    } else if (space->IsZygoteSpace()) {
-      DCHECK(!seen_alloc);
-      seen_zygote = true;
-    } else if (space->IsDlMallocSpace()) {
-      seen_alloc = true;
+  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
+  if (space->IsContinuousSpace()) {
+    DCHECK(!space->IsDiscontinuousSpace());
+    space::ContinuousSpace* continuous_space = space->AsContinuousSpace();
+    // Continuous spaces don't necessarily have bitmaps.
+    accounting::SpaceBitmap* live_bitmap = continuous_space->GetLiveBitmap();
+    accounting::SpaceBitmap* mark_bitmap = continuous_space->GetMarkBitmap();
+    if (live_bitmap != nullptr) {
+      DCHECK(mark_bitmap != nullptr);
+      live_bitmap_->AddContinuousSpaceBitmap(live_bitmap);
+      mark_bitmap_->AddContinuousSpaceBitmap(mark_bitmap);
     }
+
+    continuous_spaces_.push_back(continuous_space);
+    if (continuous_space->IsDlMallocSpace()) {
+      non_moving_space_ = continuous_space->AsDlMallocSpace();
+    }
+
+    // Ensure that spaces remain sorted in increasing order of start address.
+    std::sort(continuous_spaces_.begin(), continuous_spaces_.end(),
+              [](const space::ContinuousSpace* a, const space::ContinuousSpace* b) {
+      return a->Begin() < b->Begin();
+    });
+    // Ensure that ImageSpaces < ZygoteSpaces < AllocSpaces so that we can do address based checks to
+    // avoid redundant marking.
+    bool seen_zygote = false, seen_alloc = false;
+    for (const auto& space : continuous_spaces_) {
+      if (space->IsImageSpace()) {
+        CHECK(!seen_zygote);
+        CHECK(!seen_alloc);
+      } else if (space->IsZygoteSpace()) {
+        CHECK(!seen_alloc);
+        seen_zygote = true;
+      } else if (space->IsDlMallocSpace()) {
+        seen_alloc = true;
+      }
+    }
+  } else {
+    DCHECK(space->IsDiscontinuousSpace());
+    space::DiscontinuousSpace* discontinuous_space = space->AsDiscontinuousSpace();
+    DCHECK(discontinuous_space->GetLiveObjects() != nullptr);
+    live_bitmap_->AddDiscontinuousObjectSet(discontinuous_space->GetLiveObjects());
+    DCHECK(discontinuous_space->GetMarkObjects() != nullptr);
+    mark_bitmap_->AddDiscontinuousObjectSet(discontinuous_space->GetMarkObjects());
+    discontinuous_spaces_.push_back(discontinuous_space);
+  }
+  if (space->IsAllocSpace()) {
+    alloc_spaces_.push_back(space->AsAllocSpace());
   }
 }
 
 void Heap::RegisterGCAllocation(size_t bytes) {
-  if (this != NULL) {
+  if (this != nullptr) {
     gc_memory_overhead_.fetch_add(bytes);
   }
 }
 
 void Heap::RegisterGCDeAllocation(size_t bytes) {
-  if (this != NULL) {
+  if (this != nullptr) {
     gc_memory_overhead_.fetch_sub(bytes);
   }
 }
 
-void Heap::AddDiscontinuousSpace(space::DiscontinuousSpace* space) {
-  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
-  DCHECK(space != NULL);
-  DCHECK(space->GetLiveObjects() != NULL);
-  live_bitmap_->AddDiscontinuousObjectSet(space->GetLiveObjects());
-  DCHECK(space->GetMarkObjects() != NULL);
-  mark_bitmap_->AddDiscontinuousObjectSet(space->GetMarkObjects());
-  discontinuous_spaces_.push_back(space);
-}
-
 void Heap::DumpGcPerformanceInfo(std::ostream& os) {
   // Dump cumulative timings.
   os << "Dumping cumulative Gc timings\n";
@@ -437,7 +544,7 @@
 
   // Dump cumulative loggers for each GC type.
   uint64_t total_paused_time = 0;
-  for (const auto& collector : mark_sweep_collectors_) {
+  for (const auto& collector : garbage_collectors_) {
     CumulativeLogger& logger = collector->GetCumulativeTimings();
     if (logger.GetTotalNs() != 0) {
       os << Dumpable<CumulativeLogger>(logger);
@@ -480,17 +587,14 @@
 }
 
 Heap::~Heap() {
+  VLOG(heap) << "Starting ~Heap()";
   if (kDumpGcPerformanceOnShutdown) {
     DumpGcPerformanceInfo(LOG(INFO));
   }
-
-  STLDeleteElements(&mark_sweep_collectors_);
-
-  // If we don't reset then the mark stack complains in it's destructor.
+  STLDeleteElements(&garbage_collectors_);
+  // If we don't reset then the mark stack complains in its destructor.
   allocation_stack_->Reset();
   live_stack_->Reset();
-
-  VLOG(heap) << "~Heap()";
   STLDeleteValues(&mod_union_tables_);
   STLDeleteElements(&continuous_spaces_);
   STLDeleteElements(&discontinuous_spaces_);
@@ -499,6 +603,7 @@
   delete weak_ref_queue_lock_;
   delete finalizer_ref_queue_lock_;
   delete phantom_ref_queue_lock_;
+  VLOG(heap) << "Finished ~Heap()";
 }
 
 space::ContinuousSpace* Heap::FindContinuousSpaceFromObject(const mirror::Object* obj,
@@ -579,7 +684,7 @@
     mirror::Object* obj = AllocateInstrumented(self, large_object_space_, byte_count, bytes_allocated);
     // Make sure that our large object didn't get placed anywhere within the space interval or else
     // it breaks the immune range.
-    DCHECK(obj == NULL ||
+    DCHECK(obj == nullptr ||
            reinterpret_cast<byte*>(obj) < continuous_spaces_.front()->Begin() ||
            reinterpret_cast<byte*>(obj) >= continuous_spaces_.back()->End());
     *obj_ptr = obj;
@@ -587,16 +692,59 @@
   return large_object_allocation;
 }
 
-mirror::Object* Heap::AllocObjectInstrumented(Thread* self, mirror::Class* c, size_t byte_count) {
-  DebugCheckPreconditionsForAllobObject(c, byte_count);
+mirror::Object* Heap::AllocMovableObjectInstrumented(Thread* self, mirror::Class* c,
+                                                     size_t byte_count) {
+  DebugCheckPreconditionsForAllocObject(c, byte_count);
+  mirror::Object* obj;
+  AllocationTimer alloc_timer(this, &obj);
+  byte_count = RoundUp(byte_count, 8);
+  if (UNLIKELY(IsOutOfMemoryOnAllocation(byte_count, false))) {
+    CollectGarbageInternal(collector::kGcTypeFull, kGcCauseForAlloc, false);
+    if (UNLIKELY(IsOutOfMemoryOnAllocation(byte_count, true))) {
+      CollectGarbageInternal(collector::kGcTypeFull, kGcCauseForAlloc, true);
+    }
+  }
+  obj = bump_pointer_space_->AllocNonvirtual(byte_count);
+  if (LIKELY(obj != NULL)) {
+    obj->SetClass(c);
+    DCHECK(!obj->IsClass());
+    // Record allocation after since we want to use the atomic add for the atomic fence to guard
+    // the SetClass since we do not want the class to appear NULL in another thread.
+    num_bytes_allocated_.fetch_add(byte_count);
+    if (Runtime::Current()->HasStatsEnabled()) {
+      RuntimeStats* thread_stats = Thread::Current()->GetStats();
+      ++thread_stats->allocated_objects;
+      thread_stats->allocated_bytes += byte_count;
+      RuntimeStats* global_stats = Runtime::Current()->GetStats();
+      ++global_stats->allocated_objects;
+      global_stats->allocated_bytes += byte_count;
+    }
+    if (Dbg::IsAllocTrackingEnabled()) {
+      Dbg::RecordAllocation(c, byte_count);
+    }
+    if (kDesiredHeapVerification > kNoHeapVerification) {
+      VerifyObject(obj);
+    }
+  } else {
+    ThrowOutOfMemoryError(self, byte_count, false);
+  }
+  if (kIsDebugBuild) {
+    self->VerifyStack();
+  }
+  return obj;
+}
+
+mirror::Object* Heap::AllocNonMovableObjectInstrumented(Thread* self, mirror::Class* c,
+                                                        size_t byte_count) {
+  DebugCheckPreconditionsForAllocObject(c, byte_count);
   mirror::Object* obj;
   size_t bytes_allocated;
   AllocationTimer alloc_timer(this, &obj);
-  bool large_object_allocation = TryAllocLargeObjectInstrumented(self, c, byte_count,
-                                                                 &obj, &bytes_allocated);
+  bool large_object_allocation = TryAllocLargeObjectInstrumented(self, c, byte_count, &obj,
+                                                                 &bytes_allocated);
   if (LIKELY(!large_object_allocation)) {
     // Non-large object allocation.
-    obj = AllocateInstrumented(self, alloc_space_, byte_count, &bytes_allocated);
+    obj = AllocateInstrumented(self, non_moving_space_, byte_count, &bytes_allocated);
     // Ensure that we did not allocate into a zygote space.
     DCHECK(obj == NULL || !have_zygote_space_ || !FindSpaceFromObject(obj, false)->IsZygoteSpace());
   }
@@ -612,28 +760,66 @@
     if (kDesiredHeapVerification > kNoHeapVerification) {
       VerifyObject(obj);
     }
-    return obj;
+  } else {
+    ThrowOutOfMemoryError(self, byte_count, large_object_allocation);
   }
-  ThrowOutOfMemoryError(self, byte_count, large_object_allocation);
-  return NULL;
+  if (kIsDebugBuild) {
+    self->VerifyStack();
+  }
+  return obj;
 }
 
-bool Heap::IsHeapAddress(const mirror::Object* obj) {
-  // Note: we deliberately don't take the lock here, and mustn't test anything that would
-  // require taking the lock.
-  if (obj == NULL) {
+void Heap::Trim() {
+  uint64_t start_ns = NanoTime();
+  // Trim the managed spaces.
+  uint64_t total_alloc_space_allocated = 0;
+  uint64_t total_alloc_space_size = 0;
+  uint64_t managed_reclaimed = 0;
+  for (const auto& space : continuous_spaces_) {
+    if (space->IsDlMallocSpace() && !space->IsZygoteSpace()) {
+      gc::space::DlMallocSpace* alloc_space = space->AsDlMallocSpace();
+      total_alloc_space_size += alloc_space->Size();
+      managed_reclaimed += alloc_space->Trim();
+    }
+  }
+  total_alloc_space_allocated = GetBytesAllocated() - large_object_space_->GetBytesAllocated() -
+      bump_pointer_space_->GetBytesAllocated();
+  const float managed_utilization = static_cast<float>(total_alloc_space_allocated) /
+      static_cast<float>(total_alloc_space_size);
+  uint64_t gc_heap_end_ns = NanoTime();
+  // Trim the native heap.
+  dlmalloc_trim(0);
+  size_t native_reclaimed = 0;
+  dlmalloc_inspect_all(DlmallocMadviseCallback, &native_reclaimed);
+  uint64_t end_ns = NanoTime();
+  VLOG(heap) << "Heap trim of managed (duration=" << PrettyDuration(gc_heap_end_ns - start_ns)
+      << ", advised=" << PrettySize(managed_reclaimed) << ") and native (duration="
+      << PrettyDuration(end_ns - gc_heap_end_ns) << ", advised=" << PrettySize(native_reclaimed)
+      << ") heaps. Managed heap utilization of " << static_cast<int>(100 * managed_utilization)
+      << "%.";
+}
+
+bool Heap::IsValidObjectAddress(const mirror::Object* obj) const {
+  // Note: we deliberately don't take the lock here, and mustn't test anything that would require
+  // taking the lock.
+  if (obj == nullptr) {
     return true;
   }
-  if (UNLIKELY(!IsAligned<kObjectAlignment>(obj))) {
-    return false;
+  return IsAligned<kObjectAlignment>(obj) && IsHeapAddress(obj);
+}
+
+bool Heap::IsHeapAddress(const mirror::Object* obj) const {
+  if (kMovingCollector && bump_pointer_space_->HasAddress(obj)) {
+    return true;
   }
-  return FindSpaceFromObject(obj, true) != NULL;
+  // TODO: This probably doesn't work for large objects.
+  return FindSpaceFromObject(obj, true) != nullptr;
 }
 
 bool Heap::IsLiveObjectLocked(const mirror::Object* obj, bool search_allocation_stack,
                               bool search_live_stack, bool sorted) {
   // Locks::heap_bitmap_lock_->AssertReaderHeld(Thread::Current());
-  if (obj == NULL || UNLIKELY(!IsAligned<kObjectAlignment>(obj))) {
+  if (obj == nullptr || UNLIKELY(!IsAligned<kObjectAlignment>(obj))) {
     return false;
   }
   space::ContinuousSpace* c_space = FindContinuousSpaceFromObject(obj, true);
@@ -642,6 +828,8 @@
     if (c_space->GetLiveBitmap()->Test(obj)) {
       return true;
     }
+  } else if (bump_pointer_space_->Contains(obj) || temp_space_->Contains(obj)) {
+    return true;
   } else {
     d_space = FindDiscontinuousSpaceFromObject(obj, true);
     if (d_space != NULL) {
@@ -699,16 +887,20 @@
   VerifyObjectBody(obj);
 }
 
-void Heap::DumpSpaces() {
+void Heap::DumpSpaces(std::ostream& stream) {
   for (const auto& space : continuous_spaces_) {
     accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
     accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
-    LOG(INFO) << space << " " << *space << "\n"
-              << live_bitmap << " " << *live_bitmap << "\n"
-              << mark_bitmap << " " << *mark_bitmap;
+    stream << space << " " << *space << "\n";
+    if (live_bitmap != nullptr) {
+      stream << live_bitmap << " " << *live_bitmap << "\n";
+    }
+    if (mark_bitmap != nullptr) {
+      stream << mark_bitmap << " " << *mark_bitmap << "\n";
+    }
   }
   for (const auto& space : discontinuous_spaces_) {
-    LOG(INFO) << space << " " << *space << "\n";
+    stream << space << " " << *space << "\n";
   }
 }
 
@@ -735,7 +927,7 @@
   const mirror::Class* c_c_c = *reinterpret_cast<mirror::Class* const *>(raw_addr);
   CHECK_EQ(c_c, c_c_c);
 
-  if (verify_object_mode_ != kVerifyAllFast) {
+  if (verify_object_mode_ > kVerifyAllFast) {
     // TODO: the bitmap tests below are racy if VerifyObjectBody is called without the
     //       heap_bitmap_lock_.
     if (!IsLiveObjectLocked(obj)) {
@@ -811,7 +1003,7 @@
 inline mirror::Object* Heap::TryToAllocateInstrumented(Thread* self, space::DlMallocSpace* space, size_t alloc_size,
                                                        bool grow, size_t* bytes_allocated) {
   if (UNLIKELY(IsOutOfMemoryOnAllocation(alloc_size, grow))) {
-    return NULL;
+    return nullptr;
   }
   if (LIKELY(!running_on_valgrind_)) {
     return space->AllocNonvirtual(self, alloc_size, bytes_allocated);
@@ -841,7 +1033,7 @@
 
   // The allocation failed. If the GC is running, block until it completes, and then retry the
   // allocation.
-  collector::GcType last_gc = WaitForConcurrentGcToComplete(self);
+  collector::GcType last_gc = WaitForGcToComplete(self);
   if (last_gc != collector::kGcTypeNone) {
     // A GC was in progress and we blocked, retry allocation now that memory has been freed.
     ptr = TryToAllocateInstrumented(self, space, alloc_size, false, bytes_allocated);
@@ -857,9 +1049,10 @@
     collector::GcType gc_type = static_cast<collector::GcType>(i);
     switch (gc_type) {
       case collector::kGcTypeSticky: {
-          const size_t alloc_space_size = alloc_space_->Size();
+          const size_t alloc_space_size = non_moving_space_->Size();
           run_gc = alloc_space_size > min_alloc_space_size_for_sticky_gc_ &&
-              alloc_space_->Capacity() - alloc_space_size >= min_remaining_space_for_sticky_gc_;
+              non_moving_space_->Capacity() - alloc_space_size >=
+              min_remaining_space_for_sticky_gc_;
           break;
         }
       case collector::kGcTypePartial:
@@ -869,7 +1062,7 @@
         run_gc = true;
         break;
       default:
-        break;
+        LOG(FATAL) << "Invalid GC type";
     }
 
     if (run_gc) {
@@ -897,11 +1090,11 @@
   // or the requested size is really big. Do another GC, collecting SoftReferences this time. The
   // VM spec requires that all SoftReferences have been collected and cleared before throwing OOME.
 
-  // OLD-TODO: wait for the finalizers from the previous GC to finish
+  // TODO: Run finalization, but this can cause more allocations to occur.
   VLOG(gc) << "Forcing collection of SoftReferences for " << PrettySize(alloc_size)
            << " allocation";
 
-  // We don't need a WaitForConcurrentGcToComplete here either.
+  // We don't need a WaitForGcToComplete here either.
   CollectGarbageInternal(collector::kGcTypeFull, kGcCauseForAlloc, true);
   return TryToAllocateInstrumented(self, space, alloc_size, true, bytes_allocated);
 }
@@ -914,51 +1107,24 @@
 
 size_t Heap::GetObjectsAllocated() const {
   size_t total = 0;
-  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
-  for (It it = continuous_spaces_.begin(), end = continuous_spaces_.end(); it != end; ++it) {
-    space::ContinuousSpace* space = *it;
-    if (space->IsDlMallocSpace()) {
-      total += space->AsDlMallocSpace()->GetObjectsAllocated();
-    }
-  }
-  typedef std::vector<space::DiscontinuousSpace*>::const_iterator It2;
-  for (It2 it = discontinuous_spaces_.begin(), end = discontinuous_spaces_.end(); it != end; ++it) {
-    space::DiscontinuousSpace* space = *it;
-    total += space->AsLargeObjectSpace()->GetObjectsAllocated();
+  for (space::AllocSpace* space : alloc_spaces_) {
+    total += space->GetObjectsAllocated();
   }
   return total;
 }
 
 size_t Heap::GetObjectsAllocatedEver() const {
   size_t total = 0;
-  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
-  for (It it = continuous_spaces_.begin(), end = continuous_spaces_.end(); it != end; ++it) {
-    space::ContinuousSpace* space = *it;
-    if (space->IsDlMallocSpace()) {
-      total += space->AsDlMallocSpace()->GetTotalObjectsAllocated();
-    }
-  }
-  typedef std::vector<space::DiscontinuousSpace*>::const_iterator It2;
-  for (It2 it = discontinuous_spaces_.begin(), end = discontinuous_spaces_.end(); it != end; ++it) {
-    space::DiscontinuousSpace* space = *it;
-    total += space->AsLargeObjectSpace()->GetTotalObjectsAllocated();
+  for (space::AllocSpace* space : alloc_spaces_) {
+    total += space->GetTotalObjectsAllocated();
   }
   return total;
 }
 
 size_t Heap::GetBytesAllocatedEver() const {
   size_t total = 0;
-  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
-  for (It it = continuous_spaces_.begin(), end = continuous_spaces_.end(); it != end; ++it) {
-    space::ContinuousSpace* space = *it;
-    if (space->IsDlMallocSpace()) {
-      total += space->AsDlMallocSpace()->GetTotalBytesAllocated();
-    }
-  }
-  typedef std::vector<space::DiscontinuousSpace*>::const_iterator It2;
-  for (It2 it = discontinuous_spaces_.begin(), end = discontinuous_spaces_.end(); it != end; ++it) {
-    space::DiscontinuousSpace* space = *it;
-    total += space->AsLargeObjectSpace()->GetTotalBytesAllocated();
+  for (space::AllocSpace* space : alloc_spaces_) {
+    total += space->GetTotalBytesAllocated();
   }
   return total;
 }
@@ -1056,8 +1222,8 @@
   // For bitmap Visit.
   // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for
   // annotalysis on visitors.
-  void operator()(mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
-    collector::MarkSweep::VisitObjectReferences(obj, *this, true);
+  void operator()(const mirror::Object* o) const NO_THREAD_SAFETY_ANALYSIS {
+    collector::MarkSweep::VisitObjectReferences(const_cast<mirror::Object*>(o), *this, true);
   }
 
   // For MarkSweep::VisitObjectReferences.
@@ -1093,56 +1259,69 @@
 void Heap::CollectGarbage(bool clear_soft_references) {
   // Even if we waited for a GC we still need to do another GC since weaks allocated during the
   // last GC will not have necessarily been cleared.
-  Thread* self = Thread::Current();
-  WaitForConcurrentGcToComplete(self);
   CollectGarbageInternal(collector::kGcTypeFull, kGcCauseExplicit, clear_soft_references);
 }
 
 void Heap::PreZygoteFork() {
   static Mutex zygote_creation_lock_("zygote creation lock", kZygoteCreationLock);
-  // Do this before acquiring the zygote creation lock so that we don't get lock order violations.
-  CollectGarbage(false);
   Thread* self = Thread::Current();
   MutexLock mu(self, zygote_creation_lock_);
-
   // Try to see if we have any Zygote spaces.
   if (have_zygote_space_) {
     return;
   }
-
-  VLOG(heap) << "Starting PreZygoteFork with alloc space size " << PrettySize(alloc_space_->Size());
-
-  {
-    // Flush the alloc stack.
-    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
-    FlushAllocStack();
+  VLOG(heap) << "Starting PreZygoteFork";
+  // Do this before acquiring the zygote creation lock so that we don't get lock order violations.
+  CollectGarbageInternal(collector::kGcTypeFull, kGcCauseBackground, false);
+  // Trim the pages at the end of the non moving space.
+  non_moving_space_->Trim();
+  non_moving_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
+  // Create a new bump pointer space which we will compact into.
+  if (semi_space_collector_ != nullptr) {
+    space::BumpPointerSpace target_space("zygote bump space", non_moving_space_->End(),
+                                         non_moving_space_->Limit());
+    // Compact the bump pointer space to a new zygote bump pointer space.
+    temp_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
+    Compact(&target_space, bump_pointer_space_);
+    CHECK_EQ(temp_space_->GetBytesAllocated(), 0U);
+    total_objects_freed_ever_ += semi_space_collector_->GetFreedObjects();
+    total_bytes_freed_ever_ += semi_space_collector_->GetFreedBytes();
+    // Update the end and write out image.
+    non_moving_space_->SetEnd(target_space.End());
+    non_moving_space_->SetLimit(target_space.Limit());
+    accounting::SpaceBitmap* bitmap = non_moving_space_->GetLiveBitmap();
+    // Record the allocations in the bitmap.
+    VLOG(heap) << "Recording zygote allocations";
+    mirror::Object* obj = reinterpret_cast<mirror::Object*>(target_space.Begin());
+    const mirror::Object* end = reinterpret_cast<const mirror::Object*>(target_space.End());
+    while (obj < end) {
+      bitmap->Set(obj);
+      obj = space::BumpPointerSpace::GetNextObject(obj);
+    }
   }
-
-  // Turns the current alloc space into a Zygote space and obtain the new alloc space composed
-  // of the remaining available heap memory.
-  space::DlMallocSpace* zygote_space = alloc_space_;
-  alloc_space_ = zygote_space->CreateZygoteSpace("alloc space");
-  alloc_space_->SetFootprintLimit(alloc_space_->Capacity());
-
+  // Turn the current alloc space into a zygote space and obtain the new alloc space composed of
+  // the remaining available heap memory.
+  space::DlMallocSpace* zygote_space = non_moving_space_;
+  non_moving_space_ = zygote_space->CreateZygoteSpace("alloc space");
+  non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
   // Change the GC retention policy of the zygote space to only collect when full.
   zygote_space->SetGcRetentionPolicy(space::kGcRetentionPolicyFullCollect);
-  AddContinuousSpace(alloc_space_);
+  AddSpace(non_moving_space_);
   have_zygote_space_ = true;
-
+  zygote_space->InvalidateMSpace();
   // Create the zygote space mod union table.
   accounting::ModUnionTable* mod_union_table =
       new accounting::ModUnionTableCardCache("zygote space mod-union table", this, zygote_space);
   CHECK(mod_union_table != nullptr) << "Failed to create zygote space mod-union table";
   AddModUnionTable(mod_union_table);
-
   // Reset the cumulative loggers since we now have a few additional timing phases.
-  for (const auto& collector : mark_sweep_collectors_) {
+  for (const auto& collector : garbage_collectors_) {
     collector->ResetCumulativeStatistics();
   }
 }
 
 void Heap::FlushAllocStack() {
-  MarkAllocStack(alloc_space_->GetLiveBitmap(), large_object_space_->GetLiveObjects(),
+  MarkAllocStack(non_moving_space_->GetLiveBitmap(), large_object_space_->GetLiveObjects(),
                  allocation_stack_.get());
   allocation_stack_->Reset();
 }
@@ -1161,86 +1340,111 @@
   }
 }
 
+const char* PrettyCause(GcCause cause) {
+  switch (cause) {
+    case kGcCauseForAlloc: return "Alloc";
+    case kGcCauseBackground: return "Background";
+    case kGcCauseExplicit: return "Explicit";
+    default:
+      LOG(FATAL) << "Unreachable";
+  }
+  return "";
+}
 
-const char* gc_cause_and_type_strings[3][4] = {
-    {"", "GC Alloc Sticky", "GC Alloc Partial", "GC Alloc Full"},
-    {"", "GC Background Sticky", "GC Background Partial", "GC Background Full"},
-    {"", "GC Explicit Sticky", "GC Explicit Partial", "GC Explicit Full"}};
+void Heap::SwapSemiSpaces() {
+  // Swap the spaces so we allocate into the space which we just evacuated.
+  std::swap(bump_pointer_space_, temp_space_);
+}
+
+void Heap::Compact(space::ContinuousMemMapAllocSpace* target_space,
+                   space::ContinuousMemMapAllocSpace* source_space) {
+  CHECK(kMovingCollector);
+  CHECK_NE(target_space, source_space) << "In-place compaction unsupported";
+  if (target_space != source_space) {
+    semi_space_collector_->SetFromSpace(source_space);
+    semi_space_collector_->SetToSpace(target_space);
+    semi_space_collector_->Run(false);
+  }
+}
 
 collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, GcCause gc_cause,
                                                bool clear_soft_references) {
   Thread* self = Thread::Current();
-
+  Runtime* runtime = Runtime::Current();
   ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
   Locks::mutator_lock_->AssertNotHeld(self);
-
   if (self->IsHandlingStackOverflow()) {
     LOG(WARNING) << "Performing GC on a thread that is handling a stack overflow.";
   }
-
-  // Ensure there is only one GC at a time.
-  bool start_collect = false;
-  while (!start_collect) {
-    {
-      MutexLock mu(self, *gc_complete_lock_);
-      if (!is_gc_running_) {
-        is_gc_running_ = true;
-        start_collect = true;
-      }
+  {
+    gc_complete_lock_->AssertNotHeld(self);
+    MutexLock mu(self, *gc_complete_lock_);
+    // Ensure there is only one GC at a time.
+    WaitForGcToCompleteLocked(self);
+    // TODO: if another thread beat this one to do the GC, perhaps we should just return here?
+    //       Not doing at the moment to ensure soft references are cleared.
+    // GC can be disabled if someone has used GetPrimitiveArrayCritical.
+    if (gc_disable_count_ != 0) {
+      LOG(WARNING) << "Skipping GC due to disable count " << gc_disable_count_;
+      return collector::kGcTypeNone;
     }
-    if (!start_collect) {
-      // TODO: timinglog this.
-      WaitForConcurrentGcToComplete(self);
-
-      // TODO: if another thread beat this one to do the GC, perhaps we should just return here?
-      //       Not doing at the moment to ensure soft references are cleared.
-    }
+    is_gc_running_ = true;
   }
-  gc_complete_lock_->AssertNotHeld(self);
 
-  if (gc_cause == kGcCauseForAlloc && Runtime::Current()->HasStatsEnabled()) {
-    ++Runtime::Current()->GetStats()->gc_for_alloc_count;
-    ++Thread::Current()->GetStats()->gc_for_alloc_count;
+  if (gc_cause == kGcCauseForAlloc && runtime->HasStatsEnabled()) {
+    ++runtime->GetStats()->gc_for_alloc_count;
+    ++self->GetStats()->gc_for_alloc_count;
   }
 
   uint64_t gc_start_time_ns = NanoTime();
   uint64_t gc_start_size = GetBytesAllocated();
   // Approximate allocation rate in bytes / second.
-  if (UNLIKELY(gc_start_time_ns == last_gc_time_ns_)) {
-    LOG(WARNING) << "Timers are broken (gc_start_time == last_gc_time_).";
-  }
   uint64_t ms_delta = NsToMs(gc_start_time_ns - last_gc_time_ns_);
-  if (ms_delta != 0) {
+  // Back to back GCs can cause 0 ms of wait time in between GC invocations.
+  if (LIKELY(ms_delta != 0)) {
     allocation_rate_ = ((gc_start_size - last_gc_size_) * 1000) / ms_delta;
     VLOG(heap) << "Allocation rate: " << PrettySize(allocation_rate_) << "/s";
   }
 
   if (gc_type == collector::kGcTypeSticky &&
-      alloc_space_->Size() < min_alloc_space_size_for_sticky_gc_) {
+      non_moving_space_->Size() < min_alloc_space_size_for_sticky_gc_) {
     gc_type = collector::kGcTypePartial;
   }
 
   DCHECK_LT(gc_type, collector::kGcTypeMax);
   DCHECK_NE(gc_type, collector::kGcTypeNone);
-  DCHECK_LE(gc_cause, kGcCauseExplicit);
 
-  ATRACE_BEGIN(gc_cause_and_type_strings[gc_cause][gc_type]);
-
-  collector::MarkSweep* collector = NULL;
-  for (const auto& cur_collector : mark_sweep_collectors_) {
-    if (cur_collector->IsConcurrent() == concurrent_gc_ && cur_collector->GetGcType() == gc_type) {
+  collector::GarbageCollector* collector = nullptr;
+  if (kMovingCollector) {
+    gc_type = semi_space_collector_->GetGcType();
+    CHECK_EQ(temp_space_->GetObjectsAllocated(), 0U);
+    semi_space_collector_->SetFromSpace(bump_pointer_space_);
+    semi_space_collector_->SetToSpace(temp_space_);
+    mprotect(temp_space_->Begin(), temp_space_->Capacity(), PROT_READ | PROT_WRITE);
+  }
+  for (const auto& cur_collector : garbage_collectors_) {
+    if (cur_collector->IsConcurrent() == concurrent_gc_ &&
+        cur_collector->GetGcType() == gc_type) {
       collector = cur_collector;
       break;
     }
   }
+  if (kMovingCollector) {
+    gc_type = collector::kGcTypeFull;
+  }
   CHECK(collector != NULL)
       << "Could not find garbage collector with concurrent=" << concurrent_gc_
       << " and type=" << gc_type;
 
-  collector->clear_soft_references_ = clear_soft_references;
-  collector->Run();
+  ATRACE_BEGIN(StringPrintf("%s %s GC", PrettyCause(gc_cause), collector->GetName()).c_str());
+
+  collector->Run(clear_soft_references);
   total_objects_freed_ever_ += collector->GetFreedObjects();
   total_bytes_freed_ever_ += collector->GetFreedBytes();
+
+  // Grow the heap so that we know when to perform the next GC.
+  GrowForUtilization(gc_type, collector->GetDurationNs());
+
   if (care_about_pause_times_) {
     const size_t duration = collector->GetDurationNs();
     std::vector<uint64_t> pauses = collector->GetPauseTimes();
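
The allocation_rate_ update above is plain bytes-per-second arithmetic scaled by the millisecond
delta between collections. An illustrative calculation with hypothetical numbers:

#include <cstdint>

// Hypothetical: 4 MB allocated since the last GC, 500 ms between the two collections.
constexpr uint64_t kNewBytes = 4ull * 1024 * 1024;
constexpr uint64_t kMsDelta = 500;
// Same formula as ((gc_start_size - last_gc_size_) * 1000) / ms_delta above: 8 MB per second.
constexpr uint64_t kAllocationRate = (kNewBytes * 1000) / kMsDelta;
static_assert(kAllocationRate == 8ull * 1024 * 1024, "4 MB over 500 ms is 8 MB/s");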
@@ -1252,7 +1456,6 @@
         was_slow = was_slow || pause > long_pause_log_threshold_;
       }
     }
-
     if (was_slow) {
         const size_t percent_free = GetPercentFree();
         const size_t current_heap_size = GetBytesAllocated();
@@ -1327,7 +1530,6 @@
       accounting::CardTable* card_table = heap_->GetCardTable();
       accounting::ObjectStack* alloc_stack = heap_->allocation_stack_.get();
       accounting::ObjectStack* live_stack = heap_->live_stack_.get();
-
       if (!failed_) {
         // Print a message only on the first failure to prevent spam.
         LOG(ERROR) << "!!!!!!!!!!!!!!Heap corruption detected!!!!!!!!!!!!!!!!!!!";
@@ -1337,7 +1539,7 @@
         byte* card_addr = card_table->CardFromAddr(obj);
         LOG(ERROR) << "Object " << obj << " references dead object " << ref << " at offset "
                    << offset << "\n card value = " << static_cast<int>(*card_addr);
-        if (heap_->IsHeapAddress(obj->GetClass())) {
+        if (heap_->IsValidObjectAddress(obj->GetClass())) {
           LOG(ERROR) << "Obj type " << PrettyTypeOf(obj);
         } else {
           LOG(ERROR) << "Object " << obj << " class(" << obj->GetClass() << ") not a heap address";
@@ -1345,7 +1547,7 @@
 
         // Attempt to find the class inside of the recently freed objects.
         space::ContinuousSpace* ref_space = heap_->FindContinuousSpaceFromObject(ref, true);
-        if (ref_space->IsDlMallocSpace()) {
+        if (ref_space != nullptr && ref_space->IsDlMallocSpace()) {
           space::DlMallocSpace* space = ref_space->AsDlMallocSpace();
           mirror::Class* ref_class = space->FindRecentFreedObject(ref);
           if (ref_class != nullptr) {
@@ -1356,7 +1558,7 @@
           }
         }
 
-        if (ref->GetClass() != nullptr && heap_->IsHeapAddress(ref->GetClass()) &&
+        if (ref->GetClass() != nullptr && heap_->IsValidObjectAddress(ref->GetClass()) &&
             ref->GetClass()->IsClass()) {
           LOG(ERROR) << "Ref type " << PrettyTypeOf(ref);
         } else {
@@ -1427,17 +1629,25 @@
  public:
   explicit VerifyObjectVisitor(Heap* heap) : heap_(heap), failed_(false) {}
 
-  void operator()(const mirror::Object* obj) const
+  void operator()(mirror::Object* obj) const
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
     // Note: we are verifying the references in obj but not obj itself, this is because obj must
     // be live or else how did we find it in the live bitmap?
     VerifyReferenceVisitor visitor(heap_);
     // The class doesn't count as a reference but we should verify it anyways.
-    visitor(obj, obj->GetClass(), MemberOffset(0), false);
-    collector::MarkSweep::VisitObjectReferences(const_cast<mirror::Object*>(obj), visitor, true);
+    collector::MarkSweep::VisitObjectReferences(obj, visitor, true);
+    if (obj->GetClass()->IsReferenceClass()) {
+      visitor(obj, heap_->GetReferenceReferent(obj), MemberOffset(0), false);
+    }
     failed_ = failed_ || visitor.Failed();
   }
 
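+  // Static adapter so this visitor can be driven through Heap::VisitObjects' C-style callback.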
+  static void VisitCallback(mirror::Object* obj, void* arg)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+    VerifyObjectVisitor* visitor = reinterpret_cast<VerifyObjectVisitor*>(arg);
+    visitor->operator()(obj);
+  }
+
   bool Failed() const {
     return failed_;
   }
@@ -1453,18 +1663,15 @@
   // Lets sort our allocation stacks so that we can efficiently binary search them.
   allocation_stack_->Sort();
   live_stack_->Sort();
-  // Perform the verification.
   VerifyObjectVisitor visitor(this);
-  Runtime::Current()->VisitRoots(VerifyReferenceVisitor::VerifyRoots, &visitor, false, false);
-  GetLiveBitmap()->Visit(visitor);
   // Verify objects in the allocation stack since these will be objects which were:
   // 1. Allocated prior to the GC (pre GC verification).
   // 2. Allocated during the GC (pre sweep GC verification).
-  for (mirror::Object** it = allocation_stack_->Begin(); it != allocation_stack_->End(); ++it) {
-    visitor(*it);
-  }
   // We don't want to verify the objects in the live stack since they themselves may be
   // pointing to dead objects if they are not reachable.
+  VisitObjects(VerifyObjectVisitor::VisitCallback, &visitor);
+  // Verify the roots:
+  Runtime::Current()->VisitRoots(VerifyReferenceVisitor::VerifyRoots, &visitor, false, false);
   if (visitor.Failed()) {
     // Dump mod-union tables.
     for (const auto& table_pair : mod_union_tables_) {
@@ -1557,7 +1764,7 @@
   void operator()(mirror::Object* obj) const
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
     VerifyReferenceCardVisitor visitor(heap_, const_cast<bool*>(&failed_));
-    collector::MarkSweep::VisitObjectReferences(obj, visitor, true);
+    collector::MarkSweep::VisitObjectReferences(const_cast<mirror::Object*>(obj), visitor, true);
   }
 
   bool Failed() const {
@@ -1610,10 +1817,14 @@
           "ImageModUnionClearCards";
       base::TimingLogger::ScopedSplit split(name, &timings);
       table->ClearCards();
-    } else {
+    } else if (space->GetType() != space::kSpaceTypeBumpPointerSpace) {
       base::TimingLogger::ScopedSplit split("AllocSpaceClearCards", &timings);
       // No mod union table for the AllocSpace. Age the cards so that the GC knows that these cards
       // were dirty before the GC started.
+      // TODO: This does not need to be atomic.
+      // The only race is whether we end up with an aged or an unaged card. Since we checkpoint
+      // the roots and then scan / update the mod union tables afterwards, we will always scan the
+      // card either way. If we end up with the unaged card, we scan it in the pause.
       card_table_->ModifyCardsAtomic(space->Begin(), space->End(), AgeCardVisitor(), VoidFunctor());
     }
   }
@@ -1692,36 +1903,27 @@
   }
 }
 
-collector::GcType Heap::WaitForConcurrentGcToComplete(Thread* self) {
+collector::GcType Heap::WaitForGcToComplete(Thread* self) {
+  ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
+  MutexLock mu(self, *gc_complete_lock_);
+  return WaitForGcToCompleteLocked(self);
+}
+
+collector::GcType Heap::WaitForGcToCompleteLocked(Thread* self) {
   collector::GcType last_gc_type = collector::kGcTypeNone;
-  if (concurrent_gc_) {
-    ATRACE_BEGIN("GC: Wait For Concurrent");
-    bool do_wait;
-    uint64_t wait_start = NanoTime();
-    {
-      // Check if GC is running holding gc_complete_lock_.
-      MutexLock mu(self, *gc_complete_lock_);
-      do_wait = is_gc_running_;
-    }
-    if (do_wait) {
-      uint64_t wait_time;
-      // We must wait, change thread state then sleep on gc_complete_cond_;
-      ScopedThreadStateChange tsc(Thread::Current(), kWaitingForGcToComplete);
-      {
-        MutexLock mu(self, *gc_complete_lock_);
-        while (is_gc_running_) {
-          gc_complete_cond_->Wait(self);
-        }
-        last_gc_type = last_gc_type_;
-        wait_time = NanoTime() - wait_start;
-        total_wait_time_ += wait_time;
-      }
-      if (wait_time > long_pause_log_threshold_) {
-        LOG(INFO) << "WaitForConcurrentGcToComplete blocked for " << PrettyDuration(wait_time);
-      }
-    }
+  uint64_t wait_start = NanoTime();
+  while (is_gc_running_) {
+    ATRACE_BEGIN("GC: Wait For Completion");
+    // We must wait, change thread state then sleep on gc_complete_cond_;
+    gc_complete_cond_->Wait(self);
+    last_gc_type = last_gc_type_;
     ATRACE_END();
   }
+  uint64_t wait_time = NanoTime() - wait_start;
+  total_wait_time_ += wait_time;
+  if (wait_time > long_pause_log_threshold_) {
+    LOG(INFO) << "WaitForGcToComplete blocked for " << PrettyDuration(wait_time);
+  }
   return last_gc_type;
 }
 
@@ -1744,6 +1946,23 @@
   max_allowed_footprint_ = max_allowed_footprint;
 }
 
+bool Heap::IsMovableObject(const mirror::Object* obj) const {
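+  // Only objects allocated in the bump pointer space can be moved by the compacting collector.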
+  if (kMovingCollector) {
+    DCHECK(!IsInTempSpace(obj));
+    if (bump_pointer_space_->HasAddress(obj)) {
+      return true;
+    }
+  }
+  return false;
+}
+
+bool Heap::IsInTempSpace(const mirror::Object* obj) const {
+  if (temp_space_->HasAddress(obj) && !temp_space_->Contains(obj)) {
+    return true;
+  }
+  return false;
+}
+
 void Heap::UpdateMaxNativeFootprint() {
   size_t native_size = native_bytes_allocated_;
   // TODO: Tune the native heap utilization to be a value other than the java heap utilization.
@@ -1773,6 +1992,7 @@
     } else if (target_size < bytes_allocated + min_free_) {
       target_size = bytes_allocated + min_free_;
     }
+    native_need_to_run_finalization_ = true;
     next_gc_type_ = collector::kGcTypeSticky;
   } else {
     // Based on how close the current heap size is to the target size, decide
@@ -1796,7 +2016,6 @@
 
     if (concurrent_gc_) {
       // Calculate when to perform the next ConcurrentGC.
-
       // Calculate the estimated GC duration.
       double gc_duration_seconds = NsToMs(gc_duration) / 1000.0;
       // Estimate how many remaining bytes we will have when we need to start the next GC.
@@ -1817,13 +2036,11 @@
       DCHECK_LE(max_allowed_footprint_, growth_limit_);
     }
   }
-
-  UpdateMaxNativeFootprint();
 }
 
 void Heap::ClearGrowthLimit() {
   growth_limit_ = capacity_;
-  alloc_space_->ClearGrowthLimit();
+  non_moving_space_->ClearGrowthLimit();
 }
 
 void Heap::SetReferenceOffsets(MemberOffset reference_referent_offset,
@@ -1843,6 +2060,12 @@
   CHECK_NE(finalizer_reference_zombie_offset_.Uint32Value(), 0U);
 }
 
+void Heap::SetReferenceReferent(mirror::Object* reference, mirror::Object* referent) {
+  DCHECK(reference != NULL);
+  DCHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
+  reference->SetFieldObject(reference_referent_offset_, referent, true);
+}
+
 mirror::Object* Heap::GetReferenceReferent(mirror::Object* reference) {
   DCHECK(reference != NULL);
   DCHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
@@ -1852,7 +2075,7 @@
 void Heap::ClearReferenceReferent(mirror::Object* reference) {
   DCHECK(reference != NULL);
   DCHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
-  reference->SetFieldObject(reference_referent_offset_, NULL, true);
+  reference->SetFieldObject(reference_referent_offset_, nullptr, true);
 }
 
 // Returns true if the reference object has not yet been enqueued.
@@ -1924,19 +2147,41 @@
       arg_array.GetArray(), arg_array.GetNumBytes(), &result, 'V');
 }
 
+void Heap::PrintReferenceQueue(std::ostream& os, mirror::Object** queue) {
+  os << "Refernece queue " << queue << "\n";
+  if (queue != nullptr) {
+    mirror::Object* list = *queue;
+    if (list != nullptr) {
+      mirror::Object* cur = list;
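+      // The pending references form a circular singly-linked list through pendingNext, so stop
+      // once we wrap back around to the head.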
+      do {
+        mirror::Object* pending_next =
+            cur->GetFieldObject<mirror::Object*>(reference_pendingNext_offset_, false);
+        os << "PendingNext=" << pending_next;
+        if (cur->GetClass()->IsFinalizerReferenceClass()) {
+          os << " Zombie=" <<
+              cur->GetFieldObject<mirror::Object*>(finalizer_reference_zombie_offset_, false);
+        }
+        os << "\n";
+        cur = pending_next;
+      } while (cur != list);
+    }
+  }
+}
+
 void Heap::EnqueueClearedReferences(mirror::Object** cleared) {
-  DCHECK(cleared != NULL);
-  if (*cleared != NULL) {
+  DCHECK(cleared != nullptr);
+  mirror::Object* list = *cleared;
+  if (list != nullptr) {
     // When a runtime isn't started there are no reference queues to care about so ignore.
     if (LIKELY(Runtime::Current()->IsStarted())) {
       ScopedObjectAccess soa(Thread::Current());
       JValue result;
       ArgArray arg_array(NULL, 0);
-      arg_array.Append(reinterpret_cast<uint32_t>(*cleared));
+      arg_array.Append(reinterpret_cast<uint32_t>(list));
       soa.DecodeMethod(WellKnownClasses::java_lang_ref_ReferenceQueue_add)->Invoke(soa.Self(),
           arg_array.GetArray(), arg_array.GetNumBytes(), &result, 'V');
     }
-    *cleared = NULL;
+    *cleared = nullptr;
   }
 }
 
@@ -1944,42 +2189,27 @@
   // Make sure that we can do a concurrent GC.
   Runtime* runtime = Runtime::Current();
   DCHECK(concurrent_gc_);
-  if (runtime == NULL || !runtime->IsFinishedStarting() ||
-      !runtime->IsConcurrentGcEnabled()) {
+  if (runtime == NULL || !runtime->IsFinishedStarting() || runtime->IsShuttingDown(self) ||
+      self->IsHandlingStackOverflow()) {
     return;
   }
-  {
-    MutexLock mu(self, *Locks::runtime_shutdown_lock_);
-    if (runtime->IsShuttingDown()) {
-      return;
-    }
-  }
-  if (self->IsHandlingStackOverflow()) {
-    return;
-  }
-
   // We already have a request pending, no reason to start more until we update
   // concurrent_start_bytes_.
   concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
-
   JNIEnv* env = self->GetJniEnv();
-  DCHECK(WellKnownClasses::java_lang_Daemons != NULL);
-  DCHECK(WellKnownClasses::java_lang_Daemons_requestGC != NULL);
+  DCHECK(WellKnownClasses::java_lang_Daemons != nullptr);
+  DCHECK(WellKnownClasses::java_lang_Daemons_requestGC != nullptr);
   env->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons,
                             WellKnownClasses::java_lang_Daemons_requestGC);
   CHECK(!env->ExceptionCheck());
 }
 
 void Heap::ConcurrentGC(Thread* self) {
-  {
-    MutexLock mu(self, *Locks::runtime_shutdown_lock_);
-    if (Runtime::Current()->IsShuttingDown()) {
-      return;
-    }
+  if (Runtime::Current()->IsShuttingDown(self)) {
+    return;
   }
-
   // Wait for any GCs currently running to finish.
-  if (WaitForConcurrentGcToComplete(self) == collector::kGcTypeNone) {
+  if (WaitForGcToComplete(self) == collector::kGcTypeNone) {
     CollectGarbageInternal(next_gc_type_, kGcCauseBackground, false);
   }
 }
@@ -1998,26 +2228,18 @@
   // We could try mincore(2) but that's only a measure of how many pages we haven't given away,
   // not how much use we're making of those pages.
   uint64_t ms_time = MilliTime();
-  // Note the large object space's bytes allocated is equal to its capacity.
-  uint64_t los_bytes_allocated = large_object_space_->GetBytesAllocated();
-  float utilization = static_cast<float>(GetBytesAllocated() - los_bytes_allocated) /
-      (GetTotalMemory() - los_bytes_allocated);
-  if ((utilization > 0.75f && !IsLowMemoryMode()) || ((ms_time - last_trim_time_ms_) < 2 * 1000)) {
-    // Don't bother trimming the alloc space if it's more than 75% utilized and low memory mode is
-    // not enabled, or if a heap trim occurred in the last two seconds.
+  // Don't bother trimming the alloc space if a heap trim occurred in the last two seconds.
+  if (ms_time - last_trim_time_ms_ < 2 * 1000) {
     return;
   }
 
   Thread* self = Thread::Current();
-  {
-    MutexLock mu(self, *Locks::runtime_shutdown_lock_);
-    Runtime* runtime = Runtime::Current();
-    if (runtime == NULL || !runtime->IsFinishedStarting() || runtime->IsShuttingDown()) {
-      // Heap trimming isn't supported without a Java runtime or Daemons (such as at dex2oat time)
-      // Also: we do not wish to start a heap trim if the runtime is shutting down (a racy check
-      // as we don't hold the lock while requesting the trim).
-      return;
-    }
+  Runtime* runtime = Runtime::Current();
+  if (runtime == nullptr || !runtime->IsFinishedStarting() || runtime->IsShuttingDown(self)) {
+    // Heap trimming isn't supported without a Java runtime or Daemons (such as at dex2oat time).
+    // Also: we do not wish to start a heap trim if the runtime is shutting down (a racy check
+    // as we don't hold the lock while requesting the trim).
+    return;
   }
 
   last_trim_time_ms_ = ms_time;
@@ -2034,50 +2256,55 @@
   }
 }
 
-size_t Heap::Trim() {
-  // Handle a requested heap trim on a thread outside of the main GC thread.
-  return alloc_space_->Trim();
-}
-
 bool Heap::IsGCRequestPending() const {
   return concurrent_start_bytes_ != std::numeric_limits<size_t>::max();
 }
 
+void Heap::RunFinalization(JNIEnv* env) {
+  // Can't do this in WellKnownClasses::Init since System is not properly set up at that point.
+  if (WellKnownClasses::java_lang_System_runFinalization == nullptr) {
+    CHECK(WellKnownClasses::java_lang_System != nullptr);
+    WellKnownClasses::java_lang_System_runFinalization =
+        CacheMethod(env, WellKnownClasses::java_lang_System, true, "runFinalization", "()V");
+    CHECK(WellKnownClasses::java_lang_System_runFinalization != nullptr);
+  }
+  env->CallStaticVoidMethod(WellKnownClasses::java_lang_System,
+                            WellKnownClasses::java_lang_System_runFinalization);
+}
+
 void Heap::RegisterNativeAllocation(JNIEnv* env, int bytes) {
+  Thread* self = ThreadForEnv(env);
+  if (native_need_to_run_finalization_) {
+    RunFinalization(env);
+    UpdateMaxNativeFootprint();
+    native_need_to_run_finalization_ = false;
+  }
   // Total number of native bytes allocated.
   native_bytes_allocated_.fetch_add(bytes);
   if (static_cast<size_t>(native_bytes_allocated_) > native_footprint_gc_watermark_) {
     // The second watermark is higher than the gc watermark. If you hit this it means you are
     // allocating native objects faster than the GC can keep up with.
     if (static_cast<size_t>(native_bytes_allocated_) > native_footprint_limit_) {
-        // Can't do this in WellKnownClasses::Init since System is not properly set up at that
-        // point.
-        if (UNLIKELY(WellKnownClasses::java_lang_System_runFinalization == NULL)) {
-          DCHECK(WellKnownClasses::java_lang_System != NULL);
-          WellKnownClasses::java_lang_System_runFinalization =
-              CacheMethod(env, WellKnownClasses::java_lang_System, true, "runFinalization", "()V");
-          CHECK(WellKnownClasses::java_lang_System_runFinalization != NULL);
-        }
-        if (WaitForConcurrentGcToComplete(ThreadForEnv(env)) != collector::kGcTypeNone) {
-          // Just finished a GC, attempt to run finalizers.
-          env->CallStaticVoidMethod(WellKnownClasses::java_lang_System,
-                                    WellKnownClasses::java_lang_System_runFinalization);
-          CHECK(!env->ExceptionCheck());
-        }
-
-        // If we still are over the watermark, attempt a GC for alloc and run finalizers.
-        if (static_cast<size_t>(native_bytes_allocated_) > native_footprint_limit_) {
-          CollectGarbageInternal(collector::kGcTypePartial, kGcCauseForAlloc, false);
-          env->CallStaticVoidMethod(WellKnownClasses::java_lang_System,
-                                    WellKnownClasses::java_lang_System_runFinalization);
-          CHECK(!env->ExceptionCheck());
-        }
-        // We have just run finalizers, update the native watermark since it is very likely that
-        // finalizers released native managed allocations.
-        UpdateMaxNativeFootprint();
-    } else {
-      if (!IsGCRequestPending()) {
-        RequestConcurrentGC(ThreadForEnv(env));
+      if (WaitForGcToComplete(self) != collector::kGcTypeNone) {
+        // Just finished a GC, attempt to run finalizers.
+        RunFinalization(env);
+        CHECK(!env->ExceptionCheck());
+      }
+      // If we still are over the watermark, attempt a GC for alloc and run finalizers.
+      if (static_cast<size_t>(native_bytes_allocated_) > native_footprint_limit_) {
+        CollectGarbageInternal(collector::kGcTypePartial, kGcCauseForAlloc, false);
+        RunFinalization(env);
+        native_need_to_run_finalization_ = false;
+        CHECK(!env->ExceptionCheck());
+      }
+      // We have just run finalizers; update the native watermark since it is very likely that
+      // finalizers released native managed allocations.
+      UpdateMaxNativeFootprint();
+    } else if (!IsGCRequestPending()) {
+      if (concurrent_gc_) {
+        RequestConcurrentGC(self);
+      } else {
+        CollectGarbageInternal(collector::kGcTypePartial, kGcCauseForAlloc, false);
       }
     }
   }
@@ -2086,26 +2313,24 @@
 void Heap::RegisterNativeFree(JNIEnv* env, int bytes) {
   int expected_size, new_size;
   do {
-      expected_size = native_bytes_allocated_.load();
-      new_size = expected_size - bytes;
-      if (UNLIKELY(new_size < 0)) {
-        ScopedObjectAccess soa(env);
-        env->ThrowNew(WellKnownClasses::java_lang_RuntimeException,
-                      StringPrintf("Attempted to free %d native bytes with only %d native bytes "
-                                   "registered as allocated", bytes, expected_size).c_str());
-        break;
-      }
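+    // Retry the update with a compare-and-swap so concurrent changes to native_bytes_allocated_
+    // are not lost.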
+    expected_size = native_bytes_allocated_.load();
+    new_size = expected_size - bytes;
+    if (UNLIKELY(new_size < 0)) {
+      ScopedObjectAccess soa(env);
+      env->ThrowNew(WellKnownClasses::java_lang_RuntimeException,
+                    StringPrintf("Attempted to free %d native bytes with only %d native bytes "
+                                 "registered as allocated", bytes, expected_size).c_str());
+      break;
+    }
   } while (!native_bytes_allocated_.compare_and_swap(expected_size, new_size));
 }
 
 int64_t Heap::GetTotalMemory() const {
   int64_t ret = 0;
   for (const auto& space : continuous_spaces_) {
-    if (space->IsImageSpace()) {
-      // Currently don't include the image space.
-    } else if (space->IsDlMallocSpace()) {
-      // Zygote or alloc space
-      ret += space->AsDlMallocSpace()->GetFootprint();
+    // Currently don't include the image space.
+    if (!space->IsImageSpace()) {
+      ret += space->Size();
     }
   }
   for (const auto& space : discontinuous_spaces_) {
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 91909e4..0fa000f 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -31,6 +31,7 @@
 #include "jni.h"
 #include "locks.h"
 #include "offsets.h"
+#include "root_visitor.h"
 #include "safe_map.h"
 #include "thread_pool.h"
 
@@ -57,16 +58,19 @@
 namespace collector {
   class GarbageCollector;
   class MarkSweep;
+  class SemiSpace;
 }  // namespace collector
 
 namespace space {
   class AllocSpace;
+  class BumpPointerSpace;
   class DiscontinuousSpace;
   class DlMallocSpace;
   class ImageSpace;
   class LargeObjectSpace;
   class Space;
   class SpaceTest;
+  class ContinuousMemMapAllocSpace;
 }  // namespace space
 
 class AgeCardVisitor {
@@ -101,13 +105,13 @@
 };
 static constexpr HeapVerificationMode kDesiredHeapVerification = kNoHeapVerification;
 
-// If true, measure the total allocation time.
-static constexpr bool kMeasureAllocationTime = false;
-// Primitive arrays larger than this size are put in the large object space.
-static constexpr size_t kLargeObjectThreshold = 3 * kPageSize;
-
 class Heap {
  public:
+  // If true, measure the total allocation time.
+  static constexpr bool kMeasureAllocationTime = false;
+  // Primitive arrays larger than this size are put in the large object space.
+  static constexpr size_t kLargeObjectThreshold = 3 * kPageSize;
+
   static constexpr size_t kDefaultInitialSize = 2 * MB;
   static constexpr size_t kDefaultMaximumSize = 32 * MB;
   static constexpr size_t kDefaultMaxFree = 2 * MB;
@@ -135,14 +139,47 @@
   // Allocates and initializes storage for an object instance.
   mirror::Object* AllocObject(Thread* self, mirror::Class* klass, size_t num_bytes)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    CHECK(!kMovingClasses);
     return AllocObjectInstrumented(self, klass, num_bytes);
   }
+  // Allocates and initializes storage for an object instance.
+  mirror::Object* AllocNonMovableObject(Thread* self, mirror::Class* klass, size_t num_bytes)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    CHECK(!kMovingClasses);
+    return AllocNonMovableObjectInstrumented(self, klass, num_bytes);
+  }
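+  // With a moving collector, instrumented allocations go down the movable (bump pointer) path;
+  // otherwise they fall back to the non-movable path.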
   mirror::Object* AllocObjectInstrumented(Thread* self, mirror::Class* klass, size_t num_bytes)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    CHECK(!kMovingClasses);
+    if (kMovingCollector) {
+      return AllocMovableObjectInstrumented(self, klass, num_bytes);
+    } else {
+      return AllocNonMovableObjectInstrumented(self, klass, num_bytes);
+    }
+  }
   mirror::Object* AllocObjectUninstrumented(Thread* self, mirror::Class* klass, size_t num_bytes)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    CHECK(!kMovingClasses);
+    if (kMovingCollector) {
+      return AllocMovableObjectUninstrumented(self, klass, num_bytes);
+    } else {
+      return AllocNonMovableObjectUninstrumented(self, klass, num_bytes);
+    }
+  }
+  mirror::Object* AllocNonMovableObjectInstrumented(Thread* self, mirror::Class* klass,
+                                                    size_t num_bytes)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  mirror::Object* AllocNonMovableObjectUninstrumented(Thread* self, mirror::Class* klass,
+                                                      size_t num_bytes)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  void DebugCheckPreconditionsForAllobObject(mirror::Class* c, size_t byte_count)
+  // Visit all of the live objects in the heap.
+  void VisitObjects(ObjectVisitorCallback callback, void* arg)
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+  void SwapSemiSpaces() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  void DebugCheckPreconditionsForAllocObject(mirror::Class* c, size_t byte_count)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   void ThrowOutOfMemoryError(size_t byte_count, bool large_object_allocation);
 
@@ -152,7 +189,7 @@
   // The given reference is believed to be to an object in the Java heap, check the soundness of it.
   void VerifyObjectImpl(const mirror::Object* o);
   void VerifyObject(const mirror::Object* o) {
-    if (o != NULL && this != NULL && verify_object_mode_ > kNoHeapVerification) {
+    if (o != nullptr && this != nullptr && verify_object_mode_ > kNoHeapVerification) {
       VerifyObjectImpl(o);
     }
   }
@@ -169,7 +206,10 @@
   // A weaker test than IsLiveObject or VerifyObject that doesn't require the heap lock,
   // and doesn't abort on error, allowing the caller to report more
   // meaningful diagnostics.
-  bool IsHeapAddress(const mirror::Object* obj);
+  bool IsValidObjectAddress(const mirror::Object* obj) const;
+
+  // Returns true if the address passed in is a heap address; it does not need to be aligned.
+  bool IsHeapAddress(const mirror::Object* obj) const;
 
   // Returns true if 'obj' is a live heap object, false otherwise (including for invalid addresses).
   // Requires the heap lock to be held.
@@ -177,6 +217,17 @@
                           bool search_live_stack = true, bool sorted = false)
       SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
+  // Returns true if there is any chance that the object (obj) will move.
+  bool IsMovableObject(const mirror::Object* obj) const;
+
+  // Returns true if an object is in the temp space; if this happens it is usually indicative of
+  // compaction-related errors.
+  bool IsInTempSpace(const mirror::Object* obj) const;
+
+  // Enables us to prevent GC until objects are released.
+  void IncrementDisableGC(Thread* self);
+  void DecrementDisableGC(Thread* self);
+
   // Initiates an explicit garbage collection.
   void CollectGarbage(bool clear_soft_references) LOCKS_EXCLUDED(Locks::mutator_lock_);
 
@@ -221,9 +272,9 @@
   // from the system. Doesn't allow the space to exceed its growth limit.
   void SetIdealFootprint(size_t max_allowed_footprint);
 
-  // Blocks the caller until the garbage collector becomes idle and returns
-  // true if we waited for the GC to complete.
-  collector::GcType WaitForConcurrentGcToComplete(Thread* self) LOCKS_EXCLUDED(gc_complete_lock_);
+  // Blocks the caller until the garbage collector becomes idle and returns the type of GC we
+  // waited for.
+  collector::GcType WaitForGcToComplete(Thread* self) LOCKS_EXCLUDED(gc_complete_lock_);
 
   const std::vector<space::ContinuousSpace*>& GetContinuousSpaces() const {
     return continuous_spaces_;
@@ -239,7 +290,10 @@
                            MemberOffset reference_pendingNext_offset,
                            MemberOffset finalizer_reference_zombie_offset);
 
-  mirror::Object* GetReferenceReferent(mirror::Object* reference);
+  void SetReferenceReferent(mirror::Object* reference, mirror::Object* referent)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  mirror::Object* GetReferenceReferent(mirror::Object* reference)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   void ClearReferenceReferent(mirror::Object* reference) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Returns true if the reference object has not yet been enqueued.
@@ -316,7 +370,7 @@
   }
 
   // Returns the number of objects currently allocated.
-  size_t GetObjectsAllocated() const;
+  size_t GetObjectsAllocated() const LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
 
   // Returns the total number of objects allocated since the heap was created.
   size_t GetObjectsAllocatedEver() const;
@@ -361,7 +415,8 @@
 
   void DumpForSigQuit(std::ostream& os);
 
-  size_t Trim();
+  // Trim the managed and native heaps by releasing unused memory back to the OS.
+  void Trim();
 
   accounting::HeapBitmap* GetLiveBitmap() SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
     return live_bitmap_.get();
@@ -375,7 +430,7 @@
     return live_stack_.get();
   }
 
-  void PreZygoteFork() LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
+  void PreZygoteFork() NO_THREAD_SAFETY_ANALYSIS;
 
   // Mark and empty stack.
   void FlushAllocStack()
@@ -386,6 +441,10 @@
                       accounting::ObjectStack* stack)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
+  // Mark the specified allocation stack as live.
+  void MarkAllocStackAsLive(accounting::ObjectStack* stack)
+        EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
   // Gets called when we get notified by ActivityThread that the process state has changed.
   void ListenForProcessStateChange();
 
@@ -393,8 +452,8 @@
   // Assumes there is only one image space.
   space::ImageSpace* GetImageSpace() const;
 
-  space::DlMallocSpace* GetAllocSpace() const {
-    return alloc_space_;
+  space::DlMallocSpace* GetNonMovingSpace() const {
+    return non_moving_space_;
   }
 
   space::LargeObjectSpace* GetLargeObjectsSpace() const {
@@ -417,7 +476,7 @@
     return phantom_ref_queue_lock_;
   }
 
-  void DumpSpaces();
+  void DumpSpaces(std::ostream& stream = LOG(INFO));
 
   // GC performance measuring
   void DumpGcPerformanceInfo(std::ostream& os);
@@ -442,7 +501,20 @@
   accounting::ModUnionTable* FindModUnionTableFromSpace(space::Space* space);
   void AddModUnionTable(accounting::ModUnionTable* mod_union_table);
 
+  mirror::Object* AllocMovableObjectInstrumented(Thread* self, mirror::Class* klass,
+                                                 size_t num_bytes)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  mirror::Object* AllocMovableObjectUninstrumented(Thread* self, mirror::Class* klass,
+                                                   size_t num_bytes)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  bool IsCompilingBoot() const;
+  bool HasImageSpace() const;
+
  private:
+  void Compact(space::ContinuousMemMapAllocSpace* target_space,
+               space::ContinuousMemMapAllocSpace* source_space);
+
   bool TryAllocLargeObjectInstrumented(Thread* self, mirror::Class* c, size_t byte_count,
                                        mirror::Object** obj_ptr, size_t* bytes_allocated)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -471,6 +543,11 @@
       LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
+  // Allocate into a specific space.
+  mirror::Object* AllocateInto(Thread* self, space::AllocSpace* space, mirror::Class* c,
+                               size_t bytes)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
   // Try to allocate a number of bytes, this function never does any GCs.
   mirror::Object* TryToAllocateInstrumented(Thread* self, space::AllocSpace* space, size_t alloc_size,
                                             bool grow, size_t* bytes_allocated)
@@ -500,6 +577,17 @@
   // Pushes a list of cleared references out to the managed heap.
   void EnqueueClearedReferences(mirror::Object** cleared_references);
 
+  // Print a reference queue.
+  void PrintReferenceQueue(std::ostream& os, mirror::Object** queue);
+
+  // Run the finalizers.
+  void RunFinalization(JNIEnv* env);
+
+  // Blocks the caller until the garbage collector becomes idle and returns the type of GC we
+  // waited for.
+  collector::GcType WaitForGcToCompleteLocked(Thread* self)
+      EXCLUSIVE_LOCKS_REQUIRED(gc_complete_lock_);
+
   void RequestHeapTrim() LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);
   void RequestConcurrentGC(Thread* self) LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);
   bool IsGCRequestPending() const;
@@ -537,9 +625,7 @@
 
   size_t GetPercentFree();
 
-  void AddContinuousSpace(space::ContinuousSpace* space) LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
-  void AddDiscontinuousSpace(space::DiscontinuousSpace* space)
-      LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
+  void AddSpace(space::Space* space) LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
 
   // No thread safety analysis since we call this everywhere and it is impossible to find a proper
   // lock ordering for it.
@@ -560,8 +646,12 @@
   // All-known discontinuous spaces, where objects may be placed throughout virtual memory.
   std::vector<space::DiscontinuousSpace*> discontinuous_spaces_;
 
-  // The allocation space we are currently allocating into.
-  space::DlMallocSpace* alloc_space_;
+  // All-known alloc spaces, where objects may be or have been allocated.
+  std::vector<space::AllocSpace*> alloc_spaces_;
+
+  // A space where non-movable objects are allocated. When compaction is enabled it contains
+  // Classes, ArtMethods, ArtFields, and other non-moving objects.
+  space::DlMallocSpace* non_moving_space_;
 
   // The large object space we are currently allocating into.
   space::LargeObjectSpace* large_object_space_;
@@ -599,6 +689,11 @@
   // If we have a zygote space.
   bool have_zygote_space_;
 
+  // Number of pinned primitive arrays in the movable space.
+  // Block all GC until this hits zero, or we hit the timeout!
+  size_t number_gc_blockers_;
+  static constexpr size_t KGCBlockTimeout = 30000;
+
   // Guards access to the state of GC, associated conditional variable is used to signal when a GC
   // completes.
   Mutex* gc_complete_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
@@ -635,6 +730,9 @@
   // The watermark at which a GC is performed inside of registerNativeAllocation.
   size_t native_footprint_limit_;
 
+  // Whether or not we need to run finalizers in the next native allocation.
+  bool native_need_to_run_finalization_;
+
   // Activity manager members.
   jclass activity_thread_class_;
   jclass application_thread_class_;
@@ -714,6 +812,11 @@
   // Second allocation stack so that we can process allocation with the heap unlocked.
   UniquePtr<accounting::ObjectStack> live_stack_;
 
+  // Bump pointer spaces.
+  space::BumpPointerSpace* bump_pointer_space_;
+  // Temp space is the space which the semispace collector copies to.
+  space::BumpPointerSpace* temp_space_;
+
   // offset of java.lang.ref.Reference.referent
   MemberOffset reference_referent_offset_;
 
@@ -748,11 +851,16 @@
   // The current state of heap verification, may be enabled or disabled.
   HeapVerificationMode verify_object_mode_;
 
-  std::vector<collector::MarkSweep*> mark_sweep_collectors_;
+  // GC disable count; it is an error to GC while this is > 0.
+  size_t gc_disable_count_ GUARDED_BY(gc_complete_lock_);
+
+  std::vector<collector::GarbageCollector*> garbage_collectors_;
+  collector::SemiSpace* semi_space_collector_;
 
   const bool running_on_valgrind_;
 
   friend class collector::MarkSweep;
+  friend class collector::SemiSpace;
   friend class VerifyReferenceCardVisitor;
   friend class VerifyReferenceVisitor;
   friend class VerifyObjectVisitor;
diff --git a/runtime/gc/heap_test.cc b/runtime/gc/heap_test.cc
index 02708e8..8af2725 100644
--- a/runtime/gc/heap_test.cc
+++ b/runtime/gc/heap_test.cc
@@ -43,12 +43,14 @@
     ScopedObjectAccess soa(Thread::Current());
     // garbage is created during ClassLinker::Init
 
-    mirror::Class* c = class_linker_->FindSystemClass("[Ljava/lang/Object;");
+    SirtRef<mirror::Class> c(soa.Self(), class_linker_->FindSystemClass("[Ljava/lang/Object;"));
     for (size_t i = 0; i < 1024; ++i) {
       SirtRef<mirror::ObjectArray<mirror::Object> > array(soa.Self(),
-          mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(), c, 2048));
+          mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(), c.get(), 2048));
       for (size_t j = 0; j < 2048; ++j) {
-        array->Set(j, mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello, world!"));
+        mirror::String* string = mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello, world!");
+        // SIRT operator -> dereferences the SIRT before running the method.
+        array->Set(j, string);
       }
     }
   }
diff --git a/runtime/gc/space/bump_pointer_space-inl.h b/runtime/gc/space/bump_pointer_space-inl.h
new file mode 100644
index 0000000..85ef2f4
--- /dev/null
+++ b/runtime/gc/space/bump_pointer_space-inl.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_GC_SPACE_BUMP_POINTER_SPACE_INL_H_
+#define ART_RUNTIME_GC_SPACE_BUMP_POINTER_SPACE_INL_H_
+
+#include "bump_pointer_space.h"
+
+namespace art {
+namespace gc {
+namespace space {
+
+inline mirror::Object* BumpPointerSpace::AllocNonvirtual(size_t num_bytes) {
+  num_bytes = RoundUp(num_bytes, kAlignment);
+  byte* old_end;
+  byte* new_end;
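+  // Lock-free bump allocation: atomically advance end_ by num_bytes and retry if another thread
+  // wins the race and moves end_ first.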
+  do {
+    old_end = end_;
+    new_end = old_end + num_bytes;
+    // If there is no more room in the region, we are out of memory.
+    if (UNLIKELY(new_end > growth_end_)) {
+      return nullptr;
+    }
+    // TODO: Use a CAS whose operand width always matches the pointer size.
+  } while (android_atomic_cas(reinterpret_cast<int32_t>(old_end),
+                              reinterpret_cast<int32_t>(new_end),
+                              reinterpret_cast<volatile int32_t*>(&end_)) != 0);
+  // TODO: Fewer statistics?
+  total_bytes_allocated_.fetch_add(num_bytes);
+  num_objects_allocated_.fetch_add(1);
+  total_objects_allocated_.fetch_add(1);
+  return reinterpret_cast<mirror::Object*>(old_end);
+}
+
+}  // namespace space
+}  // namespace gc
+}  // namespace art
+
+#endif  // ART_RUNTIME_GC_SPACE_BUMP_POINTER_SPACE_INL_H_
diff --git a/runtime/gc/space/bump_pointer_space.cc b/runtime/gc/space/bump_pointer_space.cc
new file mode 100644
index 0000000..06ba57e
--- /dev/null
+++ b/runtime/gc/space/bump_pointer_space.cc
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "bump_pointer_space.h"
+#include "bump_pointer_space-inl.h"
+#include "mirror/object-inl.h"
+#include "mirror/class-inl.h"
+
+namespace art {
+namespace gc {
+namespace space {
+
+BumpPointerSpace* BumpPointerSpace::Create(const std::string& name, size_t capacity,
+                                           byte* requested_begin) {
+  capacity = RoundUp(capacity, kPageSize);
+  std::string error_msg;
+  UniquePtr<MemMap> mem_map(MemMap::MapAnonymous(name.c_str(), requested_begin, capacity,
+                                                 PROT_READ | PROT_WRITE, &error_msg));
+  if (mem_map.get() == nullptr) {
+    LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
+        << PrettySize(capacity) << " with message " << error_msg;
+    return nullptr;
+  }
+  return new BumpPointerSpace(name, mem_map.release());
+}
+
+BumpPointerSpace::BumpPointerSpace(const std::string& name, byte* begin, byte* limit)
+    : ContinuousMemMapAllocSpace(name, nullptr, begin, begin, limit,
+                                 kGcRetentionPolicyAlwaysCollect),
+      num_objects_allocated_(0), total_bytes_allocated_(0), total_objects_allocated_(0),
+      growth_end_(limit) {
+}
+
+BumpPointerSpace::BumpPointerSpace(const std::string& name, MemMap* mem_map)
+    : ContinuousMemMapAllocSpace(name, mem_map, mem_map->Begin(), mem_map->Begin(), mem_map->End(),
+                                 kGcRetentionPolicyAlwaysCollect),
+      num_objects_allocated_(0), total_bytes_allocated_(0), total_objects_allocated_(0),
+      growth_end_(mem_map->End()) {
+}
+
+mirror::Object* BumpPointerSpace::Alloc(Thread*, size_t num_bytes, size_t* bytes_allocated) {
+  mirror::Object* ret = AllocNonvirtual(num_bytes);
+  if (LIKELY(ret != nullptr)) {
+    *bytes_allocated = num_bytes;
+  }
+  return ret;
+}
+
+size_t BumpPointerSpace::AllocationSize(const mirror::Object* obj) {
+  return AllocationSizeNonvirtual(obj);
+}
+
+void BumpPointerSpace::Clear() {
+  // Release the pages back to the operating system.
+  CHECK_NE(madvise(Begin(), Limit() - Begin(), MADV_DONTNEED), -1) << "madvise failed";
+  // Reset the end of the space back to the beginning, we move the end forward as we allocate
+  // objects.
+  SetEnd(Begin());
+  growth_end_ = Limit();
+  num_objects_allocated_ = 0;
+}
+
+void BumpPointerSpace::Dump(std::ostream& os) const {
+  os << reinterpret_cast<void*>(Begin()) << "-" << reinterpret_cast<void*>(End()) << " - "
+     << reinterpret_cast<void*>(Limit());
+}
+
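+// Objects are laid out contiguously, so the next object starts at the end of the current one,
+// rounded up to the allocation alignment.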
+mirror::Object* BumpPointerSpace::GetNextObject(mirror::Object* obj) {
+  const uintptr_t position = reinterpret_cast<uintptr_t>(obj) + obj->SizeOf();
+  return reinterpret_cast<mirror::Object*>(RoundUp(position, kAlignment));
+}
+
+}  // namespace space
+}  // namespace gc
+}  // namespace art
diff --git a/runtime/gc/space/bump_pointer_space.h b/runtime/gc/space/bump_pointer_space.h
new file mode 100644
index 0000000..0faac0c
--- /dev/null
+++ b/runtime/gc/space/bump_pointer_space.h
@@ -0,0 +1,149 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_GC_SPACE_BUMP_POINTER_SPACE_H_
+#define ART_RUNTIME_GC_SPACE_BUMP_POINTER_SPACE_H_
+
+#include "space.h"
+
+namespace art {
+namespace gc {
+
+namespace collector {
+  class MarkSweep;
+}  // namespace collector
+
+namespace space {
+
+// A bump pointer space is a space where objects may be allocated and garbage collected.
+class BumpPointerSpace : public ContinuousMemMapAllocSpace {
+ public:
+  typedef void(*WalkCallback)(void *start, void *end, size_t num_bytes, void* callback_arg);
+
+  SpaceType GetType() const {
+    return kSpaceTypeBumpPointerSpace;
+  }
+
+  // Create a bump pointer space with the requested sizes. The requested base address is not
+  // guaranteed to be granted; if it is required, the caller should call Begin on the returned
+  // space to confirm the request was granted.
+  static BumpPointerSpace* Create(const std::string& name, size_t capacity, byte* requested_begin);
+
+  // Allocate num_bytes, returns nullptr if the space is full.
+  virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated);
+  mirror::Object* AllocNonvirtual(size_t num_bytes);
+
+  // Return the storage space required by obj.
+  virtual size_t AllocationSize(const mirror::Object* obj)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  // No-ops unless we support free lists.
+  virtual size_t Free(Thread*, mirror::Object*) {
+    return 0;
+  }
+  virtual size_t FreeList(Thread*, size_t, mirror::Object**) {
+    return 0;
+  }
+
+  size_t AllocationSizeNonvirtual(const mirror::Object* obj)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    return obj->SizeOf();
+  }
+
+  // Removes the fork time growth limit on capacity, allowing the application to allocate up to the
+  // maximum reserved size of the heap.
+  void ClearGrowthLimit() {
+    growth_end_ = Limit();
+  }
+
+  // Override capacity so that we only return the possibly limited capacity.
+  size_t Capacity() const {
+    return growth_end_ - begin_;
+  }
+
+  // The total amount of memory reserved for the space.
+  size_t NonGrowthLimitCapacity() const {
+    return GetMemMap()->Size();
+  }
+
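+  // Bump pointer spaces do not maintain live or mark bitmaps, so both accessors return nullptr.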
+  accounting::SpaceBitmap* GetLiveBitmap() const {
+    return nullptr;
+  }
+
+  accounting::SpaceBitmap* GetMarkBitmap() const {
+    return nullptr;
+  }
+
+  // Clear the memory and reset the pointer to the start of the space.
+  void Clear();
+
+  void Dump(std::ostream& os) const;
+
+  uint64_t GetBytesAllocated() {
+    return Size();
+  }
+
+  uint64_t GetObjectsAllocated() {
+    return num_objects_allocated_;
+  }
+
+  uint64_t GetTotalBytesAllocated() {
+    return total_bytes_allocated_;
+  }
+
+  uint64_t GetTotalObjectsAllocated() {
+    return total_objects_allocated_;
+  }
+
+  bool Contains(const mirror::Object* obj) const {
+    const byte* byte_obj = reinterpret_cast<const byte*>(obj);
+    return byte_obj >= Begin() && byte_obj < End();
+  }
+
+  // TODO: Change this? Mainly used for compacting to a particular region of memory.
+  BumpPointerSpace(const std::string& name, byte* begin, byte* limit);
+
+  // Return the object which comes after obj, while ensuring alignment.
+  static mirror::Object* GetNextObject(mirror::Object* obj)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ protected:
+  BumpPointerSpace(const std::string& name, MemMap* mem_map);
+
+  size_t InternalAllocationSize(const mirror::Object* obj);
+  mirror::Object* AllocWithoutGrowthLocked(size_t num_bytes, size_t* bytes_allocated)
+      EXCLUSIVE_LOCKS_REQUIRED(lock_);
+
+  // Approximate counts of objects and bytes that have been allocated into the space.
+  AtomicInteger num_objects_allocated_;
+  AtomicInteger total_bytes_allocated_;
+  AtomicInteger total_objects_allocated_;
+
+  // Alignment.
+  static constexpr size_t kAlignment = 8;
+
+  byte* growth_end_;
+
+ private:
+  friend class collector::MarkSweep;
+  DISALLOW_COPY_AND_ASSIGN(BumpPointerSpace);
+};
+
+}  // namespace space
+}  // namespace gc
+}  // namespace art
+
+#endif  // ART_RUNTIME_GC_SPACE_BUMP_POINTER_SPACE_H_
diff --git a/runtime/gc/space/dlmalloc_space.cc b/runtime/gc/space/dlmalloc_space.cc
index 9ebc16a..8a5e33a 100644
--- a/runtime/gc/space/dlmalloc_space.cc
+++ b/runtime/gc/space/dlmalloc_space.cc
@@ -102,8 +102,8 @@
   }
 
   ValgrindDlMallocSpace(const std::string& name, MemMap* mem_map, void* mspace, byte* begin,
-                        byte* end, size_t growth_limit, size_t initial_size) :
-      DlMallocSpace(name, mem_map, mspace, begin, end, growth_limit) {
+                        byte* end, byte* limit, size_t growth_limit, size_t initial_size) :
+      DlMallocSpace(name, mem_map, mspace, begin, end, limit, growth_limit) {
     VALGRIND_MAKE_MEM_UNDEFINED(mem_map->Begin() + initial_size, mem_map->Size() - initial_size);
   }
 
@@ -117,15 +117,13 @@
 size_t DlMallocSpace::bitmap_index_ = 0;
 
 DlMallocSpace::DlMallocSpace(const std::string& name, MemMap* mem_map, void* mspace, byte* begin,
-                       byte* end, size_t growth_limit)
-    : MemMapSpace(name, mem_map, end - begin, kGcRetentionPolicyAlwaysCollect),
+                       byte* end, byte* limit, size_t growth_limit)
+    : ContinuousMemMapAllocSpace(name, mem_map, begin, end, limit, kGcRetentionPolicyAlwaysCollect),
       recent_free_pos_(0), total_bytes_freed_(0), total_objects_freed_(0),
       lock_("allocation space lock", kAllocSpaceLock), mspace_(mspace),
       growth_limit_(growth_limit) {
   CHECK(mspace != NULL);
-
   size_t bitmap_index = bitmap_index_++;
-
   static const uintptr_t kGcCardSize = static_cast<uintptr_t>(accounting::CardTable::kCardSize);
   CHECK(IsAligned<kGcCardSize>(reinterpret_cast<uintptr_t>(mem_map->Begin())));
   CHECK(IsAligned<kGcCardSize>(reinterpret_cast<uintptr_t>(mem_map->End())));
@@ -133,12 +131,10 @@
       StringPrintf("allocspace %s live-bitmap %d", name.c_str(), static_cast<int>(bitmap_index)),
       Begin(), Capacity()));
   DCHECK(live_bitmap_.get() != NULL) << "could not create allocspace live bitmap #" << bitmap_index;
-
   mark_bitmap_.reset(accounting::SpaceBitmap::Create(
       StringPrintf("allocspace %s mark-bitmap %d", name.c_str(), static_cast<int>(bitmap_index)),
       Begin(), Capacity()));
   DCHECK(live_bitmap_.get() != NULL) << "could not create allocspace mark bitmap #" << bitmap_index;
-
   for (auto& freed : recent_freed_objects_) {
     freed.first = nullptr;
     freed.second = nullptr;
@@ -207,12 +203,14 @@
   // Everything is set so record in immutable structure and leave
   MemMap* mem_map_ptr = mem_map.release();
   DlMallocSpace* space;
+  byte* begin = mem_map_ptr->Begin();
   if (RUNNING_ON_VALGRIND > 0) {
-    space = new ValgrindDlMallocSpace(name, mem_map_ptr, mspace, mem_map_ptr->Begin(), end,
+    space = new ValgrindDlMallocSpace(name, mem_map_ptr, mspace, begin, end, begin + capacity,
                                       growth_limit, initial_size);
   } else {
-    space = new DlMallocSpace(name, mem_map_ptr, mspace, mem_map_ptr->Begin(), end, growth_limit);
+    space = new DlMallocSpace(name, mem_map_ptr, mspace, begin, end, begin + capacity, growth_limit);
   }
+  // We start out with only the initial size possibly containing objects.
   if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
     LOG(INFO) << "Space::CreateAllocSpace exiting (" << PrettyDuration(NanoTime() - start_time)
         << " ) " << *space;
@@ -318,7 +316,8 @@
     CHECK_MEMORY_CALL(mprotect, (end, capacity - initial_size, PROT_NONE), alloc_space_name);
   }
   DlMallocSpace* alloc_space =
-      new DlMallocSpace(alloc_space_name, mem_map.release(), mspace, end_, end, growth_limit);
+      new DlMallocSpace(alloc_space_name, mem_map.release(), mspace, end_, end, limit_,
+                        growth_limit);
   live_bitmap_->SetHeapLimit(reinterpret_cast<uintptr_t>(End()));
   CHECK_EQ(live_bitmap_->HeapLimit(), reinterpret_cast<uintptr_t>(End()));
   mark_bitmap_->SetHeapLimit(reinterpret_cast<uintptr_t>(End()));
@@ -343,8 +342,7 @@
 }
 
 void DlMallocSpace::RegisterRecentFree(mirror::Object* ptr) {
-  recent_freed_objects_[recent_free_pos_].first = ptr;
-  recent_freed_objects_[recent_free_pos_].second = ptr->GetClass();
+  recent_freed_objects_[recent_free_pos_] = std::make_pair(ptr, ptr->GetClass());
   recent_free_pos_ = (recent_free_pos_ + 1) & kRecentFreeMask;
 }
 
@@ -412,8 +410,8 @@
 // Callback from dlmalloc when it needs to increase the footprint
 extern "C" void* art_heap_morecore(void* mspace, intptr_t increment) {
   Heap* heap = Runtime::Current()->GetHeap();
-  DCHECK_EQ(heap->GetAllocSpace()->GetMspace(), mspace);
-  return heap->GetAllocSpace()->MoreCore(increment);
+  DCHECK_EQ(heap->GetNonMovingSpace()->GetMspace(), mspace);
+  return heap->GetNonMovingSpace()->MoreCore(increment);
 }
 
 void* DlMallocSpace::MoreCore(intptr_t increment) {
@@ -482,6 +480,29 @@
   return mspace_footprint_limit(mspace_);
 }
 
+// Returns the old mark bitmap.
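+// After binding, the live bitmap also serves as the mark bitmap; the previous mark bitmap is
+// stashed in temp_bitmap_ until UnBindBitmaps() restores it.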
+accounting::SpaceBitmap* DlMallocSpace::BindLiveToMarkBitmap() {
+  accounting::SpaceBitmap* live_bitmap = GetLiveBitmap();
+  accounting::SpaceBitmap* mark_bitmap = mark_bitmap_.release();
+  temp_bitmap_.reset(mark_bitmap);
+  mark_bitmap_.reset(live_bitmap);
+  return mark_bitmap;
+}
+
+bool DlMallocSpace::HasBoundBitmaps() const {
+  return temp_bitmap_.get() != nullptr;
+}
+
+void DlMallocSpace::UnBindBitmaps() {
+  CHECK(HasBoundBitmaps());
+  // At this point, the temp_bitmap holds our old mark bitmap.
+  accounting::SpaceBitmap* new_bitmap = temp_bitmap_.release();
+  CHECK_EQ(mark_bitmap_.release(), live_bitmap_.get());
+  mark_bitmap_.reset(new_bitmap);
+  DCHECK(temp_bitmap_.get() == NULL);
+}
+
 void DlMallocSpace::SetFootprintLimit(size_t new_size) {
   MutexLock mu(Thread::Current(), lock_);
   VLOG(heap) << "DLMallocSpace::SetFootprintLimit " << PrettySize(new_size);
@@ -504,17 +525,25 @@
 }
 
 uint64_t DlMallocSpace::GetBytesAllocated() {
-  MutexLock mu(Thread::Current(), lock_);
-  size_t bytes_allocated = 0;
-  mspace_inspect_all(mspace_, DlmallocBytesAllocatedCallback, &bytes_allocated);
-  return bytes_allocated;
+  if (mspace_ != nullptr) {
+    MutexLock mu(Thread::Current(), lock_);
+    size_t bytes_allocated = 0;
+    mspace_inspect_all(mspace_, DlmallocBytesAllocatedCallback, &bytes_allocated);
+    return bytes_allocated;
+  } else {
+    return Size();
+  }
 }
 
 uint64_t DlMallocSpace::GetObjectsAllocated() {
-  MutexLock mu(Thread::Current(), lock_);
-  size_t objects_allocated = 0;
-  mspace_inspect_all(mspace_, DlmallocObjectsAllocatedCallback, &objects_allocated);
-  return objects_allocated;
+  if (mspace_ != nullptr) {
+    MutexLock mu(Thread::Current(), lock_);
+    size_t objects_allocated = 0;
+    mspace_inspect_all(mspace_, DlmallocObjectsAllocatedCallback, &objects_allocated);
+    return objects_allocated;
+  } else {
+    return 0;
+  }
 }
 
 }  // namespace space
diff --git a/runtime/gc/space/dlmalloc_space.h b/runtime/gc/space/dlmalloc_space.h
index 522535e..59dafe3 100644
--- a/runtime/gc/space/dlmalloc_space.h
+++ b/runtime/gc/space/dlmalloc_space.h
@@ -30,7 +30,7 @@
 namespace space {
 
 // An alloc space is a space where objects may be allocated and garbage collected.
-class DlMallocSpace : public MemMapSpace, public AllocSpace {
+class DlMallocSpace : public ContinuousMemMapAllocSpace {
  public:
   typedef void(*WalkCallback)(void *start, void *end, size_t num_bytes, void* callback_arg);
 
@@ -136,19 +136,30 @@
     return GetObjectsAllocated() + total_objects_freed_;
   }
 
+  // Returns the old mark bitmap.
+  accounting::SpaceBitmap* BindLiveToMarkBitmap();
+  bool HasBoundBitmaps() const;
+  void UnBindBitmaps();
+
   // Returns the class of a recently freed object.
   mirror::Class* FindRecentFreedObject(const mirror::Object* obj);
 
+  // Used to ensure that failure happens when you free / allocate into an invalidated space. If we
+  // don't do this, we may get heap corruption instead of a segfault at null.
+  void InvalidateMSpace() {
+    mspace_ = nullptr;
+  }
+
  protected:
   DlMallocSpace(const std::string& name, MemMap* mem_map, void* mspace, byte* begin, byte* end,
-                size_t growth_limit);
+                byte* limit, size_t growth_limit);
 
  private:
   size_t InternalAllocationSize(const mirror::Object* obj);
   mirror::Object* AllocWithoutGrowthLocked(size_t num_bytes, size_t* bytes_allocated)
       EXCLUSIVE_LOCKS_REQUIRED(lock_);
   bool Init(size_t initial_size, size_t maximum_size, size_t growth_size, byte* requested_base);
-  void RegisterRecentFree(mirror::Object* ptr);
+  void RegisterRecentFree(mirror::Object* ptr) EXCLUSIVE_LOCKS_REQUIRED(lock_);
   static void* CreateMallocSpace(void* base, size_t morecore_start, size_t initial_size);
 
   UniquePtr<accounting::SpaceBitmap> live_bitmap_;
@@ -174,7 +185,7 @@
   Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
 
   // Underlying malloc space
-  void* const mspace_;
+  void* mspace_;
 
   // The capacity of the alloc space until such time that ClearGrowthLimit is called.
   // The underlying mem_map_ controls the maximum size we allow the heap to grow to. The growth
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index e12ee06..c6177bd 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -39,8 +39,9 @@
 
 ImageSpace::ImageSpace(const std::string& name, MemMap* mem_map,
                        accounting::SpaceBitmap* live_bitmap)
-    : MemMapSpace(name, mem_map, mem_map->Size(), kGcRetentionPolicyNeverCollect) {
-  DCHECK(live_bitmap != NULL);
+    : MemMapSpace(name, mem_map, mem_map->Begin(), mem_map->End(), mem_map->End(),
+                  kGcRetentionPolicyNeverCollect) {
+  DCHECK(live_bitmap != nullptr);
   live_bitmap_.reset(live_bitmap);
 }
 
@@ -332,7 +333,7 @@
 
 void ImageSpace::Dump(std::ostream& os) const {
   os << GetType()
-      << "begin=" << reinterpret_cast<void*>(Begin())
+      << " begin=" << reinterpret_cast<void*>(Begin())
       << ",end=" << reinterpret_cast<void*>(End())
       << ",size=" << PrettySize(Size())
       << ",name=\"" << GetName() << "\"]";
diff --git a/runtime/gc/space/large_object_space.h b/runtime/gc/space/large_object_space.h
index ef889d4..07fb288 100644
--- a/runtime/gc/space/large_object_space.h
+++ b/runtime/gc/space/large_object_space.h
@@ -59,6 +59,14 @@
 
   size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs);
 
+  virtual bool IsAllocSpace() const {
+    return true;
+  }
+
+  virtual AllocSpace* AsAllocSpace() {
+    return this;
+  }
+
  protected:
   explicit LargeObjectSpace(const std::string& name);
 
diff --git a/runtime/gc/space/space-inl.h b/runtime/gc/space/space-inl.h
index 2c3b93c..f1031ff 100644
--- a/runtime/gc/space/space-inl.h
+++ b/runtime/gc/space/space-inl.h
@@ -27,18 +27,28 @@
 namespace space {
 
 inline ImageSpace* Space::AsImageSpace() {
-  DCHECK_EQ(GetType(), kSpaceTypeImageSpace);
+  DCHECK(IsImageSpace());
   return down_cast<ImageSpace*>(down_cast<MemMapSpace*>(this));
 }
 
 inline DlMallocSpace* Space::AsDlMallocSpace() {
-  DCHECK(GetType() == kSpaceTypeAllocSpace || GetType() == kSpaceTypeZygoteSpace);
+  DCHECK(IsDlMallocSpace());
   return down_cast<DlMallocSpace*>(down_cast<MemMapSpace*>(this));
 }
 
 inline LargeObjectSpace* Space::AsLargeObjectSpace() {
-  DCHECK_EQ(GetType(), kSpaceTypeLargeObjectSpace);
-  return reinterpret_cast<LargeObjectSpace*>(this);
+  DCHECK(IsLargeObjectSpace());
+  return down_cast<LargeObjectSpace*>(this);
+}
+
+inline ContinuousSpace* Space::AsContinuousSpace() {
+  DCHECK(IsContinuousSpace());
+  return down_cast<ContinuousSpace*>(this);
+}
+
+inline DiscontinuousSpace* Space::AsDiscontinuousSpace() {
+  DCHECK(IsDiscontinuousSpace());
+  return down_cast<DiscontinuousSpace*>(this);
 }
 
 }  // namespace space
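
Replacing reinterpret_cast with DCHECK(Is...()) plus down_cast turns each As...() accessor into a debug-checked static downcast. A small sketch of the idea; down_cast_sketch and the two local structs are simplified stand-ins, not art's actual down_cast or space classes:

    #include <cassert>
    #include <type_traits>

    // A static downcast that is verified in debug builds.
    template <typename To, typename From>
    To down_cast_sketch(From* from) {
      static_assert(std::is_base_of<From, typename std::remove_pointer<To>::type>::value,
                    "down_cast_sketch only converts a base pointer to a derived pointer");
      assert(from == nullptr || dynamic_cast<To>(from) != nullptr);  // debug-only type check
      return static_cast<To>(from);
    }

    // Simplified stand-ins for the space classes.
    struct SketchSpace {
      virtual ~SketchSpace() {}
      virtual bool IsLargeObjectSpace() const { return false; }
    };
    struct SketchLargeObjectSpace : SketchSpace {
      bool IsLargeObjectSpace() const override { return true; }
    };

    SketchLargeObjectSpace* AsLargeObjectSpace(SketchSpace* space) {
      assert(space->IsLargeObjectSpace());  // mirrors the DCHECK above
      return down_cast_sketch<SketchLargeObjectSpace*>(space);
    }

    int main() {
      SketchLargeObjectSpace los;
      SketchSpace* space = &los;
      return AsLargeObjectSpace(space) == &los ? 0 : 1;
    }
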
diff --git a/runtime/gc/space/space.cc b/runtime/gc/space/space.cc
index de48b74..8eb17e0 100644
--- a/runtime/gc/space/space.cc
+++ b/runtime/gc/space/space.cc
@@ -34,7 +34,6 @@
   return os;
 }
 
-
 DiscontinuousSpace::DiscontinuousSpace(const std::string& name,
                                        GcRetentionPolicy gc_retention_policy) :
     Space(name, gc_retention_policy),
diff --git a/runtime/gc/space/space.h b/runtime/gc/space/space.h
index 6dd7952..4c05dde 100644
--- a/runtime/gc/space/space.h
+++ b/runtime/gc/space/space.h
@@ -42,7 +42,10 @@
 
 namespace space {
 
+class AllocSpace;
+class ContinuousSpace;
 class DlMallocSpace;
+class DiscontinuousSpace;
 class ImageSpace;
 class LargeObjectSpace;
 
@@ -64,6 +67,7 @@
   kSpaceTypeImageSpace,
   kSpaceTypeAllocSpace,
   kSpaceTypeZygoteSpace,
+  kSpaceTypeBumpPointerSpace,
   kSpaceTypeLargeObjectSpace,
 };
 std::ostream& operator<<(std::ostream& os, const SpaceType& space_type);
@@ -113,12 +117,35 @@
     return GetType() == kSpaceTypeZygoteSpace;
   }
 
+  // Is this space a bump pointer space?
+  bool IsBumpPointerSpace() const {
+    return GetType() == kSpaceTypeBumpPointerSpace;
+  }
+
   // Does this space hold large objects and implement the large object space abstraction?
   bool IsLargeObjectSpace() const {
     return GetType() == kSpaceTypeLargeObjectSpace;
   }
   LargeObjectSpace* AsLargeObjectSpace();
 
+  virtual bool IsContinuousSpace() const {
+    return false;
+  }
+  ContinuousSpace* AsContinuousSpace();
+
+  virtual bool IsDiscontinuousSpace() const {
+    return false;
+  }
+  DiscontinuousSpace* AsDiscontinuousSpace();
+
+  virtual bool IsAllocSpace() const {
+    return false;
+  }
+  virtual AllocSpace* AsAllocSpace() {
+    LOG(FATAL) << "Unimplemented";
+    return nullptr;
+  }
+
   virtual ~Space() {}
 
  protected:
@@ -131,13 +158,13 @@
   // Name of the space that may vary due to the Zygote fork.
   std::string name_;
 
- private:
+ protected:
   // When should objects within this space be reclaimed? Not constant as we vary it in the case
   // of Zygote forking.
   GcRetentionPolicy gc_retention_policy_;
 
+ private:
   friend class art::gc::Heap;
-
   DISALLOW_COPY_AND_ASSIGN(Space);
 };
 std::ostream& operator<<(std::ostream& os, const Space& space);
@@ -180,16 +207,31 @@
 // continuous spaces can be marked in the card table.
 class ContinuousSpace : public Space {
  public:
-  // Address at which the space begins
+  // Address at which the space begins.
   byte* Begin() const {
     return begin_;
   }
 
-  // Address at which the space ends, which may vary as the space is filled.
+  // Current address at which the space ends, which may vary as the space is filled.
   byte* End() const {
     return end_;
   }
 
+  // The end of the address range covered by the space.
+  byte* Limit() const {
+    return limit_;
+  }
+
+  // Change the end of the space. Be careful with use since changing the end of a space to an
+  // invalid value may break the GC.
+  void SetEnd(byte* end) {
+    end_ = end;
+  }
+
+  void SetLimit(byte* limit) {
+    limit_ = limit;
+  }
+
   // Current size of space
   size_t Size() const {
     return End() - Begin();
@@ -198,31 +240,42 @@
   virtual accounting::SpaceBitmap* GetLiveBitmap() const = 0;
   virtual accounting::SpaceBitmap* GetMarkBitmap() const = 0;
 
+  // Maximum which the mapped space can grow to.
+  // Maximum size which the mapped space can grow to.
+  virtual size_t Capacity() const {
+    return Limit() - Begin();
+  }
+
   // Is object within this space? We check to see if the pointer is beyond the end first as
   // continuous spaces are iterated over from low to high.
   bool HasAddress(const mirror::Object* obj) const {
     const byte* byte_ptr = reinterpret_cast<const byte*>(obj);
-    return byte_ptr < End() && byte_ptr >= Begin();
+    return byte_ptr >= Begin() && byte_ptr < Limit();
   }
 
   bool Contains(const mirror::Object* obj) const {
     return HasAddress(obj);
   }
 
+  virtual bool IsContinuousSpace() const {
+    return true;
+  }
+
   virtual ~ContinuousSpace() {}
 
  protected:
   ContinuousSpace(const std::string& name, GcRetentionPolicy gc_retention_policy,
-                  byte* begin, byte* end) :
-      Space(name, gc_retention_policy), begin_(begin), end_(end) {
+                  byte* begin, byte* end, byte* limit) :
+      Space(name, gc_retention_policy), begin_(begin), end_(end), limit_(limit) {
   }
 
-
   // The beginning of the storage for fast access.
-  byte* const begin_;
+  byte* begin_;
 
   // Current end of the space.
-  byte* end_;
+  byte* volatile end_;
+
+  // Limit of the space.
+  byte* limit_;
 
  private:
   DISALLOW_COPY_AND_ASSIGN(ContinuousSpace);
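
With the Begin/End/Limit split above, every continuous space keeps Begin() <= End() <= Limit(), with Size() = End() - Begin() and Capacity() = Limit() - Begin(); SetEnd() is what lets an allocator bump end_ within that range. A hedged sketch of bump-pointer-style allocation over such a range, using std::atomic as a stand-in for the runtime's own atomics rather than the real bump pointer space code:

    #include <atomic>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Simplified bump-pointer allocator over a [begin, limit) range.
    class SketchBumpPointerSpace {
     public:
      explicit SketchBumpPointerSpace(size_t capacity)
          : storage_(capacity), begin_(storage_.data()),
            end_(storage_.data()), limit_(storage_.data() + capacity) {}

      // Lock-free allocation: atomically bump end_ if the request still fits.
      void* Alloc(size_t num_bytes) {
        uint8_t* old_end = end_.load(std::memory_order_relaxed);
        uint8_t* new_end;
        do {
          new_end = old_end + num_bytes;
          if (new_end > limit_) {
            return nullptr;  // Out of space; a real caller would fall back to a GC/grow path.
          }
        } while (!end_.compare_exchange_weak(old_end, new_end, std::memory_order_relaxed));
        return old_end;
      }

      size_t Size() const { return end_.load(std::memory_order_relaxed) - begin_; }
      size_t Capacity() const { return limit_ - begin_; }

     private:
      std::vector<uint8_t> storage_;
      uint8_t* const begin_;
      std::atomic<uint8_t*> end_;
      uint8_t* const limit_;
    };

    int main() {
      SketchBumpPointerSpace space(1024);
      void* obj = space.Alloc(64);
      return (obj != nullptr && space.Size() == 64) ? 0 : 1;
    }
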
@@ -241,6 +294,10 @@
     return mark_objects_.get();
   }
 
+  virtual bool IsDiscontinuousSpace() const {
+    return true;
+  }
+
   virtual ~DiscontinuousSpace() {}
 
  protected:
@@ -255,25 +312,12 @@
 
 class MemMapSpace : public ContinuousSpace {
  public:
-  // Maximum which the mapped space can grow to.
-  virtual size_t Capacity() const {
-    return mem_map_->Size();
-  }
-
   // Size of the space without a limit on its growth. By default this is just the Capacity, but
   // for the allocation space we support starting with a small heap and then extending it.
   virtual size_t NonGrowthLimitCapacity() const {
     return Capacity();
   }
 
- protected:
-  MemMapSpace(const std::string& name, MemMap* mem_map, size_t initial_size,
-              GcRetentionPolicy gc_retention_policy)
-      : ContinuousSpace(name, gc_retention_policy,
-                        mem_map->Begin(), mem_map->Begin() + initial_size),
-        mem_map_(mem_map) {
-  }
-
   MemMap* GetMemMap() {
     return mem_map_.get();
   }
@@ -282,13 +326,45 @@
     return mem_map_.get();
   }
 
- private:
+ protected:
+  MemMapSpace(const std::string& name, MemMap* mem_map, byte* begin, byte* end, byte* limit,
+              GcRetentionPolicy gc_retention_policy)
+      : ContinuousSpace(name, gc_retention_policy, begin, end, limit),
+        mem_map_(mem_map) {
+  }
+
   // Underlying storage of the space
   UniquePtr<MemMap> mem_map_;
 
+ private:
   DISALLOW_COPY_AND_ASSIGN(MemMapSpace);
 };
 
+// Used by the heap compaction interface to enable copying from one type of alloc space to another.
+class ContinuousMemMapAllocSpace : public MemMapSpace, public AllocSpace {
+ public:
+  virtual bool IsAllocSpace() const {
+    return true;
+  }
+
+  virtual AllocSpace* AsAllocSpace() {
+    return this;
+  }
+
+  virtual void Clear() {
+    LOG(FATAL) << "Unimplemented";
+  }
+
+ protected:
+  ContinuousMemMapAllocSpace(const std::string& name, MemMap* mem_map, byte* begin,
+                             byte* end, byte* limit, GcRetentionPolicy gc_retention_policy)
+      : MemMapSpace(name, mem_map, begin, end, limit, gc_retention_policy) {
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(ContinuousMemMapAllocSpace);
+};
+
 }  // namespace space
 }  // namespace gc
 }  // namespace art
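
The virtual IsAllocSpace()/AsAllocSpace() pair, defaulting to false and LOG(FATAL) in Space and overridden by alloc-capable spaces such as ContinuousMemMapAllocSpace and LargeObjectSpace, lets heap code walk a mixed list of spaces, query cheaply, and only then cast sideways, without leaning on dynamic_cast in hot paths. A compact sketch of that dispatch shape with simplified stand-in classes:

    #include <cstddef>
    #include <cstdio>
    #include <cstdlib>
    #include <memory>
    #include <vector>

    // Simplified stand-ins for the space hierarchy in space.h.
    class SketchAllocSpace {
     public:
      virtual ~SketchAllocSpace() {}
      virtual void* Alloc(size_t num_bytes) = 0;
    };

    class SketchSpace {
     public:
      virtual ~SketchSpace() {}
      virtual bool IsAllocSpace() const { return false; }
      virtual SketchAllocSpace* AsAllocSpace() {
        std::fprintf(stderr, "Unimplemented\n");  // mirrors the LOG(FATAL) default above
        std::abort();
        return nullptr;  // unreachable
      }
    };

    // An alloc-capable space overrides both the query and the sideways cast.
    class SketchMallocBackedSpace : public SketchSpace, public SketchAllocSpace {
     public:
      bool IsAllocSpace() const override { return true; }
      SketchAllocSpace* AsAllocSpace() override { return this; }
      void* Alloc(size_t num_bytes) override { return std::malloc(num_bytes); }
    };

    int main() {
      std::vector<std::unique_ptr<SketchSpace>> spaces;
      spaces.emplace_back(new SketchMallocBackedSpace());
      for (const auto& space : spaces) {
        if (space->IsAllocSpace()) {                     // cheap virtual query first...
          void* obj = space->AsAllocSpace()->Alloc(16);  // ...then the explicit cast
          std::free(obj);                                // Alloc above is malloc-backed
        }
      }
      return 0;
    }
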
diff --git a/runtime/gc/space/space_test.cc b/runtime/gc/space/space_test.cc
index 455168c..383714b 100644
--- a/runtime/gc/space/space_test.cc
+++ b/runtime/gc/space/space_test.cc
@@ -33,8 +33,8 @@
                                            int round, size_t growth_limit);
   void SizeFootPrintGrowthLimitAndTrimDriver(size_t object_size);
 
-  void AddContinuousSpace(ContinuousSpace* space) {
-    Runtime::Current()->GetHeap()->AddContinuousSpace(space);
+  void AddSpace(ContinuousSpace* space) {
+    Runtime::Current()->GetHeap()->AddSpace(space);
   }
 };
 
@@ -91,7 +91,7 @@
     ASSERT_TRUE(space != NULL);
 
     // Make space findable to the heap, will also delete space when runtime is cleaned up
-    AddContinuousSpace(space);
+    AddSpace(space);
     Thread* self = Thread::Current();
 
     // Succeeds, fits without adjusting the footprint limit.
@@ -136,7 +136,7 @@
     space = space->CreateZygoteSpace("alloc space");
 
     // Make space findable to the heap, will also delete space when runtime is cleaned up
-    AddContinuousSpace(space);
+    AddSpace(space);
 
     // Succeeds, fits without adjusting the footprint limit.
     ptr1 = space->Alloc(self, 1 * MB, &dummy);
@@ -164,7 +164,7 @@
   Thread* self = Thread::Current();
 
   // Make space findable to the heap, will also delete space when runtime is cleaned up
-  AddContinuousSpace(space);
+  AddSpace(space);
 
   // Succeeds, fits without adjusting the footprint limit.
   mirror::Object* ptr1 = space->Alloc(self, 1 * MB, &dummy);
@@ -270,7 +270,7 @@
   ASSERT_TRUE(space != NULL);
 
   // Make space findable to the heap, will also delete space when runtime is cleaned up
-  AddContinuousSpace(space);
+  AddSpace(space);
   Thread* self = Thread::Current();
 
   // Succeeds, fits without adjusting the max allowed footprint.
@@ -467,7 +467,7 @@
   EXPECT_EQ(space->NonGrowthLimitCapacity(), capacity);
 
   // Make space findable to the heap, will also delete space when runtime is cleaned up
-  AddContinuousSpace(space);
+  AddSpace(space);
 
   // In this round we don't allocate with growth and therefore can't grow past the initial size.
   // This effectively makes the growth_limit the initial_size, so assert this.