Compacting collector.

The compacting collector is currently similar to semispace. It works by
copying objects back and forth between two bump pointer spaces. Some types
of objects are "non-movable" due to current runtime limitations: Classes,
Methods, and Fields.

Bump pointer spaces are a new type of continuous alloc space with no lock
in the allocation code path. Allocating from one of these spaces uses
atomic operations to increase an index. Traversing the objects in a bump
pointer space relies on Object::SizeOf matching the allocated size
exactly.
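
As a rough illustration (not the actual BumpPointerSpace interface), the
allocation fast path reduces to a single atomic fetch-and-add on the
current position:

  // Minimal sketch of lock-free bump pointer allocation. Names, alignment
  // handling, and the out-of-space policy are illustrative assumptions.
  #include <atomic>
  #include <cstddef>
  #include <cstdint>

  class BumpAllocator {
   public:
    BumpAllocator(uint8_t* begin, uint8_t* end) : pos_(begin), end_(end) {}

    void* Alloc(size_t num_bytes) {
      // Keep 8 byte alignment so object headers stay aligned.
      num_bytes = (num_bytes + 7) & ~static_cast<size_t>(7);
      uint8_t* old_pos = pos_.fetch_add(num_bytes, std::memory_order_relaxed);
      if (old_pos + num_bytes > end_) {
        return nullptr;  // Out of space; the real runtime would trigger a GC.
      }
      return old_pos;
    }

   private:
    std::atomic<uint8_t*> pos_;
    uint8_t* const end_;
  };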

Runtime changes:
JNI::GetArrayElements returns a copy of the contents if you attempt to get
the backing data of a movable array. For GetPrimitiveArrayCritical, we
return the direct backing storage for any type of array, but temporarily
disable the GC until the critical region is completed.
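
For example, native code that holds element pointers for a while goes
through Get/ReleaseIntArrayElements and may now see a copy, while short
critical sections get the real backing storage with the GC held off (a
usage sketch, not code from this change):

  // Sketch of the two JNI access patterns affected by movable arrays.
  #include <jni.h>

  jint SumArray(JNIEnv* env, jintArray array) {
    jsize len = env->GetArrayLength(array);
    jboolean is_copy;
    jint* elems = env->GetIntArrayElements(array, &is_copy);  // May be a copy.
    jint sum = 0;
    for (jsize i = 0; i < len; ++i) {
      sum += elems[i];
    }
    env->ReleaseIntArrayElements(array, elems, JNI_ABORT);  // Read-only, discard.
    return sum;
  }

  void ZeroArrayCritical(JNIEnv* env, jintArray array) {
    jsize len = env->GetArrayLength(array);
    // Direct backing storage; the GC stays disabled until the Release call.
    jint* data = static_cast<jint*>(env->GetPrimitiveArrayCritical(array, nullptr));
    for (jsize i = 0; i < len; ++i) {
      data[i] = 0;
    }
    env->ReleasePrimitiveArrayCritical(array, data, 0);
  }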

Added a new runtime call, VisitObjects, which is used in place of the old
pattern of flushing the allocation stack and walking the bitmaps.
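
A minimal sketch of how a VisitObjects-style walk might be consumed; the
callback shape (object pointer plus an opaque argument) is an assumption
for illustration, not the exact signature added here:

  // Hypothetical consumer of a VisitObjects-style heap walk.
  #include <cstddef>

  namespace mirror { class Object; }

  struct ObjectCounter {
    size_t count = 0;
  };

  static void CountObjectCallback(mirror::Object* /* obj */, void* arg) {
    reinterpret_cast<ObjectCounter*>(arg)->count++;
  }

  // Previously callers flushed the allocation stack and walked the live
  // bitmaps themselves; with VisitObjects the heap owns the traversal:
  //   ObjectCounter counter;
  //   heap->VisitObjects(CountObjectCallback, &counter);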

Changed the image writer to be compaction safe and to use the object
monitor word for forwarding addresses.
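
The forwarding scheme matches what the semi-space collector below does
during copying: while an object is being relocated, its monitor word
temporarily holds the new address. A simplified encode/decode sketch (the
real LockWord encoding reserves state bits and is defined elsewhere in the
runtime):

  // Illustrative only: one tag bit marks "this word holds a forwarding
  // address", and the address is assumed to be 8-byte aligned so it fits
  // after a shift. The real LockWord layout is not reproduced here.
  #include <cstdint>

  constexpr uint32_t kForwardingTag = 1u << 31;

  inline uint32_t EncodeForwardingAddress(uintptr_t new_address) {
    return kForwardingTag | static_cast<uint32_t>(new_address >> 3);
  }

  inline bool IsForwarded(uint32_t monitor_word) {
    return (monitor_word & kForwardingTag) != 0;
  }

  inline uintptr_t DecodeForwardingAddress(uint32_t monitor_word) {
    return static_cast<uintptr_t>(monitor_word & ~kForwardingTag) << 3;
  }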

Added a number of SIRTs to ClassLinker, MethodLinker, etc.

TODO: Enable switching allocators, compacting in the background, etc.

Bug: 8981901

Change-Id: I3c886fd322a6eef2b99388d19a765042ec26ab99
diff --git a/runtime/gc/collector/garbage_collector.cc b/runtime/gc/collector/garbage_collector.cc
index 6691cad..1789103 100644
--- a/runtime/gc/collector/garbage_collector.cc
+++ b/runtime/gc/collector/garbage_collector.cc
@@ -36,6 +36,7 @@
 GarbageCollector::GarbageCollector(Heap* heap, const std::string& name)
     : heap_(heap),
       name_(name),
+      clear_soft_references_(false),
       verbose_(VLOG_IS_ON(heap)),
       duration_ns_(0),
       timings_(name_.c_str(), true, verbose_),
@@ -60,11 +61,18 @@
   total_freed_bytes_ = 0;
 }
 
-void GarbageCollector::Run() {
+void GarbageCollector::Run(bool clear_soft_references) {
   ThreadList* thread_list = Runtime::Current()->GetThreadList();
   uint64_t start_time = NanoTime();
   pause_times_.clear();
   duration_ns_ = 0;
+  clear_soft_references_ = clear_soft_references;
+
+  // Reset stats.
+  freed_bytes_ = 0;
+  freed_large_object_bytes_ = 0;
+  freed_objects_ = 0;
+  freed_large_objects_ = 0;
 
   InitializePhase();
 
diff --git a/runtime/gc/collector/garbage_collector.h b/runtime/gc/collector/garbage_collector.h
index 0f566c9..6111c2f 100644
--- a/runtime/gc/collector/garbage_collector.h
+++ b/runtime/gc/collector/garbage_collector.h
@@ -46,7 +46,7 @@
   virtual GcType GetGcType() const = 0;
 
   // Run the garbage collector.
-  void Run();
+  void Run(bool clear_soft_references);
 
   Heap* GetHeap() const {
     return heap_;
@@ -78,6 +78,34 @@
   // this is the allocation space, for full GC then we swap the zygote bitmaps too.
   void SwapBitmaps() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
+  size_t GetFreedBytes() const {
+    return freed_bytes_;
+  }
+
+  size_t GetFreedLargeObjectBytes() const {
+    return freed_large_object_bytes_;
+  }
+
+  size_t GetFreedObjects() const {
+    return freed_objects_;
+  }
+
+  size_t GetFreedLargeObjects() const {
+    return freed_large_objects_;
+  }
+
+  uint64_t GetTotalPausedTimeNs() const {
+    return total_paused_time_ns_;
+  }
+
+  uint64_t GetTotalFreedBytes() const {
+    return total_freed_bytes_;
+  }
+
+  uint64_t GetTotalFreedObjects() const {
+    return total_freed_objects_;
+  }
+
  protected:
   // The initial phase. Done without mutators paused.
   virtual void InitializePhase() = 0;
@@ -98,6 +126,8 @@
 
   std::string name_;
 
+  bool clear_soft_references_;
+
   const bool verbose_;
 
   uint64_t duration_ns_;
@@ -109,6 +139,12 @@
   uint64_t total_freed_objects_;
   uint64_t total_freed_bytes_;
 
+  // Single GC statistics.
+  AtomicInteger freed_bytes_;
+  AtomicInteger freed_large_object_bytes_;
+  AtomicInteger freed_objects_;
+  AtomicInteger freed_large_objects_;
+
   CumulativeLogger cumulative_timings_;
 
   std::vector<uint64_t> pause_times_;
diff --git a/runtime/gc/collector/mark_sweep-inl.h b/runtime/gc/collector/mark_sweep-inl.h
index 270c9ef..7a51553 100644
--- a/runtime/gc/collector/mark_sweep-inl.h
+++ b/runtime/gc/collector/mark_sweep-inl.h
@@ -44,8 +44,7 @@
     if (klass->IsObjectArrayClass()) {
       VisitObjectArrayReferences(obj->AsObjectArray<mirror::Object>(), visitor);
     }
-  } else if (UNLIKELY(klass == java_lang_Class_)) {
-    DCHECK_EQ(klass->GetClass(), java_lang_Class_);
+  } else if (UNLIKELY(klass == mirror::Class::GetJavaLangClass())) {
     if (kCountScannedTypes) {
       ++class_count_;
     }
@@ -56,7 +55,7 @@
     }
     VisitOtherReferences(klass, obj, visitor);
     if (UNLIKELY(klass->IsReferenceClass())) {
-      DelayReferenceReferent(klass, const_cast<mirror::Object*>(obj));
+      DelayReferenceReferent(klass, obj);
     }
   }
 }
@@ -68,11 +67,10 @@
                           Locks::mutator_lock_) {
   DCHECK(obj != NULL);
   DCHECK(obj->GetClass() != NULL);
-
   mirror::Class* klass = obj->GetClass();
   DCHECK(klass != NULL);
   if (visit_class) {
-    visitor(obj, klass, MemberOffset(0), false);
+    visitor(obj, klass, mirror::Object::ClassOffset(), false);
   }
   if (klass == mirror::Class::GetJavaLangClass()) {
     DCHECK_EQ(klass->GetClass(), mirror::Class::GetJavaLangClass());
@@ -90,8 +88,7 @@
 }
 
 template <typename Visitor>
-inline void MarkSweep::VisitInstanceFieldsReferences(mirror::Class* klass,
-                                                     mirror::Object* obj,
+inline void MarkSweep::VisitInstanceFieldsReferences(mirror::Class* klass, mirror::Object* obj,
                                                      const Visitor& visitor)
     SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
   DCHECK(obj != NULL);
@@ -119,11 +116,6 @@
                                              bool is_static, const Visitor& visitor) {
   if (LIKELY(ref_offsets != CLASS_WALK_SUPER)) {
     // Found a reference offset bitmap.  Mark the specified offsets.
-#ifndef MOVING_COLLECTOR
-    // Clear the class bit since we mark the class as part of marking the classlinker roots.
-    DCHECK_EQ(mirror::Object::ClassOffset().Uint32Value(), 0U);
-    ref_offsets &= (1U << (sizeof(ref_offsets) * 8 - 1)) - 1;
-#endif
     while (ref_offsets != 0) {
       size_t right_shift = CLZ(ref_offsets);
       MemberOffset field_offset = CLASS_OFFSET_FROM_CLZ(right_shift);
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 2c69c77..11e911c 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -93,6 +93,8 @@
   }
 
   // Add the space to the immune region.
+  // TODO: Use space limits instead of current end_ since the end_ can be changed by dlmalloc
+  // callbacks.
   if (immune_begin_ == NULL) {
     DCHECK(immune_end_ == NULL);
     SetImmuneRange(reinterpret_cast<Object*>(space->Begin()),
@@ -108,14 +110,14 @@
     }
     // If previous space was immune, then extend the immune region. Relies on continuous spaces
     // being sorted by Heap::AddContinuousSpace.
-    if (prev_space != NULL && IsImmuneSpace(prev_space)) {
+    if (prev_space != nullptr && IsImmuneSpace(prev_space)) {
       immune_begin_ = std::min(reinterpret_cast<Object*>(space->Begin()), immune_begin_);
       immune_end_ = std::max(reinterpret_cast<Object*>(space->End()), immune_end_);
     }
   }
 }
 
-bool MarkSweep::IsImmuneSpace(const space::ContinuousSpace* space) {
+bool MarkSweep::IsImmuneSpace(const space::ContinuousSpace* space) const {
   return
       immune_begin_ <= reinterpret_cast<Object*>(space->Begin()) &&
       immune_end_ >= reinterpret_cast<Object*>(space->End());
@@ -135,10 +137,9 @@
 
 MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix)
     : GarbageCollector(heap,
-                       name_prefix + (name_prefix.empty() ? "" : " ") +
+                       name_prefix +
                        (is_concurrent ? "concurrent mark sweep": "mark sweep")),
       current_mark_bitmap_(NULL),
-      java_lang_Class_(NULL),
       mark_stack_(NULL),
       immune_begin_(NULL),
       immune_end_(NULL),
@@ -150,8 +151,7 @@
       gc_barrier_(new Barrier(0)),
       large_object_lock_("mark sweep large object lock", kMarkSweepLargeObjectLock),
       mark_stack_lock_("mark sweep mark stack lock", kMarkSweepMarkStackLock),
-      is_concurrent_(is_concurrent),
-      clear_soft_references_(false) {
+      is_concurrent_(is_concurrent) {
 }
 
 void MarkSweep::InitializePhase() {
@@ -165,10 +165,6 @@
   finalizer_reference_list_ = nullptr;
   phantom_reference_list_ = nullptr;
   cleared_reference_list_ = nullptr;
-  freed_bytes_ = 0;
-  freed_large_object_bytes_ = 0;
-  freed_objects_ = 0;
-  freed_large_objects_ = 0;
   class_count_ = 0;
   array_count_ = 0;
   other_count_ = 0;
@@ -179,8 +175,6 @@
   work_chunks_created_ = 0;
   work_chunks_deleted_ = 0;
   reference_count_ = 0;
-  java_lang_Class_ = Class::GetJavaLangClass();
-  CHECK(java_lang_Class_ != nullptr);
 
   FindDefaultMarkBitmap();
 
@@ -294,8 +288,7 @@
   // knowing that new allocations won't be marked as live.
   timings_.StartSplit("MarkStackAsLive");
   accounting::ObjectStack* live_stack = heap_->GetLiveStack();
-  heap_->MarkAllocStack(heap_->alloc_space_->GetLiveBitmap(),
-                        heap_->large_object_space_->GetLiveObjects(), live_stack);
+  heap_->MarkAllocStackAsLive(live_stack);
   live_stack->Reset();
   timings_.EndSplit();
   // Recursively mark all the non-image bits set in the mark bitmap.
@@ -371,8 +364,10 @@
 void MarkSweep::FindDefaultMarkBitmap() {
   base::TimingLogger::ScopedSplit split("FindDefaultMarkBitmap", &timings_);
   for (const auto& space : GetHeap()->GetContinuousSpaces()) {
-    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) {
-      current_mark_bitmap_ = space->GetMarkBitmap();
+    accounting::SpaceBitmap* bitmap = space->GetMarkBitmap();
+    if (bitmap != nullptr &&
+        space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) {
+      current_mark_bitmap_ = bitmap;
       CHECK(current_mark_bitmap_ != NULL);
       return;
     }
@@ -613,10 +608,8 @@
   CHECK(space->IsDlMallocSpace());
   space::DlMallocSpace* alloc_space = space->AsDlMallocSpace();
   accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
-  accounting::SpaceBitmap* mark_bitmap = alloc_space->mark_bitmap_.release();
+  accounting::SpaceBitmap* mark_bitmap = alloc_space->BindLiveToMarkBitmap();
   GetHeap()->GetMarkBitmap()->ReplaceBitmap(mark_bitmap, live_bitmap);
-  alloc_space->temp_bitmap_.reset(mark_bitmap);
-  alloc_space->mark_bitmap_.reset(live_bitmap);
 }
 
 class ScanObjectVisitor {
@@ -625,7 +618,7 @@
       : mark_sweep_(mark_sweep) {}
 
   // TODO: Fixme when anotatalysis works with visitors.
-  void operator()(const Object* obj) const ALWAYS_INLINE NO_THREAD_SAFETY_ANALYSIS {
+  void operator()(Object* obj) const ALWAYS_INLINE NO_THREAD_SAFETY_ANALYSIS {
     if (kCheckLocks) {
       Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
       Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
@@ -814,6 +807,9 @@
     const size_t mark_stack_delta = std::min(CardScanTask::kMaxSize / 2,
                                              mark_stack_size / mark_stack_tasks + 1);
     for (const auto& space : GetHeap()->GetContinuousSpaces()) {
+      if (space->GetMarkBitmap() == nullptr) {
+        continue;
+      }
       byte* card_begin = space->Begin();
       byte* card_end = space->End();
       // Align up the end address. For example, the image space's end
@@ -856,24 +852,26 @@
     timings_.EndSplit();
   } else {
     for (const auto& space : GetHeap()->GetContinuousSpaces()) {
-      // Image spaces are handled properly since live == marked for them.
-      switch (space->GetGcRetentionPolicy()) {
-        case space::kGcRetentionPolicyNeverCollect:
-          timings_.StartSplit(paused ? "(Paused)ScanGrayImageSpaceObjects" :
-              "ScanGrayImageSpaceObjects");
-          break;
-        case space::kGcRetentionPolicyFullCollect:
-          timings_.StartSplit(paused ? "(Paused)ScanGrayZygoteSpaceObjects" :
-              "ScanGrayZygoteSpaceObjects");
-          break;
-        case space::kGcRetentionPolicyAlwaysCollect:
-          timings_.StartSplit(paused ? "(Paused)ScanGrayAllocSpaceObjects" :
-              "ScanGrayAllocSpaceObjects");
-          break;
-        }
-      ScanObjectVisitor visitor(this);
-      card_table->Scan(space->GetMarkBitmap(), space->Begin(), space->End(), visitor, minimum_age);
-      timings_.EndSplit();
+      if (space->GetMarkBitmap() != nullptr) {
+        // Image spaces are handled properly since live == marked for them.
+        switch (space->GetGcRetentionPolicy()) {
+          case space::kGcRetentionPolicyNeverCollect:
+            timings_.StartSplit(paused ? "(Paused)ScanGrayImageSpaceObjects" :
+                "ScanGrayImageSpaceObjects");
+            break;
+          case space::kGcRetentionPolicyFullCollect:
+            timings_.StartSplit(paused ? "(Paused)ScanGrayZygoteSpaceObjects" :
+                "ScanGrayZygoteSpaceObjects");
+            break;
+          case space::kGcRetentionPolicyAlwaysCollect:
+            timings_.StartSplit(paused ? "(Paused)ScanGrayAllocSpaceObjects" :
+                "ScanGrayAllocSpaceObjects");
+            break;
+          }
+        ScanObjectVisitor visitor(this);
+        card_table->Scan(space->GetMarkBitmap(), space->Begin(), space->End(), visitor, minimum_age);
+        timings_.EndSplit();
+      }
     }
   }
 }
@@ -954,9 +952,8 @@
       if ((space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) ||
           (!partial && space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect)) {
         current_mark_bitmap_ = space->GetMarkBitmap();
-        if (current_mark_bitmap_ == NULL) {
-          GetHeap()->DumpSpaces();
-          LOG(FATAL) << "invalid bitmap";
+        if (current_mark_bitmap_ == nullptr) {
+          continue;
         }
         if (parallel) {
           // We will use the mark stack the future.
@@ -1121,7 +1118,7 @@
 }
 
 void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitmaps) {
-  space::DlMallocSpace* space = heap_->GetAllocSpace();
+  space::DlMallocSpace* space = heap_->GetNonMovingSpace();
   timings_.StartSplit("SweepArray");
   // Newly allocated objects MUST be in the alloc space and those are the only objects which we are
   // going to free.
@@ -1207,8 +1204,11 @@
   scc.mark_sweep = this;
   scc.self = Thread::Current();
   for (const auto& space : GetHeap()->GetContinuousSpaces()) {
+    if (!space->IsDlMallocSpace()) {
+      continue;
+    }
     // We always sweep always collect spaces.
-    bool sweep_space = (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect);
+    bool sweep_space = space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect;
     if (!partial && !sweep_space) {
       // We sweep full collect spaces when the GC isn't a partial GC (ie its full).
       sweep_space = (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect);
@@ -1370,9 +1370,9 @@
 
 // Scans an object reference.  Determines the type of the reference
 // and dispatches to a specialized scanning routine.
-void MarkSweep::ScanObject(const Object* obj) {
+void MarkSweep::ScanObject(Object* obj) {
   MarkObjectVisitor visitor(this);
-  ScanObjectVisit(const_cast<Object*>(obj), visitor);
+  ScanObjectVisit(obj, visitor);
 }
 
 void MarkSweep::ProcessMarkStackParallel(size_t thread_count) {
@@ -1406,12 +1406,12 @@
   } else {
     // TODO: Tune this.
     static const size_t kFifoSize = 4;
-    BoundedFifoPowerOfTwo<const Object*, kFifoSize> prefetch_fifo;
+    BoundedFifoPowerOfTwo<Object*, kFifoSize> prefetch_fifo;
     for (;;) {
-      const Object* obj = NULL;
+      Object* obj = NULL;
       if (kUseMarkStackPrefetch) {
         while (!mark_stack_->IsEmpty() && prefetch_fifo.size() < kFifoSize) {
-          const Object* obj = mark_stack_->PopBack();
+          Object* obj = mark_stack_->PopBack();
           DCHECK(obj != NULL);
           __builtin_prefetch(obj);
           prefetch_fifo.push_back(obj);
@@ -1603,9 +1603,6 @@
   timings_.NewSplit("PostGcVerification");
   heap->PostGcVerification(this);
 
-  timings_.NewSplit("GrowForUtilization");
-  heap->GrowForUtilization(GetGcType(), GetDurationNs());
-
   timings_.NewSplit("RequestHeapTrim");
   heap->RequestHeapTrim();
 
@@ -1651,8 +1648,10 @@
 
   // Clear all of the spaces' mark bitmaps.
   for (const auto& space : GetHeap()->GetContinuousSpaces()) {
-    if (space->GetGcRetentionPolicy() != space::kGcRetentionPolicyNeverCollect) {
-      space->GetMarkBitmap()->Clear();
+    accounting::SpaceBitmap* bitmap = space->GetMarkBitmap();
+    if (bitmap != nullptr &&
+        space->GetGcRetentionPolicy() != space::kGcRetentionPolicyNeverCollect) {
+      bitmap->Clear();
     }
   }
   mark_stack_->Reset();
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index 3bc014a..cc58412 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -114,7 +114,7 @@
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  bool IsImmuneSpace(const space::ContinuousSpace* space)
+  bool IsImmuneSpace(const space::ContinuousSpace* space) const;
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Bind the live bits to the mark bits of bitmaps for spaces that are never collected, ie
@@ -140,6 +140,7 @@
   void ProcessReferences(Thread* self)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
+  // Update and mark references from immune spaces.
   virtual void UpdateAndMarkModUnion()
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
@@ -158,7 +159,7 @@
   }
 
   // Blackens an object.
-  void ScanObject(const mirror::Object* obj)
+  void ScanObject(mirror::Object* obj)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
@@ -167,38 +168,6 @@
   void ScanObjectVisit(mirror::Object* obj, const MarkVisitor& visitor)
       NO_THREAD_SAFETY_ANALYSIS;
 
-  size_t GetFreedBytes() const {
-    return freed_bytes_;
-  }
-
-  size_t GetFreedLargeObjectBytes() const {
-    return freed_large_object_bytes_;
-  }
-
-  size_t GetFreedObjects() const {
-    return freed_objects_;
-  }
-
-  size_t GetFreedLargeObjects() const {
-    return freed_large_objects_;
-  }
-
-  uint64_t GetTotalTimeNs() const {
-    return total_time_ns_;
-  }
-
-  uint64_t GetTotalPausedTimeNs() const {
-    return total_paused_time_ns_;
-  }
-
-  uint64_t GetTotalFreedObjects() const {
-    return total_freed_objects_;
-  }
-
-  uint64_t GetTotalFreedBytes() const {
-    return total_freed_bytes_;
-  }
-
   // Everything inside the immune range is assumed to be marked.
   void SetImmuneRange(mirror::Object* begin, mirror::Object* end);
 
@@ -216,8 +185,7 @@
       SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
   template <typename Visitor>
-  static void VisitObjectReferences(mirror::Object* obj, const Visitor& visitor,
-                                    bool visit_class = false)
+  static void VisitObjectReferences(mirror::Object* obj, const Visitor& visitor, bool visit_class)
       SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
                             Locks::mutator_lock_);
 
@@ -395,9 +363,6 @@
   // object.
   accounting::SpaceBitmap* current_mark_bitmap_;
 
-  // Cache java.lang.Class for optimization.
-  mirror::Class* java_lang_Class_;
-
   accounting::ObjectStack* mark_stack_;
 
   // Immune range, every object inside the immune range is assumed to be marked.
@@ -412,14 +377,6 @@
 
   // Parallel finger.
   AtomicInteger atomic_finger_;
-  // Number of non large object bytes freed in this collection.
-  AtomicInteger freed_bytes_;
-  // Number of large object bytes freed.
-  AtomicInteger freed_large_object_bytes_;
-  // Number of objects freed in this collection.
-  AtomicInteger freed_objects_;
-  // Number of freed large objects.
-  AtomicInteger freed_large_objects_;
   // Number of classes scanned, if kCountScannedTypes.
   AtomicInteger class_count_;
   // Number of arrays scanned, if kCountScannedTypes.
@@ -443,8 +400,6 @@
 
   const bool is_concurrent_;
 
-  bool clear_soft_references_;
-
  private:
   friend class AddIfReachesAllocSpaceVisitor;  // Used by mod-union table.
   friend class CardScanTask;
diff --git a/runtime/gc/collector/partial_mark_sweep.cc b/runtime/gc/collector/partial_mark_sweep.cc
index 29367ce..8ec28f3 100644
--- a/runtime/gc/collector/partial_mark_sweep.cc
+++ b/runtime/gc/collector/partial_mark_sweep.cc
@@ -26,7 +26,7 @@
 namespace collector {
 
 PartialMarkSweep::PartialMarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix)
-    : MarkSweep(heap, is_concurrent, name_prefix + (name_prefix.empty() ? "" : " ") + "partial") {
+    : MarkSweep(heap, is_concurrent, name_prefix.empty() ? "partial " : name_prefix) {
   cumulative_timings_.SetName(GetName());
 }
 
diff --git a/runtime/gc/collector/semi_space-inl.h b/runtime/gc/collector/semi_space-inl.h
new file mode 100644
index 0000000..3b8f7c3
--- /dev/null
+++ b/runtime/gc/collector/semi_space-inl.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_GC_COLLECTOR_SEMI_SPACE_INL_H_
+#define ART_RUNTIME_GC_COLLECTOR_SEMI_SPACE_INL_H_
+
+namespace art {
+namespace gc {
+namespace collector {
+
+inline mirror::Object* SemiSpace::GetForwardingAddressInFromSpace(mirror::Object* obj) const {
+  DCHECK(from_space_->HasAddress(obj));
+  LockWord lock_word = obj->GetLockWord();
+  if (lock_word.GetState() != LockWord::kForwardingAddress) {
+    return nullptr;
+  }
+  return reinterpret_cast<mirror::Object*>(lock_word.ForwardingAddress());
+}
+
+}  // namespace collector
+}  // namespace gc
+}  // namespace art
+
+#endif  // ART_RUNTIME_GC_COLLECTOR_SEMI_SPACE_INL_H_
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
new file mode 100644
index 0000000..d833631
--- /dev/null
+++ b/runtime/gc/collector/semi_space.cc
@@ -0,0 +1,799 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "semi_space.h"
+
+#include <functional>
+#include <numeric>
+#include <climits>
+#include <vector>
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/mutex-inl.h"
+#include "base/timing_logger.h"
+#include "gc/accounting/heap_bitmap.h"
+#include "gc/accounting/mod_union_table.h"
+#include "gc/accounting/space_bitmap-inl.h"
+#include "gc/heap.h"
+#include "gc/space/bump_pointer_space.h"
+#include "gc/space/bump_pointer_space-inl.h"
+#include "gc/space/image_space.h"
+#include "gc/space/large_object_space.h"
+#include "gc/space/space-inl.h"
+#include "indirect_reference_table.h"
+#include "intern_table.h"
+#include "jni_internal.h"
+#include "mark_sweep-inl.h"
+#include "monitor.h"
+#include "mirror/art_field.h"
+#include "mirror/art_field-inl.h"
+#include "mirror/class-inl.h"
+#include "mirror/class_loader.h"
+#include "mirror/dex_cache.h"
+#include "mirror/object-inl.h"
+#include "mirror/object_array.h"
+#include "mirror/object_array-inl.h"
+#include "runtime.h"
+#include "semi_space-inl.h"
+#include "thread-inl.h"
+#include "thread_list.h"
+#include "verifier/method_verifier.h"
+
+using ::art::mirror::Class;
+using ::art::mirror::Object;
+
+namespace art {
+namespace gc {
+namespace collector {
+
+static constexpr bool kProtectFromSpace = true;
+static constexpr bool kResetFromSpace = true;
+
+// TODO: Unduplicate logic.
+void SemiSpace::ImmuneSpace(space::ContinuousSpace* space) {
+  // Bind live to mark bitmap if necessary.
+  if (space->GetLiveBitmap() != space->GetMarkBitmap()) {
+    BindLiveToMarkBitmap(space);
+  }
+  // Add the space to the immune region.
+  if (immune_begin_ == nullptr) {
+    DCHECK(immune_end_ == nullptr);
+    immune_begin_ = reinterpret_cast<Object*>(space->Begin());
+    immune_end_ = reinterpret_cast<Object*>(space->End());
+  } else {
+    const space::ContinuousSpace* prev_space = nullptr;
+    // Find out if the previous space is immune.
+    for (space::ContinuousSpace* cur_space : GetHeap()->GetContinuousSpaces()) {
+      if (cur_space == space) {
+        break;
+      }
+      prev_space = cur_space;
+    }
+    // If previous space was immune, then extend the immune region. Relies on continuous spaces
+    // being sorted by Heap::AddContinuousSpace.
+    if (prev_space != nullptr && IsImmuneSpace(prev_space)) {
+      immune_begin_ = std::min(reinterpret_cast<Object*>(space->Begin()), immune_begin_);
+      immune_end_ = std::max(reinterpret_cast<Object*>(space->End()), immune_end_);
+    }
+  }
+}
+
+void SemiSpace::BindBitmaps() {
+  timings_.StartSplit("BindBitmaps");
+  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
+  // Mark all of the spaces we never collect as immune.
+  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
+    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect
+        || space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect) {
+      ImmuneSpace(space);
+    }
+  }
+  timings_.EndSplit();
+}
+
+SemiSpace::SemiSpace(Heap* heap, const std::string& name_prefix)
+    : GarbageCollector(heap,
+                       name_prefix + (name_prefix.empty() ? "" : " ") + "marksweep + semispace"),
+      mark_stack_(nullptr),
+      immune_begin_(nullptr),
+      immune_end_(nullptr),
+      to_space_(nullptr),
+      from_space_(nullptr),
+      soft_reference_list_(nullptr),
+      weak_reference_list_(nullptr),
+      finalizer_reference_list_(nullptr),
+      phantom_reference_list_(nullptr),
+      cleared_reference_list_(nullptr),
+      self_(nullptr) {
+}
+
+void SemiSpace::InitializePhase() {
+  timings_.Reset();
+  base::TimingLogger::ScopedSplit split("InitializePhase", &timings_);
+  mark_stack_ = heap_->mark_stack_.get();
+  DCHECK(mark_stack_ != nullptr);
+  immune_begin_ = nullptr;
+  immune_end_ = nullptr;
+  soft_reference_list_ = nullptr;
+  weak_reference_list_ = nullptr;
+  finalizer_reference_list_ = nullptr;
+  phantom_reference_list_ = nullptr;
+  cleared_reference_list_ = nullptr;
+  self_ = Thread::Current();
+  // Do any pre GC verification.
+  timings_.NewSplit("PreGcVerification");
+  heap_->PreGcVerification(this);
+}
+
+void SemiSpace::ProcessReferences(Thread* self) {
+  base::TimingLogger::ScopedSplit split("ProcessReferences", &timings_);
+  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
+  ProcessReferences(&soft_reference_list_, clear_soft_references_, &weak_reference_list_,
+                    &finalizer_reference_list_, &phantom_reference_list_);
+}
+
+void SemiSpace::MarkingPhase() {
+  Thread* self = Thread::Current();
+  Locks::mutator_lock_->AssertExclusiveHeld(self);
+  base::TimingLogger::ScopedSplit split("MarkingPhase", &timings_);
+  // Need to do this with mutators paused so that somebody doesn't accidentally allocate into the
+  // wrong space.
+  heap_->SwapSemiSpaces();
+  // Assume the cleared space is already empty.
+  BindBitmaps();
+  // Process dirty cards and add dirty cards to mod-union tables.
+  heap_->ProcessCards(timings_);
+  // Need to do this before the checkpoint since we don't want any threads to add references to
+  // the live stack during the recursive mark.
+  timings_.NewSplit("SwapStacks");
+  heap_->SwapStacks();
+  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
+  MarkRoots();
+  // Mark roots of immune spaces.
+  UpdateAndMarkModUnion();
+  // Recursively mark remaining objects.
+  MarkReachableObjects();
+}
+
+bool SemiSpace::IsImmuneSpace(const space::ContinuousSpace* space) const {
+  return
+    immune_begin_ <= reinterpret_cast<Object*>(space->Begin()) &&
+    immune_end_ >= reinterpret_cast<Object*>(space->End());
+}
+
+void SemiSpace::UpdateAndMarkModUnion() {
+  for (auto& space : heap_->GetContinuousSpaces()) {
+    // If the space is immune then we need to mark the references to other spaces.
+    if (IsImmuneSpace(space)) {
+      accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
+      CHECK(table != nullptr);
+      // TODO: Improve naming.
+      base::TimingLogger::ScopedSplit split(
+          space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
+                                   "UpdateAndMarkImageModUnionTable",
+                                   &timings_);
+      table->UpdateAndMarkReferences(MarkRootCallback, this);
+    }
+  }
+}
+
+void SemiSpace::MarkReachableObjects() {
+  timings_.StartSplit("MarkStackAsLive");
+  accounting::ObjectStack* live_stack = heap_->GetLiveStack();
+  heap_->MarkAllocStackAsLive(live_stack);
+  live_stack->Reset();
+  timings_.EndSplit();
+  // Recursively process the mark stack.
+  ProcessMarkStack(true);
+}
+
+void SemiSpace::ReclaimPhase() {
+  base::TimingLogger::ScopedSplit split("ReclaimPhase", &timings_);
+  Thread* self = Thread::Current();
+  ProcessReferences(self);
+  {
+    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
+    SweepSystemWeaks();
+  }
+  // Record freed memory.
+  int from_bytes = from_space_->GetBytesAllocated();
+  int to_bytes = to_space_->GetBytesAllocated();
+  int from_objects = from_space_->GetObjectsAllocated();
+  int to_objects = to_space_->GetObjectsAllocated();
+  int freed_bytes = from_bytes - to_bytes;
+  int freed_objects = from_objects - to_objects;
+  CHECK_GE(freed_bytes, 0);
+  freed_bytes_.fetch_add(freed_bytes);
+  freed_objects_.fetch_add(freed_objects);
+  heap_->RecordFree(static_cast<size_t>(freed_objects), static_cast<size_t>(freed_bytes));
+
+  timings_.StartSplit("PreSweepingGcVerification");
+  heap_->PreSweepingGcVerification(this);
+  timings_.EndSplit();
+
+  {
+    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
+    // Reclaim unmarked objects.
+    Sweep(false);
+    // Swap the live and mark bitmaps for each space which we modified space. This is an
+    // optimization that enables us to not clear live bits inside of the sweep. Only swaps unbound
+    // bitmaps.
+    timings_.StartSplit("SwapBitmaps");
+    SwapBitmaps();
+    timings_.EndSplit();
+    // Unbind the live and mark bitmaps.
+    UnBindBitmaps();
+  }
+  // Release the memory used by the from space.
+  if (kResetFromSpace) {
+    // Clearing from space.
+    from_space_->Clear();
+  }
+  // Protect the from space.
+  VLOG(heap)
+      << "mprotect region " << reinterpret_cast<void*>(from_space_->Begin()) << " - "
+      << reinterpret_cast<void*>(from_space_->Limit());
+  if (kProtectFromSpace) {
+    mprotect(from_space_->Begin(), from_space_->Capacity(), PROT_NONE);
+  } else {
+    mprotect(from_space_->Begin(), from_space_->Capacity(), PROT_READ);
+  }
+}
+
+void SemiSpace::ResizeMarkStack(size_t new_size) {
+  std::vector<Object*> temp(mark_stack_->Begin(), mark_stack_->End());
+  CHECK_LE(mark_stack_->Size(), new_size);
+  mark_stack_->Resize(new_size);
+  for (const auto& obj : temp) {
+    mark_stack_->PushBack(obj);
+  }
+}
+
+inline void SemiSpace::MarkStackPush(Object* obj) {
+  if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
+    ResizeMarkStack(mark_stack_->Capacity() * 2);
+  }
+  // The object must be pushed on to the mark stack.
+  mark_stack_->PushBack(obj);
+}
+
+// Rare case, probably not worth inlining since it will increase instruction cache miss rate.
+bool SemiSpace::MarkLargeObject(const Object* obj) {
+  // TODO: support >1 discontinuous space.
+  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
+  accounting::SpaceSetMap* large_objects = large_object_space->GetMarkObjects();
+  if (UNLIKELY(!large_objects->Test(obj))) {
+    large_objects->Set(obj);
+    return true;
+  }
+  return false;
+}
+
+// Used to mark and copy objects. Any newly-marked objects that are in the from space get moved to
+// the to-space and have their forward address updated. Objects which have been newly marked are
+// pushed on the mark stack.
+Object* SemiSpace::MarkObject(Object* obj) {
+  Object* ret = obj;
+  if (obj != nullptr && !IsImmune(obj)) {
+    if (from_space_->HasAddress(obj)) {
+      mirror::Object* forward_address = GetForwardingAddressInFromSpace(obj);
+      // If the object has already been moved, return the new forward address.
+      if (!to_space_->HasAddress(forward_address)) {
+        // Otherwise, we need to move the object and add it to the mark stack for processing.
+        size_t object_size = obj->SizeOf();
+        size_t dummy = 0;
+        forward_address = to_space_->Alloc(self_, object_size, &dummy);
+        // Copy over the object and add it to the mark stack since we still need to update its
+        // references.
+        memcpy(reinterpret_cast<void*>(forward_address), obj, object_size);
+        // Make sure to only update the forwarding address AFTER you copy the object so that the
+        // monitor word doesn't get stomped over.
+        COMPILE_ASSERT(sizeof(uint32_t) == sizeof(mirror::Object*),
+                       monitor_size_must_be_same_as_object);
+        obj->SetLockWord(LockWord::FromForwardingAddress(reinterpret_cast<size_t>(forward_address)));
+        MarkStackPush(forward_address);
+      }
+      ret = forward_address;
+      // TODO: Do we need this if in the else statement?
+    } else {
+      accounting::SpaceBitmap* object_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
+      if (LIKELY(object_bitmap != nullptr)) {
+        // This object was not previously marked.
+        if (!object_bitmap->Test(obj)) {
+          object_bitmap->Set(obj);
+          MarkStackPush(obj);
+        }
+      } else {
+        DCHECK(!to_space_->HasAddress(obj)) << "Marking object in to_space_";
+        if (MarkLargeObject(obj)) {
+          MarkStackPush(obj);
+        }
+      }
+    }
+  }
+  return ret;
+}
+
+Object* SemiSpace::MarkRootCallback(Object* root, void* arg) {
+  DCHECK(root != nullptr);
+  DCHECK(arg != nullptr);
+  return reinterpret_cast<SemiSpace*>(arg)->MarkObject(root);
+}
+
+// Marks all objects in the root set.
+void SemiSpace::MarkRoots() {
+  timings_.StartSplit("MarkRoots");
+  // TODO: Visit up image roots as well?
+  Runtime::Current()->VisitRoots(MarkRootCallback, this, false, true);
+  timings_.EndSplit();
+}
+
+void SemiSpace::BindLiveToMarkBitmap(space::ContinuousSpace* space) {
+  CHECK(space->IsDlMallocSpace());
+  space::DlMallocSpace* alloc_space = space->AsDlMallocSpace();
+  accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
+  accounting::SpaceBitmap* mark_bitmap = alloc_space->BindLiveToMarkBitmap();
+  GetHeap()->GetMarkBitmap()->ReplaceBitmap(mark_bitmap, live_bitmap);
+}
+
+mirror::Object* SemiSpace::GetForwardingAddress(mirror::Object* obj) {
+  if (from_space_->HasAddress(obj)) {
+    LOG(FATAL) << "Shouldn't happen!";
+    return GetForwardingAddressInFromSpace(obj);
+  }
+  return obj;
+}
+
+mirror::Object* SemiSpace::SystemWeakIsMarkedCallback(Object* object, void* arg) {
+  return reinterpret_cast<SemiSpace*>(arg)->GetMarkedForwardAddress(object);
+}
+
+void SemiSpace::SweepSystemWeaks() {
+  timings_.StartSplit("SweepSystemWeaks");
+  Runtime::Current()->SweepSystemWeaks(SystemWeakIsMarkedCallback, this);
+  timings_.EndSplit();
+}
+
+struct SweepCallbackContext {
+  SemiSpace* mark_sweep;
+  space::AllocSpace* space;
+  Thread* self;
+};
+
+void SemiSpace::SweepCallback(size_t num_ptrs, Object** ptrs, void* arg) {
+  SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
+  SemiSpace* gc = context->mark_sweep;
+  Heap* heap = gc->GetHeap();
+  space::AllocSpace* space = context->space;
+  Thread* self = context->self;
+  Locks::heap_bitmap_lock_->AssertExclusiveHeld(self);
+  size_t freed_bytes = space->FreeList(self, num_ptrs, ptrs);
+  heap->RecordFree(num_ptrs, freed_bytes);
+  gc->freed_objects_.fetch_add(num_ptrs);
+  gc->freed_bytes_.fetch_add(freed_bytes);
+}
+
+void SemiSpace::ZygoteSweepCallback(size_t num_ptrs, Object** ptrs, void* arg) {
+  SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
+  Locks::heap_bitmap_lock_->AssertExclusiveHeld(context->self);
+  Heap* heap = context->mark_sweep->GetHeap();
+  // We don't free any actual memory to avoid dirtying the shared zygote pages.
+  for (size_t i = 0; i < num_ptrs; ++i) {
+    Object* obj = static_cast<Object*>(ptrs[i]);
+    heap->GetLiveBitmap()->Clear(obj);
+    heap->GetCardTable()->MarkCard(obj);
+  }
+}
+
+void SemiSpace::Sweep(bool swap_bitmaps) {
+  DCHECK(mark_stack_->IsEmpty());
+  base::TimingLogger::ScopedSplit split("Sweep", &timings_);
+
+  const bool partial = (GetGcType() == kGcTypePartial);
+  SweepCallbackContext scc;
+  scc.mark_sweep = this;
+  scc.self = Thread::Current();
+  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
+    if (!space->IsDlMallocSpace()) {
+      continue;
+    }
+    // We always sweep always collect spaces.
+    bool sweep_space = (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect);
+    if (!partial && !sweep_space) {
+      // We sweep full collect spaces when the GC isn't a partial GC (ie its full).
+      sweep_space = (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect);
+    }
+    if (sweep_space && space->IsDlMallocSpace()) {
+      uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
+      uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
+      scc.space = space->AsDlMallocSpace();
+      accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
+      accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
+      if (swap_bitmaps) {
+        std::swap(live_bitmap, mark_bitmap);
+      }
+      if (!space->IsZygoteSpace()) {
+        base::TimingLogger::ScopedSplit split("SweepAllocSpace", &timings_);
+        // Bitmaps are pre-swapped for optimization which enables sweeping with the heap unlocked.
+        accounting::SpaceBitmap::SweepWalk(*live_bitmap, *mark_bitmap, begin, end,
+                                           &SweepCallback, reinterpret_cast<void*>(&scc));
+      } else {
+        base::TimingLogger::ScopedSplit split("SweepZygote", &timings_);
+        // Zygote sweep takes care of dirtying cards and clearing live bits, does not free actual
+        // memory.
+        accounting::SpaceBitmap::SweepWalk(*live_bitmap, *mark_bitmap, begin, end,
+                                           &ZygoteSweepCallback, reinterpret_cast<void*>(&scc));
+      }
+    }
+  }
+
+  SweepLargeObjects(swap_bitmaps);
+}
+
+void SemiSpace::SweepLargeObjects(bool swap_bitmaps) {
+  base::TimingLogger::ScopedSplit split("SweepLargeObjects", &timings_);
+  // Sweep large objects
+  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
+  accounting::SpaceSetMap* large_live_objects = large_object_space->GetLiveObjects();
+  accounting::SpaceSetMap* large_mark_objects = large_object_space->GetMarkObjects();
+  if (swap_bitmaps) {
+    std::swap(large_live_objects, large_mark_objects);
+  }
+  // O(n*log(n)) but hopefully there are not too many large objects.
+  size_t freed_objects = 0;
+  size_t freed_bytes = 0;
+  Thread* self = Thread::Current();
+  for (const Object* obj : large_live_objects->GetObjects()) {
+    if (!large_mark_objects->Test(obj)) {
+      freed_bytes += large_object_space->Free(self, const_cast<Object*>(obj));
+      ++freed_objects;
+    }
+  }
+  freed_large_objects_.fetch_add(freed_objects);
+  freed_large_object_bytes_.fetch_add(freed_bytes);
+  GetHeap()->RecordFree(freed_objects, freed_bytes);
+}
+
+// Process the "referent" field in a java.lang.ref.Reference.  If the referent has not yet been
+// marked, put it on the appropriate list in the heap for later processing.
+void SemiSpace::DelayReferenceReferent(mirror::Class* klass, Object* obj) {
+  DCHECK(klass != nullptr);
+  DCHECK(klass->IsReferenceClass());
+  DCHECK(obj != nullptr);
+  Object* referent = heap_->GetReferenceReferent(obj);
+  if (referent != nullptr) {
+    Object* forward_address = GetMarkedForwardAddress(referent);
+    if (forward_address == nullptr) {
+      Thread* self = Thread::Current();
+      // TODO: Remove these locks, and use atomic stacks for storing references?
+      // We need to check that the references haven't already been enqueued since we can end up
+      // scanning the same reference multiple times due to dirty cards.
+      if (klass->IsSoftReferenceClass()) {
+        MutexLock mu(self, *heap_->GetSoftRefQueueLock());
+        if (!heap_->IsEnqueued(obj)) {
+          heap_->EnqueuePendingReference(obj, &soft_reference_list_);
+        }
+      } else if (klass->IsWeakReferenceClass()) {
+        MutexLock mu(self, *heap_->GetWeakRefQueueLock());
+        if (!heap_->IsEnqueued(obj)) {
+          heap_->EnqueuePendingReference(obj, &weak_reference_list_);
+        }
+      } else if (klass->IsFinalizerReferenceClass()) {
+        MutexLock mu(self, *heap_->GetFinalizerRefQueueLock());
+        if (!heap_->IsEnqueued(obj)) {
+          heap_->EnqueuePendingReference(obj, &finalizer_reference_list_);
+        }
+      } else if (klass->IsPhantomReferenceClass()) {
+        MutexLock mu(self, *heap_->GetPhantomRefQueueLock());
+        if (!heap_->IsEnqueued(obj)) {
+          heap_->EnqueuePendingReference(obj, &phantom_reference_list_);
+        }
+      } else {
+        LOG(FATAL) << "Invalid reference type " << PrettyClass(klass) << " " << std::hex
+                   << klass->GetAccessFlags();
+      }
+    } else if (referent != forward_address) {
+      heap_->SetReferenceReferent(obj, forward_address);
+    }
+  }
+}
+
+// Visit all of the references of an object and update.
+void SemiSpace::ScanObject(Object* obj) {
+  DCHECK(obj != NULL);
+  DCHECK(!from_space_->HasAddress(obj)) << "Scanning object " << obj << " in from space";
+  MarkSweep::VisitObjectReferences(obj, [this](Object* obj, Object* ref, const MemberOffset& offset,
+     bool /* is_static */) ALWAYS_INLINE NO_THREAD_SAFETY_ANALYSIS {
+    mirror::Object* new_address = MarkObject(ref);
+    if (new_address != ref) {
+      DCHECK(new_address != nullptr);
+      obj->SetFieldObject(offset, new_address, false);
+    }
+  }, kMovingClasses);
+  mirror::Class* klass = obj->GetClass();
+  if (UNLIKELY(klass->IsReferenceClass())) {
+    DelayReferenceReferent(klass, obj);
+  }
+}
+
+// Scan anything that's on the mark stack.
+void SemiSpace::ProcessMarkStack(bool paused) {
+  timings_.StartSplit(paused ? "(paused)ProcessMarkStack" : "ProcessMarkStack");
+  while (!mark_stack_->IsEmpty()) {
+    ScanObject(mark_stack_->PopBack());
+  }
+  timings_.EndSplit();
+}
+
+// Walks the reference list marking any references subject to the
+// reference clearing policy.  References with a black referent are
+// removed from the list.  References with white referents biased
+// toward saving are blackened and also removed from the list.
+void SemiSpace::PreserveSomeSoftReferences(Object** list) {
+  DCHECK(list != NULL);
+  Object* clear = NULL;
+  size_t counter = 0;
+  DCHECK(mark_stack_->IsEmpty());
+  timings_.StartSplit("PreserveSomeSoftReferences");
+  while (*list != NULL) {
+    Object* ref = heap_->DequeuePendingReference(list);
+    Object* referent = heap_->GetReferenceReferent(ref);
+    if (referent == NULL) {
+      // Referent was cleared by the user during marking.
+      continue;
+    }
+    Object* forward_address = GetMarkedForwardAddress(referent);
+    bool is_marked = forward_address != nullptr;
+    if (!is_marked && ((++counter) & 1)) {
+      // Referent is white and biased toward saving, mark it.
+      forward_address = MarkObject(referent);
+      if (referent != forward_address) {
+        // Update the referent if we moved it.
+        heap_->SetReferenceReferent(ref, forward_address);
+      }
+    } else {
+      if (!is_marked) {
+        // Referent is white, queue it for clearing.
+        heap_->EnqueuePendingReference(ref, &clear);
+      } else if (referent != forward_address) {
+        CHECK(forward_address != nullptr);
+        heap_->SetReferenceReferent(ref, forward_address);
+      }
+    }
+  }
+  *list = clear;
+  timings_.EndSplit();
+  // Restart the mark with the newly black references added to the root set.
+  ProcessMarkStack(true);
+}
+
+inline Object* SemiSpace::GetMarkedForwardAddress(mirror::Object* obj) const
+    SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+  // All immune objects are assumed marked.
+  if (IsImmune(obj)) {
+    return obj;
+  }
+  if (from_space_->HasAddress(obj)) {
+    mirror::Object* forwarding_address = GetForwardingAddressInFromSpace(const_cast<Object*>(obj));
+    // If the object is forwarded then it MUST be marked.
+    if (to_space_->HasAddress(forwarding_address)) {
+      return forwarding_address;
+    }
+    // Must not be marked, return nullptr;
+    return nullptr;
+  } else if (to_space_->HasAddress(obj)) {
+    // Already forwarded, must be marked.
+    return obj;
+  }
+  return heap_->GetMarkBitmap()->Test(obj) ? obj : nullptr;
+}
+
+// Unlink the reference list clearing references objects with white
+// referents.  Cleared references registered to a reference queue are
+// scheduled for appending by the heap worker thread.
+void SemiSpace::ClearWhiteReferences(Object** list) {
+  DCHECK(list != NULL);
+  while (*list != NULL) {
+    Object* ref = heap_->DequeuePendingReference(list);
+    Object* referent = heap_->GetReferenceReferent(ref);
+    if (referent != nullptr) {
+      Object* forward_address = GetMarkedForwardAddress(referent);
+      if (forward_address == nullptr) {
+        // Referent is white, clear it.
+        heap_->ClearReferenceReferent(ref);
+        if (heap_->IsEnqueuable(ref)) {
+          heap_->EnqueueReference(ref, &cleared_reference_list_);
+        }
+      } else if (referent != forward_address) {
+        heap_->SetReferenceReferent(ref, forward_address);
+      }
+    }
+  }
+  DCHECK(*list == NULL);
+}
+
+// Enqueues finalizer references with white referents.  White
+// referents are blackened, moved to the zombie field, and the
+// referent field is cleared.
+void SemiSpace::EnqueueFinalizerReferences(Object** list) {
+  // *list = NULL;
+  // return;
+  DCHECK(list != NULL);
+  timings_.StartSplit("EnqueueFinalizerReferences");
+  MemberOffset zombie_offset = heap_->GetFinalizerReferenceZombieOffset();
+  bool has_enqueued = false;
+  while (*list != NULL) {
+    Object* ref = heap_->DequeuePendingReference(list);
+    Object* referent = heap_->GetReferenceReferent(ref);
+    if (referent != nullptr) {
+      Object* forward_address = GetMarkedForwardAddress(referent);
+      // Not marked.
+      if (forward_address == nullptr) {
+        forward_address = MarkObject(referent);
+        // If the referent is non-null the reference must be enqueuable.
+        DCHECK(heap_->IsEnqueuable(ref));
+        // Move the referent to the zombie field.
+        ref->SetFieldObject(zombie_offset, forward_address, false);
+        heap_->ClearReferenceReferent(ref);
+        heap_->EnqueueReference(ref, &cleared_reference_list_);
+        has_enqueued = true;
+      } else if (referent != forward_address) {
+        heap_->SetReferenceReferent(ref, forward_address);
+      }
+    }
+  }
+  timings_.EndSplit();
+  if (has_enqueued) {
+    ProcessMarkStack(true);
+  }
+  DCHECK(*list == NULL);
+}
+
+// Process reference class instances and schedule finalizations.
+void SemiSpace::ProcessReferences(Object** soft_references, bool clear_soft,
+                                  Object** weak_references,
+                                  Object** finalizer_references,
+                                  Object** phantom_references) {
+  CHECK(soft_references != NULL);
+  CHECK(weak_references != NULL);
+  CHECK(finalizer_references != NULL);
+  CHECK(phantom_references != NULL);
+  CHECK(mark_stack_->IsEmpty());
+
+  // Unless we are in the zygote or required to clear soft references
+  // with white references, preserve some white referents.
+  if (!clear_soft && !Runtime::Current()->IsZygote()) {
+    PreserveSomeSoftReferences(soft_references);
+  }
+
+  timings_.StartSplit("ProcessReferences");
+  // Clear all remaining soft and weak references with white
+  // referents.
+  ClearWhiteReferences(soft_references);
+  ClearWhiteReferences(weak_references);
+  timings_.EndSplit();
+
+  // Preserve all white objects with finalize methods and schedule
+  // them for finalization.
+  EnqueueFinalizerReferences(finalizer_references);
+
+  timings_.StartSplit("ProcessReferences");
+  // Clear all f-reachable soft and weak references with white
+  // referents.
+  ClearWhiteReferences(soft_references);
+  ClearWhiteReferences(weak_references);
+
+  // Clear all phantom references with white referents.
+  ClearWhiteReferences(phantom_references);
+
+  // At this point all reference lists should be empty.
+  DCHECK(*soft_references == NULL);
+  DCHECK(*weak_references == NULL);
+  DCHECK(*finalizer_references == NULL);
+  DCHECK(*phantom_references == NULL);
+  timings_.EndSplit();
+}
+
+void SemiSpace::UnBindBitmaps() {
+  base::TimingLogger::ScopedSplit split("UnBindBitmaps", &timings_);
+  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
+    if (space->IsDlMallocSpace()) {
+      space::DlMallocSpace* alloc_space = space->AsDlMallocSpace();
+      if (alloc_space->HasBoundBitmaps()) {
+        alloc_space->UnBindBitmaps();
+        heap_->GetMarkBitmap()->ReplaceBitmap(alloc_space->GetLiveBitmap(),
+                                              alloc_space->GetMarkBitmap());
+      }
+    }
+  }
+}
+
+void SemiSpace::SetToSpace(space::ContinuousMemMapAllocSpace* to_space) {
+  DCHECK(to_space != nullptr);
+  to_space_ = to_space;
+}
+
+void SemiSpace::SetFromSpace(space::ContinuousMemMapAllocSpace* from_space) {
+  DCHECK(from_space != nullptr);
+  from_space_ = from_space;
+}
+
+void SemiSpace::FinishPhase() {
+  base::TimingLogger::ScopedSplit split("FinishPhase", &timings_);
+  // Can't enqueue references if we hold the mutator lock.
+  Object* cleared_references = GetClearedReferences();
+  Heap* heap = GetHeap();
+  timings_.NewSplit("EnqueueClearedReferences");
+  heap->EnqueueClearedReferences(&cleared_references);
+
+  timings_.NewSplit("PostGcVerification");
+  heap->PostGcVerification(this);
+
+  // Null the "to" and "from" spaces since compacting from one to the other isn't valid until
+  // further action is done by the heap.
+  to_space_ = nullptr;
+  from_space_ = nullptr;
+
+  // Update the cumulative statistics
+  total_time_ns_ += GetDurationNs();
+  total_paused_time_ns_ += std::accumulate(GetPauseTimes().begin(), GetPauseTimes().end(), 0,
+                                           std::plus<uint64_t>());
+  total_freed_objects_ += GetFreedObjects() + GetFreedLargeObjects();
+  total_freed_bytes_ += GetFreedBytes() + GetFreedLargeObjectBytes();
+
+  // Ensure that the mark stack is empty.
+  CHECK(mark_stack_->IsEmpty());
+
+  // Update the cumulative loggers.
+  cumulative_timings_.Start();
+  cumulative_timings_.AddLogger(timings_);
+  cumulative_timings_.End();
+
+  // Clear all of the spaces' mark bitmaps.
+  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
+    accounting::SpaceBitmap* bitmap = space->GetMarkBitmap();
+    if (bitmap != nullptr &&
+        space->GetGcRetentionPolicy() != space::kGcRetentionPolicyNeverCollect) {
+      bitmap->Clear();
+    }
+  }
+  mark_stack_->Reset();
+
+  // Reset the marked large objects.
+  space::LargeObjectSpace* large_objects = GetHeap()->GetLargeObjectsSpace();
+  large_objects->GetMarkObjects()->Clear();
+}
+
+}  // namespace collector
+}  // namespace gc
+}  // namespace art
diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h
new file mode 100644
index 0000000..13d5195
--- /dev/null
+++ b/runtime/gc/collector/semi_space.h
@@ -0,0 +1,289 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_GC_COLLECTOR_SEMI_SPACE_H_
+#define ART_RUNTIME_GC_COLLECTOR_SEMI_SPACE_H_
+
+#include "atomic_integer.h"
+#include "barrier.h"
+#include "base/macros.h"
+#include "base/mutex.h"
+#include "garbage_collector.h"
+#include "offsets.h"
+#include "root_visitor.h"
+#include "UniquePtr.h"
+
+namespace art {
+
+namespace mirror {
+  class Class;
+  class Object;
+  template<class T> class ObjectArray;
+}  // namespace mirror
+
+class StackVisitor;
+class Thread;
+
+namespace gc {
+
+namespace accounting {
+  template <typename T> class AtomicStack;
+  class MarkIfReachesAllocspaceVisitor;
+  class ModUnionClearCardVisitor;
+  class ModUnionVisitor;
+  class ModUnionTableBitmap;
+  class MarkStackChunk;
+  typedef AtomicStack<mirror::Object*> ObjectStack;
+  class SpaceBitmap;
+}  // namespace accounting
+
+namespace space {
+  class BumpPointerSpace;
+  class ContinuousMemMapAllocSpace;
+  class ContinuousSpace;
+}  // namespace space
+
+class Heap;
+
+namespace collector {
+
+class SemiSpace : public GarbageCollector {
+ public:
+  explicit SemiSpace(Heap* heap, const std::string& name_prefix = "");
+
+  ~SemiSpace() {}
+
+  virtual void InitializePhase();
+  virtual bool IsConcurrent() const {
+    return false;
+  }
+  virtual void MarkingPhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+  virtual void ReclaimPhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+  virtual void FinishPhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+  virtual void MarkReachableObjects()
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+  virtual GcType GetGcType() const {
+    return kGcTypePartial;
+  }
+
+  // Sets which space we will be copying objects to.
+  void SetToSpace(space::ContinuousMemMapAllocSpace* to_space);
+
+  // Set the space where we copy objects from.
+  void SetFromSpace(space::ContinuousMemMapAllocSpace* from_space);
+
+  // Initializes internal structures.
+  void Init();
+
+  // Find the default mark bitmap.
+  void FindDefaultMarkBitmap();
+
+  // Returns the new address of the object.
+  mirror::Object* MarkObject(mirror::Object* object)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+  void ScanObject(mirror::Object* obj)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+  // Marks the root set at the start of a garbage collection.
+  void MarkRoots()
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+  // Make a space immune. Immune spaces have all live objects marked, that is, the mark and
+  // live bitmaps are bound together.
+  void ImmuneSpace(space::ContinuousSpace* space)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  // Bind the live bits to the mark bits of bitmaps for spaces that are never collected, i.e.
+  // the image. Mark that portion of the heap as immune.
+  virtual void BindBitmaps() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  void BindLiveToMarkBitmap(space::ContinuousSpace* space)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+  void UnBindBitmaps()
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+  void ProcessReferences(Thread* self)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  // Sweeps unmarked objects to complete the garbage collection.
+  virtual void Sweep(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+  // Sweeps unmarked large objects to complete the garbage collection.
+  void SweepLargeObjects(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+  // Sweep only pointers within an array. WARNING: Trashes objects.
+  void SweepArray(accounting::ObjectStack* allocation_stack_, bool swap_bitmaps)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+  mirror::Object* GetClearedReferences() {
+    return cleared_reference_list_;
+  }
+
+  // TODO: enable thread safety analysis when in use by multiple worker threads.
+  template <typename MarkVisitor>
+  void ScanObjectVisit(const mirror::Object* obj, const MarkVisitor& visitor)
+      NO_THREAD_SAFETY_ANALYSIS;
+
+  void SweepSystemWeaks()
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+  template <typename Visitor>
+  static void VisitObjectReferencesAndClass(mirror::Object* obj, const Visitor& visitor)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+  static mirror::Object* MarkRootCallback(mirror::Object* root, void* arg)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+ protected:
+  // Returns null if the object is not marked, otherwise returns the forwarding address (same as
+  // the object itself for non-movable things).
+  mirror::Object* GetMarkedForwardAddress(mirror::Object* object) const;
+
+  static mirror::Object* SystemWeakIsMarkedCallback(mirror::Object* object, void* arg)
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+  // Marks a large object in the large object space's mark set; large objects are not moved.
+  bool MarkLargeObject(const mirror::Object* obj)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+  static void SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+  // Special sweep for zygote that just marks objects / dirties cards.
+  static void ZygoteSweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+  // Expand the mark stack to the given size.
+  void ResizeMarkStack(size_t new_size);
+
+  // Returns how many threads we should use for the current GC phase, based on whether we are
+  // paused and whether or not we care about pauses.
+  size_t GetThreadCount(bool paused) const;
+
+  // Returns true if an object is inside of the immune region (assumed to be marked).
+  bool IsImmune(const mirror::Object* obj) const ALWAYS_INLINE {
+    return obj >= immune_begin_ && obj < immune_end_;
+  }
+
+  bool IsImmuneSpace(const space::ContinuousSpace* space) const;
+
+  static void VerifyRootCallback(const mirror::Object* root, void* arg, size_t vreg,
+                                 const StackVisitor* visitor);
+
+  void VerifyRoot(const mirror::Object* root, size_t vreg, const StackVisitor* visitor)
+      NO_THREAD_SAFETY_ANALYSIS;
+
+  template <typename Visitor>
+  static void VisitInstanceFieldsReferences(const mirror::Class* klass, const mirror::Object* obj,
+                                            const Visitor& visitor)
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+  // Visit the header, static field references, and interface pointers of a class object.
+  template <typename Visitor>
+  static void VisitClassReferences(const mirror::Class* klass, const mirror::Object* obj,
+                                   const Visitor& visitor)
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+  template <typename Visitor>
+  static void VisitStaticFieldsReferences(const mirror::Class* klass, const Visitor& visitor)
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+  template <typename Visitor>
+  static void VisitFieldsReferences(const mirror::Object* obj, uint32_t ref_offsets, bool is_static,
+                                    const Visitor& visitor)
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+  // Visit all of the references in an object array.
+  template <typename Visitor>
+  static void VisitObjectArrayReferences(const mirror::ObjectArray<mirror::Object>* array,
+                                         const Visitor& visitor)
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+  // Visits the header and field references of a data object.
+  template <typename Visitor>
+  static void VisitOtherReferences(const mirror::Class* klass, const mirror::Object* obj,
+                                   const Visitor& visitor)
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
+    return VisitInstanceFieldsReferences(klass, obj, visitor);
+  }
+
+  // Push an object onto the mark stack.
+  inline void MarkStackPush(mirror::Object* obj);
+
+  void UpdateAndMarkModUnion()
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  // Schedules an unmarked object for reference processing.
+  void DelayReferenceReferent(mirror::Class* klass, mirror::Object* reference)
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+  // Recursively blackens objects on the mark stack.
+  void ProcessMarkStack(bool paused)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+
+  void EnqueueFinalizerReferences(mirror::Object** ref)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+
+  void PreserveSomeSoftReferences(mirror::Object** ref)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+
+  void ClearWhiteReferences(mirror::Object** list)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+
+  void ProcessReferences(mirror::Object** soft_references, bool clear_soft_references,
+                         mirror::Object** weak_references,
+                         mirror::Object** finalizer_references,
+                         mirror::Object** phantom_references)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+
+  inline mirror::Object* GetForwardingAddressInFromSpace(mirror::Object* obj) const;
+
+  mirror::Object* GetForwardingAddress(mirror::Object* obj);
+
+  // Stack of gray objects: objects that have been marked but whose references still need to be
+  // scanned.
+  accounting::ObjectStack* mark_stack_;
+
+  // Immune range, every object inside the immune range is assumed to be marked.
+  mirror::Object* immune_begin_;
+  mirror::Object* immune_end_;
+
+  // Destination and source spaces.
+  space::ContinuousMemMapAllocSpace* to_space_;
+  space::ContinuousMemMapAllocSpace* from_space_;
+
+  mirror::Object* soft_reference_list_;
+  mirror::Object* weak_reference_list_;
+  mirror::Object* finalizer_reference_list_;
+  mirror::Object* phantom_reference_list_;
+  mirror::Object* cleared_reference_list_;
+
+  Thread* self_;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(SemiSpace);
+};
+
+}  // namespace collector
+}  // namespace gc
+}  // namespace art
+
+#endif  // ART_RUNTIME_GC_COLLECTOR_SEMI_SPACE_H_
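
Note: the forwarding scheme implied by MarkObject and GetForwardingAddressInFromSpace above
reuses the object's monitor/lock word to hold the forwarding address once an object has been
copied into the to-space. The snippet below is a minimal, self-contained sketch of that idea
only; ToyObject, ForwardObject, and the bare bump pointer are invented for illustration and are
not ART types or functions (the real bump pointer space advances its position with atomic
operations and relies on Object::SizeOf for the copy size).

#include <cstddef>
#include <cstdint>
#include <cstring>

struct ToyObject {
  uintptr_t lock_word;  // 0 when not yet forwarded; otherwise holds the forwarding address.
  size_t size;          // Total allocation size, analogous to Object::SizeOf().
};

// Bump pointer into a pre-reserved to-space buffer. A plain store is used here for brevity,
// where the real allocation path would use an atomic increment.
uint8_t* to_space_pos;

ToyObject* ForwardObject(ToyObject* obj) {
  if (obj->lock_word != 0) {
    // Already copied: return the forwarding address stored in the lock word.
    return reinterpret_cast<ToyObject*>(obj->lock_word);
  }
  // Copy the object to the to-space and install the forwarding address.
  ToyObject* copy = reinterpret_cast<ToyObject*>(to_space_pos);
  to_space_pos += obj->size;
  std::memcpy(copy, obj, obj->size);
  copy->lock_word = 0;                                 // The new copy starts unforwarded.
  obj->lock_word = reinterpret_cast<uintptr_t>(copy);  // Old copy now points at the new one.
  return copy;
}
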
diff --git a/runtime/gc/collector/sticky_mark_sweep.cc b/runtime/gc/collector/sticky_mark_sweep.cc
index 9f0bf33..b27b8df 100644
--- a/runtime/gc/collector/sticky_mark_sweep.cc
+++ b/runtime/gc/collector/sticky_mark_sweep.cc
@@ -26,7 +26,7 @@
 
 StickyMarkSweep::StickyMarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix)
     : PartialMarkSweep(heap, is_concurrent,
-                       name_prefix + (name_prefix.empty() ? "" : " ") + "sticky") {
+                       name_prefix.empty() ? "sticky " : name_prefix) {
   cumulative_timings_.SetName(GetName());
 }
 
@@ -38,7 +38,8 @@
   // know what was allocated since the last GC. A side-effect of binding the allocation space mark
   // and live bitmap is that marking the objects will place them in the live bitmap.
   for (const auto& space : GetHeap()->GetContinuousSpaces()) {
-    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) {
+    if (space->IsDlMallocSpace() &&
+        space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) {
       BindLiveToMarkBitmap(space);
     }
   }
diff --git a/runtime/gc/collector/sticky_mark_sweep.h b/runtime/gc/collector/sticky_mark_sweep.h
index 8bee00f..b675877 100644
--- a/runtime/gc/collector/sticky_mark_sweep.h
+++ b/runtime/gc/collector/sticky_mark_sweep.h
@@ -31,10 +31,6 @@
     return kGcTypeSticky;
   }
 
-  // Don't need to do anything special here since we scan all the cards which may have references
-  // to the newly allocated objects.
-  virtual void UpdateAndMarkModUnion() { }
-
   explicit StickyMarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix = "");
   ~StickyMarkSweep() {}
 
@@ -53,6 +49,10 @@
 
   void Sweep(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
+  // Don't need to do anything special here since we scan all the cards which may have references
+  // to the newly allocated objects.
+  virtual void UpdateAndMarkModUnion() { }
+
  private:
   DISALLOW_COPY_AND_ASSIGN(StickyMarkSweep);
 };
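
As a closing illustration, here is a hedged sketch of how the new collector might be driven,
based only on the declarations added in semi_space.h above. The heap and space parameters are
placeholders; how the heap actually selects and swaps the two bump pointer spaces is not shown
in this change.

// Hypothetical driver; heap, from_space and to_space are assumed to be created elsewhere.
// Only the SemiSpace members used here come from the declarations added above.
void CollectOnce(gc::Heap* heap,
                 gc::space::ContinuousMemMapAllocSpace* from_space,
                 gc::space::ContinuousMemMapAllocSpace* to_space) {
  gc::collector::SemiSpace semi_space(heap, "compacting");
  semi_space.SetFromSpace(from_space);  // Space being evacuated.
  semi_space.SetToSpace(to_space);      // Destination bump pointer space.
  // Run drives InitializePhase/MarkingPhase/ReclaimPhase/FinishPhase in order.
  semi_space.Run(/* clear_soft_references */ false);
}
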