Add mark compact collector.

The mark compact collector runs in four phases: a normal full
mark_sweep, calculating forwarding addresses for objects in the from
space, updating references to objects in the from space, and moving
the objects in the from space.
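
As a rough illustration, the standalone C++ sketch below walks through
the same four phases in the order RunPhases() and Compact() run them.
It uses a hypothetical toy heap of offset-addressed objects rather than
ART types; locking, lock words, card tables and sweeping are omitted.

  #include <cstddef>
  #include <cstdint>
  #include <cstdio>
  #include <map>
  #include <vector>

  struct ToyObject {
    size_t size;  // bytes occupied (assumed already aligned)
    size_t ref;   // offset of one referenced object, SIZE_MAX means null
    bool marked;
  };

  int main() {
    // Objects at offsets 0, 16 and 48; the one at 16 is unreachable garbage.
    std::map<size_t, ToyObject> space = {
        {0, {16, 48, false}}, {16, {32, SIZE_MAX, false}}, {48, {24, 0, false}}};
    std::vector<size_t> roots = {0};
    // Phase 1: mark everything reachable from the roots (trivial DFS here).
    std::vector<size_t> stack(roots);
    while (!stack.empty()) {
      size_t off = stack.back();
      stack.pop_back();
      ToyObject& obj = space.at(off);
      if (obj.marked) continue;
      obj.marked = true;
      if (obj.ref != SIZE_MAX) stack.push_back(obj.ref);
    }
    // Phase 2: calculate forwarding addresses by bump-allocating over the marked
    // objects in address order (the real collector stores them in lock words).
    std::map<size_t, size_t> forwarding;
    size_t bump = 0;
    for (auto& [off, obj] : space) {
      if (obj.marked) {
        forwarding[off] = bump;
        bump += obj.size;
      }
    }
    // Phase 3: rewrite references (the real collector also updates roots, mod
    // union tables and system weaks) to point at the forwarding addresses.
    for (auto& [off, obj] : space) {
      if (obj.marked && obj.ref != SIZE_MAX) obj.ref = forwarding.at(obj.ref);
    }
    for (size_t& r : roots) r = forwarding.at(r);
    // Phase 4: slide each marked object down to its forwarding address; the real
    // code uses memmove since source and destination may overlap.
    std::map<size_t, ToyObject> compacted;
    for (auto& [off, obj] : space) {
      if (obj.marked) compacted[forwarding.at(off)] = obj;
    }
    std::printf("live bytes after compaction: %zu\n", bump);  // 40 of 72
    return 0;
  }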

Support is disabled by default since it requires classes and field
arrays to be non-movable. Performance is currently around 50% as fast.

The main advantage this has over semispace is that worst-case memory
usage is halved, since we only need one space instead of two.

TODO: Make field arrays and classes movable. This is complicated since
Object::VisitReferences relies on them: if we update the fields of a
class or field array object, but an object visited later still needs it
to figure out which of its fields are reference fields, the visit no
longer works.

Bug: 14059466

Change-Id: I661ed3b71ad4dde124ef80312c95696b4a5665a1
diff --git a/runtime/gc/collector/mark_compact.cc b/runtime/gc/collector/mark_compact.cc
new file mode 100644
index 0000000..595dc8f
--- /dev/null
+++ b/runtime/gc/collector/mark_compact.cc
@@ -0,0 +1,634 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "mark_compact.h"
+
+#include "base/logging.h"
+#include "base/mutex-inl.h"
+#include "base/timing_logger.h"
+#include "gc/accounting/heap_bitmap-inl.h"
+#include "gc/accounting/mod_union_table.h"
+#include "gc/accounting/remembered_set.h"
+#include "gc/accounting/space_bitmap-inl.h"
+#include "gc/heap.h"
+#include "gc/reference_processor.h"
+#include "gc/space/bump_pointer_space.h"
+#include "gc/space/bump_pointer_space-inl.h"
+#include "gc/space/image_space.h"
+#include "gc/space/large_object_space.h"
+#include "gc/space/space-inl.h"
+#include "indirect_reference_table.h"
+#include "intern_table.h"
+#include "jni_internal.h"
+#include "mark_sweep-inl.h"
+#include "monitor.h"
+#include "mirror/art_field.h"
+#include "mirror/art_field-inl.h"
+#include "mirror/class-inl.h"
+#include "mirror/class_loader.h"
+#include "mirror/dex_cache.h"
+#include "mirror/reference-inl.h"
+#include "mirror/object-inl.h"
+#include "mirror/object_array.h"
+#include "mirror/object_array-inl.h"
+#include "runtime.h"
+#include "stack.h"
+#include "thread-inl.h"
+#include "thread_list.h"
+
+using ::art::mirror::Class;
+using ::art::mirror::Object;
+
+namespace art {
+namespace gc {
+namespace collector {
+
+void MarkCompact::BindBitmaps() {
+  timings_.StartSplit("BindBitmaps");
+  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
+  // Mark all of the spaces we never collect as immune.
+  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
+    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect ||
+        space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect) {
+      CHECK(immune_region_.AddContinuousSpace(space)) << "Failed to add space " << *space;
+    }
+  }
+  timings_.EndSplit();
+}
+
+MarkCompact::MarkCompact(Heap* heap, const std::string& name_prefix)
+    : GarbageCollector(heap, name_prefix + (name_prefix.empty() ? "" : " ") + "mark compact"),
+      space_(nullptr), collector_name_(name_) {
+}
+
+void MarkCompact::RunPhases() {
+  Thread* self = Thread::Current();
+  InitializePhase();
+  CHECK(!Locks::mutator_lock_->IsExclusiveHeld(self));
+  {
+    ScopedPause pause(this);
+    GetHeap()->PreGcVerificationPaused(this);
+    GetHeap()->PrePauseRosAllocVerification(this);
+    MarkingPhase();
+    ReclaimPhase();
+  }
+  GetHeap()->PostGcVerification(this);
+  FinishPhase();
+}
+
+void MarkCompact::ForwardObject(mirror::Object* obj) {
+  const size_t alloc_size = RoundUp(obj->SizeOf(), space::BumpPointerSpace::kAlignment);
+  LockWord lock_word = obj->GetLockWord(false);
+  // If we have a non-empty lock word, store it and restore it later.
+  if (lock_word.GetValue() != LockWord().GetValue()) {
+    // Set the bit in the bitmap so that we know to restore it later.
+    objects_with_lockword_->Set(obj);
+    lock_words_to_restore_.push_back(lock_word);
+  }
+  obj->SetLockWord(LockWord::FromForwardingAddress(reinterpret_cast<size_t>(bump_pointer_)),
+                   false);
+  bump_pointer_ += alloc_size;
+  ++live_objects_in_space_;
+}
+
+class CalculateObjectForwardingAddressVisitor {
+ public:
+  explicit CalculateObjectForwardingAddressVisitor(MarkCompact* collector)
+      : collector_(collector) {}
+  void operator()(mirror::Object* obj) const EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_,
+                                                                      Locks::heap_bitmap_lock_) {
+    DCHECK_ALIGNED(obj, space::BumpPointerSpace::kAlignment);
+    DCHECK(collector_->IsMarked(obj));
+    collector_->ForwardObject(obj);
+  }
+
+ private:
+  MarkCompact* const collector_;
+};
+
+void MarkCompact::CalculateObjectForwardingAddresses() {
+  timings_.NewSplit(__FUNCTION__);
+  // The bump pointer in the space where the next forwarding address will be.
+  bump_pointer_ = reinterpret_cast<byte*>(space_->Begin());
+  // Visit all the marked objects in the bitmap.
+  CalculateObjectForwardingAddressVisitor visitor(this);
+  objects_before_forwarding_->VisitMarkedRange(reinterpret_cast<uintptr_t>(space_->Begin()),
+                                               reinterpret_cast<uintptr_t>(space_->End()),
+                                               visitor);
+}
+
+void MarkCompact::InitializePhase() {
+  TimingLogger::ScopedSplit split("InitializePhase", &timings_);
+  mark_stack_ = heap_->GetMarkStack();
+  DCHECK(mark_stack_ != nullptr);
+  immune_region_.Reset();
+  CHECK(space_->CanMoveObjects()) << "Attempting to compact non-movable space from " << *space_;
+  // TODO: I don't think we should need heap bitmap lock to Get the mark bitmap.
+  ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
+  mark_bitmap_ = heap_->GetMarkBitmap();
+  live_objects_in_space_ = 0;
+}
+
+void MarkCompact::ProcessReferences(Thread* self) {
+  TimingLogger::ScopedSplit split("ProcessReferences", &timings_);
+  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
+  heap_->GetReferenceProcessor()->ProcessReferences(
+      false, &timings_, clear_soft_references_, &HeapReferenceMarkedCallback, &MarkObjectCallback,
+      &ProcessMarkStackCallback, this);
+}
+
+class BitmapSetSlowPathVisitor {
+ public:
+  void operator()(const mirror::Object* obj) const {
+    // Marking a large object, make sure it's aligned as a sanity check.
+    if (!IsAligned<kPageSize>(obj)) {
+      Runtime::Current()->GetHeap()->DumpSpaces(LOG(ERROR));
+      LOG(FATAL) << obj;
+    }
+  }
+};
+
+inline void MarkCompact::MarkObject(mirror::Object* obj) {
+  if (obj == nullptr) {
+    return;
+  }
+  if (kUseBakerOrBrooksReadBarrier) {
+    // Verify all the objects have the correct forward pointer installed.
+    obj->AssertReadBarrierPointer();
+  }
+  if (immune_region_.ContainsObject(obj)) {
+    return;
+  }
+  if (objects_before_forwarding_->HasAddress(obj)) {
+    if (!objects_before_forwarding_->Set(obj)) {
+      MarkStackPush(obj);  // This object was not previously marked.
+    }
+  } else {
+    DCHECK(!space_->HasAddress(obj));
+    BitmapSetSlowPathVisitor visitor;
+    if (!mark_bitmap_->Set(obj, visitor)) {
+      // This object was not previously marked.
+      MarkStackPush(obj);
+    }
+  }
+}
+
+void MarkCompact::MarkingPhase() {
+  Thread* self = Thread::Current();
+  // Bitmap which describes which objects we have to move.
+  objects_before_forwarding_.reset(accounting::ContinuousSpaceBitmap::Create(
+      "objects before forwarding", space_->Begin(), space_->Size()));
+  // Bitmap which describes which lock words we need to restore.
+  objects_with_lockword_.reset(accounting::ContinuousSpaceBitmap::Create(
+      "objects with lock words", space_->Begin(), space_->Size()));
+  CHECK(Locks::mutator_lock_->IsExclusiveHeld(self));
+  TimingLogger::ScopedSplit split("MarkingPhase", &timings_);
+  // Assume the cleared space is already empty.
+  BindBitmaps();
+  // Process dirty cards and add dirty cards to mod-union tables.
+  heap_->ProcessCards(timings_, false);
+  // Clear the whole card table since we cannot get any additional dirty cards during the
+  // paused GC. This saves memory but only works for pause-the-world collectors.
+  timings_.NewSplit("ClearCardTable");
+  heap_->GetCardTable()->ClearCardTable();
+  // Need to do this before the checkpoint since we don't want any threads to add references to
+  // the live stack during the recursive mark.
+  timings_.NewSplit("SwapStacks");
+  if (kUseThreadLocalAllocationStack) {
+    heap_->RevokeAllThreadLocalAllocationStacks(self);
+  }
+  heap_->SwapStacks(self);
+  {
+    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
+    MarkRoots();
+    // Mark roots of immune spaces.
+    UpdateAndMarkModUnion();
+    // Recursively mark remaining objects.
+    MarkReachableObjects();
+  }
+  ProcessReferences(self);
+  {
+    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
+    SweepSystemWeaks();
+  }
+  // Revoke buffers before measuring how many objects were moved since the TLABs need to be revoked
+  // before they are properly counted.
+  RevokeAllThreadLocalBuffers();
+  timings_.StartSplit("PreSweepingGcVerification");
+  // Disabled due to an issue where we have objects in the bump pointer space which reference dead
+  // objects.
+  // heap_->PreSweepingGcVerification(this);
+  timings_.EndSplit();
+}
+
+void MarkCompact::UpdateAndMarkModUnion() {
+  for (auto& space : heap_->GetContinuousSpaces()) {
+    // If the space is immune then we need to mark the references to other spaces.
+    if (immune_region_.ContainsSpace(space)) {
+      accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
+      if (table != nullptr) {
+        // TODO: Improve naming.
+        TimingLogger::ScopedSplit split(
+            space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
+                                     "UpdateAndMarkImageModUnionTable",
+                                     &timings_);
+        table->UpdateAndMarkReferences(MarkHeapReferenceCallback, this);
+      }
+    }
+  }
+}
+
+void MarkCompact::MarkReachableObjects() {
+  timings_.StartSplit("MarkStackAsLive");
+  accounting::ObjectStack* live_stack = heap_->GetLiveStack();
+  heap_->MarkAllocStackAsLive(live_stack);
+  live_stack->Reset();
+  // Recursively process the mark stack.
+  ProcessMarkStack();
+}
+
+void MarkCompact::ReclaimPhase() {
+  TimingLogger::ScopedSplit split("ReclaimPhase", &timings_);
+  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
+  // Reclaim unmarked objects.
+  Sweep(false);
+  // Swap the live and mark bitmaps for each space we modified. This is an optimization
+  // that enables us to not clear live bits inside of the sweep. Only swaps unbound
+  // bitmaps.
+  timings_.StartSplit("SwapBitmapsAndUnBindBitmaps");
+  SwapBitmaps();
+  GetHeap()->UnBindBitmaps();  // Unbind the live and mark bitmaps.
+  Compact();
+  timings_.EndSplit();
+}
+
+void MarkCompact::ResizeMarkStack(size_t new_size) {
+  std::vector<Object*> temp(mark_stack_->Begin(), mark_stack_->End());
+  CHECK_LE(mark_stack_->Size(), new_size);
+  mark_stack_->Resize(new_size);
+  for (const auto& obj : temp) {
+    mark_stack_->PushBack(obj);
+  }
+}
+
+inline void MarkCompact::MarkStackPush(Object* obj) {
+  if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
+    ResizeMarkStack(mark_stack_->Capacity() * 2);
+  }
+  // The object must be pushed on to the mark stack.
+  mark_stack_->PushBack(obj);
+}
+
+void MarkCompact::ProcessMarkStackCallback(void* arg) {
+  reinterpret_cast<MarkCompact*>(arg)->ProcessMarkStack();
+}
+
+mirror::Object* MarkCompact::MarkObjectCallback(mirror::Object* root, void* arg) {
+  reinterpret_cast<MarkCompact*>(arg)->MarkObject(root);
+  return root;
+}
+
+void MarkCompact::MarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>* obj_ptr,
+                                          void* arg) {
+  reinterpret_cast<MarkCompact*>(arg)->MarkObject(obj_ptr->AsMirrorPtr());
+}
+
+void MarkCompact::DelayReferenceReferentCallback(mirror::Class* klass, mirror::Reference* ref,
+                                               void* arg) {
+  reinterpret_cast<MarkCompact*>(arg)->DelayReferenceReferent(klass, ref);
+}
+
+void MarkCompact::MarkRootCallback(Object** root, void* arg, uint32_t /*thread_id*/,
+                                   RootType /*root_type*/) {
+  reinterpret_cast<MarkCompact*>(arg)->MarkObject(*root);
+}
+
+void MarkCompact::UpdateRootCallback(Object** root, void* arg, uint32_t /*thread_id*/,
+                                     RootType /*root_type*/) {
+  mirror::Object* obj = *root;
+  mirror::Object* new_obj = reinterpret_cast<MarkCompact*>(arg)->GetMarkedForwardAddress(obj);
+  if (obj != new_obj) {
+    *root = new_obj;
+    DCHECK(new_obj != nullptr);
+  }
+}
+
+class UpdateObjectReferencesVisitor {
+ public:
+  explicit UpdateObjectReferencesVisitor(MarkCompact* collector) : collector_(collector) {
+  }
+  void operator()(mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+          EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
+    collector_->UpdateObjectReferences(obj);
+  }
+
+ private:
+  MarkCompact* const collector_;
+};
+
+void MarkCompact::UpdateReferences() {
+  timings_.NewSplit(__FUNCTION__);
+  Runtime* runtime = Runtime::Current();
+  // Update roots.
+  runtime->VisitRoots(UpdateRootCallback, this);
+  // Update object references in mod union tables and spaces.
+  for (const auto& space : heap_->GetContinuousSpaces()) {
+    // If the space has a mod union table, use it to update the references it tracks.
+    accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
+    if (table != nullptr) {
+      // TODO: Improve naming.
+      TimingLogger::ScopedSplit split(
+          space->IsZygoteSpace() ? "UpdateZygoteModUnionTableReferences" :
+                                   "UpdateImageModUnionTableReferences",
+                                   &timings_);
+      table->UpdateAndMarkReferences(&UpdateHeapReferenceCallback, this);
+    } else {
+      // No mod union table, so scan the space's live bitmap and update the references
+      // of every marked object directly.
+      accounting::ContinuousSpaceBitmap* bitmap = space->GetLiveBitmap();
+      if (bitmap != nullptr) {
+        UpdateObjectReferencesVisitor visitor(this);
+        bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
+                                 reinterpret_cast<uintptr_t>(space->End()),
+                                 visitor);
+      }
+    }
+  }
+  CHECK(!kMovingClasses)
+      << "Didn't update large object classes since they are assumed to not move.";
+  // Update the system weaks, these should already have been swept.
+  runtime->SweepSystemWeaks(&MarkedForwardingAddressCallback, this);
+  // Update the objects in the bump pointer space last, these objects don't have a bitmap.
+  UpdateObjectReferencesVisitor visitor(this);
+  objects_before_forwarding_->VisitMarkedRange(reinterpret_cast<uintptr_t>(space_->Begin()),
+                                               reinterpret_cast<uintptr_t>(space_->End()),
+                                               visitor);
+  // Update the reference processor cleared list.
+  heap_->GetReferenceProcessor()->UpdateRoots(&MarkedForwardingAddressCallback, this);
+}
+
+void MarkCompact::Compact() {
+  timings_.NewSplit(__FUNCTION__);
+  CalculateObjectForwardingAddresses();
+  UpdateReferences();
+  MoveObjects();
+  // Record the objects and bytes reclaimed from the compacted space.
+  int64_t objects_freed = space_->GetObjectsAllocated() - live_objects_in_space_;
+  int64_t bytes_freed = reinterpret_cast<int64_t>(space_->End()) -
+      reinterpret_cast<int64_t>(bump_pointer_);
+  timings_.NewSplit("RecordFree");
+  space_->RecordFree(objects_freed, bytes_freed);
+  RecordFree(objects_freed, bytes_freed);
+  space_->SetEnd(bump_pointer_);
+  // Need to zero out the memory we freed. TODO: Use madvise for pages.
+  memset(bump_pointer_, 0, bytes_freed);
+}
+
+// Marks all objects in the root set.
+void MarkCompact::MarkRoots() {
+  timings_.NewSplit("MarkRoots");
+  Runtime::Current()->VisitRoots(MarkRootCallback, this);
+}
+
+mirror::Object* MarkCompact::MarkedForwardingAddressCallback(mirror::Object* obj, void* arg) {
+  return reinterpret_cast<MarkCompact*>(arg)->GetMarkedForwardAddress(obj);
+}
+
+inline void MarkCompact::UpdateHeapReference(mirror::HeapReference<mirror::Object>* reference) {
+  mirror::Object* obj = reference->AsMirrorPtr();
+  if (obj != nullptr) {
+    mirror::Object* new_obj = GetMarkedForwardAddress(obj);
+    if (obj != new_obj) {
+      DCHECK(new_obj != nullptr);
+      reference->Assign(new_obj);
+    }
+  }
+}
+
+void MarkCompact::UpdateHeapReferenceCallback(mirror::HeapReference<mirror::Object>* reference,
+                                              void* arg) {
+  reinterpret_cast<MarkCompact*>(arg)->UpdateHeapReference(reference);
+}
+
+class UpdateReferenceVisitor {
+ public:
+  explicit UpdateReferenceVisitor(MarkCompact* collector) : collector_(collector) {
+  }
+
+  void operator()(Object* obj, MemberOffset offset, bool /*is_static*/) const
+      ALWAYS_INLINE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+    collector_->UpdateHeapReference(obj->GetFieldObjectReferenceAddr<kVerifyNone>(offset));
+  }
+
+  void operator()(mirror::Class* /*klass*/, mirror::Reference* ref) const
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+    collector_->UpdateHeapReference(
+        ref->GetFieldObjectReferenceAddr<kVerifyNone>(mirror::Reference::ReferentOffset()));
+  }
+
+ private:
+  MarkCompact* const collector_;
+};
+
+void MarkCompact::UpdateObjectReferences(mirror::Object* obj) {
+  UpdateReferenceVisitor visitor(this);
+  obj->VisitReferences<kMovingClasses>(visitor, visitor);
+}
+
+inline mirror::Object* MarkCompact::GetMarkedForwardAddress(mirror::Object* obj) const {
+  DCHECK(obj != nullptr);
+  if (objects_before_forwarding_->HasAddress(obj)) {
+    DCHECK(objects_before_forwarding_->Test(obj));
+    mirror::Object* ret =
+        reinterpret_cast<mirror::Object*>(obj->GetLockWord(false).ForwardingAddress());
+    DCHECK(ret != nullptr);
+    return ret;
+  }
+  DCHECK(!space_->HasAddress(obj));
+  DCHECK(IsMarked(obj));
+  return obj;
+}
+
+inline bool MarkCompact::IsMarked(const Object* object) const {
+  if (immune_region_.ContainsObject(object)) {
+    return true;
+  }
+  if (objects_before_forwarding_->HasAddress(object)) {
+    return objects_before_forwarding_->Test(object);
+  }
+  return mark_bitmap_->Test(object);
+}
+
+mirror::Object* MarkCompact::IsMarkedCallback(mirror::Object* object, void* arg) {
+  return reinterpret_cast<MarkCompact*>(arg)->IsMarked(object) ? object : nullptr;
+}
+
+bool MarkCompact::HeapReferenceMarkedCallback(mirror::HeapReference<mirror::Object>* ref_ptr,
+                                              void* arg) {
+  // Side effect free since we call this before ever moving objects.
+  return reinterpret_cast<MarkCompact*>(arg)->IsMarked(ref_ptr->AsMirrorPtr());
+}
+
+void MarkCompact::SweepSystemWeaks() {
+  timings_.StartSplit("SweepSystemWeaks");
+  Runtime::Current()->SweepSystemWeaks(IsMarkedCallback, this);
+  timings_.EndSplit();
+}
+
+bool MarkCompact::ShouldSweepSpace(space::ContinuousSpace* space) const {
+  return space != space_ && !immune_region_.ContainsSpace(space);
+}
+
+class MoveObjectVisitor {
+ public:
+  explicit MoveObjectVisitor(MarkCompact* collector) : collector_(collector) {
+  }
+  void operator()(mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+          EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
+      collector_->MoveObject(obj, obj->SizeOf());
+  }
+
+ private:
+  MarkCompact* const collector_;
+};
+
+void MarkCompact::MoveObject(mirror::Object* obj, size_t len) {
+  // Look at the forwarding address stored in the lock word to know where to copy.
+  DCHECK(space_->HasAddress(obj)) << obj;
+  uintptr_t dest_addr = obj->GetLockWord(false).ForwardingAddress();
+  mirror::Object* dest_obj = reinterpret_cast<mirror::Object*>(dest_addr);
+  DCHECK(space_->HasAddress(dest_obj)) << dest_obj;
+  // Use memmove since there may be overlap.
+  memmove(reinterpret_cast<void*>(dest_addr), reinterpret_cast<const void*>(obj), len);
+  // Restore the saved lock word if needed.
+  LockWord lock_word;
+  if (UNLIKELY(objects_with_lockword_->Test(obj))) {
+    lock_word = lock_words_to_restore_.front();
+    lock_words_to_restore_.pop_front();
+  }
+  dest_obj->SetLockWord(lock_word, false);
+}
+
+void MarkCompact::MoveObjects() {
+  timings_.NewSplit(__FUNCTION__);
+  // Move the objects in the before forwarding bitmap.
+  MoveObjectVisitor visitor(this);
+  objects_before_forwarding_->VisitMarkedRange(reinterpret_cast<uintptr_t>(space_->Begin()),
+                                               reinterpret_cast<uintptr_t>(space_->End()),
+                                               visitor);
+  CHECK(lock_words_to_restore_.empty());
+}
+
+void MarkCompact::Sweep(bool swap_bitmaps) {
+  DCHECK(mark_stack_->IsEmpty());
+  TimingLogger::ScopedSplit split("Sweep", &timings_);
+  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
+    if (space->IsContinuousMemMapAllocSpace()) {
+      space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
+      if (!ShouldSweepSpace(alloc_space)) {
+        continue;
+      }
+      TimingLogger::ScopedSplit split(
+          alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepAllocSpace", &timings_);
+      size_t freed_objects = 0;
+      size_t freed_bytes = 0;
+      alloc_space->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
+      RecordFree(freed_objects, freed_bytes);
+    }
+  }
+  SweepLargeObjects(swap_bitmaps);
+}
+
+void MarkCompact::SweepLargeObjects(bool swap_bitmaps) {
+  TimingLogger::ScopedSplit split("SweepLargeObjects", &timings_);
+  size_t freed_objects = 0;
+  size_t freed_bytes = 0;
+  heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
+  RecordFreeLargeObjects(freed_objects, freed_bytes);
+}
+
+// Process the "referent" field in a java.lang.ref.Reference.  If the referent has not yet been
+// marked, put it on the appropriate list in the heap for later processing.
+void MarkCompact::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) {
+  heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, reference,
+                                                         &HeapReferenceMarkedCallback, this);
+}
+
+class MarkCompactMarkObjectVisitor {
+ public:
+  explicit MarkCompactMarkObjectVisitor(MarkCompact* collector) : collector_(collector) {
+  }
+
+  void operator()(Object* obj, MemberOffset offset, bool /*is_static*/) const ALWAYS_INLINE
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+    // Object was already verified when we scanned it.
+    collector_->MarkObject(obj->GetFieldObject<mirror::Object, kVerifyNone>(offset));
+  }
+
+  void operator()(mirror::Class* klass, mirror::Reference* ref) const
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+    collector_->DelayReferenceReferent(klass, ref);
+  }
+
+ private:
+  MarkCompact* const collector_;
+};
+
+// Visit all of the references of an object and update.
+void MarkCompact::ScanObject(Object* obj) {
+  MarkCompactMarkObjectVisitor visitor(this);
+  obj->VisitReferences<kMovingClasses>(visitor, visitor);
+}
+
+// Scan anything that's on the mark stack.
+void MarkCompact::ProcessMarkStack() {
+  timings_.StartSplit("ProcessMarkStack");
+  while (!mark_stack_->IsEmpty()) {
+    Object* obj = mark_stack_->PopBack();
+    DCHECK(obj != nullptr);
+    ScanObject(obj);
+  }
+  timings_.EndSplit();
+}
+
+void MarkCompact::SetSpace(space::BumpPointerSpace* space) {
+  DCHECK(space != nullptr);
+  space_ = space;
+}
+
+void MarkCompact::FinishPhase() {
+  TimingLogger::ScopedSplit split("FinishPhase", &timings_);
+  space_ = nullptr;
+  CHECK(mark_stack_->IsEmpty());
+  mark_stack_->Reset();
+  // Clear all of the spaces' mark bitmaps.
+  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
+  heap_->ClearMarkedObjects();
+  // Release our bitmaps.
+  objects_before_forwarding_.reset(nullptr);
+  objects_with_lockword_.reset(nullptr);
+}
+
+void MarkCompact::RevokeAllThreadLocalBuffers() {
+  timings_.StartSplit("(Paused)RevokeAllThreadLocalBuffers");
+  GetHeap()->RevokeAllThreadLocalBuffers();
+  timings_.EndSplit();
+}
+
+}  // namespace collector
+}  // namespace gc
+}  // namespace art
diff --git a/runtime/gc/collector/mark_compact.h b/runtime/gc/collector/mark_compact.h
new file mode 100644
index 0000000..25cfe0f
--- /dev/null
+++ b/runtime/gc/collector/mark_compact.h
@@ -0,0 +1,255 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_GC_COLLECTOR_MARK_COMPACT_H_
+#define ART_RUNTIME_GC_COLLECTOR_MARK_COMPACT_H_
+
+#include <deque>
+#include <memory>  // For unique_ptr.
+
+#include "atomic.h"
+#include "base/macros.h"
+#include "base/mutex.h"
+#include "garbage_collector.h"
+#include "gc/accounting/heap_bitmap.h"
+#include "immune_region.h"
+#include "lock_word.h"
+#include "object_callbacks.h"
+#include "offsets.h"
+
+namespace art {
+
+class Thread;
+
+namespace mirror {
+  class Class;
+  class Object;
+}  // namespace mirror
+
+namespace gc {
+
+class Heap;
+
+namespace accounting {
+  template <typename T> class AtomicStack;
+  typedef AtomicStack<mirror::Object*> ObjectStack;
+}  // namespace accounting
+
+namespace space {
+  class ContinuousMemMapAllocSpace;
+  class ContinuousSpace;
+}  // namespace space
+
+namespace collector {
+
+class MarkCompact : public GarbageCollector {
+ public:
+  explicit MarkCompact(Heap* heap, const std::string& name_prefix = "");
+  ~MarkCompact() {}
+
+  virtual void RunPhases() OVERRIDE NO_THREAD_SAFETY_ANALYSIS;
+  void InitializePhase();
+  void MarkingPhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
+      LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
+  void ReclaimPhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
+      LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
+  void FinishPhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void MarkReachableObjects()
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+  virtual GcType GetGcType() const OVERRIDE {
+    return kGcTypePartial;
+  }
+  virtual CollectorType GetCollectorType() const OVERRIDE {
+    return kCollectorTypeMC;
+  }
+
+  // Sets which space we will be copying objects in.
+  void SetSpace(space::BumpPointerSpace* space);
+
+  // Initializes internal structures.
+  void Init();
+
+  // Find the default mark bitmap.
+  void FindDefaultMarkBitmap();
+
+  void ScanObject(mirror::Object* obj)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+  // Marks the root set at the start of a garbage collection.
+  void MarkRoots()
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+  // Bind the live bits to the mark bits of bitmaps for spaces that are never collected, ie
+  // the image. Mark that portion of the heap as immune.
+  void BindBitmaps() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
+
+  void UnBindBitmaps()
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+  void ProcessReferences(Thread* self)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  // Sweeps unmarked objects to complete the garbage collection.
+  void Sweep(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+  // Sweeps unmarked objects to complete the garbage collection.
+  void SweepLargeObjects(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+  void SweepSystemWeaks()
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+  static void MarkRootCallback(mirror::Object** root, void* arg, uint32_t /*tid*/,
+                               RootType /*root_type*/)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+  static mirror::Object* MarkObjectCallback(mirror::Object* root, void* arg)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+  static void MarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>* obj_ptr, void* arg)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+  static bool HeapReferenceMarkedCallback(mirror::HeapReference<mirror::Object>* ref_ptr,
+                                          void* arg)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+  static void ProcessMarkStackCallback(void* arg)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+
+  static void DelayReferenceReferentCallback(mirror::Class* klass, mirror::Reference* ref,
+                                             void* arg)
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+  // Schedules an unmarked object for reference processing.
+  void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference)
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+ protected:
+  // Returns the forwarding address of a marked object (which is the object itself for
+  // non-movable objects).
+  mirror::Object* GetMarkedForwardAddress(mirror::Object* object) const
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+  static mirror::Object* MarkedForwardingAddressCallback(mirror::Object* object, void* arg)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+  // Marks or unmarks a large object based on whether or not set is true. If set is true, then we
+  // mark, otherwise we unmark.
+  bool MarkLargeObject(const mirror::Object* obj)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  // Expand mark stack to 2x its current size.
+  void ResizeMarkStack(size_t new_size);
+
+  // Returns true if we should sweep the space.
+  bool ShouldSweepSpace(space::ContinuousSpace* space) const;
+
+  // Push an object onto the mark stack.
+  void MarkStackPush(mirror::Object* obj);
+
+  void UpdateAndMarkModUnion()
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  // Recursively blackens objects on the mark stack.
+  void ProcessMarkStack()
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+
+  // The 3-pass compaction: calculate forwarding addresses, update references, move objects.
+  void Compact() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+  // Calculate the forwarding address of objects marked as "live" in the objects_before_forwarding
+  // bitmap.
+  void CalculateObjectForwardingAddresses()
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+  // Update the references of objects by using the forwarding addresses.
+  void UpdateReferences() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+  static void UpdateRootCallback(mirror::Object** root, void* arg, uint32_t /*thread_id*/,
+                                 RootType /*root_type*/)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+  // Move objects and restore lock words.
+  void MoveObjects() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+  // Move a single object to its forward address.
+  void MoveObject(mirror::Object* obj, size_t len) EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+  // Mark a single object.
+  void MarkObject(mirror::Object* obj) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
+                                                                Locks::mutator_lock_);
+  bool IsMarked(const mirror::Object* obj) const
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+  static mirror::Object* IsMarkedCallback(mirror::Object* object, void* arg)
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+  void ForwardObject(mirror::Object* obj) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
+                                                                   Locks::mutator_lock_);
+  // Update a single heap reference.
+  void UpdateHeapReference(mirror::HeapReference<mirror::Object>* reference)
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+  static void UpdateHeapReferenceCallback(mirror::HeapReference<mirror::Object>* reference,
+                                          void* arg)
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+  // Update all of the references of a single object.
+  void UpdateObjectReferences(mirror::Object* obj)
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  // Revoke all the thread-local buffers.
+  void RevokeAllThreadLocalBuffers();
+
+  accounting::ObjectStack* mark_stack_;
+
+  // Immune region; every object inside it is assumed to be marked.
+  ImmuneRegion immune_region_;
+
+  // Bump pointer space which we are collecting.
+  space::BumpPointerSpace* space_;
+  // Cached mark bitmap as an optimization.
+  accounting::HeapBitmap* mark_bitmap_;
+
+  // The name of the collector.
+  std::string collector_name_;
+
+  // The bump pointer in the space where the next forwarding address will be.
+  byte* bump_pointer_;
+  // How many live objects we have in the space.
+  size_t live_objects_in_space_;
+
+  // Bitmap which describes which objects we have to move, need to do / 2 so that we can handle
+  // objects which are only 8 bytes.
+  std::unique_ptr<accounting::ContinuousSpaceBitmap> objects_before_forwarding_;
+  // Bitmap which describes which lock words we need to restore.
+  std::unique_ptr<accounting::ContinuousSpaceBitmap> objects_with_lockword_;
+  // Which lock words we need to restore as we are moving objects.
+  std::deque<LockWord> lock_words_to_restore_;
+
+ private:
+  friend class BitmapSetSlowPathVisitor;
+  friend class CalculateObjectForwardingAddressVisitor;
+  friend class MarkCompactMarkObjectVisitor;
+  friend class MoveObjectVisitor;
+  friend class UpdateObjectReferencesVisitor;
+  friend class UpdateReferenceVisitor;
+  DISALLOW_COPY_AND_ASSIGN(MarkCompact);
+};
+
+}  // namespace collector
+}  // namespace gc
+}  // namespace art
+
+#endif  // ART_RUNTIME_GC_COLLECTOR_MARK_COMPACT_H_
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index c72913a..fbb349e 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -43,10 +43,7 @@
 #include "thread-inl.h"
 #include "thread_list.h"
 
-using ::art::mirror::ArtField;
-using ::art::mirror::Class;
 using ::art::mirror::Object;
-using ::art::mirror::ObjectArray;
 
 namespace art {
 namespace gc {
@@ -1272,9 +1269,7 @@
   timings_.EndSplit();
 }
 
-inline bool MarkSweep::IsMarked(const Object* object) const
-    SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
-  DCHECK(object != nullptr);
+inline bool MarkSweep::IsMarked(const Object* object) const {
   if (immune_region_.ContainsObject(object)) {
     return true;
   }
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index a44d8a1..2780099 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -215,7 +215,8 @@
 
  protected:
   // Returns true if the object has its bit set in the mark bitmap.
-  bool IsMarked(const mirror::Object* object) const;
+  bool IsMarked(const mirror::Object* object) const
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
   static mirror::Object* IsMarkedCallback(mirror::Object* object, void* arg)
       SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index badf8b3..54e77a7 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -41,22 +41,12 @@
 #include "jni_internal.h"
 #include "mark_sweep-inl.h"
 #include "monitor.h"
-#include "mirror/art_field.h"
-#include "mirror/art_field-inl.h"
-#include "mirror/class-inl.h"
-#include "mirror/class_loader.h"
-#include "mirror/dex_cache.h"
 #include "mirror/reference-inl.h"
 #include "mirror/object-inl.h"
-#include "mirror/object_array.h"
-#include "mirror/object_array-inl.h"
 #include "runtime.h"
-#include "stack.h"
 #include "thread-inl.h"
 #include "thread_list.h"
-#include "verifier/method_verifier.h"
 
-using ::art::mirror::Class;
 using ::art::mirror::Object;
 
 namespace art {
@@ -788,7 +778,7 @@
     // Already forwarded, must be marked.
     return obj;
   }
-  return heap_->GetMarkBitmap()->Test(obj) ? obj : nullptr;
+  return mark_bitmap_->Test(obj) ? obj : nullptr;
 }
 
 void SemiSpace::SetToSpace(space::ContinuousMemMapAllocSpace* to_space) {