Refactor GC to have a class for each different type of GC.

Added separate files for mark sweep, partial mark sweep, and
sticky mark sweep.

Added a common superclass for GC.

Added additional statistics for each GC.

Moved the main garbage collection code out of heap.cc.

Change-Id: Ida0021ab2f740fc8228bbbf4d43cd9bc56b4ba46
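
For orientation, the collector hierarchy this change introduces, distilled
from the diff below (a sketch, not code from the patch itself):

    namespace art {
    // Phase hooks plus pause/duration bookkeeping live in the base class.
    class GarbageCollector { };
    // Full collection (kGcTypeFull).
    class MarkSweep : public GarbageCollector { };
    // Treats the zygote space as immune (kGcTypePartial).
    class PartialMarkSweep : public MarkSweep { };
    // Only rescans objects on dirty cards (kGcTypeSticky).
    class StickyMarkSweep : public PartialMarkSweep { };
    }  // namespace art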
diff --git a/build/Android.common.mk b/build/Android.common.mk
index d86a785..27b7ae5 100644
--- a/build/Android.common.mk
+++ b/build/Android.common.mk
@@ -151,12 +151,15 @@
 	src/file.cc \
 	src/file_linux.cc \
 	src/gc/card_table.cc \
+	src/gc/garbage_collector.cc \
 	src/gc/heap_bitmap.cc \
 	src/gc/large_object_space.cc \
 	src/gc/mark_sweep.cc \
 	src/gc/mod_union_table.cc \
+	src/gc/partial_mark_sweep.cc \
 	src/gc/space.cc \
 	src/gc/space_bitmap.cc \
+	src/gc/sticky_mark_sweep.cc \
 	src/heap.cc \
 	src/hprof/hprof.cc \
 	src/image.cc \
diff --git a/src/atomic_integer.h b/src/atomic_integer.h
index 22cc7b4..0e07d2d 100644
--- a/src/atomic_integer.h
+++ b/src/atomic_integer.h
@@ -24,6 +24,9 @@
 
 class AtomicInteger {
  public:
+  // Default constructor: intentionally leaves value_ uninitialized.
+  AtomicInteger() { }
+
   AtomicInteger(int32_t value) : value_(value) { }
 
   // Unsafe = operator for non atomic operations on the integer.
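
The new default constructor deliberately skips initialization: the collectors
below keep AtomicInteger counters as long-lived members and reset them at the
start of every collection. A minimal sketch of that usage pattern (hypothetical
names, assuming the reset happens before any concurrent access):

    struct GcCounters {
      AtomicInteger class_count_;  // left uninitialized by AtomicInteger()
      void InitializePhase() {
        class_count_ = 0;  // unsafe operator=, safe before marking starts
      }
    };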
diff --git a/src/gc/garbage_collector.cc b/src/gc/garbage_collector.cc
new file mode 100644
index 0000000..bcc7b63
--- /dev/null
+++ b/src/gc/garbage_collector.cc
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "garbage_collector.h"
+#include "thread_list.h"
+
+namespace art {
+  GarbageCollector::GarbageCollector(Heap* heap)
+      : heap_(heap),
+        duration_(0) {
+
+  }
+
+  bool GarbageCollector::HandleDirtyObjectsPhase() {
+    DCHECK(IsConcurrent());
+    return true;
+  }
+
+  void GarbageCollector::RegisterPause(uint64_t nano_length) {
+    pause_times_.push_back(nano_length);
+  }
+
+  void GarbageCollector::Run() {
+    Thread* self = Thread::Current();
+    ThreadList* thread_list = Runtime::Current()->GetThreadList();
+
+    uint64_t start_time = NanoTime();
+    pause_times_.clear();
+    duration_ = 0;
+
+    InitializePhase();
+
+    if (!IsConcurrent()) {
+      // Pause is the entire length of the GC.
+      uint64_t pause_start = NanoTime();
+      thread_list->SuspendAll();
+      MarkingPhase();
+      ReclaimPhase();
+      thread_list->ResumeAll();
+      uint64_t pause_end = NanoTime();
+      pause_times_.push_back(pause_end - pause_start);
+    } else {
+      {
+        ReaderMutexLock mu(self, *Locks::mutator_lock_);
+        MarkingPhase();
+      }
+      bool done = false;
+      while (!done) {
+        uint64_t pause_start = NanoTime();
+        thread_list->SuspendAll();
+        done = HandleDirtyObjectsPhase();
+        thread_list->ResumeAll();
+        uint64_t pause_end = NanoTime();
+        pause_times_.push_back(pause_end - pause_start);
+      }
+      {
+        ReaderMutexLock mu(self, *Locks::mutator_lock_);
+        ReclaimPhase();
+      }
+    }
+
+    uint64_t end_time = NanoTime();
+    duration_ = end_time - start_time;
+
+    FinishPhase();
+  }
+
+  GarbageCollector::~GarbageCollector() {
+
+  }
+}  // namespace art
diff --git a/src/gc/garbage_collector.h b/src/gc/garbage_collector.h
new file mode 100644
index 0000000..9ddf45f
--- /dev/null
+++ b/src/gc/garbage_collector.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_GC_GARBAGE_COLLECTOR_H_
+#define ART_SRC_GC_GARBAGE_COLLECTOR_H_
+
+#include "locks.h"
+#include "utils.h"
+
+namespace art {
+
+class Heap;
+
+class GarbageCollector {
+ public:
+  // Returns true iff the garbage collector is concurrent.
+  virtual bool IsConcurrent() const = 0;
+
+  GarbageCollector(Heap* heap);
+
+  virtual ~GarbageCollector();
+
+  // Run the garbage collector.
+  void Run();
+
+  Heap* GetHeap() {
+    return heap_;
+  }
+
+  // Returns how long the mutators were paused in nanoseconds.
+  const std::vector<uint64_t>& GetPauseTimes() const {
+    return pause_times_;
+  }
+
+  // Returns how long the GC took to complete in nanoseconds.
+  uint64_t GetDuration() const {
+    return duration_;
+  }
+
+
+  virtual std::string GetName() const = 0;
+
+  void RegisterPause(uint64_t nano_length);
+
+ protected:
+  // The initial phase. Done with mutators unpaused.
+  virtual void InitializePhase() = 0;
+
+  // Mark all reachable objects. Runs with mutators unpaused for a concurrent GC.
+  virtual void MarkingPhase() = 0;
+
+  // Only called for concurrent GCs. Gets called repeatedly until it succeeds.
+  virtual bool HandleDirtyObjectsPhase() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  // Called with mutators running for a concurrent GC, inside the pause otherwise.
+  virtual void ReclaimPhase() = 0;
+
+  // Called after the GC is finished. Done with mutators unpaused.
+  virtual void FinishPhase() = 0;
+
+  Heap* heap_;
+  std::vector<uint64_t> pause_times_;
+  uint64_t duration_;
+};
+
+}  // namespace art
+
+#endif  // ART_SRC_GC_GARBAGE_COLLECTOR_H_
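
Run() is a template method: it owns thread suspension, the concurrent versus
non-concurrent pause structure, and pause accounting, while subclasses supply
only the phases. A minimal, hypothetical collector illustrating the contract:

    class NoopCollector : public GarbageCollector {
     public:
      NoopCollector(Heap* heap) : GarbageCollector(heap) { }
      virtual bool IsConcurrent() const { return false; }
      virtual std::string GetName() const { return "Noop"; }
     protected:
      virtual void InitializePhase() { }  // mutators still running
      virtual void MarkingPhase() { }     // inside the stop-the-world pause here
      virtual void ReclaimPhase() { }     // also paused for a non-concurrent GC
      virtual void FinishPhase() { }      // mutators resumed
    };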
diff --git a/src/gc/large_object_space.cc b/src/gc/large_object_space.cc
index b066dd5..b2e0d2f 100644
--- a/src/gc/large_object_space.cc
+++ b/src/gc/large_object_space.cc
@@ -28,9 +28,7 @@
 namespace art {
 
 void LargeObjectSpace::SwapBitmaps() {
-  SpaceSetMap* temp_live_objects = live_objects_.release();
-  live_objects_.reset(mark_objects_.release());
-  mark_objects_.reset(temp_live_objects);
+  live_objects_.swap(mark_objects_);
   // Swap names to get more descriptive diagnostics.
   std::string temp_name = live_objects_->GetName();
   live_objects_->SetName(mark_objects_->GetName());
diff --git a/src/gc/mark_sweep.cc b/src/gc/mark_sweep.cc
index 1ccceaa..818eb81 100644
--- a/src/gc/mark_sweep.cc
+++ b/src/gc/mark_sweep.cc
@@ -16,6 +16,8 @@
 
 #include "mark_sweep.h"
 
+#include <functional>
+#include <numeric>
 #include <climits>
 #include <vector>
 
@@ -43,7 +45,7 @@
 
 // Performance options.
 static const bool kParallelMarkStack = true;
-static const bool kDisableFinger = true;
+static const bool kDisableFinger = kParallelMarkStack;
 static const bool kUseMarkStackPrefetch = true;
 
 // Profiling and information flags.
@@ -67,38 +69,267 @@
   MarkSweep* const mark_sweep_;
 };
 
-MarkSweep::MarkSweep(ObjectStack* mark_stack)
-    : current_mark_bitmap_(NULL),
-      mark_stack_(mark_stack),
-      heap_(NULL),
-      finger_(NULL),
-      immune_begin_(NULL),
-      immune_end_(NULL),
-      soft_reference_list_(NULL),
-      weak_reference_list_(NULL),
-      finalizer_reference_list_(NULL),
-      phantom_reference_list_(NULL),
-      cleared_reference_list_(NULL),
-      freed_bytes_(0), freed_objects_(0),
-      class_count_(0), array_count_(0), other_count_(0),
-      large_object_test_(0), large_object_mark_(0),
-      classes_marked_(0), overhead_time_(0),
-      work_chunks_created_(0), work_chunks_deleted_(0),
-      reference_count_(0),
-      gc_barrier_(new Barrier(0)),
-      large_object_lock_("large object lock"),
-      mark_stack_expand_lock_("mark stack expand lock") {
-  DCHECK(mark_stack_ != NULL);
+std::string MarkSweep::GetName() const {
+  std::ostringstream ss;
+  ss << (IsConcurrent() ? "Concurrent" : "") << GetGcType();
+  return ss.str();
 }
 
-void MarkSweep::Init() {
+void MarkSweep::ImmuneSpace(ContinuousSpace* space) {
+  // Bind live to mark bitmap if necessary.
+  if (space->GetLiveBitmap() != space->GetMarkBitmap()) {
+    BindLiveToMarkBitmap(space);
+  }
+
+  // Add the space to the immune region.
+  if (immune_begin_ == NULL) {
+    DCHECK(immune_end_ == NULL);
+    SetImmuneRange(reinterpret_cast<Object*>(space->Begin()),
+                   reinterpret_cast<Object*>(space->End()));
+  } else {
+    const Spaces& spaces = GetHeap()->GetSpaces();
+    const ContinuousSpace* prev_space = NULL;
+    // Find out if the previous space is immune.
+    // TODO: C++0x
+    for (Spaces::const_iterator it = spaces.begin(); it != spaces.end(); ++it) {
+      if (*it == space) {
+        break;
+      }
+      prev_space = *it;
+    }
+
+    // If the previous space was immune, then extend the immune region to include this space.
+    if (prev_space != NULL &&
+        immune_begin_ <= reinterpret_cast<Object*>(prev_space->Begin()) &&
+        immune_end_ >= reinterpret_cast<Object*>(prev_space->End())) {
+      immune_begin_ = std::min(reinterpret_cast<Object*>(space->Begin()), immune_begin_);
+      immune_end_ = std::max(reinterpret_cast<Object*>(space->End()), immune_end_);
+    }
+  }
+}
+
+// Bind the live bits to the mark bits of bitmaps based on the gc type.
+void MarkSweep::BindBitmaps() {
+  Spaces& spaces = GetHeap()->GetSpaces();
+  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
+
+  // Mark all of the spaces we never collect as immune.
+  for (Spaces::iterator it = spaces.begin(); it != spaces.end(); ++it) {
+    ContinuousSpace* space = *it;
+    if (space->GetGcRetentionPolicy() == kGcRetentionPolicyNeverCollect) {
+      ImmuneSpace(space);
+    }
+  }
+}
+
+MarkSweep::MarkSweep(Heap* heap, bool is_concurrent)
+    : GarbageCollector(heap),
+      gc_barrier_(new Barrier(0)),
+      large_object_lock_("large object lock"),
+      mark_stack_expand_lock_("mark stack expand lock"),
+      timings_(GetName(), true),
+      cumulative_timings_(GetName(), true),
+      is_concurrent_(is_concurrent) {
+  cumulative_timings_.SetName(GetName());
+  ResetCumulativeStatistics();
+}
+
+void MarkSweep::InitializePhase() {
+  mark_stack_ = GetHeap()->mark_stack_.get();
+  DCHECK(mark_stack_ != NULL);
+  finger_ = NULL;
+  SetImmuneRange(NULL, NULL);
+  soft_reference_list_ = NULL;
+  weak_reference_list_ = NULL;
+  finalizer_reference_list_ = NULL;
+  phantom_reference_list_ = NULL;
+  cleared_reference_list_ = NULL;
+  freed_bytes_ = 0;
+  freed_objects_ = 0;
+  class_count_ = 0;
+  array_count_ = 0;
+  other_count_ = 0;
+  large_object_test_ = 0;
+  large_object_mark_ = 0;
+  classes_marked_ = 0;
+  overhead_time_ = 0;
+  work_chunks_created_ = 0;
+  work_chunks_deleted_ = 0;
+  reference_count_ = 0;
   java_lang_Class_ = Class::GetJavaLangClass();
   CHECK(java_lang_Class_ != NULL);
-  heap_ = Runtime::Current()->GetHeap();
-  mark_stack_->Reset();
   FindDefaultMarkBitmap();
   // Mark any concurrent roots as dirty since we need to scan them at least once during this GC.
   Runtime::Current()->DirtyRoots();
+  timings_.Reset();
+  // Do any pre GC verification.
+  heap_->PreGcVerification(this);
+}
+
+void MarkSweep::ProcessReferences(Thread* self) {
+  ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
+  ProcessReferences(&soft_reference_list_, clear_soft_references_, &weak_reference_list_,
+                    &finalizer_reference_list_, &phantom_reference_list_);
+  timings_.AddSplit("ProcessReferences");
+}
+
+bool MarkSweep::HandleDirtyObjectsPhase() {
+  Thread* self = Thread::Current();
+  ObjectStack* allocation_stack = GetHeap()->allocation_stack_.get();
+  Locks::mutator_lock_->AssertExclusiveHeld(self);
+
+  {
+    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
+
+    // Re-mark root set.
+    ReMarkRoots();
+    timings_.AddSplit("ReMarkRoots");
+
+    // Scan dirty objects, this is only required if we are not doing concurrent GC.
+    RecursiveMarkDirtyObjects();
+  }
+
+  ProcessReferences(self);
+
+  // Only need to do this if we have the card mark verification on, and only during concurrent GC.
+  if (GetHeap()->verify_missing_card_marks_) {
+    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
+    // This second sweep makes sure that we don't have any objects in the live stack which point to
+    // freed objects. These cause problems since their references may be previously freed objects.
+    SweepArray(timings_, allocation_stack, false);
+  } else {
+    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
+    // We only sweep over the live stack, and the live stack should not intersect with the
+    // allocation stack, so it should be safe to un-mark objects in the allocation stack.
+    heap_->UnMarkAllocStack(GetHeap()->alloc_space_->GetMarkBitmap(),
+                            GetHeap()->large_object_space_->GetMarkObjects(),
+                            allocation_stack);
+    timings_.AddSplit("UnMarkAllocStack");
+  }
+  return true;
+}
+
+bool MarkSweep::IsConcurrent() const {
+  return is_concurrent_;
+}
+
+void MarkSweep::MarkingPhase() {
+  Heap* heap = GetHeap();
+  Thread* self = Thread::Current();
+
+  BindBitmaps();
+  FindDefaultMarkBitmap();
+  timings_.AddSplit("BindBitmaps");
+
+  // Process dirty cards and add dirty cards to mod union tables.
+  heap->ProcessCards(timings_);
+
+  // Need to do this before the checkpoint since we don't want any threads to add references to
+  // the live stack during the recursive mark.
+  heap->SwapStacks();
+  timings_.AddSplit("SwapStacks");
+
+  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
+  if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
+    // If we exclusively hold the mutator lock, all threads must be suspended.
+    MarkRoots();
+    timings_.AddSplit("MarkConcurrentRoots");
+  } else {
+    MarkRootsCheckpoint();
+    timings_.AddSplit("MarkRootsCheckpoint");
+    MarkNonThreadRoots();
+    timings_.AddSplit("MarkNonThreadRoots");
+  }
+  MarkConcurrentRoots();
+  timings_.AddSplit("MarkConcurrentRoots");
+
+  heap->UpdateAndMarkModUnion(this, timings_, GetGcType());
+  MarkReachableObjects();
+}
+
+void MarkSweep::MarkReachableObjects() {
+  // Mark everything allocated since the last GC as live so that we can sweep concurrently,
+  // knowing that new allocations won't be marked as live.
+  ObjectStack* live_stack = heap_->GetLiveStack();
+  heap_->MarkAllocStack(heap_->alloc_space_->GetLiveBitmap(),
+                        heap_->large_object_space_->GetLiveObjects(),
+                        live_stack);
+  live_stack->Reset();
+  timings_.AddSplit("MarkStackAsLive");
+  // Recursively mark all the non-image bits set in the mark bitmap.
+  RecursiveMark();
+  DisableFinger();
+}
+
+void MarkSweep::ReclaimPhase() {
+  Thread* self = Thread::Current();
+
+  if (!IsConcurrent()) {
+    ProcessReferences(self);
+  }
+
+  // Before freeing anything, let's verify the heap.
+  if (kIsDebugBuild) {
+    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
+    VerifyImageRoots();
+  }
+  heap_->PreSweepingGcVerification(this);
+
+  {
+    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
+
+    // Reclaim unmarked objects.
+    Sweep(timings_, false);
+
+    // Swap the live and mark bitmaps for each space which we modified. This is an
+    // optimization that enables us to not clear live bits inside of the sweep. Only swaps unbound
+    // bitmaps.
+    SwapBitmaps();
+    timings_.AddSplit("SwapBitmaps");
+
+    // Unbind the live and mark bitmaps.
+    UnBindBitmaps();
+  }
+
+  heap_->GrowForUtilization();
+  timings_.AddSplit("GrowForUtilization");
+}
+
+void MarkSweep::SwapBitmaps() {
+  // Swap the live and mark bitmaps for each alloc space. This is needed since sweep re-swaps
+  // these bitmaps. The bitmap swapping is an optimization so that we do not need to clear the live
+  // bits of dead objects in the live bitmap.
+  const GcType gc_type = GetGcType();
+  // TODO: C++0x
+  Spaces& spaces = heap_->GetSpaces();
+  for (Spaces::iterator it = spaces.begin(); it != spaces.end(); ++it) {
+    ContinuousSpace* space = *it;
+    // We never allocate into zygote spaces.
+    if (space->GetGcRetentionPolicy() == kGcRetentionPolicyAlwaysCollect ||
+        (gc_type == kGcTypeFull &&
+            space->GetGcRetentionPolicy() == kGcRetentionPolicyFullCollect)) {
+      SpaceBitmap* live_bitmap = space->GetLiveBitmap();
+      SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
+      if (live_bitmap != mark_bitmap) {
+        heap_->GetLiveBitmap()->ReplaceBitmap(live_bitmap, mark_bitmap);
+        heap_->GetMarkBitmap()->ReplaceBitmap(mark_bitmap, live_bitmap);
+        space->AsAllocSpace()->SwapBitmaps();
+      }
+    }
+  }
+  SwapLargeObjects();
+}
+
+void MarkSweep::SwapLargeObjects() {
+  LargeObjectSpace* large_object_space = heap_->GetLargeObjectsSpace();
+  large_object_space->SwapBitmaps();
+  heap_->GetLiveBitmap()->SetLargeObjects(large_object_space->GetLiveObjects());
+  heap_->GetMarkBitmap()->SetLargeObjects(large_object_space->GetMarkObjects());
+}
+
+void MarkSweep::SetImmuneRange(Object* begin, Object* end) {
+  immune_begin_ = begin;
+  immune_end_ = end;
 }
 
 void MarkSweep::FindDefaultMarkBitmap() {
@@ -238,6 +469,12 @@
   }
 }
 
+void MarkSweep::MarkRoot(const Object* obj) {
+  if (obj != NULL) {
+    MarkObjectNonNull(obj, false);
+  }
+}
+
 void MarkSweep::MarkRootParallelCallback(const Object* root, void* arg) {
   DCHECK(root != NULL);
   DCHECK(arg != NULL);
@@ -304,6 +541,9 @@
 
   void operator ()(const Object* obj, const Object* ref, MemberOffset offset, bool is_static) const
       NO_THREAD_SAFETY_ANALYSIS {
+    if (kDebugLocking) {
+      Locks::heap_bitmap_lock_->AssertSharedHeld(Thread::Current());
+    }
     mark_sweep_->CheckReference(obj, ref, offset, is_static);
   }
 
@@ -325,12 +565,6 @@
   mark_sweep->CheckObject(root);
 }
 
-void MarkSweep::CopyMarkBits(ContinuousSpace* space) {
-  SpaceBitmap* live_bitmap = space->GetLiveBitmap();
-  SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
-  mark_bitmap->CopyFrom(live_bitmap);
-}
-
 void MarkSweep::BindLiveToMarkBitmap(ContinuousSpace* space) {
   CHECK(space->IsAllocSpace());
   DlMallocSpace* alloc_space = space->AsAllocSpace();
@@ -347,9 +581,12 @@
 
   }
 
-  void operator ()(const Object* obj) const
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  // TODO: Fix this when annotalysis works with visitors.
+  void operator ()(const Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
+    if (kDebugLocking) {
+      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
+      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
+    }
     mark_sweep_->ScanObject(obj);
   }
 
@@ -380,8 +617,10 @@
   }
 
   void operator ()(const Object* obj) const
-      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
-                            Locks::mutator_lock_) {
+      NO_THREAD_SAFETY_ANALYSIS {
+    if (kDebugLocking) {
+      Locks::heap_bitmap_lock_->AssertSharedHeld(Thread::Current());
+    }
     DCHECK(obj != NULL);
     mark_sweep_->CheckObject(obj);
   }
@@ -410,7 +649,7 @@
 
 // Populates the mark stack based on the set of marked objects and
 // recursively marks until the mark stack is emptied.
-void MarkSweep::RecursiveMark(bool partial, TimingLogger& timings) {
+void MarkSweep::RecursiveMark() {
   // RecursiveMark will build the lists of known instances of the Reference classes.
   // See DelayReferenceReferent for details.
   CHECK(soft_reference_list_ == NULL);
@@ -419,61 +658,33 @@
   CHECK(phantom_reference_list_ == NULL);
   CHECK(cleared_reference_list_ == NULL);
 
+  const bool partial = GetGcType() == kGcTypePartial;
   const Spaces& spaces = heap_->GetSpaces();
   SetFingerVisitor set_finger_visitor(this);
   ScanObjectVisitor scan_visitor(this);
-  for (Spaces::const_iterator it = spaces.begin(); it != spaces.end(); ++it) {
-    ContinuousSpace* space = *it;
-    if ((!kDisableFinger && space->GetGcRetentionPolicy() == kGcRetentionPolicyAlwaysCollect) ||
-        (!partial && space->GetGcRetentionPolicy() == kGcRetentionPolicyFullCollect)
-        ) {
-      current_mark_bitmap_ = space->GetMarkBitmap();
-      if (current_mark_bitmap_ == NULL) {
-        GetHeap()->DumpSpaces();
-        LOG(FATAL) << "invalid bitmap";
-      }
-      // This function does not handle heap end increasing, so we must use the space end.
-      uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
-      uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
-      current_mark_bitmap_->VisitMarkedRange(begin, end, scan_visitor, set_finger_visitor);
-    }
-  }
-  finger_ = reinterpret_cast<Object*>(~0);
-  timings.AddSplit("RecursiveMark");
-  // TODO: tune the frequency of emptying the mark stack
-  ProcessMarkStack();
-  timings.AddSplit("ProcessMarkStack");
-}
-
-void MarkSweep::RecursiveMarkCards(CardTable* card_table, const std::vector<byte*>& cards,
-                                   TimingLogger& timings) {
-  ScanObjectVisitor image_root_visitor(this);
-  SetFingerVisitor finger_visitor(this);
-  const size_t card_count = cards.size();
-  SpaceBitmap* active_bitmap = NULL;
-  for (size_t i = 0;i < card_count;) {
-    Object* start_obj = reinterpret_cast<Object*>(card_table->AddrFromCard(cards[i]));
-    uintptr_t begin = reinterpret_cast<uintptr_t>(start_obj);
-    uintptr_t end = begin + CardTable::kCardSize;
-    for (++i; reinterpret_cast<uintptr_t>(cards[i]) == end && i < card_count; ++i) {
-      end += CardTable::kCardSize;
-    }
-    if (active_bitmap == NULL || !active_bitmap->HasAddress(start_obj)) {
-      active_bitmap = heap_->GetMarkBitmap()->GetSpaceBitmap(start_obj);
-      if (kIsDebugBuild && active_bitmap == NULL) {
-        GetHeap()->DumpSpaces();
-        LOG(FATAL) << "Object " << reinterpret_cast<const void*>(start_obj);
+  if (!kDisableFinger) {
+    finger_ = NULL;
+    for (Spaces::const_iterator it = spaces.begin(); it != spaces.end(); ++it) {
+      ContinuousSpace* space = *it;
+      if ((space->GetGcRetentionPolicy() == kGcRetentionPolicyAlwaysCollect) ||
+          (!partial && space->GetGcRetentionPolicy() == kGcRetentionPolicyFullCollect)
+          ) {
+        current_mark_bitmap_ = space->GetMarkBitmap();
+        if (current_mark_bitmap_ == NULL) {
+          GetHeap()->DumpSpaces();
+          LOG(FATAL) << "invalid bitmap";
+        }
+        // This function does not handle heap end increasing, so we must use the space end.
+        uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
+        uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
+        current_mark_bitmap_->VisitMarkedRange(begin, end, scan_visitor, set_finger_visitor);
       }
     }
-    if (kDisableFinger) {
-      active_bitmap->VisitMarkedRange(begin, end, image_root_visitor, VoidFunctor());
-    } else {
-      active_bitmap->VisitMarkedRange(begin, end, image_root_visitor, finger_visitor);
-    }
   }
-  timings.AddSplit("RecursiveMarkCards");
+  DisableFinger();
+  timings_.AddSplit("RecursiveMark");
   ProcessMarkStack();
-  timings.AddSplit("ProcessMarkStack");
+  timings_.AddSplit("ProcessMarkStack");
 }
 
 bool MarkSweep::IsMarkedCallback(const Object* object, void* arg) {
@@ -484,7 +695,9 @@
 
 void MarkSweep::RecursiveMarkDirtyObjects(byte minimum_age) {
   ScanGrayObjects(minimum_age);
+  timings_.AddSplit("ScanGrayObjects");
   ProcessMarkStack();
+  timings_.AddSplit("ProcessMarkStack");
 }
 
 void MarkSweep::ReMarkRoots() {
@@ -613,6 +826,22 @@
   return *gc_barrier_;
 }
 
+const TimingLogger& MarkSweep::GetTimings() const {
+  return timings_;
+}
+
+const CumulativeLogger& MarkSweep::GetCumulativeTimings() const {
+  return cumulative_timings_;
+}
+
+void MarkSweep::ResetCumulativeStatistics() {
+  cumulative_timings_.Reset();
+  total_time_ = 0;
+  total_paused_time_ = 0;
+  total_freed_objects_ = 0;
+  total_freed_bytes_ = 0;
+}
+
 void MarkSweep::MarkRootsCheckpoint() {
   CheckpointMarkThreadRoots check_point(this);
   ThreadList* thread_list = Runtime::Current()->GetThreadList();
@@ -707,7 +936,7 @@
   logger.AddSplit("ResetStack");
 }
 
-void MarkSweep::Sweep(TimingLogger& timings, bool partial, bool swap_bitmaps) {
+void MarkSweep::Sweep(TimingLogger& timings, bool swap_bitmaps) {
   DCHECK(mark_stack_->IsEmpty());
 
   // If we don't swap bitmaps then newly allocated Weaks go into the live bitmap but not mark
@@ -715,6 +944,7 @@
   SweepSystemWeaks();
   timings.AddSplit("SweepSystemWeaks");
 
+  const bool partial = GetGcType() == kGcTypePartial;
   const Spaces& spaces = heap_->GetSpaces();
   SweepCallbackContext scc;
   scc.mark_sweep = this;
@@ -746,6 +976,9 @@
     }
   }
   timings.AddSplit("Sweep");
+
+  SweepLargeObjects(swap_bitmaps);
+  timings.AddSplit("SweepLargeObjects");
 }
 
 void MarkSweep::SweepLargeObjects(bool swap_bitmaps) {
@@ -857,8 +1090,14 @@
   MarkObjectVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {
   }
 
+  // TODO: Fix this when annotalysis works with visitors.
   void operator ()(const Object* /* obj */, const Object* ref, const MemberOffset& /* offset */,
-                   bool /* is_static */) const EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+                   bool /* is_static */) const
+      NO_THREAD_SAFETY_ANALYSIS {
+    if (kDebugLocking) {
+      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
+      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
+    }
     mark_sweep_->MarkObject(ref);
   }
 
@@ -1228,7 +1467,23 @@
   }
 }
 
-MarkSweep::~MarkSweep() {
+void MarkSweep::FinishPhase() {
+  // Can't enqueue references if we hold the mutator lock.
+  Object* cleared_references = GetClearedReferences();
+  heap_->EnqueueClearedReferences(&cleared_references);
+
+  heap_->PostGcVerification(this);
+
+  // Update the cumulative statistics
+  total_time_ += GetDuration();
+  total_paused_time_ += std::accumulate(GetPauseTimes().begin(), GetPauseTimes().end(),
+                                        static_cast<uint64_t>(0), std::plus<uint64_t>());
+  total_freed_objects_ += GetFreedObjects();
+  total_freed_bytes_ += GetFreedBytes();
+
+  // Ensure that the mark stack is empty.
+  CHECK(mark_stack_->IsEmpty());
+
   if (kCountScannedTypes) {
     VLOG(gc) << "MarkSweep scanned classes=" << class_count_ << " arrays=" << array_count_
              << " other=" << other_count_;
@@ -1254,8 +1509,10 @@
     VLOG(gc) << "References scanned " << reference_count_;
   }
 
-  // Ensure that the mark stack is empty.
-  CHECK(mark_stack_->IsEmpty());
+  // Update the cumulative loggers.
+  cumulative_timings_.Start();
+  cumulative_timings_.AddLogger(timings_);
+  cumulative_timings_.End();
 
   // Clear all of the spaces' mark bitmaps.
   const Spaces& spaces = heap_->GetSpaces();
@@ -1273,4 +1530,8 @@
   large_objects->GetMarkObjects()->Clear();
 }
 
+MarkSweep::~MarkSweep() {
+
+}
+
 }  // namespace art
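
One note on the pause-time accumulation in FinishPhase(): std::accumulate
derives its accumulator type from the initial value, so the sum is seeded with
an explicit uint64_t zero to keep the arithmetic 64-bit. A standalone sketch:

    #include <functional>
    #include <numeric>
    #include <stdint.h>
    #include <vector>

    uint64_t TotalPauseNs(const std::vector<uint64_t>& pauses) {
      // Seeding with a plain int 0 would make the accumulator an int and
      // silently truncate large nanosecond totals.
      return std::accumulate(pauses.begin(), pauses.end(),
                             static_cast<uint64_t>(0), std::plus<uint64_t>());
    }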
diff --git a/src/gc/mark_sweep.h b/src/gc/mark_sweep.h
index 98445d4..554577b 100644
--- a/src/gc/mark_sweep.h
+++ b/src/gc/mark_sweep.h
@@ -18,6 +18,7 @@
 #define ART_SRC_MARK_SWEEP_H_
 
 #include "atomic_stack.h"
+#include "garbage_collector.h"
 #include "macros.h"
 #include "heap_bitmap.h"
 #include "object.h"
@@ -37,12 +38,26 @@
 class TimingLogger;
 class MarkStackChunk;
 
-class MarkSweep {
+class MarkSweep : public GarbageCollector {
  public:
-  explicit MarkSweep(ObjectStack* mark_stack);
+  explicit MarkSweep(Heap* heap, bool is_concurrent);
 
   ~MarkSweep();
 
+  virtual std::string GetName() const;
+  virtual void InitializePhase();
+  virtual bool IsConcurrent() const;
+  virtual bool HandleDirtyObjectsPhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+  virtual void MarkingPhase() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  virtual void ReclaimPhase() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  virtual void FinishPhase();
+  virtual void MarkReachableObjects()
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+  virtual GcType GetGcType() const {
+    return kGcTypeFull;
+  }
+
   // Initializes internal structures.
   void Init();
 
@@ -61,19 +76,24 @@
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
-  void MarkRootsCheckpoint();
-       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+  void MarkRootsCheckpoint()
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
   // Verify that image roots point to only marked objects within the alloc space.
   void VerifyImageRoots() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
   // Builds a mark stack and recursively mark until it empties.
-  void RecursiveMark(bool partial, TimingLogger& timings)
+  void RecursiveMark()
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  // Copies mark bits from live bitmap of ZygoteSpace to mark bitmap for partial GCs.
-  void CopyMarkBits(ContinuousSpace* space);
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+  // Make a space immune; immune spaces are assumed to have all live objects marked.
+  void ImmuneSpace(ContinuousSpace* space)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  // Bind the live bits to the mark bits of bitmaps based on the gc type.
+  virtual void BindBitmaps()
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void BindLiveToMarkBitmap(ContinuousSpace* space)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
@@ -86,32 +106,16 @@
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  // Recursive mark objects on specified cards. Updates finger.
-  void RecursiveMarkCards(CardTable* card_table, const std::vector<byte*>& cards,
-                          TimingLogger& timings)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);;
-
   // Remarks the root set after completing the concurrent mark.
   void ReMarkRoots()
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  Heap* GetHeap() {
-    return heap_;
-  }
-
-  void ProcessReferences(bool clear_soft_references)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    ProcessReferences(&soft_reference_list_, clear_soft_references,
-                      &weak_reference_list_,
-                      &finalizer_reference_list_,
-                      &phantom_reference_list_);
-  }
+  void ProcessReferences(Thread* self)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Sweeps unmarked objects to complete the garbage collection.
-  void Sweep(TimingLogger& timings, bool partial, bool swap_bitmaps)
+  virtual void Sweep(TimingLogger& timings, bool swap_bitmaps)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
   // Sweeps unmarked objects to complete the garbage collection.
@@ -122,6 +126,10 @@
   void SweepArray(TimingLogger& logger, ObjectStack* allocation_stack_, bool swap_bitmaps)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
+  // Swap bitmaps (if we are a full Gc then we swap the zygote bitmap too).
+  virtual void SwapBitmaps() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+  void SwapLargeObjects() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
   Object* GetClearedReferences() {
     return cleared_reference_list_;
   }
@@ -187,12 +195,25 @@
     return freed_objects_;
   }
 
-  // Everything inside the immune range is marked.
-  void SetImmuneRange(Object* begin, Object* end) {
-    immune_begin_ = begin;
-    immune_end_ = end;
+  uint64_t GetTotalTime() const {
+    return total_time_;
   }
 
+  uint64_t GetTotalPausedTime() const {
+    return total_paused_time_;
+  }
+
+  uint64_t GetTotalFreedObjects() const {
+    return total_freed_objects_;
+  }
+
+  uint64_t GetTotalFreedBytes() const {
+    return total_freed_bytes_;
+  }
+
+  // Everything inside the immune range is assumed to be marked.
+  void SetImmuneRange(Object* begin, Object* end);
+
   void SweepSystemWeaks()
       SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
@@ -235,17 +256,26 @@
   }
 
   static void MarkObjectCallback(const Object* root, void* arg)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
   static void MarkRootParallelCallback(const Object* root, void* arg);
 
   // Marks an object.
   void MarkObject(const Object* obj)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+  void MarkRoot(const Object* obj)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
   Barrier& GetBarrier();
+  const TimingLogger& GetTimings() const;
+  const CumulativeLogger& GetCumulativeTimings() const;
+  void ResetCumulativeStatistics();
 
- private:
+ protected:
   // Returns true if the object has its bit set in the mark bitmap.
   bool IsMarked(const Object* object) const;
 
@@ -256,6 +286,7 @@
       SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
   static void ReMarkObjectVisitor(const Object* root, void* arg)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
   static void VerifyImageRootVisitor(Object* root, void* arg)
@@ -263,6 +294,7 @@
                             Locks::mutator_lock_);
 
   void MarkObjectNonNull(const Object* obj, bool check_finger)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
   void MarkObjectNonNullParallel(const Object* obj, bool check_finger);
@@ -433,8 +465,6 @@
 
   ObjectStack* mark_stack_;
 
-  Heap* heap_;
-
   Object* finger_;
 
   // Immune range, every object inside the immune range is assumed to be marked.
@@ -460,14 +490,26 @@
   AtomicInteger work_chunks_deleted_;
   AtomicInteger reference_count_;
 
+  // Cumulative statistics.
+  uint64_t total_time_;
+  uint64_t total_paused_time_;
+  uint64_t total_freed_objects_;
+  uint64_t total_freed_bytes_;
+
   UniquePtr<Barrier> gc_barrier_;
   Mutex large_object_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
   Mutex mark_stack_expand_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+  TimingLogger timings_;
+  CumulativeLogger cumulative_timings_;
+
+  bool is_concurrent_;
+  bool clear_soft_references_;
 
   friend class AddIfReachesAllocSpaceVisitor; // Used by mod-union table.
   friend class CheckBitmapVisitor;
   friend class CheckObjectVisitor;
   friend class CheckReferenceVisitor;
+  friend class Heap;
   friend class InternTableEntryIsUnmarked;
   friend class MarkIfReachesAllocspaceVisitor;
   friend class ModUnionCheckReferences;
diff --git a/src/gc/mod_union_table.cc b/src/gc/mod_union_table.cc
index 5dd61e7..4d9ffe2 100644
--- a/src/gc/mod_union_table.cc
+++ b/src/gc/mod_union_table.cc
@@ -256,6 +256,7 @@
   }
 
   // Extra parameters are required since we use this same visitor signature for checking objects.
+  // TODO: Fix this when annotalysis works with visitors.
   void operator ()(const Object* obj, const Object* ref, const MemberOffset& /* offset */,
                      bool /* is_static */) const
       SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
@@ -290,9 +291,11 @@
       references_(references) {
   }
 
-  void operator ()(const Object* obj) const
-      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
+  void operator ()(const Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
     DCHECK(obj != NULL);
+    if (kDebugLocking) {
+      Locks::heap_bitmap_lock_->AssertSharedHeld(Thread::Current());
+    }
     CheckReferenceVisitor visitor(mod_union_table_, references_);
     MarkSweep::VisitObjectReferences(obj, visitor);
   }
@@ -306,7 +309,8 @@
   // Start by checking that everything in the mod union table is marked.
   Heap* heap = GetHeap();
   for (ReferenceMap::const_iterator it = references_.begin(); it != references_.end(); ++it) {
-    for (ReferenceArray::const_iterator it_ref = it->second.begin(); it_ref != it->second.end(); ++it_ref ) {
+    for (ReferenceArray::const_iterator it_ref = it->second.begin(); it_ref != it->second.end();
+        ++it_ref ) {
       DCHECK(heap->GetLiveBitmap()->Test(*it_ref));
     }
   }
@@ -368,7 +372,7 @@
   size_t count = 0;
   for (ReferenceMap::const_iterator it = references_.begin(); it != references_.end(); ++it) {
     for (ReferenceArray::const_iterator it_ref = it->second.begin(); it_ref != it->second.end(); ++it_ref ) {
-      mark_sweep->MarkObject(*it_ref);
+      mark_sweep->MarkRoot(*it_ref);
       ++count;
     }
   }
diff --git a/src/gc/mod_union_table.h b/src/gc/mod_union_table.h
index 84592a4..f3da41c 100644
--- a/src/gc/mod_union_table.h
+++ b/src/gc/mod_union_table.h
@@ -109,7 +109,9 @@
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Mark all references to the alloc space(s).
-  void MarkReferences(MarkSweep* mark_sweep) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+  void MarkReferences(MarkSweep* mark_sweep)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
   // Exclusive lock is required since verify uses SpaceBitmap::VisitMarkedRange and
   // VisitMarkedRange can't know if the callback will modify the bitmap or not.
diff --git a/src/gc/partial_mark_sweep.cc b/src/gc/partial_mark_sweep.cc
new file mode 100644
index 0000000..64f09ff
--- /dev/null
+++ b/src/gc/partial_mark_sweep.cc
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "large_object_space.h"
+#include "partial_mark_sweep.h"
+#include "space.h"
+
+namespace art {
+  PartialMarkSweep::PartialMarkSweep(Heap* heap, bool is_concurrent)
+      : MarkSweep(heap, is_concurrent) {
+    cumulative_timings_.SetName(GetName());
+  }
+
+  PartialMarkSweep::~PartialMarkSweep() {
+
+  }
+
+  void PartialMarkSweep::BindBitmaps() {
+    MarkSweep::BindBitmaps();
+
+    Spaces& spaces = GetHeap()->GetSpaces();
+    WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
+    // For partial GCs we need to bind the bitmap of the zygote space so that all objects in the
+    // zygote space are viewed as marked.
+    for (Spaces::iterator it = spaces.begin(); it != spaces.end(); ++it) {
+      ContinuousSpace* space = *it;
+      if (space->GetGcRetentionPolicy() == kGcRetentionPolicyFullCollect) {
+        ImmuneSpace(space);
+      }
+    }
+  }
+}  // namespace art
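
Each BindBitmaps() override widens the immune set chosen by its parent. A
hypothetical helper summarizing which spaces end up immune for which GC type,
per the overrides in this change:

    bool IsImmuneFor(GcType gc_type, GcRetentionPolicy policy) {
      if (policy == kGcRetentionPolicyNeverCollect) {
        return true;                    // image spaces: immune for every GC type
      }
      if (policy == kGcRetentionPolicyFullCollect) {
        return gc_type != kGcTypeFull;  // zygote space: immune for partial/sticky
      }
      return false;                     // alloc spaces are always collected
    }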
diff --git a/src/gc/partial_mark_sweep.h b/src/gc/partial_mark_sweep.h
new file mode 100644
index 0000000..80a1563
--- /dev/null
+++ b/src/gc/partial_mark_sweep.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_PARTIAL_MARK_SWEEP_H_
+#define ART_SRC_PARTIAL_MARK_SWEEP_H_
+
+#include "locks.h"
+#include "mark_sweep.h"
+#include "utils.h"
+
+namespace art {
+
+class Barrier;
+class CheckObjectVisitor;
+class Class;
+class Heap;
+class MarkIfReachesAllocspaceVisitor;
+class ModUnionClearCardVisitor;
+class ModUnionVisitor;
+class ModUnionTableBitmap;
+class Object;
+class TimingLogger;
+class MarkStackChunk;
+
+class PartialMarkSweep : public MarkSweep {
+ public:
+  virtual GcType GetGcType() const {
+    return kGcTypePartial;
+  }
+
+  explicit PartialMarkSweep(Heap* heap, bool is_concurrent);
+  ~PartialMarkSweep();
+
+ protected:
+  virtual void BindBitmaps()
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  DISALLOW_COPY_AND_ASSIGN(PartialMarkSweep);
+};
+
+}  // namespace art
+
+#endif  // ART_SRC_PARTIAL_MARK_SWEEP_H_
diff --git a/src/gc/sticky_mark_sweep.cc b/src/gc/sticky_mark_sweep.cc
new file mode 100644
index 0000000..23196fd
--- /dev/null
+++ b/src/gc/sticky_mark_sweep.cc
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "large_object_space.h"
+#include "space.h"
+#include "sticky_mark_sweep.h"
+
+namespace art {
+  StickyMarkSweep::StickyMarkSweep(Heap* heap, bool is_concurrent)
+      : PartialMarkSweep(heap, is_concurrent) {
+    cumulative_timings_.SetName(GetName());
+  }
+
+  StickyMarkSweep::~StickyMarkSweep() {
+
+  }
+
+  void StickyMarkSweep::BindBitmaps() {
+    PartialMarkSweep::BindBitmaps();
+
+    Spaces& spaces = GetHeap()->GetSpaces();
+    WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
+    // For sticky GC, we want to bind the bitmaps of both the zygote space and the alloc space.
+    // This lets us start with the mark bitmap of the previous garbage collection as the current
+    // mark bitmap of the alloc space. After the sticky GC finishes, we then unbind the bitmaps,
+    // making it so that the live bitmap of the alloc space contains the newly marked objects
+    // from the sticky GC.
+    for (Spaces::iterator it = spaces.begin(); it != spaces.end(); ++it) {
+      if ((*it)->GetGcRetentionPolicy() == kGcRetentionPolicyAlwaysCollect) {
+        BindLiveToMarkBitmap(*it);
+      }
+    }
+
+    GetHeap()->GetLargeObjectsSpace()->CopyLiveToMarked();
+  }
+
+  void StickyMarkSweep::MarkReachableObjects() {
+    DisableFinger();
+    RecursiveMarkDirtyObjects(CardTable::kCardDirty - 1);
+  }
+
+  void StickyMarkSweep::Sweep(TimingLogger& timings, bool swap_bitmaps) {
+    ObjectStack* live_stack = GetHeap()->GetLiveStack();
+    SweepArray(timings_, live_stack, false);
+    timings_.AddSplit("SweepArray");
+  }
+}  // namespace art
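
The kCardDirty - 1 argument is the heart of the sticky collection: as the old
heap.cc comment (removed below) explains, ProcessCards() ages the cards that
were dirty before the GC started, so scanning with minimum_age = kCardDirty - 1
revisits exactly those aged cards. A sketch of the age filter, with the card
values assumed for illustration:

    typedef unsigned char byte;
    static const byte kCardClean = 0x0;   // assumed, mirroring card_table.h
    static const byte kCardDirty = 0x70;  // assumed, mirroring card_table.h

    bool ShouldScanCard(byte card, byte minimum_age) {
      return card >= minimum_age;  // aged (kCardDirty - 1) cards still match
    }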
diff --git a/src/gc/sticky_mark_sweep.h b/src/gc/sticky_mark_sweep.h
new file mode 100644
index 0000000..9c3b6a4
--- /dev/null
+++ b/src/gc/sticky_mark_sweep.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_STICKY_MARK_SWEEP_H_
+#define ART_SRC_STICKY_MARK_SWEEP_H_
+
+#include "locks.h"
+#include "macros.h"
+#include "partial_mark_sweep.h"
+#include "utils.h"
+
+namespace art {
+
+class Barrier;
+class CheckObjectVisitor;
+class Class;
+class Heap;
+class MarkIfReachesAllocspaceVisitor;
+class ModUnionClearCardVisitor;
+class ModUnionVisitor;
+class ModUnionTableBitmap;
+class Object;
+class TimingLogger;
+class MarkStackChunk;
+
+class StickyMarkSweep : public PartialMarkSweep {
+ public:
+  virtual GcType GetGcType() const {
+    return kGcTypeSticky;
+  }
+
+  explicit StickyMarkSweep(Heap* heap, bool is_concurrent);
+  ~StickyMarkSweep();
+ protected:
+  virtual void BindBitmaps()
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  virtual void MarkReachableObjects()
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+  virtual void Sweep(TimingLogger& timings, bool swap_bitmaps)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+  DISALLOW_COPY_AND_ASSIGN(StickyMarkSweep);
+};
+
+}  // namespace art
+
+#endif  // ART_SRC_STICKY_MARK_SWEEP_H_
diff --git a/src/heap.cc b/src/heap.cc
index f55efd6..645d402 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -28,6 +28,8 @@
 #include "gc/heap_bitmap.h"
 #include "gc/large_object_space.h"
 #include "gc/mark_sweep.h"
+#include "gc/partial_mark_sweep.h"
+#include "gc/sticky_mark_sweep.h"
 #include "gc/mod_union_table.h"
 #include "gc/space.h"
 #include "image.h"
@@ -45,7 +47,9 @@
 
 namespace art {
 
-static const bool kDumpGcPerformanceOnShutdown = false;
+static const uint64_t kSlowGcThreshold = MsToNs(100);
+static const uint64_t kLongGcPauseThreshold = MsToNs(5);
+static const bool kDumpGcPerformanceOnShutdown = true;
 const double Heap::kDefaultTargetUtilization = 0.5;
 
 static bool GenerateImage(const std::string& image_file_name) {
@@ -160,7 +164,6 @@
       min_alloc_space_size_for_sticky_gc_(2 * MB),
       min_remaining_space_for_sticky_gc_(1 * MB),
       last_trim_time_(0),
-      requesting_gc_(false),
       max_allocation_stack_size_(MB),
       reference_referent_offset_(0),
       reference_queue_offset_(0),
@@ -170,7 +173,6 @@
       min_free_(min_free),
       max_free_(max_free),
       target_utilization_(target_utilization),
-      total_paused_time_(0),
       total_wait_time_(0),
       measure_allocation_time_(false),
       total_allocation_time_(0),
@@ -286,17 +288,15 @@
   // Create the reference queue lock, this is required for parallel object scanning in the GC.
   reference_queue_lock_.reset(new Mutex("reference queue lock"));
 
-  CHECK(max_allowed_footprint_ != 0);
-
-  // Set up the cumulative timing loggers.
-  for (size_t i = static_cast<size_t>(kGcTypeSticky); i < static_cast<size_t>(kGcTypeMax);
-       ++i) {
-    std::ostringstream name;
-    name << static_cast<GcType>(i);
-    cumulative_timings_.Put(static_cast<GcType>(i),
-                            new CumulativeLogger(name.str().c_str(), true));
+  // Create our garbage collectors.
+  for (size_t i = 0; i < 2; ++i) {
+    const bool concurrent = i != 0;
+    mark_sweep_collectors_.push_back(new MarkSweep(this, concurrent));
+    mark_sweep_collectors_.push_back(new PartialMarkSweep(this, concurrent));
+    mark_sweep_collectors_.push_back(new StickyMarkSweep(this, concurrent));
   }
 
+  CHECK(max_allowed_footprint_ != 0);
   if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
     LOG(INFO) << "Heap() exiting";
   }
@@ -356,12 +356,30 @@
   // Dump cumulative timings.
   LOG(INFO) << "Dumping cumulative Gc timings";
   uint64_t total_duration = 0;
-  for (CumulativeTimings::iterator it = cumulative_timings_.begin();
-      it != cumulative_timings_.end(); ++it) {
-    CumulativeLogger* logger = it->second;
-    if (logger->GetTotalNs() != 0) {
-      logger->Dump();
-      total_duration += logger->GetTotalNs();
+
+  // Dump cumulative loggers for each GC type.
+  // TODO: C++0x
+  uint64_t total_paused_time = 0;
+  for (Collectors::const_iterator it = mark_sweep_collectors_.begin();
+      it != mark_sweep_collectors_.end(); ++it) {
+    MarkSweep* collector = *it;
+    const CumulativeLogger& logger = collector->GetCumulativeTimings();
+    if (logger.GetTotalNs() != 0) {
+      logger.Dump();
+      const uint64_t total_ns = logger.GetTotalNs();
+      const uint64_t total_pause_ns = collector->GetTotalPausedTime();
+      double seconds = NsToMs(logger.GetTotalNs()) / 1000.0;
+      const uint64_t freed_bytes = collector->GetTotalFreedBytes();
+      const uint64_t freed_objects = collector->GetTotalFreedObjects();
+      LOG(INFO)
+          << collector->GetName() << " total time: " << PrettyDuration(total_ns) << "\n"
+          << collector->GetName() << " paused time: " << PrettyDuration(total_pause_ns) << "\n"
+          << collector->GetName() << " freed: " << freed_objects
+          << " objects with total size " << PrettySize(freed_bytes) << "\n"
+          << collector->GetName() << " throughput: " << freed_objects / seconds << "/s / "
+          << PrettySize(freed_bytes / seconds) << "/s\n";
+      total_duration += total_ns;
+      total_paused_time += total_pause_ns;
     }
   }
   uint64_t allocation_time = static_cast<uint64_t>(total_allocation_time_) * kTimeAdjust;
@@ -381,8 +399,8 @@
     LOG(INFO) << "Mean allocation time: "
               << PrettyDuration(allocation_time / total_objects_allocated);
   }
-  LOG(INFO) << "Total mutator paused time: " << PrettyDuration(total_paused_time_);
-  LOG(INFO) << "Total waiting for Gc to complete time: " << PrettyDuration(total_wait_time_);
+  LOG(INFO) << "Total mutator paused time: " << PrettyDuration(total_paused_time);
+  LOG(INFO) << "Total time waiting for GC to complete time: " << PrettyDuration(total_wait_time_);
 }
 
 Heap::~Heap() {
@@ -390,6 +408,8 @@
     DumpGcPerformanceInfo();
   }
 
+  STLDeleteElements(&mark_sweep_collectors_);
+
   // If we don't reset then the mark stack complains in its destructor.
   allocation_stack_->Reset();
   live_stack_->Reset();
@@ -401,7 +421,6 @@
   // those threads can't resume. We're the only running thread, and we can do whatever we like...
   STLDeleteElements(&spaces_);
   delete gc_complete_lock_;
-  STLDeleteValues(&cumulative_timings_);
 }
 
 ContinuousSpace* Heap::FindSpaceFromObject(const Object* obj) const {
@@ -634,7 +653,6 @@
   while (!allocation_stack_->AtomicPushBack(obj)) {
     Thread* self = Thread::Current();
     self->TransitionFromRunnableToSuspended(kWaitingPerformingGc);
-    // If we actually ran a different type of Gc than requested, we can skip the index forwards.
     CollectGarbageInternal(kGcTypeSticky, kGcCauseForAlloc, false);
     self->TransitionFromSuspendedToRunnable();
   }
@@ -658,20 +676,23 @@
 
 Object* Heap::TryToAllocate(Thread* self, AllocSpace* space, size_t alloc_size, bool grow) {
   // Should we try to use a CAS here and fix up num_bytes_allocated_ later with AllocationSize?
-  if (enforce_heap_growth_rate_ && num_bytes_allocated_ + alloc_size > max_allowed_footprint_) {
-    if (grow) {
-      // Grow the heap by alloc_size extra bytes.
-      max_allowed_footprint_ = std::min(max_allowed_footprint_ + alloc_size, growth_limit_);
-      VLOG(gc) << "Grow heap to " << PrettySize(max_allowed_footprint_)
-               << " for a " << PrettySize(alloc_size) << " allocation";
-    } else {
+  if (num_bytes_allocated_ + alloc_size > max_allowed_footprint_) {
+    // max_allowed_footprint_ <= growth_limit_ so it is safe to check in here.
+    if (num_bytes_allocated_ + alloc_size > growth_limit_) {
+      // Completely out of memory.
       return NULL;
     }
-  }
 
-  if (num_bytes_allocated_ + alloc_size > growth_limit_) {
-    // Completely out of memory.
-    return NULL;
+    if (enforce_heap_growth_rate_) {
+      if (grow) {
+        // Grow the heap by alloc_size extra bytes.
+        max_allowed_footprint_ = std::min(max_allowed_footprint_ + alloc_size, growth_limit_);
+        VLOG(gc) << "Grow heap to " << PrettySize(max_allowed_footprint_)
+                 << " for a " << PrettySize(alloc_size) << " allocation";
+      } else {
+        return NULL;
+      }
+    }
   }
 
   return space->Alloc(self, alloc_size);
@@ -893,9 +914,9 @@
 
   // Reset the cumulative loggers since we now have a few additional timing phases.
   // TODO: C++0x
-  for (CumulativeTimings::iterator it = cumulative_timings_.begin();
-       it != cumulative_timings_.end(); ++it) {
-    it->second->Reset();
+  for (Collectors::const_iterator it = mark_sweep_collectors_.begin();
+        it != mark_sweep_collectors_.end(); ++it) {
+    (*it)->ResetCumulativeStatistics();
   }
 }
 
@@ -976,12 +997,54 @@
     sticky_gc_count_ = 0;
   }
 
-  if (concurrent_gc_) {
-    CollectGarbageConcurrentMarkSweepPlan(self, gc_type, gc_cause, clear_soft_references);
-  } else {
-    CollectGarbageMarkSweepPlan(self, gc_type, gc_cause, clear_soft_references);
+  DCHECK_LT(gc_type, kGcTypeMax);
+  DCHECK_NE(gc_type, kGcTypeNone);
+  MarkSweep* collector = NULL;
+  for (Collectors::iterator it = mark_sweep_collectors_.begin();
+      it != mark_sweep_collectors_.end(); ++it) {
+    MarkSweep* cur_collector = *it;
+    if (cur_collector->IsConcurrent() == concurrent_gc_ && cur_collector->GetGcType() == gc_type) {
+      collector = cur_collector;
+      break;
+    }
   }
-  bytes_since_last_gc_ = 0;
+  CHECK(collector != NULL)
+      << "Could not find garbage collector with concurrent=" << concurrent_gc_
+      << " and type=" << gc_type;
+  collector->clear_soft_references_ = clear_soft_references;
+  collector->Run();
+  total_objects_freed_ += collector->GetFreedObjects();
+  total_bytes_freed_ += collector->GetFreedBytes();
+  RequestHeapTrim();
+
+  uint64_t duration = collector->GetDuration();
+  std::vector<uint64_t> pauses = collector->GetPauseTimes();
+  bool was_slow = duration > kSlowGcThreshold ||
+      (gc_cause == kGcCauseForAlloc && duration > kLongGcPauseThreshold);
+  for (size_t i = 0; i < pauses.size(); ++i) {
+    if (pauses[i] > kLongGcPauseThreshold) {
+      was_slow = true;
+    }
+  }
+
+  if (was_slow) {
+    const size_t percent_free = GetPercentFree();
+    const size_t current_heap_size = GetUsedMemorySize();
+    const size_t total_memory = GetTotalMemory();
+    std::ostringstream pause_string;
+    for (size_t i = 0; i < pauses.size(); ++i) {
+      pause_string << PrettyDuration((pauses[i] / 1000) * 1000)
+                   << ((i != pauses.size() - 1) ? ", " : "");
+    }
+    LOG(INFO) << gc_cause << " " << collector->GetName()
+              << " GC freed " << PrettySize(collector->GetFreedBytes()) << ", "
+              << percent_free << "% free, " << PrettySize(current_heap_size) << "/"
+              << PrettySize(total_memory) << ", paused " << pause_string.str()
+              << " total " << PrettyDuration((duration / 1000) * 1000);
+    if (VLOG_IS_ON(heap)) {
+      collector->GetTimings().Dump();
+    }
+  }
 
   {
     MutexLock mu(self, *gc_complete_lock_);
@@ -995,155 +1058,6 @@
   return gc_type;
 }
 
-void Heap::CollectGarbageMarkSweepPlan(Thread* self, GcType gc_type, GcCause gc_cause,
-                                       bool clear_soft_references) {
-  TimingLogger timings("CollectGarbageInternal", true);
-
-  std::stringstream gc_type_str;
-  gc_type_str << gc_type << " ";
-
-  // Suspend all threads are get exclusive access to the heap.
-  uint64_t start_time = NanoTime();
-  ThreadList* thread_list = Runtime::Current()->GetThreadList();
-  thread_list->SuspendAll();
-  timings.AddSplit("SuspendAll");
-  Locks::mutator_lock_->AssertExclusiveHeld(self);
-
-  size_t bytes_freed = 0;
-  Object* cleared_references = NULL;
-  {
-    MarkSweep mark_sweep(mark_stack_.get());
-    mark_sweep.Init();
-    timings.AddSplit("Init");
-
-    if (verify_pre_gc_heap_) {
-      WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
-      if (!VerifyHeapReferences()) {
-        LOG(FATAL) << "Pre " << gc_type_str.str() << "Gc verification failed";
-      }
-      timings.AddSplit("VerifyHeapReferencesPreGC");
-    }
-
-    // Swap allocation stack and live stack, enabling us to have new allocations during this GC.
-    SwapStacks();
-
-    // Process dirty cards and add dirty cards to mod union tables.
-    ProcessCards(timings);
-
-    // Bind live to mark bitmaps.
-    BindBitmaps(gc_type, mark_sweep);
-    timings.AddSplit("BindBitmaps");
-
-    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
-    mark_sweep.MarkRoots();
-    mark_sweep.MarkConcurrentRoots();
-    timings.AddSplit("MarkRoots");
-
-    UpdateAndMarkModUnion(&mark_sweep, timings, gc_type);
-
-    if (gc_type != kGcTypeSticky) {
-      MarkAllocStack(alloc_space_->GetLiveBitmap(), large_object_space_->GetLiveObjects(),
-                     live_stack_.get());
-      timings.AddSplit("MarkStackAsLive");
-    }
-
-    if (verify_mod_union_table_) {
-      zygote_mod_union_table_->Update();
-      zygote_mod_union_table_->Verify();
-      mod_union_table_->Update();
-      mod_union_table_->Verify();
-    }
-
-    // Recursively mark all the non-image bits set in the mark bitmap.
-    if (gc_type != kGcTypeSticky) {
-      mark_sweep.RecursiveMark(gc_type == kGcTypePartial, timings);
-    } else {
-      // Use -1 since we want to scan all of the cards which we aged earlier when we did
-      // ClearCards. These are the cards which were dirty before the GC started.
-      mark_sweep.RecursiveMarkDirtyObjects(CardTable::kCardDirty - 1);
-    }
-    mark_sweep.DisableFinger();
-
-    // Need to process references before the swap since it uses IsMarked.
-    mark_sweep.ProcessReferences(clear_soft_references);
-    timings.AddSplit("ProcessReferences");
-
-    if (kIsDebugBuild) {
-      // Verify that we only reach marked objects from the image space
-      mark_sweep.VerifyImageRoots();
-      timings.AddSplit("VerifyImageRoots");
-    }
-
-    if (gc_type != kGcTypeSticky) {
-      mark_sweep.Sweep(timings, gc_type == kGcTypePartial, false);
-      mark_sweep.SweepLargeObjects(false);
-      timings.AddSplit("SweepLargeObjects");
-    } else {
-      mark_sweep.SweepArray(timings, live_stack_.get(), false);
-      timings.AddSplit("SweepArray");
-    }
-    live_stack_->Reset();
-
-    // Unbind the live and mark bitmaps.
-    mark_sweep.UnBindBitmaps();
-    if (gc_type == kGcTypeSticky) {
-      SwapLargeObjects();
-    } else {
-      SwapBitmaps(gc_type);
-    }
-    timings.AddSplit("SwapBitmaps");
-
-    if (verify_system_weaks_) {
-      mark_sweep.VerifySystemWeaks();
-      timings.AddSplit("VerifySystemWeaks");
-    }
-
-    cleared_references = mark_sweep.GetClearedReferences();
-    bytes_freed = mark_sweep.GetFreedBytes();
-    total_bytes_freed_ += bytes_freed;
-    total_objects_freed_ += mark_sweep.GetFreedObjects();
-  }
-
-  if (verify_post_gc_heap_) {
-    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
-    if (!VerifyHeapReferences()) {
-      LOG(FATAL) << "Post " + gc_type_str.str() + "Gc verification failed";
-    }
-    timings.AddSplit("VerifyHeapReferencesPostGC");
-  }
-
-  GrowForUtilization();
-  timings.AddSplit("GrowForUtilization");
-
-  thread_list->ResumeAll();
-  timings.AddSplit("ResumeAll");
-
-  EnqueueClearedReferences(&cleared_references);
-  RequestHeapTrim();
-  timings.AddSplit("Finish");
-
-  // If the GC was slow, then print timings in the log.
-  uint64_t duration = (NanoTime() - start_time) / 1000 * 1000;
-  total_paused_time_ += duration;
-  if (duration > MsToNs(50)) {
-    const size_t percent_free = GetPercentFree();
-    const size_t current_heap_size = GetUsedMemorySize();
-    const size_t total_memory = GetTotalMemory();
-    LOG(INFO) << gc_cause << " " << gc_type_str.str()
-              << "GC freed " << PrettySize(bytes_freed) << ", " << percent_free << "% free, "
-              << PrettySize(current_heap_size) << "/" << PrettySize(total_memory) << ", "
-              << "paused " << PrettyDuration(duration);
-    if (VLOG_IS_ON(heap)) {
-      timings.Dump();
-    }
-  }
-
-  CumulativeLogger* logger = cumulative_timings_.Get(gc_type);
-  logger->Start();
-  logger->AddLogger(timings);
-  logger->End(); // Next iteration.
-}
-
 void Heap::UpdateAndMarkModUnion(MarkSweep* mark_sweep, TimingLogger& timings, GcType gc_type) {
   if (gc_type == kGcTypeSticky) {
     // Don't need to do anything for mod union table in this case since we are only scanning dirty
@@ -1235,25 +1149,6 @@
       card_table->Scan(bitmap, byte_cover_begin, byte_cover_begin + CardTable::kCardSize,
                        scan_visitor, VoidFunctor());
 
-      // Try and see if a mark sweep collector scans the reference.
-      ObjectStack* mark_stack = heap_->mark_stack_.get();
-      MarkSweep ms(mark_stack);
-      ms.Init();
-      mark_stack->Reset();
-      ms.DisableFinger();
-
-      // All the references should end up in the mark stack.
-      ms.ScanRoot(obj);
-      if (std::find(mark_stack->Begin(), mark_stack->End(), ref)) {
-        LOG(ERROR) << "Ref found in the mark_stack when rescanning the object!";
-      } else {
-        LOG(ERROR) << "Dumping mark stack contents";
-        for (Object** it = mark_stack->Begin(); it != mark_stack->End(); ++it) {
-          LOG(ERROR) << *it;
-        }
-      }
-      mark_stack->Reset();
-
       // Search to see if any of the roots reference our object.
       void* arg = const_cast<void*>(reinterpret_cast<const void*>(obj));
       Runtime::Current()->VisitRoots(&Heap::RootMatchesObjectVisitor, arg);
@@ -1262,24 +1157,14 @@
   }
 
   bool IsLive(const Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
-    SpaceBitmap* bitmap = heap_->GetLiveBitmap()->GetSpaceBitmap(obj);
-    if (bitmap != NULL) {
-      if (bitmap->Test(obj)) {
-        return true;
-      }
-    } else if (heap_->GetLargeObjectsSpace()->Contains(obj)) {
+    if (heap_->GetLiveBitmap()->Test(obj)) {
       return true;
-    } else {
-      heap_->DumpSpaces();
-      LOG(ERROR) << "Object " << obj << " not found in any spaces";
     }
     ObjectStack* alloc_stack = heap_->allocation_stack_.get();
     // At this point we need to search the allocation since things in the live stack may get swept.
-    if (std::binary_search(alloc_stack->Begin(), alloc_stack->End(), const_cast<Object*>(obj))) {
-      return true;
-    }
-    // Not either in the live bitmap or allocation stack, so the object must be dead.
-    return false;
+    // If the object is not in the live bitmap or the allocation stack, it must be dead.
+    return std::binary_search(alloc_stack->Begin(), alloc_stack->End(), obj);
   }
 
  private:
@@ -1350,7 +1235,7 @@
       if (!card_table->AddrIsInCardTable(obj)) {
         LOG(ERROR) << "Object " << obj << " is not in the address range of the card table";
         *failed_ = true;
-      } else if (card_table->GetCard(obj) < CardTable::kCardDirty - 1) {
+      } else if (!card_table->IsDirty(obj)) {
         // Card should be either kCardDirty if it got re-dirtied after we aged it, or
         // kCardDirty - 1 if it didnt get touched since we aged it.
         ObjectStack* live_stack = heap_->live_stack_.get();
@@ -1424,6 +1309,8 @@
 bool Heap::VerifyMissingCardMarks() {
   Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
 
+  // We need to sort the live stack since we binary search it.
+  std::sort(live_stack_->Begin(), live_stack_->End());
   VerifyLiveStackReferences visitor(this);
   GetLiveBitmap()->Visit(visitor);
 
@@ -1439,34 +1326,6 @@
   return true;
 }
 
-void Heap::SwapBitmaps(GcType gc_type) {
-  // Swap the live and mark bitmaps for each alloc space. This is needed since sweep re-swaps
-  // these bitmaps. The bitmap swapping is an optimization so that we do not need to clear the live
-  // bits of dead objects in the live bitmap.
-  for (Spaces::iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
-    ContinuousSpace* space = *it;
-    // We never allocate into zygote spaces.
-    if (space->GetGcRetentionPolicy() == kGcRetentionPolicyAlwaysCollect ||
-        (gc_type == kGcTypeFull &&
-            space->GetGcRetentionPolicy() == kGcRetentionPolicyFullCollect)) {
-      SpaceBitmap* live_bitmap = space->GetLiveBitmap();
-      SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
-      if (live_bitmap != mark_bitmap) {
-        live_bitmap_->ReplaceBitmap(live_bitmap, mark_bitmap);
-        mark_bitmap_->ReplaceBitmap(mark_bitmap, live_bitmap);
-        space->AsAllocSpace()->SwapBitmaps();
-      }
-    }
-  }
-  SwapLargeObjects();
-}
-
-void Heap::SwapLargeObjects() {
-  large_object_space_->SwapBitmaps();
-  live_bitmap_->SetLargeObjects(large_object_space_->GetLiveObjects());
-  mark_bitmap_->SetLargeObjects(large_object_space_->GetMarkObjects());
-}
-
 void Heap::SwapStacks() {
   allocation_stack_.swap(live_stack_);
 
@@ -1495,283 +1354,74 @@
   }
 }
 
-// Bind the live bits to the mark bits of bitmaps based on the gc type.
-void Heap::BindBitmaps(GcType gc_type, MarkSweep& mark_sweep) {
-  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
-  if (gc_type == kGcTypePartial) {
-    // For partial GCs we need to bind the bitmap of the zygote space so that all objects in the
-    // zygote space are viewed as marked.
-    for (Spaces::iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
-      if ((*it)->GetGcRetentionPolicy() == kGcRetentionPolicyFullCollect) {
-        mark_sweep.BindLiveToMarkBitmap(*it);
-      }
-    }
-    mark_sweep.SetImmuneRange(reinterpret_cast<Object*>(spaces_.front()->Begin()),
-                              reinterpret_cast<Object*>(alloc_space_->Begin()));
-  } else if (gc_type == kGcTypeSticky) {
-    // For sticky GC, we want to bind the bitmaps of both the zygote space and the alloc space.
-    // This lets us start with the mark bitmap of the previous garbage collection as the current
-    // mark bitmap of the alloc space. After the sticky GC finishes, we then unbind the bitmaps,
-    // making it so that the live bitmap of the alloc space is contains the newly marked objects
-    // from the sticky GC.
-    for (Spaces::iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
-      if ((*it)->GetGcRetentionPolicy() != kGcRetentionPolicyNeverCollect) {
-        mark_sweep.BindLiveToMarkBitmap(*it);
-      }
-    }
+void Heap::PreGcVerification(GarbageCollector* gc) {
+  ThreadList* thread_list = Runtime::Current()->GetThreadList();
+  Thread* self = Thread::Current();
 
-    large_object_space_->CopyLiveToMarked();
-    mark_sweep.SetImmuneRange(reinterpret_cast<Object*>(spaces_.front()->Begin()),
-                              reinterpret_cast<Object*>(alloc_space_->Begin()));
+  if (verify_pre_gc_heap_) {
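+    // Suspend all threads so that the heap is stable while we verify it.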
+    thread_list->SuspendAll();
+    {
+      ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
+      if (!VerifyHeapReferences()) {
+        LOG(FATAL) << "Pre " << gc->GetName() << " heap verification failed";
+      }
+    }
+    thread_list->ResumeAll();
   }
-  mark_sweep.FindDefaultMarkBitmap();
+
+  // Check that all objects which reference things in the live stack are on dirty cards.
+  if (verify_missing_card_marks_) {
+    thread_list->SuspendAll();
+    {
+      ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
+      SwapStacks();
+      // The live stack is sorted inside VerifyMissingCardMarks so it can be binary searched.
+      if (!VerifyMissingCardMarks()) {
+        LOG(FATAL) << "Pre " << gc->GetName() << " missing card mark verification failed";
+      }
+      SwapStacks();
+    }
+    thread_list->ResumeAll();
+  }
+
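+  // Verify that the mod-union tables match the references currently present in the heap.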
+  if (verify_mod_union_table_) {
+    thread_list->SuspendAll();
+    ReaderMutexLock reader_lock(self, *Locks::heap_bitmap_lock_);
+    zygote_mod_union_table_->Update();
+    zygote_mod_union_table_->Verify();
+    mod_union_table_->Update();
+    mod_union_table_->Verify();
+    thread_list->ResumeAll();
+  }
 }
 
-void Heap::CollectGarbageConcurrentMarkSweepPlan(Thread* self, GcType gc_type, GcCause gc_cause,
-                                                 bool clear_soft_references) {
-  TimingLogger timings("ConcurrentCollectGarbageInternal", true);
-  uint64_t gc_begin = NanoTime(), dirty_begin = 0, dirty_end = 0;
-  std::stringstream gc_type_str;
-  gc_type_str << gc_type << " ";
-
+void Heap::PreSweepingGcVerification(GarbageCollector* gc) {
   ThreadList* thread_list = Runtime::Current()->GetThreadList();
-  size_t bytes_freed = 0;
-  Object* cleared_references = NULL;
-  {
-    MarkSweep mark_sweep(mark_stack_.get());
-    timings.AddSplit("ctor");
+  Thread* self = Thread::Current();
 
-    mark_sweep.Init();
-    timings.AddSplit("Init");
-
-    BindBitmaps(gc_type, mark_sweep);
-    timings.AddSplit("BindBitmaps");
-
-    if (verify_pre_gc_heap_) {
-      thread_list->SuspendAll();
-      {
-        WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
-        if (!VerifyHeapReferences()) {
-          LOG(FATAL) << "Pre " << gc_type_str.str() << "Gc verification failed";
-        }
-        timings.AddSplit("VerifyHeapReferencesPreGC");
-      }
-      thread_list->ResumeAll();
-    }
-
-    // Process dirty cards and add dirty cards to mod union tables.
-    ProcessCards(timings);
-
-    // Need to do this before the checkpoint since we don't want any threads to add references to
-    // the live stack during the recursive mark.
-    SwapStacks();
-    timings.AddSplit("SwapStacks");
-
-    // Tell the running threads to suspend and mark their roots.
-    mark_sweep.MarkRootsCheckpoint();
-    timings.AddSplit("MarkRootsCheckpoint");
-
-    // Check that all objects which reference things in the live stack are on dirty cards.
-    if (verify_missing_card_marks_) {
-      thread_list->SuspendAll();
-      {
-        ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
-        // Sort the live stack so that we can quickly binary search it later.
-        std::sort(live_stack_->Begin(), live_stack_->End());
-        if (!VerifyMissingCardMarks()) {
-          LOG(FATAL) << "Pre GC verification of missing card marks failed";
-        }
-      }
-      thread_list->ResumeAll();
-    }
-
-    if (verify_mod_union_table_) {
-      thread_list->SuspendAll();
-      ReaderMutexLock reader_lock(self, *Locks::heap_bitmap_lock_);
-      zygote_mod_union_table_->Update();
-      zygote_mod_union_table_->Verify();
-      mod_union_table_->Update();
-      mod_union_table_->Verify();
-      thread_list->ResumeAll();
-    }
-
-    {
-      // Allow mutators to go again, acquire share on mutator_lock_ to continue.
-      ReaderMutexLock reader_lock(self, *Locks::mutator_lock_);
-
-      // Mark the roots which we can do concurrently.
-      WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
-      mark_sweep.MarkConcurrentRoots();
-      timings.AddSplit("MarkConcurrentRoots");
-      mark_sweep.MarkNonThreadRoots();
-      timings.AddSplit("MarkNonThreadRoots");
-
-      if (gc_type != kGcTypeSticky) {
-        // Mark everything allocated since the last as GC live so that we can sweep concurrently,
-        // knowing that new allocations won't be marked as live.
-        MarkAllocStack(alloc_space_->GetLiveBitmap(), large_object_space_->GetLiveObjects(),
-                       live_stack_.get());
-        timings.AddSplit("MarkStackAsLive");
-      }
-
-      UpdateAndMarkModUnion(&mark_sweep, timings, gc_type);
-
-      if (gc_type != kGcTypeSticky) {
-        // Recursively mark all the non-image bits set in the mark bitmap.
-        mark_sweep.RecursiveMark(gc_type == kGcTypePartial, timings);
-      } else {
-        mark_sweep.RecursiveMarkDirtyObjects(CardTable::kCardDirty - 1);
-        timings.AddSplit("RecursiveMarkCards");
-      }
-      mark_sweep.DisableFinger();
-    }
-    // Release share on mutator_lock_ and then get exclusive access.
-    dirty_begin = NanoTime();
+  // Called before sweeping occurs since we want to make sure we are not going to reclaim any
+  // reachable objects.
+  if (verify_post_gc_heap_) {
     thread_list->SuspendAll();
-    timings.AddSplit("ReSuspend");
-    Locks::mutator_lock_->AssertExclusiveHeld(self);
-
-    {
-      WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
-
-      // Re-mark root set.
-      mark_sweep.ReMarkRoots();
-      timings.AddSplit("ReMarkRoots");
-
-      // Scan dirty objects, this is only required if we are not doing concurrent GC.
-      mark_sweep.RecursiveMarkDirtyObjects();
-      timings.AddSplit("RecursiveMarkDirtyObjects");
+    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
+    // Swapping bound bitmaps does nothing.
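+    // Before sweeping, the mark bitmap holds exactly the reachable objects, so temporarily
+    // treat it as the live bitmap while verifying heap references.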
+    live_bitmap_.swap(mark_bitmap_);
+    if (!VerifyHeapReferences()) {
+      LOG(FATAL) << "Post " << gc->GetName() << " heap verification failed";
     }
-
-    {
-      ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
-
-      mark_sweep.ProcessReferences(clear_soft_references);
-      timings.AddSplit("ProcessReferences");
-    }
-
-    // Only need to do this if we have the card mark verification on, and only during concurrent GC.
-    if (verify_missing_card_marks_) {
-      WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
-      mark_sweep.SweepArray(timings, allocation_stack_.get(), false);
-    } else {
-      WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
-      // We only sweep over the live stack, and the live stack should not intersect with the
-      // allocation stack, so it should be safe to UnMark anything in the allocation stack as live.
-      UnMarkAllocStack(alloc_space_->GetMarkBitmap(), large_object_space_->GetMarkObjects(),
-                      allocation_stack_.get());
-      timings.AddSplit("UnMarkAllocStack");
-      if (kIsDebugBuild) {
-        if (gc_type == kGcTypeSticky) {
-          // Make sure everything in the live stack isn't something we unmarked.
-          std::sort(allocation_stack_->Begin(), allocation_stack_->End());
-          for (Object** it = live_stack_->Begin(); it != live_stack_->End(); ++it) {
-            DCHECK(!std::binary_search(allocation_stack_->Begin(), allocation_stack_->End(), *it))
-                << "Unmarked object " << *it << " in the live stack";
-          }
-        } else {
-          for (Object** it = allocation_stack_->Begin(); it != allocation_stack_->End(); ++it) {
-            DCHECK(!GetLiveBitmap()->Test(*it)) << "Object " << *it << " is marked as live";
-          }
-        }
-      }
-    }
-
-    if (kIsDebugBuild) {
-      // Verify that we only reach marked objects from the image space.
-      ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
-      mark_sweep.VerifyImageRoots();
-      timings.AddSplit("VerifyImageRoots");
-    }
-
-    if (verify_post_gc_heap_) {
-      WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
-      // Swapping bound bitmaps does nothing.
-      SwapBitmaps(kGcTypeFull);
-      if (!VerifyHeapReferences()) {
-        LOG(FATAL) << "Post " << gc_type_str.str() << "Gc verification failed";
-      }
-      SwapBitmaps(kGcTypeFull);
-      timings.AddSplit("VerifyHeapReferencesPostGC");
-    }
-
+    live_bitmap_.swap(mark_bitmap_);
     thread_list->ResumeAll();
-    dirty_end = NanoTime();
-    Locks::mutator_lock_->AssertNotHeld(self);
-
-    {
-      // TODO: this lock shouldn't be necessary (it's why we did the bitmap flip above).
-      if (gc_type != kGcTypeSticky) {
-        WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
-        mark_sweep.Sweep(timings, gc_type == kGcTypePartial, false);
-        mark_sweep.SweepLargeObjects(false);
-        timings.AddSplit("SweepLargeObjects");
-      } else {
-        WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
-        mark_sweep.SweepArray(timings, live_stack_.get(), false);
-        timings.AddSplit("SweepArray");
-      }
-      live_stack_->Reset();
-    }
-
-    {
-      WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
-      // Unbind the live and mark bitmaps.
-      mark_sweep.UnBindBitmaps();
-
-      // Swap the live and mark bitmaps for each space which we modified space. This is an
-      // optimization that enables us to not clear live bits inside of the sweep.
-      if (gc_type == kGcTypeSticky) {
-        SwapLargeObjects();
-      } else {
-        SwapBitmaps(gc_type);
-      }
-      timings.AddSplit("SwapBitmaps");
-    }
-
-    if (verify_system_weaks_) {
-      ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
-      mark_sweep.VerifySystemWeaks();
-      timings.AddSplit("VerifySystemWeaks");
-    }
-
-    cleared_references = mark_sweep.GetClearedReferences();
-    bytes_freed = mark_sweep.GetFreedBytes();
-    total_bytes_freed_ += bytes_freed;
-    total_objects_freed_ += mark_sweep.GetFreedObjects();
   }
+}
 
-  GrowForUtilization();
-  timings.AddSplit("GrowForUtilization");
+void Heap::PostGcVerification(GarbageCollector* gc) {
+  Thread* self = Thread::Current();
 
-  EnqueueClearedReferences(&cleared_references);
-  timings.AddSplit("EnqueueClearedReferences");
-
-  RequestHeapTrim();
-  timings.AddSplit("Finish");
-
-  // If the GC was slow, then print timings in the log.
-  uint64_t pause_time = (dirty_end - dirty_begin) / 1000 * 1000;
-  uint64_t duration = (NanoTime() - gc_begin) / 1000 * 1000;
-  total_paused_time_ += pause_time;
-  if (pause_time > MsToNs(5) || (gc_cause == kGcCauseForAlloc && duration > MsToNs(20))) {
-    const size_t percent_free = GetPercentFree();
-    const size_t current_heap_size = GetUsedMemorySize();
-    const size_t total_memory = GetTotalMemory();
-    LOG(INFO) << gc_cause << " " << gc_type_str.str()
-              << "Concurrent GC freed " << PrettySize(bytes_freed) << ", " << percent_free
-              << "% free, " << PrettySize(current_heap_size) << "/"
-              << PrettySize(total_memory) << ", " << "paused " << PrettyDuration(pause_time)
-              << " total " << PrettyDuration(duration);
-    if (VLOG_IS_ON(heap)) {
-      timings.Dump();
-    }
+  if (verify_system_weaks_) {
+    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
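+    // Every collector registered in mark_sweep_collectors_ is a MarkSweep, so this
+    // down_cast is safe.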
+    MarkSweep* mark_sweep = down_cast<MarkSweep*>(gc);
+    mark_sweep->VerifySystemWeaks();
   }
-
-  CumulativeLogger* logger = cumulative_timings_.Get(gc_type);
-  logger->Start();
-  logger->AddLogger(timings);
-  logger->End(); // Next iteration.
 }
 
 GcType Heap::WaitForConcurrentGcToComplete(Thread* self) {
@@ -1797,7 +1447,7 @@
         wait_time = NanoTime() - wait_start;;
         total_wait_time_ += wait_time;
       }
-      if (wait_time > MsToNs(5)) {
+      if (wait_time > kLongGcPauseThreshold) {
         LOG(INFO) << "WaitForConcurrentGcToComplete blocked for " << PrettyDuration(wait_time);
       }
     }
@@ -1980,7 +1630,7 @@
 void Heap::RequestConcurrentGC(Thread* self) {
   // Make sure that we can do a concurrent GC.
   Runtime* runtime = Runtime::Current();
-  if (requesting_gc_ || runtime == NULL || !runtime->IsFinishedStarting() ||
+  if (runtime == NULL || !runtime->IsFinishedStarting() ||
       !runtime->IsConcurrentGcEnabled()) {
     return;
   }
@@ -1994,14 +1644,12 @@
     return;
   }
 
-  requesting_gc_ = true;
   JNIEnv* env = self->GetJniEnv();
   DCHECK(WellKnownClasses::java_lang_Daemons != NULL);
   DCHECK(WellKnownClasses::java_lang_Daemons_requestGC != NULL);
   env->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons,
                             WellKnownClasses::java_lang_Daemons_requestGC);
   CHECK(!env->ExceptionCheck());
-  requesting_gc_ = false;
 }
 
 void Heap::ConcurrentGC(Thread* self) {
diff --git a/src/heap.h b/src/heap.h
index 22b009c..5ba5c84 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -44,6 +44,7 @@
 class Class;
 class ConditionVariable;
 class DlMallocSpace;
+class GarbageCollector;
 class HeapBitmap;
 class ImageSpace;
 class LargeObjectSpace;
@@ -192,7 +193,11 @@
   // true if we waited for the GC to complete.
   GcType WaitForConcurrentGcToComplete(Thread* self) LOCKS_EXCLUDED(gc_complete_lock_);
 
-  const Spaces& GetSpaces() {
+  const Spaces& GetSpaces() const {
+    return spaces_;
+  }
+
+  Spaces& GetSpaces() {
     return spaces_;
   }
 
@@ -291,6 +296,10 @@
     return mark_bitmap_.get();
   }
 
+  ObjectStack* GetLiveStack() SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+    return live_stack_.get();
+  }
+
   void PreZygoteFork() LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
 
   // Mark and empty stack.
@@ -349,10 +358,6 @@
   void RequestHeapTrim() LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);
   void RequestConcurrentGC(Thread* self) LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);
 
-  // Swap bitmaps (if we are a full Gc then we swap the zygote bitmap too).
-  void SwapBitmaps(GcType gc_type) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
-  void SwapLargeObjects() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
-
   void RecordAllocation(size_t size, Object* object)
       LOCKS_EXCLUDED(GlobalSynchronization::heap_bitmap_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -364,14 +369,10 @@
                      Locks::heap_bitmap_lock_,
                      Locks::mutator_lock_,
                      Locks::thread_suspend_count_lock_);
-  void CollectGarbageMarkSweepPlan(Thread* self, GcType gc_plan, GcCause gc_cause,
-                                   bool clear_soft_references)
-      LOCKS_EXCLUDED(Locks::heap_bitmap_lock_,
-                     Locks::mutator_lock_);
-  void CollectGarbageConcurrentMarkSweepPlan(Thread* self, GcType gc_plan, GcCause gc_cause,
-                                             bool clear_soft_references)
-      LOCKS_EXCLUDED(Locks::heap_bitmap_lock_,
-                     Locks::mutator_lock_);
+
+  void PreGcVerification(GarbageCollector* gc);
+  void PreSweepingGcVerification(GarbageCollector* gc);
+  void PostGcVerification(GarbageCollector* gc);
 
   // Given the current contents of the alloc space, increase the allowed heap footprint to match
   // the target utilization ratio.  This should only be called immediately after a full garbage
@@ -392,9 +393,6 @@
   // Swap the allocation stack with the live stack.
   void SwapStacks();
 
-  // Bind bitmaps (makes the live and mark bitmaps for immune spaces point to the same bitmap).
-  void BindBitmaps(GcType gc_type, MarkSweep& mark_sweep);
-
   // Clear cards and update the mod union table.
   void ProcessCards(TimingLogger& timings);
 
@@ -406,10 +404,6 @@
   // The alloc space which we are currently allocating into.
   DlMallocSpace* alloc_space_;
 
-  // One cumulative logger for each type of Gc.
-  typedef SafeMap<GcType, CumulativeLogger*> CumulativeTimings;
-  CumulativeTimings cumulative_timings_;
-
   // The mod-union table remembers all of the references from the image space to the alloc /
   // zygote spaces.
   UniquePtr<ModUnionTable> mod_union_table_;
@@ -453,7 +447,6 @@
   size_t concurrent_start_bytes_;
 
   // Number of bytes allocated since the last Gc, we use this to help determine when to schedule concurrent GCs.
-  size_t bytes_since_last_gc_;
   size_t sticky_gc_count_;
 
   size_t total_bytes_freed_;
@@ -495,9 +488,6 @@
   UniquePtr<HeapBitmap> live_bitmap_ GUARDED_BY(Locks::heap_bitmap_lock_);
   UniquePtr<HeapBitmap> mark_bitmap_ GUARDED_BY(Locks::heap_bitmap_lock_);
 
-  // Used to ensure that we don't ever recursively request GC.
-  volatile bool requesting_gc_;
-
   // Mark stack that we reuse to avoid re-allocating the mark stack.
   UniquePtr<ObjectStack> mark_stack_;
 
@@ -535,7 +525,6 @@
   double target_utilization_;
 
   // Total time which mutators are paused or waiting for GC to complete.
-  uint64_t total_paused_time_;
   uint64_t total_wait_time_;
 
   // Total number of objects allocated in microseconds.
@@ -543,6 +532,8 @@
   AtomicInteger total_allocation_time_;
 
   bool verify_objects_;
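+  // The registered mark sweep collectors, one per GC type and concurrency mode.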
+  typedef std::vector<MarkSweep*> Collectors;
+  Collectors mark_sweep_collectors_;
 
   friend class MarkSweep;
   friend class VerifyReferenceCardVisitor;
diff --git a/src/thread_list.cc b/src/thread_list.cc
index 2a5b6c9..a2a8fe8 100644
--- a/src/thread_list.cc
+++ b/src/thread_list.cc
@@ -153,7 +153,7 @@
 size_t ThreadList::RunCheckpoint(Closure* checkpoint_function) {
   Thread* self = Thread::Current();
   if (kIsDebugBuild) {
-    Locks::mutator_lock_->AssertNotHeld(self);
+    Locks::mutator_lock_->AssertNotExclusiveHeld(self);
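+    // The mutator lock may legitimately be held shared here: the collectors run checkpoints
+    // while holding it for read during concurrent marking.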
     Locks::thread_list_lock_->AssertNotHeld(self);
     Locks::thread_suspend_count_lock_->AssertNotHeld(self);
     CHECK_NE(self->GetState(), kRunnable);
diff --git a/src/timing_logger.h b/src/timing_logger.h
index 7dc2671..7b20563 100644
--- a/src/timing_logger.h
+++ b/src/timing_logger.h
@@ -31,11 +31,17 @@
 
 class TimingLogger {
  public:
-  explicit TimingLogger(const char* name, bool precise = false)
+  explicit TimingLogger(const std::string& name, bool precise = false)
       : name_(name), precise_(precise) {
     AddSplit("");
   }
 
+  void Reset() {
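+    // Discard all recorded splits and start over with the initial empty split.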
+    times_.clear();
+    labels_.clear();
+    AddSplit("");
+  }
+
   void AddSplit(const std::string& label) {
     times_.push_back(NanoTime());
     labels_.push_back(label);
@@ -82,12 +88,16 @@
 
 class CumulativeLogger {
  public:
-  explicit CumulativeLogger(const char* name = "", bool precise = false)
+  explicit CumulativeLogger(const std::string& name = "", bool precise = false)
     : name_(name),
       precise_(precise) {
     Reset();
   }
 
+  void SetName(const std::string& name) {
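+    // Renames the logger after construction (e.g. so each collector can label its own timings).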
+    name_ = name;
+  }
+
   void Start() {
     index_ = 0;
     last_split_ = NanoTime();