GC clean up.

Greater use of directories and namespaces.
Fix bugs that cause verify options to fail.
Address numerous other issues:

GC barrier waits occurring while holding locks:
GC barrier waits occur when we wait for threads to run the checkpoint function
on themselves. This was happening with the heap bitmap and mutator locks held,
meaning that a thread trying to take either lock exclusively would block
behind the waiting thread. If that blocked thread is one we are waiting on to
run the checkpoint, the VM deadlocks.
This deadlock went unnoticed because the call that checks for wait safety was
removed in: https://googleplex-android-review.googlesource.com/#/c/249423/1.
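A minimal sketch of the hazard, with illustrative names (the actual fix adds a
CheckSafeToWait() call to ConditionVariable::Wait in base/mutex.cc below):

    // Thread A: requests a checkpoint while holding the heap bitmap lock.
    void RequestCheckpoint(Thread* self, Barrier& checkpoint_barrier) {
      WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
      checkpoint_barrier.Wait(self);  // wait for every thread to run the checkpoint
    }
    // Thread B: blocks taking the lock Thread A holds before it can reach its
    // checkpoint, so the barrier never completes and the VM deadlocks.
    void Mutator(Thread* self) {
      WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    }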

NewTimingLogger:
The existing timing logger records when a split ends but not when it begins.
That is unhelpful for systrace; in the context of GC it makes races between
mutators and the GC hard to diagnose, because we only know which phase the GC
just finished and have to derive the current one, which is not ideal.
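A rough usage sketch of the new API (labels illustrative; see the
base/timing_logger hunks below):

    base::NewTimingLogger timings("GC", false /* precise */, true /* verbose */);
    timings.StartSplit("InitializePhase");  // logs "Begin: InitializePhase"
    // ... phase work ...
    timings.NewSplit("MarkingPhase");       // ends the previous split, begins the next
    // ... phase work ...
    timings.EndSplit();                     // logs "End: MarkingPhase <duration>"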

Support for only 1 discontinuous space:
Code now special-cases the continuous spaces and the single large object
space, rather than assuming we can have a collection of both.
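Walking the heap therefore visits the continuous spaces and the single large
object space separately, roughly (compare the debugger.cc hunk below):

    const std::vector<gc::space::ContinuousSpace*>& spaces = heap->GetContinuousSpaces();
    typedef std::vector<gc::space::ContinuousSpace*>::const_iterator It;
    for (It cur = spaces.begin(), end = spaces.end(); cur != end; ++cur) {
      if ((*cur)->IsDlMallocSpace()) {
        (*cur)->AsDlMallocSpace()->Walk(callback, arg);
      }
    }
    // The large object space is discontinuous and is walked on its own.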

Sorted atomic stacks:
Used to improve verification performance. Simplify their use and add extra
checks.
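Verification can now, for example, sort the allocation stack once and use
binary search for membership tests (a sketch, assuming an ObjectStack* as
defined in atomic_stack.h below):

    void AssertAllocated(gc::accounting::ObjectStack* allocation_stack,
                         mirror::Object* obj) {
      allocation_stack->Sort();  // no-op when already sorted; indices are CHECKed unchanged
      CHECK(allocation_stack->Contains(obj));  // binary search once sorted
    }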

Simplify mod-union table abstractions.

Reduce use of std::strings and their associated overhead in hot code.

Make time units of fields explicit.

Rename IsAllocSpace to IsDlMallocSpace to remove the confusion over what it
actually tests.

Make GetTotalMemory (exposed via System) equal to the footprint (as in Dalvik)
rather than the max memory footprint.

Change-Id: Ie87067140fa4499b15edab691fe6565d79599812
diff --git a/build/Android.common.mk b/build/Android.common.mk
index f11684e..a7bf944 100644
--- a/build/Android.common.mk
+++ b/build/Android.common.mk
@@ -52,6 +52,7 @@
 
 ART_C_INCLUDES := \
 	external/gtest/include \
+	external/valgrind/main/include \
 	external/zlib \
 	frameworks/compile/mclinker/include \
 	art/src
@@ -179,20 +180,22 @@
 	src/disassembler_arm.cc \
 	src/disassembler_mips.cc \
 	src/disassembler_x86.cc \
-	src/dlmalloc.cc \
 	src/elf_file.cc \
 	src/file_output_stream.cc \
-	src/gc/card_table.cc \
-	src/gc/garbage_collector.cc \
-	src/gc/heap_bitmap.cc \
-	src/gc/large_object_space.cc \
-	src/gc/mark_sweep.cc \
-	src/gc/mod_union_table.cc \
-	src/gc/partial_mark_sweep.cc \
-	src/gc/space.cc \
-	src/gc/space_bitmap.cc \
-	src/gc/sticky_mark_sweep.cc \
-	src/heap.cc \
+	src/gc/allocator/dlmalloc.cc \
+	src/gc/accounting/card_table.cc \
+	src/gc/accounting/heap_bitmap.cc \
+	src/gc/accounting/mod_union_table.cc \
+	src/gc/accounting/space_bitmap.cc \
+	src/gc/collector/garbage_collector.cc \
+	src/gc/collector/mark_sweep.cc \
+	src/gc/collector/partial_mark_sweep.cc \
+	src/gc/collector/sticky_mark_sweep.cc \
+	src/gc/heap.cc \
+	src/gc/space/dlmalloc_space.cc \
+	src/gc/space/image_space.cc \
+	src/gc/space/large_object_space.cc \
+	src/gc/space/space.cc \
 	src/hprof/hprof.cc \
 	src/image.cc \
 	src/image_writer.cc \
@@ -372,9 +375,9 @@
 	src/compiler/dex/compiler_enums.h \
 	src/dex_file.h \
 	src/dex_instruction.h \
-	src/gc/gc_type.h \
-	src/gc/space.h \
-	src/heap.h \
+	src/gc/collector/gc_type.h \
+	src/gc/space/space.h \
+	src/gc/heap.h \
 	src/indirect_reference_table.h \
 	src/instruction_set.h \
 	src/invoke_type.h \
@@ -407,10 +410,10 @@
 	src/dex_method_iterator_test.cc \
 	src/elf_writer_test.cc \
 	src/exception_test.cc \
-	src/gc/space_bitmap_test.cc \
-	src/gc/space_test.cc \
+	src/gc/accounting/space_bitmap_test.cc \
+	src/gc/heap_test.cc \
+	src/gc/space/space_test.cc \
 	src/gtest_test.cc \
-	src/heap_test.cc \
 	src/image_test.cc \
 	src/indenter_test.cc \
 	src/indirect_reference_table_test.cc \
diff --git a/src/atomic_integer.h b/src/atomic_integer.h
index 188f4c2..c4a8de9 100644
--- a/src/atomic_integer.h
+++ b/src/atomic_integer.h
@@ -71,7 +71,7 @@
     return success;
   }
  private:
-  int32_t value_;
+  volatile int32_t value_;
 };
 
 }
diff --git a/src/barrier_test.cc b/src/barrier_test.cc
index 093ba35..55d2d3d 100644
--- a/src/barrier_test.cc
+++ b/src/barrier_test.cc
@@ -88,7 +88,7 @@
   // at this point.
   EXPECT_EQ(num_threads, count2);
   // Wait for all the threads to finish.
-  thread_pool.Wait(self);
+  thread_pool.Wait(self, true, false);
   // All three counts should be equal to num_threads now.
   EXPECT_EQ(count1, count2);
   EXPECT_EQ(count2, count3);
diff --git a/src/base/mutex.cc b/src/base/mutex.cc
index a2851e5..fbec826 100644
--- a/src/base/mutex.cc
+++ b/src/base/mutex.cc
@@ -777,6 +777,11 @@
 }
 
 void ConditionVariable::Wait(Thread* self) {
+  guard_.CheckSafeToWait(self);
+  WaitHoldingLocks(self);
+}
+
+void ConditionVariable::WaitHoldingLocks(Thread* self) {
   DCHECK(self == NULL || self == Thread::Current());
   guard_.AssertExclusiveHeld(self);
   unsigned int old_recursion_count = guard_.recursion_count_;
@@ -811,6 +816,7 @@
 void ConditionVariable::TimedWait(Thread* self, int64_t ms, int32_t ns) {
   DCHECK(self == NULL || self == Thread::Current());
   guard_.AssertExclusiveHeld(self);
+  guard_.CheckSafeToWait(self);
   unsigned int old_recursion_count = guard_.recursion_count_;
 #if ART_USE_FUTEXES
   timespec rel_ts;
diff --git a/src/base/mutex.h b/src/base/mutex.h
index a393765..24df572 100644
--- a/src/base/mutex.h
+++ b/src/base/mutex.h
@@ -307,6 +307,10 @@
   //       pointer copy, thereby defeating annotalysis.
   void Wait(Thread* self) NO_THREAD_SAFETY_ANALYSIS;
   void TimedWait(Thread* self, int64_t ms, int32_t ns) NO_THREAD_SAFETY_ANALYSIS;
+  // Variant of Wait that should be used with caution. Doesn't validate that no mutexes are held
+  // when waiting.
+  // TODO: remove this.
+  void WaitHoldingLocks(Thread* self) NO_THREAD_SAFETY_ANALYSIS;
 
  private:
   const char* const name_;
diff --git a/src/base/timing_logger.cc b/src/base/timing_logger.cc
index 6d5586c..c7cbbe5 100644
--- a/src/base/timing_logger.cc
+++ b/src/base/timing_logger.cc
@@ -82,7 +82,7 @@
 }
 
 void CumulativeLogger::SetName(const std::string& name) {
-  name_ = name;
+  name_.assign(name);
 }
 
 void CumulativeLogger::Start() {
@@ -123,13 +123,40 @@
   }
 }
 
+void CumulativeLogger::AddNewLogger(const base::NewTimingLogger &logger) {
+  MutexLock mu(Thread::Current(), lock_);
+  const std::vector<std::pair<uint64_t, const char*> >& splits = logger.GetSplits();
+  typedef std::vector<std::pair<uint64_t, const char*> >::const_iterator It;
+  if (kIsDebugBuild && splits.size() != histograms_.size()) {
+    LOG(ERROR) << "Mismatch in splits.";
+    typedef std::vector<Histogram<uint64_t> *>::const_iterator It2;
+    It it = splits.begin();
+    It2 it2 = histograms_.begin();
+    while ((it != splits.end()) || (it2 != histograms_.end())) {
+      if (it != splits.end()) {
+        LOG(ERROR) << "\tsplit: " << it->second;
+        ++it;
+      }
+      if (it2 != histograms_.end()) {
+        LOG(ERROR) << "\tpreviously record: " << (*it2)->Name();
+        ++it2;
+      }
+    }
+  }
+  for (It it = splits.begin(), end = splits.end(); it != end; ++it) {
+    std::pair<uint64_t, const char*> split = *it;
+    uint64_t split_time = split.first;
+    const char* split_name = split.second;
+    AddPair(split_name, split_time);
+  }
+}
+
 void CumulativeLogger::Dump(std::ostream &os) {
   MutexLock mu(Thread::Current(), lock_);
   DumpHistogram(os);
 }
 
 void CumulativeLogger::AddPair(const std::string &label, uint64_t delta_time) {
-
   // Convert delta time to microseconds so that we don't overflow our counters.
   delta_time /= kAdjust;
   if (index_ >= histograms_.size()) {
@@ -154,4 +181,89 @@
   os << "Done Dumping histograms \n";
 }
 
+
+namespace base {
+
+NewTimingLogger::NewTimingLogger(const char* name, bool precise, bool verbose)
+    : name_(name), precise_(precise), verbose_(verbose),
+      current_split_(NULL), current_split_start_ns_(0) {
+}
+
+void NewTimingLogger::Reset() {
+  current_split_ = NULL;
+  current_split_start_ns_ = 0;
+  splits_.clear();
+}
+
+void NewTimingLogger::StartSplit(const char* new_split_label) {
+  DCHECK(current_split_ == NULL);
+  if (verbose_) {
+    LOG(INFO) << "Begin: " << new_split_label;
+  }
+  current_split_ = new_split_label;
+  current_split_start_ns_ = NanoTime();
+}
+
+// Ends the current split and starts the one given by the label.
+void NewTimingLogger::NewSplit(const char* new_split_label) {
+  DCHECK(current_split_ != NULL);
+  uint64_t current_time = NanoTime();
+  uint64_t split_time = current_time - current_split_start_ns_;
+  splits_.push_back(std::pair<uint64_t, const char*>(split_time, current_split_));
+  if (verbose_) {
+    LOG(INFO) << "End: " << current_split_ << " " << PrettyDuration(split_time) << "\n"
+        << "Begin: " << new_split_label;
+  }
+  current_split_ = new_split_label;
+  current_split_start_ns_ = current_time;
+}
+
+void NewTimingLogger::EndSplit() {
+  DCHECK(current_split_ != NULL);
+  uint64_t current_time = NanoTime();
+  uint64_t split_time = current_time - current_split_start_ns_;
+  if (verbose_) {
+    LOG(INFO) << "End: " << current_split_ << " " << PrettyDuration(split_time);
+  }
+  splits_.push_back(std::pair<uint64_t, const char*>(split_time, current_split_));
+}
+
+uint64_t NewTimingLogger::GetTotalNs() const {
+  uint64_t total_ns = 0;
+  typedef std::vector<std::pair<uint64_t, const char*> >::const_iterator It;
+  for (It it = splits_.begin(), end = splits_.end(); it != end; ++it) {
+    std::pair<uint64_t, const char*> split = *it;
+    total_ns += split.first;
+  }
+  return total_ns;
+}
+
+void NewTimingLogger::Dump(std::ostream &os) const {
+  uint64_t longest_split = 0;
+  uint64_t total_ns = 0;
+  typedef std::vector<std::pair<uint64_t, const char*> >::const_iterator It;
+  for (It it = splits_.begin(), end = splits_.end(); it != end; ++it) {
+    std::pair<uint64_t, const char*> split = *it;
+    uint64_t split_time = split.first;
+    longest_split = std::max(longest_split, split_time);
+    total_ns += split_time;
+  }
+  // Compute which type of unit we will use for printing the timings.
+  TimeUnit tu = GetAppropriateTimeUnit(longest_split);
+  uint64_t divisor = GetNsToTimeUnitDivisor(tu);
+  // Print formatted splits.
+  for (It it = splits_.begin(), end = splits_.end(); it != end; ++it) {
+    std::pair<uint64_t, const char*> split = *it;
+    uint64_t split_time = split.first;
+    if (!precise_ && divisor >= 1000) {
+      // Make the fractional part 0.
+      split_time -= split_time % (divisor / 1000);
+    }
+    os << name_ << ": " << std::setw(8) << FormatDuration(split_time, tu) << " "
+       << split.second << "\n";
+  }
+  os << name_ << ": end, " << NsToMs(total_ns) << " ms\n";
+}
+
+}  // namespace base
 }  // namespace art
diff --git a/src/base/timing_logger.h b/src/base/timing_logger.h
index bbcc286..65732b1 100644
--- a/src/base/timing_logger.h
+++ b/src/base/timing_logger.h
@@ -45,6 +45,10 @@
   friend class CumulativeLogger;
 };
 
+namespace base {
+  class NewTimingLogger;
+}  // namespace base
+
 class CumulativeLogger {
 
  public:
@@ -61,6 +65,7 @@
   // parent class that is unable to determine the "name" of a sub-class.
   void SetName(const std::string& name);
   void AddLogger(const TimingLogger& logger) LOCKS_EXCLUDED(lock_);
+  void AddNewLogger(const base::NewTimingLogger& logger) LOCKS_EXCLUDED(lock_);
 
  private:
 
@@ -79,6 +84,59 @@
   DISALLOW_COPY_AND_ASSIGN(CumulativeLogger);
 };
 
+namespace base {
+
+// A replacement for TimingLogger that knows when a split starts, for the purposes of logging.
+// TODO: replace uses of TimingLogger with base::NewTimingLogger.
+class NewTimingLogger {
+ public:
+  explicit NewTimingLogger(const char* name, bool precise, bool verbose);
+
+  // Clears current splits and labels.
+  void Reset();
+
+  // Starts a split, a split shouldn't be in progress.
+  void StartSplit(const char* new_split_label);
+
+  // Ends the current split and starts the one given by the label.
+  void NewSplit(const char* new_split_label);
+
+  // Ends the current split and records the end time.
+  void EndSplit();
+
+  uint64_t GetTotalNs() const;
+
+  void Dump(std::ostream& os) const;
+
+  const std::vector<std::pair<uint64_t, const char*> >& GetSplits() const {
+    return splits_;
+  }
+
+ protected:
+  // The name of the timing logger.
+  const std::string name_;
+
+  // Whether we want to print the exact recorded split (true) or round down to the time unit
+  // being used (false).
+  const bool precise_;
+
+  // Verbose logging.
+  const bool verbose_;
+
+  // The name of the current split.
+  const char* current_split_;
+
+  // The nanosecond time the current split started on.
+  uint64_t current_split_start_ns_;
+
+  // Splits are nanosecond times and split names.
+  std::vector<std::pair<uint64_t, const char*> > splits_;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(NewTimingLogger);
+};
+
+}  // namespace base
 }  // namespace art
 
 #endif  // ART_SRC_TIMING_LOGGER_H_
diff --git a/src/check_jni.cc b/src/check_jni.cc
index f4ea235..403a2eb 100644
--- a/src/check_jni.cc
+++ b/src/check_jni.cc
@@ -23,7 +23,7 @@
 #include "class_linker.h"
 #include "class_linker-inl.h"
 #include "dex_file-inl.h"
-#include "gc/space.h"
+#include "gc/space/space.h"
 #include "mirror/class-inl.h"
 #include "mirror/field-inl.h"
 #include "mirror/abstract_method-inl.h"
@@ -36,7 +36,7 @@
 #include "thread.h"
 
 #define LIBCORE_CPP_JNI_HELPERS
-#include <JNIHelp.h> // from libcore
+#include <JNIHelp.h>  // from libcore
 #undef LIBCORE_CPP_JNI_HELPERS
 
 namespace art {
diff --git a/src/class_linker.cc b/src/class_linker.cc
index 77fd5e1..68d0fbb 100644
--- a/src/class_linker.cc
+++ b/src/class_linker.cc
@@ -34,8 +34,10 @@
 #include "class_linker-inl.h"
 #include "debugger.h"
 #include "dex_file-inl.h"
-#include "gc/card_table-inl.h"
-#include "heap.h"
+#include "gc/accounting/card_table-inl.h"
+#include "gc/accounting/heap_bitmap.h"
+#include "gc/heap.h"
+#include "gc/space/image_space.h"
 #include "intern_table.h"
 #include "interpreter/interpreter.h"
 #include "leb128.h"
@@ -63,8 +65,6 @@
 #include "ScopedLocalRef.h"
 #include "scoped_thread_state_change.h"
 #include "sirt_ref.h"
-#include "gc/space.h"
-#include "gc/space_bitmap.h"
 #include "stack_indirect_reference_table.h"
 #include "thread.h"
 #include "UniquePtr.h"
@@ -216,7 +216,7 @@
 
   // java_lang_Class comes first, it's needed for AllocClass
   Thread* self = Thread::Current();
-  Heap* heap = Runtime::Current()->GetHeap();
+  gc::Heap* heap = Runtime::Current()->GetHeap();
   SirtRef<mirror::Class>
       java_lang_Class(self,
                       down_cast<mirror::Class*>(heap->AllocObject(self, NULL,
@@ -549,7 +549,7 @@
   CHECK_EQ(java_lang_dex.GetFieldId(zombie->GetDexFieldIndex()).type_idx_,
            GetClassRoot(kJavaLangObject)->GetDexTypeIndex());
 
-  Heap* heap = Runtime::Current()->GetHeap();
+  gc::Heap* heap = Runtime::Current()->GetHeap();
   heap->SetReferenceOffsets(referent->GetOffset(),
                             queue->GetOffset(),
                             queueNext->GetOffset(),
@@ -595,7 +595,7 @@
 
   const char* class_path = Runtime::Current()->GetClassPathString().c_str();
 
-  Heap* heap = Runtime::Current()->GetHeap();
+  gc::Heap* heap = Runtime::Current()->GetHeap();
   std::string boot_image_option_string("--boot-image=");
   boot_image_option_string += heap->GetImageSpace()->GetImageFilename();
   const char* boot_image_option = boot_image_option_string.c_str();
@@ -680,7 +680,7 @@
   oat_files_.push_back(&oat_file);
 }
 
-OatFile* ClassLinker::OpenOat(const ImageSpace* space) {
+OatFile* ClassLinker::OpenOat(const gc::space::ImageSpace* space) {
   WriterMutexLock mu(Thread::Current(), dex_lock_);
   const Runtime* runtime = Runtime::Current();
   const ImageHeader& image_header = space->GetImageHeader();
@@ -947,8 +947,8 @@
   VLOG(startup) << "ClassLinker::InitFromImage entering";
   CHECK(!init_done_);
 
-  Heap* heap = Runtime::Current()->GetHeap();
-  ImageSpace* space = heap->GetImageSpace();
+  gc::Heap* heap = Runtime::Current()->GetHeap();
+  gc::space::ImageSpace* space = heap->GetImageSpace();
   OatFile* oat_file = OpenOat(space);
   CHECK(oat_file != NULL) << "Failed to open oat file for image";
   CHECK_EQ(oat_file->GetOatHeader().GetImageFileLocationOatChecksum(), 0U);
@@ -1057,7 +1057,7 @@
 // Keep in sync with InitCallback. Anything we visit, we need to
 // reinit references to when reinitializing a ClassLinker from a
 // mapped image.
-void ClassLinker::VisitRoots(RootVisitor* visitor, void* arg) {
+void ClassLinker::VisitRoots(RootVisitor* visitor, void* arg, bool clean_dirty) {
   visitor(class_roots_, arg);
   Thread* self = Thread::Current();
   {
@@ -1079,7 +1079,9 @@
   }
 
   visitor(array_iftable_, arg);
-  is_dirty_ = false;
+  if (clean_dirty) {
+    is_dirty_ = false;
+  }
 }
 
 void ClassLinker::VisitClasses(ClassVisitor* visitor, void* arg) const {
@@ -1135,7 +1137,7 @@
 }
 
 mirror::DexCache* ClassLinker::AllocDexCache(Thread* self, const DexFile& dex_file) {
-  Heap* heap = Runtime::Current()->GetHeap();
+  gc::Heap* heap = Runtime::Current()->GetHeap();
   mirror::Class* dex_cache_class = GetClassRoot(kJavaLangDexCache);
   SirtRef<mirror::DexCache> dex_cache(self,
                               down_cast<mirror::DexCache*>(heap->AllocObject(self, dex_cache_class,
@@ -1188,7 +1190,7 @@
 mirror::Class* ClassLinker::AllocClass(Thread* self, mirror::Class* java_lang_Class,
                                        size_t class_size) {
   DCHECK_GE(class_size, sizeof(mirror::Class));
-  Heap* heap = Runtime::Current()->GetHeap();
+  gc::Heap* heap = Runtime::Current()->GetHeap();
   SirtRef<mirror::Class> klass(self,
                        heap->AllocObject(self, java_lang_Class, class_size)->AsClass());
   klass->SetPrimitiveType(Primitive::kPrimNot);  // default to not being primitive
@@ -2073,7 +2075,8 @@
   return NULL;
 }
 
-mirror::Class* ClassLinker::InsertClass(const StringPiece& descriptor, mirror::Class* klass, bool image_class) {
+mirror::Class* ClassLinker::InsertClass(const StringPiece& descriptor, mirror::Class* klass,
+                                        bool image_class) {
   if (VLOG_IS_ON(class_linker)) {
     mirror::DexCache* dex_cache = klass->GetDexCache();
     std::string source;
@@ -2086,7 +2089,8 @@
   size_t hash = StringPieceHash()(descriptor);
   WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
   Table& classes = image_class ? image_classes_ : classes_;
-  mirror::Class* existing = LookupClassLocked(descriptor.data(), klass->GetClassLoader(), hash, classes);
+  mirror::Class* existing =
+      LookupClassLocked(descriptor.data(), klass->GetClassLoader(), hash, classes);
 #ifndef NDEBUG
   // Check we don't have the class in the other table in error
   Table& other_classes = image_class ? classes_ : image_classes_;
@@ -2095,6 +2099,7 @@
   if (existing != NULL) {
     return existing;
   }
+  Runtime::Current()->GetHeap()->VerifyObject(klass);
   classes.insert(std::make_pair(hash, klass));
   Dirty();
   return NULL;
diff --git a/src/class_linker.h b/src/class_linker.h
index e0f297d..df33672 100644
--- a/src/class_linker.h
+++ b/src/class_linker.h
@@ -29,6 +29,11 @@
 #include "oat_file.h"
 
 namespace art {
+namespace gc {
+namespace space {
+  class ImageSpace;
+}  // namespace space
+}  // namespace gc
 namespace mirror {
   class ClassLoader;
   class DexCache;
@@ -37,7 +42,7 @@
   template<class T> class ObjectArray;
   class StackTraceElement;
 }  // namespace mirror
-class ImageSpace;
+
 class InternTable;
 class ObjectLock;
 template<class T> class SirtRef;
@@ -219,7 +224,7 @@
   void VisitClassesWithoutClassesLock(ClassVisitor* visitor, void* arg) const
       LOCKS_EXCLUDED(Locks::classlinker_classes_lock_);
 
-  void VisitRoots(RootVisitor* visitor, void* arg)
+  void VisitRoots(RootVisitor* visitor, void* arg, bool clean_dirty)
       LOCKS_EXCLUDED(Locks::classlinker_classes_lock_, dex_lock_);
 
   mirror::DexCache* FindDexCache(const DexFile& dex_file) const
@@ -354,7 +359,7 @@
 
   // Initialize class linker from one or more images.
   void InitFromImage() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  OatFile* OpenOat(const ImageSpace* space)
+  OatFile* OpenOat(const gc::space::ImageSpace* space)
       LOCKS_EXCLUDED(dex_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   static void InitFromImageCallback(mirror::Object* obj, void* arg)
diff --git a/src/class_linker_test.cc b/src/class_linker_test.cc
index 4540c01..e5844b0 100644
--- a/src/class_linker_test.cc
+++ b/src/class_linker_test.cc
@@ -22,7 +22,7 @@
 #include "class_linker-inl.h"
 #include "common_test.h"
 #include "dex_file.h"
-#include "heap.h"
+#include "gc/heap.h"
 #include "mirror/class-inl.h"
 #include "mirror/dex_cache.h"
 #include "mirror/field-inl.h"
@@ -333,7 +333,7 @@
       const char* descriptor = dex->GetTypeDescriptor(type_id);
       AssertDexFileClass(class_loader, descriptor);
     }
-    class_linker_->VisitRoots(TestRootVisitor, NULL);
+    class_linker_->VisitRoots(TestRootVisitor, NULL, false);
     // Verify the dex cache has resolution methods in all resolved method slots
     DexCache* dex_cache = class_linker_->FindDexCache(*dex);
     ObjectArray<AbstractMethod>* resolved_methods = dex_cache->GetResolvedMethods();
diff --git a/src/common_test.h b/src/common_test.h
index 0c171a8..88da8a2 100644
--- a/src/common_test.h
+++ b/src/common_test.h
@@ -28,8 +28,8 @@
 #include "class_linker.h"
 #include "compiler/driver/compiler_driver.h"
 #include "dex_file-inl.h"
+#include "gc/heap.h"
 #include "gtest/gtest.h"
-#include "heap.h"
 #include "instruction_set.h"
 #include "mirror/class_loader.h"
 #include "oat_file.h"
@@ -296,8 +296,8 @@
     boot_class_path_.push_back(java_lang_dex_file_);
     boot_class_path_.push_back(conscrypt_file_);
 
-    std::string min_heap_string(StringPrintf("-Xms%zdm", Heap::kDefaultInitialSize / MB));
-    std::string max_heap_string(StringPrintf("-Xmx%zdm", Heap::kDefaultMaximumSize / MB));
+    std::string min_heap_string(StringPrintf("-Xms%zdm", gc::Heap::kDefaultInitialSize / MB));
+    std::string max_heap_string(StringPrintf("-Xmx%zdm", gc::Heap::kDefaultMaximumSize / MB));
 
     Runtime::Options options;
     options.push_back(std::make_pair("compiler", reinterpret_cast<void*>(NULL)));
@@ -313,49 +313,50 @@
     // Runtime::Create acquired the mutator_lock_ that is normally given away when we Runtime::Start,
     // give it away now and then switch to a more managable ScopedObjectAccess.
     Thread::Current()->TransitionFromRunnableToSuspended(kNative);
-    // Whilst we're in native take the opportunity to initialize well known classes.
-    WellKnownClasses::InitClasses(Thread::Current()->GetJniEnv());
-    ScopedObjectAccess soa(Thread::Current());
-    ASSERT_TRUE(runtime_.get() != NULL);
-    class_linker_ = runtime_->GetClassLinker();
+    {
+      ScopedObjectAccess soa(Thread::Current());
+      ASSERT_TRUE(runtime_.get() != NULL);
+      class_linker_ = runtime_->GetClassLinker();
 
-    InstructionSet instruction_set = kNone;
+      InstructionSet instruction_set = kNone;
 #if defined(__arm__)
-    instruction_set = kThumb2;
+      instruction_set = kThumb2;
 #elif defined(__mips__)
-    instruction_set = kMips;
+      instruction_set = kMips;
 #elif defined(__i386__)
-    instruction_set = kX86;
+      instruction_set = kX86;
 #endif
 
-    // TODO: make selectable
+      // TODO: make selectable
 #if defined(ART_USE_PORTABLE_COMPILER)
-    CompilerBackend compiler_backend = kPortable;
+      CompilerBackend compiler_backend = kPortable;
 #else
-    CompilerBackend compiler_backend = kQuick;
+      CompilerBackend compiler_backend = kQuick;
 #endif
 
-    if (!runtime_->HasResolutionMethod()) {
-      runtime_->SetResolutionMethod(runtime_->CreateResolutionMethod());
-    }
-    for (int i = 0; i < Runtime::kLastCalleeSaveType; i++) {
-      Runtime::CalleeSaveType type = Runtime::CalleeSaveType(i);
-      if (!runtime_->HasCalleeSaveMethod(type)) {
-        runtime_->SetCalleeSaveMethod(
-            runtime_->CreateCalleeSaveMethod(instruction_set, type), type);
+      if (!runtime_->HasResolutionMethod()) {
+        runtime_->SetResolutionMethod(runtime_->CreateResolutionMethod());
       }
+      for (int i = 0; i < Runtime::kLastCalleeSaveType; i++) {
+        Runtime::CalleeSaveType type = Runtime::CalleeSaveType(i);
+        if (!runtime_->HasCalleeSaveMethod(type)) {
+          runtime_->SetCalleeSaveMethod(
+              runtime_->CreateCalleeSaveMethod(instruction_set, type), type);
+        }
+      }
+      class_linker_->FixupDexCaches(runtime_->GetResolutionMethod());
+      compiler_driver_.reset(new CompilerDriver(compiler_backend, instruction_set,
+                                                true, new CompilerDriver::DescriptorSet,
+                                                2, false, true, true));
     }
-    class_linker_->FixupDexCaches(runtime_->GetResolutionMethod());
-    compiler_driver_.reset(new CompilerDriver(compiler_backend, instruction_set,
-                                              true, new CompilerDriver::DescriptorSet,
-                                              2, false, true, true));
     // We typically don't generate an image in unit tests, disable this optimization by default.
     compiler_driver_->SetSupportBootImageFixup(false);
 
+    // We're back in native, take the opportunity to initialize well known classes.
+    WellKnownClasses::InitClasses(Thread::Current()->GetJniEnv());
     // Create the heap thread pool so that the GC runs in parallel for tests. Normally, the thread
     // pool is created by the runtime.
     runtime_->GetHeap()->CreateThreadPool();
-
     runtime_->GetHeap()->VerifyHeap();  // Check for heap corruption before the test
   }
 
diff --git a/src/compiler/dex/quick/arm/call_arm.cc b/src/compiler/dex/quick/arm/call_arm.cc
index 77e09b8..879065f 100644
--- a/src/compiler/dex/quick/arm/call_arm.cc
+++ b/src/compiler/dex/quick/arm/call_arm.cc
@@ -562,7 +562,7 @@
   int reg_card_no = AllocTemp();
   LIR* branch_over = OpCmpImmBranch(kCondEq, val_reg, 0, NULL);
   LoadWordDisp(rARM_SELF, Thread::CardTableOffset().Int32Value(), reg_card_base);
-  OpRegRegImm(kOpLsr, reg_card_no, tgt_addr_reg, CardTable::kCardShift);
+  OpRegRegImm(kOpLsr, reg_card_no, tgt_addr_reg, gc::accounting::CardTable::kCardShift);
   StoreBaseIndexed(reg_card_base, reg_card_no, reg_card_base, 0,
                    kUnsignedByte);
   LIR* target = NewLIR0(kPseudoTargetLabel);
diff --git a/src/compiler/dex/quick/mips/call_mips.cc b/src/compiler/dex/quick/mips/call_mips.cc
index 9f1d314..ddaf081 100644
--- a/src/compiler/dex/quick/mips/call_mips.cc
+++ b/src/compiler/dex/quick/mips/call_mips.cc
@@ -320,7 +320,7 @@
   int reg_card_no = AllocTemp();
   LIR* branch_over = OpCmpImmBranch(kCondEq, val_reg, 0, NULL);
   LoadWordDisp(rMIPS_SELF, Thread::CardTableOffset().Int32Value(), reg_card_base);
-  OpRegRegImm(kOpLsr, reg_card_no, tgt_addr_reg, CardTable::kCardShift);
+  OpRegRegImm(kOpLsr, reg_card_no, tgt_addr_reg, gc::accounting::CardTable::kCardShift);
   StoreBaseIndexed(reg_card_base, reg_card_no, reg_card_base, 0,
                    kUnsignedByte);
   LIR* target = NewLIR0(kPseudoTargetLabel);
diff --git a/src/compiler/dex/quick/x86/call_x86.cc b/src/compiler/dex/quick/x86/call_x86.cc
index 1e37b2f..dba0e24 100644
--- a/src/compiler/dex/quick/x86/call_x86.cc
+++ b/src/compiler/dex/quick/x86/call_x86.cc
@@ -213,7 +213,7 @@
   int reg_card_no = AllocTemp();
   LIR* branch_over = OpCmpImmBranch(kCondEq, val_reg, 0, NULL);
   NewLIR2(kX86Mov32RT, reg_card_base, Thread::CardTableOffset().Int32Value());
-  OpRegRegImm(kOpLsr, reg_card_no, tgt_addr_reg, CardTable::kCardShift);
+  OpRegRegImm(kOpLsr, reg_card_no, tgt_addr_reg, gc::accounting::CardTable::kCardShift);
   StoreBaseIndexed(reg_card_base, reg_card_no, reg_card_base, 0,
                    kUnsignedByte);
   LIR* target = NewLIR0(kPseudoTargetLabel);
diff --git a/src/compiler/driver/compiler_driver.cc b/src/compiler/driver/compiler_driver.cc
index b04e5b1..6050108 100644
--- a/src/compiler/driver/compiler_driver.cc
+++ b/src/compiler/driver/compiler_driver.cc
@@ -31,8 +31,9 @@
 #include "oat_file.h"
 #include "object_utils.h"
 #include "runtime.h"
-#include "gc/card_table-inl.h"
-#include "gc/space.h"
+#include "gc/accounting/card_table-inl.h"
+#include "gc/accounting/heap_bitmap.h"
+#include "gc/space/space.h"
 #include "mirror/class_loader.h"
 #include "mirror/class-inl.h"
 #include "mirror/dex_cache-inl.h"
@@ -761,7 +762,7 @@
   // Update image_classes_ with classes for objects created by <clinit> methods.
   Thread* self = Thread::Current();
   const char* old_cause = self->StartAssertNoThreadSuspension("ImageWriter");
-  Heap* heap = Runtime::Current()->GetHeap();
+  gc::Heap* heap = Runtime::Current()->GetHeap();
   // TODO: Image spaces only?
   WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
   heap->FlushAllocStack();
@@ -1092,7 +1093,7 @@
     }
     stats_->DirectMethodsToBoot(type);
   }
-  bool compiling_boot = Runtime::Current()->GetHeap()->GetSpaces().size() == 1;
+  bool compiling_boot = Runtime::Current()->GetHeap()->GetContinuousSpaces().size() == 1;
   if (compiling_boot) {
     if (support_boot_image_fixup_) {
       MethodHelper mh(method);
@@ -1104,7 +1105,7 @@
       }
     }
   } else {
-    if (Runtime::Current()->GetHeap()->FindSpaceFromObject(method)->IsImageSpace()) {
+    if (Runtime::Current()->GetHeap()->FindSpaceFromObject(method, false)->IsImageSpace()) {
       direct_method = reinterpret_cast<uintptr_t>(method);
     }
     direct_code = reinterpret_cast<uintptr_t>(method->GetEntryPointFromCompiledCode());
@@ -1382,7 +1383,7 @@
     CHECK_NE(self->GetState(), kRunnable);
 
     // Wait for all the worker threads to finish.
-    thread_pool_->Wait(self);
+    thread_pool_->Wait(self, true, false);
   }
 
  private:
@@ -1915,7 +1916,7 @@
   mirror::ClassLoader* class_loader = soa.Decode<mirror::ClassLoader*>(manager->GetClassLoader());
   const char* descriptor = manager->GetDexFile()->GetClassDescriptor(class_def);
   mirror::Class* klass = manager->GetClassLinker()->FindClass(descriptor, class_loader);
-  bool compiling_boot = Runtime::Current()->GetHeap()->GetSpaces().size() == 1;
+  bool compiling_boot = Runtime::Current()->GetHeap()->GetContinuousSpaces().size() == 1;
   bool can_init_static_fields = compiling_boot &&
       manager->GetCompiler()->IsImageClass(descriptor);
   if (klass != NULL) {
@@ -1925,8 +1926,9 @@
     // on a second thread the sub-class is initialized (holding its lock) after first initializing
     // its parents, whose locks are acquired. This leads to a parent-to-child and a child-to-parent
     // lock ordering and consequent potential deadlock.
-    static Mutex lock1("Initializer lock", kMonitorLock);
-    MutexLock mu(soa.Self(), lock1);
+    // We need to use an ObjectLock due to potential suspension in the interpreting code. Rather
+    // than use a special Object for the purpose we use the Class of java.lang.Class.
+    ObjectLock lock1(soa.Self(), klass->GetClass());
     // The lock required to initialize the class.
     ObjectLock lock2(soa.Self(), klass);
     // Only try to initialize classes that were successfully verified.
diff --git a/src/compiler/driver/compiler_driver_test.cc b/src/compiler/driver/compiler_driver_test.cc
index a7fad6f..abf8a9a 100644
--- a/src/compiler/driver/compiler_driver_test.cc
+++ b/src/compiler/driver/compiler_driver_test.cc
@@ -23,7 +23,7 @@
 #include "class_linker.h"
 #include "common_test.h"
 #include "dex_file.h"
-#include "heap.h"
+#include "gc/heap.h"
 #include "mirror/class.h"
 #include "mirror/class-inl.h"
 #include "mirror/dex_cache-inl.h"
diff --git a/src/compiler/llvm/runtime_support_builder.cc b/src/compiler/llvm/runtime_support_builder.cc
index b4ddb55..2be2ddf 100644
--- a/src/compiler/llvm/runtime_support_builder.cc
+++ b/src/compiler/llvm/runtime_support_builder.cc
@@ -16,7 +16,7 @@
 
 #include "runtime_support_builder.h"
 
-#include "gc/card_table.h"
+#include "gc/accounting/card_table.h"
 #include "ir_builder.h"
 #include "monitor.h"
 #include "mirror/object.h"
@@ -266,9 +266,11 @@
                                                irb_.getInt8Ty()->getPointerTo(),
                                                kTBAAConstJObject);
   Value* target_addr_int = irb_.CreatePtrToInt(target_addr, irb_.getPtrEquivIntTy());
-  Value* card_no = irb_.CreateLShr(target_addr_int, irb_.getPtrEquivInt(CardTable::kCardShift));
+  Value* card_no = irb_.CreateLShr(target_addr_int,
+                                   irb_.getPtrEquivInt(gc::accounting::CardTable::kCardShift));
   Value* card_table_entry = irb_.CreateGEP(card_table, card_no);
-  irb_.CreateStore(irb_.getInt8(CardTable::kCardDirty), card_table_entry, kTBAARuntimeInfo);
+  irb_.CreateStore(irb_.getInt8(gc::accounting::CardTable::kCardDirty), card_table_entry,
+                   kTBAARuntimeInfo);
   irb_.CreateBr(bb_cont);
 
   irb_.SetInsertPoint(bb_cont);
diff --git a/src/debugger.cc b/src/debugger.cc
index d7fac43..f2a10f0 100644
--- a/src/debugger.cc
+++ b/src/debugger.cc
@@ -24,9 +24,9 @@
 #include "class_linker-inl.h"
 #include "dex_file-inl.h"
 #include "dex_instruction.h"
-#include "gc/card_table-inl.h"
-#include "gc/large_object_space.h"
-#include "gc/space.h"
+#include "gc/accounting/card_table-inl.h"
+#include "gc/space/large_object_space.h"
+#include "gc/space/space-inl.h"
 #include "invoke_arg_array_builder.h"
 #include "jdwp/object_registry.h"
 #include "mirror/abstract_method-inl.h"
@@ -1691,6 +1691,7 @@
     case kWaitingForDebuggerSuspension:   *pThreadStatus = JDWP::TS_WAIT;     break;
     case kWaitingForDebuggerToAttach:     *pThreadStatus = JDWP::TS_WAIT;     break;
     case kWaitingForGcToComplete:         *pThreadStatus = JDWP::TS_WAIT;     break;
+    case kWaitingForCheckPointsToRun:     *pThreadStatus = JDWP::TS_WAIT;     break;
     case kWaitingForJniOnLoad:            *pThreadStatus = JDWP::TS_WAIT;     break;
     case kWaitingForSignalCatcherOutput:  *pThreadStatus = JDWP::TS_WAIT;     break;
     case kWaitingInMainDebuggerLoop:      *pThreadStatus = JDWP::TS_WAIT;     break;
@@ -3137,7 +3138,7 @@
    *     [u4]: current number of objects allocated
    */
   uint8_t heap_count = 1;
-  Heap* heap = Runtime::Current()->GetHeap();
+  gc::Heap* heap = Runtime::Current()->GetHeap();
   std::vector<uint8_t> bytes;
   JDWP::Append4BE(bytes, heap_count);
   JDWP::Append4BE(bytes, 1); // Heap id (bogus; we only have one heap).
@@ -3416,17 +3417,16 @@
   // Send a series of heap segment chunks.
   HeapChunkContext context((what == HPSG_WHAT_MERGED_OBJECTS), native);
   if (native) {
-    // TODO: enable when bionic has moved to dlmalloc 2.8.5
-    // dlmalloc_inspect_all(HeapChunkContext::HeapChunkCallback, &context);
-    UNIMPLEMENTED(WARNING) << "Native heap send heap segments";
+    dlmalloc_inspect_all(HeapChunkContext::HeapChunkCallback, &context);
   } else {
-    Heap* heap = Runtime::Current()->GetHeap();
-    const Spaces& spaces = heap->GetSpaces();
+    gc::Heap* heap = Runtime::Current()->GetHeap();
+    const std::vector<gc::space::ContinuousSpace*>& spaces = heap->GetContinuousSpaces();
     Thread* self = Thread::Current();
     ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
-    for (Spaces::const_iterator cur = spaces.begin(); cur != spaces.end(); ++cur) {
-      if ((*cur)->IsAllocSpace()) {
-        (*cur)->AsAllocSpace()->Walk(HeapChunkContext::HeapChunkCallback, &context);
+    typedef std::vector<gc::space::ContinuousSpace*>::const_iterator It;
+    for (It cur = spaces.begin(), end = spaces.end(); cur != end; ++cur) {
+      if ((*cur)->IsDlMallocSpace()) {
+        (*cur)->AsDlMallocSpace()->Walk(HeapChunkContext::HeapChunkCallback, &context);
       }
     }
     // Walk the large objects, these are not in the AllocSpace.
diff --git a/src/dex2oat.cc b/src/dex2oat.cc
index f678ee9..b5dc319 100644
--- a/src/dex2oat.cc
+++ b/src/dex2oat.cc
@@ -30,6 +30,8 @@
 #include "class_linker.h"
 #include "compiler/driver/compiler_driver.h"
 #include "dex_file-inl.h"
+#include "gc/space/image_space.h"
+#include "gc/space/space-inl.h"
 #include "image_writer.h"
 #include "leb128.h"
 #include "mirror/abstract_method-inl.h"
@@ -236,7 +238,7 @@
     uint32_t image_file_location_oat_checksum = 0;
     uint32_t image_file_location_oat_data_begin = 0;
     if (!driver->IsImage()) {
-      ImageSpace* image_space = Runtime::Current()->GetHeap()->GetImageSpace();
+      gc::space::ImageSpace* image_space = Runtime::Current()->GetHeap()->GetImageSpace();
       image_file_location_oat_checksum = image_space->GetImageHeader().GetOatChecksum();
       image_file_location_oat_data_begin =
           reinterpret_cast<uint32_t>(image_space->GetImageHeader().GetOatDataBegin());
@@ -456,12 +458,14 @@
  private:
   static void* CallBack(void* arg) {
     WatchDog* self = reinterpret_cast<WatchDog*>(arg);
+    ::art::SetThreadName("dex2oat watch dog");
     self->Wait();
     return NULL;
   }
 
   static void Message(char severity, const std::string& message) {
-    // TODO: Remove when we switch to LOG when we can guarantee it won't prevent shutdown in error cases.
+    // TODO: Remove when we switch to LOG when we can guarantee it won't prevent shutdown in error
+    //       cases.
     fprintf(stderr, "dex2oat%s %c %d %d %s\n",
             kIsDebugBuild ? "d" : "",
             severity,
@@ -482,10 +486,13 @@
   void Wait() {
     bool warning = true;
     CHECK_GT(kWatchDogTimeoutSeconds, kWatchDogWarningSeconds);
+    // TODO: tune the multiplier for GC verification, the following is just to make the timeout
+    //       large.
+    int64_t multiplier = gc::kDesiredHeapVerification > gc::kVerifyAllFast ? 100 : 1;
     timespec warning_ts;
-    InitTimeSpec(true, CLOCK_REALTIME, kWatchDogWarningSeconds * 1000, 0, &warning_ts);
+    InitTimeSpec(true, CLOCK_REALTIME, multiplier * kWatchDogWarningSeconds * 1000, 0, &warning_ts);
     timespec timeout_ts;
-    InitTimeSpec(true, CLOCK_REALTIME, kWatchDogTimeoutSeconds * 1000, 0, &timeout_ts);
+    InitTimeSpec(true, CLOCK_REALTIME, multiplier * kWatchDogTimeoutSeconds * 1000, 0, &timeout_ts);
     const char* reason = "dex2oat watch dog thread waiting";
     CHECK_WATCH_DOG_PTHREAD_CALL(pthread_mutex_lock, (&mutex_), reason);
     while (!shutting_down_) {
@@ -522,7 +529,7 @@
 
   bool is_watch_dog_enabled_;
   bool shutting_down_;
-  // TODO: Switch to Mutex when we can guarantee it won't prevent shutdown in error cases
+  // TODO: Switch to Mutex when we can guarantee it won't prevent shutdown in error cases.
   pthread_mutex_t mutex_;
   pthread_cond_t cond_;
   pthread_attr_t attr_;
diff --git a/src/gc/atomic_stack.h b/src/gc/accounting/atomic_stack.h
similarity index 80%
rename from src/gc/atomic_stack.h
rename to src/gc/accounting/atomic_stack.h
index 0197bce..4e1c253 100644
--- a/src/gc/atomic_stack.h
+++ b/src/gc/accounting/atomic_stack.h
@@ -14,8 +14,8 @@
  * limitations under the License.
  */
 
-#ifndef ART_SRC_ATOMIC_STACK_H_
-#define ART_SRC_ATOMIC_STACK_H_
+#ifndef ART_SRC_GC_ACCOUNTING_ATOMIC_STACK_H_
+#define ART_SRC_GC_ACCOUNTING_ATOMIC_STACK_H_
 
 #include <string>
 
@@ -27,6 +27,8 @@
 #include "utils.h"
 
 namespace art {
+namespace gc {
+namespace accounting {
 
 template <typename T>
 class AtomicStack {
@@ -38,15 +40,14 @@
     return mark_stack.release();
   }
 
-  ~AtomicStack(){
-
-  }
+  ~AtomicStack() {}
 
   void Reset() {
     DCHECK(mem_map_.get() != NULL);
     DCHECK(begin_ != NULL);
     front_index_ = 0;
     back_index_ = 0;
+    is_sorted_ = true;
     int result = madvise(begin_, sizeof(T) * capacity_, MADV_DONTNEED);
     if (result == -1) {
       PLOG(WARNING) << "madvise failed";
@@ -58,6 +59,7 @@
   // Returns false if we overflowed the stack.
   bool AtomicPushBack(const T& value) {
     int32_t index;
+    is_sorted_ = false;
     do {
       index = back_index_;
       if (UNLIKELY(static_cast<size_t>(index) >= capacity_)) {
@@ -70,6 +72,7 @@
   }
 
   void PushBack(const T& value) {
+    is_sorted_ = false;
     int32_t index = back_index_;
     DCHECK_LT(static_cast<size_t>(index), capacity_);
     back_index_ = index + 1;
@@ -100,11 +103,11 @@
     return back_index_ - front_index_;
   }
 
-  T* Begin() {
+  T* Begin() const {
     return const_cast<mirror::Object**>(begin_ + front_index_);
   }
 
-  T* End() {
+  T* End() const {
     return const_cast<mirror::Object**>(begin_ + back_index_);
   }
 
@@ -118,14 +121,33 @@
     Init();
   }
 
+  void Sort() {
+    if (!is_sorted_) {
+      int32_t start_back_index = back_index_.get();
+      int32_t start_front_index = front_index_.get();
+      is_sorted_ = true;
+      std::sort(Begin(), End());
+      CHECK_EQ(start_back_index, back_index_.get());
+      CHECK_EQ(start_front_index, front_index_.get());
+    }
+  }
+
+  bool Contains(const T& value) const {
+    if (is_sorted_) {
+      return std::binary_search(Begin(), End(), value);
+    } else {
+      return std::find(Begin(), End(), value) != End();
+    }
+  }
+
  private:
   AtomicStack(const std::string& name, const size_t capacity)
       : name_(name),
         back_index_(0),
         front_index_(0),
         begin_(NULL),
-        capacity_(capacity) {
-
+        capacity_(capacity),
+        is_sorted_(true) {
   }
 
   // Size in number of elements.
@@ -156,11 +178,15 @@
   // Maximum number of elements.
   size_t capacity_;
 
+  bool is_sorted_;
+
   DISALLOW_COPY_AND_ASSIGN(AtomicStack);
 };
 
 typedef AtomicStack<mirror::Object*> ObjectStack;
 
+}  // namespace accounting
+}  // namespace gc
 }  // namespace art
 
-#endif  // ART_SRC_MARK_STACK_H_
+#endif  // ART_SRC_GC_ACCOUNTING_ATOMIC_STACK_H_
diff --git a/src/gc/card_table-inl.h b/src/gc/accounting/card_table-inl.h
similarity index 98%
rename from src/gc/card_table-inl.h
rename to src/gc/accounting/card_table-inl.h
index 13590b7..1e75290 100644
--- a/src/gc/card_table-inl.h
+++ b/src/gc/accounting/card_table-inl.h
@@ -24,6 +24,8 @@
 #include "utils.h"
 
 namespace art {
+namespace gc {
+namespace accounting {
 
 static inline bool byte_cas(byte old_value, byte new_value, byte* address) {
   // Little endian means most significant byte is on the left.
@@ -204,6 +206,8 @@
       << " end: " << reinterpret_cast<void*>(mem_map_->End());
 }
 
+}  // namespace accounting
+}  // namespace gc
 }  // namespace art
 
 #endif  // ART_SRC_GC_CARDTABLE_INL_H_
diff --git a/src/gc/card_table.cc b/src/gc/accounting/card_table.cc
similarity index 95%
rename from src/gc/card_table.cc
rename to src/gc/accounting/card_table.cc
index 57824e9..4f2ae26 100644
--- a/src/gc/card_table.cc
+++ b/src/gc/accounting/card_table.cc
@@ -17,14 +17,17 @@
 #include "card_table.h"
 
 #include "base/logging.h"
-#include "gc/card_table-inl.h"
-#include "heap.h"
+#include "card_table-inl.h"
+#include "gc/heap.h"
+#include "gc/space/space.h"
 #include "heap_bitmap.h"
 #include "runtime.h"
-#include "space.h"
 #include "utils.h"
 
 namespace art {
+namespace gc {
+namespace accounting {
+
 /*
  * Maintain a card table from the write barrier. All writes of
  * non-NULL values to heap addresses should go through an entry in
@@ -82,7 +85,7 @@
   byte* __attribute__((unused)) end = mem_map_->End();
 }
 
-void CardTable::ClearSpaceCards(ContinuousSpace* space) {
+void CardTable::ClearSpaceCards(space::ContinuousSpace* space) {
   // TODO: clear just the range of the table that has been modified
   byte* card_start = CardFromAddr(space->Begin());
   byte* card_end = CardFromAddr(space->End()); // Make sure to round up.
@@ -116,4 +119,6 @@
   UNIMPLEMENTED(WARNING) << "Card table verification";
 }
 
+}  // namespace accounting
+}  // namespace gc
 }  // namespace art
diff --git a/src/gc/card_table.h b/src/gc/accounting/card_table.h
similarity index 95%
rename from src/gc/card_table.h
rename to src/gc/accounting/card_table.h
index 842fcc3..cf85d15 100644
--- a/src/gc/card_table.h
+++ b/src/gc/accounting/card_table.h
@@ -23,11 +23,21 @@
 #include "UniquePtr.h"
 
 namespace art {
+
 namespace mirror {
-class Object;
+  class Object;
 }  // namespace mirror
+
+namespace gc {
+
+namespace space {
+  class ContinuousSpace;
+}  // namespace space
+
 class Heap;
-class ContinuousSpace;
+
+namespace accounting {
+
 class SpaceBitmap;
 
 // Maintain a card table from the the write barrier. All writes of
@@ -105,7 +115,7 @@
   void ClearCardTable();
 
   // Resets all of the bytes in the card table which do not map to the image space.
-  void ClearSpaceCards(ContinuousSpace* space);
+  void ClearSpaceCards(space::ContinuousSpace* space);
 
   // Returns the first address in the heap which maps to this card.
   void* AddrFromCard(const byte *card_addr) const;
@@ -139,5 +149,8 @@
   const size_t offset_;
 };
 
+}  // namespace accounting
+}  // namespace gc
 }  // namespace art
+
 #endif  // ART_SRC_GC_CARDTABLE_H_
diff --git a/src/gc/accounting/heap_bitmap-inl.h b/src/gc/accounting/heap_bitmap-inl.h
new file mode 100644
index 0000000..8e3123b
--- /dev/null
+++ b/src/gc/accounting/heap_bitmap-inl.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_GC_ACCOUNTING_HEAP_BITMAP_INL_H_
+#define ART_SRC_GC_ACCOUNTING_HEAP_BITMAP_INL_H_
+
+#include "heap_bitmap.h"
+
+namespace art {
+namespace gc {
+namespace accounting {
+
+template <typename Visitor>
+inline void HeapBitmap::Visit(const Visitor& visitor) {
+  // TODO: C++0x auto
+  typedef std::vector<SpaceBitmap*>::iterator It;
+  for (It it = continuous_space_bitmaps_.begin(), end = continuous_space_bitmaps_.end();
+      it != end; ++it) {
+    SpaceBitmap* bitmap = *it;
+    bitmap->VisitMarkedRange(bitmap->HeapBegin(), bitmap->HeapLimit(), visitor, VoidFunctor());
+  }
+  // TODO: C++0x auto
+  typedef std::vector<SpaceSetMap*>::iterator It2;
+  DCHECK(discontinuous_space_sets_.begin() != discontinuous_space_sets_.end());
+  for (It2 it = discontinuous_space_sets_.begin(), end = discontinuous_space_sets_.end();
+      it != end; ++it) {
+    SpaceSetMap* set = *it;
+    set->Visit(visitor);
+  }
+}
+
+}  // namespace accounting
+}  // namespace gc
+}  // namespace art
+
+#endif  // ART_SRC_GC_ACCOUNTING_HEAP_BITMAP_INL_H_
diff --git a/src/gc/accounting/heap_bitmap.cc b/src/gc/accounting/heap_bitmap.cc
new file mode 100644
index 0000000..1bdc978
--- /dev/null
+++ b/src/gc/accounting/heap_bitmap.cc
@@ -0,0 +1,92 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "heap_bitmap.h"
+
+#include "gc/space/space.h"
+
+namespace art {
+namespace gc {
+namespace accounting {
+
+void HeapBitmap::ReplaceBitmap(SpaceBitmap* old_bitmap, SpaceBitmap* new_bitmap) {
+  // TODO: C++0x auto
+  typedef std::vector<SpaceBitmap*>::iterator It;
+  for (It it = continuous_space_bitmaps_.begin(), end = continuous_space_bitmaps_.end();
+      it != end; ++it) {
+    if (*it == old_bitmap) {
+      *it = new_bitmap;
+      return;
+    }
+  }
+  LOG(FATAL) << "bitmap " << static_cast<const void*>(old_bitmap) << " not found";
+}
+
+void HeapBitmap::ReplaceObjectSet(SpaceSetMap* old_set, SpaceSetMap* new_set) {
+  // TODO: C++0x auto
+  typedef std::vector<SpaceSetMap*>::iterator It;
+  for (It it = discontinuous_space_sets_.begin(), end = discontinuous_space_sets_.end();
+      it != end; ++it) {
+    if (*it == old_set) {
+      *it = new_set;
+      return;
+    }
+  }
+  LOG(FATAL) << "object set " << static_cast<const void*>(old_set) << " not found";
+}
+
+void HeapBitmap::AddContinuousSpaceBitmap(accounting::SpaceBitmap* bitmap) {
+  DCHECK(bitmap != NULL);
+
+  // Check for interval overlap.
+  typedef std::vector<SpaceBitmap*>::iterator It;
+  for (It it = continuous_space_bitmaps_.begin(), end = continuous_space_bitmaps_.end();
+      it != end; ++it) {
+    SpaceBitmap* cur_bitmap = *it;
+    // The new bitmap must not overlap any existing bitmap's interval.
+    CHECK(bitmap->HeapBegin() >= cur_bitmap->HeapLimit() ||
+          bitmap->HeapLimit() <= cur_bitmap->HeapBegin())
+        << "Bitmap " << bitmap->Dump() << " overlaps with existing bitmap " << cur_bitmap->Dump();
+  }
+  continuous_space_bitmaps_.push_back(bitmap);
+}
+
+void HeapBitmap::AddDiscontinuousObjectSet(SpaceSetMap* set) {
+  DCHECK(set != NULL);
+  discontinuous_space_sets_.push_back(set);
+}
+
+void HeapBitmap::Walk(SpaceBitmap::Callback* callback, void* arg) {
+  // TODO: C++0x auto
+  typedef std::vector<SpaceBitmap*>::iterator It;
+  for (It it = continuous_space_bitmaps_.begin(), end = continuous_space_bitmaps_.end();
+      it != end; ++it) {
+    SpaceBitmap* bitmap = *it;
+    bitmap->Walk(callback, arg);
+  }
+  // TODO: C++0x auto
+  typedef std::vector<SpaceSetMap*>::iterator It2;
+  DCHECK(discontinuous_space_sets_.begin() != discontinuous_space_sets_.end());
+  for (It2 it = discontinuous_space_sets_.begin(), end = discontinuous_space_sets_.end();
+      it != end; ++it) {
+    SpaceSetMap* set = *it;
+    set->Walk(callback, arg);
+  }
+}
+
+}  // namespace accounting
+}  // namespace gc
+}  // namespace art
diff --git a/src/gc/accounting/heap_bitmap.h b/src/gc/accounting/heap_bitmap.h
new file mode 100644
index 0000000..5ff40c6
--- /dev/null
+++ b/src/gc/accounting/heap_bitmap.h
@@ -0,0 +1,129 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_GC_ACCOUNTING_HEAP_BITMAP_H_
+#define ART_SRC_GC_ACCOUNTING_HEAP_BITMAP_H_
+
+#include "base/logging.h"
+#include "locks.h"
+#include "space_bitmap.h"
+
+namespace art {
+namespace gc {
+
+class Heap;
+
+namespace accounting {
+
+class HeapBitmap {
+ public:
+  bool Test(const mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+    SpaceBitmap* bitmap = GetContinuousSpaceBitmap(obj);
+    if (LIKELY(bitmap != NULL)) {
+      return bitmap->Test(obj);
+    } else {
+      return GetDiscontinuousSpaceObjectSet(obj) != NULL;
+    }
+  }
+
+  void Clear(const mirror::Object* obj) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+    SpaceBitmap* bitmap = GetContinuousSpaceBitmap(obj);
+    if (LIKELY(bitmap != NULL)) {
+      bitmap->Clear(obj);
+    } else {
+      SpaceSetMap* set = GetDiscontinuousSpaceObjectSet(obj);
+      DCHECK(set != NULL);
+      set->Clear(obj);
+    }
+  }
+
+  void Set(const mirror::Object* obj) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+    SpaceBitmap* bitmap = GetContinuousSpaceBitmap(obj);
+    if (LIKELY(bitmap != NULL)) {
+      bitmap->Set(obj);
+    } else {
+      SpaceSetMap* set = GetDiscontinuousSpaceObjectSet(obj);
+      DCHECK(set != NULL);
+      set->Set(obj);
+    }
+  }
+
+  SpaceBitmap* GetContinuousSpaceBitmap(const mirror::Object* obj) {
+    // TODO: C++0x auto
+    typedef std::vector<SpaceBitmap*>::iterator It;
+    for (It it = continuous_space_bitmaps_.begin(), end = continuous_space_bitmaps_.end();
+        it != end; ++it) {
+      SpaceBitmap* bitmap = *it;
+      if (bitmap->HasAddress(obj)) {
+        return bitmap;
+      }
+    }
+    return NULL;
+  }
+
+  SpaceSetMap* GetDiscontinuousSpaceObjectSet(const mirror::Object* obj) {
+    // TODO: C++0x auto
+    typedef std::vector<SpaceSetMap*>::iterator It;
+    for (It it = discontinuous_space_sets_.begin(), end = discontinuous_space_sets_.end();
+        it != end; ++it) {
+      SpaceSetMap* set = *it;
+      if (set->Test(obj)) {
+        return set;
+      }
+    }
+    return NULL;
+  }
+
+  void Walk(SpaceBitmap::Callback* callback, void* arg)
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+  template <typename Visitor>
+  void Visit(const Visitor& visitor)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  // Find and replace a bitmap pointer; this is used by the GC for bitmap swapping.
+  void ReplaceBitmap(SpaceBitmap* old_bitmap, SpaceBitmap* new_bitmap)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+  // Find and replace an object set pointer; this is used by the GC for bitmap swapping.
+  void ReplaceObjectSet(SpaceSetMap* old_set, SpaceSetMap* new_set)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+  HeapBitmap(Heap* heap) : heap_(heap) {
+  }
+
+ private:
+
+  const Heap* const heap_;
+
+  void AddContinuousSpaceBitmap(SpaceBitmap* bitmap);
+  void AddDiscontinuousObjectSet(SpaceSetMap* set);
+
+  // Bitmaps covering continuous spaces.
+  std::vector<SpaceBitmap*> continuous_space_bitmaps_;
+
+  // Sets covering discontinuous spaces.
+  std::vector<SpaceSetMap*> discontinuous_space_sets_;
+
+  friend class art::gc::Heap;
+};
+
+}  // namespace accounting
+}  // namespace gc
+}  // namespace art
+
+#endif  // ART_SRC_GC_ACCOUNTING_HEAP_BITMAP_H_
diff --git a/src/gc/accounting/mod_union_table-inl.h b/src/gc/accounting/mod_union_table-inl.h
new file mode 100644
index 0000000..656af94
--- /dev/null
+++ b/src/gc/accounting/mod_union_table-inl.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_GC_MOD_UNION_TABLE_INL_H_
+#define ART_SRC_GC_MOD_UNION_TABLE_INL_H_
+
+#include "mod_union_table.h"
+
+#include "gc/space/space.h"
+
+namespace art {
+namespace gc {
+namespace accounting {
+
+// A mod-union table to record image references to the Zygote and alloc space.
+class ModUnionTableToZygoteAllocspace : public ModUnionTableReferenceCache {
+ public:
+  ModUnionTableToZygoteAllocspace(Heap* heap) : ModUnionTableReferenceCache(heap) {
+  }
+
+  bool AddReference(const mirror::Object* /* obj */, const mirror::Object* ref) {
+    const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
+    typedef std::vector<space::ContinuousSpace*>::const_iterator It;
+    for (It it = spaces.begin(); it != spaces.end(); ++it) {
+      if ((*it)->Contains(ref)) {
+        return (*it)->IsDlMallocSpace();
+      }
+    }
+    // Assume it points to a large object.
+    // TODO: Check.
+    return true;
+  }
+};
+
+// A mod-union table to record Zygote references to the alloc space.
+class ModUnionTableToAllocspace : public ModUnionTableReferenceCache {
+ public:
+  ModUnionTableToAllocspace(Heap* heap) : ModUnionTableReferenceCache(heap) {
+  }
+
+  bool AddReference(const mirror::Object* /* obj */, const mirror::Object* ref) {
+    const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
+    typedef std::vector<space::ContinuousSpace*>::const_iterator It;
+    for (It it = spaces.begin(); it != spaces.end(); ++it) {
+      space::ContinuousSpace* space = *it;
+      if (space->Contains(ref)) {
+        // The allocation space is always considered for collection whereas the Zygote space is
+        // only considered for full collections.
+        return space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect;
+      }
+    }
+    // Assume it points to a large object.
+    // TODO: Check.
+    return true;
+  }
+};
+
+}  // namespace accounting
+}  // namespace gc
+}  // namespace art
+
+#endif  // ART_SRC_GC_ACCOUNTING_MOD_UNION_TABLE_INL_H_
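
AddReference is the only customization point of ModUnionTableReferenceCache: per
(obj, ref) pair it decides whether ref should be cached for obj's card. As a purely
hypothetical illustration (ModUnionTableToNonImage and the IsImageSpace call are
assumptions, not part of this change), a table that caches every reference except those
into image spaces would follow the same shape:

// Hypothetical example only: cache references into anything that is not an image space.
class ModUnionTableToNonImage : public ModUnionTableReferenceCache {
 public:
  ModUnionTableToNonImage(Heap* heap) : ModUnionTableReferenceCache(heap) {}

  bool AddReference(const mirror::Object* /* obj */, const mirror::Object* ref) {
    const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
    typedef std::vector<space::ContinuousSpace*>::const_iterator It;
    for (It it = spaces.begin(); it != spaces.end(); ++it) {
      if ((*it)->Contains(ref)) {
        // Image spaces are never collected, so references into them need not be cached.
        return !(*it)->IsImageSpace();
      }
    }
    // Not in any continuous space; assume a large object, as the tables above do.
    return true;
  }
};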
diff --git a/src/gc/accounting/mod_union_table.cc b/src/gc/accounting/mod_union_table.cc
new file mode 100644
index 0000000..05b68c4
--- /dev/null
+++ b/src/gc/accounting/mod_union_table.cc
@@ -0,0 +1,396 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "mod_union_table.h"
+
+#include "base/stl_util.h"
+#include "card_table-inl.h"
+#include "heap_bitmap.h"
+#include "gc/collector/mark_sweep-inl.h"
+#include "gc/heap.h"
+#include "gc/space/space.h"
+#include "mirror/object-inl.h"
+#include "mirror/class-inl.h"
+#include "mirror/field-inl.h"
+#include "mirror/object_array-inl.h"
+#include "space_bitmap-inl.h"
+#include "thread.h"
+#include "UniquePtr.h"
+
+using namespace art::mirror;
+
+namespace art {
+namespace gc {
+namespace accounting {
+
+class MarkIfReachesAllocspaceVisitor {
+ public:
+  explicit MarkIfReachesAllocspaceVisitor(Heap* const heap, accounting::SpaceBitmap* bitmap)
+    : heap_(heap),
+      bitmap_(bitmap) {
+  }
+
+  // Extra parameters are required since we use this same visitor signature for checking objects.
+  void operator ()(const Object* obj, const Object* ref, const MemberOffset& /* offset */,
+                   bool /* is_static */) const {
+    // TODO: Optimize?
+    // TODO: C++0x auto
+    const std::vector<space::ContinuousSpace*>& spaces = heap_->GetContinuousSpaces();
+    typedef std::vector<space::ContinuousSpace*>::const_iterator It;
+    for (It cur = spaces.begin(); cur != spaces.end(); ++cur) {
+      if ((*cur)->IsDlMallocSpace() && (*cur)->Contains(ref)) {
+        bitmap_->Set(obj);
+        break;
+      }
+    }
+  }
+
+ private:
+  Heap* const heap_;
+  accounting::SpaceBitmap* const bitmap_;
+};
+
+class ModUnionVisitor {
+ public:
+  explicit ModUnionVisitor(Heap* const heap, accounting::SpaceBitmap* bitmap)
+    : heap_(heap),
+      bitmap_(bitmap) {
+  }
+
+  void operator ()(const Object* obj) const
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
+                            Locks::mutator_lock_) {
+    DCHECK(obj != NULL);
+    // We don't have an early exit since we use the visitor pattern; adding an early exit
+    // could significantly speed this up.
+    MarkIfReachesAllocspaceVisitor visitor(heap_, bitmap_);
+    collector::MarkSweep::VisitObjectReferences(obj, visitor);
+  }
+ private:
+  Heap* const heap_;
+  accounting::SpaceBitmap* const bitmap_;
+};
+
+class ModUnionClearCardSetVisitor {
+ public:
+  explicit ModUnionClearCardSetVisitor(std::set<byte*>* const cleared_cards)
+    : cleared_cards_(cleared_cards) {
+  }
+
+  inline void operator ()(byte* card, byte expected_value, byte new_value) const {
+    if (expected_value == CardTable::kCardDirty) {
+      cleared_cards_->insert(card);
+    }
+  }
+
+ private:
+  std::set<byte*>* const cleared_cards_;
+};
+
+class ModUnionClearCardVisitor {
+ public:
+  explicit ModUnionClearCardVisitor(std::vector<byte*>* cleared_cards)
+    : cleared_cards_(cleared_cards) {
+  }
+
+  void operator ()(byte* card, byte expected_card, byte new_card) const {
+    if (expected_card == CardTable::kCardDirty) {
+      cleared_cards_->push_back(card);
+    }
+  }
+ private:
+  std::vector<byte*>* const cleared_cards_;
+};
+
+class ModUnionScanImageRootVisitor {
+ public:
+  ModUnionScanImageRootVisitor(collector::MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {
+  }
+
+  void operator ()(const Object* root) const
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    DCHECK(root != NULL);
+    mark_sweep_->ScanRoot(root);
+  }
+
+ private:
+  collector::MarkSweep* const mark_sweep_;
+};
+
+void ModUnionTableReferenceCache::ClearCards(space::ContinuousSpace* space) {
+  CardTable* card_table = GetHeap()->GetCardTable();
+  ModUnionClearCardSetVisitor visitor(&cleared_cards_);
+  // Clear dirty cards in this space and update the corresponding mod-union bits.
+  card_table->ModifyCardsAtomic(space->Begin(), space->End(), AgeCardVisitor(), visitor);
+}
+
+class AddToReferenceArrayVisitor {
+ public:
+  explicit AddToReferenceArrayVisitor(ModUnionTableReferenceCache* mod_union_table,
+                                      std::vector<const mirror::Object*>* references)
+    : mod_union_table_(mod_union_table),
+      references_(references) {
+  }
+
+  // Extra parameters are required since we use this same visitor signature for checking objects.
+  void operator ()(const Object* obj, const Object* ref, const MemberOffset& /* offset */,
+                   bool /* is_static */) const {
+    // Only add the reference if it is non null and fits our criteria.
+    if (ref != NULL && mod_union_table_->AddReference(obj, ref)) {
+      references_->push_back(ref);
+    }
+  }
+
+ private:
+  ModUnionTableReferenceCache* const mod_union_table_;
+  std::vector<const mirror::Object*>* const references_;
+};
+
+class ModUnionReferenceVisitor {
+ public:
+  explicit ModUnionReferenceVisitor(ModUnionTableReferenceCache* const mod_union_table,
+                                    std::vector<const mirror::Object*>* references)
+    : mod_union_table_(mod_union_table),
+      references_(references) {
+  }
+
+  void operator ()(const Object* obj) const
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
+    DCHECK(obj != NULL);
+    // We don't have an early exit since we use the visitor pattern; adding an early
+    // exit could significantly speed this up.
+    AddToReferenceArrayVisitor visitor(mod_union_table_, references_);
+    collector::MarkSweep::VisitObjectReferences(obj, visitor);
+  }
+ private:
+  ModUnionTableReferenceCache* const mod_union_table_;
+  std::vector<const mirror::Object*>* const references_;
+};
+
+class CheckReferenceVisitor {
+ public:
+  explicit CheckReferenceVisitor(ModUnionTableReferenceCache* mod_union_table,
+                                 const std::set<const Object*>& references)
+    : mod_union_table_(mod_union_table),
+      references_(references) {
+  }
+
+  // Extra parameters are required since we use this same visitor signature for checking objects.
+  // TODO: Fix this when annotalysis works with visitors.
+  void operator ()(const Object* obj, const Object* ref, const MemberOffset& /* offset */,
+                   bool /* is_static */) const
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
+    Heap* heap = mod_union_table_->GetHeap();
+    if (ref != NULL && mod_union_table_->AddReference(obj, ref) &&
+        references_.find(ref) == references_.end()) {
+      space::ContinuousSpace* from_space = heap->FindContinuousSpaceFromObject(obj, false);
+      space::ContinuousSpace* to_space = heap->FindContinuousSpaceFromObject(ref, false);
+      LOG(INFO) << "Object " << reinterpret_cast<const void*>(obj) << "(" << PrettyTypeOf(obj) << ")"
+                << "References " << reinterpret_cast<const void*>(ref)
+                << "(" << PrettyTypeOf(ref) << ") without being in mod-union table";
+      LOG(INFO) << "FromSpace " << from_space->GetName() << " type " << from_space->GetGcRetentionPolicy();
+      LOG(INFO) << "ToSpace " << to_space->GetName() << " type " << to_space->GetGcRetentionPolicy();
+      mod_union_table_->GetHeap()->DumpSpaces();
+      LOG(FATAL) << "FATAL ERROR";
+    }
+  }
+
+ private:
+  ModUnionTableReferenceCache* const mod_union_table_;
+  const std::set<const Object*>& references_;
+};
+
+class ModUnionCheckReferences {
+ public:
+  explicit ModUnionCheckReferences(ModUnionTableReferenceCache* mod_union_table,
+                                   const std::set<const Object*>& references)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+      : mod_union_table_(mod_union_table), references_(references) {
+  }
+
+  void operator ()(const Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
+    Locks::heap_bitmap_lock_->AssertSharedHeld(Thread::Current());
+    DCHECK(obj != NULL);
+    CheckReferenceVisitor visitor(mod_union_table_, references_);
+    collector::MarkSweep::VisitObjectReferences(obj, visitor);
+  }
+
+ private:
+  ModUnionTableReferenceCache* const mod_union_table_;
+  const std::set<const Object*>& references_;
+};
+
+void ModUnionTableReferenceCache::Verify() {
+  // Start by checking that everything in the mod union table is marked.
+  Heap* heap = GetHeap();
+  typedef SafeMap<const byte*, std::vector<const mirror::Object*> >::const_iterator It;
+  typedef std::vector<const mirror::Object*>::const_iterator It2;
+  for (It it = references_.begin(), end = references_.end(); it != end; ++it) {
+    for (It2 it_ref = it->second.begin(), end_ref = it->second.end(); it_ref != end_ref;
+        ++it_ref) {
+      CHECK(heap->IsLiveObjectLocked(*it_ref));
+    }
+  }
+
+  // Check the references of each clean card which is also in the mod union table.
+  CardTable* card_table = heap->GetCardTable();
+  for (It it = references_.begin(); it != references_.end(); ++it) {
+    const byte* card = &*it->first;
+    if (*card == CardTable::kCardClean) {
+      std::set<const Object*> reference_set;
+      for (It2 itr = it->second.begin(); itr != it->second.end(); ++itr) {
+        reference_set.insert(*itr);
+      }
+      ModUnionCheckReferences visitor(this, reference_set);
+      uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card));
+      uintptr_t end = start + CardTable::kCardSize;
+      space::ContinuousSpace* space =
+          heap->FindContinuousSpaceFromObject(reinterpret_cast<Object*>(start), false);
+      SpaceBitmap* live_bitmap = space->GetLiveBitmap();
+      live_bitmap->VisitMarkedRange(start, end, visitor, VoidFunctor());
+    }
+  }
+}
+
+void ModUnionTableReferenceCache::Dump(std::ostream& os) {
+  CardTable* card_table = heap_->GetCardTable();
+  typedef std::set<byte*>::const_iterator It;
+  os << "ModUnionTable cleared cards: [";
+  for (It it = cleared_cards_.begin(); it != cleared_cards_.end(); ++it) {
+    byte* card = *it;
+    uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card));
+    uintptr_t end = start + CardTable::kCardSize;
+    os << reinterpret_cast<void*>(start) << "-" << reinterpret_cast<void*>(end) << ",";
+  }
+  os << "]\nModUnionTable references: [";
+  typedef SafeMap<const byte*, std::vector<const mirror::Object*> >::const_iterator It2;
+  for (It2 it = references_.begin(); it != references_.end(); ++it) {
+    const byte* card = &*it->first;
+    uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card));
+    uintptr_t end = start + CardTable::kCardSize;
+    os << reinterpret_cast<void*>(start) << "-" << reinterpret_cast<void*>(end) << "->{";
+    typedef std::vector<const mirror::Object*>::const_iterator It3;
+    for (It3 itr = it->second.begin(); itr != it->second.end(); ++itr) {
+      os << reinterpret_cast<const void*>(*itr) << ",";
+    }
+    os << "},";
+  }
+  os << "]";
+}
+
+void ModUnionTableReferenceCache::Update() {
+  Heap* heap = GetHeap();
+  CardTable* card_table = heap->GetCardTable();
+
+  std::vector<const mirror::Object*> cards_references;
+  ModUnionReferenceVisitor visitor(this, &cards_references);
+
+  typedef std::set<byte*>::iterator It;
+  for (It it = cleared_cards_.begin(), cc_end = cleared_cards_.end(); it != cc_end; ++it) {
+    byte* card = *it;
+    // Clear and re-compute alloc space references associated with this card.
+    cards_references.clear();
+    uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card));
+    uintptr_t end = start + CardTable::kCardSize;
+    SpaceBitmap* live_bitmap =
+        heap->FindContinuousSpaceFromObject(reinterpret_cast<Object*>(start), false)->GetLiveBitmap();
+    live_bitmap->VisitMarkedRange(start, end, visitor, VoidFunctor());
+
+    // Update the corresponding references for the card.
+    // TODO: C++0x auto
+    SafeMap<const byte*, std::vector<const mirror::Object*> >::iterator
+        found = references_.find(card);
+    if (found == references_.end()) {
+      if (cards_references.empty()) {
+        // No reason to add empty array.
+        continue;
+      }
+      references_.Put(card, cards_references);
+    } else {
+      found->second = cards_references;
+    }
+  }
+  cleared_cards_.clear();
+}
+
+void ModUnionTableReferenceCache::MarkReferences(collector::MarkSweep* mark_sweep) {
+  // TODO: C++0x auto
+  size_t count = 0;
+
+  typedef SafeMap<const byte*, std::vector<const mirror::Object*> >::const_iterator It;
+  for (It it = references_.begin(); it != references_.end(); ++it) {
+    typedef std::vector<const mirror::Object*>::const_iterator It2;
+    for (It2 it_ref = it->second.begin(); it_ref != it->second.end(); ++it_ref) {
+      mark_sweep->MarkRoot(*it_ref);
+      ++count;
+    }
+  }
+  if (VLOG_IS_ON(heap)) {
+    VLOG(gc) << "Marked " << count << " references in mod union table";
+  }
+}
+
+void ModUnionTableCardCache::ClearCards(space::ContinuousSpace* space) {
+  CardTable* card_table = GetHeap()->GetCardTable();
+  ModUnionClearCardSetVisitor visitor(&cleared_cards_);
+  // Clear dirty cards in this space and update the corresponding mod-union bits.
+  card_table->ModifyCardsAtomic(space->Begin(), space->End(), AgeCardVisitor(), visitor);
+}
+
+// Mark all references to the alloc space(s).
+void ModUnionTableCardCache::MarkReferences(collector::MarkSweep* mark_sweep) {
+  CardTable* card_table = heap_->GetCardTable();
+  ModUnionScanImageRootVisitor visitor(mark_sweep);
+  typedef std::set<byte*>::const_iterator It;
+  It it = cleared_cards_.begin();
+  It cc_end = cleared_cards_.end();
+  if (it != cc_end) {
+    byte* card = *it;
+    uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card));
+    uintptr_t end = start + CardTable::kCardSize;
+    space::ContinuousSpace* cur_space =
+        heap_->FindContinuousSpaceFromObject(reinterpret_cast<Object*>(start), false);
+    accounting::SpaceBitmap* cur_live_bitmap = cur_space->GetLiveBitmap();
+    cur_live_bitmap->VisitMarkedRange(start, end, visitor, VoidFunctor());
+    for (++it; it != cc_end; ++it) {
+      card = *it;
+      start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card));
+      end = start + CardTable::kCardSize;
+      if (UNLIKELY(!cur_space->Contains(reinterpret_cast<Object*>(start)))) {
+        cur_space = heap_->FindContinuousSpaceFromObject(reinterpret_cast<Object*>(start), false);
+        cur_live_bitmap = cur_space->GetLiveBitmap();
+      }
+      cur_live_bitmap->VisitMarkedRange(start, end, visitor, VoidFunctor());
+    }
+  }
+}
+
+void ModUnionTableCardCache::Dump(std::ostream& os) {
+  CardTable* card_table = heap_->GetCardTable();
+  typedef std::set<byte*>::const_iterator It;
+  os << "ModUnionTable dirty cards: [";
+  for (It it = cleared_cards_.begin(); it != cleared_cards_.end(); ++it) {
+    byte* card = *it;
+    uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card));
+    uintptr_t end = start + CardTable::kCardSize;
+    os << reinterpret_cast<void*>(start) << "-" << reinterpret_cast<void*>(end) << ",";
+  }
+  os << "]";
+}
+
+}  // namespace accounting
+}  // namespace gc
+}  // namespace art
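
The scan ranges above all come from the same card arithmetic: AddrFromCard maps a
card-table byte back to the first heap address that card covers, and each card covers
CardTable::kCardSize bytes. A worked sketch, assuming the historical card size of
128 bytes:

#include <cstddef>
#include <cstdint>

// Worked example of the card-to-address math used by Verify, Update and MarkReferences.
void CardMathExample() {
  const size_t kCardSize = 128;             // Assumed value of CardTable::kCardSize.
  const uintptr_t heap_begin = 0x40000000;
  // A write to heap_begin + 1000 dirties card 1000 / 128 == 7. Re-scanning that card
  // visits the heap range [heap_begin + 896, heap_begin + 1024).
  const size_t card_index = 1000 / kCardSize;                        // == 7
  const uintptr_t scan_start = heap_begin + card_index * kCardSize;  // heap_begin + 896
  const uintptr_t scan_end = scan_start + kCardSize;                 // heap_begin + 1024
  (void)scan_end;
}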
diff --git a/src/gc/accounting/mod_union_table.h b/src/gc/accounting/mod_union_table.h
new file mode 100644
index 0000000..5d25e05
--- /dev/null
+++ b/src/gc/accounting/mod_union_table.h
@@ -0,0 +1,153 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_GC_ACCOUNTING_MOD_UNION_TABLE_H_
+#define ART_SRC_GC_ACCOUNTING_MOD_UNION_TABLE_H_
+
+#include "globals.h"
+#include "safe_map.h"
+
+#include <set>
+#include <vector>
+
+namespace art {
+namespace mirror {
+  class Object;
+}  // namespace mirror
+
+namespace gc {
+
+namespace collector {
+  class MarkSweep;
+}  // namespace collector
+namespace space {
+  class ContinuousSpace;
+  class Space;
+}  // namespace space
+
+class Heap;
+
+namespace accounting {
+
+class SpaceBitmap;
+class HeapBitmap;
+
+// The mod-union table is the union of modified cards. It is used to allow the card table to be
+// cleared between GC phases, reducing the number of dirty cards that need to be scanned.
+class ModUnionTable {
+ public:
+  ModUnionTable(Heap* heap) : heap_(heap) {
+  }
+
+  virtual ~ModUnionTable() {
+  }
+
+  // Clear cards which map to a memory range of a space. This doesn't immediately update the
+  // mod-union table, as updating the mod-union table may have an associated cost, such as
+  // determining references to track.
+  virtual void ClearCards(space::ContinuousSpace* space) = 0;
+
+  // Update the mod-union table using data stored by ClearCards. There may be multiple ClearCards
+  // before a call to update, for example, back-to-back sticky GCs.
+  virtual void Update() = 0;
+
+  // Mark the bitmaps for all references which are stored in the mod-union table.
+  virtual void MarkReferences(collector::MarkSweep* mark_sweep) = 0;
+
+  // Verification: sanity checks that we don't have clean cards which conflict with our cached data
+  // for said cards. Exclusive lock is required since verify sometimes uses
+  // SpaceBitmap::VisitMarkedRange and VisitMarkedRange can't know if the callback will modify the
+  // bitmap or not.
+  virtual void Verify() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) = 0;
+
+  virtual void Dump(std::ostream& os) = 0;
+
+  Heap* GetHeap() const {
+    return heap_;
+  }
+
+ protected:
+  Heap* const heap_;
+};
+
+// Reference caching implementation. Caches references pointing to alloc space(s) for each card.
+class ModUnionTableReferenceCache : public ModUnionTable {
+ public:
+  ModUnionTableReferenceCache(Heap* heap) : ModUnionTable(heap) {}
+  virtual ~ModUnionTableReferenceCache() {}
+
+  // Clear and store cards for a space.
+  void ClearCards(space::ContinuousSpace* space);
+
+  // Update table based on cleared cards.
+  void Update()
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  // Mark all references to the alloc space(s).
+  void MarkReferences(collector::MarkSweep* mark_sweep)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+  // Exclusive lock is required since verify uses SpaceBitmap::VisitMarkedRange and
+  // VisitMarkedRange can't know if the callback will modify the bitmap or not.
+  void Verify() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+  // Function that tells whether or not to add a reference to the table.
+  virtual bool AddReference(const mirror::Object* obj, const mirror::Object* ref) = 0;
+
+  void Dump(std::ostream& os);
+
+ protected:
+  // Cleared card array, used to update the mod-union table.
+  std::set<byte*> cleared_cards_;
+
+  // Maps from dirty cards to their corresponding alloc space references.
+  SafeMap<const byte*, std::vector<const mirror::Object*> > references_;
+};
+
+// Card caching implementation. Keeps track only of which cards were cleared.
+class ModUnionTableCardCache : public ModUnionTable {
+ public:
+  ModUnionTableCardCache(Heap* heap) : ModUnionTable(heap) {}
+  virtual ~ModUnionTableCardCache() {}
+
+  // Clear and store cards for a space.
+  void ClearCards(space::ContinuousSpace* space);
+
+  // Nothing to update as all dirty cards were placed into cleared cards during clearing.
+  void Update() {}
+
+  // Mark all references to the alloc space(s).
+  void MarkReferences(collector::MarkSweep* mark_sweep)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  // Nothing to verify.
+  void Verify() {}
+
+  void Dump(std::ostream& os);
+
+ protected:
+  // Cleared card array, used to update the mod-union table.
+  std::set<byte*> cleared_cards_;
+};
+
+}  // namespace accounting
+}  // namespace gc
+}  // namespace art
+
+#endif  // ART_SRC_GC_ACCOUNTING_MOD_UNION_TABLE_H_
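
The intended lifecycle ties the three virtual methods together: cards are cleared (and
remembered) while the card table is aged, the table is updated from the remembered
cards, and the cached data is then treated as roots. A sketch of one GC cycle; the
function and parameter names here are illustrative, the real call sites being in Heap
and the collectors:

// Sketch of the per-GC lifecycle of a mod-union table.
void ExampleGcCycle(ModUnionTable* table,
                    space::ContinuousSpace* immune_space,
                    collector::MarkSweep* mark_sweep) {
  // 1. While clearing the card table for the immune space, remember which cards were dirty.
  table->ClearCards(immune_space);
  // 2. Recompute cached data for those cards. Several ClearCards calls (e.g. back-to-back
  //    sticky GCs) may precede a single Update.
  table->Update();
  // 3. Mark everything the table references so the GC treats it as reachable.
  table->MarkReferences(mark_sweep);
}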
diff --git a/src/gc/space_bitmap-inl.h b/src/gc/accounting/space_bitmap-inl.h
similarity index 94%
rename from src/gc/space_bitmap-inl.h
rename to src/gc/accounting/space_bitmap-inl.h
index dd91403..a4fd330 100644
--- a/src/gc/space_bitmap-inl.h
+++ b/src/gc/accounting/space_bitmap-inl.h
@@ -14,13 +14,16 @@
  * limitations under the License.
  */
 
-#ifndef ART_SRC_GC_SPACE_BITMAP_INL_H_
-#define ART_SRC_GC_SPACE_BITMAP_INL_H_
+#ifndef ART_SRC_GC_ACCOUNTING_SPACE_BITMAP_INL_H_
+#define ART_SRC_GC_ACCOUNTING_SPACE_BITMAP_INL_H_
 
 #include "base/logging.h"
 #include "cutils/atomic-inline.h"
+#include "utils.h"
 
 namespace art {
+namespace gc {
+namespace accounting {
 
 inline bool SpaceBitmap::AtomicTestAndSet(const mirror::Object* obj) {
   uintptr_t addr = reinterpret_cast<uintptr_t>(obj);
@@ -136,6 +139,9 @@
   }
   return (old_word & mask) != 0;
 }
+
+}  // namespace accounting
+}  // namespace gc
 }  // namespace art
 
-#endif  // ART_SRC_GC_SPACE_BITMAP_INL_H_
+#endif  // ART_SRC_GC_ACCOUNTING_SPACE_BITMAP_INL_H_
diff --git a/src/gc/space_bitmap.cc b/src/gc/accounting/space_bitmap.cc
similarity index 96%
rename from src/gc/space_bitmap.cc
rename to src/gc/accounting/space_bitmap.cc
index 773aa1e..19f1128 100644
--- a/src/gc/space_bitmap.cc
+++ b/src/gc/accounting/space_bitmap.cc
@@ -14,19 +14,21 @@
  * limitations under the License.
  */
 
-#include "heap_bitmap.h"
-
 #include "base/logging.h"
 #include "dex_file-inl.h"
+#include "heap_bitmap.h"
 #include "mirror/class-inl.h"
 #include "mirror/field-inl.h"
 #include "mirror/object-inl.h"
 #include "mirror/object_array-inl.h"
+#include "object_utils.h"
 #include "space_bitmap-inl.h"
 #include "UniquePtr.h"
 #include "utils.h"
 
 namespace art {
+namespace gc {
+namespace accounting {
 
 std::string SpaceBitmap::GetName() const {
   return name_;
@@ -36,6 +38,12 @@
   name_ = name;
 }
 
+std::string SpaceBitmap::Dump() const {
+  return StringPrintf("%s: %p-%p", name_.c_str(),
+                      reinterpret_cast<void*>(HeapBegin()),
+                      reinterpret_cast<void*>(HeapLimit()));
+}
+
 void SpaceSetMap::Walk(SpaceBitmap::Callback* callback, void* arg) {
   for (Objects::iterator it = contained_.begin(); it != contained_.end(); ++it) {
     callback(const_cast<mirror::Object*>(*it), arg);
@@ -72,8 +80,6 @@
   // mem_map_->Trim(reinterpret_cast<byte*>(heap_begin_ + bitmap_size_));
 }
 
-// Fill the bitmap with zeroes.  Returns the bitmap's memory to the
-// system as a side-effect.
 void SpaceBitmap::Clear() {
   if (bitmap_begin_ != NULL) {
     // This returns the memory to the system.  Successive page faults
@@ -164,14 +170,6 @@
   }
 }
 
-}  // namespace art
-
-// Support needed for in order traversal
-#include "mirror/object.h"
-#include "object_utils.h"
-
-namespace art {
-
 static void WalkFieldsInOrder(SpaceBitmap* visited, SpaceBitmap::Callback* callback, mirror::Object* obj,
                               void* arg);
 
@@ -273,10 +271,6 @@
   name_ = name;
 }
 
-SpaceSetMap::SpaceSetMap(const std::string& name) : name_(name) {
-
-}
-
 void SpaceSetMap::CopyFrom(const SpaceSetMap& space_set) {
   contained_ = space_set.contained_;
 }
@@ -287,6 +281,8 @@
     << "begin=" << reinterpret_cast<const void*>(bitmap.HeapBegin())
     << ",end=" << reinterpret_cast<const void*>(bitmap.HeapLimit())
     << "]";
-  }
+}
 
+}  // namespace accounting
+}  // namespace gc
 }  // namespace art
diff --git a/src/gc/space_bitmap.h b/src/gc/accounting/space_bitmap.h
similarity index 92%
rename from src/gc/space_bitmap.h
rename to src/gc/accounting/space_bitmap.h
index 6bc06d6..bb487d8 100644
--- a/src/gc/space_bitmap.h
+++ b/src/gc/accounting/space_bitmap.h
@@ -14,8 +14,8 @@
  * limitations under the License.
  */
 
-#ifndef ART_SRC_GC_SPACE_BITMAP_H_
-#define ART_SRC_GC_SPACE_BITMAP_H_
+#ifndef ART_SRC_GC_ACCOUNTING_SPACE_BITMAP_H_
+#define ART_SRC_GC_ACCOUNTING_SPACE_BITMAP_H_
 
 #include "locks.h"
 #include "globals.h"
@@ -28,12 +28,17 @@
 #include <vector>
 
 namespace art {
+
 namespace mirror {
-class Object;
+  class Object;
 }  // namespace mirror
 
+namespace gc {
+namespace accounting {
+
 class SpaceBitmap {
  public:
+  // Alignment of objects within spaces.
   static const size_t kAlignment = 8;
 
   typedef void Callback(mirror::Object* obj, void* arg);
@@ -52,7 +57,7 @@
   // <index> is the index of .bits that contains the bit representing
   //         <offset>.
   static size_t OffsetToIndex(size_t offset) {
-      return offset / kAlignment / kBitsPerWord;
+    return offset / kAlignment / kBitsPerWord;
   }
 
   static uintptr_t IndexToOffset(size_t index) {
@@ -75,6 +80,7 @@
   // Returns true if the object was previously marked.
   bool AtomicTestAndSet(const mirror::Object* obj);
 
+  // Fill the bitmap with zeroes.  Returns the bitmap's memory to the system as a side-effect.
   void Clear();
 
   bool Test(const mirror::Object* obj) const;
@@ -160,6 +166,8 @@
   std::string GetName() const;
   void SetName(const std::string& name);
 
+  std::string Dump() const;
+
   const void* GetObjectWordAddress(const mirror::Object* obj) const {
     uintptr_t addr = reinterpret_cast<uintptr_t>(obj);
     const uintptr_t offset = addr - heap_begin_;
@@ -236,7 +244,8 @@
     }
   }
 
-  SpaceSetMap(const std::string& name);
+  SpaceSetMap(const std::string& name) : name_(name) {}
+  ~SpaceSetMap() {}
 
   Objects& GetObjects() {
     return contained_;
@@ -249,6 +258,8 @@
 
 std::ostream& operator << (std::ostream& stream, const SpaceBitmap& bitmap);
 
+}  // namespace accounting
+}  // namespace gc
 }  // namespace art
 
-#endif  // ART_SRC_GC_SPACE_BITMAP_H_
+#endif  // ART_SRC_GC_ACCOUNTING_SPACE_BITMAP_H_
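
With kAlignment == 8, each bit in the bitmap stands for one 8-byte-aligned heap
address, so one bitmap word covers kAlignment * kBitsPerWord bytes of heap. A worked
example, assuming 32-bit words:

#include <cstddef>
#include <cstdint>

// OffsetToIndex / IndexToOffset worked through, assuming kBitsPerWord == 32:
// one word covers 8 * 32 == 256 bytes of heap.
void BitmapIndexExample() {
  const size_t offset = 1000;                // Byte offset from heap_begin_.
  const size_t index = offset / 8 / 32;      // OffsetToIndex(1000) == 3.
  const uintptr_t base = static_cast<uintptr_t>(index) * 8 * 32;  // IndexToOffset(3) == 768.
  // Offsets 768..1023 all map to word 3; the bit within the word disambiguates them.
  (void)base;
}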
diff --git a/src/gc/space_bitmap_test.cc b/src/gc/accounting/space_bitmap_test.cc
similarity index 96%
rename from src/gc/space_bitmap_test.cc
rename to src/gc/accounting/space_bitmap_test.cc
index 4645659..d00d7c2 100644
--- a/src/gc/space_bitmap_test.cc
+++ b/src/gc/accounting/space_bitmap_test.cc
@@ -17,7 +17,6 @@
 #include "space_bitmap.h"
 
 #include "common_test.h"
-#include "dlmalloc.h"
 #include "globals.h"
 #include "space_bitmap-inl.h"
 #include "UniquePtr.h"
@@ -25,6 +24,8 @@
 #include <stdint.h>
 
 namespace art {
+namespace gc {
+namespace accounting {
 
 class SpaceBitmapTest : public CommonTest {
  public:
@@ -87,4 +88,6 @@
   }
 }
 
+}  // namespace accounting
+}  // namespace gc
 }  // namespace art
diff --git a/src/dlmalloc.cc b/src/gc/allocator/dlmalloc.cc
similarity index 68%
rename from src/dlmalloc.cc
rename to src/gc/allocator/dlmalloc.cc
index 1d62d20..7584b6e 100644
--- a/src/dlmalloc.cc
+++ b/src/gc/allocator/dlmalloc.cc
@@ -45,3 +45,28 @@
 static void art_heap_usage_error(const char* function, void* p) {
   LOG(FATAL) << "Incorrect use of function '" << function << "' argument " << p << " not expected";
 }
+
+#include "globals.h"
+#include "utils.h"
+#include <sys/mman.h>
+
+using namespace art;
+extern "C" void DlmallocMadviseCallback(void* start, void* end, size_t used_bytes, void* arg) {
+  // Is this chunk in use?
+  if (used_bytes != 0) {
+    return;
+  }
+  // Do we have any whole pages to give back?
+  start = reinterpret_cast<void*>(RoundUp(reinterpret_cast<uintptr_t>(start), kPageSize));
+  end = reinterpret_cast<void*>(RoundDown(reinterpret_cast<uintptr_t>(end), kPageSize));
+  if (end > start) {
+    size_t length = reinterpret_cast<byte*>(end) - reinterpret_cast<byte*>(start);
+    int rc = madvise(start, length, MADV_DONTNEED);
+    if (UNLIKELY(rc != 0)) {
+      errno = rc;
+      PLOG(FATAL) << "madvise failed during heap trimming";
+    }
+    size_t* reclaimed = reinterpret_cast<size_t*>(arg);
+    *reclaimed += length;
+  }
+}
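
The callback is meant to be driven by one of the inspect-all entry points named in
dlmalloc.h, with a size_t accumulator threaded through arg. A sketch of the trimming
flow under that assumption (the real call site, a heap-trimming path, is outside this
hunk; mspace is dlmalloc's opaque void* handle):

// Sketch: walk every chunk in the mspace; the callback madvises whole unused pages
// back to the kernel and accumulates the reclaimed byte count into 'reclaimed'.
size_t TrimExample(void* mspace_handle) {
  size_t reclaimed = 0;
  mspace_inspect_all(mspace_handle, DlmallocMadviseCallback, &reclaimed);
  return reclaimed;
}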
diff --git a/src/dlmalloc.h b/src/gc/allocator/dlmalloc.h
similarity index 76%
rename from src/dlmalloc.h
rename to src/gc/allocator/dlmalloc.h
index b6759a0..6b02a44 100644
--- a/src/dlmalloc.h
+++ b/src/gc/allocator/dlmalloc.h
@@ -14,8 +14,8 @@
  * limitations under the License.
  */
 
-#ifndef ART_SRC_DLMALLOC_H_
-#define ART_SRC_DLMALLOC_H_
+#ifndef ART_SRC_GC_ALLOCATOR_DLMALLOC_H_
+#define ART_SRC_GC_ALLOCATOR_DLMALLOC_H_
 
 // Configure dlmalloc for mspaces.
 #define HAVE_MMAP 0
@@ -33,4 +33,8 @@
 extern "C" void dlmalloc_inspect_all(void(*handler)(void*, void *, size_t, void*), void* arg);
 extern "C" int  dlmalloc_trim(size_t);
 
-#endif  // ART_SRC_DLMALLOC_H_
+// Callback for dlmalloc_inspect_all or mspace_inspect_all that will madvise(2) unused
+// pages back to the kernel.
+extern "C" void DlmallocMadviseCallback(void* start, void* end, size_t used_bytes, void* /*arg*/);
+
+#endif  // ART_SRC_GC_ALLOCATOR_DLMALLOC_H_
diff --git a/src/gc/collector/garbage_collector.cc b/src/gc/collector/garbage_collector.cc
new file mode 100644
index 0000000..7412835
--- /dev/null
+++ b/src/gc/collector/garbage_collector.cc
@@ -0,0 +1,141 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "garbage_collector.h"
+
+#include "base/logging.h"
+#include "base/mutex-inl.h"
+#include "gc/accounting/heap_bitmap.h"
+#include "gc/space/large_object_space.h"
+#include "gc/space/space-inl.h"
+#include "thread.h"
+#include "thread_list.h"
+
+namespace art {
+namespace gc {
+namespace collector {
+
+GarbageCollector::GarbageCollector(Heap* heap, const std::string& name)
+    : heap_(heap),
+      name_(name),
+      verbose_(VLOG_IS_ON(heap)),
+      duration_ns_(0),
+      timings_(name_.c_str(), true, verbose_),
+      cumulative_timings_(name) {
+  ResetCumulativeStatistics();
+}
+
+bool GarbageCollector::HandleDirtyObjectsPhase() {
+  DCHECK(IsConcurrent());
+  return true;
+}
+
+void GarbageCollector::RegisterPause(uint64_t nano_length) {
+  pause_times_.push_back(nano_length);
+}
+
+void GarbageCollector::ResetCumulativeStatistics() {
+  cumulative_timings_.Reset();
+  total_time_ns_ = 0;
+  total_paused_time_ns_ = 0;
+  total_freed_objects_ = 0;
+  total_freed_bytes_ = 0;
+}
+
+void GarbageCollector::Run() {
+  Thread* self = Thread::Current();
+  ThreadList* thread_list = Runtime::Current()->GetThreadList();
+
+  uint64_t start_time = NanoTime();
+  pause_times_.clear();
+  duration_ns_ = 0;
+
+  InitializePhase();
+
+  if (!IsConcurrent()) {
+    // Pause is the entire length of the GC.
+    uint64_t pause_start = NanoTime();
+    thread_list->SuspendAll();
+    MarkingPhase();
+    ReclaimPhase();
+    thread_list->ResumeAll();
+    uint64_t pause_end = NanoTime();
+    pause_times_.push_back(pause_end - pause_start);
+  } else {
+    {
+      ReaderMutexLock mu(self, *Locks::mutator_lock_);
+      MarkingPhase();
+    }
+    bool done = false;
+    while (!done) {
+      uint64_t pause_start = NanoTime();
+      thread_list->SuspendAll();
+      done = HandleDirtyObjectsPhase();
+      thread_list->ResumeAll();
+      uint64_t pause_end = NanoTime();
+      pause_times_.push_back(pause_end - pause_start);
+    }
+    {
+      ReaderMutexLock mu(self, *Locks::mutator_lock_);
+      ReclaimPhase();
+    }
+  }
+
+  uint64_t end_time = NanoTime();
+  duration_ns_ = end_time - start_time;
+
+  FinishPhase();
+}
+
+void GarbageCollector::SwapBitmaps() {
+  // Swap the live and mark bitmaps for each alloc space. This is needed since sweep re-swaps
+  // these bitmaps. The bitmap swapping is an optimization so that we do not need to clear the live
+  // bits of dead objects in the live bitmap.
+  const GcType gc_type = GetGcType();
+  const std::vector<space::ContinuousSpace*>& cont_spaces = GetHeap()->GetContinuousSpaces();
+  // TODO: C++0x
+  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
+  for (It it = cont_spaces.begin(), end = cont_spaces.end(); it != end; ++it) {
+    space::ContinuousSpace* space = *it;
+    // We never allocate into zygote spaces.
+    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect ||
+        (gc_type == kGcTypeFull &&
+         space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect)) {
+      accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
+      accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
+      if (live_bitmap != mark_bitmap) {
+        heap_->GetLiveBitmap()->ReplaceBitmap(live_bitmap, mark_bitmap);
+        heap_->GetMarkBitmap()->ReplaceBitmap(mark_bitmap, live_bitmap);
+        space->AsDlMallocSpace()->SwapBitmaps();
+      }
+    }
+  }
+  const std::vector<space::DiscontinuousSpace*>& disc_spaces = GetHeap()->GetDiscontinuousSpaces();
+  // TODO: C++0x
+  typedef std::vector<space::DiscontinuousSpace*>::const_iterator It2;
+  for (It2 it = disc_spaces.begin(), end = disc_spaces.end(); it != end; ++it) {
+    space::LargeObjectSpace* space = down_cast<space::LargeObjectSpace*>(*it);
+    accounting::SpaceSetMap* live_set = space->GetLiveObjects();
+    accounting::SpaceSetMap* mark_set = space->GetMarkObjects();
+    heap_->GetLiveBitmap()->ReplaceObjectSet(live_set, mark_set);
+    heap_->GetMarkBitmap()->ReplaceObjectSet(mark_set, live_set);
+    space->SwapBitmaps();
+  }
+}
+
+}  // namespace collector
+}  // namespace gc
+}  // namespace art
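
Run() is a template method: subclasses only supply the phase hooks and the two
predicates. A minimal sketch of a collector plugging into it, where NoopCollector is an
invented name and MarkingPhase/ReclaimPhase are assumed to be pure virtual hooks
alongside InitializePhase and FinishPhase, as Run() implies:

// Trivial, non-concurrent collector: Run() takes the suspend-all path and brackets
// MarkingPhase/ReclaimPhase with a single pause.
class NoopCollector : public GarbageCollector {
 public:
  explicit NoopCollector(Heap* heap) : GarbageCollector(heap, "noop") {}

  bool IsConcurrent() const { return false; }
  GcType GetGcType() const { return kGcTypeFull; }

 protected:
  void InitializePhase() {}  // Runs without mutators paused.
  void MarkingPhase() {}     // Mutators suspended in the non-concurrent case.
  void ReclaimPhase() {}     // Still suspended.
  void FinishPhase() {}      // Runs after mutators resume.
};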
diff --git a/src/gc/garbage_collector.h b/src/gc/collector/garbage_collector.h
similarity index 61%
rename from src/gc/garbage_collector.h
rename to src/gc/collector/garbage_collector.h
index a1014c2..1ab3957 100644
--- a/src/gc/garbage_collector.h
+++ b/src/gc/collector/garbage_collector.h
@@ -17,28 +17,38 @@
 #ifndef ART_SRC_GC_GARBAGE_COLLECTOR_H_
 #define ART_SRC_GC_GARBAGE_COLLECTOR_H_
 
+#include "gc_type.h"
 #include "locks.h"
+#include "base/timing_logger.h"
 
 #include <stdint.h>
 #include <vector>
 
 namespace art {
+namespace gc {
 
 class Heap;
 
+namespace collector {
+
 class GarbageCollector {
  public:
   // Returns true iff the garbage collector is concurrent.
   virtual bool IsConcurrent() const = 0;
 
-  GarbageCollector(Heap* heap);
+  GarbageCollector(Heap* heap, const std::string& name);
+  virtual ~GarbageCollector() { }
 
-  virtual ~GarbageCollector();
+  const char* GetName() const {
+    return name_.c_str();
+  }
+
+  virtual GcType GetGcType() const = 0;
 
   // Run the garbage collector.
   void Run();
 
-  Heap* GetHeap() {
+  Heap* GetHeap() const {
     return heap_;
   }
 
@@ -48,16 +58,28 @@
   }
 
   // Returns how long the GC took to complete in nanoseconds.
-  uint64_t GetDuration() const {
-    return duration_;
+  uint64_t GetDurationNs() const {
+    return duration_ns_;
   }
 
-
-  virtual std::string GetName() const = 0;
-
   void RegisterPause(uint64_t nano_length);
 
+  base::NewTimingLogger& GetTimings() {
+    return timings_;
+  }
+
+  CumulativeLogger& GetCumulativeTimings() {
+    return cumulative_timings_;
+  }
+
+  void ResetCumulativeStatistics();
+
+  // Swap the live and mark bitmaps of spaces that are active for the collector. For partial GC,
+  // this is the allocation space; for full GC we also swap the zygote bitmaps.
+  void SwapBitmaps() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
  protected:
+
   // The initial phase. Done without mutators paused.
   virtual void InitializePhase() = 0;
 
@@ -73,11 +95,28 @@
   // Called after the GC is finished. Done without mutators paused.
   virtual void FinishPhase() = 0;
 
-  Heap* heap_;
+  Heap* const heap_;
+
+  std::string name_;
+
+  const bool verbose_;
+
+  uint64_t duration_ns_;
+  base::NewTimingLogger timings_;
+
+  // Cumulative statistics.
+  uint64_t total_time_ns_;
+  uint64_t total_paused_time_ns_;
+  uint64_t total_freed_objects_;
+  uint64_t total_freed_bytes_;
+
+  CumulativeLogger cumulative_timings_;
+
   std::vector<uint64_t> pause_times_;
-  uint64_t duration_;
 };
 
+}  // namespace collector
+}  // namespace gc
 }  // namespace art
 
 #endif  // ART_SRC_GC_GARBAGE_COLLECTOR_H_
diff --git a/src/gc/collector/gc_type.cc b/src/gc/collector/gc_type.cc
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/gc/collector/gc_type.cc
diff --git a/src/gc/collector/gc_type.h b/src/gc/collector/gc_type.h
new file mode 100644
index 0000000..bb25bb9
--- /dev/null
+++ b/src/gc/collector/gc_type.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_GC_COLLECTOR_GC_TYPE_H_
+#define ART_SRC_GC_COLLECTOR_GC_TYPE_H_
+
+#include <ostream>
+
+namespace art {
+namespace gc {
+namespace collector {
+
+// The type of collection to be performed. The ordering of the enum matters: it is used to
+// determine which GCs are run first.
+enum GcType {
+  // Placeholder for when no GC has been performed.
+  kGcTypeNone,
+  // Sticky mark bits GC that attempts to only free objects allocated since the last GC.
+  kGcTypeSticky,
+  // Partial GC that marks the application heap but not the Zygote.
+  kGcTypePartial,
+  // Full GC that marks and frees in both the application and Zygote heap.
+  kGcTypeFull,
+  // Number of different GC types.
+  kGcTypeMax,
+};
+std::ostream& operator<<(std::ostream& os, const GcType& policy);
+
+}  // namespace collector
+}  // namespace gc
+}  // namespace art
+
+#endif  // ART_SRC_GC_COLLECTOR_GC_TYPE_H_
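
gc_type.cc is added empty above, so the operator<< declared here must be defined
elsewhere. A plausible definition, written purely as an assumption, would switch on the
enumerators:

// Assumed sketch of the declared operator<<; not part of this change.
std::ostream& operator<<(std::ostream& os, const GcType& policy) {
  switch (policy) {
    case kGcTypeNone:    os << "None";    break;
    case kGcTypeSticky:  os << "Sticky";  break;
    case kGcTypePartial: os << "Partial"; break;
    case kGcTypeFull:    os << "Full";    break;
    default: os << "Unknown GcType " << static_cast<int>(policy); break;
  }
  return os;
}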
diff --git a/src/gc/mark_sweep-inl.h b/src/gc/collector/mark_sweep-inl.h
similarity index 97%
rename from src/gc/mark_sweep-inl.h
rename to src/gc/collector/mark_sweep-inl.h
index 7265023..ea9fced 100644
--- a/src/gc/mark_sweep-inl.h
+++ b/src/gc/collector/mark_sweep-inl.h
@@ -17,12 +17,16 @@
 #ifndef ART_SRC_GC_MARK_SWEEP_INL_H_
 #define ART_SRC_GC_MARK_SWEEP_INL_H_
 
-#include "heap.h"
+#include "gc/collector/mark_sweep.h"
+
+#include "gc/heap.h"
 #include "mirror/class.h"
 #include "mirror/field.h"
 #include "mirror/object_array.h"
 
 namespace art {
+namespace gc {
+namespace collector {
 
 template <typename MarkVisitor>
 inline void MarkSweep::ScanObjectVisit(const mirror::Object* obj, const MarkVisitor& visitor) {
@@ -154,6 +158,8 @@
   }
 }
 
+}  // namespace collector
+}  // namespace gc
 }  // namespace art
 
 #endif  // ART_SRC_GC_MARK_SWEEP_INL_H_
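
ScanObjectVisit is templated on a MarkVisitor functor; throughout this change the
visitor contract is the four-argument form (obj, ref, offset, is_static) seen in the
mod-union code above. A conforming visitor, invented purely for illustration:

// Counts the non-null outgoing references of each scanned object.
class CountReferencesVisitor {
 public:
  explicit CountReferencesVisitor(size_t* count) : count_(count) {}

  void operator ()(const mirror::Object* /* obj */, const mirror::Object* ref,
                   const MemberOffset& /* offset */, bool /* is_static */) const {
    if (ref != NULL) {
      ++*count_;
    }
  }

 private:
  size_t* const count_;
};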
diff --git a/src/gc/mark_sweep.cc b/src/gc/collector/mark_sweep.cc
similarity index 76%
rename from src/gc/mark_sweep.cc
rename to src/gc/collector/mark_sweep.cc
index 14d604a..d54fec6 100644
--- a/src/gc/mark_sweep.cc
+++ b/src/gc/collector/mark_sweep.cc
@@ -25,13 +25,16 @@
 #include "base/macros.h"
 #include "base/mutex-inl.h"
 #include "base/timing_logger.h"
-#include "card_table.h"
-#include "card_table-inl.h"
-#include "heap.h"
+#include "gc/accounting/card_table-inl.h"
+#include "gc/accounting/heap_bitmap.h"
+#include "gc/accounting/space_bitmap-inl.h"
+#include "gc/heap.h"
+#include "gc/space/image_space.h"
+#include "gc/space/large_object_space.h"
+#include "gc/space/space-inl.h"
 #include "indirect_reference_table.h"
 #include "intern_table.h"
 #include "jni_internal.h"
-#include "large_object_space.h"
 #include "monitor.h"
 #include "mark_sweep-inl.h"
 #include "mirror/class-inl.h"
@@ -43,15 +46,15 @@
 #include "mirror/object_array.h"
 #include "mirror/object_array-inl.h"
 #include "runtime.h"
-#include "space.h"
-#include "space_bitmap-inl.h"
-#include "thread.h"
+#include "thread-inl.h"
 #include "thread_list.h"
 #include "verifier/method_verifier.h"
 
 using namespace art::mirror;
 
 namespace art {
+namespace gc {
+namespace collector {
 
 // Performance options.
 static const bool kParallelMarkStack = true;
@@ -68,7 +71,6 @@
 class SetFingerVisitor {
  public:
   SetFingerVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {
-
   }
 
   void operator ()(void* finger) const {
@@ -79,13 +81,7 @@
   MarkSweep* const mark_sweep_;
 };
 
-std::string MarkSweep::GetName() const {
-  std::ostringstream ss;
-  ss << (IsConcurrent() ? "Concurrent" : "") << GetGcType();
-  return ss.str();
-}
-
-void MarkSweep::ImmuneSpace(ContinuousSpace* space) {
+void MarkSweep::ImmuneSpace(space::ContinuousSpace* space) {
   // Bind live to mark bitmap if necessary.
   if (space->GetLiveBitmap() != space->GetMarkBitmap()) {
     BindLiveToMarkBitmap(space);
@@ -97,54 +93,68 @@
     SetImmuneRange(reinterpret_cast<Object*>(space->Begin()),
                    reinterpret_cast<Object*>(space->End()));
   } else {
-      const Spaces& spaces = GetHeap()->GetSpaces();
-      const ContinuousSpace* prev_space = NULL;
-      // Find out if the previous space is immune.
-      // TODO: C++0x
-      for (Spaces::const_iterator it = spaces.begin(); it != spaces.end(); ++it) {
-        if (*it == space) {
-          break;
-        }
-        prev_space = *it;
+    const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
+    const space::ContinuousSpace* prev_space = NULL;
+    // Find out if the previous space is immune.
+    // TODO: C++0x
+    typedef std::vector<space::ContinuousSpace*>::const_iterator It;
+    for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
+      if (*it == space) {
+        break;
       }
+      prev_space = *it;
+    }
 
-      // If previous space was immune, then extend the immune region.
-      if (prev_space != NULL &&
-          immune_begin_ <= reinterpret_cast<Object*>(prev_space->Begin()) &&
-          immune_end_ >= reinterpret_cast<Object*>(prev_space->End())) {
+    // If previous space was immune, then extend the immune region. Relies on continuous spaces
+    // being sorted by Heap::AddContinuousSpace.
+    if (prev_space != NULL &&
+        immune_begin_ <= reinterpret_cast<Object*>(prev_space->Begin()) &&
+        immune_end_ >= reinterpret_cast<Object*>(prev_space->End())) {
       immune_begin_ = std::min(reinterpret_cast<Object*>(space->Begin()), immune_begin_);
       immune_end_ = std::max(reinterpret_cast<Object*>(space->End()), immune_end_);
     }
   }
 }
 
-// Bind the live bits to the mark bits of bitmaps based on the gc type.
 void MarkSweep::BindBitmaps() {
-  Spaces& spaces = GetHeap()->GetSpaces();
+  const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
   WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
 
   // Mark all of the spaces we never collect as immune.
-  for (Spaces::iterator it = spaces.begin(); it != spaces.end(); ++it) {
-    ContinuousSpace* space = *it;
-    if (space->GetGcRetentionPolicy() == kGcRetentionPolicyNeverCollect) {
+  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
+  for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
+    space::ContinuousSpace* space = *it;
+    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect) {
       ImmuneSpace(space);
     }
   }
 }
 
-MarkSweep::MarkSweep(Heap* heap, bool is_concurrent)
-    : GarbageCollector(heap),
+MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix)
+    : GarbageCollector(heap,
+                       name_prefix + (name_prefix.empty() ? "" : " ") +
+                       (is_concurrent ? "concurrent mark sweep": "mark sweep")),
+      current_mark_bitmap_(NULL),
+      java_lang_Class_(NULL),
+      mark_stack_(NULL),
+      finger_(NULL),
+      immune_begin_(NULL),
+      immune_end_(NULL),
+      soft_reference_list_(NULL),
+      weak_reference_list_(NULL),
+      finalizer_reference_list_(NULL),
+      phantom_reference_list_(NULL),
+      cleared_reference_list_(NULL),
       gc_barrier_(new Barrier(0)),
       large_object_lock_("mark sweep large object lock", kMarkSweepLargeObjectLock),
       mark_stack_expand_lock_("mark sweep mark stack expand lock"),
       is_concurrent_(is_concurrent),
-      timings_(GetName(), true),
-      cumulative_timings_(GetName()) {
-  cumulative_timings_.SetName(GetName());
-  ResetCumulativeStatistics();
+      clear_soft_references_(false) {
 }
 
 void MarkSweep::InitializePhase() {
+  timings_.Reset();
+  timings_.StartSplit("InitializePhase");
   mark_stack_ = GetHeap()->mark_stack_.get();
   DCHECK(mark_stack_ != NULL);
   finger_ = NULL;
@@ -169,34 +179,31 @@
   java_lang_Class_ = Class::GetJavaLangClass();
   CHECK(java_lang_Class_ != NULL);
   FindDefaultMarkBitmap();
-  // Mark any concurrent roots as dirty since we need to scan them at least once during this GC.
-  Runtime::Current()->DirtyRoots();
-  timings_.Reset();
   // Do any pre GC verification.
   heap_->PreGcVerification(this);
 }
 
 void MarkSweep::ProcessReferences(Thread* self) {
+  timings_.NewSplit("ProcessReferences");
   WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
   ProcessReferences(&soft_reference_list_, clear_soft_references_, &weak_reference_list_,
                     &finalizer_reference_list_, &phantom_reference_list_);
-  timings_.AddSplit("ProcessReferences");
 }
 
 bool MarkSweep::HandleDirtyObjectsPhase() {
   Thread* self = Thread::Current();
-  ObjectStack* allocation_stack = GetHeap()->allocation_stack_.get();
+  accounting::ObjectStack* allocation_stack = GetHeap()->allocation_stack_.get();
   Locks::mutator_lock_->AssertExclusiveHeld(self);
 
   {
+    timings_.NewSplit("ReMarkRoots");
     WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
 
     // Re-mark root set.
     ReMarkRoots();
-    timings_.AddSplit("ReMarkRoots");
 
     // Scan dirty objects, this is only required if we are not doing concurrent GC.
-    RecursiveMarkDirtyObjects(CardTable::kCardDirty);
+    RecursiveMarkDirtyObjects(accounting::CardTable::kCardDirty);
   }
 
   ProcessReferences(self);
@@ -206,15 +213,17 @@
     WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
     // This second sweep makes sure that we don't have any objects in the live stack which point to
     // freed objects. These cause problems since their references may be previously freed objects.
-    SweepArray(timings_, allocation_stack, false);
+    SweepArray(allocation_stack, false);
   } else {
+    timings_.NewSplit("UnMarkAllocStack");
     WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
-    // We only sweep over the live stack, and the live stack should not intersect with the
-    // allocation stack, so it should be safe to UnMark anything in the allocation stack as live.
+    // The allocation stack contains things allocated since the start of the GC. These may have been
+    // marked during this GC meaning they won't be eligible for reclaiming in the next sticky GC.
+    // Remove these objects from the mark bitmaps so that they will be eligible for sticky
+    // collection.
     heap_->UnMarkAllocStack(GetHeap()->alloc_space_->GetMarkBitmap(),
-                           GetHeap()->large_object_space_->GetMarkObjects(),
-                           allocation_stack);
-    timings_.AddSplit("UnMarkAllocStack");
+                            GetHeap()->large_object_space_->GetMarkObjects(),
+                            allocation_stack);
   }
   return true;
 }
@@ -227,31 +236,30 @@
   Heap* heap = GetHeap();
   Thread* self = Thread::Current();
 
+  timings_.NewSplit("BindBitmaps");
   BindBitmaps();
   FindDefaultMarkBitmap();
-  timings_.AddSplit("BindBitmaps");
-
   // Process dirty cards and add dirty cards to mod union tables.
   heap->ProcessCards(timings_);
 
   // Need to do this before the checkpoint since we don't want any threads to add references to
   // the live stack during the recursive mark.
+  timings_.NewSplit("SwapStacks");
   heap->SwapStacks();
-  timings_.AddSplit("SwapStacks");
 
   WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
   if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
     // If we exclusively hold the mutator lock, all threads must be suspended.
+    timings_.NewSplit("MarkRoots");
     MarkRoots();
-    timings_.AddSplit("MarkConcurrentRoots");
   } else {
-    MarkRootsCheckpoint();
-    timings_.AddSplit("MarkRootsCheckpoint");
+    timings_.NewSplit("MarkRootsCheckpoint");
+    MarkRootsCheckpoint(self);
+    timings_.NewSplit("MarkNonThreadRoots");
     MarkNonThreadRoots();
-    timings_.AddSplit("MarkNonThreadRoots");
   }
+  timings_.NewSplit("MarkConcurrentRoots");
   MarkConcurrentRoots();
-  timings_.AddSplit("MarkConcurrentRoots");
 
   heap->UpdateAndMarkModUnion(this, timings_, GetGcType());
   MarkReachableObjects();
@@ -260,12 +268,12 @@
 void MarkSweep::MarkReachableObjects() {
   // Mark everything allocated since the last GC as live so that we can sweep concurrently,
   // knowing that new allocations won't be marked as live.
-  ObjectStack* live_stack = heap_->GetLiveStack();
+  timings_.NewSplit("MarkStackAsLive");
+  accounting::ObjectStack* live_stack = heap_->GetLiveStack();
   heap_->MarkAllocStack(heap_->alloc_space_->GetLiveBitmap(),
                        heap_->large_object_space_->GetLiveObjects(),
                        live_stack);
   live_stack->Reset();
-  timings_.AddSplit("MarkStackAsLive");
   // Recursively mark all the non-image bits set in the mark bitmap.
   RecursiveMark();
   DisableFinger();
@@ -289,60 +297,31 @@
     WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
 
     // Reclaim unmarked objects.
-    Sweep(timings_, false);
+    Sweep(false);
 
     // Swap the live and mark bitmaps for each space which we modified space. This is an
     // optimization that enables us to not clear live bits inside of the sweep. Only swaps unbound
     // bitmaps.
+    timings_.NewSplit("SwapBitmaps");
     SwapBitmaps();
-    timings_.AddSplit("SwapBitmaps");
 
     // Unbind the live and mark bitmaps.
     UnBindBitmaps();
   }
 }
 
-void MarkSweep::SwapBitmaps() {
-  // Swap the live and mark bitmaps for each alloc space. This is needed since sweep re-swaps
-  // these bitmaps. The bitmap swapping is an optimization so that we do not need to clear the live
-  // bits of dead objects in the live bitmap.
-  const GcType gc_type = GetGcType();
-  // TODO: C++0x
-  Spaces& spaces = heap_->GetSpaces();
-  for (Spaces::iterator it = spaces.begin(); it != spaces.end(); ++it) {
-    ContinuousSpace* space = *it;
-    // We never allocate into zygote spaces.
-    if (space->GetGcRetentionPolicy() == kGcRetentionPolicyAlwaysCollect ||
-        (gc_type == kGcTypeFull &&
-            space->GetGcRetentionPolicy() == kGcRetentionPolicyFullCollect)) {
-      SpaceBitmap* live_bitmap = space->GetLiveBitmap();
-      SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
-      if (live_bitmap != mark_bitmap) {
-        heap_->GetLiveBitmap()->ReplaceBitmap(live_bitmap, mark_bitmap);
-        heap_->GetMarkBitmap()->ReplaceBitmap(mark_bitmap, live_bitmap);
-        space->AsAllocSpace()->SwapBitmaps();
-      }
-    }
-  }
-  SwapLargeObjects();
-}
-
-void MarkSweep::SwapLargeObjects() {
-  LargeObjectSpace* large_object_space = heap_->GetLargeObjectsSpace();
-  large_object_space->SwapBitmaps();
-  heap_->GetLiveBitmap()->SetLargeObjects(large_object_space->GetLiveObjects());
-  heap_->GetMarkBitmap()->SetLargeObjects(large_object_space->GetMarkObjects());
-}
-
 void MarkSweep::SetImmuneRange(Object* begin, Object* end) {
   immune_begin_ = begin;
   immune_end_ = end;
 }
 
 void MarkSweep::FindDefaultMarkBitmap() {
-  const Spaces& spaces = heap_->GetSpaces();
-  for (Spaces::const_iterator it = spaces.begin(); it != spaces.end(); ++it) {
-    if ((*it)->GetGcRetentionPolicy() == kGcRetentionPolicyAlwaysCollect) {
+  const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
+  // TODO: C++0x
+  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
+  for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
+    space::ContinuousSpace* space = *it;
+    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) {
       current_mark_bitmap_ = (*it)->GetMarkBitmap();
       CHECK(current_mark_bitmap_ != NULL);
       return;
@@ -389,10 +368,10 @@
 
   // Try to take advantage of locality of references within a space, failing this find the space
   // the hard way.
-  SpaceBitmap* object_bitmap = current_mark_bitmap_;
+  accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_;
   if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
-    SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetSpaceBitmap(obj);
-    if (new_bitmap != NULL) {
+    accounting::SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
+    if (LIKELY(new_bitmap != NULL)) {
       object_bitmap = new_bitmap;
     } else {
       MarkLargeObject(obj);
@@ -416,8 +395,9 @@
 
 // Rare case, probably not worth inlining since it will increase instruction cache miss rate.
 bool MarkSweep::MarkLargeObject(const Object* obj) {
-  LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
-  SpaceSetMap* large_objects = large_object_space->GetMarkObjects();
+  // TODO: support >1 discontinuous space.
+  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
+  accounting::SpaceSetMap* large_objects = large_object_space->GetMarkObjects();
   if (kProfileLargeObjects) {
     ++large_object_test_;
   }
@@ -450,9 +430,9 @@
 
   // Try to take advantage of locality of references within a space, failing this find the space
   // the hard way.
-  SpaceBitmap* object_bitmap = current_mark_bitmap_;
+  accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_;
   if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
-    SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetSpaceBitmap(obj);
+    accounting::SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
     if (new_bitmap != NULL) {
       object_bitmap = new_bitmap;
     } else {
@@ -512,8 +492,8 @@
 
 void MarkSweep::VerifyRoot(const Object* root, size_t vreg, const StackVisitor* visitor) {
   // See if the root is on any space bitmap.
-  if (GetHeap()->GetLiveBitmap()->GetSpaceBitmap(root) == NULL) {
-    LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
+  if (GetHeap()->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == NULL) {
+    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
     if (!large_object_space->Contains(root)) {
       LOG(ERROR) << "Found invalid root: " << root;
       if (visitor != NULL) {
@@ -537,7 +517,8 @@
 }
 
 void MarkSweep::MarkConcurrentRoots() {
-  Runtime::Current()->VisitConcurrentRoots(MarkObjectCallback, this);
+  // Visit all runtime roots and clear dirty flags.
+  Runtime::Current()->VisitConcurrentRoots(MarkObjectCallback, this, false, true);
 }
 
 class CheckObjectVisitor {
@@ -573,11 +554,11 @@
   mark_sweep->CheckObject(root);
 }
 
-void MarkSweep::BindLiveToMarkBitmap(ContinuousSpace* space) {
-  CHECK(space->IsAllocSpace());
-  DlMallocSpace* alloc_space = space->AsAllocSpace();
-  SpaceBitmap* live_bitmap = space->GetLiveBitmap();
-  SpaceBitmap* mark_bitmap = alloc_space->mark_bitmap_.release();
+void MarkSweep::BindLiveToMarkBitmap(space::ContinuousSpace* space) {
+  CHECK(space->IsDlMallocSpace());
+  space::DlMallocSpace* alloc_space = space->AsDlMallocSpace();
+  accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
+  accounting::SpaceBitmap* mark_bitmap = alloc_space->mark_bitmap_.release();
   GetHeap()->GetMarkBitmap()->ReplaceBitmap(mark_bitmap, live_bitmap);
   alloc_space->temp_bitmap_.reset(mark_bitmap);
   alloc_space->mark_bitmap_.reset(live_bitmap);
@@ -586,7 +567,6 @@
 class ScanObjectVisitor {
  public:
   ScanObjectVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {
-
   }
 
  // TODO: Fixme when annotalysis works with visitors.
@@ -603,29 +583,39 @@
 };
 
 void MarkSweep::ScanGrayObjects(byte minimum_age) {
-  const Spaces& spaces = heap_->GetSpaces();
-  CardTable* card_table = heap_->GetCardTable();
+  accounting::CardTable* card_table = GetHeap()->GetCardTable();
+  const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
   ScanObjectVisitor visitor(this);
   SetFingerVisitor finger_visitor(this);
-  // TODO: C++ 0x auto
-  for (Spaces::const_iterator it = spaces.begin(); it != spaces.end(); ++it) {
-    ContinuousSpace* space = *it;
+  // TODO: C++0x
+  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
+  for (It it = spaces.begin(), space_end = spaces.end(); it != space_end; ++it) {
+    space::ContinuousSpace* space = *it;
+    switch (space->GetGcRetentionPolicy()) {
+      case space::kGcRetentionPolicyNeverCollect:
+        timings_.NewSplit("ScanGrayImageSpaceObjects");
+        break;
+      case space::kGcRetentionPolicyFullCollect:
+        timings_.NewSplit("ScanGrayZygoteSpaceObjects");
+        break;
+      case space::kGcRetentionPolicyAlwaysCollect:
+        timings_.NewSplit("ScanGrayAllocSpaceObjects");
+        break;
+    }
     byte* begin = space->Begin();
     byte* end = space->End();
     // Image spaces are handled properly since live == marked for them.
-    SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
-    card_table->Scan(mark_bitmap, begin, end, visitor, VoidFunctor(), minimum_age);
+    accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
+    card_table->Scan(mark_bitmap, begin, end, visitor, finger_visitor, minimum_age);
   }
 }
 
 class CheckBitmapVisitor {
  public:
   CheckBitmapVisitor(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {
-
   }
 
-  void operator ()(const Object* obj) const
-      NO_THREAD_SAFETY_ANALYSIS {
+  void operator ()(const Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
     if (kDebugLocking) {
       Locks::heap_bitmap_lock_->AssertSharedHeld(Thread::Current());
     }
@@ -642,13 +632,15 @@
   // objects which are either in the image space or marked objects in the alloc
   // space
   CheckBitmapVisitor visitor(this);
-  const Spaces& spaces = heap_->GetSpaces();
-  for (Spaces::const_iterator it = spaces.begin(); it != spaces.end(); ++it) {
+  const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
+  // TODO: C++0x
+  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
+  for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
     if ((*it)->IsImageSpace()) {
-      ImageSpace* space = (*it)->AsImageSpace();
+      space::ImageSpace* space = (*it)->AsImageSpace();
       uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
       uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
-      SpaceBitmap* live_bitmap = space->GetLiveBitmap();
+      accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
       DCHECK(live_bitmap != NULL);
       live_bitmap->VisitMarkedRange(begin, end, visitor, VoidFunctor());
     }
@@ -658,6 +650,7 @@
 // Populates the mark stack based on the set of marked objects and
 // recursively marks until the mark stack is emptied.
 void MarkSweep::RecursiveMark() {
+  timings_.NewSplit("RecursiveMark");
   // RecursiveMark will build the lists of known instances of the Reference classes.
   // See DelayReferenceReferent for details.
   CHECK(soft_reference_list_ == NULL);
@@ -667,16 +660,17 @@
   CHECK(cleared_reference_list_ == NULL);
 
   const bool partial = GetGcType() == kGcTypePartial;
-  const Spaces& spaces = heap_->GetSpaces();
   SetFingerVisitor set_finger_visitor(this);
   ScanObjectVisitor scan_visitor(this);
   if (!kDisableFinger) {
     finger_ = NULL;
-    for (Spaces::const_iterator it = spaces.begin(); it != spaces.end(); ++it) {
-      ContinuousSpace* space = *it;
-      if ((space->GetGcRetentionPolicy() == kGcRetentionPolicyAlwaysCollect) ||
-          (!partial && space->GetGcRetentionPolicy() == kGcRetentionPolicyFullCollect)
-          ) {
+    const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
+    // TODO: C++0x
+    typedef std::vector<space::ContinuousSpace*>::const_iterator It;
+    for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
+      space::ContinuousSpace* space = *it;
+      if ((space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) ||
+          (!partial && space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect)) {
         current_mark_bitmap_ = space->GetMarkBitmap();
         if (current_mark_bitmap_ == NULL) {
           GetHeap()->DumpSpaces();
@@ -690,9 +684,8 @@
     }
   }
   DisableFinger();
-  timings_.AddSplit("RecursiveMark");
+  timings_.NewSplit("ProcessMarkStack");
   ProcessMarkStack();
-  timings_.AddSplit("ProcessMarkStack");
 }
 
 bool MarkSweep::IsMarkedCallback(const Object* object, void* arg) {
@@ -703,13 +696,12 @@
 
 void MarkSweep::RecursiveMarkDirtyObjects(byte minimum_age) {
   ScanGrayObjects(minimum_age);
-  timings_.AddSplit("ScanGrayObjects");
+  timings_.NewSplit("ProcessMarkStack");
   ProcessMarkStack();
-  timings_.AddSplit("ProcessMarkStack");
 }
 
 void MarkSweep::ReMarkRoots() {
-  Runtime::Current()->VisitRoots(ReMarkObjectVisitor, this);
+  Runtime::Current()->VisitRoots(ReMarkObjectVisitor, this, true, true);
 }
 
 void MarkSweep::SweepJniWeakGlobals(IsMarkedTester is_marked, void* arg) {
@@ -726,7 +718,7 @@
 }
 
 struct ArrayMarkedCheck {
-  ObjectStack* live_stack;
+  accounting::ObjectStack* live_stack;
   MarkSweep* mark_sweep;
 };
 
@@ -736,11 +728,11 @@
   if (array_check->mark_sweep->IsMarked(object)) {
     return true;
   }
-  ObjectStack* live_stack = array_check->live_stack;
+  accounting::ObjectStack* live_stack = array_check->live_stack;
   return std::find(live_stack->Begin(), live_stack->End(), object) == live_stack->End();
 }
 
-void MarkSweep::SweepSystemWeaksArray(ObjectStack* allocations) {
+void MarkSweep::SweepSystemWeaksArray(accounting::ObjectStack* allocations) {
   Runtime* runtime = Runtime::Current();
   // The callbacks check
   // !is_marked where is_marked is the callback but we want
@@ -777,7 +769,7 @@
 void MarkSweep::VerifyIsLive(const Object* obj) {
   Heap* heap = GetHeap();
   if (!heap->GetLiveBitmap()->Test(obj)) {
-    LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
+    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
     if (!large_object_space->GetLiveObjects()->Test(obj)) {
       if (std::find(heap->allocation_stack_->Begin(), heap->allocation_stack_->End(), obj) ==
           heap->allocation_stack_->End()) {
@@ -807,7 +799,7 @@
 
 struct SweepCallbackContext {
   MarkSweep* mark_sweep;
-  AllocSpace* space;
+  space::AllocSpace* space;
   Thread* self;
 };
 
@@ -830,28 +822,29 @@
   MarkSweep* mark_sweep_;
 };
 
-void MarkSweep::ResetCumulativeStatistics() {
-  cumulative_timings_.Reset();
-  total_time_ = 0;
-  total_paused_time_ = 0;
-  total_freed_objects_ = 0;
-  total_freed_bytes_ = 0;
-}
-
-void MarkSweep::MarkRootsCheckpoint() {
+void MarkSweep::MarkRootsCheckpoint(Thread* self) {
   CheckpointMarkThreadRoots check_point(this);
   ThreadList* thread_list = Runtime::Current()->GetThreadList();
-  // Increment the count of the barrier. If all of the checkpoints have already been finished then
-  // will hit 0 and continue. Otherwise we are still waiting for some checkpoints, so the counter
-  // will go positive and we will unblock when it hits zero.
-  gc_barrier_->Increment(Thread::Current(), thread_list->RunCheckpoint(&check_point));
+  // Request that the checkpoint be run on all threads, returning a count of the threads that
+  // must run through the barrier (including self).
+  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
+  // Release locks then wait for all mutator threads to pass the barrier.
+  // TODO: optimize to not release locks when there are no threads to wait for.
+  Locks::heap_bitmap_lock_->ExclusiveUnlock(self);
+  Locks::mutator_lock_->SharedUnlock(self);
+  ThreadState old_state = self->SetState(kWaitingForCheckPointsToRun);
+  CHECK_EQ(old_state, kWaitingPerformingGc);
+  gc_barrier_->Increment(self, barrier_count);
+  self->SetState(kWaitingPerformingGc);
+  Locks::mutator_lock_->SharedLock(self);
+  Locks::heap_bitmap_lock_->ExclusiveLock(self);
 }
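
The reworked MarkRootsCheckpoint releases both locks before blocking on the barrier and
reacquires them afterwards, so a thread that still needs either lock can reach its checkpoint.
A minimal sketch of that release-wait-reacquire shape, using a toy barrier and standard
mutexes rather than art::Barrier and the Locks:: API:

#include <condition_variable>
#include <mutex>
#include <shared_mutex>

class ToyBarrier {
 public:
  // Add `count` expected arrivals and block until the count drains to zero.
  void Increment(int count) {
    std::unique_lock<std::mutex> lock(mu_);
    pending_ += count;
    while (pending_ > 0) {
      cv_.wait(lock);
    }
  }
  // Called by each checkpointed thread once it has run its checkpoint.
  void Pass() {
    std::lock_guard<std::mutex> lock(mu_);
    if (--pending_ <= 0) cv_.notify_all();
  }
 private:
  std::mutex mu_;
  std::condition_variable cv_;
  int pending_ = 0;
};

void CheckpointRoots(ToyBarrier& barrier, std::shared_mutex& mutator_lock,
                     std::mutex& heap_bitmap_lock, int threads_to_wait_for) {
  // Precondition, as in the GC: heap_bitmap_lock held exclusively,
  // mutator_lock held shared.
  heap_bitmap_lock.unlock();
  mutator_lock.unlock_shared();
  barrier.Increment(threads_to_wait_for);  // safe to block: no locks held
  mutator_lock.lock_shared();
  heap_bitmap_lock.lock();
}

As in the removed comment above, the toy barrier tolerates threads passing before Increment
is called: the count simply goes negative and Increment returns immediately.
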
 
 void MarkSweep::SweepCallback(size_t num_ptrs, Object** ptrs, void* arg) {
   SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
   MarkSweep* mark_sweep = context->mark_sweep;
   Heap* heap = mark_sweep->GetHeap();
-  AllocSpace* space = context->space;
+  space::AllocSpace* space = context->space;
   Thread* self = context->self;
   Locks::heap_bitmap_lock_->AssertExclusiveHeld(self);
  // Use a bulk free (merging consecutive objects before freeing), or free per object?
@@ -877,22 +870,23 @@
   }
 }
 
-void MarkSweep::SweepArray(TimingLogger& logger, ObjectStack* allocations, bool swap_bitmaps) {
+void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitmaps) {
   size_t freed_bytes = 0;
-  DlMallocSpace* space = heap_->GetAllocSpace();
+  space::DlMallocSpace* space = heap_->GetAllocSpace();
 
   // If we don't swap bitmaps then newly allocated Weaks go into the live bitmap but not mark
   // bitmap, resulting in occasional frees of Weaks which are still in use.
+  timings_.NewSplit("SweepSystemWeaks");
   SweepSystemWeaksArray(allocations);
-  logger.AddSplit("SweepSystemWeaks");
 
+  timings_.NewSplit("Process allocation stack");
   // Newly allocated objects MUST be in the alloc space and those are the only objects which we are
   // going to free.
-  SpaceBitmap* live_bitmap = space->GetLiveBitmap();
-  SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
-  LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
-  SpaceSetMap* large_live_objects = large_object_space->GetLiveObjects();
-  SpaceSetMap* large_mark_objects = large_object_space->GetMarkObjects();
+  accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
+  accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
+  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
+  accounting::SpaceSetMap* large_live_objects = large_object_space->GetLiveObjects();
+  accounting::SpaceSetMap* large_mark_objects = large_object_space->GetMarkObjects();
   if (swap_bitmaps) {
     std::swap(live_bitmap, mark_bitmap);
     std::swap(large_live_objects, large_mark_objects);
@@ -918,7 +912,8 @@
       freed_bytes += large_object_space->Free(self, obj);
     }
   }
-  logger.AddSplit("Process allocation stack");
+  CHECK_EQ(count, allocations->Size());
+  timings_.NewSplit("FreeList");
 
   size_t freed_objects = out - objects;
   freed_bytes += space->FreeList(self, freed_objects, objects);
@@ -927,71 +922,78 @@
   heap_->RecordFree(freed_objects + freed_large_objects, freed_bytes);
   freed_objects_ += freed_objects;
   freed_bytes_ += freed_bytes;
-  logger.AddSplit("FreeList");
+
+  timings_.NewSplit("ResetStack");
   allocations->Reset();
-  logger.AddSplit("ResetStack");
 }
 
-void MarkSweep::Sweep(TimingLogger& timings, bool swap_bitmaps) {
+void MarkSweep::Sweep(bool swap_bitmaps) {
   DCHECK(mark_stack_->IsEmpty());
 
   // If we don't swap bitmaps then newly allocated Weaks go into the live bitmap but not mark
   // bitmap, resulting in occasional frees of Weaks which are still in use.
+  timings_.NewSplit("SweepSystemWeaks");
   SweepSystemWeaks();
-  timings.AddSplit("SweepSystemWeaks");
 
-  const bool partial = GetGcType() == kGcTypePartial;
-  const Spaces& spaces = heap_->GetSpaces();
+  const bool partial = (GetGcType() == kGcTypePartial);
   SweepCallbackContext scc;
   scc.mark_sweep = this;
   scc.self = Thread::Current();
-  // TODO: C++0x auto
-  for (Spaces::const_iterator it = spaces.begin(); it != spaces.end(); ++it) {
-    ContinuousSpace* space = *it;
-    if (
-        space->GetGcRetentionPolicy() == kGcRetentionPolicyAlwaysCollect ||
-        (!partial && space->GetGcRetentionPolicy() == kGcRetentionPolicyFullCollect)
-        ) {
+  const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
+  // TODO: C++0x
+  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
+  for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
+    space::ContinuousSpace* space = *it;
+    // We always sweep always-collect spaces.
+    bool sweep_space = (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect);
+    if (!partial && !sweep_space) {
+      // We sweep full-collect spaces when the GC isn't a partial GC (i.e. it's full).
+      sweep_space = (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect);
+    }
+    if (sweep_space) {
       uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
       uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
-      scc.space = space->AsAllocSpace();
-      SpaceBitmap* live_bitmap = space->GetLiveBitmap();
-      SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
+      scc.space = space->AsDlMallocSpace();
+      accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
+      accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
       if (swap_bitmaps) {
         std::swap(live_bitmap, mark_bitmap);
       }
-      if (space->GetGcRetentionPolicy() == kGcRetentionPolicyAlwaysCollect) {
+      if (!space->IsZygoteSpace()) {
+        timings_.NewSplit("SweepAllocSpace");
         // Bitmaps are pre-swapped for optimization which enables sweeping with the heap unlocked.
-        SpaceBitmap::SweepWalk(*live_bitmap, *mark_bitmap, begin, end,
-                               &SweepCallback, reinterpret_cast<void*>(&scc));
+        accounting::SpaceBitmap::SweepWalk(*live_bitmap, *mark_bitmap, begin, end,
+                                           &SweepCallback, reinterpret_cast<void*>(&scc));
       } else {
-        // Zygote sweep takes care of dirtying cards and clearing live bits, does not free actual memory.
-        SpaceBitmap::SweepWalk(*live_bitmap, *mark_bitmap, begin, end,
-                               &ZygoteSweepCallback, reinterpret_cast<void*>(&scc));
+        timings_.NewSplit("SweepZygote");
+        // Zygote sweep takes care of dirtying cards and clearing live bits; it does not free
+        // actual memory.
+        accounting::SpaceBitmap::SweepWalk(*live_bitmap, *mark_bitmap, begin, end,
+                                           &ZygoteSweepCallback, reinterpret_cast<void*>(&scc));
       }
     }
   }
-  timings.AddSplit("Sweep");
 
+  timings_.NewSplit("SweepLargeObjects");
   SweepLargeObjects(swap_bitmaps);
-  timings.AddSplit("SweepLargeObjects");
 }
 
 void MarkSweep::SweepLargeObjects(bool swap_bitmaps) {
   // Sweep large objects
-  LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
-  SpaceSetMap* large_live_objects = large_object_space->GetLiveObjects();
-  SpaceSetMap* large_mark_objects = large_object_space->GetMarkObjects();
+  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
+  accounting::SpaceSetMap* large_live_objects = large_object_space->GetLiveObjects();
+  accounting::SpaceSetMap* large_mark_objects = large_object_space->GetMarkObjects();
   if (swap_bitmaps) {
     std::swap(large_live_objects, large_mark_objects);
   }
-  SpaceSetMap::Objects& live_objects = large_live_objects->GetObjects();
+  accounting::SpaceSetMap::Objects& live_objects = large_live_objects->GetObjects();
   // O(n*log(n)) but hopefully there are not too many large objects.
   size_t freed_objects = 0;
   size_t freed_bytes = 0;
-  // TODO: C++0x
   Thread* self = Thread::Current();
-  for (SpaceSetMap::Objects::iterator it = live_objects.begin(); it != live_objects.end(); ++it) {
+  // TODO: C++0x
+  typedef accounting::SpaceSetMap::Objects::iterator It;
+  for (It it = live_objects.begin(), end = live_objects.end(); it != end; ++it) {
     if (!large_mark_objects->Test(*it)) {
       freed_bytes += large_object_space->Free(self, const_cast<Object*>(*it));
       ++freed_objects;
@@ -999,20 +1001,21 @@
   }
   freed_objects_ += freed_objects;
   freed_bytes_ += freed_bytes;
-  // Large objects don't count towards bytes_allocated.
   GetHeap()->RecordFree(freed_objects, freed_bytes);
 }
 
 void MarkSweep::CheckReference(const Object* obj, const Object* ref, MemberOffset offset, bool is_static) {
-  const Spaces& spaces = heap_->GetSpaces();
-  // TODO: C++0x auto
-  for (Spaces::const_iterator cur = spaces.begin(); cur != spaces.end(); ++cur) {
-    if ((*cur)->IsAllocSpace() && (*cur)->Contains(ref)) {
+  const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
+  // TODO: C++0x
+  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
+  for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
+    space::ContinuousSpace* space = *it;
+    if (space->IsDlMallocSpace() && space->Contains(ref)) {
       DCHECK(IsMarked(obj));
 
       bool is_marked = IsMarked(ref);
       if (!is_marked) {
-        LOG(INFO) << **cur;
+        LOG(INFO) << *space;
         LOG(WARNING) << (is_static ? "Static ref'" : "Instance ref'") << PrettyTypeOf(ref)
                      << "' (" << reinterpret_cast<const void*>(ref) << ") in '" << PrettyTypeOf(obj)
                      << "' (" << reinterpret_cast<const void*>(obj) << ") at offset "
@@ -1109,7 +1112,7 @@
 }
 
 class MarkStackChunk : public Task {
-public:
+ public:
   MarkStackChunk(ThreadPool* thread_pool, MarkSweep* mark_sweep, Object** begin, Object** end)
       : mark_sweep_(mark_sweep),
         thread_pool_(thread_pool),
@@ -1171,6 +1174,7 @@
  // Don't need to use an atomic ++ since only one thread is writing to an output block at any
  // given time.
   void Push(Object* obj) {
+    CHECK(obj != NULL);
     data_[length_++] = obj;
   }
 
@@ -1178,7 +1182,7 @@
     if (static_cast<size_t>(length_) < max_size) {
       Push(const_cast<Object*>(obj));
     } else {
-      // Internal buffer is full, push to a new buffer instead.
+      // Internal (thread-local) buffer is full, push to a new buffer instead.
       if (UNLIKELY(output_ == NULL)) {
         AllocateOutputChunk();
       } else if (UNLIKELY(static_cast<size_t>(output_->length_) == max_size)) {
@@ -1257,8 +1261,8 @@
     thread_pool->AddTask(self, new MarkStackChunk(thread_pool, this, begin, end));
   }
   thread_pool->StartWorkers(self);
+  thread_pool->Wait(self, true, true);
   mark_stack_->Reset();
-  thread_pool->Wait(self, true);
   //LOG(INFO) << "Idle wait time " << PrettyDuration(thread_pool->GetWaitTime());
   CHECK_EQ(work_chunks_created_, work_chunks_deleted_) << " some of the work chunks were leaked";
 }
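
A subtle reorder above: thread_pool->Wait(...) now precedes mark_stack_->Reset(), since
workers may still be consuming the stack's chunks after StartWorkers returns. The hazard in
miniature (toy work list and std::thread, not the ART ThreadPool):

#include <atomic>
#include <thread>
#include <vector>

static std::vector<int> work(1000, 1);
static std::atomic<size_t> next_index(0);
static std::atomic<long> total(0);

static void Worker() {
  size_t i;
  while ((i = next_index.fetch_add(1)) < work.size()) {
    total += work[i];  // reads the shared list; must finish before reset
  }
}

int main() {
  std::thread t1(Worker);
  std::thread t2(Worker);
  t1.join();
  t2.join();      // the analogue of thread_pool->Wait(): all chunks processed
  work.clear();   // only now is it safe to reset the shared stack
  return total == 1000 ? 0 : 1;
}
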
@@ -1445,15 +1449,16 @@
 }
 
 void MarkSweep::UnBindBitmaps() {
-  const Spaces& spaces = heap_->GetSpaces();
-  // TODO: C++0x auto
-  for (Spaces::const_iterator it = spaces.begin(); it != spaces.end(); ++it) {
-    Space* space = *it;
-    if (space->IsAllocSpace()) {
-      DlMallocSpace* alloc_space = space->AsAllocSpace();
+  const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
+  // TODO: C++0x
+  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
+  for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
+    space::ContinuousSpace* space = *it;
+    if (space->IsDlMallocSpace()) {
+      space::DlMallocSpace* alloc_space = space->AsDlMallocSpace();
       if (alloc_space->temp_bitmap_.get() != NULL) {
         // At this point, the temp_bitmap holds our old mark bitmap.
-        SpaceBitmap* new_bitmap = alloc_space->temp_bitmap_.release();
+        accounting::SpaceBitmap* new_bitmap = alloc_space->temp_bitmap_.release();
         GetHeap()->GetMarkBitmap()->ReplaceBitmap(alloc_space->mark_bitmap_.get(), new_bitmap);
         CHECK_EQ(alloc_space->mark_bitmap_.release(), alloc_space->live_bitmap_.get());
         alloc_space->mark_bitmap_.reset(new_bitmap);
@@ -1466,20 +1471,21 @@
 void MarkSweep::FinishPhase() {
  // Can't enqueue references if we hold the mutator lock.
   Object* cleared_references = GetClearedReferences();
-  heap_->EnqueueClearedReferences(&cleared_references);
+  Heap* heap = GetHeap();
+  heap->EnqueueClearedReferences(&cleared_references);
 
-  heap_->PostGcVerification(this);
+  heap->PostGcVerification(this);
 
-  heap_->GrowForUtilization(GetDuration());
-  timings_.AddSplit("GrowForUtilization");
+  timings_.NewSplit("GrowForUtilization");
+  heap->GrowForUtilization(GetDurationNs());
 
-  heap_->RequestHeapTrim();
-  timings_.AddSplit("RequestHeapTrim");
+  timings_.NewSplit("RequestHeapTrim");
+  heap->RequestHeapTrim();
 
   // Update the cumulative statistics
-  total_time_ += GetDuration();
-  total_paused_time_ += std::accumulate(GetPauseTimes().begin(), GetPauseTimes().end(), 0,
-                                        std::plus<uint64_t>());
+  total_time_ns_ += GetDurationNs();
+  total_paused_time_ns_ += std::accumulate(GetPauseTimes().begin(), GetPauseTimes().end(),
+                                           static_cast<uint64_t>(0), std::plus<uint64_t>());
   total_freed_objects_ += GetFreedObjects();
   total_freed_bytes_ += GetFreedBytes();
 
@@ -1513,27 +1519,26 @@
 
   // Update the cumulative loggers.
   cumulative_timings_.Start();
-  cumulative_timings_.AddLogger(timings_);
+  cumulative_timings_.AddNewLogger(timings_);
   cumulative_timings_.End();
 
   // Clear all of the spaces' mark bitmaps.
-  const Spaces& spaces = heap_->GetSpaces();
-  // TODO: C++0x auto
-  for (Spaces::const_iterator it = spaces.begin(); it != spaces.end(); ++it) {
-    ContinuousSpace* space = *it;
-    if (space->GetGcRetentionPolicy() != kGcRetentionPolicyNeverCollect) {
+  const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
+  // TODO: C++0x
+  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
+  for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
+    space::ContinuousSpace* space = *it;
+    if (space->GetGcRetentionPolicy() != space::kGcRetentionPolicyNeverCollect) {
       space->GetMarkBitmap()->Clear();
     }
   }
   mark_stack_->Reset();
 
   // Reset the marked large objects.
-  LargeObjectSpace* large_objects = GetHeap()->GetLargeObjectsSpace();
+  space::LargeObjectSpace* large_objects = GetHeap()->GetLargeObjectsSpace();
   large_objects->GetMarkObjects()->Clear();
 }
 
-MarkSweep::~MarkSweep() {
-
-}
-
+}  // namespace collector
+}  // namespace gc
 }  // namespace art
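
Throughout this file, timings_.AddSplit("X") calls placed after a phase become
timings_.NewSplit("X") calls placed before it, so there is always a named, currently-open
split while a phase runs. A sketch of a logger with those semantics (a toy; the real one
lives in base/timing_logger):

#include <chrono>
#include <cstdio>
#include <string>
#include <utility>
#include <vector>

class ToyTimingLogger {
 public:
  // Close the currently open split (if any) and open a new, named one.
  void NewSplit(const char* name) {
    Clock::time_point now = Clock::now();
    EndCurrentSplit(now);
    current_name_ = name;
    current_start_ = now;
  }
  // Close the last split once the phase sequence is finished.
  void End() { EndCurrentSplit(Clock::now()); }
  void Dump() const {
    for (size_t i = 0; i < splits_.size(); ++i) {
      std::printf("%s: %lld us\n", splits_[i].first.c_str(), splits_[i].second);
    }
  }

 private:
  typedef std::chrono::steady_clock Clock;
  void EndCurrentSplit(Clock::time_point now) {
    if (!current_name_.empty()) {
      long long us = std::chrono::duration_cast<std::chrono::microseconds>(
          now - current_start_).count();
      splits_.push_back(std::make_pair(current_name_, us));
      current_name_.clear();
    }
  }
  std::string current_name_;
  Clock::time_point current_start_;
  std::vector<std::pair<std::string, long long> > splits_;
};

Each NewSplit names the phase that is about to run and implicitly closes the previous one,
mirroring the timings_.NewSplit(...) calls threaded through Sweep and SweepArray above.
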
diff --git a/src/gc/mark_sweep.h b/src/gc/collector/mark_sweep.h
similarity index 86%
rename from src/gc/mark_sweep.h
rename to src/gc/collector/mark_sweep.h
index 11ce32f..9df3c19 100644
--- a/src/gc/mark_sweep.h
+++ b/src/gc/collector/mark_sweep.h
@@ -21,40 +21,50 @@
 #include "barrier.h"
 #include "base/macros.h"
 #include "base/mutex.h"
-#include "base/timing_logger.h"
 #include "garbage_collector.h"
-#include "gc_type.h"
 #include "offsets.h"
 #include "root_visitor.h"
 #include "UniquePtr.h"
 
 namespace art {
+
 namespace mirror {
-class Class;
-class Object;
-template<class T> class ObjectArray;
-}
-template <typename T> class AtomicStack;
-class CheckObjectVisitor;
-class ContinuousSpace;
-class Heap;
-class MarkIfReachesAllocspaceVisitor;
-class ModUnionClearCardVisitor;
-class ModUnionVisitor;
-class ModUnionTableBitmap;
-typedef AtomicStack<mirror::Object*> ObjectStack;
-class SpaceBitmap;
+  class Class;
+  class Object;
+  template<class T> class ObjectArray;
+}  // namespace mirror
+
 class StackVisitor;
 class Thread;
-class MarkStackChunk;
+
+namespace gc {
+
+namespace accounting {
+  template <typename T> class AtomicStack;
+  class MarkIfReachesAllocspaceVisitor;
+  class ModUnionClearCardVisitor;
+  class ModUnionVisitor;
+  class ModUnionTableBitmap;
+  class MarkStackChunk;
+  typedef AtomicStack<mirror::Object*> ObjectStack;
+  class SpaceBitmap;
+}  // namespace accounting
+
+namespace space {
+  class ContinuousSpace;
+}  // namespace space
+
+class CheckObjectVisitor;
+class Heap;
+
+namespace collector {
 
 class MarkSweep : public GarbageCollector {
  public:
-  explicit MarkSweep(Heap* heap, bool is_concurrent);
+  explicit MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix = "");
 
-  ~MarkSweep();
+  ~MarkSweep() {}
 
-  virtual std::string GetName() const;
   virtual void InitializePhase();
   virtual bool IsConcurrent() const;
   virtual bool HandleDirtyObjectsPhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -85,8 +95,9 @@
  void MarkConcurrentRoots()
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
-  void MarkRootsCheckpoint();
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+  void MarkRootsCheckpoint(Thread* self)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Verify that image roots point to only marked objects within the alloc space.
   void VerifyImageRoots()
@@ -98,16 +109,17 @@
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  // Make a space immune, immune spaces are assumed to have all live objects marked.
-  void ImmuneSpace(ContinuousSpace* space)
+  // Make a space immune; immune spaces have all live objects marked, that is, the mark and
+  // live bitmaps are bound together.
+  void ImmuneSpace(space::ContinuousSpace* space)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  // Bind the live bits to the mark bits of bitmaps based on the gc type.
-  virtual void BindBitmaps()
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  // Bind the live bits to the mark bits of bitmaps for spaces that are never collected,
+  // i.e. the image. Mark that portion of the heap as immune.
+  virtual void BindBitmaps() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  void BindLiveToMarkBitmap(ContinuousSpace* space)
+  void BindLiveToMarkBitmap(space::ContinuousSpace* space)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
   void UnBindBitmaps()
@@ -127,21 +139,15 @@
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Sweeps unmarked objects to complete the garbage collection.
-  virtual void Sweep(TimingLogger& timings, bool swap_bitmaps)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+  virtual void Sweep(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
   // Sweeps unmarked objects to complete the garbage collection.
-  void SweepLargeObjects(bool swap_bitmaps)
-      EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_);
+  void SweepLargeObjects(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
   // Sweep only pointers within an array. WARNING: Trashes objects.
-  void SweepArray(TimingLogger& logger, ObjectStack* allocation_stack_, bool swap_bitmaps)
+  void SweepArray(accounting::ObjectStack* allocation_stack_, bool swap_bitmaps)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
-  // Swap bitmaps (if we are a full Gc then we swap the zygote bitmap too).
-  virtual void SwapBitmaps() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
-  void SwapLargeObjects() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
-
   mirror::Object* GetClearedReferences() {
     return cleared_reference_list_;
   }
@@ -177,12 +183,12 @@
     return freed_objects_;
   }
 
-  uint64_t GetTotalTime() const {
-    return total_time_;
+  uint64_t GetTotalTimeNs() const {
+    return total_time_ns_;
   }
 
-  uint64_t GetTotalPausedTime() const {
-    return total_paused_time_;
+  uint64_t GetTotalPausedTimeNs() const {
+    return total_paused_time_ns_;
   }
 
   uint64_t GetTotalFreedObjects() const {
@@ -200,7 +206,7 @@
       SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
   // Only sweep the weaks which are inside of an allocation stack.
-  void SweepSystemWeaksArray(ObjectStack* allocations)
+  void SweepSystemWeaksArray(accounting::ObjectStack* allocations)
       SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
   static bool VerifyIsLiveCallback(const mirror::Object* obj, void* arg)
@@ -237,16 +243,6 @@
     return *gc_barrier_;
   }
 
-  TimingLogger& GetTimings() {
-    return timings_;
-  }
-
-  CumulativeLogger& GetCumulativeTimings() {
-    return cumulative_timings_;
-  }
-
-  void ResetCumulativeStatistics();
-
  protected:
   // Returns true if the object has its bit set in the mark bitmap.
   bool IsMarked(const mirror::Object* object) const;
@@ -381,13 +377,14 @@
   // Whether or not we count how many of each type of object were scanned.
   static const bool kCountScannedTypes = false;
 
-  // Current space, we check this space first to avoid searching for the appropriate space for an object.
-  SpaceBitmap* current_mark_bitmap_;
+  // Current space, we check this space first to avoid searching for the appropriate space for an
+  // object.
+  accounting::SpaceBitmap* current_mark_bitmap_;
 
   // Cache java.lang.Class for optimization.
   mirror::Class* java_lang_Class_;
 
-  ObjectStack* mark_stack_;
+  accounting::ObjectStack* mark_stack_;
 
   mirror::Object* finger_;
 
@@ -401,10 +398,15 @@
   mirror::Object* phantom_reference_list_;
   mirror::Object* cleared_reference_list_;
 
+  // Number of bytes freed in this collection.
   AtomicInteger freed_bytes_;
+  // Number of objects freed in this collection.
   AtomicInteger freed_objects_;
+  // Number of classes scanned, if kCountScannedTypes.
   AtomicInteger class_count_;
+  // Number of arrays scanned, if kCountScannedTypes.
   AtomicInteger array_count_;
+  // Number of non-class/arrays scanned, if kCountScannedTypes.
   AtomicInteger other_count_;
   AtomicInteger large_object_test_;
   AtomicInteger large_object_mark_;
@@ -414,28 +416,19 @@
   AtomicInteger work_chunks_deleted_;
   AtomicInteger reference_count_;
 
-  // Cumulative statistics.
-  uint64_t total_time_;
-  uint64_t total_paused_time_;
-  uint64_t total_freed_objects_;
-  uint64_t total_freed_bytes_;
-
   UniquePtr<Barrier> gc_barrier_;
   Mutex large_object_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
   Mutex mark_stack_expand_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
 
   const bool is_concurrent_;
 
-  TimingLogger timings_;
-  CumulativeLogger cumulative_timings_;
-
   bool clear_soft_references_;
 
   friend class AddIfReachesAllocSpaceVisitor; // Used by mod-union table.
   friend class CheckBitmapVisitor;
   friend class CheckObjectVisitor;
   friend class CheckReferenceVisitor;
-  friend class Heap;
+  friend class art::gc::Heap;
   friend class InternTableEntryIsUnmarked;
   friend class MarkIfReachesAllocspaceVisitor;
   friend class ModUnionCheckReferences;
@@ -453,6 +446,8 @@
   DISALLOW_COPY_AND_ASSIGN(MarkSweep);
 };
 
+}  // namespace collector
+}  // namespace gc
 }  // namespace art
 
 #endif  // ART_SRC_GC_MARK_SWEEP_H_
diff --git a/src/gc/partial_mark_sweep.cc b/src/gc/collector/partial_mark_sweep.cc
similarity index 63%
rename from src/gc/partial_mark_sweep.cc
rename to src/gc/collector/partial_mark_sweep.cc
index f9c1787..ef893c5 100644
--- a/src/gc/partial_mark_sweep.cc
+++ b/src/gc/collector/partial_mark_sweep.cc
@@ -16,36 +16,38 @@
 
 #include "partial_mark_sweep.h"
 
-#include "heap.h"
-#include "large_object_space.h"
+#include "gc/heap.h"
+#include "gc/space/space.h"
 #include "partial_mark_sweep.h"
-#include "space.h"
 #include "thread.h"
 
 namespace art {
+namespace gc {
+namespace collector {
 
-PartialMarkSweep::PartialMarkSweep(Heap* heap, bool is_concurrent)
-    : MarkSweep(heap, is_concurrent) {
+PartialMarkSweep::PartialMarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix)
+    : MarkSweep(heap, is_concurrent, name_prefix + (name_prefix.empty() ? "" : " ") + "partial") {
   cumulative_timings_.SetName(GetName());
 }
 
-PartialMarkSweep::~PartialMarkSweep() {
-
-}
-
 void PartialMarkSweep::BindBitmaps() {
   MarkSweep::BindBitmaps();
 
-  Spaces& spaces = GetHeap()->GetSpaces();
+  const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
   WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
   // For partial GCs we need to bind the bitmap of the zygote space so that all objects in the
   // zygote space are viewed as marked.
-  for (Spaces::iterator it = spaces.begin(); it != spaces.end(); ++it) {
-    ContinuousSpace* space = *it;
-    if (space->GetGcRetentionPolicy() == kGcRetentionPolicyFullCollect) {
+  // TODO: C++0x
+  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
+  for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
+    space::ContinuousSpace* space = *it;
+    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect) {
+      CHECK(space->IsZygoteSpace());
       ImmuneSpace(space);
     }
   }
 }
 
+}  // namespace collector
+}  // namespace gc
 }  // namespace art
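
PartialMarkSweep::BindBitmaps above immunizes every full-collect (Zygote) space, and
MarkSweep::Sweep makes the matching decision when reclaiming. The combined rule reduces to a
small predicate over the retention policy and the GC type (a sketch; the enum mirrors
space::GcRetentionPolicy):

// Mirrors space::GcRetentionPolicy for illustration.
enum GcRetentionPolicy {
  kGcRetentionPolicyNeverCollect,   // image spaces
  kGcRetentionPolicyFullCollect,    // Zygote space
  kGcRetentionPolicyAlwaysCollect,  // alloc spaces
};

// Should this continuous space be swept by the current collection?
bool ShouldSweepSpace(GcRetentionPolicy policy, bool partial) {
  switch (policy) {
    case kGcRetentionPolicyAlwaysCollect:
      return true;      // alloc spaces: swept by every collection
    case kGcRetentionPolicyFullCollect:
      return !partial;  // Zygote space: swept only by full collections
    case kGcRetentionPolicyNeverCollect:
      return false;     // image spaces: live == marked, never swept
  }
  return false;
}
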
diff --git a/src/gc/collector/partial_mark_sweep.h b/src/gc/collector/partial_mark_sweep.h
new file mode 100644
index 0000000..bd4a580
--- /dev/null
+++ b/src/gc/collector/partial_mark_sweep.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_GC_COLLECTOR_PARTIAL_MARK_SWEEP_H_
+#define ART_SRC_GC_COLLECTOR_PARTIAL_MARK_SWEEP_H_
+
+#include "locks.h"
+#include "mark_sweep.h"
+
+namespace art {
+namespace gc {
+namespace collector {
+
+class PartialMarkSweep : public MarkSweep {
+ public:
+  virtual GcType GetGcType() const {
+    return kGcTypePartial;
+  }
+
+  explicit PartialMarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix = "");
+  ~PartialMarkSweep() {}
+
+ protected:
+  // Bind the live bits to the mark bits of bitmaps for spaces that aren't collected for partial
+  // collections, i.e. the Zygote space. Also mark this space as immune.
+  virtual void BindBitmaps() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  DISALLOW_COPY_AND_ASSIGN(PartialMarkSweep);
+};
+
+}  // namespace collector
+}  // namespace gc
+}  // namespace art
+
+#endif  // ART_SRC_GC_COLLECTOR_PARTIAL_MARK_SWEEP_H_
diff --git a/src/gc/collector/sticky_mark_sweep.cc b/src/gc/collector/sticky_mark_sweep.cc
new file mode 100644
index 0000000..71e580d
--- /dev/null
+++ b/src/gc/collector/sticky_mark_sweep.cc
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gc/heap.h"
+#include "gc/space/large_object_space.h"
+#include "gc/space/space.h"
+#include "sticky_mark_sweep.h"
+#include "thread.h"
+
+namespace art {
+namespace gc {
+namespace collector {
+
+StickyMarkSweep::StickyMarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix)
+    : PartialMarkSweep(heap, is_concurrent,
+                       name_prefix + (name_prefix.empty() ? "" : " ") + "sticky") {
+  cumulative_timings_.SetName(GetName());
+}
+
+void StickyMarkSweep::BindBitmaps() {
+  PartialMarkSweep::BindBitmaps();
+
+  const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
+  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
+  // For sticky GC, we want to bind the bitmaps of all spaces as the allocation stack lets us
+  // know what was allocated since the last GC. A side-effect of binding the allocation space mark
+  // and live bitmap is that marking the objects will place them in the live bitmap.
+  // TODO: C++0x
+  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
+  for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
+    space::ContinuousSpace* space = *it;
+    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) {
+      BindLiveToMarkBitmap(space);
+    }
+  }
+
+  GetHeap()->GetLargeObjectsSpace()->CopyLiveToMarked();
+}
+
+void StickyMarkSweep::MarkReachableObjects() {
+  DisableFinger();
+  RecursiveMarkDirtyObjects(accounting::CardTable::kCardDirty - 1);
+}
+
+void StickyMarkSweep::Sweep(bool swap_bitmaps) {
+  timings_.NewSplit("SweepArray");
+  accounting::ObjectStack* live_stack = GetHeap()->GetLiveStack();
+  SweepArray(live_stack, false);
+}
+
+}  // namespace collector
+}  // namespace gc
+}  // namespace art
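
StickyMarkSweep::Sweep never walks the space bitmaps: after BindBitmaps, everything that
survived the previous collection already reads as marked, so the only sweep candidates are
the objects recorded in the allocation stack. The shape of that reclaim step in miniature
(toy object type; the real SweepArray tests mark bitmaps and bulk-frees):

#include <cstddef>
#include <vector>

struct ToyObject {
  bool marked;  // stand-in for a mark-bitmap test
};

// allocation_stack records every object allocated since the previous GC;
// anything not reached by the sticky mark phase is garbage.
size_t SweepAllocationStack(std::vector<ToyObject*>& allocation_stack) {
  size_t freed_objects = 0;
  for (size_t i = 0; i < allocation_stack.size(); ++i) {
    ToyObject* obj = allocation_stack[i];
    if (!obj->marked) {
      delete obj;
      ++freed_objects;
    }
  }
  allocation_stack.clear();  // the equivalent of allocations->Reset()
  return freed_objects;
}
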
diff --git a/src/gc/sticky_mark_sweep.h b/src/gc/collector/sticky_mark_sweep.h
similarity index 67%
rename from src/gc/sticky_mark_sweep.h
rename to src/gc/collector/sticky_mark_sweep.h
index 41ab0cc..b16cfc1 100644
--- a/src/gc/sticky_mark_sweep.h
+++ b/src/gc/collector/sticky_mark_sweep.h
@@ -22,29 +22,34 @@
 #include "partial_mark_sweep.h"
 
 namespace art {
+namespace gc {
+namespace collector {
 
 class StickyMarkSweep : public PartialMarkSweep {
  public:
-  virtual GcType GetGcType() const {
+  GcType GetGcType() const {
     return kGcTypeSticky;
   }
 
-  explicit StickyMarkSweep(Heap* heap, bool is_concurrent);
-  ~StickyMarkSweep();
-protected:
-  virtual void BindBitmaps()
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  explicit StickyMarkSweep(Heap* heap, bool is_concurrent, const std::string& name_prefix = "");
+  ~StickyMarkSweep() {}
 
-  virtual void MarkReachableObjects()
+ protected:
+  // Bind the live bits to the mark bits of bitmaps for all spaces; all spaces other than the
+  // alloc space will be marked as immune.
+  void BindBitmaps() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  void MarkReachableObjects()
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
-  virtual void Sweep(TimingLogger& timings, bool swap_bitmaps)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+  void Sweep(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
   DISALLOW_COPY_AND_ASSIGN(StickyMarkSweep);
 };
 
+}  // namespace collector
+}  // namespace gc
 }  // namespace art
 
 #endif  // ART_SRC_GC_STICKY_MARK_SWEEP_H_
diff --git a/src/gc/garbage_collector.cc b/src/gc/garbage_collector.cc
deleted file mode 100644
index 94daec7..0000000
--- a/src/gc/garbage_collector.cc
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "garbage_collector.h"
-
-#include "base/mutex-inl.h"
-#include "thread.h"
-#include "thread_list.h"
-
-namespace art {
-  GarbageCollector::GarbageCollector(Heap* heap)
-      : heap_(heap),
-        duration_(0) {
-
-  }
-
-  bool GarbageCollector::HandleDirtyObjectsPhase() {
-    DCHECK(IsConcurrent());
-    return true;
-  }
-
-  void GarbageCollector::RegisterPause(uint64_t nano_length) {
-    pause_times_.push_back(nano_length);
-  }
-
-  void GarbageCollector::Run() {
-    Thread* self = Thread::Current();
-    ThreadList* thread_list = Runtime::Current()->GetThreadList();
-
-    uint64_t start_time = NanoTime();
-    pause_times_.clear();
-    duration_ = 0;
-
-    InitializePhase();
-
-    if (!IsConcurrent()) {
-      // Pause is the entire length of the GC.
-      uint64_t pause_start = NanoTime();
-      thread_list->SuspendAll();
-      MarkingPhase();
-      ReclaimPhase();
-      thread_list->ResumeAll();
-      uint64_t pause_end = NanoTime();
-      pause_times_.push_back(pause_end - pause_start);
-    } else {
-      {
-        ReaderMutexLock mu(self, *Locks::mutator_lock_);
-        MarkingPhase();
-      }
-      bool done = false;
-      while (!done) {
-        uint64_t pause_start = NanoTime();
-        thread_list->SuspendAll();
-        done = HandleDirtyObjectsPhase();
-        thread_list->ResumeAll();
-        uint64_t pause_end = NanoTime();
-        pause_times_.push_back(pause_end - pause_start);
-      }
-      {
-        ReaderMutexLock mu(self, *Locks::mutator_lock_);
-        ReclaimPhase();
-      }
-    }
-
-    uint64_t end_time = NanoTime();
-    duration_ = end_time - start_time;
-
-    FinishPhase();
-  }
-
-  GarbageCollector::~GarbageCollector() {
-
-  }
-}  // namespace art
diff --git a/src/gc/gc_type.h b/src/gc/gc_type.h
deleted file mode 100644
index 908f038..0000000
--- a/src/gc/gc_type.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_SRC_GC_GC_TYPE_H_
-#define ART_SRC_GC_GC_TYPE_H_
-
-namespace art {
-
-// The ordering of the enum matters, it is used to determine which GCs are run first.
-enum GcType {
-  // No Gc
-  kGcTypeNone,
-  // Sticky mark bits "generational" GC.
-  kGcTypeSticky,
-  // Partial GC, over only the alloc space.
-  kGcTypePartial,
-  // Full GC
-  kGcTypeFull,
-  // Number of different Gc types.
-  kGcTypeMax,
-};
-std::ostream& operator<<(std::ostream& os, const GcType& policy);
-
-}  // namespace art
-
-#endif  // ART_SRC_GC_GC_TYPE_H_
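
The ordering comment in this (relocated) header is load-bearing: because sticky < partial <
full, the heap can try the cheapest collector first and escalate only while an allocation
still cannot be satisfied. A sketch of such a selection loop, assuming a collect callback
that returns bytes freed (the callback and loop are illustrative, not the Heap API):

#include <cstddef>

enum GcType {
  kGcTypeNone,     // no GC
  kGcTypeSticky,   // sticky mark bits "generational" GC
  kGcTypePartial,  // partial GC, over only the alloc space
  kGcTypeFull,     // full GC
  kGcTypeMax,      // number of different GC types
};

// Escalate through collectors in enum order until enough bytes are freed.
GcType CollectUntilFreed(size_t needed, size_t (*collect)(GcType)) {
  size_t freed = 0;
  for (int type = kGcTypeSticky; type < kGcTypeMax; ++type) {
    freed += collect(static_cast<GcType>(type));
    if (freed >= needed) {
      return static_cast<GcType>(type);  // the GC type that satisfied us
    }
  }
  return kGcTypeFull;
}
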
diff --git a/src/heap.cc b/src/gc/heap.cc
similarity index 70%
rename from src/heap.cc
rename to src/gc/heap.cc
index f39c26e..34c0b5c 100644
--- a/src/heap.cc
+++ b/src/gc/heap.cc
@@ -25,20 +25,17 @@
 #include "base/stl_util.h"
 #include "cutils/sched_policy.h"
 #include "debugger.h"
-#include "gc/atomic_stack.h"
-#include "gc/card_table.h"
-#include "gc/card_table-inl.h"
-#include "gc/heap_bitmap.h"
-#include "gc/heap_bitmap-inl.h"
-#include "gc/large_object_space.h"
-#include "gc/mark_sweep.h"
-#include "gc/mark_sweep-inl.h"
-#include "gc/partial_mark_sweep.h"
-#include "gc/space_bitmap-inl.h"
-#include "gc/sticky_mark_sweep.h"
-#include "gc/mod_union_table.h"
-#include "gc/mod_union_table-inl.h"
-#include "gc/space.h"
+#include "gc/accounting/atomic_stack.h"
+#include "gc/accounting/card_table-inl.h"
+#include "gc/accounting/heap_bitmap-inl.h"
+#include "gc/accounting/mod_union_table-inl.h"
+#include "gc/accounting/space_bitmap-inl.h"
+#include "gc/collector/mark_sweep-inl.h"
+#include "gc/collector/partial_mark_sweep.h"
+#include "gc/collector/sticky_mark_sweep.h"
+#include "gc/space/image_space.h"
+#include "gc/space/large_object_space.h"
+#include "gc/space/space-inl.h"
 #include "image.h"
 #include "invoke_arg_array_builder.h"
 #include "mirror/class-inl.h"
@@ -56,10 +53,15 @@
 #include "well_known_classes.h"
 
 namespace art {
+namespace gc {
 
+// When to create a log message about a slow GC, 100ms.
 static const uint64_t kSlowGcThreshold = MsToNs(100);
+// When to create a log message about a slow pause, 5ms.
 static const uint64_t kLongGcPauseThreshold = MsToNs(5);
 static const bool kDumpGcPerformanceOnShutdown = false;
+// Minimum amount of remaining bytes before a concurrent GC is triggered.
+static const size_t kMinConcurrentRemainingBytes = 128 * KB;
 const double Heap::kDefaultTargetUtilization = 0.5;
 
 static bool GenerateImage(const std::string& image_file_name) {
@@ -156,19 +158,18 @@
       card_table_(NULL),
       concurrent_gc_(concurrent_gc),
       have_zygote_space_(false),
+      reference_queue_lock_(NULL),
       is_gc_running_(false),
-      last_gc_type_(kGcTypeNone),
-      enforce_heap_growth_rate_(false),
+      last_gc_type_(collector::kGcTypeNone),
       capacity_(capacity),
       growth_limit_(growth_limit),
       max_allowed_footprint_(initial_size),
-      concurrent_start_size_(128 * KB),
-      concurrent_min_free_(256 * KB),
-      concurrent_start_bytes_(concurrent_gc ? initial_size - concurrent_start_size_ :
-          std::numeric_limits<size_t>::max()),
+      concurrent_start_bytes_(concurrent_gc ? initial_size - kMinConcurrentRemainingBytes
+                                            : std::numeric_limits<size_t>::max()),
       sticky_gc_count_(0),
-      total_bytes_freed_(0),
-      total_objects_freed_(0),
+      sticky_to_partial_gc_ratio_(10),
+      total_bytes_freed_ever_(0),
+      total_objects_freed_ever_(0),
       large_object_threshold_(3 * kPageSize),
       num_bytes_allocated_(0),
       verify_missing_card_marks_(false),
@@ -176,12 +177,11 @@
       verify_pre_gc_heap_(false),
       verify_post_gc_heap_(false),
       verify_mod_union_table_(false),
-      partial_gc_frequency_(10),
       min_alloc_space_size_for_sticky_gc_(2 * MB),
       min_remaining_space_for_sticky_gc_(1 * MB),
-      last_trim_time_(0),
+      last_trim_time_ms_(0),
       allocation_rate_(0),
-      max_allocation_stack_size_(MB),
+      max_allocation_stack_size_(kDesiredHeapVerification > kNoHeapVerification ? KB : MB),
       reference_referent_offset_(0),
       reference_queue_offset_(0),
       reference_queueNext_offset_(0),
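
The concurrent_start_bytes_ initializer above encodes the concurrent-GC trigger: with
kMinConcurrentRemainingBytes of 128 KB, a concurrent collection should be requested once
allocation leaves less than 128 KB of headroom, and an unreachable threshold disables the
request when concurrent GC is off. As arithmetic (a sketch; names follow the fields above):

#include <cstddef>
#include <limits>

static const size_t KB = 1024;
static const size_t kMinConcurrentRemainingBytes = 128 * KB;

size_t ConcurrentStartBytes(bool concurrent_gc, size_t footprint) {
  // An unreachable threshold disables the concurrent-GC request entirely.
  return concurrent_gc ? footprint - kMinConcurrentRemainingBytes
                       : std::numeric_limits<size_t>::max();
}

bool ShouldRequestConcurrentGc(size_t bytes_allocated, size_t start_bytes) {
  return bytes_allocated >= start_bytes;
}
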
@@ -198,34 +198,34 @@
     LOG(INFO) << "Heap() entering";
   }
 
-  live_bitmap_.reset(new HeapBitmap(this));
-  mark_bitmap_.reset(new HeapBitmap(this));
+  live_bitmap_.reset(new accounting::HeapBitmap(this));
+  mark_bitmap_.reset(new accounting::HeapBitmap(this));
 
   // Requested begin for the alloc space, to follow the mapped image and oat files
   byte* requested_begin = NULL;
   std::string image_file_name(original_image_file_name);
   if (!image_file_name.empty()) {
-    ImageSpace* image_space = NULL;
+    space::ImageSpace* image_space = NULL;
 
     if (OS::FileExists(image_file_name.c_str())) {
       // If the /system file exists, it should be up-to-date, don't try to generate
-      image_space = ImageSpace::Create(image_file_name);
+      image_space = space::ImageSpace::Create(image_file_name);
     } else {
       // If the /system file didn't exist, we need to use one from the dalvik-cache.
       // If the cache file exists, try to open, but if it fails, regenerate.
       // If it does not exist, generate.
       image_file_name = GetDalvikCacheFilenameOrDie(image_file_name);
       if (OS::FileExists(image_file_name.c_str())) {
-        image_space = ImageSpace::Create(image_file_name);
+        image_space = space::ImageSpace::Create(image_file_name);
       }
       if (image_space == NULL) {
         CHECK(GenerateImage(image_file_name)) << "Failed to generate image: " << image_file_name;
-        image_space = ImageSpace::Create(image_file_name);
+        image_space = space::ImageSpace::Create(image_file_name);
       }
     }
 
     CHECK(image_space != NULL) << "Failed to create space from " << image_file_name;
-    AddSpace(image_space);
+    AddContinuousSpace(image_space);
     // Oat files referenced by image files immediately follow them in memory, ensure alloc space
     // isn't going to get in the middle
     byte* oat_file_end_addr = image_space->GetImageHeader().GetOatFileEnd();
@@ -247,46 +247,47 @@
   // Allocate the large object space.
   const bool kUseFreeListSpaceForLOS  = false;
   if (kUseFreeListSpaceForLOS) {
-    large_object_space_.reset(FreeListSpace::Create("large object space", NULL, capacity));
+    large_object_space_ = space::FreeListSpace::Create("large object space", NULL, capacity);
   } else {
-    large_object_space_.reset(LargeObjectMapSpace::Create("large object space"));
+    large_object_space_ = space::LargeObjectMapSpace::Create("large object space");
   }
-  live_bitmap_->SetLargeObjects(large_object_space_->GetLiveObjects());
-  mark_bitmap_->SetLargeObjects(large_object_space_->GetMarkObjects());
+  CHECK(large_object_space_ != NULL) << "Failed to create large object space";
+  AddDiscontinuousSpace(large_object_space_);
 
-  UniquePtr<DlMallocSpace> alloc_space(DlMallocSpace::Create("alloc space", initial_size,
-                                                             growth_limit, capacity,
-                                                             requested_begin));
-  alloc_space_ = alloc_space.release();
+  alloc_space_ = space::DlMallocSpace::Create("alloc space",
+                                              initial_size,
+                                              growth_limit, capacity,
+                                              requested_begin);
   CHECK(alloc_space_ != NULL) << "Failed to create alloc space";
   alloc_space_->SetFootprintLimit(alloc_space_->Capacity());
-  AddSpace(alloc_space_);
+  AddContinuousSpace(alloc_space_);
 
-  // Spaces are sorted in order of Begin().
-  byte* heap_begin = spaces_.front()->Begin();
-  size_t heap_capacity = spaces_.back()->End() - spaces_.front()->Begin();
-  if (spaces_.back()->IsAllocSpace()) {
-    heap_capacity += spaces_.back()->AsAllocSpace()->NonGrowthLimitCapacity();
+  // Compute heap capacity. Continuous spaces are sorted in order of Begin().
+  byte* heap_begin = continuous_spaces_.front()->Begin();
+  size_t heap_capacity = continuous_spaces_.back()->End() - continuous_spaces_.front()->Begin();
+  if (continuous_spaces_.back()->IsDlMallocSpace()) {
+    heap_capacity += continuous_spaces_.back()->AsDlMallocSpace()->NonGrowthLimitCapacity();
   }
 
   // Mark image objects in the live bitmap
   // TODO: C++0x
-  for (Spaces::iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
-    Space* space = *it;
+  typedef std::vector<space::ContinuousSpace*>::iterator It;
+  for (It it = continuous_spaces_.begin(); it != continuous_spaces_.end(); ++it) {
+    space::ContinuousSpace* space = *it;
     if (space->IsImageSpace()) {
-      ImageSpace* image_space = space->AsImageSpace();
+      space::ImageSpace* image_space = space->AsImageSpace();
       image_space->RecordImageAllocations(image_space->GetLiveBitmap());
     }
   }
 
   // Allocate the card table.
-  card_table_.reset(CardTable::Create(heap_begin, heap_capacity));
+  card_table_.reset(accounting::CardTable::Create(heap_begin, heap_capacity));
   CHECK(card_table_.get() != NULL) << "Failed to create card table";
 
-  mod_union_table_.reset(new ModUnionTableToZygoteAllocspace<ModUnionTableReferenceCache>(this));
-  CHECK(mod_union_table_.get() != NULL) << "Failed to create mod-union table";
+  image_mod_union_table_.reset(new accounting::ModUnionTableToZygoteAllocspace(this));
+  CHECK(image_mod_union_table_.get() != NULL) << "Failed to create image mod-union table";
 
-  zygote_mod_union_table_.reset(new ModUnionTableCardCache(this));
+  zygote_mod_union_table_.reset(new accounting::ModUnionTableCardCache(this));
   CHECK(zygote_mod_union_table_.get() != NULL) << "Failed to create Zygote mod-union table";
 
   // TODO: Count objects in the image space here.
@@ -294,11 +295,11 @@
 
   // Default mark stack size in bytes.
   static const size_t default_mark_stack_size = 64 * KB;
-  mark_stack_.reset(ObjectStack::Create("mark stack", default_mark_stack_size));
-  allocation_stack_.reset(ObjectStack::Create("allocation stack",
-                                              max_allocation_stack_size_));
-  live_stack_.reset(ObjectStack::Create("live stack",
-                                      max_allocation_stack_size_));
+  mark_stack_.reset(accounting::ObjectStack::Create("mark stack", default_mark_stack_size));
+  allocation_stack_.reset(accounting::ObjectStack::Create("allocation stack",
+                                                          max_allocation_stack_size_));
+  live_stack_.reset(accounting::ObjectStack::Create("live stack",
+                                                    max_allocation_stack_size_));
 
   // It's still too early to take a lock because there are no threads yet, but we can create locks
   // now. We don't create it earlier to make it clear that you can't use locks during heap
@@ -308,17 +309,17 @@
                                                 *gc_complete_lock_));
 
  // Create the reference queue lock; this is required for parallel object scanning in the GC.
-  reference_queue_lock_.reset(new Mutex("reference queue lock"));
+  reference_queue_lock_ = new Mutex("reference queue lock");
 
-  last_gc_time_ = NanoTime();
+  last_gc_time_ns_ = NanoTime();
   last_gc_size_ = GetBytesAllocated();
 
   // Create our garbage collectors.
   for (size_t i = 0; i < 2; ++i) {
     const bool concurrent = i != 0;
-    mark_sweep_collectors_.push_back(new MarkSweep(this, concurrent));
-    mark_sweep_collectors_.push_back(new PartialMarkSweep(this, concurrent));
-    mark_sweep_collectors_.push_back(new StickyMarkSweep(this, concurrent));
+    mark_sweep_collectors_.push_back(new collector::MarkSweep(this, concurrent));
+    mark_sweep_collectors_.push_back(new collector::PartialMarkSweep(this, concurrent));
+    mark_sweep_collectors_.push_back(new collector::StickyMarkSweep(this, concurrent));
   }
 
   CHECK(max_allowed_footprint_ != 0);
@@ -331,7 +332,7 @@
   // TODO: Make sysconf(_SC_NPROCESSORS_CONF) be a helper function?
   // Use the number of processors - 1 since the thread doing the GC does work while its waiting for
   // workers to complete.
-  thread_pool_.reset(new ThreadPool(sysconf(_SC_NPROCESSORS_CONF) - 1));
+  thread_pool_.reset(new ThreadPool(1)); // new ThreadPool(sysconf(_SC_NPROCESSORS_CONF) - 1));
 }
 
 void Heap::DeleteThreadPool() {
@@ -339,44 +340,55 @@
 }
 
 // Sort spaces based on begin address
-struct SpaceSorter {
-  bool operator ()(const ContinuousSpace* a, const ContinuousSpace* b) const {
+struct ContinuousSpaceSorter {
+  bool operator ()(const space::ContinuousSpace* a, const space::ContinuousSpace* b) const {
     return a->Begin() < b->Begin();
   }
 };
 
-void Heap::AddSpace(ContinuousSpace* space) {
+void Heap::AddContinuousSpace(space::ContinuousSpace* space) {
   WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
   DCHECK(space != NULL);
   DCHECK(space->GetLiveBitmap() != NULL);
-  live_bitmap_->AddSpaceBitmap(space->GetLiveBitmap());
+  live_bitmap_->AddContinuousSpaceBitmap(space->GetLiveBitmap());
   DCHECK(space->GetMarkBitmap() != NULL);
-  mark_bitmap_->AddSpaceBitmap(space->GetMarkBitmap());
-  spaces_.push_back(space);
-  if (space->IsAllocSpace()) {
-    alloc_space_ = space->AsAllocSpace();
+  mark_bitmap_->AddContinuousSpaceBitmap(space->GetMarkBitmap());
+  continuous_spaces_.push_back(space);
+  if (space->IsDlMallocSpace() && !space->IsLargeObjectSpace()) {
+    alloc_space_ = space->AsDlMallocSpace();
   }
 
   // Ensure that spaces remain sorted in increasing order of start address (required for CMS finger)
-  std::sort(spaces_.begin(), spaces_.end(), SpaceSorter());
+  std::sort(continuous_spaces_.begin(), continuous_spaces_.end(), ContinuousSpaceSorter());
 
   // Ensure that ImageSpaces < ZygoteSpaces < AllocSpaces so that we can do address based checks to
   // avoid redundant marking.
   bool seen_zygote = false, seen_alloc = false;
-  for (Spaces::const_iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
-    Space* space = *it;
+  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
+  for (It it = continuous_spaces_.begin(); it != continuous_spaces_.end(); ++it) {
+    space::ContinuousSpace* space = *it;
     if (space->IsImageSpace()) {
       DCHECK(!seen_zygote);
       DCHECK(!seen_alloc);
     } else if (space->IsZygoteSpace()) {
       DCHECK(!seen_alloc);
       seen_zygote = true;
-    } else if (space->IsAllocSpace()) {
+    } else if (space->IsDlMallocSpace()) {
       seen_alloc = true;
     }
   }
 }
 
+void Heap::AddDiscontinuousSpace(space::DiscontinuousSpace* space) {
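+  // Register the space's live and mark object sets with the heap bitmaps, then track the space.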
+  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
+  DCHECK(space != NULL);
+  DCHECK(space->GetLiveObjects() != NULL);
+  live_bitmap_->AddDiscontinuousObjectSet(space->GetLiveObjects());
+  DCHECK(space->GetMarkObjects() != NULL);
+  mark_bitmap_->AddDiscontinuousObjectSet(space->GetMarkObjects());
+  discontinuous_spaces_.push_back(space);
+}
+
 void Heap::DumpGcPerformanceInfo(std::ostream& os) {
   // Dump cumulative timings.
   os << "Dumping cumulative Gc timings\n";
@@ -385,14 +397,15 @@
   // Dump cumulative loggers for each GC type.
   // TODO: C++0x
   uint64_t total_paused_time = 0;
-  for (Collectors::const_iterator it = mark_sweep_collectors_.begin();
+  typedef std::vector<collector::MarkSweep*>::const_iterator It;
+  for (It it = mark_sweep_collectors_.begin();
        it != mark_sweep_collectors_.end(); ++it) {
-    MarkSweep* collector = *it;
+    collector::MarkSweep* collector = *it;
     CumulativeLogger& logger = collector->GetCumulativeTimings();
     if (logger.GetTotalNs() != 0) {
       os << Dumpable<CumulativeLogger>(logger);
       const uint64_t total_ns = logger.GetTotalNs();
-      const uint64_t total_pause_ns = (*it)->GetTotalPausedTime();
+      const uint64_t total_pause_ns = (*it)->GetTotalPausedTimeNs();
       double seconds = NsToMs(logger.GetTotalNs()) / 1000.0;
       const uint64_t freed_bytes = collector->GetTotalFreedBytes();
       const uint64_t freed_objects = collector->GetTotalFreedObjects();
@@ -407,15 +420,15 @@
     }
   }
   uint64_t allocation_time = static_cast<uint64_t>(total_allocation_time_) * kTimeAdjust;
-  size_t total_objects_allocated = GetTotalObjectsAllocated();
-  size_t total_bytes_allocated = GetTotalBytesAllocated();
+  size_t total_objects_allocated = GetObjectsAllocatedEver();
+  size_t total_bytes_allocated = GetBytesAllocatedEver();
   if (total_duration != 0) {
     const double total_seconds = double(total_duration / 1000) / 1000000.0;
     os << "Total time spent in GC: " << PrettyDuration(total_duration) << "\n";
     os << "Mean GC size throughput: "
-       << PrettySize(GetTotalBytesFreed() / total_seconds) << "/s\n";
+       << PrettySize(GetBytesFreedEver() / total_seconds) << "/s\n";
     os << "Mean GC object throughput: "
-       << (GetTotalObjectsFreed() / total_seconds) << " objects/s\n";
+       << (GetObjectsFreedEver() / total_seconds) << " objects/s\n";
   }
   os << "Total number of allocations: " << total_objects_allocated << "\n";
   os << "Total bytes allocated " << PrettySize(total_bytes_allocated) << "\n";
@@ -444,24 +457,54 @@
   // heap lock held. We know though that no non-daemon threads are executing, and we know that
   // all daemon threads are suspended, and we also know that the threads list have been deleted, so
   // those threads can't resume. We're the only running thread, and we can do whatever we like...
-  STLDeleteElements(&spaces_);
+  STLDeleteElements(&continuous_spaces_);
+  STLDeleteElements(&discontinuous_spaces_);
   delete gc_complete_lock_;
+  delete reference_queue_lock_;
 }
 
-ContinuousSpace* Heap::FindSpaceFromObject(const mirror::Object* obj) const {
+space::ContinuousSpace* Heap::FindContinuousSpaceFromObject(const mirror::Object* obj,
+                                                            bool fail_ok) const {
   // TODO: C++0x auto
-  for (Spaces::const_iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
+  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
+  for (It it = continuous_spaces_.begin(), end = continuous_spaces_.end(); it != end; ++it) {
     if ((*it)->Contains(obj)) {
       return *it;
     }
   }
-  LOG(FATAL) << "object " << reinterpret_cast<const void*>(obj) << " not inside any spaces!";
+  if (!fail_ok) {
+    LOG(FATAL) << "object " << reinterpret_cast<const void*>(obj) << " not inside any spaces!";
+  }
   return NULL;
 }
 
-ImageSpace* Heap::GetImageSpace() {
+space::DiscontinuousSpace* Heap::FindDiscontinuousSpaceFromObject(const mirror::Object* obj,
+                                                                  bool fail_ok) const {
   // TODO: C++0x auto
-  for (Spaces::const_iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
+  typedef std::vector<space::DiscontinuousSpace*>::const_iterator It;
+  for (It it = discontinuous_spaces_.begin(), end = discontinuous_spaces_.end(); it != end; ++it) {
+    if ((*it)->Contains(obj)) {
+      return *it;
+    }
+  }
+  if (!fail_ok) {
+    LOG(FATAL) << "object " << reinterpret_cast<const void*>(obj) << " not inside any spaces!";
+  }
+  return NULL;
+}
+
+space::Space* Heap::FindSpaceFromObject(const mirror::Object* obj, bool fail_ok) const {
+  space::Space* result = FindContinuousSpaceFromObject(obj, true);
+  if (result != NULL) {
+    return result;
+  }
+  return FindDiscontinuousSpaceFromObject(obj, true);
+}
+
+space::ImageSpace* Heap::GetImageSpace() const {
+  // TODO: C++0x auto
+  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
+  for (It it = continuous_spaces_.begin(), end = continuous_spaces_.end(); it != end; ++it) {
     if ((*it)->IsImageSpace()) {
       return (*it)->AsImageSpace();
     }
@@ -469,10 +512,6 @@
   return NULL;
 }
 
-DlMallocSpace* Heap::GetAllocSpace() {
-  return alloc_space_;
-}
-
 static void MSpaceChunkCallback(void* start, void* end, size_t used_bytes, void* arg) {
   size_t chunk_size = reinterpret_cast<uint8_t*>(end) - reinterpret_cast<uint8_t*>(start);
   if (used_bytes < chunk_size) {
@@ -501,17 +540,17 @@
   // range. This also means that we rely on SetClass not dirtying the object's card.
   if (byte_count >= large_object_threshold_ && have_zygote_space_ && c->IsPrimitiveArray()) {
     size = RoundUp(byte_count, kPageSize);
-    obj = Allocate(self, large_object_space_.get(), size);
+    obj = Allocate(self, large_object_space_, size);
     // Make sure that our large object didn't get placed anywhere within the space interval or else
     // it breaks the immune range.
     DCHECK(obj == NULL ||
-           reinterpret_cast<byte*>(obj) < spaces_.front()->Begin() ||
-           reinterpret_cast<byte*>(obj) >= spaces_.back()->End());
+           reinterpret_cast<byte*>(obj) < continuous_spaces_.front()->Begin() ||
+           reinterpret_cast<byte*>(obj) >= continuous_spaces_.back()->End());
   } else {
     obj = Allocate(self, alloc_space_, byte_count);
 
     // Ensure that we did not allocate into a zygote space.
-    DCHECK(obj == NULL || !have_zygote_space_ || !FindSpaceFromObject(obj)->IsZygoteSpace());
+    DCHECK(obj == NULL || !have_zygote_space_ || !FindSpaceFromObject(obj, false)->IsZygoteSpace());
     size = alloc_space_->AllocationSize(obj);
   }
 
@@ -543,8 +582,8 @@
   }
   std::ostringstream oss;
   int64_t total_bytes_free = GetFreeMemory();
-  uint64_t alloc_space_size = alloc_space_->GetNumBytesAllocated();
-  uint64_t large_object_size = large_object_space_->GetNumObjectsAllocated();
+  uint64_t alloc_space_size = alloc_space_->GetBytesAllocated();
+  uint64_t large_object_size = large_object_space_->GetObjectsAllocated();
   oss << "Failed to allocate a " << byte_count << " byte allocation with " << total_bytes_free
       << " free bytes; allocation space size " << alloc_space_size
       << "; large object space size " << large_object_size;
@@ -552,9 +591,11 @@
   if (total_bytes_free >= byte_count) {
     size_t max_contiguous_allocation = 0;
     // TODO: C++0x auto
-    for (Spaces::const_iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
-      if ((*it)->IsAllocSpace()) {
-        (*it)->AsAllocSpace()->Walk(MSpaceChunkCallback, &max_contiguous_allocation);
+    typedef std::vector<space::ContinuousSpace*>::const_iterator It;
+    for (It it = continuous_spaces_.begin(), end = continuous_spaces_.end(); it != end; ++it) {
+      space::ContinuousSpace* space = *it;
+      if (space->IsDlMallocSpace()) {
+        space->AsDlMallocSpace()->Walk(MSpaceChunkCallback, &max_contiguous_allocation);
       }
     }
     oss << "; failed due to fragmentation (largest possible contiguous allocation "
@@ -570,20 +611,41 @@
   if (obj == NULL) {
     return true;
   }
-  if (!IsAligned<kObjectAlignment>(obj)) {
+  if (UNLIKELY(!IsAligned<kObjectAlignment>(obj))) {
     return false;
   }
-  for (size_t i = 0; i < spaces_.size(); ++i) {
-    if (spaces_[i]->Contains(obj)) {
-      return true;
-    }
-  }
-  return large_object_space_->Contains(obj);
+  return FindSpaceFromObject(obj, true) != NULL;
 }
 
 bool Heap::IsLiveObjectLocked(const mirror::Object* obj) {
-  Locks::heap_bitmap_lock_->AssertReaderHeld(Thread::Current());
-  return IsHeapAddress(obj) && GetLiveBitmap()->Test(obj);
+  // Locks::heap_bitmap_lock_->AssertReaderHeld(Thread::Current());
+  if (obj == NULL) {
+    return false;
+  }
+  if (UNLIKELY(!IsAligned<kObjectAlignment>(obj))) {
+    return false;
+  }
+  space::ContinuousSpace* cont_space = FindContinuousSpaceFromObject(obj, true);
+  if (cont_space != NULL) {
+    if (cont_space->GetLiveBitmap()->Test(obj)) {
+      return true;
+    }
+  } else {
+    space::DiscontinuousSpace* disc_space = FindDiscontinuousSpaceFromObject(obj, true);
+    if (disc_space != NULL) {
+      if (disc_space->GetLiveObjects()->Test(obj)) {
+        return true;
+      }
+    }
+  }
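+  // The object was not found in any space's live structures. It may still be sitting on the
+  // allocation or live stack, whose contents can change underneath us, so retry a few times.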
+  for (size_t i = 0; i < 5; ++i) {
+    if (allocation_stack_->Contains(const_cast<mirror::Object*>(obj)) ||
+        live_stack_->Contains(const_cast<mirror::Object*>(obj))) {
+      return true;
+    }
+    NanoSleep(MsToNs(10));
+  }
+  return false;
 }
 
 void Heap::VerifyObjectImpl(const mirror::Object* obj) {
@@ -596,16 +658,19 @@
 
 void Heap::DumpSpaces() {
   // TODO: C++0x auto
-  for (Spaces::iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
-    ContinuousSpace* space = *it;
-    SpaceBitmap* live_bitmap = space->GetLiveBitmap();
-    SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
+  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
+  for (It it = continuous_spaces_.begin(), end = continuous_spaces_.end(); it != end; ++it) {
+    space::ContinuousSpace* space = *it;
+    accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
+    accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
     LOG(INFO) << space << " " << *space << "\n"
               << live_bitmap << " " << *live_bitmap << "\n"
               << mark_bitmap << " " << *mark_bitmap;
   }
-  if (large_object_space_.get() != NULL) {
-    large_object_space_->Dump(LOG(INFO));
+  typedef std::vector<space::DiscontinuousSpace*>::const_iterator It2;
+  for (It2 it = discontinuous_spaces_.begin(), end = discontinuous_spaces_.end(); it != end; ++it) {
+    space::DiscontinuousSpace* space = *it;
+    LOG(INFO) << space << " " << *space << "\n";
   }
 }
 
@@ -636,18 +701,11 @@
   if (verify_object_mode_ != kVerifyAllFast) {
     // TODO: the bitmap tests below are racy if VerifyObjectBody is called without the
     //       heap_bitmap_lock_.
-    if (!GetLiveBitmap()->Test(obj)) {
-      // Check the allocation stack / live stack.
-      if (!std::binary_search(live_stack_->Begin(), live_stack_->End(), obj) &&
-          std::find(allocation_stack_->Begin(), allocation_stack_->End(), obj) ==
-              allocation_stack_->End()) {
-        if (large_object_space_->GetLiveObjects()->Test(obj)) {
-          DumpSpaces();
-          LOG(FATAL) << "Object is dead: " << obj;
-        }
-      }
+    if (!IsLiveObjectLocked(obj)) {
+      DumpSpaces();
+      LOG(FATAL) << "Object is dead: " << obj;
     }
-    if (!GetLiveBitmap()->Test(c)) {
+    if (!IsLiveObjectLocked(c)) {
       LOG(FATAL) << "Class of object is dead: " << c << " in object: " << obj;
     }
   }
@@ -682,7 +740,7 @@
   // This is safe to do since the GC will never free objects which are neither in the allocation
   // stack nor the live bitmap.
   while (!allocation_stack_->AtomicPushBack(obj)) {
-    CollectGarbageInternal(kGcTypeSticky, kGcCauseForAlloc, false);
+    CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
   }
 }
 
@@ -702,7 +760,8 @@
   }
 }
 
-mirror::Object* Heap::TryToAllocate(Thread* self, AllocSpace* space, size_t alloc_size, bool grow) {
+mirror::Object* Heap::TryToAllocate(Thread* self, space::AllocSpace* space, size_t alloc_size,
+                                    bool grow) {
   // Should we try to use a CAS here and fix up num_bytes_allocated_ later with AllocationSize?
   if (num_bytes_allocated_ + alloc_size > max_allowed_footprint_) {
     // max_allowed_footprint_ <= growth_limit_ so it is safe to check in here.
@@ -710,23 +769,12 @@
       // Completely out of memory.
       return NULL;
     }
-
-    if (enforce_heap_growth_rate_) {
-      if (grow) {
-        // Grow the heap by alloc_size extra bytes.
-        max_allowed_footprint_ = std::min(max_allowed_footprint_ + alloc_size, growth_limit_);
-        VLOG(gc) << "Grow heap to " << PrettySize(max_allowed_footprint_)
-                 << " for a " << PrettySize(alloc_size) << " allocation";
-      } else {
-        return NULL;
-      }
-    }
   }
 
   return space->Alloc(self, alloc_size);
 }
 
-mirror::Object* Heap::Allocate(Thread* self, AllocSpace* space, size_t alloc_size) {
+mirror::Object* Heap::Allocate(Thread* self, space::AllocSpace* space, size_t alloc_size) {
   // Since allocation can cause a GC which will need to SuspendAll, make sure all allocations are
   // done in the runnable state where suspension is expected.
   DCHECK_EQ(self->GetState(), kRunnable);
@@ -739,8 +787,8 @@
 
   // The allocation failed. If the GC is running, block until it completes, and then retry the
   // allocation.
-  GcType last_gc = WaitForConcurrentGcToComplete(self);
-  if (last_gc != kGcTypeNone) {
+  collector::GcType last_gc = WaitForConcurrentGcToComplete(self);
+  if (last_gc != collector::kGcTypeNone) {
     // A GC was in progress and we blocked, retry allocation now that memory has been freed.
     ptr = TryToAllocate(self, space, alloc_size, false);
     if (ptr != NULL) {
@@ -749,20 +797,21 @@
   }
 
   // Loop through our different Gc types and try to Gc until we get enough free memory.
-  for (size_t i = static_cast<size_t>(last_gc) + 1; i < static_cast<size_t>(kGcTypeMax); ++i) {
+  for (size_t i = static_cast<size_t>(last_gc) + 1;
+      i < static_cast<size_t>(collector::kGcTypeMax); ++i) {
     bool run_gc = false;
-    GcType gc_type = static_cast<GcType>(i);
+    collector::GcType gc_type = static_cast<collector::GcType>(i);
     switch (gc_type) {
-      case kGcTypeSticky: {
+      case collector::kGcTypeSticky: {
           const size_t alloc_space_size = alloc_space_->Size();
           run_gc = alloc_space_size > min_alloc_space_size_for_sticky_gc_ &&
               alloc_space_->Capacity() - alloc_space_size >= min_remaining_space_for_sticky_gc_;
           break;
         }
-      case kGcTypePartial:
+      case collector::kGcTypePartial:
         run_gc = have_zygote_space_;
         break;
-      case kGcTypeFull:
+      case collector::kGcTypeFull:
         run_gc = true;
         break;
       default:
@@ -771,7 +820,7 @@
 
     if (run_gc) {
       // If we actually ran a different type of Gc than requested, we can skip the index forwards.
-      GcType gc_type_ran = CollectGarbageInternal(gc_type, kGcCauseForAlloc, false);
+      collector::GcType gc_type_ran = CollectGarbageInternal(gc_type, kGcCauseForAlloc, false);
       DCHECK_GE(static_cast<size_t>(gc_type_ran), i);
       i = static_cast<size_t>(gc_type_ran);
 
@@ -799,7 +848,7 @@
            << " allocation";
 
   // We don't need a WaitForConcurrentGcToComplete here either.
-  CollectGarbageInternal(kGcTypeFull, kGcCauseForAlloc, true);
+  CollectGarbageInternal(collector::kGcTypeFull, kGcCauseForAlloc, true);
   return TryToAllocate(self, space, alloc_size, true);
 }
 
@@ -809,45 +858,54 @@
   target_utilization_ = target;
 }
 
-int64_t Heap::GetMaxMemory() const {
-  return growth_limit_;
-}
-
-int64_t Heap::GetTotalMemory() const {
-  return GetMaxMemory();
-}
-
-int64_t Heap::GetFreeMemory() const {
-  return GetMaxMemory() - num_bytes_allocated_;
-}
-
-size_t Heap::GetTotalBytesFreed() const {
-  return total_bytes_freed_;
-}
-
-size_t Heap::GetTotalObjectsFreed() const {
-  return total_objects_freed_;
-}
-
-size_t Heap::GetTotalObjectsAllocated() const {
-  size_t total = large_object_space_->GetTotalObjectsAllocated();
-  for (Spaces::const_iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
-    Space* space = *it;
-    if (space->IsAllocSpace()) {
-      total += space->AsAllocSpace()->GetTotalObjectsAllocated();
+size_t Heap::GetObjectsAllocated() const {
+  size_t total = 0;
+  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
+  for (It it = continuous_spaces_.begin(), end = continuous_spaces_.end(); it != end; ++it) {
+    space::ContinuousSpace* space = *it;
+    if (space->IsDlMallocSpace()) {
+      total += space->AsDlMallocSpace()->GetObjectsAllocated();
     }
   }
+  typedef std::vector<space::DiscontinuousSpace*>::const_iterator It2;
+  for (It2 it = discontinuous_spaces_.begin(), end = discontinuous_spaces_.end(); it != end; ++it) {
+    space::DiscontinuousSpace* space = *it;
+    total += space->AsLargeObjectSpace()->GetObjectsAllocated();
+  }
   return total;
 }
 
-size_t Heap::GetTotalBytesAllocated() const {
-  size_t total = large_object_space_->GetTotalBytesAllocated();
-  for (Spaces::const_iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
-    Space* space = *it;
-    if (space->IsAllocSpace()) {
-      total += space->AsAllocSpace()->GetTotalBytesAllocated();
+size_t Heap::GetObjectsAllocatedEver() const {
+  size_t total = 0;
+  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
+  for (It it = continuous_spaces_.begin(), end = continuous_spaces_.end(); it != end; ++it) {
+    space::ContinuousSpace* space = *it;
+    if (space->IsDlMallocSpace()) {
+      total += space->AsDlMallocSpace()->GetTotalObjectsAllocated();
     }
   }
+  typedef std::vector<space::DiscontinuousSpace*>::const_iterator It2;
+  for (It2 it = discontinuous_spaces_.begin(), end = discontinuous_spaces_.end(); it != end; ++it) {
+    space::DiscontinuousSpace* space = *it;
+    total += space->AsLargeObjectSpace()->GetTotalObjectsAllocated();
+  }
+  return total;
+}
+
+size_t Heap::GetBytesAllocatedEver() const {
+  size_t total = 0;
+  typedef std::vector<space::ContinuousSpace*>::const_iterator It;
+  for (It it = continuous_spaces_.begin(), end = continuous_spaces_.end(); it != end; ++it) {
+    space::ContinuousSpace* space = *it;
+    if (space->IsDlMallocSpace()) {
+      total += space->AsDlMallocSpace()->GetTotalBytesAllocated();
+    }
+  }
+  typedef std::vector<space::DiscontinuousSpace*>::const_iterator It2;
+  for (It2 it = discontinuous_spaces_.begin(), end = discontinuous_spaces_.end(); it != end; ++it) {
+    space::DiscontinuousSpace* space = *it;
+    total += space->AsLargeObjectSpace()->GetTotalBytesAllocated();
+  }
   return total;
 }
 
@@ -945,7 +1003,7 @@
   // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for
   // annotalysis on visitors.
   void operator()(const mirror::Object* o) const NO_THREAD_SAFETY_ANALYSIS {
-    MarkSweep::VisitObjectReferences(o, *this);
+    collector::MarkSweep::VisitObjectReferences(o, *this);
   }
 
   // For MarkSweep::VisitObjectReferences.
@@ -983,7 +1041,7 @@
   // last GC will not have necessarily been cleared.
   Thread* self = Thread::Current();
   WaitForConcurrentGcToComplete(self);
-  CollectGarbageInternal(kGcTypeFull, kGcCauseExplicit, clear_soft_references);
+  CollectGarbageInternal(collector::kGcTypeFull, kGcCauseExplicit, clear_soft_references);
 }
 
 void Heap::PreZygoteFork() {
@@ -1006,29 +1064,22 @@
     FlushAllocStack();
   }
 
-  // Replace the first alloc space we find with a zygote space.
-  // TODO: C++0x auto
-  for (Spaces::iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
-    if ((*it)->IsAllocSpace()) {
-      DlMallocSpace* zygote_space = (*it)->AsAllocSpace();
+  // Turn the current alloc space into a Zygote space and obtain the new alloc space, which is
+  // composed of the remaining available heap memory.
+  space::DlMallocSpace* zygote_space = alloc_space_;
+  alloc_space_ = zygote_space->CreateZygoteSpace();
+  alloc_space_->SetFootprintLimit(alloc_space_->Capacity());
 
-      // Turns the current alloc space into a Zygote space and obtain the new alloc space composed
-      // of the remaining available heap memory.
-      alloc_space_ = zygote_space->CreateZygoteSpace();
-      alloc_space_->SetFootprintLimit(alloc_space_->Capacity());
-
-      // Change the GC retention policy of the zygote space to only collect when full.
-      zygote_space->SetGcRetentionPolicy(kGcRetentionPolicyFullCollect);
-      AddSpace(alloc_space_);
-      have_zygote_space_ = true;
-      break;
-    }
-  }
+  // Change the GC retention policy of the zygote space to only collect when full.
+  zygote_space->SetGcRetentionPolicy(space::kGcRetentionPolicyFullCollect);
+  AddContinuousSpace(alloc_space_);
+  have_zygote_space_ = true;
 
   // Reset the cumulative loggers since we now have a few additional timing phases.
   // TODO: C++0x
-  for (Collectors::const_iterator it = mark_sweep_collectors_.begin();
-        it != mark_sweep_collectors_.end(); ++it) {
+  typedef std::vector<collector::MarkSweep*>::const_iterator It;
+  for (It it = mark_sweep_collectors_.begin(), end = mark_sweep_collectors_.end();
+      it != end; ++it) {
     (*it)->ResetCumulativeStatistics();
   }
 }
@@ -1039,11 +1090,8 @@
   allocation_stack_->Reset();
 }
 
-size_t Heap::GetUsedMemorySize() const {
-  return num_bytes_allocated_;
-}
-
-void Heap::MarkAllocStack(SpaceBitmap* bitmap, SpaceSetMap* large_objects, ObjectStack* stack) {
+void Heap::MarkAllocStack(accounting::SpaceBitmap* bitmap, accounting::SpaceSetMap* large_objects,
+                          accounting::ObjectStack* stack) {
   mirror::Object** limit = stack->End();
   for (mirror::Object** it = stack->Begin(); it != limit; ++it) {
     const mirror::Object* obj = *it;
@@ -1056,7 +1104,8 @@
   }
 }
 
-void Heap::UnMarkAllocStack(SpaceBitmap* bitmap, SpaceSetMap* large_objects, ObjectStack* stack) {
+void Heap::UnMarkAllocStack(accounting::SpaceBitmap* bitmap, accounting::SpaceSetMap* large_objects,
+                            accounting::ObjectStack* stack) {
   mirror::Object** limit = stack->End();
   for (mirror::Object** it = stack->Begin(); it != limit; ++it) {
     const mirror::Object* obj = *it;
@@ -1069,7 +1118,8 @@
   }
 }
 
-GcType Heap::CollectGarbageInternal(GcType gc_type, GcCause gc_cause, bool clear_soft_references) {
+collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, GcCause gc_cause,
+                                               bool clear_soft_references) {
   Thread* self = Thread::Current();
   ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
   Locks::mutator_lock_->AssertNotHeld(self);
@@ -1103,31 +1153,37 @@
 
   // We need to do partial GCs every now and then to avoid the heap growing too much and
   // fragmenting.
-  if (gc_type == kGcTypeSticky && ++sticky_gc_count_ > partial_gc_frequency_) {
-    gc_type = have_zygote_space_ ? kGcTypePartial : kGcTypeFull;
-  }
-  if (gc_type != kGcTypeSticky) {
+  // TODO: if sticky GCs are failing to free memory then we should lower the
+  // sticky_to_partial_gc_ratio_; if they are successful we can increase it.
+  if (gc_type == collector::kGcTypeSticky) {
+    ++sticky_gc_count_;
+    if (sticky_gc_count_ >= sticky_to_partial_gc_ratio_) {
+      gc_type = have_zygote_space_ ? collector::kGcTypePartial : collector::kGcTypeFull;
+      sticky_gc_count_ = 0;
+    }
+  } else {
     sticky_gc_count_ = 0;
   }
 
-  uint64_t gc_start_time = NanoTime();
+  uint64_t gc_start_time_ns = NanoTime();
   uint64_t gc_start_size = GetBytesAllocated();
   // Approximate allocation rate in bytes / second.
-  if (UNLIKELY(gc_start_time == last_gc_time_)) {
+  if (UNLIKELY(gc_start_time_ns == last_gc_time_ns_)) {
     LOG(WARNING) << "Timers are broken (gc_start_time == last_gc_time_).";
   }
-  uint64_t ms_delta = NsToMs(gc_start_time - last_gc_time_);
+  uint64_t ms_delta = NsToMs(gc_start_time_ns - last_gc_time_ns_);
   if (ms_delta != 0) {
-    allocation_rate_ = (gc_start_size - last_gc_size_) * 1000 / ms_delta;
+    allocation_rate_ = ((gc_start_size - last_gc_size_) * 1000) / ms_delta;
     VLOG(heap) << "Allocation rate: " << PrettySize(allocation_rate_) << "/s";
   }
 
-  DCHECK_LT(gc_type, kGcTypeMax);
-  DCHECK_NE(gc_type, kGcTypeNone);
-  MarkSweep* collector = NULL;
-  for (Collectors::iterator it = mark_sweep_collectors_.begin();
-      it != mark_sweep_collectors_.end(); ++it) {
-    MarkSweep* cur_collector = *it;
+  DCHECK_LT(gc_type, collector::kGcTypeMax);
+  DCHECK_NE(gc_type, collector::kGcTypeNone);
+  collector::MarkSweep* collector = NULL;
+  typedef std::vector<collector::MarkSweep*>::iterator It;
+  for (It it = mark_sweep_collectors_.begin(), end = mark_sweep_collectors_.end();
+      it != end; ++it) {
+    collector::MarkSweep* cur_collector = *it;
     if (cur_collector->IsConcurrent() == concurrent_gc_ && cur_collector->GetGcType() == gc_type) {
       collector = cur_collector;
       break;
@@ -1138,10 +1194,10 @@
       << " and type=" << gc_type;
   collector->clear_soft_references_ = clear_soft_references;
   collector->Run();
-  total_objects_freed_ += collector->GetFreedObjects();
-  total_bytes_freed_ += collector->GetFreedBytes();
+  total_objects_freed_ever_ += collector->GetFreedObjects();
+  total_bytes_freed_ever_ += collector->GetFreedBytes();
 
-  const size_t duration = collector->GetDuration();
+  const size_t duration = collector->GetDurationNs();
   std::vector<uint64_t> pauses = collector->GetPauseTimes();
   bool was_slow = duration > kSlowGcThreshold ||
       (gc_cause == kGcCauseForAlloc && duration > kLongGcPauseThreshold);
@@ -1153,7 +1209,7 @@
 
   if (was_slow) {
     const size_t percent_free = GetPercentFree();
-    const size_t current_heap_size = GetUsedMemorySize();
+    const size_t current_heap_size = GetBytesAllocated();
     const size_t total_memory = GetTotalMemory();
     std::ostringstream pause_string;
     for (size_t i = 0; i < pauses.size(); ++i) {
@@ -1166,7 +1222,7 @@
               << PrettySize(total_memory) << ", " << "paused " << pause_string.str()
               << " total " << PrettyDuration((duration / 1000) * 1000);
     if (VLOG_IS_ON(heap)) {
-      LOG(INFO) << Dumpable<TimingLogger>(collector->GetTimings());
+      LOG(INFO) << Dumpable<base::NewTimingLogger>(collector->GetTimings());
     }
   }
 
@@ -1182,32 +1238,33 @@
   return gc_type;
 }
 
-void Heap::UpdateAndMarkModUnion(MarkSweep* mark_sweep, TimingLogger& timings, GcType gc_type) {
-  if (gc_type == kGcTypeSticky) {
+void Heap::UpdateAndMarkModUnion(collector::MarkSweep* mark_sweep, base::NewTimingLogger& timings,
+                                 collector::GcType gc_type) {
+  if (gc_type == collector::kGcTypeSticky) {
     // Don't need to do anything for mod union table in this case since we are only scanning dirty
     // cards.
     return;
   }
 
   // Update zygote mod union table.
-  if (gc_type == kGcTypePartial) {
+  if (gc_type == collector::kGcTypePartial) {
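+    // Note: NewTimingLogger splits are opened before the work they cover, rather than being
+    // recorded after it completes.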
+    timings.NewSplit("UpdateZygoteModUnionTable");
     zygote_mod_union_table_->Update();
-    timings.AddSplit("UpdateZygoteModUnionTable");
 
+    timings.NewSplit("ZygoteMarkReferences");
     zygote_mod_union_table_->MarkReferences(mark_sweep);
-    timings.AddSplit("ZygoteMarkReferences");
   }
 
   // Processes the cards we cleared earlier and adds their objects into the mod-union table.
-  mod_union_table_->Update();
-  timings.AddSplit("UpdateModUnionTable");
+  timings.NewSplit("UpdateModUnionTable");
+  image_mod_union_table_->Update();
 
   // Scans all objects in the mod-union table.
-  mod_union_table_->MarkReferences(mark_sweep);
-  timings.AddSplit("MarkImageToAllocSpaceReferences");
+  timings.NewSplit("MarkImageToAllocSpaceReferences");
+  image_mod_union_table_->MarkReferences(mark_sweep);
 }
 
-void Heap::RootMatchesObjectVisitor(const mirror::Object* root, void* arg) {
+static void RootMatchesObjectVisitor(const mirror::Object* root, void* arg) {
   mirror::Object* obj = reinterpret_cast<mirror::Object*>(arg);
   if (root == obj) {
     LOG(INFO) << "Object " << obj << " is a root";
@@ -1221,94 +1278,109 @@
   }
 };
 
+// Verify a reference from an object.
 class VerifyReferenceVisitor {
  public:
-  VerifyReferenceVisitor(Heap* heap, bool* failed)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_,
-                            Locks::heap_bitmap_lock_)
-      : heap_(heap),
-        failed_(failed) {
+  VerifyReferenceVisitor(Heap* heap)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_)
+      : heap_(heap), failed_(false) {
+  }
+
+  bool Failed() const {
+    return failed_;
   }
 
   // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for smarter
-  // analysis.
+  // analysis on visitors.
   void operator ()(const mirror::Object* obj, const mirror::Object* ref,
-                   const MemberOffset& /* offset */, bool /* is_static */) const
+                   const MemberOffset& offset, bool /* is_static */) const
       NO_THREAD_SAFETY_ANALYSIS {
     // Verify that the reference is live.
-    if (ref != NULL && !IsLive(ref)) {
-      CardTable* card_table = heap_->GetCardTable();
-      ObjectStack* alloc_stack = heap_->allocation_stack_.get();
-      ObjectStack* live_stack = heap_->live_stack_.get();
+    if (UNLIKELY(ref != NULL && !IsLive(ref))) {
+      accounting::CardTable* card_table = heap_->GetCardTable();
+      accounting::ObjectStack* alloc_stack = heap_->allocation_stack_.get();
+      accounting::ObjectStack* live_stack = heap_->live_stack_.get();
 
-      byte* card_addr = card_table->CardFromAddr(obj);
-      LOG(ERROR) << "Object " << obj << " references dead object " << ref << "\n"
-                 << "IsDirty = " << (*card_addr == CardTable::kCardDirty) << "\n"
-                 << "Obj type " << PrettyTypeOf(obj) << "\n"
-                 << "Ref type " << PrettyTypeOf(ref);
-      card_table->CheckAddrIsInCardTable(reinterpret_cast<const byte*>(obj));
-      void* cover_begin = card_table->AddrFromCard(card_addr);
-      void* cover_end = reinterpret_cast<void*>(reinterpret_cast<size_t>(cover_begin) +
-          CardTable::kCardSize);
-      LOG(ERROR) << "Card " << reinterpret_cast<void*>(card_addr) << " covers " << cover_begin
-                 << "-" << cover_end;
-      SpaceBitmap* bitmap = heap_->GetLiveBitmap()->GetSpaceBitmap(obj);
+      if (obj != NULL) {
+        byte* card_addr = card_table->CardFromAddr(obj);
+        LOG(ERROR) << "Object " << obj << " references dead object " << ref << " at offset " << offset
+                   << "\nIsDirty = " << (*card_addr == accounting::CardTable::kCardDirty)
+                   << "\nObj type " << PrettyTypeOf(obj)
+                   << "\nRef type " << PrettyTypeOf(ref);
+        card_table->CheckAddrIsInCardTable(reinterpret_cast<const byte*>(obj));
+        void* cover_begin = card_table->AddrFromCard(card_addr);
+        void* cover_end = reinterpret_cast<void*>(reinterpret_cast<size_t>(cover_begin) +
+            accounting::CardTable::kCardSize);
+        LOG(ERROR) << "Card " << reinterpret_cast<void*>(card_addr) << " covers " << cover_begin
+            << "-" << cover_end;
+        accounting::SpaceBitmap* bitmap = heap_->GetLiveBitmap()->GetContinuousSpaceBitmap(obj);
 
-      // Print out how the object is live.
-      if (bitmap->Test(obj)) {
-        LOG(ERROR) << "Object " << obj << " found in live bitmap";
+        // Print out how the object is live.
+        if (bitmap != NULL && bitmap->Test(obj)) {
+          LOG(ERROR) << "Object " << obj << " found in live bitmap";
+        }
+        if (alloc_stack->Contains(const_cast<mirror::Object*>(obj))) {
+          LOG(ERROR) << "Object " << obj << " found in allocation stack";
+        }
+        if (live_stack->Contains(const_cast<mirror::Object*>(obj))) {
+          LOG(ERROR) << "Object " << obj << " found in live stack";
+        }
+        // Attempt to see if the card table missed the reference.
+        ScanVisitor scan_visitor;
+        byte* byte_cover_begin = reinterpret_cast<byte*>(card_table->AddrFromCard(card_addr));
+        card_table->Scan(bitmap, byte_cover_begin,
+                         byte_cover_begin + accounting::CardTable::kCardSize,
+                         scan_visitor, VoidFunctor());
+
+        // Search to see if any of the roots reference our object.
+        void* arg = const_cast<void*>(reinterpret_cast<const void*>(obj));
+        Runtime::Current()->VisitRoots(&RootMatchesObjectVisitor, arg, false, false);
+
+        // Search to see if any of the roots reference our reference.
+        arg = const_cast<void*>(reinterpret_cast<const void*>(ref));
+        Runtime::Current()->VisitRoots(&RootMatchesObjectVisitor, arg, false, false);
+      } else {
+        LOG(ERROR) << "Root references dead object " << ref << "\nRef type " << PrettyTypeOf(ref);
       }
-      if (std::binary_search(alloc_stack->Begin(), alloc_stack->End(), obj)) {
-        LOG(ERROR) << "Object " << obj << " found in allocation stack";
+      if (alloc_stack->Contains(const_cast<mirror::Object*>(ref))) {
+        LOG(ERROR) << "Reference " << ref << " found in allocation stack!";
       }
-      if (std::binary_search(live_stack->Begin(), live_stack->End(), obj)) {
-        LOG(ERROR) << "Object " << obj << " found in live stack";
-      }
-      if (std::binary_search(live_stack->Begin(), live_stack->End(), ref)) {
+      if (live_stack->Contains(const_cast<mirror::Object*>(ref))) {
         LOG(ERROR) << "Reference " << ref << " found in live stack!";
       }
-
-      // Attempt to see if the card table missed the reference.
-      ScanVisitor scan_visitor;
-      byte* byte_cover_begin = reinterpret_cast<byte*>(card_table->AddrFromCard(card_addr));
-      card_table->Scan(bitmap, byte_cover_begin, byte_cover_begin + CardTable::kCardSize,
-                       scan_visitor, VoidFunctor());
-
-      // Search to see if any of the roots reference our object.
-      void* arg = const_cast<void*>(reinterpret_cast<const void*>(obj));
-      Runtime::Current()->VisitRoots(&Heap::RootMatchesObjectVisitor, arg);
-      *failed_ = true;
+      heap_->image_mod_union_table_->Dump(LOG(ERROR) << "Image mod-union table: ");
+      heap_->zygote_mod_union_table_->Dump(LOG(ERROR) << "Zygote mod-union table: ");
+      failed_ = true;
     }
   }
 
   bool IsLive(const mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
-    if (heap_->GetLiveBitmap()->Test(obj)) {
-      return true;
-    }
-    ObjectStack* alloc_stack = heap_->allocation_stack_.get();
-    // At this point we need to search the allocation since things in the live stack may get swept.
-    // If the object is not either in the live bitmap or allocation stack, so the object must be
-    // dead.
-    return std::binary_search(alloc_stack->Begin(), alloc_stack->End(), obj);
+    return heap_->IsLiveObjectLocked(obj);
+  }
+
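+  // Roots are reported as references from a NULL holder object at offset 0, so a dead root is
+  // surfaced through the same error path as a dead field reference.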
+  static void VerifyRoots(const mirror::Object* root, void* arg) {
+    VerifyReferenceVisitor* visitor = reinterpret_cast<VerifyReferenceVisitor*>(arg);
+    (*visitor)(NULL, root, MemberOffset(0), true);
   }
 
  private:
-  Heap* heap_;
-  bool* failed_;
+  Heap* const heap_;
+  mutable bool failed_;
 };
 
+// Verify all references within an object, for use with HeapBitmap::Visit.
 class VerifyObjectVisitor {
  public:
-  VerifyObjectVisitor(Heap* heap)
-      : heap_(heap),
-        failed_(false) {
-
+  VerifyObjectVisitor(Heap* heap) : heap_(heap), failed_(false) {
   }
 
   void operator ()(const mirror::Object* obj) const
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
-    VerifyReferenceVisitor visitor(heap_, const_cast<bool*>(&failed_));
-    MarkSweep::VisitObjectReferences(obj, visitor);
+    // Note: we verify the references in obj but not obj itself, since obj must be live or we
+    // would not have found it in the live bitmap.
+    VerifyReferenceVisitor visitor(heap_);
+    collector::MarkSweep::VisitObjectReferences(obj, visitor);
+    failed_ = failed_ || visitor.Failed();
   }
 
   bool Failed() const {
@@ -1316,18 +1388,19 @@
   }
 
  private:
-  Heap* heap_;
-  bool failed_;
+  Heap* const heap_;
+  mutable bool failed_;
 };
 
 // Must do this with mutators suspended since we are directly accessing the allocation stacks.
 bool Heap::VerifyHeapReferences() {
   Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
   // Let's sort our allocation stacks so that we can efficiently binary search them.
-  std::sort(allocation_stack_->Begin(), allocation_stack_->End());
-  std::sort(live_stack_->Begin(), live_stack_->End());
+  allocation_stack_->Sort();
+  live_stack_->Sort();
   // Perform the verification.
   VerifyObjectVisitor visitor(this);
+  Runtime::Current()->VisitRoots(VerifyReferenceVisitor::VerifyRoots, &visitor, false, false);
   GetLiveBitmap()->Visit(visitor);
   // We don't want to verify the objects in the allocation stack since they themselves may be
   // pointing to dead objects if they are not reachable.
@@ -1343,8 +1416,7 @@
   VerifyReferenceCardVisitor(Heap* heap, bool* failed)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_,
                             Locks::heap_bitmap_lock_)
-      : heap_(heap),
-        failed_(failed) {
+      : heap_(heap), failed_(failed) {
   }
 
   // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for
@@ -1354,7 +1426,7 @@
     // Filter out class references since changing an object's class does not mark the card as dirty.
     // Also handles large objects, since the only reference they hold is a class reference.
     if (ref != NULL && !ref->IsClass()) {
-      CardTable* card_table = heap_->GetCardTable();
+      accounting::CardTable* card_table = heap_->GetCardTable();
       // If the object is not dirty and it is referencing something in the live stack other than
       // class, then it must be on a dirty card.
       if (!card_table->AddrIsInCardTable(obj)) {
@@ -1363,9 +1435,9 @@
       } else if (!card_table->IsDirty(obj)) {
         // Card should be either kCardDirty if it got re-dirtied after we aged it, or
         // kCardDirty - 1 if it didn't get touched since we aged it.
-        ObjectStack* live_stack = heap_->live_stack_.get();
-        if (std::binary_search(live_stack->Begin(), live_stack->End(), ref)) {
-          if (std::binary_search(live_stack->Begin(), live_stack->End(), obj)) {
+        accounting::ObjectStack* live_stack = heap_->live_stack_.get();
+        if (live_stack->Contains(const_cast<mirror::Object*>(ref))) {
+          if (live_stack->Contains(const_cast<mirror::Object*>(obj))) {
             LOG(ERROR) << "Object " << obj << " found in live stack";
           }
           if (heap_->GetLiveBitmap()->Test(obj)) {
@@ -1406,8 +1478,8 @@
   }
 
  private:
-  Heap* heap_;
-  bool* failed_;
+  Heap* const heap_;
+  bool* const failed_;
 };
 
 class VerifyLiveStackReferences {
@@ -1421,7 +1493,7 @@
   void operator ()(const mirror::Object* obj) const
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
     VerifyReferenceCardVisitor visitor(heap_, const_cast<bool*>(&failed_));
-    MarkSweep::VisitObjectReferences(obj, visitor);
+    collector::MarkSweep::VisitObjectReferences(obj, visitor);
   }
 
   bool Failed() const {
@@ -1429,7 +1501,7 @@
   }
 
  private:
-  Heap* heap_;
+  Heap* const heap_;
   bool failed_;
 };
 
@@ -1437,7 +1509,7 @@
   Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
 
   // We need to sort the live stack since we binary search it.
-  std::sort(live_stack_->Begin(), live_stack_->End());
+  live_stack_->Sort();
   VerifyLiveStackReferences visitor(this);
   GetLiveBitmap()->Visit(visitor);
 
@@ -1458,30 +1530,31 @@
 
   // Sort the live stack so that we can quickly binary search it later.
   if (verify_object_mode_ > kNoHeapVerification) {
-    std::sort(live_stack_->Begin(), live_stack_->End());
+    live_stack_->Sort();
   }
 }
 
-void Heap::ProcessCards(TimingLogger& timings) {
-  // Clear image space cards and keep track of cards we cleared in the mod-union table.
-  for (Spaces::iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
-    ContinuousSpace* space = *it;
+void Heap::ProcessCards(base::NewTimingLogger& timings) {
+  // Clear cards and keep track of cards cleared in the mod-union table.
+  typedef std::vector<space::ContinuousSpace*>::iterator It;
+  for (It it = continuous_spaces_.begin(), end = continuous_spaces_.end(); it != end; ++it) {
+    space::ContinuousSpace* space = *it;
     if (space->IsImageSpace()) {
-      mod_union_table_->ClearCards(*it);
-      timings.AddSplit("ModUnionClearCards");
-    } else if (space->GetGcRetentionPolicy() == kGcRetentionPolicyFullCollect) {
+      timings.NewSplit("ModUnionClearCards");
+      image_mod_union_table_->ClearCards(space);
+    } else if (space->IsZygoteSpace()) {
+      timings.NewSplit("ZygoteModUnionClearCards");
       zygote_mod_union_table_->ClearCards(space);
-      timings.AddSplit("ZygoteModUnionClearCards");
     } else {
       // No mod union table for the AllocSpace. Age the cards so that the GC knows that these cards
       // were dirty before the GC started.
+      timings.NewSplit("AllocSpaceClearCards");
       card_table_->ModifyCardsAtomic(space->Begin(), space->End(), AgeCardVisitor(), VoidFunctor());
-      timings.AddSplit("AllocSpaceClearCards");
     }
   }
 }
 
-void Heap::PreGcVerification(GarbageCollector* gc) {
+void Heap::PreGcVerification(collector::GarbageCollector* gc) {
   ThreadList* thread_list = Runtime::Current()->GetThreadList();
   Thread* self = Thread::Current();
 
@@ -1516,43 +1589,48 @@
     ReaderMutexLock reader_lock(self, *Locks::heap_bitmap_lock_);
     zygote_mod_union_table_->Update();
     zygote_mod_union_table_->Verify();
-    mod_union_table_->Update();
-    mod_union_table_->Verify();
+    image_mod_union_table_->Update();
+    image_mod_union_table_->Verify();
     thread_list->ResumeAll();
   }
 }
 
-void Heap::PreSweepingGcVerification(GarbageCollector* gc) {
+void Heap::PreSweepingGcVerification(collector::GarbageCollector* gc) {
   ThreadList* thread_list = Runtime::Current()->GetThreadList();
-  Thread* self = Thread::Current();
 
   // Called before sweeping occurs since we want to make sure we are not going to reclaim any
   // reachable objects.
   if (verify_post_gc_heap_) {
+    Thread* self = Thread::Current();
+    CHECK_NE(self->GetState(), kRunnable);
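+    // Release the shared mutator lock so that SuspendAll can acquire it exclusively without
+    // deadlocking on this thread; the shared lock is re-acquired after threads are resumed.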
+    Locks::mutator_lock_->SharedUnlock(self);
     thread_list->SuspendAll();
-    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
-    // Swapping bound bitmaps does nothing.
-    live_bitmap_.swap(mark_bitmap_);
-    if (!VerifyHeapReferences()) {
-      LOG(FATAL) << "Post " << gc->GetName() << "Gc verification failed";
+    {
+      WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
+      // Swapping bound bitmaps does nothing.
+      gc->SwapBitmaps();
+      if (!VerifyHeapReferences()) {
+        LOG(FATAL) << "Post " << gc->GetName() << "GC verification failed";
+      }
+      gc->SwapBitmaps();
     }
-    live_bitmap_.swap(mark_bitmap_);
     thread_list->ResumeAll();
+    Locks::mutator_lock_->SharedLock(self);
   }
 }
 
-void Heap::PostGcVerification(GarbageCollector* gc) {
+void Heap::PostGcVerification(collector::GarbageCollector* gc) {
   Thread* self = Thread::Current();
 
   if (verify_system_weaks_) {
     ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
-    MarkSweep* mark_sweep = down_cast<MarkSweep*>(gc);
+    collector::MarkSweep* mark_sweep = down_cast<collector::MarkSweep*>(gc);
     mark_sweep->VerifySystemWeaks();
   }
 }
 
-GcType Heap::WaitForConcurrentGcToComplete(Thread* self) {
-  GcType last_gc_type = kGcTypeNone;
+collector::GcType Heap::WaitForConcurrentGcToComplete(Thread* self) {
+  collector::GcType last_gc_type = collector::kGcTypeNone;
   if (concurrent_gc_) {
     bool do_wait;
     uint64_t wait_start = NanoTime();
@@ -1583,7 +1661,7 @@
 }
 
 void Heap::DumpForSigQuit(std::ostream& os) {
-  os << "Heap: " << GetPercentFree() << "% free, " << PrettySize(GetUsedMemorySize()) << "/"
+  os << "Heap: " << GetPercentFree() << "% free, " << PrettySize(GetBytesAllocated()) << "/"
      << PrettySize(GetTotalMemory()) << "; " << GetObjectsAllocated() << " objects\n";
   DumpGcPerformanceInfo(os);
 }
@@ -1606,7 +1684,7 @@
   // This doesn't actually resize any memory. It just lets the heap grow more when necessary.
   const size_t bytes_allocated = GetBytesAllocated();
   last_gc_size_ = bytes_allocated;
-  last_gc_time_ = NanoTime();
+  last_gc_time_ns_ = NanoTime();
 
   size_t target_size = bytes_allocated / GetTargetHeapUtilization();
   if (target_size > bytes_allocated + max_free_) {
@@ -1617,20 +1695,23 @@
 
   SetIdealFootprint(target_size);
 
-  // Calculate when to perform the next ConcurrentGC if we have enough free memory.
-  if (concurrent_gc_ && GetFreeMemory() >= concurrent_min_free_) {
+  // Calculate when to perform the next ConcurrentGC.
+  if (concurrent_gc_) {
     // Calculate the estimated GC duration.
     double gc_duration_seconds = NsToMs(gc_duration) / 1000.0;
     // Estimate how many remaining bytes we will have when we need to start the next GC.
     size_t remaining_bytes = allocation_rate_ * gc_duration_seconds;
-    if (remaining_bytes < max_allowed_footprint_) {
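+    // Enforce a floor on the estimated headroom so that a very low allocation rate estimate
+    // cannot push the concurrent GC start right up against the footprint limit.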
+    remaining_bytes = std::max(remaining_bytes, kMinConcurrentRemainingBytes);
+    if (UNLIKELY(remaining_bytes > max_allowed_footprint_)) {
+      // A situation that should never happen: the estimated allocation rate is so high that we
+      // would exceed the application's entire footprint. Schedule another GC straight away.
+      concurrent_start_bytes_ = bytes_allocated;
+    } else {
       // Start a concurrent GC when we get close to the estimated remaining bytes. When the
       // allocation rate is very high, remaining_bytes could tell us that we should start a GC
       // right away.
       concurrent_start_bytes_ = std::max(max_allowed_footprint_ - remaining_bytes, bytes_allocated);
-    } else {
-      // The estimated rate is so high that we should request another GC straight away.
-      concurrent_start_bytes_ = bytes_allocated;
     }
     DCHECK_LE(concurrent_start_bytes_, max_allowed_footprint_);
     DCHECK_LE(max_allowed_footprint_, growth_limit_);
@@ -1736,30 +1817,6 @@
       arg_array.GetArray(), arg_array.GetNumBytes(), &result, 'V');
 }
 
-size_t Heap::GetBytesAllocated() const {
-  return num_bytes_allocated_;
-}
-
-size_t Heap::GetObjectsAllocated() const {
-  size_t total = 0;
-  // TODO: C++0x
-  for (Spaces::const_iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
-    Space* space = *it;
-    if (space->IsAllocSpace()) {
-      total += space->AsAllocSpace()->GetNumObjectsAllocated();
-    }
-  }
-  return total;
-}
-
-size_t Heap::GetConcurrentStartSize() const {
-  return concurrent_start_size_;
-}
-
-size_t Heap::GetConcurrentMinFree() const {
-  return concurrent_min_free_;
-}
-
 void Heap::EnqueueClearedReferences(mirror::Object** cleared) {
   DCHECK(cleared != NULL);
   if (*cleared != NULL) {
@@ -1811,11 +1868,11 @@
   }
 
   // Wait for any GCs currently running to finish.
-  if (WaitForConcurrentGcToComplete(self) == kGcTypeNone) {
+  if (WaitForConcurrentGcToComplete(self) == collector::kGcTypeNone) {
     if (alloc_space_->Size() > min_alloc_space_size_for_sticky_gc_) {
-      CollectGarbageInternal(kGcTypeSticky, kGcCauseBackground, false);
+      CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseBackground, false);
     } else {
-      CollectGarbageInternal(kGcTypePartial, kGcCauseBackground, false);
+      CollectGarbageInternal(collector::kGcTypePartial, kGcCauseBackground, false);
     }
   }
 }
@@ -1835,8 +1892,8 @@
   // not how much use we're making of those pages.
   uint64_t ms_time = MilliTime();
   float utilization =
-      static_cast<float>(alloc_space_->GetNumBytesAllocated()) / alloc_space_->Size();
-  if ((utilization > 0.75f) || ((ms_time - last_trim_time_) < 2 * 1000)) {
+      static_cast<float>(alloc_space_->GetBytesAllocated()) / alloc_space_->Size();
+  if ((utilization > 0.75f) || ((ms_time - last_trim_time_ms_) < 2 * 1000)) {
     // Don't bother trimming the alloc space if it's more than 75% utilized, or if a
     // heap trim occurred in the last two seconds.
     return;
@@ -1861,7 +1918,7 @@
     return;
   }
 
-  last_trim_time_ = ms_time;
+  last_trim_time_ms_ = ms_time;
   JNIEnv* env = self->GetJniEnv();
   DCHECK(WellKnownClasses::java_lang_Daemons != NULL);
   DCHECK(WellKnownClasses::java_lang_Daemons_requestHeapTrim != NULL);
@@ -1875,4 +1932,5 @@
   return alloc_space_->Trim();
 }
 
+}  // namespace gc
 }  // namespace art
diff --git a/src/heap.h b/src/gc/heap.h
similarity index 66%
rename from src/heap.h
rename to src/gc/heap.h
index 642c436..d86c7dc 100644
--- a/src/heap.h
+++ b/src/gc/heap.h
@@ -14,8 +14,8 @@
  * limitations under the License.
  */
 
-#ifndef ART_SRC_HEAP_H_
-#define ART_SRC_HEAP_H_
+#ifndef ART_SRC_GC_HEAP_H_
+#define ART_SRC_GC_HEAP_H_
 
 #include <iosfwd>
 #include <string>
@@ -23,10 +23,9 @@
 
 #include "atomic_integer.h"
 #include "base/timing_logger.h"
-#include "gc/atomic_stack.h"
-#include "gc/card_table.h"
-#include "gc/gc_type.h"
-#include "gc/heap_bitmap.h"
+#include "gc/accounting/atomic_stack.h"
+#include "gc/accounting/card_table.h"
+#include "gc/collector/gc_type.h"
 #include "globals.h"
 #include "gtest/gtest.h"
 #include "locks.h"
@@ -35,32 +34,44 @@
 #include "thread_pool.h"
 
 namespace art {
-namespace mirror {
-class Class;
-class Object;
-}  // namespace mirror
-class AllocSpace;
+
 class ConditionVariable;
-class DlMallocSpace;
-class GarbageCollector;
-class HeapBitmap;
-class ImageSpace;
-class LargeObjectSpace;
-class MarkSweep;
-class ModUnionTable;
 class Mutex;
-class Space;
-class SpaceTest;
 class StackVisitor;
 class Thread;
 class TimingLogger;
 
-typedef std::vector<ContinuousSpace*> Spaces;
+namespace mirror {
+  class Class;
+  class Object;
+}  // namespace mirror
+
+namespace gc {
+namespace accounting {
+  class HeapBitmap;
+  class ModUnionTable;
+  class SpaceSetMap;
+}  // namespace accounting
+
+namespace collector {
+  class GarbageCollector;
+  class MarkSweep;
+}  // namespace collector
+
+namespace space {
+  class AllocSpace;
+  class DiscontinuousSpace;
+  class DlMallocSpace;
+  class ImageSpace;
+  class LargeObjectSpace;
+  class Space;
+  class SpaceTest;
+}  // namespace space
 
 class AgeCardVisitor {
  public:
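+  // A dirty card is aged to kCardDirty - 1; anything else is cleared to 0 so the GC can tell
+  // which cards were dirtied before it started.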
   byte operator ()(byte card) const {
-    if (card == CardTable::kCardDirty) {
+    if (card == accounting::CardTable::kCardDirty) {
       return card - 1;
     } else {
       return 0;
@@ -68,9 +79,14 @@
   }
 };
 
+// What caused the GC?
 enum GcCause {
+  // GC triggered by a failed allocation. Thread doing allocation is blocked waiting for GC before
+  // retrying allocation.
   kGcCauseForAlloc,
+  // A background GC trying to ensure there is free memory ahead of allocations.
   kGcCauseBackground,
+  // An explicit System.gc() call.
   kGcCauseExplicit,
 };
 std::ostream& operator<<(std::ostream& os, const GcCause& policy);
@@ -120,10 +136,8 @@
 
   // Check sanity of all live references.
   void VerifyHeap() LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
-  static void RootMatchesObjectVisitor(const mirror::Object* root, void* arg);
   bool VerifyHeapReferences()
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
   bool VerifyMissingCardMarks()
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -139,20 +153,12 @@
       SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
   // Initiates an explicit garbage collection.
-  void CollectGarbage(bool clear_soft_references)
-      LOCKS_EXCLUDED(Locks::mutator_lock_);
+  void CollectGarbage(bool clear_soft_references) LOCKS_EXCLUDED(Locks::mutator_lock_);
 
   // Does a concurrent GC; should only be called by the GC daemon thread
   // through runtime.
   void ConcurrentGC(Thread* self) LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);
 
-  // Implements java.lang.Runtime.maxMemory.
-  int64_t GetMaxMemory() const;
-  // Implements java.lang.Runtime.totalMemory.
-  int64_t GetTotalMemory() const;
-  // Implements java.lang.Runtime.freeMemory.
-  int64_t GetFreeMemory() const;
-
   // Implements VMDebug.countInstancesOfClass and JDWP VM_InstanceCount.
   // The boolean decides whether to use IsAssignableFrom or == when comparing classes.
   void CountInstances(const std::vector<mirror::Class*>& classes, bool use_is_assignable_from,
@@ -188,14 +194,14 @@
 
   // Blocks the caller until the garbage collector becomes idle and returns
   // true if we waited for the GC to complete.
-  GcType WaitForConcurrentGcToComplete(Thread* self) LOCKS_EXCLUDED(gc_complete_lock_);
+  collector::GcType WaitForConcurrentGcToComplete(Thread* self) LOCKS_EXCLUDED(gc_complete_lock_);
 
-  const Spaces& GetSpaces() const {
-    return spaces_;
+  const std::vector<space::ContinuousSpace*>& GetContinuousSpaces() const {
+    return continuous_spaces_;
   }
 
-  Spaces& GetSpaces() {
-    return spaces_;
+  const std::vector<space::DiscontinuousSpace*>& GetDiscontinuousSpaces() const {
+    return discontinuous_spaces_;
   }
 
   void SetReferenceOffsets(MemberOffset reference_referent_offset,
@@ -257,47 +263,78 @@
     card_table_->MarkCard(dst);
   }
 
-  CardTable* GetCardTable() {
+  accounting::CardTable* GetCardTable() const {
     return card_table_.get();
   }
 
   void AddFinalizerReference(Thread* self, mirror::Object* object);
 
-  size_t GetBytesAllocated() const;
+  // Returns the number of bytes currently allocated.
+  size_t GetBytesAllocated() const {
+    return num_bytes_allocated_;
+  }
+
+  // Returns the number of objects currently allocated.
   size_t GetObjectsAllocated() const;
-  size_t GetConcurrentStartSize() const;
-  size_t GetConcurrentMinFree() const;
-  size_t GetUsedMemorySize() const;
 
   // Returns the total number of objects allocated since the heap was created.
-  size_t GetTotalObjectsAllocated() const;
+  size_t GetObjectsAllocatedEver() const;
 
   // Returns the total number of bytes allocated since the heap was created.
-  size_t GetTotalBytesAllocated() const;
+  size_t GetBytesAllocatedEver() const;
 
   // Returns the total number of objects freed since the heap was created.
-  size_t GetTotalObjectsFreed() const;
+  size_t GetObjectsFreedEver() const {
+    return total_objects_freed_ever_;
+  }
 
   // Returns the total number of bytes freed since the heap was created.
-  size_t GetTotalBytesFreed() const;
+  size_t GetBytesFreedEver() const {
+    return total_bytes_freed_ever_;
+  }
 
-  // Functions for getting the bitmap which corresponds to an object's address.
-  // This is probably slow, TODO: use better data structure like binary tree .
-  ContinuousSpace* FindSpaceFromObject(const mirror::Object*) const;
+  // Implements java.lang.Runtime.maxMemory, returning the maximum amount of memory a program can
+  // consume. For a regular VM this would relate to the -Xmx option and would return -1 if no Xmx
+  // were specified. Android apps start with a growth limit (small heap size) which is
+  // cleared/extended for large apps.
+  int64_t GetMaxMemory() const {
+    return growth_limit_;
+  }
+
+  // Implements java.lang.Runtime.totalMemory, returning the amount of memory consumed by an
+  // application.
+  int64_t GetTotalMemory() const {
+    // TODO: we use the footprint limit here which is conservative wrt number of pages really used.
+    //       We could implement a more accurate count across all spaces.
+    return max_allowed_footprint_;
+  }
+
+  // Implements java.lang.Runtime.freeMemory.
+  int64_t GetFreeMemory() const {
+    return GetTotalMemory() - num_bytes_allocated_;
+  }
+
+  // Get the space that corresponds to an object's address. Current implementation searches all
+  // spaces in turn. If fail_ok is false then failing to find a space will cause an abort.
+  // TODO: consider using faster data structure like binary tree.
+  space::ContinuousSpace* FindContinuousSpaceFromObject(const mirror::Object*, bool fail_ok) const;
+  space::DiscontinuousSpace* FindDiscontinuousSpaceFromObject(const mirror::Object*,
+                                                              bool fail_ok) const;
+  space::Space* FindSpaceFromObject(const mirror::Object*, bool fail_ok) const;
 
   void DumpForSigQuit(std::ostream& os);
 
   size_t Trim();
 
-  HeapBitmap* GetLiveBitmap() SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+  accounting::HeapBitmap* GetLiveBitmap() SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
     return live_bitmap_.get();
   }
 
-  HeapBitmap* GetMarkBitmap() SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+  accounting::HeapBitmap* GetMarkBitmap() SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
     return mark_bitmap_.get();
   }
 
-  ObjectStack* GetLiveStack() SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+  accounting::ObjectStack* GetLiveStack() SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
     return live_stack_.get();
   }
 
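
The accessors above pin the three Runtime-facing queries to two different ceilings: maxMemory reports growth_limit_, totalMemory reports the current footprint limit, and freeMemory is totalMemory minus the bytes allocated. A self-contained toy model of those invariants (the struct and its numbers are stand-ins, not the heap's real fields):

    #include <cassert>
    #include <cstdint>

    struct HeapModel {
      int64_t growth_limit = 64 << 20;           // GetMaxMemory(): hard ceiling
      int64_t max_allowed_footprint = 16 << 20;  // GetTotalMemory(): current limit
      int64_t num_bytes_allocated = 5 << 20;     // currently allocated bytes

      int64_t GetMaxMemory() const { return growth_limit; }
      int64_t GetTotalMemory() const { return max_allowed_footprint; }
      int64_t GetFreeMemory() const { return GetTotalMemory() - num_bytes_allocated; }
    };

    int main() {
      HeapModel heap;
      assert(heap.GetTotalMemory() <= heap.GetMaxMemory());    // footprint limit <= growth limit
      assert(heap.GetFreeMemory() == (16 << 20) - (5 << 20));  // free = total - allocated
      return 0;
    }
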
@@ -308,24 +345,32 @@
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
   // Mark all the objects in the allocation stack in the specified bitmap.
-  void MarkAllocStack(SpaceBitmap* bitmap, SpaceSetMap* large_objects, ObjectStack* stack)
+  void MarkAllocStack(accounting::SpaceBitmap* bitmap, accounting::SpaceSetMap* large_objects,
+                      accounting::ObjectStack* stack)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
   // Unmark all the objects in the allocation stack in the specified bitmap.
-  void UnMarkAllocStack(SpaceBitmap* bitmap, SpaceSetMap* large_objects, ObjectStack* stack)
+  void UnMarkAllocStack(accounting::SpaceBitmap* bitmap, accounting::SpaceSetMap* large_objects,
+                        accounting::ObjectStack* stack)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
   // Update and mark mod union table based on gc type.
-  void UpdateAndMarkModUnion(MarkSweep* mark_sweep, TimingLogger& timings, GcType gc_type)
+  void UpdateAndMarkModUnion(collector::MarkSweep* mark_sweep, base::NewTimingLogger& timings,
+                             collector::GcType gc_type)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
   // DEPRECATED: Should remove in "near" future when support for multiple image spaces is added.
   // Assumes there is only one image space.
-  ImageSpace* GetImageSpace();
-  DlMallocSpace* GetAllocSpace();
-  LargeObjectSpace* GetLargeObjectsSpace() {
-    return large_object_space_.get();
+  space::ImageSpace* GetImageSpace() const;
+
+  space::DlMallocSpace* GetAllocSpace() const {
+    return alloc_space_;
   }
+
+  space::LargeObjectSpace* GetLargeObjectsSpace() const {
+    return large_object_space_;
+  }
+
   void DumpSpaces();
 
   // UnReserve the address range where the oat file will be placed.
@@ -344,12 +389,12 @@
  private:
   // Allocates uninitialized storage. Passing in a null space tries to place the object in the
   // large object space.
-  mirror::Object* Allocate(Thread* self, AllocSpace* space, size_t num_bytes)
+  mirror::Object* Allocate(Thread* self, space::AllocSpace* space, size_t num_bytes)
       LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Try to allocate a number of bytes, this function never does any GCs.
-  mirror::Object* TryToAllocate(Thread* self, AllocSpace* space, size_t alloc_size, bool grow)
+  mirror::Object* TryToAllocate(Thread* self, space::AllocSpace* space, size_t alloc_size, bool grow)
       LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
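
The comments above split the allocation path: TryToAllocate never collects, while Allocate is the slow path that may retry after a collection and finally allow the footprint to grow. A toy model of that contract, with made-up numbers and stand-in names:

    #include <cstddef>

    struct ToyHeap {
      size_t used = 0, limit = 96, capacity = 128, garbage = 32;

      bool TryToAllocate(size_t n, bool grow) {
        size_t bound = grow ? capacity : limit;  // "grow" permits footprint growth
        if (used + n > bound) return false;      // over the footprint: caller should GC
        used += n;
        return true;
      }

      void CollectGarbage() { used -= garbage; garbage = 0; }

      bool Allocate(size_t n) {
        if (TryToAllocate(n, /*grow=*/false)) return true;
        CollectGarbage();  // the slow path collects; TryToAllocate itself never does
        if (TryToAllocate(n, /*grow=*/false)) return true;
        return TryToAllocate(n, /*grow=*/true);  // last resort: grow the footprint
      }
    };

    int main() {
      ToyHeap heap;
      return heap.Allocate(80) && heap.Allocate(40) ? 0 : 1;
    }
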
@@ -365,14 +410,16 @@
 
   // Sometimes CollectGarbageInternal decides to run a different Gc than you requested. Returns
   // which type of Gc was actually run.
-  GcType CollectGarbageInternal(GcType gc_plan, GcCause gc_cause, bool clear_soft_references)
+  collector::GcType CollectGarbageInternal(collector::GcType gc_plan, GcCause gc_cause,
+                                           bool clear_soft_references)
       LOCKS_EXCLUDED(gc_complete_lock_,
                      Locks::heap_bitmap_lock_,
                      Locks::thread_suspend_count_lock_);
 
-  void PreGcVerification(GarbageCollector* gc);
-  void PreSweepingGcVerification(GarbageCollector* gc);
-  void PostGcVerification(GarbageCollector* gc);
+  void PreGcVerification(collector::GarbageCollector* gc);
+  void PreSweepingGcVerification(collector::GarbageCollector* gc)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void PostGcVerification(collector::GarbageCollector* gc);
 
   // Given the current contents of the alloc space, increase the allowed heap footprint to match
   // the target utilization ratio.  This should only be called immediately after a full garbage
@@ -381,7 +428,9 @@
 
   size_t GetPercentFree();
 
-  void AddSpace(ContinuousSpace* space) LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
+  void AddContinuousSpace(space::ContinuousSpace* space) LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
+  void AddDiscontinuousSpace(space::DiscontinuousSpace* space)
+      LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
 
   // No thread safety analysis since we call this everywhere and it is impossible to find a proper
   // lock ordering for it.
@@ -394,26 +443,32 @@
   void SwapStacks();
 
   // Clear cards and update the mod union table.
-  void ProcessCards(TimingLogger& timings);
+  void ProcessCards(base::NewTimingLogger& timings);
 
-  Spaces spaces_;
+  // All-known continuous spaces, where objects lie within fixed bounds.
+  std::vector<space::ContinuousSpace*> continuous_spaces_;
 
-  // A map that we use to temporarily reserve address range for the oat file.
-  UniquePtr<MemMap> oat_file_map_;
+  // All-known discontinuous spaces, where objects may be placed throughout virtual memory.
+  std::vector<space::DiscontinuousSpace*> discontinuous_spaces_;
 
-  // The alloc space which we are currently allocating into.
-  DlMallocSpace* alloc_space_;
+  // The allocation space we are currently allocating into.
+  space::DlMallocSpace* alloc_space_;
+
+  // The large object space we are currently allocating into.
+  space::LargeObjectSpace* large_object_space_;
+
+  // The card table, dirtied by the write barrier.
+  UniquePtr<accounting::CardTable> card_table_;
 
   // The mod-union table remembers all of the references from the image space to the alloc /
-  // zygote spaces.
-  UniquePtr<ModUnionTable> mod_union_table_;
+  // zygote spaces to allow the card table to be cleared.
+  UniquePtr<accounting::ModUnionTable> image_mod_union_table_;
 
   // This table holds all of the references from the zygote space to the alloc space.
-  UniquePtr<ModUnionTable> zygote_mod_union_table_;
+  UniquePtr<accounting::ModUnionTable> zygote_mod_union_table_;
 
-  UniquePtr<CardTable> card_table_;
-
-  // True for concurrent mark sweep GC, false for mark sweep.
+  // What kind of concurrency behavior is the runtime after? True for concurrent mark sweep GC,
+  // false for stop-the-world mark sweep.
   const bool concurrent_gc_;
 
   // If we have a zygote space.
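
ProcessCards above (and UpdateAndMarkModUnion earlier) now take a base::NewTimingLogger, whose split-based interface records when a phase begins as well as when it ends, so a trace can show which GC phase is currently running. A minimal stand-in for that begin/end split style (PhaseTimings is illustrative, not ART's actual logger API):

    #include <cstdio>
    #include <string>

    class PhaseTimings {
     public:
      void StartSplit(const std::string& name) {
        current_ = name;
        std::printf("begin %s\n", name.c_str());  // the begin event is recorded too
      }
      void EndSplit() { std::printf("end %s\n", current_.c_str()); }
     private:
      std::string current_;
    };

    int main() {
      PhaseTimings timings;
      timings.StartSplit("ProcessCards");
      // ... clear cards and update the mod-union tables here ...
      timings.EndSplit();
      return 0;
    }
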
@@ -424,40 +479,43 @@
   Mutex* gc_complete_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
   UniquePtr<ConditionVariable> gc_complete_cond_ GUARDED_BY(gc_complete_lock_);
 
-  // Reference queue lock
-  UniquePtr<Mutex> reference_queue_lock_;
+  // Mutex held when adding references to reference queues.
+  // TODO: move to a UniquePtr, currently annotalysis is confused that UniquePtr isn't lockable.
+  Mutex* reference_queue_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
 
   // True while the garbage collector is running.
   volatile bool is_gc_running_ GUARDED_BY(gc_complete_lock_);
 
   // Last Gc type we ran. Used by WaitForConcurrentGc to know which Gc was waited on.
-  volatile GcType last_gc_type_ GUARDED_BY(gc_complete_lock_);
-
-  // If enabled, causes Gc for alloc when heap size reaches the current footprint limit before the
-  // Gc updates it.
-  const bool enforce_heap_growth_rate_;
+  volatile collector::GcType last_gc_type_ GUARDED_BY(gc_complete_lock_);
 
   // Maximum size that the heap can reach.
-  size_t capacity_;
+  const size_t capacity_;
+  // The size the heap is limited to. This is initially smaller than capacity, but for largeHeap
+  // programs it is "cleared", making it the same as capacity.
   size_t growth_limit_;
+  // When the number of bytes allocated exceeds the footprint, TryToAllocate returns NULL,
+  // indicating that a GC should be triggered.
   size_t max_allowed_footprint_;
 
-  // Minimum bytes before concurrent GC starts.
-  size_t concurrent_start_size_;
-  size_t concurrent_min_free_;
+  // When num_bytes_allocated_ exceeds this amount then a concurrent GC should be requested so that
+  // it completes ahead of an allocation failing.
   size_t concurrent_start_bytes_;
 
-  // Number of bytes allocated since the last Gc, we use this to help determine when to schedule concurrent GCs.
+  // Number of back-to-back sticky mark sweep collections.
   size_t sticky_gc_count_;
 
-  size_t total_bytes_freed_;
-  size_t total_objects_freed_;
+  // After how many sticky GCs we force a partial GC instead of another sticky mark bits GC.
+  const size_t sticky_to_partial_gc_ratio_;
+
+  // Since the heap was created, how many bytes have been freed.
+  size_t total_bytes_freed_ever_;
+
+  // Since the heap was created, how many objects have been freed.
+  size_t total_objects_freed_ever_;
 
   // Primitive objects larger than this size are put in the large object space.
-  size_t large_object_threshold_;
-
-  // Large object space.
-  UniquePtr<LargeObjectSpace> large_object_space_;
+  const size_t large_object_threshold_;
 
   // Number of bytes allocated.  Adjusted after each allocation and free.
   AtomicInteger num_bytes_allocated_;
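
sticky_gc_count_ and sticky_to_partial_gc_ratio_ above encode an escalation policy: after enough back-to-back sticky collections, fall back to a partial collection. A one-function sketch of that policy (the enum is a cut-down stand-in for collector::GcType):

    #include <cstddef>

    enum GcType { kGcTypeSticky, kGcTypePartial, kGcTypeFull };

    GcType NextGcType(size_t sticky_gc_count, size_t sticky_to_partial_gc_ratio) {
      // Once the run of sticky GCs reaches the ratio, promote to a partial GC.
      return sticky_gc_count < sticky_to_partial_gc_ratio ? kGcTypeSticky : kGcTypePartial;
    }

    int main() {
      // With a ratio of 3, the fourth consecutive sticky GC is promoted.
      return NextGcType(3, 3) == kGcTypePartial ? 0 : 1;
    }
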
@@ -472,9 +530,6 @@
   // Parallel GC data structures.
   UniquePtr<ThreadPool> thread_pool_;
 
-  // After how many GCs we force to do a partial GC instead of sticky mark bits GC.
-  const size_t partial_gc_frequency_;
-
   // Sticky mark bits GC has some overhead, so if we have less than a few megabytes of AllocSpace then
   // it's probably better to just do a partial GC.
   const size_t min_alloc_space_size_for_sticky_gc_;
@@ -483,31 +538,34 @@
   // normal GC, it is important to not use it when we are almost out of memory.
   const size_t min_remaining_space_for_sticky_gc_;
 
-  // Last trim time
-  uint64_t last_trim_time_;
+  // The last time a heap trim occurred.
+  uint64_t last_trim_time_ms_;
 
-  // The time at which the last GC ended.
-  uint64_t last_gc_time_;
+  // The nanosecond time at which the last GC ended.
+  uint64_t last_gc_time_ns_;
 
   // How many bytes were allocated at the end of the last GC.
   uint64_t last_gc_size_;
 
-  // Estimated allocation rate (bytes / second).
+  // Estimated allocation rate (bytes / second). Computed between the time of the last GC cycle
+  // and the start of the current one.
   uint64_t allocation_rate_;
 
-  UniquePtr<HeapBitmap> live_bitmap_ GUARDED_BY(Locks::heap_bitmap_lock_);
-  UniquePtr<HeapBitmap> mark_bitmap_ GUARDED_BY(Locks::heap_bitmap_lock_);
+  // For a GC cycle, bitmaps that are set corresponding to the live and marked objects.
+  UniquePtr<accounting::HeapBitmap> live_bitmap_ GUARDED_BY(Locks::heap_bitmap_lock_);
+  UniquePtr<accounting::HeapBitmap> mark_bitmap_ GUARDED_BY(Locks::heap_bitmap_lock_);
 
   // Mark stack that we reuse to avoid re-allocating the mark stack.
-  UniquePtr<ObjectStack> mark_stack_;
+  UniquePtr<accounting::ObjectStack> mark_stack_;
 
   // Allocation stack, new allocations go here so that we can do sticky mark bits. This enables us
   // to use the live bitmap as the old mark bitmap.
   const size_t max_allocation_stack_size_;
-  UniquePtr<ObjectStack> allocation_stack_;
+  bool is_allocation_stack_sorted_;
+  UniquePtr<accounting::ObjectStack> allocation_stack_;
 
   // Second allocation stack so that we can process allocation with the heap unlocked.
-  UniquePtr<ObjectStack> live_stack_;
+  UniquePtr<accounting::ObjectStack> live_stack_;
 
   // offset of java.lang.ref.Reference.referent
   MemberOffset reference_referent_offset_;
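
Together, last_gc_time_ns_, last_gc_size_ and allocation_rate_ let the heap estimate how fast mutators allocate, and concurrent_start_bytes_ (earlier in this class) is the kind of threshold such an estimate feeds: start the concurrent GC early enough that it finishes before the footprint limit is hit. A hedged sketch of that arithmetic; the estimated GC duration is an assumed input, not a real heap field:

    #include <cstdint>

    uint64_t EstimateAllocationRate(uint64_t bytes_allocated_now, uint64_t last_gc_size,
                                    uint64_t now_ns, uint64_t last_gc_time_ns) {
      uint64_t duration_ns = now_ns - last_gc_time_ns;  // window since the last GC ended
      if (duration_ns == 0) return 0;
      return (bytes_allocated_now - last_gc_size) * UINT64_C(1000000000) / duration_ns;
    }

    uint64_t ComputeConcurrentStartBytes(uint64_t max_allowed_footprint,
                                         uint64_t rate_bytes_per_s,
                                         uint64_t est_gc_duration_s) {
      // Leave headroom for what mutators will allocate while the concurrent GC runs.
      uint64_t headroom = rate_bytes_per_s * est_gc_duration_s;
      return max_allowed_footprint > headroom ? max_allowed_footprint - headroom : 0;
    }

    int main() {
      uint64_t rate = EstimateAllocationRate(48 << 20, 16 << 20,
                                             UINT64_C(2000000000), UINT64_C(1000000000));
      return ComputeConcurrentStartBytes(64 << 20, rate, 1) == (32u << 20) ? 0 : 1;
    }
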
@@ -544,22 +602,22 @@
   // The current state of heap verification, may be enabled or disabled.
   HeapVerificationMode verify_object_mode_;
 
-  typedef std::vector<MarkSweep*> Collectors;
-  Collectors mark_sweep_collectors_;
+  std::vector<collector::MarkSweep*> mark_sweep_collectors_;
 
-  friend class MarkSweep;
+  // A map that we use to temporarily reserve address range for the oat file.
+  UniquePtr<MemMap> oat_file_map_;
+
+  friend class collector::MarkSweep;
   friend class VerifyReferenceCardVisitor;
   friend class VerifyReferenceVisitor;
   friend class VerifyObjectVisitor;
   friend class ScopedHeapLock;
-  FRIEND_TEST(SpaceTest, AllocAndFree);
-  FRIEND_TEST(SpaceTest, AllocAndFreeList);
-  FRIEND_TEST(SpaceTest, ZygoteSpace);
-  friend class SpaceTest;
+  friend class space::SpaceTest;
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(Heap);
 };
 
+}  // namespace gc
 }  // namespace art
 
-#endif  // ART_SRC_HEAP_H_
+#endif  // ART_SRC_GC_HEAP_H_
diff --git a/src/gc/heap_bitmap-inl.h b/src/gc/heap_bitmap-inl.h
deleted file mode 100644
index 2811183..0000000
--- a/src/gc/heap_bitmap-inl.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_SRC_GC_HEAP_BITMAP_INL_H_
-#define ART_SRC_GC_HEAP_BITMAP_INL_H_
-
-#include "heap_bitmap.h"
-
-namespace art {
-
-template <typename Visitor>
-inline void HeapBitmap::Visit(const Visitor& visitor) {
-  // TODO: C++0x auto
-  for (Bitmaps::iterator it = bitmaps_.begin(); it != bitmaps_.end(); ++it) {
-    SpaceBitmap* bitmap = *it;
-    bitmap->VisitMarkedRange(bitmap->HeapBegin(), bitmap->HeapLimit(), visitor, VoidFunctor());
-  }
-  large_objects_->Visit(visitor);
-}
-
-}  // namespace art
-
-#endif  // ART_SRC_GC_HEAP_BITMAP_INL_H_
diff --git a/src/gc/heap_bitmap.cc b/src/gc/heap_bitmap.cc
deleted file mode 100644
index cef6884..0000000
--- a/src/gc/heap_bitmap.cc
+++ /dev/null
@@ -1,49 +0,0 @@
-#include "heap_bitmap.h"
-#include "space.h"
-
-namespace art {
-
-void HeapBitmap::ReplaceBitmap(SpaceBitmap* old_bitmap, SpaceBitmap* new_bitmap) {
-  // TODO: C++0x auto
-  for (Bitmaps::iterator it = bitmaps_.begin(); it != bitmaps_.end(); ++it) {
-    if (*it == old_bitmap) {
-      *it = new_bitmap;
-      return;
-    }
-  }
-  LOG(FATAL) << "bitmap " << static_cast<const void*>(old_bitmap) << " not found";
-}
-
-void HeapBitmap::AddSpaceBitmap(SpaceBitmap* bitmap) {
-  DCHECK(bitmap != NULL);
-
-  // Check for interval overlap.
-  for (Bitmaps::const_iterator it = bitmaps_.begin(); it != bitmaps_.end(); ++it) {
-    SpaceBitmap* cur_bitmap = *it;
-    if (bitmap->HeapBegin() < cur_bitmap->HeapLimit() &&
-        bitmap->HeapLimit() > cur_bitmap->HeapBegin()) {
-      LOG(FATAL) << "Overlapping space bitmaps added to heap bitmap!";
-    }
-  }
-  bitmaps_.push_back(bitmap);
-}
-
-void HeapBitmap::SetLargeObjects(SpaceSetMap* large_objects) {
-  DCHECK(large_objects != NULL);
-  large_objects_ = large_objects;
-}
-
-HeapBitmap::HeapBitmap(Heap* heap) : heap_(heap), large_objects_(NULL) {
-
-}
-
-void HeapBitmap::Walk(SpaceBitmap::Callback* callback, void* arg) {
-  // TODO: C++0x auto
-  for (Bitmaps::iterator it = bitmaps_.begin(); it != bitmaps_.end(); ++it) {
-    (*it)->Walk(callback, arg);
-  }
-
-  large_objects_->Walk(callback, arg);
-}
-
-}  // namespace art
diff --git a/src/gc/heap_bitmap.h b/src/gc/heap_bitmap.h
deleted file mode 100644
index 87e0848..0000000
--- a/src/gc/heap_bitmap.h
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_SRC_GC_HEAP_BITMAP_H_
-#define ART_SRC_GC_HEAP_BITMAP_H_
-
-#include "locks.h"
-#include "space_bitmap.h"
-
-namespace art {
-class Heap;
-
-class HeapBitmap {
- public:
-  bool Test(const mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
-    SpaceBitmap* bitmap = GetSpaceBitmap(obj);
-    if (LIKELY(bitmap != NULL)) {
-      return bitmap->Test(obj);
-    } else {
-      return large_objects_->Test(obj);
-    }
-  }
-
-  void Clear(const mirror::Object* obj)
-  EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
-    SpaceBitmap* bitmap = GetSpaceBitmap(obj);
-    if (LIKELY(bitmap != NULL)) {
-      bitmap->Clear(obj);
-    } else {
-      large_objects_->Clear(obj);
-    }
-  }
-
-  void Set(const mirror::Object* obj)
-  EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
-    SpaceBitmap* bitmap = GetSpaceBitmap(obj);
-    if (LIKELY(bitmap != NULL)) {
-      bitmap->Set(obj);
-    } else {
-      large_objects_->Set(obj);
-    }
-  }
-
-  SpaceBitmap* GetSpaceBitmap(const mirror::Object* obj) {
-    // TODO: C++0x auto
-    for (Bitmaps::iterator it = bitmaps_.begin(); it != bitmaps_.end(); ++it) {
-      if ((*it)->HasAddress(obj)) {
-        return *it;
-      }
-    }
-    return NULL;
-  }
-
-  void Walk(SpaceBitmap::Callback* callback, void* arg)
-      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
-
-  template <typename Visitor>
-  void Visit(const Visitor& visitor)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
-  // Find and replace a bitmap pointer, this is used by for the bitmap swapping in the GC.
-  void ReplaceBitmap(SpaceBitmap* old_bitmap, SpaceBitmap* new_bitmap)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
-
-  HeapBitmap(Heap* heap);
-
-  inline SpaceSetMap* GetLargeObjects() const {
-    return large_objects_;
-  }
-
-  void SetLargeObjects(SpaceSetMap* large_objects);
-
- private:
-
-  const Heap* const heap_;
-
-  void AddSpaceBitmap(SpaceBitmap* bitmap);
-
-  typedef std::vector<SpaceBitmap*> Bitmaps;
-  Bitmaps bitmaps_;
-
-  // Large object sets.
-  SpaceSetMap* large_objects_;
-
-  friend class Heap;
-};
-
-}  // namespace art
-
-#endif  // ART_SRC_GC_HEAP_BITMAP_H_
diff --git a/src/heap_test.cc b/src/gc/heap_test.cc
similarity index 73%
rename from src/heap_test.cc
rename to src/gc/heap_test.cc
index 8bed7e3..02708e8 100644
--- a/src/heap_test.cc
+++ b/src/gc/heap_test.cc
@@ -15,14 +15,15 @@
  */
 
 #include "common_test.h"
-#include "gc/card_table-inl.h"
-#include "gc/space_bitmap-inl.h"
+#include "gc/accounting/card_table-inl.h"
+#include "gc/accounting/space_bitmap-inl.h"
 #include "mirror/class-inl.h"
 #include "mirror/object-inl.h"
 #include "mirror/object_array-inl.h"
 #include "sirt_ref.h"
 
 namespace art {
+namespace gc {
 
 class HeapTest : public CommonTest {};
 
@@ -56,9 +57,15 @@
 
 TEST_F(HeapTest, HeapBitmapCapacityTest) {
   byte* heap_begin = reinterpret_cast<byte*>(0x1000);
-  const size_t heap_capacity = SpaceBitmap::kAlignment * (sizeof(intptr_t) * 8 + 1);
-  UniquePtr<SpaceBitmap> bitmap(SpaceBitmap::Create("test bitmap", heap_begin, heap_capacity));
-  bitmap->Set(reinterpret_cast<const mirror::Object*>(&heap_begin[heap_capacity - SpaceBitmap::kAlignment]));
+  const size_t heap_capacity = accounting::SpaceBitmap::kAlignment * (sizeof(intptr_t) * 8 + 1);
+  UniquePtr<accounting::SpaceBitmap> bitmap(accounting::SpaceBitmap::Create("test bitmap",
+                                                                            heap_begin,
+                                                                            heap_capacity));
+  mirror::Object* fake_end_of_heap_object =
+      reinterpret_cast<mirror::Object*>(&heap_begin[heap_capacity -
+                                                    accounting::SpaceBitmap::kAlignment]);
+  bitmap->Set(fake_end_of_heap_object);
 }
 
+}  // namespace gc
 }  // namespace art
diff --git a/src/gc/mod_union_table-inl.h b/src/gc/mod_union_table-inl.h
deleted file mode 100644
index c1c69fb..0000000
--- a/src/gc/mod_union_table-inl.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_SRC_GC_MOD_UNION_TABLE_INL_H_
-#define ART_SRC_GC_MOD_UNION_TABLE_INL_H_
-
-#include "mod_union_table.h"
-
-namespace art {
-
-template <typename Implementation>
-class ModUnionTableToZygoteAllocspace : public Implementation {
-public:
-  ModUnionTableToZygoteAllocspace(Heap* heap) : Implementation(heap) {
-  }
-
-  bool AddReference(const mirror::Object* /* obj */, const mirror::Object* ref) {
-    const Spaces& spaces = Implementation::GetHeap()->GetSpaces();
-    for (Spaces::const_iterator it = spaces.begin(); it != spaces.end(); ++it) {
-      if ((*it)->Contains(ref)) {
-        return (*it)->IsAllocSpace();
-      }
-    }
-    // Assume it points to a large object.
-    // TODO: Check.
-    return true;
-  }
-};
-
-template <typename Implementation>
-class ModUnionTableToAllocspace : public Implementation {
-public:
-  ModUnionTableToAllocspace(Heap* heap) : Implementation(heap) {
-  }
-
-  bool AddReference(const mirror::Object* /* obj */, const mirror::Object* ref) {
-    const Spaces& spaces = Implementation::GetHeap()->GetSpaces();
-    for (Spaces::const_iterator it = spaces.begin(); it != spaces.end(); ++it) {
-      if ((*it)->Contains(ref)) {
-        return (*it)->GetGcRetentionPolicy() == kGcRetentionPolicyAlwaysCollect;
-      }
-    }
-    // Assume it points to a large object.
-    // TODO: Check.
-    return true;
-  }
-};
-
-}  // namespace art
-
-#endif  // ART_SRC_GC_MOD_UNION_TABLE_INL_H_
diff --git a/src/gc/mod_union_table.cc b/src/gc/mod_union_table.cc
deleted file mode 100644
index da950bb..0000000
--- a/src/gc/mod_union_table.cc
+++ /dev/null
@@ -1,424 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "mod_union_table.h"
-
-#include "base/stl_util.h"
-#include "card_table-inl.h"
-#include "heap.h"
-#include "heap_bitmap.h"
-#include "mark_sweep.h"
-#include "mark_sweep-inl.h"
-#include "mirror/object-inl.h"
-#include "mirror/class-inl.h"
-#include "mirror/field-inl.h"
-#include "mirror/object_array-inl.h"
-#include "space.h"
-#include "space_bitmap-inl.h"
-#include "thread.h"
-#include "UniquePtr.h"
-
-using namespace art::mirror;
-
-namespace art {
-
-class MarkIfReachesAllocspaceVisitor {
- public:
-  explicit MarkIfReachesAllocspaceVisitor(Heap* const heap, SpaceBitmap* bitmap)
-    : heap_(heap),
-      bitmap_(bitmap) {
-  }
-
-  // Extra parameters are required since we use this same visitor signature for checking objects.
-  void operator ()(const Object* obj, const Object* ref, const MemberOffset& /* offset */, bool /* is_static */) const {
-    // TODO: Optimize?
-    // TODO: C++0x auto
-    const Spaces& spaces = heap_->GetSpaces();
-    for (Spaces::const_iterator cur = spaces.begin(); cur != spaces.end(); ++cur) {
-      if ((*cur)->IsAllocSpace() && (*cur)->Contains(ref)) {
-        bitmap_->Set(obj);
-        break;
-      }
-    }
-  }
-
- private:
-  Heap* const heap_;
-  SpaceBitmap* bitmap_;
-};
-
-class ModUnionVisitor {
- public:
-  explicit ModUnionVisitor(Heap* const heap, SpaceBitmap* bitmap)
-    : heap_(heap),
-      bitmap_(bitmap) {
-  }
-
-  void operator ()(const Object* obj) const
-      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
-                            Locks::mutator_lock_) {
-    DCHECK(obj != NULL);
-    // We don't have an early exit since we use the visitor pattern, an early exit should
-    // significantly speed this up.
-    MarkIfReachesAllocspaceVisitor visitor(heap_, bitmap_);
-    MarkSweep::VisitObjectReferences(obj, visitor);
-  }
- private:
-  Heap* const heap_;
-  SpaceBitmap* bitmap_;
-};
-
-class ModUnionClearCardSetVisitor {
- public:
-  explicit ModUnionClearCardSetVisitor(std::set<byte*>* const cleared_cards)
-    : cleared_cards_(cleared_cards) {
-  }
-
-  inline void operator ()(byte* card, byte expected_value, byte new_value) const {
-    if (expected_value == CardTable::kCardDirty) {
-      cleared_cards_->insert(card);
-    }
-  }
-
- private:
-  std::set<byte*>* const cleared_cards_;
-};
-
-class ModUnionClearCardVisitor {
- public:
-  explicit ModUnionClearCardVisitor(std::vector<byte*>* cleared_cards)
-    : cleared_cards_(cleared_cards) {
-  }
-
-  void operator ()(byte* card, byte expected_card, byte new_card) const {
-    if (expected_card == CardTable::kCardDirty) {
-      cleared_cards_->push_back(card);
-    }
-  }
- private:
-  std::vector<byte*>* cleared_cards_;
-};
-
-ModUnionTableBitmap::ModUnionTableBitmap(Heap* heap) : ModUnionTable(heap)  {
-  // Prevent fragmentation of the heap which is caused by resizing of the vector.
-  // TODO: Make a new vector which uses madvise (basically same as a mark stack).
-  cleared_cards_.reserve(32);
-  const Spaces& spaces = heap->GetSpaces();
-  // Create one heap bitmap per image space.
-  // TODO: C++0x auto
-  for (Spaces::const_iterator it = spaces.begin(); it != spaces.end(); ++it) {
-    ContinuousSpace* space = *it;
-    if (space->IsImageSpace()) {
-      // The mod-union table is only needed when we have an image space since it's purpose is to
-      // cache image roots.
-      UniquePtr<SpaceBitmap> bitmap(SpaceBitmap::Create("mod-union table bitmap", space->Begin(),
-                                                        space->Size()));
-      CHECK(bitmap.get() != NULL) << "Failed to create mod-union bitmap";
-      bitmaps_.Put(space, bitmap.release());
-    }
-  }
-}
-
-ModUnionTableBitmap::~ModUnionTableBitmap() {
-  STLDeleteValues(&bitmaps_);
-}
-
-void ModUnionTableBitmap::ClearCards(ContinuousSpace* space) {
-  CardTable* card_table = heap_->GetCardTable();
-  ModUnionClearCardVisitor visitor(&cleared_cards_);
-  // Clear dirty cards in the this image space and update the corresponding mod-union bits.
-  card_table->ModifyCardsAtomic(space->Begin(), space->End(), AgeCardVisitor(), visitor);
-}
-
-void ModUnionTableBitmap::Update() {
-  CardTable* card_table = heap_->GetCardTable();
-  while (!cleared_cards_.empty()) {
-    byte* card = cleared_cards_.back();
-    cleared_cards_.pop_back();
-
-    uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card));
-    uintptr_t end = start + CardTable::kCardSize;
-    ContinuousSpace* space = heap_->FindSpaceFromObject(reinterpret_cast<Object*>(start));
-    SpaceBitmap* bitmap = space->GetLiveBitmap();
-
-    // Clear the mod-union bitmap range corresponding to this card so that we don't have any
-    // objects marked which do not reach the alloc space.
-    bitmap->VisitRange(start, end, SpaceBitmap::ClearVisitor(bitmap));
-
-    // At this point we need to update the mod-union bitmap to contain all the objects which reach
-    // the alloc space.
-    ModUnionVisitor visitor(heap_, bitmap);
-    space->GetLiveBitmap()->VisitMarkedRange(start, end, visitor, VoidFunctor());
-  }
-}
-
-class ModUnionScanImageRootVisitor {
- public:
-  ModUnionScanImageRootVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {
-  }
-
-  void operator ()(const Object* root) const
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    DCHECK(root != NULL);
-    mark_sweep_->ScanRoot(root);
-  }
-
- private:
-  MarkSweep* const mark_sweep_;
-};
-
-void ModUnionTableBitmap::MarkReferences(MarkSweep* mark_sweep) {
-  // Some tests have no image space, and therefore no mod-union bitmap.
-  ModUnionScanImageRootVisitor image_root_scanner(mark_sweep);
-  for (BitmapMap::iterator it = bitmaps_.begin(); it != bitmaps_.end(); ++it) {
-    const ContinuousSpace* space = it->first;
-    uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
-    uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
-    it->second->VisitMarkedRange(begin, end, image_root_scanner, VoidFunctor());
-  }
-}
-
-
-ModUnionTableReferenceCache::ModUnionTableReferenceCache(Heap* heap) : ModUnionTable(heap) {
-
-}
-
-ModUnionTableReferenceCache::~ModUnionTableReferenceCache() {
-
-}
-
-void ModUnionTableReferenceCache::ClearCards(ContinuousSpace* space) {
-  CardTable* card_table = GetHeap()->GetCardTable();
-  ModUnionClearCardSetVisitor visitor(&cleared_cards_);
-  // Clear dirty cards in the this space and update the corresponding mod-union bits.
-  card_table->ModifyCardsAtomic(space->Begin(), space->End(), AgeCardVisitor(), visitor);
-}
-
-class AddToReferenceArrayVisitor {
- public:
-  explicit AddToReferenceArrayVisitor(
-      ModUnionTableReferenceCache* const mod_union_table,
-        ModUnionTableReferenceCache::ReferenceArray* references)
-    : mod_union_table_(mod_union_table),
-      references_(references) {
-  }
-
-  // Extra parameters are required since we use this same visitor signature for checking objects.
-  void operator ()(const Object* obj, const Object* ref, const MemberOffset& /* offset */,
-                     bool /* is_static */) const {
-    // Only add the reference if it is non null and fits our criteria.
-    if (ref != NULL && mod_union_table_->AddReference(obj, ref)) {
-      references_->push_back(ref);
-    }
-  }
-
- private:
-  ModUnionTableReferenceCache* mod_union_table_;
-  ModUnionTable::ReferenceArray* references_;
-};
-
-class ModUnionReferenceVisitor {
- public:
-  explicit ModUnionReferenceVisitor(
-        ModUnionTableReferenceCache* const mod_union_table,
-        ModUnionTableReferenceCache::ReferenceArray* references)
-    : mod_union_table_(mod_union_table),
-      references_(references) {
-  }
-
-  void operator ()(const Object* obj) const
-      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
-                            Locks::mutator_lock_) {
-    DCHECK(obj != NULL);
-    // We don't have an early exit since we use the visitor pattern, an early
-    // exit should significantly speed this up.
-    AddToReferenceArrayVisitor visitor(mod_union_table_, references_);
-    MarkSweep::VisitObjectReferences(obj, visitor);
-  }
- private:
-  ModUnionTableReferenceCache* const mod_union_table_;
-  ModUnionTable::ReferenceArray* references_;
-};
-
-
-class CheckReferenceVisitor {
- public:
-  typedef std::set<const Object*> ReferenceSet;
-
-  explicit CheckReferenceVisitor(
-      ModUnionTableReferenceCache* const mod_union_table,
-      const ReferenceSet& references)
-    : mod_union_table_(mod_union_table),
-      references_(references) {
-  }
-
-  // Extra parameters are required since we use this same visitor signature for checking objects.
-  // TODO: Fixme when anotatalysis works with visitors.
-  void operator ()(const Object* obj, const Object* ref, const MemberOffset& /* offset */,
-                     bool /* is_static */) const
-      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
-    Heap* heap = mod_union_table_->GetHeap();
-    if (ref != NULL && mod_union_table_->AddReference(obj, ref) &&
-        references_.find(ref) == references_.end()) {
-      ContinuousSpace* from_space = heap->FindSpaceFromObject(obj);
-      ContinuousSpace* to_space = heap->FindSpaceFromObject(ref);
-      LOG(INFO) << "Object " << reinterpret_cast<const void*>(obj) << "(" << PrettyTypeOf(obj) << ")"
-                << "References " << reinterpret_cast<const void*>(ref)
-                << "(" << PrettyTypeOf(ref) << ") without being in mod-union table";
-      LOG(INFO) << "FromSpace " << from_space->GetName() << " type " << from_space->GetGcRetentionPolicy();
-      LOG(INFO) << "ToSpace " << to_space->GetName() << " type " << to_space->GetGcRetentionPolicy();
-      mod_union_table_->GetHeap()->DumpSpaces();
-      LOG(FATAL) << "FATAL ERROR";
-    }
-  }
-
- private:
-  ModUnionTableReferenceCache* const mod_union_table_;
-  const ReferenceSet& references_;
-};
-
-class ModUnionCheckReferences {
- public:
-  typedef std::set<const Object*> ReferenceSet;
-
-  explicit ModUnionCheckReferences (
-      ModUnionTableReferenceCache* const mod_union_table,
-      const ReferenceSet& references)
-    : mod_union_table_(mod_union_table),
-      references_(references) {
-  }
-
-  void operator ()(const Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
-    DCHECK(obj != NULL);
-    if (kDebugLocking) {
-      Locks::heap_bitmap_lock_->AssertSharedHeld(Thread::Current());
-    }
-    CheckReferenceVisitor visitor(mod_union_table_, references_);
-    MarkSweep::VisitObjectReferences(obj, visitor);
-  }
-
- private:
-  ModUnionTableReferenceCache* const mod_union_table_;
-  const ReferenceSet& references_;
-};
-
-void ModUnionTableReferenceCache::Verify() {
-  // Start by checking that everything in the mod union table is marked.
-  Heap* heap = GetHeap();
-  for (ReferenceMap::const_iterator it = references_.begin(); it != references_.end(); ++it) {
-    for (ReferenceArray::const_iterator it_ref = it->second.begin(); it_ref != it->second.end();
-        ++it_ref ) {
-      DCHECK(heap->GetLiveBitmap()->Test(*it_ref));
-    }
-  }
-
-  // Check the references of each clean card which is also in the mod union table.
-  for (ReferenceMap::const_iterator it = references_.begin(); it != references_.end(); ++it) {
-    const byte* card = &*it->first;
-    if (*card == CardTable::kCardClean) {
-      std::set<const Object*> reference_set;
-      for (ReferenceArray::const_iterator itr = it->second.begin(); itr != it->second.end();++itr) {
-        reference_set.insert(*itr);
-      }
-      ModUnionCheckReferences visitor(this, reference_set);
-      CardTable* card_table = heap->GetCardTable();
-      uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card));
-      uintptr_t end = start + CardTable::kCardSize;
-      SpaceBitmap* live_bitmap =
-              heap->FindSpaceFromObject(reinterpret_cast<Object*>(start))->GetLiveBitmap();
-      live_bitmap->VisitMarkedRange(start, end, visitor, VoidFunctor());
-    }
-  }
-}
-
-void ModUnionTableReferenceCache::Update() {
-  Heap* heap = GetHeap();
-  CardTable* card_table = heap->GetCardTable();
-
-  ReferenceArray cards_references;
-  ModUnionReferenceVisitor visitor(this, &cards_references);
-
-  for (ClearedCards::iterator it = cleared_cards_.begin(); it != cleared_cards_.end(); ++it) {
-    byte* card = *it;
-    // Clear and re-compute alloc space references associated with this card.
-    cards_references.clear();
-    uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card));
-    uintptr_t end = start + CardTable::kCardSize;
-    SpaceBitmap* live_bitmap =
-        heap->FindSpaceFromObject(reinterpret_cast<Object*>(start))->GetLiveBitmap();
-    live_bitmap->VisitMarkedRange(start, end, visitor, VoidFunctor());
-
-    // Update the corresponding references for the card.
-    // TODO: C++0x auto
-    ReferenceMap::iterator found = references_.find(card);
-    if (found == references_.end()) {
-      if (cards_references.empty()) {
-        // No reason to add empty array.
-        continue;
-      }
-      references_.Put(card, cards_references);
-    } else {
-      found->second = cards_references;
-    }
-  }
-  cleared_cards_.clear();
-}
-
-void ModUnionTableReferenceCache::MarkReferences(MarkSweep* mark_sweep) {
-  // TODO: C++0x auto
-  size_t count = 0;
-  for (ReferenceMap::const_iterator it = references_.begin(); it != references_.end(); ++it) {
-    for (ReferenceArray::const_iterator it_ref = it->second.begin(); it_ref != it->second.end(); ++it_ref ) {
-      mark_sweep->MarkRoot(*it_ref);
-      ++count;
-    }
-  }
-  if (VLOG_IS_ON(heap)) {
-    VLOG(gc) << "Marked " << count << " references in mod union table";
-  }
-}
-
-ModUnionTableCardCache::ModUnionTableCardCache(Heap* heap) : ModUnionTable(heap) {
-
-}
-
-ModUnionTableCardCache::~ModUnionTableCardCache() {
-
-}
-
-void ModUnionTableCardCache::ClearCards(ContinuousSpace* space) {
-  CardTable* card_table = GetHeap()->GetCardTable();
-  ModUnionClearCardSetVisitor visitor(&cleared_cards_);
-  // Clear dirty cards in the this space and update the corresponding mod-union bits.
-  card_table->ModifyCardsAtomic(space->Begin(), space->End(), AgeCardVisitor(), visitor);
-}
-
-// Mark all references to the alloc space(s).
-void ModUnionTableCardCache::MarkReferences(MarkSweep* mark_sweep) {
-  CardTable* card_table = heap_->GetCardTable();
-  ModUnionScanImageRootVisitor visitor(mark_sweep);
-  for (ClearedCards::const_iterator it = cleared_cards_.begin(); it != cleared_cards_.end(); ++it) {
-    byte* card = *it;
-    uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card));
-    uintptr_t end = start + CardTable::kCardSize;
-    SpaceBitmap* live_bitmap =
-        heap_->FindSpaceFromObject(reinterpret_cast<Object*>(start))->GetLiveBitmap();
-    live_bitmap->VisitMarkedRange(start, end, visitor, VoidFunctor());
-  }
-}
-
-}  // namespace art
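
Each of the deleted table implementations above repeatedly turns a dirty card back into the heap range it covers (AddrFromCard, then start + kCardSize). A minimal model of that mapping, using ART's 128-byte cards but omitting the biased-begin detail of the real card table:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    static const size_t kCardSize = 128;  // each card byte covers 128 heap bytes

    uintptr_t AddrFromCard(const uint8_t* card, const uint8_t* card_table_begin,
                           uintptr_t heap_begin) {
      return heap_begin + static_cast<uintptr_t>(card - card_table_begin) * kCardSize;
    }

    int main() {
      uint8_t card_table[4] = {};
      // Card #2 covers heap bytes [0x1000 + 256, 0x1000 + 384).
      assert(AddrFromCard(&card_table[2], card_table, 0x1000) == 0x1000 + 2 * kCardSize);
      return 0;
    }
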
diff --git a/src/gc/mod_union_table.h b/src/gc/mod_union_table.h
deleted file mode 100644
index c0b9535..0000000
--- a/src/gc/mod_union_table.h
+++ /dev/null
@@ -1,167 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_SRC_GC_MOD_UNION_TABLE_H_
-#define ART_SRC_GC_MOD_UNION_TABLE_H_
-
-#include "globals.h"
-#include "safe_map.h"
-
-#include <set>
-#include <vector>
-
-namespace art {
-namespace mirror {
-class Object;
-}
-class ContinuousSpace;
-class Heap;
-class HeapBitmap;
-class MarkSweep;
-class Space;
-class SpaceBitmap;
-
-// Base class
-class ModUnionTable {
- public:
-  typedef std::vector<const mirror::Object*> ReferenceArray;
-  typedef std::set<byte*> ClearedCards;
-
-  ModUnionTable(Heap* heap) : heap_(heap) {
-
-  }
-
-  virtual ~ModUnionTable() {
-
-  }
-
-  // Clear cards which map to a memory range of a space.
-  virtual void ClearCards(ContinuousSpace* space) = 0;
-
-  // Update the mod-union table.
-  virtual void Update() = 0;
-
-  // Mark all references which are stored in the mod union table.
-  virtual void MarkReferences(MarkSweep* mark_sweep) = 0;
-
-  // Verification, sanity checks that we don't have clean cards which conflict with out cached data
-  // for said cards. Exclusive lock is required since verify sometimes uses
-  // SpaceBitmap::VisitMarkedRange and VisitMarkedRange can't know if the callback will modify the
-  // bitmap or not.
-  virtual void Verify() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) = 0;
-
-  Heap* GetHeap() const {
-    return heap_;
-  }
-
- protected:
-  Heap* const heap_;
-};
-
-// Bitmap implementation.
-// DEPRECATED, performs strictly less well than merely caching which cards were dirty.
-class ModUnionTableBitmap : public ModUnionTable {
- public:
-  ModUnionTableBitmap(Heap* heap);
-  virtual ~ModUnionTableBitmap();
-
-  // Clear space cards.
-  void ClearCards(ContinuousSpace* space);
-
-  // Update table based on cleared cards.
-  void Update()
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
-  // Mark all references to the alloc space(s).
-  void MarkReferences(MarkSweep* mark_sweep) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
-
- protected:
-  // Cleared card array, used to update the mod-union table.
-  std::vector<byte*> cleared_cards_;
-
-  // One bitmap per image space.
-  // TODO: Add support for Zygote spaces?
-  typedef SafeMap<ContinuousSpace*, SpaceBitmap*> BitmapMap;
-  BitmapMap bitmaps_;
-};
-
-// Reference caching implementation. Caches references pointing to alloc space(s) for each card.
-class ModUnionTableReferenceCache : public ModUnionTable {
- public:
-  typedef SafeMap<const byte*, ReferenceArray > ReferenceMap;
-
-  ModUnionTableReferenceCache(Heap* heap);
-  virtual ~ModUnionTableReferenceCache();
-
-  // Clear and store cards for a space.
-  void ClearCards(ContinuousSpace* space);
-
-  // Update table based on cleared cards.
-  void Update()
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
-  // Mark all references to the alloc space(s).
-  void MarkReferences(MarkSweep* mark_sweep)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
-
-  // Exclusive lock is required since verify uses SpaceBitmap::VisitMarkedRange and
-  // VisitMarkedRange can't know if the callback will modify the bitmap or not.
-  void Verify() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
-
-  // Function that tells whether or not to add a reference to the table.
-  virtual bool AddReference(const mirror::Object* obj, const mirror::Object* ref) = 0;
-
- protected:
-  // Cleared card array, used to update the mod-union table.
-  ClearedCards cleared_cards_;
-
-  // Maps from dirty cards to their corresponding alloc space references.
-  ReferenceMap references_;
-};
-
-// Card caching implementation. Keeps track of which cards we cleared and only this information.
-class ModUnionTableCardCache : public ModUnionTable {
- public:
-  typedef SafeMap<const byte*, ReferenceArray > ReferenceMap;
-
-  ModUnionTableCardCache(Heap* heap);
-  virtual ~ModUnionTableCardCache();
-
-  // Clear and store cards for a space.
-  void ClearCards(ContinuousSpace* space);
-
-  // Nothing to update.
-  void Update() {}
-
-  // Mark all references to the alloc space(s).
-  void MarkReferences(MarkSweep* mark_sweep)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
-  // Nothing to verify.
-  void Verify() {}
-
- protected:
-  // Cleared card array, used to update the mod-union table.
-  ClearedCards cleared_cards_;
-};
-
-}  // namespace art
-
-#endif  // ART_SRC_GC_MOD_UNION_TABLE_H_
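
Of the three variants in the deleted header, the reference cache is the subtle one: per cleared card it stores the outgoing references into collectible spaces, so MarkReferences can replay them without rescanning the card. A toy rendering of that idea (types and names are stand-ins):

    #include <cstddef>
    #include <cstdint>
    #include <map>
    #include <utility>
    #include <vector>

    typedef const uint8_t* Card;
    typedef const void* ObjectRef;

    struct ToyReferenceCache {
      std::map<Card, std::vector<ObjectRef> > references;

      void Update(Card card, std::vector<ObjectRef> refs) {
        if (refs.empty()) return;  // like the deleted code, skip storing empty arrays
        references[card] = std::move(refs);
      }

      template <typename MarkFn>
      void MarkReferences(MarkFn mark) const {
        for (const auto& entry : references) {
          for (ObjectRef ref : entry.second) mark(ref);  // re-mark without card rescans
        }
      }
    };

    int main() {
      ToyReferenceCache table;
      uint8_t card = 0;
      table.Update(&card, std::vector<ObjectRef>(1, reinterpret_cast<ObjectRef>(0x1234)));
      std::size_t marked = 0;
      table.MarkReferences([&marked](ObjectRef) { ++marked; });
      return marked == 1 ? 0 : 1;
    }
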
diff --git a/src/gc/partial_mark_sweep.h b/src/gc/partial_mark_sweep.h
deleted file mode 100644
index 64c0bcd..0000000
--- a/src/gc/partial_mark_sweep.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_SRC_GC_PARTIAL_MARK_SWEEP_H_
-#define ART_SRC_GC_PARTIAL_MARK_SWEEP_H_
-
-#include "locks.h"
-#include "mark_sweep.h"
-
-namespace art {
-
-class PartialMarkSweep : public MarkSweep {
- public:
-  virtual GcType GetGcType() const {
-    return kGcTypePartial;
-  }
-
-  explicit PartialMarkSweep(Heap* heap, bool is_concurrent);
-  ~PartialMarkSweep();
-
-protected:
-  virtual void BindBitmaps()
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
-  DISALLOW_COPY_AND_ASSIGN(PartialMarkSweep);
-};
-
-}  // namespace art
-
-#endif  // ART_SRC_GC_PARTIAL_MARK_SWEEP_H_
diff --git a/src/gc/space.h b/src/gc/space.h
deleted file mode 100644
index d2bcd53..0000000
--- a/src/gc/space.h
+++ /dev/null
@@ -1,463 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_SRC_GC_SPACE_H_
-#define ART_SRC_GC_SPACE_H_
-
-#include <string>
-
-#include "UniquePtr.h"
-#include "base/macros.h"
-#include "base/mutex.h"
-#include "globals.h"
-#include "image.h"
-#include "dlmalloc.h"
-#include "mem_map.h"
-
-namespace art {
-
-static const bool kDebugSpaces = kIsDebugBuild;
-
-namespace mirror {
-class Object;
-}  // namespace mirror
-class DlMallocSpace;
-class ImageSpace;
-class LargeObjectSpace;
-class SpaceBitmap;
-
-enum GcRetentionPolicy {
-  kGcRetentionPolicyNeverCollect,
-  kGcRetentionPolicyAlwaysCollect,
-  kGcRetentionPolicyFullCollect, // Collect only for full GC
-};
-std::ostream& operator<<(std::ostream& os, const GcRetentionPolicy& policy);
-
-enum SpaceType {
-  kSpaceTypeImageSpace,
-  kSpaceTypeAllocSpace,
-  kSpaceTypeZygoteSpace,
-  kSpaceTypeLargeObjectSpace,
-};
-std::ostream& operator<<(std::ostream& os, const SpaceType& space_type);
-
-// A space contains memory allocated for managed objects.
-class Space {
- public:
-  virtual bool CanAllocateInto() const = 0;
-  virtual bool IsCompactible() const = 0;
-  virtual bool Contains(const mirror::Object* obj) const = 0;
-  virtual SpaceType GetType() const = 0;
-  virtual GcRetentionPolicy GetGcRetentionPolicy() const = 0;
-  virtual std::string GetName() const = 0;
-
-  ImageSpace* AsImageSpace();
-  DlMallocSpace* AsAllocSpace();
-  DlMallocSpace* AsZygoteSpace();
-  LargeObjectSpace* AsLargeObjectSpace();
-
-  bool IsImageSpace() const {
-    return GetType() == kSpaceTypeImageSpace;
-  }
-
-  bool IsAllocSpace() const {
-    return GetType() == kSpaceTypeAllocSpace || GetType() == kSpaceTypeZygoteSpace;
-  }
-
-  bool IsZygoteSpace() const {
-    return GetType() == kSpaceTypeZygoteSpace;
-  }
-
-  bool IsLargeObjectSpace() const {
-    return GetType() == kSpaceTypeLargeObjectSpace;
-  }
-
-  virtual void Dump(std::ostream& /* os */) const { }
-
-  virtual ~Space() {}
-
- protected:
-  Space() { }
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(Space);
-};
-
-// AllocSpace interface.
-class AllocSpace {
- public:
-  virtual bool CanAllocateInto() const {
-    return true;
-  }
-
-  // General statistics
-  virtual uint64_t GetNumBytesAllocated() const = 0;
-  virtual uint64_t GetNumObjectsAllocated() const = 0;
-  virtual uint64_t GetTotalBytesAllocated() const = 0;
-  virtual uint64_t GetTotalObjectsAllocated() const = 0;
-
-  // Allocate num_bytes without allowing growth.
-  virtual mirror::Object* Alloc(Thread* self, size_t num_bytes) = 0;
-
-  // Return the storage space required by obj.
-  virtual size_t AllocationSize(const mirror::Object* obj) = 0;
-
-  // Returns how many bytes were freed.
-  virtual size_t Free(Thread* self, mirror::Object* ptr) = 0;
-
-  // Returns how many bytes were freed.
-  virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) = 0;
-
- protected:
-  AllocSpace() {}
-  virtual ~AllocSpace() {}
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(AllocSpace);
-};
-
-// Continuous spaces have bitmaps, and an address range.
-class ContinuousSpace : public Space {
- public:
-  // Address at which the space begins
-  byte* Begin() const {
-    return begin_;
-  }
-
-  // Address at which the space ends, which may vary as the space is filled.
-  byte* End() const {
-    return end_;
-  }
-
-  // Current size of space
-  size_t Size() const {
-    return End() - Begin();
-  }
-
-  virtual SpaceBitmap* GetLiveBitmap() const = 0;
-  virtual SpaceBitmap* GetMarkBitmap() const = 0;
-
-  // Is object within this space?
-  bool HasAddress(const mirror::Object* obj) const {
-    const byte* byte_ptr = reinterpret_cast<const byte*>(obj);
-    return Begin() <= byte_ptr && byte_ptr < End();
-  }
-
-  virtual bool Contains(const mirror::Object* obj) const {
-    return HasAddress(obj);
-  }
-
-  virtual ~ContinuousSpace() {}
-
-  virtual std::string GetName() const {
-    return name_;
-  }
-
-  virtual GcRetentionPolicy GetGcRetentionPolicy() const {
-    return gc_retention_policy_;
-  }
-
- protected:
-  ContinuousSpace(const std::string& name, byte* begin, byte* end,
-                  GcRetentionPolicy gc_retention_policy);
-
-  std::string name_;
-  GcRetentionPolicy gc_retention_policy_;
-
-  // The beginning of the storage for fast access.
-  byte* begin_;
-
-  // Current end of the space.
-  byte* end_;
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(ContinuousSpace);
-};
-
-class DiscontinuousSpace : public virtual Space {
- public:
-  // Is object within this space?
-  virtual bool Contains(const mirror::Object* obj) const = 0;
-
-  virtual std::string GetName() const {
-    return name_;
-  }
-
-  virtual GcRetentionPolicy GetGcRetentionPolicy() const {
-    return gc_retention_policy_;
-  }
-
-protected:
-  DiscontinuousSpace(const std::string& name, GcRetentionPolicy gc_retention_policy);
-
-private:
-  std::string name_;
-  GcRetentionPolicy gc_retention_policy_;
-
-  DISALLOW_COPY_AND_ASSIGN(DiscontinuousSpace);
-};
-
-std::ostream& operator<<(std::ostream& os, const Space& space);
-
-class MemMapSpace : public ContinuousSpace {
- public:
-  // Maximum which the mapped space can grow to.
-  virtual size_t Capacity() const {
-    return mem_map_->Size();
-  }
-
-  // Size of the space without a limit on its growth. By default this is just the Capacity, but
-  // for the allocation space we support starting with a small heap and then extending it.
-  virtual size_t NonGrowthLimitCapacity() const {
-    return Capacity();
-  }
-
- protected:
-  MemMapSpace(const std::string& name, MemMap* mem_map, size_t initial_size,
-              GcRetentionPolicy gc_retention_policy);
-
-  MemMap* GetMemMap() {
-    return mem_map_.get();
-  }
-
-  const MemMap* GetMemMap() const {
-    return mem_map_.get();
-  }
-
- private:
-  // Underlying storage of the space
-  UniquePtr<MemMap> mem_map_;
-
-  DISALLOW_COPY_AND_ASSIGN(MemMapSpace);
-};
-
-// An alloc space is a space where objects may be allocated and garbage collected.
-class DlMallocSpace : public MemMapSpace, public AllocSpace {
- public:
-  typedef void(*WalkCallback)(void *start, void *end, size_t num_bytes, void* callback_arg);
-
-  virtual bool CanAllocateInto() const {
-    return true;
-  }
-
-  virtual bool IsCompactible() const {
-    return false;
-  }
-
-  virtual SpaceType GetType() const {
-    return kSpaceTypeAllocSpace;
-  }
-
-  // Create a AllocSpace with the requested sizes. The requested
-  // base address is not guaranteed to be granted, if it is required,
-  // the caller should call Begin on the returned space to confirm
-  // the request was granted.
-  static DlMallocSpace* Create(const std::string& name, size_t initial_size, size_t growth_limit,
-                            size_t capacity, byte* requested_begin);
-
-  // Allocate num_bytes without allowing the underlying mspace to grow.
-  virtual mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes);
-
-  // Allocate num_bytes allowing the underlying mspace to grow.
-  virtual mirror::Object* Alloc(Thread* self, size_t num_bytes);
-
-  // Return the storage space required by obj.
-  virtual size_t AllocationSize(const mirror::Object* obj);
-  virtual size_t Free(Thread* self, mirror::Object* ptr);
-  virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs);
-
-  void* MoreCore(intptr_t increment);
-
-  void* GetMspace() const {
-    return mspace_;
-  }
-
-  // Hands unused pages back to the system.
-  size_t Trim();
-
-  // Perform a mspace_inspect_all which calls back for each allocation chunk. The chunk may not be
-  // in use, indicated by num_bytes equaling zero.
-  void Walk(WalkCallback callback, void* arg);
-
-  // Returns the number of bytes that the heap is allowed to obtain from the system via MoreCore.
-  size_t GetFootprintLimit();
-
-  // Set the maximum number of bytes that the heap is allowed to obtain from the system via
-  // MoreCore. Note this is used to stop the mspace growing beyond the limit to Capacity. When
-  // allocations fail we GC before increasing the footprint limit and allowing the mspace to grow.
-  void SetFootprintLimit(size_t limit);
-
-  // Removes the fork time growth limit on capacity, allowing the application to allocate up to the
-  // maximum reserved size of the heap.
-  void ClearGrowthLimit() {
-    growth_limit_ = NonGrowthLimitCapacity();
-  }
-
-  // Override capacity so that we only return the possibly limited capacity
-  virtual size_t Capacity() const {
-    return growth_limit_;
-  }
-
-  // The total amount of memory reserved for the alloc space
-  virtual size_t NonGrowthLimitCapacity() const {
-    return GetMemMap()->Size();
-  }
-
-  virtual SpaceBitmap* GetLiveBitmap() const {
-    return live_bitmap_.get();
-  }
-
-  virtual SpaceBitmap* GetMarkBitmap() const {
-    return mark_bitmap_.get();
-  }
-
-  virtual void Dump(std::ostream& os) const;
-
-  void SetGrowthLimit(size_t growth_limit);
-
-  // Swap the live and mark bitmaps of this space. This is used by the GC for concurrent sweeping.
-  virtual void SwapBitmaps();
-
-  // Turn ourself into a zygote space and return a new alloc space which has our unused memory.
-  DlMallocSpace* CreateZygoteSpace();
-
-  void SetGcRetentionPolicy(GcRetentionPolicy gc_retention_policy) {
-    gc_retention_policy_ = gc_retention_policy;
-  }
-
-  virtual uint64_t GetNumBytesAllocated() const {
-    return num_bytes_allocated_;
-  }
-
-  virtual uint64_t GetNumObjectsAllocated() const {
-    return num_objects_allocated_;
-  }
-
-  virtual uint64_t GetTotalBytesAllocated() const {
-    return total_bytes_allocated_;
-  }
-
-  virtual uint64_t GetTotalObjectsAllocated() const {
-    return total_objects_allocated_;
-  }
-
- private:
-  size_t InternalAllocationSize(const mirror::Object* obj);
-  mirror::Object* AllocWithoutGrowthLocked(size_t num_bytes) EXCLUSIVE_LOCKS_REQUIRED(lock_);
-
-  UniquePtr<SpaceBitmap> live_bitmap_;
-  UniquePtr<SpaceBitmap> mark_bitmap_;
-  UniquePtr<SpaceBitmap> temp_bitmap_;
-
-  // Approximate number of bytes which have been allocated into the space.
-  size_t num_bytes_allocated_;
-  size_t num_objects_allocated_;
-  size_t total_bytes_allocated_;
-  size_t total_objects_allocated_;
-
-  static size_t bitmap_index_;
-
-  DlMallocSpace(const std::string& name, MemMap* mem_map, void* mspace, byte* begin, byte* end,
-             size_t growth_limit);
-
-  bool Init(size_t initial_size, size_t maximum_size, size_t growth_size, byte* requested_base);
-
-  static void* CreateMallocSpace(void* base, size_t morecore_start, size_t initial_size);
-
-  // The boundary tag overhead.
-  static const size_t kChunkOverhead = kWordSize;
-
-  // Used to ensure mutual exclusion when the allocation spaces data structures are being modified.
-  Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
-
-  // Underlying malloc space
-  void* const mspace_;
-
-  // The capacity of the alloc space until such time that ClearGrowthLimit is called.
-  // The underlying mem_map_ controls the maximum size we allow the heap to grow to. The growth
-  // limit is a value <= to the mem_map_ capacity used for ergonomic reasons because of the zygote.
-  // Prior to forking the zygote the heap will have a maximally sized mem_map_ but the growth_limit_
-  // will be set to a lower value. The growth_limit_ is used as the capacity of the alloc_space_,
-  // however, capacity normally can't vary. In the case of the growth_limit_ it can be cleared
-  // one time by a call to ClearGrowthLimit.
-  size_t growth_limit_;
-
-  friend class MarkSweep;
-
-  DISALLOW_COPY_AND_ASSIGN(DlMallocSpace);
-};
-
-// An image space is a space backed with a memory mapped image
-class ImageSpace : public MemMapSpace {
- public:
-  virtual bool CanAllocateInto() const {
-    return false;
-  }
-
-  virtual bool IsCompactible() const {
-    return false;
-  }
-
-  virtual SpaceType GetType() const {
-    return kSpaceTypeImageSpace;
-  }
-
-  // create a Space from an image file. cannot be used for future allocation or collected.
-  static ImageSpace* Create(const std::string& image)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
-  const ImageHeader& GetImageHeader() const {
-    return *reinterpret_cast<ImageHeader*>(Begin());
-  }
-
-  const std::string GetImageFilename() const {
-    return GetName();
-  }
-
-  // Mark the objects defined in this space in the given live bitmap
-  void RecordImageAllocations(SpaceBitmap* live_bitmap) const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
-  virtual SpaceBitmap* GetLiveBitmap() const {
-    return live_bitmap_.get();
-  }
-
-  virtual SpaceBitmap* GetMarkBitmap() const {
-    // ImageSpaces have the same bitmap for both live and marked. This helps reduce the number of
-    // special cases to test against.
-    return live_bitmap_.get();
-  }
-
-  virtual void Dump(std::ostream& os) const;
-
- private:
-  friend class Space;
-
-  UniquePtr<SpaceBitmap> live_bitmap_;
-  static size_t bitmap_index_;
-
-  ImageSpace(const std::string& name, MemMap* mem_map);
-
-  DISALLOW_COPY_AND_ASSIGN(ImageSpace);
-};
-
-// Callback for dlmalloc_inspect_all or mspace_inspect_all that will madvise(2) unused
-// pages back to the kernel.
-void MspaceMadviseCallback(void* start, void* end, size_t used_bytes, void* /*arg*/);
-
-}  // namespace art
-
-#endif  // ART_SRC_GC_SPACE_H_
diff --git a/src/gc/space.cc b/src/gc/space/dlmalloc_space.cc
similarity index 63%
rename from src/gc/space.cc
rename to src/gc/space/dlmalloc_space.cc
index 1d3ee28..02acd28 100644
--- a/src/gc/space.cc
+++ b/src/gc/space/dlmalloc_space.cc
@@ -13,33 +13,19 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
-#include "space.h"
-
-#include "base/logging.h"
-#include "base/stl_util.h"
-#include "base/unix_file/fd_file.h"
-#include "card_table.h"
-#include "dlmalloc.h"
-#include "image.h"
-#include "mirror/array.h"
-#include "mirror/abstract_method.h"
-#include "mirror/class-inl.h"
-#include "mirror/object-inl.h"
-#include "os.h"
+#include "dlmalloc_space.h"
+#include "gc/accounting/card_table.h"
+#include "gc/heap.h"
 #include "runtime.h"
-#include "space_bitmap.h"
-#include "space_bitmap-inl.h"
 #include "thread.h"
-#include "UniquePtr.h"
 #include "utils.h"
 
+//#include <valgrind/memcheck.h>
+#include <valgrind.h>
+
 namespace art {
-
-static const bool kPrefetchDuringDlMallocFreeList = true;
-
-// Magic padding value that we use to check for buffer overruns.
-static const word kPaddingValue = 0xBAC0BAC0;
+namespace gc {
+namespace space {
 
 // TODO: Remove define macro
 #define CHECK_MEMORY_CALL(call, args, what) \
@@ -51,45 +37,86 @@
     } \
   } while (false)
 
-ImageSpace* Space::AsImageSpace() {
-  DCHECK_EQ(GetType(), kSpaceTypeImageSpace);
-  return down_cast<ImageSpace*>(down_cast<MemMapSpace*>(this));
-}
+static const bool kPrefetchDuringDlMallocFreeList = true;
 
-DlMallocSpace* Space::AsAllocSpace() {
-  DCHECK_EQ(GetType(), kSpaceTypeAllocSpace);
-  return down_cast<DlMallocSpace*>(down_cast<MemMapSpace*>(this));
-}
+// Number of bytes to use as a red zone (rdz). A red zone of this size will be placed before and
+// after each allocation. 8 bytes provides long/double alignment.
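+// With the red zones in place each allocation is laid out as
+//   [red zone (8 bytes)][object (num_bytes)][red zone (8 bytes)]
+// and callers receive a pointer just past the leading red zone.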
+const size_t kValgrindRedZoneBytes = 8;
 
-DlMallocSpace* Space::AsZygoteSpace() {
-  DCHECK_EQ(GetType(), kSpaceTypeZygoteSpace);
-  return down_cast<DlMallocSpace*>(down_cast<MemMapSpace*>(this));
-}
+// A specialization of DlMallocSpace that provides information to valgrind about allocations.
+class ValgrindDlMallocSpace : public DlMallocSpace {
+ public:
+  virtual mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes) {
+    void* obj_with_rdz = DlMallocSpace::AllocWithGrowth(self, num_bytes + (2 * kValgrindRedZoneBytes));
+    if (obj_with_rdz != NULL) {
+      //VALGRIND_MAKE_MEM_UNDEFINED();
+      mirror::Object* result = reinterpret_cast<mirror::Object*>(reinterpret_cast<byte*>(obj_with_rdz) +
+                                                                 kValgrindRedZoneBytes);
+      VALGRIND_MEMPOOL_ALLOC(GetMspace(), result, num_bytes);
+      LOG(INFO) << "AllocWithGrowth on " << self << " = " << obj_with_rdz
+          << " of size " << num_bytes;
+      return result;
+    } else {
+      return NULL;
+    }
+  }
 
-LargeObjectSpace* Space::AsLargeObjectSpace() {
-  DCHECK_EQ(GetType(), kSpaceTypeLargeObjectSpace);
-  return reinterpret_cast<LargeObjectSpace*>(this);
-}
+  virtual mirror::Object* Alloc(Thread* self, size_t num_bytes) {
+    void* obj_with_rdz = DlMallocSpace::Alloc(self, num_bytes + (2 * kValgrindRedZoneBytes));
+    if (obj_with_rdz != NULL) {
+      mirror::Object* result = reinterpret_cast<mirror::Object*>(reinterpret_cast<byte*>(obj_with_rdz) +
+                                                                 kValgrindRedZoneBytes);
+      VALGRIND_MEMPOOL_ALLOC(GetMspace(), result, num_bytes);
+      LOG(INFO) << "Alloc on " << self << " = " << obj_with_rdz
+          << " of size " << num_bytes;
+      return result;
+    } else {
+      return NULL;
+    }
+  }
 
-ContinuousSpace::ContinuousSpace(const std::string& name, byte* begin, byte* end,
-                                 GcRetentionPolicy gc_retention_policy)
-    : name_(name), gc_retention_policy_(gc_retention_policy), begin_(begin), end_(end) {
+  virtual size_t AllocationSize(const mirror::Object* obj) {
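+    // Step back over the leading red zone to query the underlying chunk, then report its size
+    // net of the two red zones.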
+    const void* obj_after_rdz = reinterpret_cast<const void*>(obj);
+    size_t result = DlMallocSpace::AllocationSize(
+        reinterpret_cast<const mirror::Object*>(reinterpret_cast<const byte*>(obj_after_rdz) -
+                                                kValgrindRedZoneBytes));
+    return result - (2 * kValgrindRedZoneBytes);
+  }
 
-}
+  virtual size_t Free(Thread* self, mirror::Object* ptr) {
+    void* obj_after_rdz = reinterpret_cast<void*>(ptr);
+    void* obj_with_rdz = reinterpret_cast<byte*>(obj_after_rdz) - kValgrindRedZoneBytes;
+    LOG(INFO) << "Free on " << self << " of " << obj_with_rdz;
+    size_t freed = DlMallocSpace::Free(self, reinterpret_cast<mirror::Object*>(obj_with_rdz));
+    VALGRIND_MEMPOOL_FREE(GetMspace(), obj_after_rdz);
+    return freed - (2 * kValgrindRedZoneBytes);
+  }
 
-DiscontinuousSpace::DiscontinuousSpace(const std::string& name,
-                                       GcRetentionPolicy gc_retention_policy)
-    : name_(name), gc_retention_policy_(gc_retention_policy) {
+  virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
+    size_t freed = 0;
+    for (size_t i = 0; i < num_ptrs; i++) {
+      void* obj_after_rdz = reinterpret_cast<void*>(ptrs[i]);
+      void* obj_with_rdz = reinterpret_cast<byte*>(obj_after_rdz) - kValgrindRedZoneBytes;
+      LOG(INFO) << "FreeList on " << self << " of " << obj_with_rdz;
+      freed += DlMallocSpace::Free(self, reinterpret_cast<mirror::Object*>(obj_with_rdz));
+      VALGRIND_MEMPOOL_FREE(GetMspace(), obj_after_rdz);
+    }
+    return freed - (2 * kValgrindRedZoneBytes * num_ptrs);
+  }
 
-}
+  ValgrindDlMallocSpace(const std::string& name, MemMap* mem_map, void* mspace, byte* begin,
+                        byte* end, size_t growth_limit) :
+      DlMallocSpace(name, mem_map, mspace, begin, end, growth_limit) {
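+    // Register the mspace as a valgrind memory pool. kValgrindRedZoneBytes tells valgrind how
+    // much padding surrounds each allocation, and the final argument marks pool memory as
+    // zeroed, matching the mspace_calloc-based allocation path used by this space.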
+    VALGRIND_CREATE_MEMPOOL(GetMspace(), kValgrindRedZoneBytes, true);
+  }
 
-MemMapSpace::MemMapSpace(const std::string& name, MemMap* mem_map, size_t initial_size,
-                         GcRetentionPolicy gc_retention_policy)
-    : ContinuousSpace(name, mem_map->Begin(), mem_map->Begin() + initial_size, gc_retention_policy),
-      mem_map_(mem_map)
-{
+  virtual ~ValgrindDlMallocSpace() {
+    VALGRIND_DESTROY_MEMPOOL(GetMspace());
+  }
 
-}
+ private:
+  DISALLOW_COPY_AND_ASSIGN(ValgrindDlMallocSpace);
+};
 
 size_t DlMallocSpace::bitmap_index_ = 0;
 
@@ -103,15 +130,15 @@
 
   size_t bitmap_index = bitmap_index_++;
 
-  static const uintptr_t kGcCardSize = static_cast<uintptr_t>(CardTable::kCardSize);
+  static const uintptr_t kGcCardSize = static_cast<uintptr_t>(accounting::CardTable::kCardSize);
   CHECK(reinterpret_cast<uintptr_t>(mem_map->Begin()) % kGcCardSize == 0);
   CHECK(reinterpret_cast<uintptr_t>(mem_map->End()) % kGcCardSize == 0);
-  live_bitmap_.reset(SpaceBitmap::Create(
+  live_bitmap_.reset(accounting::SpaceBitmap::Create(
       StringPrintf("allocspace %s live-bitmap %d", name.c_str(), static_cast<int>(bitmap_index)),
       Begin(), Capacity()));
   DCHECK(live_bitmap_.get() != NULL) << "could not create allocspace live bitmap #" << bitmap_index;
 
-  mark_bitmap_.reset(SpaceBitmap::Create(
+  mark_bitmap_.reset(accounting::SpaceBitmap::Create(
       StringPrintf("allocspace %s mark-bitmap %d", name.c_str(), static_cast<int>(bitmap_index)),
       Begin(), Capacity()));
   DCHECK(live_bitmap_.get() != NULL) << "could not create allocspace mark bitmap #" << bitmap_index;
@@ -177,8 +204,13 @@
 
   // Everything is set so record in immutable structure and leave
   MemMap* mem_map_ptr = mem_map.release();
-  DlMallocSpace* space = new DlMallocSpace(name, mem_map_ptr, mspace, mem_map_ptr->Begin(), end,
-                                           growth_limit);
+  DlMallocSpace* space;
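+  // RUNNING_ON_VALGRIND (from valgrind.h) evaluates to non-zero when the process is executing
+  // under valgrind; in that case wrap the space so each allocation carries checkable red zones.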
+  if (RUNNING_ON_VALGRIND > 0) {
+    space = new ValgrindDlMallocSpace(name, mem_map_ptr, mspace, mem_map_ptr->Begin(), end,
+                                      growth_limit);
+  } else {
+    space = new DlMallocSpace(name, mem_map_ptr, mspace, mem_map_ptr->Begin(), end, growth_limit);
+  }
   if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
     LOG(INFO) << "Space::CreateAllocSpace exiting (" << PrettyDuration(NanoTime() - start_time)
         << " ) " << *space;
@@ -203,33 +235,26 @@
 }
 
 void DlMallocSpace::SwapBitmaps() {
-  SpaceBitmap* temp_live_bitmap = live_bitmap_.release();
-  live_bitmap_.reset(mark_bitmap_.release());
-  mark_bitmap_.reset(temp_live_bitmap);
+  live_bitmap_.swap(mark_bitmap_);
   // Swap names to get more descriptive diagnostics.
-  std::string temp_name = live_bitmap_->GetName();
+  std::string temp_name(live_bitmap_->GetName());
   live_bitmap_->SetName(mark_bitmap_->GetName());
   mark_bitmap_->SetName(temp_name);
 }
 
 mirror::Object* DlMallocSpace::AllocWithoutGrowthLocked(size_t num_bytes) {
-  if (kDebugSpaces) {
-    num_bytes += sizeof(word);
-  }
-
   mirror::Object* result = reinterpret_cast<mirror::Object*>(mspace_calloc(mspace_, 1, num_bytes));
-  if (kDebugSpaces && result != NULL) {
-    CHECK(Contains(result)) << "Allocation (" << reinterpret_cast<void*>(result)
-        << ") not in bounds of allocation space " << *this;
-    // Put a magic pattern before and after the allocation.
-    *reinterpret_cast<word*>(reinterpret_cast<byte*>(result) + AllocationSize(result)
-        - sizeof(word) - kChunkOverhead) = kPaddingValue;
+  if (result != NULL) {
+    if (kDebugSpaces) {
+      CHECK(Contains(result)) << "Allocation (" << reinterpret_cast<void*>(result)
+            << ") not in bounds of allocation space " << *this;
+    }
+    size_t allocation_size = AllocationSize(result);
+    num_bytes_allocated_ += allocation_size;
+    total_bytes_allocated_ += allocation_size;
+    ++total_objects_allocated_;
+    ++num_objects_allocated_;
   }
-  size_t allocation_size = AllocationSize(result);
-  num_bytes_allocated_ += allocation_size;
-  total_bytes_allocated_ += allocation_size;
-  ++total_objects_allocated_;
-  ++num_objects_allocated_;
   return result;
 }
 
@@ -263,8 +288,8 @@
 
 DlMallocSpace* DlMallocSpace::CreateZygoteSpace() {
   end_ = reinterpret_cast<byte*>(RoundUp(reinterpret_cast<uintptr_t>(end_), kPageSize));
-  DCHECK(IsAligned<CardTable::kCardSize>(begin_));
-  DCHECK(IsAligned<CardTable::kCardSize>(end_));
+  DCHECK(IsAligned<accounting::CardTable::kCardSize>(begin_));
+  DCHECK(IsAligned<accounting::CardTable::kCardSize>(end_));
   DCHECK(IsAligned<kPageSize>(begin_));
   DCHECK(IsAligned<kPageSize>(end_));
   size_t size = RoundUp(Size(), kPageSize);
@@ -291,7 +316,7 @@
   VLOG(heap) << "Size " << GetMemMap()->Size();
   VLOG(heap) << "GrowthLimit " << PrettySize(growth_limit);
   VLOG(heap) << "Capacity " << PrettySize(capacity);
-  UniquePtr<MemMap> mem_map(MemMap::MapAnonymous(GetName().c_str(), End(), capacity, PROT_READ | PROT_WRITE));
+  UniquePtr<MemMap> mem_map(MemMap::MapAnonymous(GetName(), End(), capacity, PROT_READ | PROT_WRITE));
   void* mspace = CreateMallocSpace(end_, starting_size, initial_size);
   // Protect memory beyond the initial size.
   byte* end = mem_map->Begin() + starting_size;
@@ -314,9 +339,6 @@
   if (kDebugSpaces) {
     CHECK(ptr != NULL);
     CHECK(Contains(ptr)) << "Free (" << ptr << ") not in bounds of heap " << *this;
-    CHECK_EQ(
-        *reinterpret_cast<word*>(reinterpret_cast<byte*>(ptr) + AllocationSize(ptr) -
-            sizeof(word) - kChunkOverhead), kPaddingValue);
   }
   const size_t bytes_freed = InternalAllocationSize(ptr);
   num_bytes_allocated_ -= bytes_freed;
@@ -374,20 +396,16 @@
   lock_.AssertHeld(Thread::Current());
   byte* original_end = end_;
   if (increment != 0) {
-    VLOG(heap) << "AllocSpace::MoreCore " << PrettySize(increment);
+    VLOG(heap) << "DlMallocSpace::MoreCore " << PrettySize(increment);
     byte* new_end = original_end + increment;
     if (increment > 0) {
-#if DEBUG_SPACES
       // Should never be asked to increase the allocation beyond the capacity of the space. Enforced
       // by mspace_set_footprint_limit.
       CHECK_LE(new_end, Begin() + Capacity());
-#endif
       CHECK_MEMORY_CALL(mprotect, (original_end, increment, PROT_READ | PROT_WRITE), GetName());
     } else {
-#if DEBUG_SPACES
       // Should never be asked for negative footprint (ie before begin)
       CHECK_GT(original_end + increment, Begin());
-#endif
       // Advise we don't need the pages and protect them
       // TODO: by removing permissions to the pages we may be causing TLB shoot-down which can be
       // expensive (note the same isn't true for giving permissions to a page as the protected
@@ -414,29 +432,13 @@
   return InternalAllocationSize(obj);
 }
 
-void MspaceMadviseCallback(void* start, void* end, size_t used_bytes, void* arg) {
-  // Is this chunk in use?
-  if (used_bytes != 0) {
-    return;
-  }
-  // Do we have any whole pages to give back?
-  start = reinterpret_cast<void*>(RoundUp(reinterpret_cast<uintptr_t>(start), kPageSize));
-  end = reinterpret_cast<void*>(RoundDown(reinterpret_cast<uintptr_t>(end), kPageSize));
-  if (end > start) {
-    size_t length = reinterpret_cast<byte*>(end) - reinterpret_cast<byte*>(start);
-    CHECK_MEMORY_CALL(madvise, (start, length, MADV_DONTNEED), "trim");
-    size_t* reclaimed = reinterpret_cast<size_t*>(arg);
-    *reclaimed += length;
-  }
-}
-
 size_t DlMallocSpace::Trim() {
   MutexLock mu(Thread::Current(), lock_);
   // Trim to release memory at the end of the space.
   mspace_trim(mspace_, 0);
   // Visit space looking for page-sized holes to advise the kernel we don't need.
   size_t reclaimed = 0;
-  mspace_inspect_all(mspace_, MspaceMadviseCallback, &reclaimed);
+  mspace_inspect_all(mspace_, DlmallocMadviseCallback, &reclaimed);
   return reclaimed;
 }
 
@@ -465,111 +467,14 @@
   mspace_set_footprint_limit(mspace_, new_size);
 }
 
-size_t ImageSpace::bitmap_index_ = 0;
-
-ImageSpace::ImageSpace(const std::string& name, MemMap* mem_map)
-    : MemMapSpace(name, mem_map, mem_map->Size(), kGcRetentionPolicyNeverCollect) {
-  const size_t bitmap_index = bitmap_index_++;
-  live_bitmap_.reset(SpaceBitmap::Create(
-      StringPrintf("imagespace %s live-bitmap %d", name.c_str(), static_cast<int>(bitmap_index)),
-      Begin(), Capacity()));
-  DCHECK(live_bitmap_.get() != NULL) << "could not create imagespace live bitmap #" << bitmap_index;
-}
-
-ImageSpace* ImageSpace::Create(const std::string& image_file_name) {
-  CHECK(!image_file_name.empty());
-
-  uint64_t start_time = 0;
-  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
-    start_time = NanoTime();
-    LOG(INFO) << "Space::CreateImageSpace entering" << " image_file_name=" << image_file_name;
-  }
-
-  UniquePtr<File> file(OS::OpenFile(image_file_name.c_str(), false));
-  if (file.get() == NULL) {
-    LOG(ERROR) << "Failed to open " << image_file_name;
-    return NULL;
-  }
-  ImageHeader image_header;
-  bool success = file->ReadFully(&image_header, sizeof(image_header));
-  if (!success || !image_header.IsValid()) {
-    LOG(ERROR) << "Invalid image header " << image_file_name;
-    return NULL;
-  }
-  UniquePtr<MemMap> map(MemMap::MapFileAtAddress(image_header.GetImageBegin(),
-                                                 file->GetLength(),
-                                                 PROT_READ | PROT_WRITE,
-                                                 MAP_PRIVATE | MAP_FIXED,
-                                                 file->Fd(),
-                                                 0,
-                                                 false));
-  if (map.get() == NULL) {
-    LOG(ERROR) << "Failed to map " << image_file_name;
-    return NULL;
-  }
-  CHECK_EQ(image_header.GetImageBegin(), map->Begin());
-  DCHECK_EQ(0, memcmp(&image_header, map->Begin(), sizeof(ImageHeader)));
-
-  Runtime* runtime = Runtime::Current();
-  mirror::Object* resolution_method = image_header.GetImageRoot(ImageHeader::kResolutionMethod);
-  runtime->SetResolutionMethod(down_cast<mirror::AbstractMethod*>(resolution_method));
-
-  mirror::Object* callee_save_method = image_header.GetImageRoot(ImageHeader::kCalleeSaveMethod);
-  runtime->SetCalleeSaveMethod(down_cast<mirror::AbstractMethod*>(callee_save_method), Runtime::kSaveAll);
-  callee_save_method = image_header.GetImageRoot(ImageHeader::kRefsOnlySaveMethod);
-  runtime->SetCalleeSaveMethod(down_cast<mirror::AbstractMethod*>(callee_save_method), Runtime::kRefsOnly);
-  callee_save_method = image_header.GetImageRoot(ImageHeader::kRefsAndArgsSaveMethod);
-  runtime->SetCalleeSaveMethod(down_cast<mirror::AbstractMethod*>(callee_save_method), Runtime::kRefsAndArgs);
-
-  ImageSpace* space = new ImageSpace(image_file_name, map.release());
-  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
-    LOG(INFO) << "Space::CreateImageSpace exiting (" << PrettyDuration(NanoTime() - start_time)
-        << ") " << *space;
-  }
-  return space;
-}
-
-void ImageSpace::RecordImageAllocations(SpaceBitmap* live_bitmap) const {
-  uint64_t start_time = 0;
-  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
-    LOG(INFO) << "ImageSpace::RecordImageAllocations entering";
-    start_time = NanoTime();
-  }
-  DCHECK(!Runtime::Current()->IsStarted());
-  CHECK(live_bitmap != NULL);
-  byte* current = Begin() + RoundUp(sizeof(ImageHeader), kObjectAlignment);
-  byte* end = End();
-  while (current < end) {
-    DCHECK_ALIGNED(current, kObjectAlignment);
-    const mirror::Object* obj = reinterpret_cast<const mirror::Object*>(current);
-    live_bitmap->Set(obj);
-    current += RoundUp(obj->SizeOf(), kObjectAlignment);
-  }
-  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
-    LOG(INFO) << "ImageSpace::RecordImageAllocations exiting ("
-        << PrettyDuration(NanoTime() - start_time) << ")";
-  }
-}
-
-std::ostream& operator<<(std::ostream& os, const Space& space) {
-  space.Dump(os);
-  return os;
-}
-
 void DlMallocSpace::Dump(std::ostream& os) const {
   os << GetType()
-      << "begin=" << reinterpret_cast<void*>(Begin())
+      << " begin=" << reinterpret_cast<void*>(Begin())
       << ",end=" << reinterpret_cast<void*>(End())
       << ",size=" << PrettySize(Size()) << ",capacity=" << PrettySize(Capacity())
       << ",name=\"" << GetName() << "\"]";
 }
 
-void ImageSpace::Dump(std::ostream& os) const {
-  os << GetType()
-      << "begin=" << reinterpret_cast<void*>(Begin())
-      << ",end=" << reinterpret_cast<void*>(End())
-      << ",size=" << PrettySize(Size())
-      << ",name=\"" << GetName() << "\"]";
-}
-
+}  // namespace space
+}  // namespace gc
 }  // namespace art
diff --git a/src/gc/space/dlmalloc_space.h b/src/gc/space/dlmalloc_space.h
new file mode 100644
index 0000000..00df0e6
--- /dev/null
+++ b/src/gc/space/dlmalloc_space.h
@@ -0,0 +1,185 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_GC_SPACE_DLMALLOC_SPACE_H_
+#define ART_SRC_GC_SPACE_DLMALLOC_SPACE_H_
+
+#include "gc/allocator/dlmalloc.h"
+#include "space.h"
+
+namespace art {
+namespace gc {
+
+namespace collector {
+  class MarkSweep;
+}  // namespace collector
+
+namespace space {
+
+// An alloc space is a space where objects may be allocated and garbage collected.
+class DlMallocSpace : public MemMapSpace, public AllocSpace {
+ public:
+  typedef void(*WalkCallback)(void *start, void *end, size_t num_bytes, void* callback_arg);
+
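+  // The retention policy doubles as the space type here: a DlMallocSpace whose policy has been
+  // switched to kGcRetentionPolicyFullCollect (as happens when it becomes the zygote space)
+  // reports itself as a zygote space rather than a plain alloc space.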
+  SpaceType GetType() const {
+    if (GetGcRetentionPolicy() == kGcRetentionPolicyFullCollect) {
+      return kSpaceTypeZygoteSpace;
+    } else {
+      return kSpaceTypeAllocSpace;
+    }
+  }
+
+  // Create an AllocSpace with the requested sizes. The requested base address is not
+  // guaranteed to be granted; if it is required, the caller should call Begin on the
+  // returned space to confirm the request was granted.
+  static DlMallocSpace* Create(const std::string& name, size_t initial_size, size_t growth_limit,
+                               size_t capacity, byte* requested_begin);
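+  // A hypothetical call, for illustration only:
+  //   DlMallocSpace::Create("alloc space", 4 * MB, 16 * MB, 64 * MB, NULL)
+  // would start with a 4MB footprint, allow growth up to 16MB, and reserve 64MB of
+  // address space.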
+
+  // Allocate num_bytes, allowing the underlying mspace to grow.
+  virtual mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes);
+
+  // Allocate num_bytes without allowing the underlying mspace to grow.
+  virtual mirror::Object* Alloc(Thread* self, size_t num_bytes);
+
+  // Return the storage space required by obj.
+  virtual size_t AllocationSize(const mirror::Object* obj);
+  virtual size_t Free(Thread* self, mirror::Object* ptr);
+  virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs);
+
+  void* MoreCore(intptr_t increment);
+
+  void* GetMspace() const {
+    return mspace_;
+  }
+
+  // Hands unused pages back to the system.
+  size_t Trim();
+
+  // Perform a mspace_inspect_all which calls back for each allocation chunk. The chunk may not be
+  // in use, indicated by num_bytes equaling zero.
+  void Walk(WalkCallback callback, void* arg);
+
+  // Returns the number of bytes that the heap is allowed to obtain from the system via MoreCore.
+  size_t GetFootprintLimit();
+
+  // Set the maximum number of bytes that the heap is allowed to obtain from the system via
+  // MoreCore. Note this is used to stop the mspace from growing beyond the limit given by Capacity. When
+  // allocations fail we GC before increasing the footprint limit and allowing the mspace to grow.
+  void SetFootprintLimit(size_t limit);
+
+  // Removes the fork time growth limit on capacity, allowing the application to allocate up to the
+  // maximum reserved size of the heap.
+  void ClearGrowthLimit() {
+    growth_limit_ = NonGrowthLimitCapacity();
+  }
+
+  // Override capacity so that we only return the possibly limited capacity
+  size_t Capacity() const {
+    return growth_limit_;
+  }
+
+  // The total amount of memory reserved for the alloc space.
+  size_t NonGrowthLimitCapacity() const {
+    return GetMemMap()->Size();
+  }
+
+  accounting::SpaceBitmap* GetLiveBitmap() const {
+    return live_bitmap_.get();
+  }
+
+  accounting::SpaceBitmap* GetMarkBitmap() const {
+    return mark_bitmap_.get();
+  }
+
+  void Dump(std::ostream& os) const;
+
+  void SetGrowthLimit(size_t growth_limit);
+
+  // Swap the live and mark bitmaps of this space. This is used by the GC for concurrent sweeping.
+  void SwapBitmaps();
+
+  // Turn ourself into a zygote space and return a new alloc space which has our unused memory.
+  DlMallocSpace* CreateZygoteSpace();
+
+  uint64_t GetBytesAllocated() const {
+    return num_bytes_allocated_;
+  }
+
+  uint64_t GetObjectsAllocated() const {
+    return num_objects_allocated_;
+  }
+
+  uint64_t GetTotalBytesAllocated() const {
+    return total_bytes_allocated_;
+  }
+
+  uint64_t GetTotalObjectsAllocated() const {
+    return total_objects_allocated_;
+  }
+
+ protected:
+  DlMallocSpace(const std::string& name, MemMap* mem_map, void* mspace, byte* begin, byte* end,
+                size_t growth_limit);
+
+ private:
+  size_t InternalAllocationSize(const mirror::Object* obj);
+  mirror::Object* AllocWithoutGrowthLocked(size_t num_bytes) EXCLUSIVE_LOCKS_REQUIRED(lock_);
+
+  bool Init(size_t initial_size, size_t maximum_size, size_t growth_size, byte* requested_base);
+
+  static void* CreateMallocSpace(void* base, size_t morecore_start, size_t initial_size);
+
+  UniquePtr<accounting::SpaceBitmap> live_bitmap_;
+  UniquePtr<accounting::SpaceBitmap> mark_bitmap_;
+  UniquePtr<accounting::SpaceBitmap> temp_bitmap_;
+
+  // Approximate number of bytes which have been allocated into the space.
+  size_t num_bytes_allocated_;
+  size_t num_objects_allocated_;
+  size_t total_bytes_allocated_;
+  size_t total_objects_allocated_;
+
+  static size_t bitmap_index_;
+
+  // The boundary tag overhead.
+  static const size_t kChunkOverhead = kWordSize;
+
+  // Used to ensure mutual exclusion when the allocation space's data structures are being modified.
+  Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+
+  // Underlying malloc space
+  void* const mspace_;
+
+  // The capacity of the alloc space until such time that ClearGrowthLimit is called.
+  // The underlying mem_map_ controls the maximum size we allow the heap to grow to. The growth
+  // limit is a value less than or equal to the mem_map_ capacity, used for ergonomic reasons
+  // because of the zygote. Prior to forking the zygote the heap will have a maximally sized
+  // mem_map_, but the growth_limit_ will be set to a lower value. The growth_limit_ serves as
+  // the capacity of the alloc space; unlike a normal capacity it can be cleared, once, by a
+  // call to ClearGrowthLimit.
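+  // For example (hypothetical numbers): the zygote might reserve a 64MB mem_map_ while setting
+  // growth_limit_ to 16MB; a later call to ClearGrowthLimit raises the limit back to the full
+  // 64MB reservation.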
+  size_t growth_limit_;
+
+  friend class collector::MarkSweep;
+
+  DISALLOW_COPY_AND_ASSIGN(DlMallocSpace);
+};
+
+}  // namespace space
+}  // namespace gc
+}  // namespace art
+
+#endif  // ART_SRC_GC_SPACE_DLMALLOC_SPACE_H_
diff --git a/src/gc/space/image_space.cc b/src/gc/space/image_space.cc
new file mode 100644
index 0000000..46c3937
--- /dev/null
+++ b/src/gc/space/image_space.cc
@@ -0,0 +1,129 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "image_space.h"
+
+#include "base/unix_file/fd_file.h"
+#include "gc/accounting/space_bitmap-inl.h"
+#include "mirror/abstract_method.h"
+#include "mirror/class-inl.h"
+#include "mirror/object-inl.h"
+#include "os.h"
+#include "runtime.h"
+#include "space-inl.h"
+#include "utils.h"
+
+namespace art {
+namespace gc {
+namespace space {
+
+size_t ImageSpace::bitmap_index_ = 0;
+
+ImageSpace::ImageSpace(const std::string& name, MemMap* mem_map)
+    : MemMapSpace(name, mem_map, mem_map->Size(), kGcRetentionPolicyNeverCollect) {
+  const size_t bitmap_index = bitmap_index_++;
+  live_bitmap_.reset(accounting::SpaceBitmap::Create(
+      StringPrintf("imagespace %s live-bitmap %d", name.c_str(), static_cast<int>(bitmap_index)),
+      Begin(), Capacity()));
+  DCHECK(live_bitmap_.get() != NULL) << "could not create imagespace live bitmap #" << bitmap_index;
+}
+
+ImageSpace* ImageSpace::Create(const std::string& image_file_name) {
+  CHECK(!image_file_name.empty());
+
+  uint64_t start_time = 0;
+  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
+    start_time = NanoTime();
+    LOG(INFO) << "Space::CreateImageSpace entering" << " image_file_name=" << image_file_name;
+  }
+
+  UniquePtr<File> file(OS::OpenFile(image_file_name.c_str(), false));
+  if (file.get() == NULL) {
+    LOG(ERROR) << "Failed to open " << image_file_name;
+    return NULL;
+  }
+  ImageHeader image_header;
+  bool success = file->ReadFully(&image_header, sizeof(image_header));
+  if (!success || !image_header.IsValid()) {
+    LOG(ERROR) << "Invalid image header " << image_file_name;
+    return NULL;
+  }
+  UniquePtr<MemMap> map(MemMap::MapFileAtAddress(image_header.GetImageBegin(),
+                                                 file->GetLength(),
+                                                 PROT_READ | PROT_WRITE,
+                                                 MAP_PRIVATE | MAP_FIXED,
+                                                 file->Fd(),
+                                                 0,
+                                                 false));
+  if (map.get() == NULL) {
+    LOG(ERROR) << "Failed to map " << image_file_name;
+    return NULL;
+  }
+  CHECK_EQ(image_header.GetImageBegin(), map->Begin());
+  DCHECK_EQ(0, memcmp(&image_header, map->Begin(), sizeof(ImageHeader)));
+
+  Runtime* runtime = Runtime::Current();
+  mirror::Object* resolution_method = image_header.GetImageRoot(ImageHeader::kResolutionMethod);
+  runtime->SetResolutionMethod(down_cast<mirror::AbstractMethod*>(resolution_method));
+
+  mirror::Object* callee_save_method = image_header.GetImageRoot(ImageHeader::kCalleeSaveMethod);
+  runtime->SetCalleeSaveMethod(down_cast<mirror::AbstractMethod*>(callee_save_method), Runtime::kSaveAll);
+  callee_save_method = image_header.GetImageRoot(ImageHeader::kRefsOnlySaveMethod);
+  runtime->SetCalleeSaveMethod(down_cast<mirror::AbstractMethod*>(callee_save_method), Runtime::kRefsOnly);
+  callee_save_method = image_header.GetImageRoot(ImageHeader::kRefsAndArgsSaveMethod);
+  runtime->SetCalleeSaveMethod(down_cast<mirror::AbstractMethod*>(callee_save_method), Runtime::kRefsAndArgs);
+
+  ImageSpace* space = new ImageSpace(image_file_name, map.release());
+  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
+    LOG(INFO) << "Space::CreateImageSpace exiting (" << PrettyDuration(NanoTime() - start_time)
+             << ") " << *space;
+  }
+  return space;
+}
+
+void ImageSpace::RecordImageAllocations(accounting::SpaceBitmap* live_bitmap) const {
+  uint64_t start_time = 0;
+  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
+    LOG(INFO) << "ImageSpace::RecordImageAllocations entering";
+    start_time = NanoTime();
+  }
+  DCHECK(!Runtime::Current()->IsStarted());
+  CHECK(live_bitmap != NULL);
+  byte* current = Begin() + RoundUp(sizeof(ImageHeader), kObjectAlignment);
+  byte* end = End();
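+  // Image objects are packed contiguously after the image header, so stepping by each object's
+  // rounded-up size visits every object exactly once.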
+  while (current < end) {
+    DCHECK_ALIGNED(current, kObjectAlignment);
+    const mirror::Object* obj = reinterpret_cast<const mirror::Object*>(current);
+    live_bitmap->Set(obj);
+    current += RoundUp(obj->SizeOf(), kObjectAlignment);
+  }
+  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
+    LOG(INFO) << "ImageSpace::RecordImageAllocations exiting ("
+        << PrettyDuration(NanoTime() - start_time) << ")";
+  }
+}
+
+void ImageSpace::Dump(std::ostream& os) const {
+  os << GetType()
+      << "begin=" << reinterpret_cast<void*>(Begin())
+      << ",end=" << reinterpret_cast<void*>(End())
+      << ",size=" << PrettySize(Size())
+      << ",name=\"" << GetName() << "\"]";
+}
+
+}  // namespace space
+}  // namespace gc
+}  // namespace art
diff --git a/src/gc/space/image_space.h b/src/gc/space/image_space.h
new file mode 100644
index 0000000..afec5b7
--- /dev/null
+++ b/src/gc/space/image_space.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_GC_SPACE_IMAGE_SPACE_H_
+#define ART_SRC_GC_SPACE_IMAGE_SPACE_H_
+
+#include "space.h"
+
+namespace art {
+namespace gc {
+namespace space {
+
+// An image space is a space backed with a memory mapped image.
+class ImageSpace : public MemMapSpace {
+ public:
+  bool CanAllocateInto() const {
+    return false;
+  }
+
+  SpaceType GetType() const {
+    return kSpaceTypeImageSpace;
+  }
+
+  // Create a Space from an image file. The space cannot be used for future allocation and is never collected.
+  static ImageSpace* Create(const std::string& image)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  const ImageHeader& GetImageHeader() const {
+    return *reinterpret_cast<ImageHeader*>(Begin());
+  }
+
+  const std::string GetImageFilename() const {
+    return GetName();
+  }
+
+  // Mark the objects defined in this space in the given live bitmap
+  void RecordImageAllocations(accounting::SpaceBitmap* live_bitmap) const
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  accounting::SpaceBitmap* GetLiveBitmap() const {
+    return live_bitmap_.get();
+  }
+
+  accounting::SpaceBitmap* GetMarkBitmap() const {
+    // ImageSpaces have the same bitmap for both live and marked. This helps reduce the number of
+    // special cases to test against.
+    return live_bitmap_.get();
+  }
+
+  void Dump(std::ostream& os) const;
+
+ private:
+  friend class Space;
+
+  static size_t bitmap_index_;
+
+  UniquePtr<accounting::SpaceBitmap> live_bitmap_;
+
+  ImageSpace(const std::string& name, MemMap* mem_map);
+
+  DISALLOW_COPY_AND_ASSIGN(ImageSpace);
+};
+
+}  // namespace space
+}  // namespace gc
+}  // namespace art
+
+#endif  // ART_SRC_GC_SPACE_IMAGE_SPACE_H_
diff --git a/src/gc/large_object_space.cc b/src/gc/space/large_object_space.cc
similarity index 97%
rename from src/gc/large_object_space.cc
rename to src/gc/space/large_object_space.cc
index c3bf382..3cee1b7 100644
--- a/src/gc/large_object_space.cc
+++ b/src/gc/space/large_object_space.cc
@@ -14,18 +14,19 @@
  * limitations under the License.
  */
 
+#include "large_object_space.h"
+
 #include "base/logging.h"
 #include "base/stl_util.h"
-#include "large_object_space.h"
 #include "UniquePtr.h"
-#include "dlmalloc.h"
 #include "image.h"
 #include "os.h"
-#include "space_bitmap.h"
 #include "thread.h"
 #include "utils.h"
 
 namespace art {
+namespace gc {
+namespace space {
 
 void LargeObjectSpace::SwapBitmaps() {
   live_objects_.swap(mark_objects_);
@@ -39,8 +40,6 @@
     : DiscontinuousSpace(name, kGcRetentionPolicyAlwaysCollect),
       num_bytes_allocated_(0), num_objects_allocated_(0), total_bytes_allocated_(0),
       total_objects_allocated_(0) {
-  live_objects_.reset(new SpaceSetMap("large live objects"));
-  mark_objects_.reset(new SpaceSetMap("large marked objects"));
 }
 
 
@@ -281,4 +280,6 @@
      << " end: " << reinterpret_cast<void*>(End());
 }
 
-}
+}  // namespace space
+}  // namespace gc
+}  // namespace art
diff --git a/src/gc/large_object_space.h b/src/gc/space/large_object_space.h
similarity index 84%
rename from src/gc/large_object_space.h
rename to src/gc/space/large_object_space.h
index 8a2f970..197fad3 100644
--- a/src/gc/large_object_space.h
+++ b/src/gc/space/large_object_space.h
@@ -14,51 +14,38 @@
  * limitations under the License.
  */
 
-#ifndef ART_SRC_GC_LARGE_OBJECT_SPACE_H_
-#define ART_SRC_GC_LARGE_OBJECT_SPACE_H_
+#ifndef ART_SRC_GC_SPACE_LARGE_OBJECT_SPACE_H_
+#define ART_SRC_GC_SPACE_LARGE_OBJECT_SPACE_H_
 
-#include "space.h"
+#include "dlmalloc_space.h"
 #include "safe_map.h"
+#include "space.h"
 
 #include <set>
 #include <vector>
 
 namespace art {
-class SpaceSetMap;
+namespace gc {
+namespace space {
 
 // Abstraction implemented by all large object spaces.
 class LargeObjectSpace : public DiscontinuousSpace, public AllocSpace {
  public:
-  virtual bool CanAllocateInto() const {
-    return true;
-  }
-
-  virtual bool IsCompactible() const {
-    return true;
-  }
-
   virtual SpaceType GetType() const {
     return kSpaceTypeLargeObjectSpace;
   }
 
-  virtual SpaceSetMap* GetLiveObjects() const {
-    return live_objects_.get();
-  }
-
-  virtual SpaceSetMap* GetMarkObjects() const {
-    return mark_objects_.get();
-  }
-
   virtual void SwapBitmaps();
   virtual void CopyLiveToMarked();
   virtual void Walk(DlMallocSpace::WalkCallback, void* arg) = 0;
   virtual ~LargeObjectSpace() {}
 
-  uint64_t GetNumBytesAllocated() const {
+  uint64_t GetBytesAllocated() const {
     return num_bytes_allocated_;
   }
 
-  uint64_t GetNumObjectsAllocated() const {
+  uint64_t GetObjectsAllocated() const {
     return num_objects_allocated_;
   }
 
@@ -82,10 +69,10 @@
   size_t total_bytes_allocated_;
   size_t total_objects_allocated_;
 
-  UniquePtr<SpaceSetMap> live_objects_;
-  UniquePtr<SpaceSetMap> mark_objects_;
-
   friend class Space;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(LargeObjectSpace);
 };
 
 // A discontinuous large object space implemented by individual mmap/munmap calls.
@@ -96,12 +83,13 @@
   static LargeObjectMapSpace* Create(const std::string& name);
 
   // Return the storage space required by obj.
-  virtual size_t AllocationSize(const mirror::Object* obj);
-  virtual mirror::Object* Alloc(Thread* self, size_t num_bytes);
+  size_t AllocationSize(const mirror::Object* obj);
+  mirror::Object* Alloc(Thread* self, size_t num_bytes);
   size_t Free(Thread* self, mirror::Object* ptr);
-  virtual void Walk(DlMallocSpace::WalkCallback, void* arg);
+  void Walk(DlMallocSpace::WalkCallback, void* arg);
   // TODO: disabling thread safety analysis as this may be called when we already hold lock_.
-  virtual bool Contains(const mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS;
+  bool Contains(const mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS;
+
 private:
   LargeObjectMapSpace(const std::string& name);
   virtual ~LargeObjectMapSpace() {}
@@ -114,6 +102,7 @@
 };
 
 // A continuous large object space with a free-list to handle holes.
+// TODO: this implementation is buggy.
 class FreeListSpace : public LargeObjectSpace {
  public:
   virtual ~FreeListSpace();
@@ -140,7 +129,7 @@
     return End() - Begin();
   }
 
-  virtual void Dump(std::ostream& os) const;
+  void Dump(std::ostream& os) const;
 
  private:
   static const size_t kAlignment = kPageSize;
@@ -197,6 +186,8 @@
   FreeChunks free_chunks_ GUARDED_BY(lock_);
 };
 
-}
+}  // namespace space
+}  // namespace gc
+}  // namespace art
 
-#endif  // ART_SRC_GC_LARGE_OBJECT_SPACE_H_
+#endif  // ART_SRC_GC_SPACE_LARGE_OBJECT_SPACE_H_
diff --git a/src/gc/space/space-inl.h b/src/gc/space/space-inl.h
new file mode 100644
index 0000000..8216d1b
--- /dev/null
+++ b/src/gc/space/space-inl.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_GC_SPACE_SPACE_INL_H_
+#define ART_SRC_GC_SPACE_SPACE_INL_H_
+
+#include "space.h"
+
+#include "dlmalloc_space.h"
+#include "image_space.h"
+
+namespace art {
+namespace gc {
+namespace space {
+
+inline ImageSpace* Space::AsImageSpace() {
+  DCHECK_EQ(GetType(), kSpaceTypeImageSpace);
+  return down_cast<ImageSpace*>(down_cast<MemMapSpace*>(this));
+}
+
+inline DlMallocSpace* Space::AsDlMallocSpace() {
+  DCHECK_EQ(GetType(), kSpaceTypeAllocSpace);
+  return down_cast<DlMallocSpace*>(down_cast<MemMapSpace*>(this));
+}
+
+inline DlMallocSpace* Space::AsZygoteSpace() {
+  DCHECK_EQ(GetType(), kSpaceTypeZygoteSpace);
+  return down_cast<DlMallocSpace*>(down_cast<MemMapSpace*>(this));
+}
+
+inline LargeObjectSpace* Space::AsLargeObjectSpace() {
+  DCHECK_EQ(GetType(), kSpaceTypeLargeObjectSpace);
+  return reinterpret_cast<LargeObjectSpace*>(this);
+}
+
+}  // namespace space
+}  // namespace gc
+}  // namespace art
+
+#endif  // ART_SRC_GC_SPACE_SPACE_INL_H_
diff --git a/src/gc/space/space.cc b/src/gc/space/space.cc
new file mode 100644
index 0000000..eae281a
--- /dev/null
+++ b/src/gc/space/space.cc
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "space.h"
+
+#include "base/logging.h"
+
+namespace art {
+namespace gc {
+namespace space {
+
+Space::Space(const std::string& name, GcRetentionPolicy gc_retention_policy) :
+    name_(name), gc_retention_policy_(gc_retention_policy) { }
+
+void Space::Dump(std::ostream& os) const {
+  os << GetName() << ":" << GetGcRetentionPolicy();
+}
+
+std::ostream& operator<<(std::ostream& os, const Space& space) {
+  space.Dump(os);
+  return os;
+}
+
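+// At present the large object space is the only discontinuous space, hence the "large ..."
+// names baked into the live and mark object sets.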
+DiscontinuousSpace::DiscontinuousSpace(const std::string& name,
+                                       GcRetentionPolicy gc_retention_policy) :
+    Space(name, gc_retention_policy),
+    live_objects_(new accounting::SpaceSetMap("large live objects")),
+    mark_objects_(new accounting::SpaceSetMap("large marked objects")) {
+}
+
+}  // namespace space
+}  // namespace gc
+}  // namespace art
diff --git a/src/gc/space/space.h b/src/gc/space/space.h
new file mode 100644
index 0000000..ca01c55
--- /dev/null
+++ b/src/gc/space/space.h
@@ -0,0 +1,295 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_GC_SPACE_SPACE_H_
+#define ART_SRC_GC_SPACE_SPACE_H_
+
+#include <string>
+
+#include "UniquePtr.h"
+#include "base/macros.h"
+#include "base/mutex.h"
+#include "gc/accounting/space_bitmap.h"
+#include "globals.h"
+#include "image.h"
+#include "mem_map.h"
+
+namespace art {
+namespace mirror {
+  class Object;
+}  // namespace mirror
+
+namespace gc {
+
+namespace accounting {
+  class SpaceBitmap;
+}  // namespace accounting
+
+class Heap;
+
+namespace space {
+
+class DlMallocSpace;
+class ImageSpace;
+class LargeObjectSpace;
+
+static const bool kDebugSpaces = kIsDebugBuild;
+
+// See Space::GetGcRetentionPolicy.
+enum GcRetentionPolicy {
+  // Objects are retained forever with this policy for a space.
+  kGcRetentionPolicyNeverCollect,
+  // Every GC cycle will attempt to collect objects in this space.
+  kGcRetentionPolicyAlwaysCollect,
+  // Objects will be considered for collection only in "full" GC cycles, i.e. faster partial
+  // collections won't scan these areas, such as the Zygote.
+  kGcRetentionPolicyFullCollect,
+};
+std::ostream& operator<<(std::ostream& os, const GcRetentionPolicy& policy);
+
+enum SpaceType {
+  kSpaceTypeImageSpace,
+  kSpaceTypeAllocSpace,
+  kSpaceTypeZygoteSpace,
+  kSpaceTypeLargeObjectSpace,
+};
+std::ostream& operator<<(std::ostream& os, const SpaceType& space_type);
+
+// A space contains memory allocated for managed objects.
+class Space {
+ public:
+  // Dump space. Also key method for C++ vtables.
+  virtual void Dump(std::ostream& os) const;
+
+  // Name of the space. May vary, for example before/after the Zygote fork.
+  const char* GetName() const {
+    return name_.c_str();
+  }
+
+  // The policy governing when objects in this space are collected.
+  GcRetentionPolicy GetGcRetentionPolicy() const {
+    return gc_retention_policy_;
+  }
+
+  // Does the space support allocation?
+  virtual bool CanAllocateInto() const {
+    return true;
+  }
+
+  // Is the given object contained within this space?
+  virtual bool Contains(const mirror::Object* obj) const = 0;
+
+  // The kind of space this is: image, alloc, zygote, or large object.
+  virtual SpaceType GetType() const = 0;
+
+  // Is this an image space, i.e. one backed by a memory-mapped image file?
+  bool IsImageSpace() const {
+    return GetType() == kSpaceTypeImageSpace;
+  }
+  ImageSpace* AsImageSpace();
+
+  // Is this a dlmalloc-backed allocation space?
+  bool IsDlMallocSpace() const {
+    SpaceType type = GetType();
+    return type == kSpaceTypeAllocSpace || type == kSpaceTypeZygoteSpace;
+  }
+  DlMallocSpace* AsDlMallocSpace();
+
+  // Is this the space allocated into by the Zygote and no longer in use?
+  bool IsZygoteSpace() const {
+    return GetType() == kSpaceTypeZygoteSpace;
+  }
+  DlMallocSpace* AsZygoteSpace();
+
+  // Does this space hold large objects and implement the large object space abstraction?
+  bool IsLargeObjectSpace() const {
+    return GetType() == kSpaceTypeLargeObjectSpace;
+  }
+  LargeObjectSpace* AsLargeObjectSpace();
+
+  virtual ~Space() {}
+
+ protected:
+  Space(const std::string& name, GcRetentionPolicy gc_retention_policy);
+
+  void SetGcRetentionPolicy(GcRetentionPolicy gc_retention_policy) {
+    gc_retention_policy_ = gc_retention_policy;
+  }
+
+  // Name of the space that may vary due to the Zygote fork.
+  std::string name_;
+
+ private:
+  // When should objects within this space be reclaimed? Not constant as we vary it in the case
+  // of Zygote forking.
+  GcRetentionPolicy gc_retention_policy_;
+
+  friend class art::gc::Heap;
+
+  DISALLOW_COPY_AND_ASSIGN(Space);
+};
+std::ostream& operator<<(std::ostream& os, const Space& space);
+
+// AllocSpace interface.
+class AllocSpace {
+ public:
+  // Number of bytes currently allocated.
+  virtual uint64_t GetBytesAllocated() const = 0;
+  // Number of objects currently allocated.
+  virtual uint64_t GetObjectsAllocated() const = 0;
+  // Number of bytes allocated since the space was created.
+  virtual uint64_t GetTotalBytesAllocated() const = 0;
+  // Number of objects allocated since the space was created.
+  virtual uint64_t GetTotalObjectsAllocated() const = 0;
+
+  // Allocate num_bytes without allowing growth.
+  virtual mirror::Object* Alloc(Thread* self, size_t num_bytes) = 0;
+
+  // Return the storage space required by obj.
+  virtual size_t AllocationSize(const mirror::Object* obj) = 0;
+
+  // Returns how many bytes were freed.
+  virtual size_t Free(Thread* self, mirror::Object* ptr) = 0;
+
+  // Returns how many bytes were freed.
+  virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) = 0;
+
+ protected:
+  AllocSpace() {}
+  virtual ~AllocSpace() {}
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(AllocSpace);
+};
+
+// Continuous spaces have bitmaps and an address range. Although not required, objects within
+// continuous spaces can be marked in the card table.
+class ContinuousSpace : public Space {
+ public:
+  // Address at which the space begins
+  byte* Begin() const {
+    return begin_;
+  }
+
+  // Address at which the space ends, which may vary as the space is filled.
+  byte* End() const {
+    return end_;
+  }
+
+  // Current size of space
+  size_t Size() const {
+    return End() - Begin();
+  }
+
+  virtual accounting::SpaceBitmap* GetLiveBitmap() const = 0;
+  virtual accounting::SpaceBitmap* GetMarkBitmap() const = 0;
+
+  // Is object within this space? We check to see if the pointer is beyond the end first as
+  // continuous spaces are iterated over from low to high.
+  bool HasAddress(const mirror::Object* obj) const {
+    const byte* byte_ptr = reinterpret_cast<const byte*>(obj);
+    return byte_ptr < End() && byte_ptr >= Begin();
+  }
+
+  bool Contains(const mirror::Object* obj) const {
+    return HasAddress(obj);
+  }
+
+  virtual ~ContinuousSpace() {}
+
+ protected:
+  ContinuousSpace(const std::string& name, GcRetentionPolicy gc_retention_policy,
+                  byte* begin, byte* end) :
+      Space(name, gc_retention_policy), begin_(begin), end_(end) {
+  }
+
+  // The beginning of the storage for fast access.
+  byte* const begin_;
+
+  // Current end of the space.
+  byte* end_;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(ContinuousSpace);
+};
+
+// A space where objects may be allocated higgledy-piggledy throughout virtual memory. Currently
+// the card table can't cover these objects and so the write barrier shouldn't be triggered. This
+// is suitable for use for large primitive arrays.
+class DiscontinuousSpace : public Space {
+ public:
+  accounting::SpaceSetMap* GetLiveObjects() const {
+    return live_objects_.get();
+  }
+
+  accounting::SpaceSetMap* GetMarkObjects() const {
+    return mark_objects_.get();
+  }
+
+  virtual ~DiscontinuousSpace() {}
+
+ protected:
+  DiscontinuousSpace(const std::string& name, GcRetentionPolicy gc_retention_policy);
+
+  UniquePtr<accounting::SpaceSetMap> live_objects_;
+  UniquePtr<accounting::SpaceSetMap> mark_objects_;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(DiscontinuousSpace);
+};
+
+class MemMapSpace : public ContinuousSpace {
+ public:
+  // Maximum which the mapped space can grow to.
+  virtual size_t Capacity() const {
+    return mem_map_->Size();
+  }
+
+  // Size of the space without a limit on its growth. By default this is just the Capacity, but
+  // for the allocation space we support starting with a small heap and then extending it.
+  virtual size_t NonGrowthLimitCapacity() const {
+    return Capacity();
+  }
+
+ protected:
+  MemMapSpace(const std::string& name, MemMap* mem_map, size_t initial_size,
+              GcRetentionPolicy gc_retention_policy)
+      : ContinuousSpace(name, gc_retention_policy,
+                        mem_map->Begin(), mem_map->Begin() + initial_size),
+        mem_map_(mem_map) {
+  }
+
+  MemMap* GetMemMap() {
+    return mem_map_.get();
+  }
+
+  const MemMap* GetMemMap() const {
+    return mem_map_.get();
+  }
+
+ private:
+  // Underlying storage of the space
+  UniquePtr<MemMap> mem_map_;
+
+  DISALLOW_COPY_AND_ASSIGN(MemMapSpace);
+};
+
+}  // namespace space
+}  // namespace gc
+}  // namespace art
+
+#endif  // ART_SRC_GC_SPACE_SPACE_H_
diff --git a/src/gc/space_test.cc b/src/gc/space/space_test.cc
similarity index 97%
rename from src/gc/space_test.cc
rename to src/gc/space/space_test.cc
index 372ec77..08ae894 100644
--- a/src/gc/space_test.cc
+++ b/src/gc/space/space_test.cc
@@ -14,22 +14,27 @@
  * limitations under the License.
  */
 
-#include "space.h"
+#include "dlmalloc_space.h"
 
 #include "common_test.h"
-#include "dlmalloc.h"
 #include "globals.h"
 #include "UniquePtr.h"
 
 #include <stdint.h>
 
 namespace art {
+namespace gc {
+namespace space {
 
 class SpaceTest : public CommonTest {
  public:
   void SizeFootPrintGrowthLimitAndTrimBody(DlMallocSpace* space, intptr_t object_size,
                                            int round, size_t growth_limit);
   void SizeFootPrintGrowthLimitAndTrimDriver(size_t object_size);
+
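+  // Registering a space makes it findable to the heap and hands over ownership (the heap
+  // deletes its spaces when the runtime is cleaned up); the tests below funnel registration
+  // through this helper.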
+  void AddContinuousSpace(ContinuousSpace* space) {
+    Runtime::Current()->GetHeap()->AddContinuousSpace(space);
+  }
 };
 
 TEST_F(SpaceTest, Init) {
@@ -79,7 +84,7 @@
     ASSERT_TRUE(space != NULL);
 
     // Make space findable to the heap, will also delete space when runtime is cleaned up
-    Runtime::Current()->GetHeap()->AddSpace(space);
+    AddContinuousSpace(space);
     Thread* self = Thread::Current();
 
     // Succeeds, fits without adjusting the footprint limit.
@@ -121,7 +126,7 @@
     space = space->CreateZygoteSpace();
 
     // Make space findable to the heap, will also delete space when runtime is cleaned up
-    Runtime::Current()->GetHeap()->AddSpace(space);
+    AddContinuousSpace(space);
 
     // Succeeds, fits without adjusting the footprint limit.
     ptr1 = space->Alloc(self, 1 * MB);
@@ -148,7 +153,7 @@
   Thread* self = Thread::Current();
 
   // Make space findable to the heap, will also delete space when runtime is cleaned up
-  Runtime::Current()->GetHeap()->AddSpace(space);
+  AddContinuousSpace(space);
 
   // Succeeds, fits without adjusting the footprint limit.
   mirror::Object* ptr1 = space->Alloc(self, 1 * MB);
@@ -190,7 +195,7 @@
   ASSERT_TRUE(space != NULL);
 
   // Make space findable to the heap, will also delete space when runtime is cleaned up
-  Runtime::Current()->GetHeap()->AddSpace(space);
+  AddContinuousSpace(space);
   Thread* self = Thread::Current();
 
   // Succeeds, fits without adjusting the max allowed footprint.
@@ -384,7 +389,7 @@
   EXPECT_EQ(space->NonGrowthLimitCapacity(), capacity);
 
   // Make space findable to the heap, will also delete space when runtime is cleaned up
-  Runtime::Current()->GetHeap()->AddSpace(space);
+  AddContinuousSpace(space);
 
   // In this round we don't allocate with growth and therefore can't grow past the initial size.
   // This effectively makes the growth_limit the initial_size, so assert this.
@@ -419,4 +424,6 @@
 TEST_SizeFootPrintGrowthLimitAndTrim(4MB, 4 * MB)
 TEST_SizeFootPrintGrowthLimitAndTrim(8MB, 8 * MB)
 
+}  // namespace space
+}  // namespace gc
 }  // namespace art
diff --git a/src/gc/sticky_mark_sweep.cc b/src/gc/sticky_mark_sweep.cc
deleted file mode 100644
index 988d4e7..0000000
--- a/src/gc/sticky_mark_sweep.cc
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "heap.h"
-#include "large_object_space.h"
-#include "space.h"
-#include "sticky_mark_sweep.h"
-#include "thread.h"
-
-namespace art {
-
-StickyMarkSweep::StickyMarkSweep(Heap* heap, bool is_concurrent)
-    : PartialMarkSweep(heap, is_concurrent) {
-  cumulative_timings_.SetName(GetName());
-}
-
-StickyMarkSweep::~StickyMarkSweep() {
-
-}
-
-void StickyMarkSweep::BindBitmaps() {
-  PartialMarkSweep::BindBitmaps();
-
-  Spaces& spaces = GetHeap()->GetSpaces();
-  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
-  // For sticky GC, we want to bind the bitmaps of both the zygote space and the alloc space.
-  // This lets us start with the mark bitmap of the previous garbage collection as the current
-  // mark bitmap of the alloc space. After the sticky GC finishes, we then unbind the bitmaps,
-  // making it so that the live bitmap of the alloc space is contains the newly marked objects
-  // from the sticky GC.
-  for (Spaces::iterator it = spaces.begin(); it != spaces.end(); ++it) {
-    if ((*it)->GetGcRetentionPolicy() == kGcRetentionPolicyAlwaysCollect) {
-      BindLiveToMarkBitmap(*it);
-    }
-  }
-
-  GetHeap()->GetLargeObjectsSpace()->CopyLiveToMarked();
-}
-
-void StickyMarkSweep::MarkReachableObjects() {
-  DisableFinger();
-  RecursiveMarkDirtyObjects(CardTable::kCardDirty - 1);
-}
-
-void StickyMarkSweep::Sweep(TimingLogger& timings, bool swap_bitmaps) {
-  ObjectStack* live_stack = GetHeap()->GetLiveStack();
-  SweepArray(timings_, live_stack, false);
-  timings_.AddSplit("SweepArray");
-}
-
-}  // namespace art
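
The deleted file above implemented the sticky collector's bitmap binding: seed
this cycle's mark state from the previous cycle's live state, mark only what the
dirty cards reach, then promote the marks to the new live state. A loose,
self-contained analogy using std::set in place of the real bitmaps (the actual
code aliases bitmap pointers rather than copying sets):

    #include <cstdio>
    #include <set>

    typedef std::set<const void*> Bitmap;  // stand-in for a space bitmap

    struct Space {
      Bitmap live;  // objects that survived the last collection
      Bitmap mark;  // objects found reachable in the current collection
    };

    // "Bind": seed the current mark set from the previous live set, so a
    // sticky collection only needs to mark objects allocated since then.
    void BindLiveToMark(Space* s) { s->mark = s->live; }

    // "Unbind": survivors of this collection become the new live set.
    void Unbind(Space* s) {
      s->live.swap(s->mark);
      s->mark.clear();
    }

    int main() {
      Space s;
      int old_obj, new_obj;
      s.live.insert(&old_obj);  // survivor of the previous GC
      BindLiveToMark(&s);
      s.mark.insert(&new_obj);  // marked via dirty cards this cycle
      Unbind(&s);
      std::printf("live after sticky GC: %zu objects\n", s.live.size());
      return 0;
    }
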
diff --git a/src/hprof/hprof.cc b/src/hprof/hprof.cc
index 7539066..d66ec79 100644
--- a/src/hprof/hprof.cc
+++ b/src/hprof/hprof.cc
@@ -44,8 +44,10 @@
 #include "common_throws.h"
 #include "debugger.h"
 #include "dex_file-inl.h"
+#include "gc/accounting/heap_bitmap.h"
+#include "gc/heap.h"
+#include "gc/space/space.h"
 #include "globals.h"
-#include "heap.h"
 #include "mirror/class.h"
 #include "mirror/class-inl.h"
 #include "mirror/field.h"
@@ -55,7 +57,6 @@
 #include "os.h"
 #include "safe_map.h"
 #include "scoped_thread_state_change.h"
-#include "gc/space.h"
 #include "thread_list.h"
 
 namespace art {
@@ -412,7 +413,7 @@
       LOCKS_EXCLUDED(Locks::heap_bitmap_lock_) {
     // Walk the roots and the heap.
     current_record_.StartNewRecord(body_fp_, HPROF_TAG_HEAP_DUMP_SEGMENT, HPROF_TIME);
-    Runtime::Current()->VisitRoots(RootVisitor, this);
+    Runtime::Current()->VisitRoots(RootVisitor, this, false, false);
     Thread* self = Thread::Current();
     {
       WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
diff --git a/src/image_test.cc b/src/image_test.cc
index 0769e21..9f86a1a 100644
--- a/src/image_test.cc
+++ b/src/image_test.cc
@@ -22,7 +22,7 @@
 #include "image_writer.h"
 #include "oat_writer.h"
 #include "signal_catcher.h"
-#include "gc/space.h"
+#include "gc/space/image_space.h"
 #include "UniquePtr.h"
 #include "utils.h"
 #include "vector_output_stream.h"
@@ -83,12 +83,12 @@
     file->ReadFully(&image_header, sizeof(image_header));
     ASSERT_TRUE(image_header.IsValid());
 
-    Heap* heap = Runtime::Current()->GetHeap();
-    ASSERT_EQ(1U, heap->GetSpaces().size());
-    ContinuousSpace* space = heap->GetSpaces().front();
+    gc::Heap* heap = Runtime::Current()->GetHeap();
+    ASSERT_EQ(1U, heap->GetContinuousSpaces().size());
+    gc::space::ContinuousSpace* space = heap->GetContinuousSpaces().front();
     ASSERT_FALSE(space->IsImageSpace());
     ASSERT_TRUE(space != NULL);
-    ASSERT_TRUE(space->IsAllocSpace());
+    ASSERT_TRUE(space->IsDlMallocSpace());
     ASSERT_GE(sizeof(image_header) + space->Size(), static_cast<size_t>(file->GetLength()));
   }
 
@@ -125,14 +125,14 @@
   ASSERT_TRUE(runtime_.get() != NULL);
   class_linker_ = runtime_->GetClassLinker();
 
-  Heap* heap = Runtime::Current()->GetHeap();
-  ASSERT_EQ(2U, heap->GetSpaces().size());
-  ASSERT_TRUE(heap->GetSpaces()[0]->IsImageSpace());
-  ASSERT_FALSE(heap->GetSpaces()[0]->IsAllocSpace());
-  ASSERT_FALSE(heap->GetSpaces()[1]->IsImageSpace());
-  ASSERT_TRUE(heap->GetSpaces()[1]->IsAllocSpace());
+  gc::Heap* heap = Runtime::Current()->GetHeap();
+  ASSERT_EQ(2U, heap->GetContinuousSpaces().size());
+  ASSERT_TRUE(heap->GetContinuousSpaces()[0]->IsImageSpace());
+  ASSERT_FALSE(heap->GetContinuousSpaces()[0]->IsDlMallocSpace());
+  ASSERT_FALSE(heap->GetContinuousSpaces()[1]->IsImageSpace());
+  ASSERT_TRUE(heap->GetContinuousSpaces()[1]->IsDlMallocSpace());
 
-  ImageSpace* image_space = heap->GetImageSpace();
+  gc::space::ImageSpace* image_space = heap->GetImageSpace();
   byte* image_begin = image_space->Begin();
   byte* image_end = image_space->End();
   CHECK_EQ(requested_image_base, reinterpret_cast<uintptr_t>(image_begin));
diff --git a/src/image_writer.cc b/src/image_writer.cc
index 5a1ebbb..f0b49be 100644
--- a/src/image_writer.cc
+++ b/src/image_writer.cc
@@ -26,11 +26,12 @@
 #include "compiled_method.h"
 #include "compiler/driver/compiler_driver.h"
 #include "dex_file-inl.h"
-#include "gc/card_table-inl.h"
-#include "gc/large_object_space.h"
-#include "gc/space.h"
+#include "gc/accounting/card_table-inl.h"
+#include "gc/accounting/heap_bitmap.h"
+#include "gc/heap.h"
+#include "gc/space/large_object_space.h"
+#include "gc/space/space-inl.h"
 #include "globals.h"
-#include "heap.h"
 #include "image.h"
 #include "intern_table.h"
 #include "mirror/array-inl.h"
@@ -63,9 +64,6 @@
   CHECK_NE(image_begin, 0U);
   image_begin_ = reinterpret_cast<byte*>(image_begin);
 
-  Heap* heap = Runtime::Current()->GetHeap();
-  const Spaces& spaces = heap->GetSpaces();
-
   ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
   const std::vector<DexCache*>& all_dex_caches = class_linker->GetDexCaches();
   for (size_t i = 0; i < all_dex_caches.size(); i++) {
@@ -92,12 +90,16 @@
     ComputeEagerResolvedStrings();
     Thread::Current()->TransitionFromRunnableToSuspended(kNative);
   }
-  heap->CollectGarbage(false);  // Remove garbage
-  // Trim size of alloc spaces
+  gc::Heap* heap = Runtime::Current()->GetHeap();
+  heap->CollectGarbage(false);  // Remove garbage.
+  // Trim size of alloc spaces.
+  const std::vector<gc::space::ContinuousSpace*>& spaces = heap->GetContinuousSpaces();
   // TODO: C++0x auto
-  for (Spaces::const_iterator cur = spaces.begin(); cur != spaces.end(); ++cur) {
-    if ((*cur)->IsAllocSpace()) {
-      (*cur)->AsAllocSpace()->Trim();
+  typedef std::vector<gc::space::ContinuousSpace*>::const_iterator It;
+  for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
+    gc::space::ContinuousSpace* space = *it;
+    if (space->IsDlMallocSpace()) {
+      space->AsDlMallocSpace()->Trim();
     }
   }
 
@@ -137,11 +139,15 @@
 }
 
 bool ImageWriter::AllocMemory() {
-  const Spaces& spaces = Runtime::Current()->GetHeap()->GetSpaces();
+  gc::Heap* heap = Runtime::Current()->GetHeap();
+  const std::vector<gc::space::ContinuousSpace*>& spaces = heap->GetContinuousSpaces();
   size_t size = 0;
-  for (Spaces::const_iterator it = spaces.begin(); it != spaces.end(); ++it) {
-    if ((*it)->IsAllocSpace()) {
-      size += (*it)->Size();
+  // TODO: C++0x auto
+  typedef std::vector<gc::space::ContinuousSpace*>::const_iterator It;
+  for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
+    gc::space::ContinuousSpace* space = *it;
+    if (space->IsDlMallocSpace()) {
+      size += space->Size();
     }
   }
 
@@ -191,7 +197,7 @@
 void ImageWriter::ComputeEagerResolvedStrings()
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   // TODO: Check image spaces only?
-  Heap* heap = Runtime::Current()->GetHeap();
+  gc::Heap* heap = Runtime::Current()->GetHeap();
   WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
   heap->FlushAllocStack();
   heap->GetLiveBitmap()->Walk(ComputeEagerResolvedStringsCallback, this);
@@ -267,7 +273,7 @@
     return;
   }
 
-  Heap* heap = Runtime::Current()->GetHeap();
+  gc::Heap* heap = Runtime::Current()->GetHeap();
   Thread* self = Thread::Current();
   {
     WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
@@ -372,8 +378,8 @@
   Thread* self = Thread::Current();
   SirtRef<ObjectArray<Object> > image_roots(self, CreateImageRoots());
 
-  Heap* heap = Runtime::Current()->GetHeap();
-  const Spaces& spaces = heap->GetSpaces();
+  gc::Heap* heap = Runtime::Current()->GetHeap();
+  const std::vector<gc::space::ContinuousSpace*>& spaces = heap->GetContinuousSpaces();
   DCHECK(!spaces.empty());
   DCHECK_EQ(0U, image_end_);
 
@@ -388,8 +394,11 @@
     // TODO: Add InOrderWalk to heap bitmap.
     const char* old = self->StartAssertNoThreadSuspension("ImageWriter");
     DCHECK(heap->GetLargeObjectsSpace()->GetLiveObjects()->IsEmpty());
-    for (Spaces::const_iterator it = spaces.begin(); it != spaces.end(); ++it) {
-      (*it)->GetLiveBitmap()->InOrderWalk(CalculateNewObjectOffsetsCallback, this);
+    // TODO: C++0x auto
+    typedef std::vector<gc::space::ContinuousSpace*>::const_iterator It;
+    for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
+      gc::space::ContinuousSpace* space = *it;
+      space->GetLiveBitmap()->InOrderWalk(CalculateNewObjectOffsetsCallback, this);
       DCHECK_LT(image_end_, image_->Size());
     }
     self->EndAssertNoThreadSuspension(old);
@@ -417,7 +426,7 @@
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   Thread* self = Thread::Current();
   const char* old_cause = self->StartAssertNoThreadSuspension("ImageWriter");
-  Heap* heap = Runtime::Current()->GetHeap();
+  gc::Heap* heap = Runtime::Current()->GetHeap();
   // TODO: heap validation can't handle this fix up pass
   heap->DisableObjectValidation();
   // TODO: Image spaces only?
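
The repeated "typedef std::vector<...>::const_iterator It" in the loops above is
the C++03 workaround the "TODO: C++0x auto" comments refer to. A compilable
sketch of the idiom with a hypothetical SketchSpace type:

    #include <vector>

    struct SketchSpace {
      bool IsDlMallocSpace() const { return true; }
      void Trim() {}
    };

    void TrimAll(const std::vector<SketchSpace*>& spaces) {
      // C++03: name the iterator type once instead of repeating it.
      typedef std::vector<SketchSpace*>::const_iterator It;
      for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
        if ((*it)->IsDlMallocSpace()) {
          (*it)->Trim();
        }
      }
      // C++11 spelling, once "C++0x auto" arrives:
      //   for (SketchSpace* space : spaces) { ... }
    }

    int main() {
      SketchSpace a;
      std::vector<SketchSpace*> spaces(1, &a);
      TrimAll(spaces);
      return 0;
    }
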
diff --git a/src/image_writer.h b/src/image_writer.h
index 4507592..b79cb2f 100644
--- a/src/image_writer.h
+++ b/src/image_writer.h
@@ -29,7 +29,7 @@
 #include "mirror/dex_cache.h"
 #include "os.h"
 #include "safe_map.h"
-#include "gc/space.h"
+#include "gc/space/space.h"
 #include "UniquePtr.h"
 
 namespace art {
diff --git a/src/intern_table.cc b/src/intern_table.cc
index fa3c075..d1ad2db 100644
--- a/src/intern_table.cc
+++ b/src/intern_table.cc
@@ -38,13 +38,15 @@
      << image_strong_interns_.size() << " image strong\n";
 }
 
-void InternTable::VisitRoots(RootVisitor* visitor, void* arg) {
+void InternTable::VisitRoots(RootVisitor* visitor, void* arg, bool clean_dirty) {
   MutexLock mu(Thread::Current(), intern_table_lock_);
   typedef Table::const_iterator It; // TODO: C++0x auto
   for (It it = strong_interns_.begin(), end = strong_interns_.end(); it != end; ++it) {
     visitor(it->second, arg);
   }
-  is_dirty_ = false;
+  if (clean_dirty) {
+    is_dirty_ = false;
+  }
   // Note: we deliberately don't visit the weak_interns_ table and the immutable image roots.
 }
 
diff --git a/src/intern_table.h b/src/intern_table.h
index 3018317..1ff4f6d 100644
--- a/src/intern_table.h
+++ b/src/intern_table.h
@@ -66,7 +66,7 @@
 
   size_t Size() const;
 
-  void VisitRoots(RootVisitor* visitor, void* arg);
+  void VisitRoots(RootVisitor* visitor, void* arg, bool clean_dirty);
 
   void DumpForSigQuit(std::ostream& os) const;
 
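
The new clean_dirty parameter lets a caller scan the strong interns without
resetting the table's dirty flag. A self-contained sketch of that flag lifecycle
with hypothetical SketchTable/CountRoot names (the real table maps hashes to
mirror::String under intern_table_lock_):

    #include <cstdio>
    #include <vector>

    typedef void RootVisitor(const void* root, void* arg);

    class SketchTable {
     public:
      SketchTable() : is_dirty_(false) {}

      void Insert(const void* root) {
        roots_.push_back(root);
        is_dirty_ = true;  // interning a new string dirties the table
      }

      bool IsDirty() const { return is_dirty_; }

      void VisitRoots(RootVisitor* visitor, void* arg, bool clean_dirty) {
        for (size_t i = 0; i < roots_.size(); ++i) {
          visitor(roots_[i], arg);
        }
        if (clean_dirty) {
          is_dirty_ = false;  // only the pause-time pass resets the flag
        }
      }

     private:
      std::vector<const void*> roots_;
      bool is_dirty_;
    };

    static void CountRoot(const void*, void* arg) {
      ++*static_cast<int*>(arg);
    }

    int main() {
      SketchTable table;
      int obj = 0;
      table.Insert(&obj);
      int visited = 0;
      table.VisitRoots(CountRoot, &visited, false);  // concurrent scan
      std::printf("dirty after concurrent scan: %d\n", table.IsDirty());
      table.VisitRoots(CountRoot, &visited, true);   // pause-time scan
      std::printf("dirty after pause: %d\n", table.IsDirty());
      return 0;
    }
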
diff --git a/src/interpreter/interpreter.cc b/src/interpreter/interpreter.cc
index 657bf43..dd96f8d 100644
--- a/src/interpreter/interpreter.cc
+++ b/src/interpreter/interpreter.cc
@@ -23,7 +23,8 @@
 #include "common_throws.h"
 #include "dex_file-inl.h"
 #include "dex_instruction-inl.h"
-#include "gc/card_table-inl.h"
+#include "dex_instruction.h"
+#include "gc/accounting/card_table-inl.h"
 #include "invoke_arg_array_builder.h"
 #include "nth_caller_visitor.h"
 #include "mirror/class.h"
diff --git a/src/jni_internal.cc b/src/jni_internal.cc
index 7c19025..2631845 100644
--- a/src/jni_internal.cc
+++ b/src/jni_internal.cc
@@ -28,7 +28,7 @@
 #include "base/stringpiece.h"
 #include "class_linker.h"
 #include "dex_file-inl.h"
-#include "gc/card_table-inl.h"
+#include "gc/accounting/card_table-inl.h"
 #include "invoke_arg_array_builder.h"
 #include "jni.h"
 #include "mirror/class-inl.h"
diff --git a/src/jni_internal_test.cc b/src/jni_internal_test.cc
index 0f58444..c8b9eb9 100644
--- a/src/jni_internal_test.cc
+++ b/src/jni_internal_test.cc
@@ -16,6 +16,7 @@
 
 #include "jni_internal.h"
 
+#include <limits.h>
 #include <cfloat>
 #include <cmath>
 
diff --git a/src/mirror/abstract_method.cc b/src/mirror/abstract_method.cc
index c2ab29e..88a9dc1 100644
--- a/src/mirror/abstract_method.cc
+++ b/src/mirror/abstract_method.cc
@@ -20,7 +20,7 @@
 #include "base/stringpiece.h"
 #include "class-inl.h"
 #include "dex_file-inl.h"
-#include "gc/card_table-inl.h"
+#include "gc/accounting/card_table-inl.h"
 #include "interpreter/interpreter.h"
 #include "jni_internal.h"
 #include "object-inl.h"
diff --git a/src/mirror/array.cc b/src/mirror/array.cc
index 84c2dc6..e2e63a6 100644
--- a/src/mirror/array.cc
+++ b/src/mirror/array.cc
@@ -20,7 +20,7 @@
 #include "class-inl.h"
 #include "common_throws.h"
 #include "dex_file-inl.h"
-#include "gc/card_table-inl.h"
+#include "gc/accounting/card_table-inl.h"
 #include "object-inl.h"
 #include "object_array.h"
 #include "object_array-inl.h"
@@ -51,7 +51,7 @@
     return NULL;
   }
 
-  Heap* heap = Runtime::Current()->GetHeap();
+  gc::Heap* heap = Runtime::Current()->GetHeap();
   Array* array = down_cast<Array*>(heap->AllocObject(self, array_class, size));
   if (array != NULL) {
     DCHECK(array->IsArrayInstance());
diff --git a/src/mirror/class.cc b/src/mirror/class.cc
index 2dae90c..2d2130c 100644
--- a/src/mirror/class.cc
+++ b/src/mirror/class.cc
@@ -23,7 +23,7 @@
 #include "dex_cache.h"
 #include "dex_file-inl.h"
 #include "field-inl.h"
-#include "gc/card_table-inl.h"
+#include "gc/accounting/card_table-inl.h"
 #include "object-inl.h"
 #include "object_array-inl.h"
 #include "object_utils.h"
diff --git a/src/mirror/dex_cache.cc b/src/mirror/dex_cache.cc
index d9c05fb..239dc5e 100644
--- a/src/mirror/dex_cache.cc
+++ b/src/mirror/dex_cache.cc
@@ -19,8 +19,8 @@
 #include "abstract_method-inl.h"
 #include "base/logging.h"
 #include "class_linker.h"
-#include "heap.h"
-#include "gc/card_table-inl.h"
+#include "gc/accounting/card_table-inl.h"
+#include "gc/heap.h"
 #include "globals.h"
 #include "object.h"
 #include "object-inl.h"
diff --git a/src/mirror/dex_cache_test.cc b/src/mirror/dex_cache_test.cc
index 3d753e1..441c6da 100644
--- a/src/mirror/dex_cache_test.cc
+++ b/src/mirror/dex_cache_test.cc
@@ -17,7 +17,7 @@
 #include "class_linker.h"
 #include "common_test.h"
 #include "dex_cache.h"
-#include "heap.h"
+#include "gc/heap.h"
 #include "mirror/object_array-inl.h"
 #include "mirror/object-inl.h"
 #include "sirt_ref.h"
diff --git a/src/mirror/field-inl.h b/src/mirror/field-inl.h
index cda461b..be5dcab 100644
--- a/src/mirror/field-inl.h
+++ b/src/mirror/field-inl.h
@@ -20,7 +20,7 @@
 #include "field.h"
 
 #include "base/logging.h"
-#include "gc/card_table-inl.h"
+#include "gc/accounting/card_table-inl.h"
 #include "jvalue.h"
 #include "object-inl.h"
 #include "object_utils.h"
diff --git a/src/mirror/field.cc b/src/mirror/field.cc
index 6e2559a..a96e8c8 100644
--- a/src/mirror/field.cc
+++ b/src/mirror/field.cc
@@ -17,7 +17,7 @@
 #include "field.h"
 
 #include "field-inl.h"
-#include "gc/card_table-inl.h"
+#include "gc/accounting/card_table-inl.h"
 #include "object-inl.h"
 #include "object_utils.h"
 #include "runtime.h"
diff --git a/src/mirror/object.cc b/src/mirror/object.cc
index 4acb567..b2d6e71 100644
--- a/src/mirror/object.cc
+++ b/src/mirror/object.cc
@@ -22,8 +22,8 @@
 #include "class_linker-inl.h"
 #include "field.h"
 #include "field-inl.h"
-#include "gc/card_table-inl.h"
-#include "heap.h"
+#include "gc/accounting/card_table-inl.h"
+#include "gc/heap.h"
 #include "iftable-inl.h"
 #include "monitor.h"
 #include "object-inl.h"
@@ -44,7 +44,7 @@
   // Object::SizeOf gets the right size even if we're an array.
   // Using c->AllocObject() here would be wrong.
   size_t num_bytes = SizeOf();
-  Heap* heap = Runtime::Current()->GetHeap();
+  gc::Heap* heap = Runtime::Current()->GetHeap();
   SirtRef<Object> copy(self, heap->AllocObject(self, c, num_bytes));
   if (copy.get() == NULL) {
     return NULL;
diff --git a/src/mirror/object_array-inl.h b/src/mirror/object_array-inl.h
index 05bce95..b130dac 100644
--- a/src/mirror/object_array-inl.h
+++ b/src/mirror/object_array-inl.h
@@ -19,7 +19,7 @@
 
 #include "object_array.h"
 
-#include "heap.h"
+#include "gc/heap.h"
 #include "mirror/class.h"
 #include "mirror/field.h"
 #include "runtime.h"
@@ -101,7 +101,7 @@
     MemberOffset src_offset(DataOffset(sizeof(Object*)).Int32Value() + src_pos * sizeof(Object*));
     MemberOffset dst_offset(DataOffset(sizeof(Object*)).Int32Value() + dst_pos * sizeof(Object*));
     Class* array_class = dst->GetClass();
-    Heap* heap = Runtime::Current()->GetHeap();
+    gc::Heap* heap = Runtime::Current()->GetHeap();
     if (array_class == src->GetClass()) {
       // No need for array store checks if arrays are of the same type
       for (size_t i = 0; i < length; i++) {
diff --git a/src/mirror/object_test.cc b/src/mirror/object_test.cc
index 52df544..53a1df9 100644
--- a/src/mirror/object_test.cc
+++ b/src/mirror/object_test.cc
@@ -27,8 +27,8 @@
 #include "common_test.h"
 #include "dex_file.h"
 #include "field-inl.h"
-#include "gc/card_table-inl.h"
-#include "heap.h"
+#include "gc/accounting/card_table-inl.h"
+#include "gc/heap.h"
 #include "iftable-inl.h"
 #include "abstract_method-inl.h"
 #include "object-inl.h"
diff --git a/src/mirror/stack_trace_element.cc b/src/mirror/stack_trace_element.cc
index 9d557ec..1ad0182 100644
--- a/src/mirror/stack_trace_element.cc
+++ b/src/mirror/stack_trace_element.cc
@@ -17,7 +17,7 @@
 #include "stack_trace_element.h"
 
 #include "class.h"
-#include "gc/card_table-inl.h"
+#include "gc/accounting/card_table-inl.h"
 #include "object-inl.h"
 #include "string.h"
 
diff --git a/src/mirror/string.cc b/src/mirror/string.cc
index 45a6779..97126cb 100644
--- a/src/mirror/string.cc
+++ b/src/mirror/string.cc
@@ -17,7 +17,7 @@
 #include "string.h"
 
 #include "array.h"
-#include "gc/card_table-inl.h"
+#include "gc/accounting/card_table-inl.h"
 #include "intern_table.h"
 #include "object-inl.h"
 #include "runtime.h"
diff --git a/src/mirror/throwable.cc b/src/mirror/throwable.cc
index bbff9c2..78b76dc 100644
--- a/src/mirror/throwable.cc
+++ b/src/mirror/throwable.cc
@@ -19,7 +19,7 @@
 #include "abstract_method-inl.h"
 #include "class-inl.h"
 #include "dex_file-inl.h"
-#include "gc/card_table-inl.h"
+#include "gc/accounting/card_table-inl.h"
 #include "object-inl.h"
 #include "object_array.h"
 #include "object_array-inl.h"
diff --git a/src/native/dalvik_system_DexFile.cc b/src/native/dalvik_system_DexFile.cc
index e07339c..b9838f8 100644
--- a/src/native/dalvik_system_DexFile.cc
+++ b/src/native/dalvik_system_DexFile.cc
@@ -20,7 +20,8 @@
 #include "class_linker.h"
 #include "common_throws.h"
 #include "dex_file-inl.h"
-#include "gc/space.h"
+#include "gc/space/image_space.h"
+#include "gc/space/space-inl.h"
 #include "image.h"
 #include "jni_internal.h"
 #include "mirror/class_loader.h"
@@ -248,13 +249,14 @@
     return JNI_TRUE;
   }
 
-  Heap* heap = runtime->GetHeap();
-  const Spaces& spaces = heap->GetSpaces();
+  gc::Heap* heap = runtime->GetHeap();
+  const std::vector<gc::space::ContinuousSpace*>& spaces = heap->GetContinuousSpaces();
   // TODO: C++0x auto
-  for (Spaces::const_iterator cur = spaces.begin(); cur != spaces.end(); ++cur) {
-    if ((*cur)->IsImageSpace()) {
+  typedef std::vector<gc::space::ContinuousSpace*>::const_iterator It;
+  for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
+    if ((*it)->IsImageSpace()) {
       // TODO: Ensure this works with multiple image spaces.
-      const ImageHeader& image_header = (*cur)->AsImageSpace()->GetImageHeader();
+      const ImageHeader& image_header = (*it)->AsImageSpace()->GetImageHeader();
       if (oat_file->GetOatHeader().GetImageFileLocationOatChecksum() != image_header.GetOatChecksum()) {
         ScopedObjectAccess soa(env);
         LOG(INFO) << "DexFile_isDexOptNeeded cache file " << cache_location
diff --git a/src/native/dalvik_system_VMRuntime.cc b/src/native/dalvik_system_VMRuntime.cc
index d2ef43c..0a2e1a6 100644
--- a/src/native/dalvik_system_VMRuntime.cc
+++ b/src/native/dalvik_system_VMRuntime.cc
@@ -20,13 +20,14 @@
 #include "common_throws.h"
 #include "debugger.h"
 #include "dex_file-inl.h"
+#include "gc/allocator/dlmalloc.h"
+#include "gc/space/dlmalloc_space.h"
 #include "jni_internal.h"
 #include "mirror/class-inl.h"
 #include "mirror/object.h"
 #include "mirror/object-inl.h"
 #include "object_utils.h"
 #include "scoped_thread_state_change.h"
-#include "gc/space.h"
 #include "thread.h"
 #include "thread_list.h"
 #include "toStringArray.h"
@@ -164,11 +165,11 @@
   uint64_t start_ns = NanoTime();
 
   // Trim the managed heap.
-  Heap* heap = Runtime::Current()->GetHeap();
-  DlMallocSpace* alloc_space = heap->GetAllocSpace();
+  gc::Heap* heap = Runtime::Current()->GetHeap();
+  gc::space::DlMallocSpace* alloc_space = heap->GetAllocSpace();
   size_t alloc_space_size = alloc_space->Size();
   float managed_utilization =
-      static_cast<float>(alloc_space->GetNumBytesAllocated()) / alloc_space_size;
+      static_cast<float>(alloc_space->GetBytesAllocated()) / alloc_space_size;
   size_t managed_reclaimed = heap->Trim();
 
   uint64_t gc_heap_end_ns = NanoTime();
@@ -176,7 +177,7 @@
   // Trim the native heap.
   dlmalloc_trim(0);
   size_t native_reclaimed = 0;
-  dlmalloc_inspect_all(MspaceMadviseCallback, &native_reclaimed);
+  dlmalloc_inspect_all(DlmallocMadviseCallback, &native_reclaimed);
 
   uint64_t end_ns = NanoTime();
 
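
DlmallocMadviseCallback is handed to dlmalloc_inspect_all, which invokes its
handler once per chunk, passing used_bytes == 0 for free chunks. A hedged sketch
of what such a madvise callback can look like (names and details here are
illustrative, not the ART implementation): free chunks are trimmed inward to
whole pages and returned to the kernel.

    #include <stddef.h>
    #include <stdint.h>
    #include <sys/mman.h>
    #include <unistd.h>

    // Illustrative handler compatible with dlmalloc_inspect_all's callback
    // signature; 'arg' accumulates the number of bytes released.
    void MadviseCallback(void* start, void* end, size_t used_bytes, void* arg) {
      if (used_bytes > 0) {
        return;  // chunk is in use; leave it resident
      }
      const uintptr_t page_size = static_cast<uintptr_t>(sysconf(_SC_PAGESIZE));
      // Only whole pages can be released, so round the range inward.
      uintptr_t begin = (reinterpret_cast<uintptr_t>(start) + page_size - 1) &
          ~(page_size - 1);
      uintptr_t stop = reinterpret_cast<uintptr_t>(end) & ~(page_size - 1);
      if (begin >= stop) {
        return;  // free run smaller than one page; nothing to release
      }
      madvise(reinterpret_cast<void*>(begin), stop - begin, MADV_DONTNEED);
      *static_cast<size_t*>(arg) += stop - begin;
    }

    // Hypothetical usage, mirroring the call site above:
    //   size_t reclaimed = 0;
    //   dlmalloc_inspect_all(MadviseCallback, &reclaimed);
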
diff --git a/src/native/java_lang_Runtime.cc b/src/native/java_lang_Runtime.cc
index 54ccddc..3642635 100644
--- a/src/native/java_lang_Runtime.cc
+++ b/src/native/java_lang_Runtime.cc
@@ -18,7 +18,7 @@
 #include <limits.h>
 #include <unistd.h>
 
-#include "heap.h"
+#include "gc/heap.h"
 #include "jni_internal.h"
 #include "mirror/class_loader.h"
 #include "runtime.h"
diff --git a/src/native/java_lang_System.cc b/src/native/java_lang_System.cc
index d8df9d9..2462f2f 100644
--- a/src/native/java_lang_System.cc
+++ b/src/native/java_lang_System.cc
@@ -15,7 +15,7 @@
  */
 
 #include "common_throws.h"
-#include "gc/card_table-inl.h"
+#include "gc/accounting/card_table-inl.h"
 #include "jni_internal.h"
 #include "mirror/array.h"
 #include "mirror/class.h"
diff --git a/src/native/java_lang_Thread.cc b/src/native/java_lang_Thread.cc
index 7ccfaaa..8ef190a 100644
--- a/src/native/java_lang_Thread.cc
+++ b/src/native/java_lang_Thread.cc
@@ -74,6 +74,7 @@
     case kNative:                         return kJavaRunnable;
     case kWaitingForGcToComplete:         return kJavaWaiting;
     case kWaitingPerformingGc:            return kJavaWaiting;
+    case kWaitingForCheckPointsToRun:     return kJavaWaiting;
     case kWaitingForDebuggerSend:         return kJavaWaiting;
     case kWaitingForDebuggerToAttach:     return kJavaWaiting;
     case kWaitingInMainDebuggerLoop:      return kJavaWaiting;
diff --git a/src/native/sun_misc_Unsafe.cc b/src/native/sun_misc_Unsafe.cc
index abb0d5c..eece81a 100644
--- a/src/native/sun_misc_Unsafe.cc
+++ b/src/native/sun_misc_Unsafe.cc
@@ -15,7 +15,7 @@
  */
 
 #include "atomic.h"
-#include "gc/card_table-inl.h"
+#include "gc/accounting/card_table-inl.h"
 #include "jni_internal.h"
 #include "mirror/object.h"
 #include "mirror/object-inl.h"
diff --git a/src/oat/runtime/support_dexcache.cc b/src/oat/runtime/support_dexcache.cc
index 3e8ebc6..0af7a62 100644
--- a/src/oat/runtime/support_dexcache.cc
+++ b/src/oat/runtime/support_dexcache.cc
@@ -15,7 +15,7 @@
  */
 
 #include "callee_save_frame.h"
-#include "gc/card_table-inl.h"
+#include "gc/accounting/card_table-inl.h"
 #include "class_linker-inl.h"
 #include "dex_file-inl.h"
 #include "mirror/abstract_method-inl.h"
diff --git a/src/oat_writer.cc b/src/oat_writer.cc
index 639d249..1d249d6 100644
--- a/src/oat_writer.cc
+++ b/src/oat_writer.cc
@@ -22,6 +22,7 @@
 #include "base/unix_file/fd_file.h"
 #include "class_linker.h"
 #include "dex_file-inl.h"
+#include "gc/space/space.h"
 #include "mirror/abstract_method-inl.h"
 #include "mirror/array.h"
 #include "mirror/class_loader.h"
@@ -30,7 +31,6 @@
 #include "output_stream.h"
 #include "safe_map.h"
 #include "scoped_thread_state_change.h"
-#include "gc/space.h"
 #include "verifier/method_verifier.h"
 
 namespace art {
diff --git a/src/oatdump.cc b/src/oatdump.cc
index a8a0e86..f9caa9d 100644
--- a/src/oatdump.cc
+++ b/src/oatdump.cc
@@ -30,8 +30,9 @@
 #include "dex_instruction.h"
 #include "disassembler.h"
 #include "gc_map.h"
-#include "gc/large_object_space.h"
-#include "gc/space.h"
+#include "gc/space/image_space.h"
+#include "gc/space/large_object_space.h"
+#include "gc/space/space-inl.h"
 #include "image.h"
 #include "indenter.h"
 #include "mirror/abstract_method-inl.h"
@@ -679,7 +680,7 @@
 class ImageDumper {
  public:
   explicit ImageDumper(std::ostream* os, const std::string& image_filename,
-                       const std::string& host_prefix, Space& image_space,
+                       const std::string& host_prefix, gc::space::ImageSpace& image_space,
                        const ImageHeader& image_header)
       : os_(os), image_filename_(image_filename), host_prefix_(host_prefix),
         image_space_(image_space), image_header_(image_header) {}
@@ -763,8 +764,8 @@
     os << "OBJECTS:\n" << std::flush;
 
     // Loop through all the image spaces and dump their objects.
-    Heap* heap = Runtime::Current()->GetHeap();
-    const Spaces& spaces = heap->GetSpaces();
+    gc::Heap* heap = Runtime::Current()->GetHeap();
+    const std::vector<gc::space::ContinuousSpace*>& spaces = heap->GetContinuousSpaces();
     Thread* self = Thread::Current();
     {
       WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
@@ -777,10 +778,11 @@
       os_ = &indent_os;
       ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
       // TODO: C++0x auto
-      for (Spaces::const_iterator it = spaces.begin(); it != spaces.end(); ++it) {
-        Space* space = *it;
+      typedef std::vector<gc::space::ContinuousSpace*>::const_iterator It;
+      for (It it = spaces.begin(), end = spaces.end(); it != end; ++it) {
+        gc::space::Space* space = *it;
         if (space->IsImageSpace()) {
-          ImageSpace* image_space = space->AsImageSpace();
+          gc::space::ImageSpace* image_space = space->AsImageSpace();
           image_space->GetLiveBitmap()->Walk(ImageDumper::Callback, this);
           indent_os << "\n";
         }
@@ -1343,7 +1345,7 @@
   std::ostream* os_;
   const std::string image_filename_;
   const std::string host_prefix_;
-  Space& image_space_;
+  gc::space::ImageSpace& image_space_;
   const ImageHeader& image_header_;
 
   DISALLOW_COPY_AND_ASSIGN(ImageDumper);
@@ -1454,8 +1456,8 @@
   Thread::Current()->TransitionFromRunnableToSuspended(kNative);
   ScopedObjectAccess soa(Thread::Current());
 
-  Heap* heap = Runtime::Current()->GetHeap();
-  ImageSpace* image_space = heap->GetImageSpace();
+  gc::Heap* heap = Runtime::Current()->GetHeap();
+  gc::space::ImageSpace* image_space = heap->GetImageSpace();
   CHECK(image_space != NULL);
   const ImageHeader& image_header = image_space->GetImageHeader();
   if (!image_header.IsValid()) {
diff --git a/src/runtime.cc b/src/runtime.cc
index 87456ca..1889d88 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -34,8 +34,9 @@
 #include "constants_mips.h"
 #include "constants_x86.h"
 #include "debugger.h"
-#include "gc/card_table-inl.h"
-#include "heap.h"
+#include "gc/accounting/card_table-inl.h"
+#include "gc/heap.h"
+#include "gc/space/space.h"
 #include "image.h"
 #include "instrumentation.h"
 #include "intern_table.h"
@@ -55,7 +56,6 @@
 #include "signal_catcher.h"
 #include "signal_set.h"
 #include "sirt_ref.h"
-#include "gc/space.h"
 #include "thread.h"
 #include "thread_list.h"
 #include "trace.h"
@@ -332,11 +332,11 @@
   // -Xcheck:jni is off by default for regular builds but on by default in debug builds.
   parsed->check_jni_ = kIsDebugBuild;
 
-  parsed->heap_initial_size_ = Heap::kDefaultInitialSize;
-  parsed->heap_maximum_size_ = Heap::kDefaultMaximumSize;
-  parsed->heap_min_free_ = Heap::kDefaultMinFree;
-  parsed->heap_max_free_ = Heap::kDefaultMaxFree;
-  parsed->heap_target_utilization_ = Heap::kDefaultTargetUtilization;
+  parsed->heap_initial_size_ = gc::Heap::kDefaultInitialSize;
+  parsed->heap_maximum_size_ = gc::Heap::kDefaultMaximumSize;
+  parsed->heap_min_free_ = gc::Heap::kDefaultMinFree;
+  parsed->heap_max_free_ = gc::Heap::kDefaultMaxFree;
+  parsed->heap_target_utilization_ = gc::Heap::kDefaultTargetUtilization;
   parsed->heap_growth_limit_ = 0;  // 0 means no growth limit.
   parsed->stack_size_ = 0; // 0 means default.
 
@@ -810,14 +810,14 @@
     GetInstrumentation()->ForceInterpretOnly();
   }
 
-  heap_ = new Heap(options->heap_initial_size_,
-                   options->heap_growth_limit_,
-                   options->heap_min_free_,
-                   options->heap_max_free_,
-                   options->heap_target_utilization_,
-                   options->heap_maximum_size_,
-                   options->image_,
-                   options->is_concurrent_gc_enabled_);
+  heap_ = new gc::Heap(options->heap_initial_size_,
+                       options->heap_growth_limit_,
+                       options->heap_min_free_,
+                       options->heap_max_free_,
+                       options->heap_target_utilization_,
+                       options->heap_maximum_size_,
+                       options->image_,
+                       options->is_concurrent_gc_enabled_);
 
   BlockSignals();
   InitPlatformSignalHandlers();
@@ -839,8 +839,8 @@
   // Now we're attached, we can take the heap lock and validate the heap.
   GetHeap()->EnableObjectValidation();
 
-  CHECK_GE(GetHeap()->GetSpaces().size(), 1U);
-  if (GetHeap()->GetSpaces()[0]->IsImageSpace()) {
+  CHECK_GE(GetHeap()->GetContinuousSpaces().size(), 1U);
+  if (GetHeap()->GetContinuousSpaces()[0]->IsImageSpace()) {
     class_linker_ = ClassLinker::CreateFromImage(intern_table_);
   } else {
     CHECK(options->boot_class_path_ != NULL);
@@ -1052,12 +1052,13 @@
   thread_list_->Unregister(self);
 }
 
-void Runtime::VisitConcurrentRoots(RootVisitor* visitor, void* arg) {
-  if (intern_table_->IsDirty()) {
-    intern_table_->VisitRoots(visitor, arg);
+void Runtime::VisitConcurrentRoots(RootVisitor* visitor, void* arg, bool only_dirty,
+                                   bool clean_dirty) {
+  if (!only_dirty || intern_table_->IsDirty()) {
+    intern_table_->VisitRoots(visitor, arg, clean_dirty);
   }
-  if (class_linker_->IsDirty()) {
-    class_linker_->VisitRoots(visitor, arg);
+  if (!only_dirty || class_linker_->IsDirty()) {
+    class_linker_->VisitRoots(visitor, arg, clean_dirty);
   }
 }
 
@@ -1077,15 +1078,8 @@
   VisitNonThreadRoots(visitor, arg);
 }
 
-void Runtime::DirtyRoots() {
-  CHECK(intern_table_ != NULL);
-  intern_table_->Dirty();
-  CHECK(class_linker_ != NULL);
-  class_linker_->Dirty();
-}
-
-void Runtime::VisitRoots(RootVisitor* visitor, void* arg) {
-  VisitConcurrentRoots(visitor, arg);
+void Runtime::VisitRoots(RootVisitor* visitor, void* arg, bool only_dirty, bool clean_dirty) {
+  VisitConcurrentRoots(visitor, arg, only_dirty, clean_dirty);
   VisitNonConcurrentRoots(visitor, arg);
 }
 
diff --git a/src/runtime.h b/src/runtime.h
index 1b0c437..dfcd647 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -27,8 +27,8 @@
 
 #include "base/macros.h"
 #include "base/stringpiece.h"
+#include "gc/heap.h"
 #include "globals.h"
-#include "heap.h"
 #include "instruction_set.h"
 #include "instrumentation.h"
 #include "jobject_comparator.h"
@@ -39,17 +39,19 @@
 
 namespace art {
 
+namespace gc {
+  class Heap;
+}
 namespace mirror {
-class AbstractMethod;
-class ClassLoader;
-template<class T> class PrimitiveArray;
-typedef PrimitiveArray<int8_t> ByteArray;
-class String;
-class Throwable;
+  class AbstractMethod;
+  class ClassLoader;
+  template<class T> class PrimitiveArray;
+  typedef PrimitiveArray<int8_t> ByteArray;
+  class String;
+  class Throwable;
 }  // namespace mirror
 class ClassLinker;
 class DexFile;
-class Heap;
 class InternTable;
 struct JavaVMExt;
 class MonitorList;
@@ -224,7 +226,7 @@
     return default_stack_size_;
   }
 
-  Heap* GetHeap() const {
+  gc::Heap* GetHeap() const {
     return heap_;
   }
 
@@ -256,14 +258,13 @@
     return "2.0.0";
   }
 
-  // Force all the roots which can be marked concurrently to be dirty.
-  void DirtyRoots();
-
-  // Visit all the roots.
-  void VisitRoots(RootVisitor* visitor, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  // Visit all the roots. If only_dirty is true then non-dirty roots won't be visited. If
+  // clean_dirty is true then dirty roots will be marked as non-dirty after visiting.
+  void VisitRoots(RootVisitor* visitor, void* arg, bool only_dirty, bool clean_dirty)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Visit all of the roots we can safely do concurrently.
-  void VisitConcurrentRoots(RootVisitor* visitor, void* arg);
+  void VisitConcurrentRoots(RootVisitor* visitor, void* arg, bool only_dirty, bool clean_dirty);
 
   // Visit all of the non thread roots, we can do this with mutators unpaused.
   void VisitNonThreadRoots(RootVisitor* visitor, void* arg);
@@ -392,7 +393,7 @@
   // The default stack size for managed threads created by the runtime.
   size_t default_stack_size_;
 
-  Heap* heap_;
+  gc::Heap* heap_;
 
   MonitorList* monitor_list_;
 
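
The (only_dirty, clean_dirty) pair replaces the DirtyRoots() scheme deleted from
runtime.cc above: instead of forcing every table dirty before a scan, callers now
choose whether to skip clean tables and whether to reset the flag. The hprof
dumper earlier in this change passes (false, false) to see every root without
disturbing GC state. A compilable sketch of how the flags compose, with
hypothetical Sketch* types (see the intern table sketch earlier for the flag's
lifecycle):

    #include <cstdio>

    typedef void RootVisitor(const void* root, void* arg);

    struct SketchTable {
      bool dirty;
      void VisitRoots(RootVisitor*, void*, bool clean_dirty) {
        // ... visit this table's roots ...
        if (clean_dirty) {
          dirty = false;
        }
      }
    };

    struct SketchRuntime {
      SketchTable intern_table;  // stand-ins for intern_table_ / class_linker_
      SketchTable class_linker;

      void VisitConcurrentRoots(RootVisitor* v, void* arg, bool only_dirty,
                                bool clean_dirty) {
        if (!only_dirty || intern_table.dirty) {
          intern_table.VisitRoots(v, arg, clean_dirty);
        }
        if (!only_dirty || class_linker.dirty) {
          class_linker.VisitRoots(v, arg, clean_dirty);
        }
      }
    };

    int main() {
      SketchRuntime rt;
      rt.intern_table.dirty = true;
      rt.class_linker.dirty = false;
      rt.VisitConcurrentRoots(NULL, NULL, true, false);  // concurrent re-scan
      rt.VisitConcurrentRoots(NULL, NULL, true, true);   // final pause
      std::printf("intern table dirty: %d\n", rt.intern_table.dirty);
      return 0;
    }
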
diff --git a/src/runtime_support.cc b/src/runtime_support.cc
index f6225ed..c933621 100644
--- a/src/runtime_support.cc
+++ b/src/runtime_support.cc
@@ -18,7 +18,7 @@
 
 #include "class_linker-inl.h"
 #include "dex_file-inl.h"
-#include "gc/card_table-inl.h"
+#include "gc/accounting/card_table-inl.h"
 #include "mirror/abstract_method-inl.h"
 #include "mirror/class-inl.h"
 #include "mirror/field-inl.h"
diff --git a/src/signal_catcher.cc b/src/signal_catcher.cc
index c021dd1..a630db8 100644
--- a/src/signal_catcher.cc
+++ b/src/signal_catcher.cc
@@ -27,7 +27,7 @@
 
 #include "base/unix_file/fd_file.h"
 #include "class_linker.h"
-#include "heap.h"
+#include "gc/heap.h"
 #include "os.h"
 #include "runtime.h"
 #include "scoped_thread_state_change.h"
diff --git a/src/thread-inl.h b/src/thread-inl.h
index 6c1ae59..2fc5987 100644
--- a/src/thread-inl.h
+++ b/src/thread-inl.h
@@ -125,7 +125,7 @@
 }
 
 inline void Thread::VerifyStack() {
-  Heap* heap = Runtime::Current()->GetHeap();
+  gc::Heap* heap = Runtime::Current()->GetHeap();
   if (heap->IsObjectValidationEnabled()) {
     VerifyStackImpl();
   }
diff --git a/src/thread.cc b/src/thread.cc
index 3ed388b..b7337cc 100644
--- a/src/thread.cc
+++ b/src/thread.cc
@@ -35,8 +35,9 @@
 #include "debugger.h"
 #include "dex_file-inl.h"
 #include "gc_map.h"
-#include "gc/card_table-inl.h"
-#include "heap.h"
+#include "gc/accounting/card_table-inl.h"
+#include "gc/heap.h"
+#include "gc/space/space.h"
 #include "invoke_arg_array_builder.h"
 #include "jni_internal.h"
 #include "mirror/abstract_method-inl.h"
@@ -55,7 +56,6 @@
 #include "ScopedLocalRef.h"
 #include "ScopedUtfChars.h"
 #include "sirt_ref.h"
-#include "gc/space.h"
 #include "stack.h"
 #include "stack_indirect_reference_table.h"
 #include "thread-inl.h"
@@ -2198,7 +2198,7 @@
 }
 
 static void VerifyObject(const mirror::Object* root, void* arg) {
-  Heap* heap = reinterpret_cast<Heap*>(arg);
+  gc::Heap* heap = reinterpret_cast<gc::Heap*>(arg);
   heap->VerifyObject(root);
 }
 
diff --git a/src/thread_pool.cc b/src/thread_pool.cc
index 370e4bc..f0f6f18 100644
--- a/src/thread_pool.cc
+++ b/src/thread_pool.cc
@@ -154,17 +154,22 @@
   return NULL;
 }
 
-void ThreadPool::Wait(Thread* self, bool do_work) {
-  Task* task = NULL;
-  while ((task = TryGetTask(self)) != NULL) {
-    task->Run(self);
-    task->Finalize();
+void ThreadPool::Wait(Thread* self, bool do_work, bool may_hold_locks) {
+  if (do_work) {
+    Task* task = NULL;
+    while ((task = TryGetTask(self)) != NULL) {
+      task->Run(self);
+      task->Finalize();
+    }
   }
-
   // Wait until each thread is waiting and the task list is empty.
   MutexLock mu(self, task_queue_lock_);
   while (!shutting_down_ && (waiting_count_ != GetThreadCount() || !tasks_.empty())) {
-    completion_condition_.Wait(self);
+    if (!may_hold_locks) {
+      completion_condition_.Wait(self);
+    } else {
+      completion_condition_.WaitHoldingLocks(self);
+    }
   }
 }
 
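
The may_hold_locks flag exists because some callers must block in Wait() while
still holding locks, and a checking build can reasonably assert that an ordinary
condition-variable wait happens with no locks held. A sketch of that contract
using pthreads and hypothetical names (the GCC __thread extension stands in for
the per-Thread lock bookkeeping; initialize cond with PTHREAD_COND_INITIALIZER):

    #include <assert.h>
    #include <pthread.h>

    // Hypothetical per-thread count of locks held, maintained by the
    // locking wrappers.
    static __thread int held_locks = 0;

    struct SketchCondition {
      pthread_cond_t cond;

      // Ordinary wait: blocking while holding locks risks deadlock, so a
      // checking build turns it into an assertion failure.
      void Wait(pthread_mutex_t* guard) {
        assert(held_locks == 0 && "waiting while holding locks");
        pthread_cond_wait(&cond, guard);
      }

      // Explicit opt-out for callers that know they must wait while
      // holding locks, analogous to WaitHoldingLocks() above.
      void WaitHoldingLocks(pthread_mutex_t* guard) {
        pthread_cond_wait(&cond, guard);
      }
    };
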
diff --git a/src/thread_pool.h b/src/thread_pool.h
index 18af97d..814e654 100644
--- a/src/thread_pool.h
+++ b/src/thread_pool.h
@@ -80,7 +80,7 @@
   virtual ~ThreadPool();
 
   // Wait for all tasks currently on queue to get completed.
-  void Wait(Thread* self, bool do_work = true);
+  void Wait(Thread* self, bool do_work, bool may_hold_locks);
 
   size_t GetTaskCount(Thread* self);
 
diff --git a/src/thread_pool_test.cc b/src/thread_pool_test.cc
index e056935..e2a32f5 100644
--- a/src/thread_pool_test.cc
+++ b/src/thread_pool_test.cc
@@ -68,7 +68,7 @@
   }
   thread_pool.StartWorkers(self);
   // Wait for tasks to complete.
-  thread_pool.Wait(self);
+  thread_pool.Wait(self, true, false);
   // Make sure that we finished all the work.
   EXPECT_EQ(num_tasks, count);
 }
@@ -137,7 +137,7 @@
   static const int depth = 8;
   thread_pool.AddTask(self, new TreeTask(&thread_pool, &count, depth));
   thread_pool.StartWorkers(self);
-  thread_pool.Wait(self);
+  thread_pool.Wait(self, true, false);
   EXPECT_EQ((1 << depth) - 1, count);
 }
 
diff --git a/src/thread_state.h b/src/thread_state.h
index 7c4a16f..52f092e 100644
--- a/src/thread_state.h
+++ b/src/thread_state.h
@@ -28,6 +28,7 @@
   kBlocked,                        // BLOCKED        TS_MONITOR   blocked on a monitor
   kWaiting,                        // WAITING        TS_WAIT      in Object.wait()
   kWaitingForGcToComplete,         // WAITING        TS_WAIT      blocked waiting for GC
+  kWaitingForCheckPointsToRun,     // WAITING        TS_WAIT      GC waiting for checkpoints to run
   kWaitingPerformingGc,            // WAITING        TS_WAIT      performing GC
   kWaitingForDebuggerSend,         // WAITING        TS_WAIT      blocked waiting for events to be sent
   kWaitingForDebuggerToAttach,     // WAITING        TS_WAIT      blocked waiting for debugger to attach
diff --git a/src/verifier/method_verifier.cc b/src/verifier/method_verifier.cc
index efdd6f6..021e984 100644
--- a/src/verifier/method_verifier.cc
+++ b/src/verifier/method_verifier.cc
@@ -26,7 +26,7 @@
 #include "dex_file-inl.h"
 #include "dex_instruction-inl.h"
 #include "dex_instruction_visitor.h"
-#include "gc/card_table-inl.h"
+#include "gc/accounting/card_table-inl.h"
 #include "indenter.h"
 #include "intern_table.h"
 #include "leb128.h"