Directory restructuring of object.h

Break object.h into constituent files.
Reduce number of #includes in other GC header files.
Introduce -inl.h files to avoid mirror files #include-ing each other.
Check invariants of verifier RegTypes for all constructors.

Change-Id: Iecf1171c02910ac152d52947330ef456df4043bc
diff --git a/src/gc/atomic_stack.h b/src/gc/atomic_stack.h
index cd1781d..0197bce 100644
--- a/src/gc/atomic_stack.h
+++ b/src/gc/atomic_stack.h
@@ -101,11 +101,11 @@
   }
 
   T* Begin() {
-    return const_cast<Object**>(begin_ + front_index_);
+    return const_cast<mirror::Object**>(begin_ + front_index_);
   }
 
   T* End() {
-    return const_cast<Object**>(begin_ + back_index_);
+    return const_cast<mirror::Object**>(begin_ + back_index_);
   }
 
   size_t Capacity() const {
@@ -159,6 +159,8 @@
   DISALLOW_COPY_AND_ASSIGN(AtomicStack);
 };
 
+typedef AtomicStack<mirror::Object*> ObjectStack;
+
 }  // namespace art
 
 #endif  // ART_SRC_MARK_STACK_H_
diff --git a/src/gc/card_table-inl.h b/src/gc/card_table-inl.h
new file mode 100644
index 0000000..13590b7
--- /dev/null
+++ b/src/gc/card_table-inl.h
@@ -0,0 +1,209 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_GC_CARDTABLE_INL_H_
+#define ART_SRC_GC_CARDTABLE_INL_H_
+
+#include "base/logging.h"
+#include "card_table.h"
+#include "cutils/atomic-inline.h"
+#include "space_bitmap.h"
+#include "utils.h"
+
+namespace art {
+
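+// Compare-and-swap a single card byte, returning true on success. There is no byte-wide CAS
+// primitive available here, so this is emulated with a word-wide android_atomic_cas: the
+// enclosing aligned word is read, the target byte is spliced in, and the whole word is CASed.
+// A concurrent update to a neighboring byte in the same word simply fails the CAS and the
+// caller retries.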
+static inline bool byte_cas(byte old_value, byte new_value, byte* address) {
+  // Offset of the byte within its aligned word.
+  const size_t shift_in_bytes = reinterpret_cast<uintptr_t>(address) % sizeof(uintptr_t);
+  // Align the address down.
+  address -= shift_in_bytes;
+  // On little-endian, the byte at the lowest address sits in the least significant bits.
+  const size_t shift_in_bits = shift_in_bytes * kBitsPerByte;
+  int32_t* word_address = reinterpret_cast<int32_t*>(address);
+  // Word with the byte we are trying to cas cleared.
+  const int32_t cur_word = *word_address & ~(0xFF << shift_in_bits);
+  const int32_t old_word = cur_word | (static_cast<int32_t>(old_value) << shift_in_bits);
+  const int32_t new_word = cur_word | (static_cast<int32_t>(new_value) << shift_in_bits);
+  return android_atomic_cas(old_word, new_word, word_address) == 0;
+}
+
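+// Scans cards within [scan_begin, scan_end) in three steps: card by card up to the first
+// word-aligned card, then a word's worth of cards at a time so that runs of clean cards are
+// skipped cheaply, then the unaligned tail card by card. For each card at or above minimum_age,
+// the objects in the kCardSize bytes of heap it covers are visited through the given bitmap.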
+template <typename Visitor, typename FingerVisitor>
+inline void CardTable::Scan(SpaceBitmap* bitmap, byte* scan_begin, byte* scan_end,
+                            const Visitor& visitor, const FingerVisitor& finger_visitor,
+                            const byte minimum_age) const {
+  DCHECK(bitmap->HasAddress(scan_begin));
+  DCHECK(bitmap->HasAddress(scan_end - 1));  // scan_end is the byte after the last byte we scan.
+  byte* card_cur = CardFromAddr(scan_begin);
+  byte* card_end = CardFromAddr(scan_end);
+  CheckCardValid(card_cur);
+  CheckCardValid(card_end);
+
+  // Handle any unaligned cards at the start.
+  while (!IsAligned<sizeof(word)>(card_cur) && card_cur < card_end) {
+    if (*card_cur >= minimum_age) {
+      uintptr_t start = reinterpret_cast<uintptr_t>(AddrFromCard(card_cur));
+      uintptr_t end = start + kCardSize;
+      bitmap->VisitMarkedRange(start, end, visitor, finger_visitor);
+    }
+    ++card_cur;
+  }
+
+  byte* aligned_end = card_end -
+      (reinterpret_cast<uintptr_t>(card_end) & (sizeof(uintptr_t) - 1));
+
+  // Now that card_cur is word-aligned, we can scan a word's worth of cards at a time.
+  uintptr_t* word_cur = reinterpret_cast<uintptr_t*>(card_cur);
+  uintptr_t* word_end = reinterpret_cast<uintptr_t*>(aligned_end);
+
+  // TODO: Parallelize
+  while (word_cur < word_end) {
+    // Find the first dirty card.
+    while (word_cur < word_end && *word_cur == 0) {
+      word_cur++;
+    }
+    if (word_cur >= word_end) {
+      break;
+    }
+    uintptr_t start_word = *word_cur;
+    for (size_t i = 0; i < sizeof(uintptr_t); ++i) {
+      if ((start_word & 0xFF) >= minimum_age) {
+        byte* card = reinterpret_cast<byte*>(word_cur) + i;
+        const byte card_byte = *card;
+        DCHECK(card_byte == (start_word & 0xFF) || card_byte == kCardDirty)
+            << "card " << static_cast<size_t>(card_byte) << " word " << (start_word & 0xFF);
+        uintptr_t start = reinterpret_cast<uintptr_t>(AddrFromCard(card));
+        uintptr_t end = start + kCardSize;
+        bitmap->VisitMarkedRange(start, end, visitor, finger_visitor);
+      }
+      start_word >>= 8;
+    }
+    ++word_cur;
+  }
+
+  // Handle any unaligned cards at the end.
+  card_cur = reinterpret_cast<byte*>(word_end);
+  while (card_cur < card_end) {
+    if (*card_cur >= minimum_age) {
+      uintptr_t start = reinterpret_cast<uintptr_t>(AddrFromCard(card_cur));
+      uintptr_t end = start + kCardSize;
+      bitmap->VisitMarkedRange(start, end, visitor, finger_visitor);
+    }
+    ++card_cur;
+  }
+}
+
+/*
+ * The visitor is expected to take in a card value and return the new value to store. Whenever
+ * the returned value differs from the old one, the modified visitor is invoked.
+ * visitor: Returns the new value for a card given its old value.
+ * modified: Called with (card address, old value, new value) for every card the visitor
+ * changed. Enables the caller to learn which cards were cleared.
+ */
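+// A minimal sketch of a visitor pair (hypothetical names, not part of this change):
+//   struct AgeVisitor {  // ages a card: dirty cards decay by one, older cards are cleared
+//     byte operator()(byte card) const {
+//       return (card == CardTable::kCardDirty) ? card - 1 : 0;
+//     }
+//   };
+//   card_table->ModifyCardsAtomic(begin, end, AgeVisitor(), VoidFunctor());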
+template <typename Visitor, typename ModifiedVisitor>
+inline void CardTable::ModifyCardsAtomic(byte* scan_begin, byte* scan_end, const Visitor& visitor,
+                                         const ModifiedVisitor& modified) {
+  byte* card_cur = CardFromAddr(scan_begin);
+  byte* card_end = CardFromAddr(scan_end);
+  CheckCardValid(card_cur);
+  CheckCardValid(card_end);
+
+  // Handle any unaligned cards at the start.
+  while (!IsAligned<sizeof(word)>(card_cur) && card_cur < card_end) {
+    byte expected, new_value;
+    do {
+      expected = *card_cur;
+      new_value = visitor(expected);
+    } while (expected != new_value && UNLIKELY(!byte_cas(expected, new_value, card_cur)));
+    if (expected != new_value) {
+      modified(card_cur, expected, new_value);
+    }
+    ++card_cur;
+  }
+
+  // Handle unaligned cards at the end.
+  while (!IsAligned<sizeof(word)>(card_end) && card_end > card_cur) {
+    --card_end;
+    byte expected, new_value;
+    do {
+      expected = *card_end;
+      new_value = visitor(expected);
+    } while (expected != new_value && UNLIKELY(!byte_cas(expected, new_value, card_end)));
+    if (expected != new_value) {
+      modified(card_end, expected, new_value);
+    }
+  }
+
+  // Now that the remaining range is word-aligned, we can process a word's worth of cards at a time.
+  uintptr_t* word_cur = reinterpret_cast<uintptr_t*>(card_cur);
+  uintptr_t* word_end = reinterpret_cast<uintptr_t*>(card_end);
+  uintptr_t expected_word;
+  uintptr_t new_word;
+
+  // TODO: Parallelize.
+  while (word_cur < word_end) {
+    while ((expected_word = *word_cur) != 0) {
+      new_word =
+          (visitor((expected_word >> 0) & 0xFF) << 0) |
+          (visitor((expected_word >> 8) & 0xFF) << 8) |
+          (visitor((expected_word >> 16) & 0xFF) << 16) |
+          (visitor((expected_word >> 24) & 0xFF) << 24);
+      if (new_word == expected_word) {
+        // No need to do a cas.
+        break;
+      }
+      if (LIKELY(android_atomic_cas(expected_word, new_word,
+                                    reinterpret_cast<int32_t*>(word_cur)) == 0)) {
+        for (size_t i = 0; i < sizeof(uintptr_t); ++i) {
+          const byte expected_byte = (expected_word >> (8 * i)) & 0xFF;
+          const byte new_byte = (new_word >> (8 * i)) & 0xFF;
+          if (expected_byte != new_byte) {
+            modified(reinterpret_cast<byte*>(word_cur) + i, expected_byte, new_byte);
+          }
+        }
+        break;
+      }
+    }
+    ++word_cur;
+  }
+}
+
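+// The table is biased so that no heap-base subtraction is needed on the hot path: a heap
+// address addr maps to the card at biased_begin_ + (addr >> kCardShift), and AddrFromCard
+// below inverts that mapping.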
+inline void* CardTable::AddrFromCard(const byte *card_addr) const {
+  DCHECK(IsValidCard(card_addr))
+    << " card_addr: " << reinterpret_cast<const void*>(card_addr)
+    << " begin: " << reinterpret_cast<void*>(mem_map_->Begin() + offset_)
+    << " end: " << reinterpret_cast<void*>(mem_map_->End());
+  uintptr_t offset = card_addr - biased_begin_;
+  return reinterpret_cast<void*>(offset << kCardShift);
+}
+
+inline byte* CardTable::CardFromAddr(const void *addr) const {
+  byte *card_addr = biased_begin_ + (reinterpret_cast<uintptr_t>(addr) >> kCardShift);
+  // Sanity check that the caller was asking for an address covered by the card table.
+  DCHECK(IsValidCard(card_addr)) << "addr: " << addr
+      << " card_addr: " << reinterpret_cast<void*>(card_addr);
+  return card_addr;
+}
+
+inline void CardTable::CheckCardValid(byte* card) const {
+  DCHECK(IsValidCard(card))
+      << " card_addr: " << reinterpret_cast<const void*>(card)
+      << " begin: " << reinterpret_cast<void*>(mem_map_->Begin() + offset_)
+      << " end: " << reinterpret_cast<void*>(mem_map_->End());
+}
+
+}  // namespace art
+
+#endif  // ART_SRC_GC_CARDTABLE_INL_H_
diff --git a/src/gc/card_table.cc b/src/gc/card_table.cc
index f27777b..4331270 100644
--- a/src/gc/card_table.cc
+++ b/src/gc/card_table.cc
@@ -19,6 +19,7 @@
 #include <dynamic_annotations.h>
 
 #include "base/logging.h"
+#include "gc/card_table-inl.h"
 #include "heap.h"
 #include "heap_bitmap.h"
 #include "runtime.h"
diff --git a/src/gc/card_table.h b/src/gc/card_table.h
index 8f1bc92..842fcc3 100644
--- a/src/gc/card_table.h
+++ b/src/gc/card_table.h
@@ -17,19 +17,18 @@
 #ifndef ART_SRC_GC_CARDTABLE_H_
 #define ART_SRC_GC_CARDTABLE_H_
 
-#include "base/logging.h"
 #include "globals.h"
+#include "locks.h"
 #include "mem_map.h"
-#include "space_bitmap.h"
 #include "UniquePtr.h"
-#include "utils.h"
 
 namespace art {
-
+namespace mirror {
+class Object;
+}  // namespace mirror
 class Heap;
 class ContinuousSpace;
 class SpaceBitmap;
-class Object;
 
 // Maintain a card table from the write barrier. All writes of
 // non-NULL values to heap addresses should go through an entry in
@@ -50,12 +49,12 @@
   }
 
   // Is the object on a dirty card?
-  bool IsDirty(const Object* obj) const {
+  bool IsDirty(const mirror::Object* obj) const {
     return GetCard(obj) == kCardDirty;
   }
 
   // Return the state of the card at an address.
-  byte GetCard(const Object* obj) const {
+  byte GetCard(const mirror::Object* obj) const {
     return *CardFromAddr(obj);
   }
 
@@ -88,71 +87,7 @@
    */
   template <typename Visitor, typename ModifiedVisitor>
   void ModifyCardsAtomic(byte* scan_begin, byte* scan_end, const Visitor& visitor,
-                      const ModifiedVisitor& modified = VoidFunctor()) {
-    byte* card_cur = CardFromAddr(scan_begin);
-    byte* card_end = CardFromAddr(scan_end);
-    CheckCardValid(card_cur);
-    CheckCardValid(card_end);
-
-    // Handle any unaligned cards at the start.
-    while (!IsAligned<sizeof(word)>(card_cur) && card_cur < card_end) {
-      byte expected, new_value;
-      do {
-        expected = *card_cur;
-        new_value = visitor(expected);
-      } while (expected != new_value && UNLIKELY(byte_cas(expected, new_value, card_cur) != 0));
-      if (expected != new_value) {
-        modified(card_cur, expected, new_value);
-      }
-      ++card_cur;
-    }
-
-    // Handle unaligned cards at the end.
-    while (!IsAligned<sizeof(word)>(card_end) && card_end > card_cur) {
-      --card_end;
-      byte expected, new_value;
-      do {
-        expected = *card_end;
-        new_value = visitor(expected);
-      } while (expected != new_value && UNLIKELY(byte_cas(expected, new_value, card_end) != 0));
-      if (expected != new_value) {
-        modified(card_cur, expected, new_value);
-      }
-    }
-
-    // Now we have the words, we can process words in parallel.
-    uintptr_t* word_cur = reinterpret_cast<uintptr_t*>(card_cur);
-    uintptr_t* word_end = reinterpret_cast<uintptr_t*>(card_end);
-    uintptr_t expected_word;
-    uintptr_t new_word;
-
-    // TODO: Parallelize.
-    while (word_cur < word_end) {
-      while ((expected_word = *word_cur) != 0) {
-        new_word =
-            (visitor((expected_word >> 0) & 0xFF) << 0) |
-            (visitor((expected_word >> 8) & 0xFF) << 8) |
-            (visitor((expected_word >> 16) & 0xFF) << 16) |
-            (visitor((expected_word >> 24) & 0xFF) << 24);
-        if (new_word == expected_word) {
-          // No need to do a cas.
-          break;
-        }
-        if (LIKELY(android_atomic_cas(expected_word, new_word,
-                                      reinterpret_cast<int32_t*>(word_cur)) == 0)) {
-          for (size_t i = 0; i < sizeof(uintptr_t); ++i) {
-            const byte expected_byte = (expected_word >> (8 * i)) & 0xFF;
-            const byte new_byte = (new_word >> (8 * i)) & 0xFF;
-            if (expected_byte != new_byte) {
-              modified(reinterpret_cast<byte*>(word_cur) + i, expected_byte, new_byte);
-            }
-          }
-          break;
-        }
-      }
-      ++word_cur;
-    }
-  }
+                         const ModifiedVisitor& modified);
 
   // For every card between begin and end that is at least minimum_age old, invoke the visitor
   // with the specified argument.
   template <typename Visitor, typename FingerVisitor>
   void Scan(SpaceBitmap* bitmap, byte* scan_begin, byte* scan_end,
@@ -161,67 +96,7 @@
             const Visitor& visitor, const FingerVisitor& finger_visitor,
             const byte minimum_age = kCardDirty) const
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    DCHECK(bitmap->HasAddress(scan_begin));
-    DCHECK(bitmap->HasAddress(scan_end - 1));  // scan_end is the byte after the last byte we scan.
-    byte* card_cur = CardFromAddr(scan_begin);
-    byte* card_end = CardFromAddr(scan_end);
-    CheckCardValid(card_cur);
-    CheckCardValid(card_end);
-
-    // Handle any unaligned cards at the start.
-    while (!IsAligned<sizeof(word)>(card_cur) && card_cur < card_end) {
-      if (*card_cur >= minimum_age) {
-        uintptr_t start = reinterpret_cast<uintptr_t>(AddrFromCard(card_cur));
-        uintptr_t end = start + kCardSize;
-        bitmap->VisitMarkedRange(start, end, visitor, finger_visitor);
-      }
-      ++card_cur;
-    }
-
-    byte* aligned_end = card_end -
-        (reinterpret_cast<uintptr_t>(card_end) & (sizeof(uintptr_t) - 1));
-
-    // Now we have the words, we can send these to be processed in parallel.
-    uintptr_t* word_cur = reinterpret_cast<uintptr_t*>(card_cur);
-    uintptr_t* word_end = reinterpret_cast<uintptr_t*>(aligned_end);
-
-    // TODO: Parallelize
-    while (word_cur < word_end) {
-      // Find the first dirty card.
-      while (*word_cur == 0 && word_cur < word_end) {
-        word_cur++;
-      }
-      if (word_cur >= word_end) {
-        break;
-      }
-      uintptr_t start_word = *word_cur;
-      for (size_t i = 0; i < sizeof(uintptr_t); ++i) {
-        if ((start_word & 0xFF) >= minimum_age) {
-          byte* card = reinterpret_cast<byte*>(word_cur) + i;
-          const byte card_byte = *card;
-          DCHECK(card_byte == (start_word & 0xFF) || card_byte == kCardDirty)
-              << "card " << static_cast<size_t>(card_byte) << " word " << (start_word & 0xFF);
-          uintptr_t start = reinterpret_cast<uintptr_t>(AddrFromCard(card));
-          uintptr_t end = start + kCardSize;
-          bitmap->VisitMarkedRange(start, end, visitor, finger_visitor);
-        }
-        start_word >>= 8;
-      }
-      ++word_cur;
-    }
-
-    // Handle any unaligned cards at the end.
-    card_cur = reinterpret_cast<byte*>(word_end);
-    while (card_cur < card_end) {
-      if (*card_cur >= minimum_age) {
-        uintptr_t start = reinterpret_cast<uintptr_t>(AddrFromCard(card_cur));
-        uintptr_t end = start + kCardSize;
-        bitmap->VisitMarkedRange(start, end, visitor, finger_visitor);
-      }
-      ++card_cur;
-    }
-  }
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Assertion used to check the given address is covered by the card table
   void CheckAddrIsInCardTable(const byte* addr) const;
@@ -233,40 +108,14 @@
   void ClearSpaceCards(ContinuousSpace* space);
 
   // Returns the first address in the heap which maps to this card.
-  void* AddrFromCard(const byte *card_addr) const {
-    DCHECK(IsValidCard(card_addr))
-      << " card_addr: " << reinterpret_cast<const void*>(card_addr)
-      << " begin: " << reinterpret_cast<void*>(mem_map_->Begin() + offset_)
-      << " end: " << reinterpret_cast<void*>(mem_map_->End());
-    uintptr_t offset = card_addr - biased_begin_;
-    return reinterpret_cast<void*>(offset << kCardShift);
-  }
+  void* AddrFromCard(const byte *card_addr) const;
 
   // Returns the address of the relevant byte in the card table, given an address on the heap.
-  byte* CardFromAddr(const void *addr) const {
-    byte *card_addr = biased_begin_ + (reinterpret_cast<uintptr_t>(addr) >> kCardShift);
-    // Sanity check the caller was asking for address covered by the card table
-    DCHECK(IsValidCard(card_addr)) << "addr: " << addr
-        << " card_addr: " << reinterpret_cast<void*>(card_addr);
-    return card_addr;
-  }
+  byte* CardFromAddr(const void *addr) const;
 
   bool AddrIsInCardTable(const void* addr) const;
 
  private:
-  static int byte_cas(byte old_value, byte new_value, byte* address) {
-    // Little endian means most significant byte is on the left.
-    const size_t shift = reinterpret_cast<uintptr_t>(address) % sizeof(uintptr_t);
-    // Align the address down.
-    address -= shift;
-    int32_t* word_address = reinterpret_cast<int32_t*>(address);
-    // Word with the byte we are trying to cas cleared.
-    const int32_t cur_word = *word_address & ~(0xFF << shift);
-    const int32_t old_word = cur_word | (static_cast<int32_t>(old_value) << shift);
-    const int32_t new_word = cur_word | (static_cast<int32_t>(new_value) << shift);
-    return android_atomic_cas(old_word, new_word, word_address);
-  }
-
   CardTable(MemMap* begin, byte* biased_begin, size_t offset);
 
   // Returns true iff the card table address is within the bounds of the card table.
@@ -276,12 +125,7 @@
     return card_addr >= begin && card_addr < end;
   }
 
-  void CheckCardValid(byte* card) const {
-    DCHECK(IsValidCard(card))
-        << " card_addr: " << reinterpret_cast<const void*>(card)
-        << " begin: " << reinterpret_cast<void*>(mem_map_->Begin() + offset_)
-        << " end: " << reinterpret_cast<void*>(mem_map_->End());
-  }
+  void CheckCardValid(byte* card) const;
 
   // Verifies that all gray objects are on a dirty card.
   void VerifyCardTable();
diff --git a/src/gc/garbage_collector.cc b/src/gc/garbage_collector.cc
index bcc7b63..fbcdbaf 100644
--- a/src/gc/garbage_collector.cc
+++ b/src/gc/garbage_collector.cc
@@ -15,6 +15,7 @@
  */
 
 #include "garbage_collector.h"
+#include "thread.h"
 #include "thread_list.h"
 
 namespace art {
diff --git a/src/gc/garbage_collector.h b/src/gc/garbage_collector.h
index 9ddf45f..a1014c2 100644
--- a/src/gc/garbage_collector.h
+++ b/src/gc/garbage_collector.h
@@ -14,11 +14,13 @@
  * limitations under the License.
  */
 
-#ifndef ART_SRC_GC_GARBAGE_COLLECTR_H_
-#define ART_SRC_GC_GARBAGE_COLLECTR_H_
+#ifndef ART_SRC_GC_GARBAGE_COLLECTOR_H_
+#define ART_SRC_GC_GARBAGE_COLLECTOR_H_
 
 #include "locks.h"
-#include "utils.h"
+
+#include <stdint.h>
+#include <vector>
 
 namespace art {
 
@@ -56,7 +58,7 @@
   void RegisterPause(uint64_t nano_length);
 
  protected:
-  // The initial phase. Done with mutators upaused.
+  // The initial phase. Done without mutators paused.
   virtual void InitializePhase() = 0;
 
   // Mark all reachable objects, done concurrently.
@@ -68,7 +70,7 @@
   // Called with mutators running.
   virtual void ReclaimPhase() = 0;
 
-  // Called after the GC is finished. Done with mutators upaused.
+  // Called after the GC is finished. Done without mutators paused.
   virtual void FinishPhase() = 0;
 
   Heap* heap_;
@@ -78,4 +80,4 @@
 
 }  // namespace art
 
-#endif  // ART_SRC_GC_GARBAGE_COLLECTR_H_
+#endif  // ART_SRC_GC_GARBAGE_COLLECTOR_H_
diff --git a/src/gc/gc_type.h b/src/gc/gc_type.h
new file mode 100644
index 0000000..908f038
--- /dev/null
+++ b/src/gc/gc_type.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_GC_GC_TYPE_H_
+#define ART_SRC_GC_GC_TYPE_H_
+
+#include <iosfwd>
+
+namespace art {
+
+// The ordering of the enum matters: it is used to determine which GCs are run first.
+enum GcType {
+  // No GC.
+  kGcTypeNone,
+  // Sticky mark bits "generational" GC.
+  kGcTypeSticky,
+  // Partial GC, over only the alloc space.
+  kGcTypePartial,
+  // Full GC.
+  kGcTypeFull,
+  // Number of different GC types.
+  kGcTypeMax,
+};
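+// Because the values are ordered by increasing collection scope, code can compare GcTypes
+// directly; e.g. (illustrative) a completed kGcTypePartial also satisfies a request for
+// kGcTypeSticky.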
+std::ostream& operator<<(std::ostream& os, const GcType& policy);
+
+}  // namespace art
+
+#endif  // ART_SRC_GC_GC_TYPE_H_
diff --git a/src/gc/heap_bitmap-inl.h b/src/gc/heap_bitmap-inl.h
new file mode 100644
index 0000000..2811183
--- /dev/null
+++ b/src/gc/heap_bitmap-inl.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_GC_HEAP_BITMAP_INL_H_
+#define ART_SRC_GC_HEAP_BITMAP_INL_H_
+
+#include "heap_bitmap.h"
+
+namespace art {
+
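+// Applies the visitor to every marked object: each space bitmap is walked over its full heap
+// range, then the large object set is visited.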
+template <typename Visitor>
+inline void HeapBitmap::Visit(const Visitor& visitor) {
+  // TODO: C++0x auto
+  for (Bitmaps::iterator it = bitmaps_.begin(); it != bitmaps_.end(); ++it) {
+    SpaceBitmap* bitmap = *it;
+    bitmap->VisitMarkedRange(bitmap->HeapBegin(), bitmap->HeapLimit(), visitor, VoidFunctor());
+  }
+  large_objects_->Visit(visitor);
+}
+
+}  // namespace art
+
+#endif  // ART_SRC_GC_HEAP_BITMAP_INL_H_
diff --git a/src/gc/heap_bitmap.h b/src/gc/heap_bitmap.h
index 42c4166..87e0848 100644
--- a/src/gc/heap_bitmap.h
+++ b/src/gc/heap_bitmap.h
@@ -14,96 +14,91 @@
  * limitations under the License.
  */
 
-#ifndef ART_SRC_HEAP_BITMAP_H_
-#define ART_SRC_HEAP_BITMAP_H_
+#ifndef ART_SRC_GC_HEAP_BITMAP_H_
+#define ART_SRC_GC_HEAP_BITMAP_H_
 
+#include "locks.h"
 #include "space_bitmap.h"
 
 namespace art {
-  class Heap;
-  class SpaceBitmap;
+class Heap;
 
-  class HeapBitmap {
-   public:
-    bool Test(const Object* obj) SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
-      SpaceBitmap* bitmap = GetSpaceBitmap(obj);
-      if (LIKELY(bitmap != NULL)) {
-        return bitmap->Test(obj);
-      } else {
-        return large_objects_->Test(obj);
+class HeapBitmap {
+ public:
+  bool Test(const mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+    SpaceBitmap* bitmap = GetSpaceBitmap(obj);
+    if (LIKELY(bitmap != NULL)) {
+      return bitmap->Test(obj);
+    } else {
+      return large_objects_->Test(obj);
+    }
+  }
+
+  void Clear(const mirror::Object* obj)
+  EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+    SpaceBitmap* bitmap = GetSpaceBitmap(obj);
+    if (LIKELY(bitmap != NULL)) {
+      bitmap->Clear(obj);
+    } else {
+      large_objects_->Clear(obj);
+    }
+  }
+
+  void Set(const mirror::Object* obj)
+  EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+    SpaceBitmap* bitmap = GetSpaceBitmap(obj);
+    if (LIKELY(bitmap != NULL)) {
+      bitmap->Set(obj);
+    } else {
+      large_objects_->Set(obj);
+    }
+  }
+
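+  // Returns the bitmap of the continuous space containing obj, or NULL if obj is only covered
+  // by the large object set.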
+  SpaceBitmap* GetSpaceBitmap(const mirror::Object* obj) {
+    // TODO: C++0x auto
+    for (Bitmaps::iterator it = bitmaps_.begin(); it != bitmaps_.end(); ++it) {
+      if ((*it)->HasAddress(obj)) {
+        return *it;
       }
     }
+    return NULL;
+  }
 
-    void Clear(const Object* obj)
-        EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
-      SpaceBitmap* bitmap = GetSpaceBitmap(obj);
-      if (LIKELY(bitmap != NULL)) {
-        bitmap->Clear(obj);
-      } else {
-        large_objects_->Clear(obj);
-      }
-    }
+  void Walk(SpaceBitmap::Callback* callback, void* arg)
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
-    void Set(const Object* obj)
-        EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
-      SpaceBitmap* bitmap = GetSpaceBitmap(obj);
-      if (LIKELY(bitmap != NULL)) {
-        bitmap->Set(obj);
-      } else {
-        large_objects_->Set(obj);
-      }
-    }
+  template <typename Visitor>
+  void Visit(const Visitor& visitor)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-    SpaceBitmap* GetSpaceBitmap(const Object* obj) {
-      // TODO: C++0x auto
-      for (Bitmaps::iterator it = bitmaps_.begin(); it != bitmaps_.end(); ++it) {
-        if ((*it)->HasAddress(obj)) {
-          return *it;
-        }
-      }
-      return NULL;
-    }
+  // Find and replace a bitmap pointer; this is used for the bitmap swapping in the GC.
+  void ReplaceBitmap(SpaceBitmap* old_bitmap, SpaceBitmap* new_bitmap)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
-    void Walk(SpaceBitmap::Callback* callback, void* arg)
-        SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+  HeapBitmap(Heap* heap);
 
-    template <typename Visitor>
-    void Visit(const Visitor& visitor)
-        EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
-      // TODO: C++0x auto
-      for (Bitmaps::iterator it = bitmaps_.begin(); it != bitmaps_.end(); ++it) {
-        SpaceBitmap* bitmap = *it;
-        bitmap->VisitMarkedRange(bitmap->HeapBegin(), bitmap->HeapLimit(), visitor, VoidFunctor());
-      }
-      large_objects_->Visit(visitor);
-    }
+  inline SpaceSetMap* GetLargeObjects() const {
+    return large_objects_;
+  }
 
-    // Find and replace a bitmap pointer, this is used by for the bitmap swapping in the GC.
-    void ReplaceBitmap(SpaceBitmap* old_bitmap, SpaceBitmap* new_bitmap)
-        EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+  void SetLargeObjects(SpaceSetMap* large_objects);
 
-    HeapBitmap(Heap* heap);
+ private:
 
-    inline SpaceSetMap* GetLargeObjects() const {
-      return large_objects_;
-    }
+  const Heap* const heap_;
 
-    void SetLargeObjects(SpaceSetMap* large_objects);
+  void AddSpaceBitmap(SpaceBitmap* bitmap);
 
-   private:
+  typedef std::vector<SpaceBitmap*> Bitmaps;
+  Bitmaps bitmaps_;
 
-    const Heap* const heap_;
+  // Large object sets.
+  SpaceSetMap* large_objects_;
 
-    void AddSpaceBitmap(SpaceBitmap* bitmap);
+  friend class Heap;
+};
 
-    typedef std::vector<SpaceBitmap*> Bitmaps;
-    Bitmaps bitmaps_;
-
-    // Large object sets.
-    SpaceSetMap* large_objects_;
-
-    friend class Heap;
-  };
 }  // namespace art
 
-#endif  // ART_SRC_HEAP_BITMAP_H_
+#endif  // ART_SRC_GC_HEAP_BITMAP_H_
diff --git a/src/gc/large_object_space.cc b/src/gc/large_object_space.cc
index 1b93e5d..69320fa 100644
--- a/src/gc/large_object_space.cc
+++ b/src/gc/large_object_space.cc
@@ -22,6 +22,7 @@
 #include "image.h"
 #include "os.h"
 #include "space_bitmap.h"
+#include "thread.h"
 #include "utils.h"
 
 namespace art {
@@ -58,13 +59,13 @@
   return new LargeObjectMapSpace(name);
 }
 
-Object* LargeObjectMapSpace::Alloc(Thread* self, size_t num_bytes) {
+mirror::Object* LargeObjectMapSpace::Alloc(Thread* self, size_t num_bytes) {
   MemMap* mem_map = MemMap::MapAnonymous("allocation", NULL, num_bytes, PROT_READ | PROT_WRITE);
   if (mem_map == NULL) {
     return NULL;
   }
   MutexLock mu(self, lock_);
-  Object* obj = reinterpret_cast<Object*>(mem_map->Begin());
+  mirror::Object* obj = reinterpret_cast<mirror::Object*>(mem_map->Begin());
   large_objects_.push_back(obj);
   mem_maps_.Put(obj, mem_map);
   size_t allocation_size = mem_map->Size();
@@ -75,7 +76,7 @@
   return obj;
 }
 
-size_t LargeObjectMapSpace::Free(Thread* self, Object* ptr) {
+size_t LargeObjectMapSpace::Free(Thread* self, mirror::Object* ptr) {
   MutexLock mu(self, lock_);
   MemMaps::iterator found = mem_maps_.find(ptr);
   CHECK(found != mem_maps_.end()) << "Attempted to free large object which was not live";
@@ -88,14 +89,14 @@
   return allocation_size;
 }
 
-size_t LargeObjectMapSpace::AllocationSize(const Object* obj) {
+size_t LargeObjectMapSpace::AllocationSize(const mirror::Object* obj) {
   MutexLock mu(Thread::Current(), lock_);
-  MemMaps::iterator found = mem_maps_.find(const_cast<Object*>(obj));
+  MemMaps::iterator found = mem_maps_.find(const_cast<mirror::Object*>(obj));
   CHECK(found != mem_maps_.end()) << "Attempted to get size of a large object which is not live";
   return found->second->Size();
 }
 
-size_t LargeObjectSpace::FreeList(Thread* self, size_t num_ptrs, Object** ptrs) {
+size_t LargeObjectSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
   size_t total = 0;
   for (size_t i = 0; i < num_ptrs; ++i) {
     if (kDebugSpaces) {
@@ -115,9 +116,9 @@
   }
 }
 
-bool LargeObjectMapSpace::Contains(const Object* obj) const {
+bool LargeObjectMapSpace::Contains(const mirror::Object* obj) const {
   MutexLock mu(Thread::Current(), lock_);
-  return mem_maps_.find(const_cast<Object*>(obj)) != mem_maps_.end();
+  return mem_maps_.find(const_cast<mirror::Object*>(obj)) != mem_maps_.end();
 }
 
 FreeListSpace* FreeListSpace::Create(const std::string& name, byte* requested_begin, size_t size) {
@@ -191,7 +192,7 @@
   }
 }
 
-size_t FreeListSpace::Free(Thread* self, Object* obj) {
+size_t FreeListSpace::Free(Thread* self, mirror::Object* obj) {
   MutexLock mu(self, lock_);
   CHECK(Contains(obj));
   // Check adjacent chunks to see if we need to combine.
@@ -220,7 +221,7 @@
   return allocation_size;
 }
 
-bool FreeListSpace::Contains(const Object* obj) const {
+bool FreeListSpace::Contains(const mirror::Object* obj) const {
   return mem_map_->HasAddress(obj);
 }
 
@@ -228,13 +229,13 @@
   return chunk + chunk->GetSize() / kAlignment;
 }
 
-size_t FreeListSpace::AllocationSize(const Object* obj) {
-  Chunk* chunk = ChunkFromAddr(const_cast<Object*>(obj));
+size_t FreeListSpace::AllocationSize(const mirror::Object* obj) {
+  Chunk* chunk = ChunkFromAddr(const_cast<mirror::Object*>(obj));
   CHECK(!chunk->IsFree());
   return chunk->GetSize();
 }
 
-Object* FreeListSpace::Alloc(Thread* self, size_t num_bytes) {
+mirror::Object* FreeListSpace::Alloc(Thread* self, size_t num_bytes) {
   MutexLock mu(self, lock_);
   num_bytes = RoundUp(num_bytes, kAlignment);
   Chunk temp;
@@ -261,7 +262,7 @@
   total_objects_allocated_++;
   num_bytes_allocated_ += num_bytes;
   total_bytes_allocated_ += num_bytes;
-  return reinterpret_cast<Object*>(addr);
+  return reinterpret_cast<mirror::Object*>(addr);
 }
 
 void FreeListSpace::Dump(std::ostream& os) const {
diff --git a/src/gc/large_object_space.h b/src/gc/large_object_space.h
index 979fce6..c34dbcc 100644
--- a/src/gc/large_object_space.h
+++ b/src/gc/large_object_space.h
@@ -18,8 +18,13 @@
 #define ART_SRC_GC_LARGE_OBJECT_SPACE_H_
 
 #include "space.h"
+#include "safe_map.h"
+
+#include <set>
+#include <vector>
 
 namespace art {
+class SpaceSetMap;
 
 class LargeObjectSpace : public DiscontinuousSpace, public AllocSpace {
  public:
@@ -64,7 +69,7 @@
     return total_objects_allocated_;
   }
 
-  size_t FreeList(Thread* self, size_t num_ptrs, Object** ptrs);
+  size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs);
 
  protected:
 
@@ -90,19 +95,19 @@
   static LargeObjectMapSpace* Create(const std::string& name);
 
   // Return the storage space required by obj.
-  virtual size_t AllocationSize(const Object* obj);
-  virtual Object* Alloc(Thread* self, size_t num_bytes);
-  size_t Free(Thread* self, Object* ptr);
+  virtual size_t AllocationSize(const mirror::Object* obj);
+  virtual mirror::Object* Alloc(Thread* self, size_t num_bytes);
+  size_t Free(Thread* self, mirror::Object* ptr);
   virtual void Walk(DlMallocSpace::WalkCallback, void* arg);
-  virtual bool Contains(const Object* obj) const;
+  virtual bool Contains(const mirror::Object* obj) const;
 private:
   LargeObjectMapSpace(const std::string& name);
   virtual ~LargeObjectMapSpace() {}
 
   // Used to ensure mutual exclusion when the allocation spaces data structures are being modified.
   mutable Mutex lock_;
-  std::vector<Object*> large_objects_;
-  typedef SafeMap<Object*, MemMap*> MemMaps;
+  std::vector<mirror::Object*> large_objects_;
+  typedef SafeMap<mirror::Object*, MemMap*> MemMaps;
   MemMaps mem_maps_;
 };
 
@@ -111,10 +116,10 @@
   virtual ~FreeListSpace();
   static FreeListSpace* Create(const std::string& name, byte* requested_begin, size_t capacity);
 
-  size_t AllocationSize(const Object* obj);
-  Object* Alloc(Thread* self, size_t num_bytes);
-  size_t Free(Thread* self, Object* obj);
-  bool Contains(const Object* obj) const;
+  size_t AllocationSize(const mirror::Object* obj);
+  mirror::Object* Alloc(Thread* self, size_t num_bytes);
+  size_t Free(Thread* self, mirror::Object* obj);
+  bool Contains(const mirror::Object* obj) const;
   void Walk(DlMallocSpace::WalkCallback callback, void* arg);
 
   // Address at which the space begins
diff --git a/src/gc/mark_sweep-inl.h b/src/gc/mark_sweep-inl.h
new file mode 100644
index 0000000..7265023
--- /dev/null
+++ b/src/gc/mark_sweep-inl.h
@@ -0,0 +1,159 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_GC_MARK_SWEEP_INL_H_
+#define ART_SRC_GC_MARK_SWEEP_INL_H_
+
+#include "heap.h"
+#include "mirror/class.h"
+#include "mirror/field.h"
+#include "mirror/object_array.h"
+
+namespace art {
+
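+// Scans the references of obj with the given visitor, dispatching on the kind of object:
+// class objects get their instance and static reference fields visited, object arrays get the
+// class pointer and every element visited, and all other objects get their instance reference
+// fields visited. Instances of reference classes are also queued for reference processing.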
+template <typename MarkVisitor>
+inline void MarkSweep::ScanObjectVisit(const mirror::Object* obj, const MarkVisitor& visitor) {
+  DCHECK(obj != NULL);
+  if (kIsDebugBuild && !IsMarked(obj)) {
+    heap_->DumpSpaces();
+    LOG(FATAL) << "Scanning unmarked object " << obj;
+  }
+  mirror::Class* klass = obj->GetClass();
+  DCHECK(klass != NULL);
+  if (klass == java_lang_Class_) {
+    DCHECK_EQ(klass->GetClass(), java_lang_Class_);
+    if (kCountScannedTypes) {
+      ++class_count_;
+    }
+    VisitClassReferences(klass, obj, visitor);
+  } else if (klass->IsArrayClass()) {
+    if (kCountScannedTypes) {
+      ++array_count_;
+    }
+    visitor(obj, klass, mirror::Object::ClassOffset(), false);
+    if (klass->IsObjectArrayClass()) {
+      VisitObjectArrayReferences(obj->AsObjectArray<mirror::Object>(), visitor);
+    }
+  } else {
+    if (kCountScannedTypes) {
+      ++other_count_;
+    }
+    VisitOtherReferences(klass, obj, visitor);
+    if (UNLIKELY(klass->IsReferenceClass())) {
+      DelayReferenceReferent(const_cast<mirror::Object*>(obj));
+    }
+  }
+}
+
+template <typename Visitor>
+inline void MarkSweep::VisitObjectReferences(const mirror::Object* obj, const Visitor& visitor)
+    SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
+                          Locks::mutator_lock_) {
+  DCHECK(obj != NULL);
+  DCHECK(obj->GetClass() != NULL);
+
+  mirror::Class* klass = obj->GetClass();
+  DCHECK(klass != NULL);
+  if (klass == mirror::Class::GetJavaLangClass()) {
+    DCHECK_EQ(klass->GetClass(), mirror::Class::GetJavaLangClass());
+    VisitClassReferences(klass, obj, visitor);
+  } else {
+    if (klass->IsArrayClass()) {
+      visitor(obj, klass, mirror::Object::ClassOffset(), false);
+      if (klass->IsObjectArrayClass()) {
+        VisitObjectArrayReferences(obj->AsObjectArray<mirror::Object>(), visitor);
+      }
+    } else {
+      VisitOtherReferences(klass, obj, visitor);
+    }
+  }
+}
+
+template <typename Visitor>
+inline void MarkSweep::VisitInstanceFieldsReferences(const mirror::Class* klass,
+                                                     const mirror::Object* obj,
+                                                     const Visitor& visitor)
+    SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
+  DCHECK(obj != NULL);
+  DCHECK(klass != NULL);
+  VisitFieldsReferences(obj, klass->GetReferenceInstanceOffsets(), false, visitor);
+}
+
+template <typename Visitor>
+inline void MarkSweep::VisitClassReferences(const mirror::Class* klass, const mirror::Object* obj,
+                                            const Visitor& visitor)
+    SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
+  VisitInstanceFieldsReferences(klass, obj, visitor);
+  VisitStaticFieldsReferences(obj->AsClass(), visitor);
+}
+
+template <typename Visitor>
+inline void MarkSweep::VisitStaticFieldsReferences(const mirror::Class* klass,
+                                                   const Visitor& visitor)
+    SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
+  DCHECK(klass != NULL);
+  VisitFieldsReferences(klass, klass->GetReferenceStaticOffsets(), true, visitor);
+}
+
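+// ref_offsets is a packed bitmap of reference field offsets: each set bit, located with CLZ,
+// converts to a MemberOffset via CLASS_OFFSET_FROM_CLZ. The sentinel value CLASS_WALK_SUPER
+// means the offsets did not fit in the bitmap, so the fields are walked the slow way through
+// the class (and, for instance fields, its superclasses).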
+template <typename Visitor>
+inline void MarkSweep::VisitFieldsReferences(const mirror::Object* obj, uint32_t ref_offsets,
+                                             bool is_static, const Visitor& visitor) {
+  if (LIKELY(ref_offsets != CLASS_WALK_SUPER)) {
+    // Found a reference offset bitmap.  Mark the specified offsets.
+    while (ref_offsets != 0) {
+      size_t right_shift = CLZ(ref_offsets);
+      MemberOffset field_offset = CLASS_OFFSET_FROM_CLZ(right_shift);
+      const mirror::Object* ref = obj->GetFieldObject<const mirror::Object*>(field_offset, false);
+      visitor(obj, ref, field_offset, is_static);
+      ref_offsets &= ~(CLASS_HIGH_BIT >> right_shift);
+    }
+  } else {
+    // There is no reference offset bitmap.  In the non-static case,
+    // walk up the class inheritance hierarchy and find reference
+    // offsets the hard way. In the static case, just consider this
+    // class.
+    for (const mirror::Class* klass = is_static ? obj->AsClass() : obj->GetClass();
+         klass != NULL;
+         klass = is_static ? NULL : klass->GetSuperClass()) {
+      size_t num_reference_fields = (is_static
+                                     ? klass->NumReferenceStaticFields()
+                                     : klass->NumReferenceInstanceFields());
+      for (size_t i = 0; i < num_reference_fields; ++i) {
+        mirror::Field* field = (is_static ? klass->GetStaticField(i)
+                                          : klass->GetInstanceField(i));
+        MemberOffset field_offset = field->GetOffset();
+        const mirror::Object* ref = obj->GetFieldObject<const mirror::Object*>(field_offset, false);
+        visitor(obj, ref, field_offset, is_static);
+      }
+    }
+  }
+}
+
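+// Visits every element slot of an object array; each slot's offset is the array data offset
+// plus index * sizeof(Object*).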
+template <typename Visitor>
+inline void MarkSweep::VisitObjectArrayReferences(const mirror::ObjectArray<mirror::Object>* array,
+                                                  const Visitor& visitor) {
+  const int32_t length = array->GetLength();
+  for (int32_t i = 0; i < length; ++i) {
+    const mirror::Object* element = array->GetWithoutChecks(i);
+    const size_t width = sizeof(mirror::Object*);
+    MemberOffset offset = MemberOffset(i * width + mirror::Array::DataOffset(width).Int32Value());
+    visitor(array, element, offset, false);
+  }
+}
+
+}  // namespace art
+
+#endif  // ART_SRC_GC_MARK_SWEEP_INL_H_
diff --git a/src/gc/mark_sweep.cc b/src/gc/mark_sweep.cc
index 7c52c83..40102b2 100644
--- a/src/gc/mark_sweep.cc
+++ b/src/gc/mark_sweep.cc
@@ -25,22 +25,32 @@
 #include "base/logging.h"
 #include "base/macros.h"
 #include "card_table.h"
-#include "class_loader.h"
-#include "dex_cache.h"
+#include "card_table-inl.h"
 #include "heap.h"
 #include "indirect_reference_table.h"
 #include "intern_table.h"
 #include "jni_internal.h"
 #include "large_object_space.h"
 #include "monitor.h"
-#include "object.h"
+#include "mark_sweep-inl.h"
+#include "mirror/class-inl.h"
+#include "mirror/class_loader.h"
+#include "mirror/dex_cache.h"
+#include "mirror/field.h"
+#include "mirror/field-inl.h"
+#include "mirror/object-inl.h"
+#include "mirror/object_array.h"
+#include "mirror/object_array-inl.h"
 #include "runtime.h"
 #include "space.h"
+#include "space_bitmap-inl.h"
 #include "timing_logger.h"
 #include "thread.h"
 #include "thread_list.h"
 #include "verifier/method_verifier.h"
 
+using namespace art::mirror;
+
 namespace art {
 
 // Performance options.
@@ -186,7 +196,7 @@
     timings_.AddSplit("ReMarkRoots");
 
     // Scan dirty objects, this is only required if we are not doing concurrent GC.
-    RecursiveMarkDirtyObjects();
+    RecursiveMarkDirtyObjects(CardTable::kCardDirty);
   }
 
   ProcessReferences(self);
@@ -700,7 +710,7 @@
   Runtime::Current()->VisitRoots(ReMarkObjectVisitor, this);
 }
 
-void MarkSweep::SweepJniWeakGlobals(Heap::IsMarkedTester is_marked, void* arg) {
+void MarkSweep::SweepJniWeakGlobals(IsMarkedTester is_marked, void* arg) {
   JavaVMExt* vm = Runtime::Current()->GetJavaVM();
   MutexLock mu(Thread::Current(), vm->weak_globals_lock);
   IndirectReferenceTable* table = &vm->weak_globals;
diff --git a/src/gc/mark_sweep.h b/src/gc/mark_sweep.h
index 3581d98..0d43bee 100644
--- a/src/gc/mark_sweep.h
+++ b/src/gc/mark_sweep.h
@@ -14,28 +14,38 @@
  * limitations under the License.
  */
 
-#ifndef ART_SRC_MARK_SWEEP_H_
-#define ART_SRC_MARK_SWEEP_H_
+#ifndef ART_SRC_GC_MARK_SWEEP_H_
+#define ART_SRC_GC_MARK_SWEEP_H_
 
-#include "atomic_stack.h"
+#include "atomic_integer.h"
 #include "base/macros.h"
+#include "base/mutex.h"
 #include "garbage_collector.h"
-#include "heap_bitmap.h"
-#include "object.h"
+#include "gc_type.h"
 #include "offsets.h"
+#include "root_visitor.h"
+#include "timing_logger.h"
+#include "UniquePtr.h"
 
 namespace art {
-
+namespace mirror {
+class Class;
+class Object;
+template<class T> class ObjectArray;
+}
+template <typename T> class AtomicStack;
 class Barrier;
 class CheckObjectVisitor;
-class Class;
+class ContinuousSpace;
 class Heap;
 class MarkIfReachesAllocspaceVisitor;
 class ModUnionClearCardVisitor;
 class ModUnionVisitor;
 class ModUnionTableBitmap;
-class Object;
-class TimingLogger;
+typedef AtomicStack<mirror::Object*> ObjectStack;
+class SpaceBitmap;
+class StackVisitor;
+class Thread;
 class MarkStackChunk;
 
 class MarkSweep : public GarbageCollector {
@@ -79,7 +89,9 @@
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
   // Verify that image roots point to only marked objects within the alloc space.
-  void VerifyImageRoots() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+  void VerifyImageRoots()
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Builds a mark stack and recursively mark until it empties.
   void RecursiveMark()
@@ -88,8 +100,8 @@
 
   // Make a space immune, immune spaces are assumed to have all live objects marked.
   void ImmuneSpace(ContinuousSpace* space)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);;
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Bind the live bits to the mark bits of bitmaps based on the gc type.
   virtual void BindBitmaps()
@@ -102,7 +114,7 @@
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
   // Builds a mark stack with objects on dirty cards and recursively mark until it empties.
-  void RecursiveMarkDirtyObjects(byte minimum_age = CardTable::kCardDirty)
+  void RecursiveMarkDirtyObjects(byte minimum_age)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
@@ -130,61 +142,31 @@
   virtual void SwapBitmaps() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
   void SwapLargeObjects() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
-  Object* GetClearedReferences() {
+  mirror::Object* GetClearedReferences() {
     return cleared_reference_list_;
   }
 
   // Proxy for external access to ScanObject.
-  void ScanRoot(const Object* obj)
+  void ScanRoot(const mirror::Object* obj)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Blackens an object.
-  void ScanObject(const Object* obj)
+  void ScanObject(const mirror::Object* obj)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
+  // TODO: enable thread safety analysis when in use by multiple worker threads.
   template <typename MarkVisitor>
-  void ScanObjectVisit(const Object* obj, const MarkVisitor& visitor)
-      NO_THREAD_SAFETY_ANALYSIS {
-    DCHECK(obj != NULL);
-    if (kIsDebugBuild && !IsMarked(obj)) {
-      heap_->DumpSpaces();
-      LOG(FATAL) << "Scanning unmarked object " << obj;
-    }
-    Class* klass = obj->GetClass();
-    DCHECK(klass != NULL);
-    if (klass == java_lang_Class_) {
-      DCHECK_EQ(klass->GetClass(), java_lang_Class_);
-      if (kCountScannedTypes) {
-        ++class_count_;
-      }
-      VisitClassReferences(klass, obj, visitor);
-    } else if (klass->IsArrayClass()) {
-      if (kCountScannedTypes) {
-        ++array_count_;
-      }
-      visitor(obj, klass, Object::ClassOffset(), false);
-      if (klass->IsObjectArrayClass()) {
-        VisitObjectArrayReferences(obj->AsObjectArray<Object>(), visitor);
-      }
-    } else {
-      if (kCountScannedTypes) {
-        ++other_count_;
-      }
-      VisitOtherReferences(klass, obj, visitor);
-      if (UNLIKELY(klass->IsReferenceClass())) {
-        DelayReferenceReferent(const_cast<Object*>(obj));
-      }
-    }
-  }
+  void ScanObjectVisit(const mirror::Object* obj, const MarkVisitor& visitor)
+      NO_THREAD_SAFETY_ANALYSIS;
 
-  void SetFinger(Object* new_finger) {
+  void SetFinger(mirror::Object* new_finger) {
     finger_ = new_finger;
   }
 
   void DisableFinger() {
-    SetFinger(reinterpret_cast<Object*>(~static_cast<uintptr_t>(0)));
+    SetFinger(reinterpret_cast<mirror::Object*>(~static_cast<uintptr_t>(0)));
   }
 
   size_t GetFreedBytes() const {
@@ -212,7 +194,7 @@
   }
 
   // Everything inside the immune range is assumed to be marked.
-  void SetImmuneRange(Object* begin, Object* end);
+  void SetImmuneRange(mirror::Object* begin, mirror::Object* end);
 
   void SweepSystemWeaks()
       SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
@@ -221,52 +203,33 @@
   void SweepSystemWeaksArray(ObjectStack* allocations)
       SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
-  static bool VerifyIsLiveCallback(const Object* obj, void* arg)
+  static bool VerifyIsLiveCallback(const mirror::Object* obj, void* arg)
       SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
   void VerifySystemWeaks()
       SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
   // Verify that an object is live, either in a live bitmap or in the allocation stack.
-  void VerifyIsLive(const Object* obj)
+  void VerifyIsLive(const mirror::Object* obj)
       SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
   template <typename Visitor>
-  static void VisitObjectReferences(const Object* obj, const Visitor& visitor)
+  static void VisitObjectReferences(const mirror::Object* obj, const Visitor& visitor)
       SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
-                            Locks::mutator_lock_) {
-    DCHECK(obj != NULL);
-    DCHECK(obj->GetClass() != NULL);
+                            Locks::mutator_lock_);
 
-    Class* klass = obj->GetClass();
-    DCHECK(klass != NULL);
-    if (klass == Class::GetJavaLangClass()) {
-      DCHECK_EQ(klass->GetClass(), Class::GetJavaLangClass());
-      VisitClassReferences(klass, obj, visitor);
-    } else {
-      if (klass->IsArrayClass()) {
-        visitor(obj, klass, Object::ClassOffset(), false);
-        if (klass->IsObjectArrayClass()) {
-          VisitObjectArrayReferences(obj->AsObjectArray<Object>(), visitor);
-        }
-      } else {
-        VisitOtherReferences(klass, obj, visitor);
-      }
-    }
-  }
-
-  static void MarkObjectCallback(const Object* root, void* arg)
+  static void MarkObjectCallback(const mirror::Object* root, void* arg)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
-  static void MarkRootParallelCallback(const Object* root, void* arg);
+  static void MarkRootParallelCallback(const mirror::Object* root, void* arg);
 
   // Marks an object.
-  void MarkObject(const Object* obj)
+  void MarkObject(const mirror::Object* obj)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
-  void MarkRoot(const Object* obj)
+  void MarkRoot(const mirror::Object* obj)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
@@ -277,45 +240,46 @@
 
  protected:
   // Returns true if the object has its bit set in the mark bitmap.
-  bool IsMarked(const Object* object) const;
+  bool IsMarked(const mirror::Object* object) const;
 
-  static bool IsMarkedCallback(const Object* object, void* arg)
+  static bool IsMarkedCallback(const mirror::Object* object, void* arg)
       SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
-  static bool IsMarkedArrayCallback(const Object* object, void* arg)
+  static bool IsMarkedArrayCallback(const mirror::Object* object, void* arg)
       SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
-  static void ReMarkObjectVisitor(const Object* root, void* arg)
+  static void ReMarkObjectVisitor(const mirror::Object* root, void* arg)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
-  static void VerifyImageRootVisitor(Object* root, void* arg)
+  static void VerifyImageRootVisitor(mirror::Object* root, void* arg)
       SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
                             Locks::mutator_lock_);
 
-  void MarkObjectNonNull(const Object* obj, bool check_finger)
+  void MarkObjectNonNull(const mirror::Object* obj, bool check_finger)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
-  void MarkObjectNonNullParallel(const Object* obj, bool check_finger);
+  void MarkObjectNonNullParallel(const mirror::Object* obj, bool check_finger);
 
-  bool MarkLargeObject(const Object* obj)
+  bool MarkLargeObject(const mirror::Object* obj)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
   // Returns true if we need to add obj to a mark stack.
-  bool MarkObjectParallel(const Object* obj) NO_THREAD_SAFETY_ANALYSIS;
+  bool MarkObjectParallel(const mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS;
 
-  static void SweepCallback(size_t num_ptrs, Object** ptrs, void* arg)
+  static void SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
   // Special sweep for zygote that just marks objects / dirties cards.
-  static void ZygoteSweepCallback(size_t num_ptrs, Object** ptrs, void* arg)
+  static void ZygoteSweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
-  void CheckReference(const Object* obj, const Object* ref, MemberOffset offset, bool is_static)
+  void CheckReference(const mirror::Object* obj, const mirror::Object* ref, MemberOffset offset,
+                      bool is_static)
       SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
 
-  void CheckObject(const Object* obj)
+  void CheckObject(const mirror::Object* obj)
       SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
 
   // Verify the roots of the heap and print out information related to any invalid roots.
@@ -326,90 +290,41 @@
   // Expand mark stack to 2x its current size. Thread safe.
   void ExpandMarkStack();
 
-  static void VerifyRootCallback(const Object* root, void* arg, size_t vreg,
+  static void VerifyRootCallback(const mirror::Object* root, void* arg, size_t vreg,
                                  const StackVisitor *visitor);
 
-  void VerifyRoot(const Object* root, size_t vreg, const StackVisitor* visitor)
+  void VerifyRoot(const mirror::Object* root, size_t vreg, const StackVisitor* visitor)
       NO_THREAD_SAFETY_ANALYSIS;
 
   template <typename Visitor>
-  static void VisitInstanceFieldsReferences(const Class* klass, const Object* obj,
+  static void VisitInstanceFieldsReferences(const mirror::Class* klass, const mirror::Object* obj,
                                             const Visitor& visitor)
-      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
-    DCHECK(obj != NULL);
-    DCHECK(klass != NULL);
-    VisitFieldsReferences(obj, klass->GetReferenceInstanceOffsets(), false, visitor);
-  }
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
 
   // Visit the header, static field references, and interface pointers of a class object.
   template <typename Visitor>
-  static void VisitClassReferences(const Class* klass, const Object* obj,
+  static void VisitClassReferences(const mirror::Class* klass, const mirror::Object* obj,
                                    const Visitor& visitor)
-      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
-    VisitInstanceFieldsReferences(klass, obj, visitor);
-    VisitStaticFieldsReferences(obj->AsClass(), visitor);
-  }
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
 
   template <typename Visitor>
-  static void VisitStaticFieldsReferences(const Class* klass, const Visitor& visitor)
-      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
-    DCHECK(klass != NULL);
-    VisitFieldsReferences(klass, klass->GetReferenceStaticOffsets(), true, visitor);
-  }
+  static void VisitStaticFieldsReferences(const mirror::Class* klass, const Visitor& visitor)
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
 
   template <typename Visitor>
-  static void VisitFieldsReferences(const Object* obj, uint32_t ref_offsets, bool is_static,
-                             const Visitor& visitor)
-      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
-    if (LIKELY(ref_offsets != CLASS_WALK_SUPER)) {
-      // Found a reference offset bitmap.  Mark the specified offsets.
-      while (ref_offsets != 0) {
-        size_t right_shift = CLZ(ref_offsets);
-        MemberOffset field_offset = CLASS_OFFSET_FROM_CLZ(right_shift);
-        const Object* ref = obj->GetFieldObject<const Object*>(field_offset, false);
-        visitor(obj, ref, field_offset, is_static);
-        ref_offsets &= ~(CLASS_HIGH_BIT >> right_shift);
-      }
-    } else {
-      // There is no reference offset bitmap.  In the non-static case,
-      // walk up the class inheritance hierarchy and find reference
-      // offsets the hard way. In the static case, just consider this
-      // class.
-      for (const Class* klass = is_static ? obj->AsClass() : obj->GetClass();
-           klass != NULL;
-           klass = is_static ? NULL : klass->GetSuperClass()) {
-        size_t num_reference_fields = (is_static
-                                       ? klass->NumReferenceStaticFields()
-                                       : klass->NumReferenceInstanceFields());
-        for (size_t i = 0; i < num_reference_fields; ++i) {
-          Field* field = (is_static
-                          ? klass->GetStaticField(i)
-                          : klass->GetInstanceField(i));
-          MemberOffset field_offset = field->GetOffset();
-          const Object* ref = obj->GetFieldObject<const Object*>(field_offset, false);
-          visitor(obj, ref, field_offset, is_static);
-        }
-      }
-    }
-  }
+  static void VisitFieldsReferences(const mirror::Object* obj, uint32_t ref_offsets, bool is_static,
+                                    const Visitor& visitor)
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
 
   // Visit all of the references in an object array.
   template <typename Visitor>
-  static void VisitObjectArrayReferences(const ObjectArray<Object>* array,
+  static void VisitObjectArrayReferences(const mirror::ObjectArray<mirror::Object>* array,
                                          const Visitor& visitor)
-      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
-    const int32_t length = array->GetLength();
-    for (int32_t i = 0; i < length; ++i) {
-      const Object* element = array->GetWithoutChecks(i);
-      const size_t width = sizeof(Object*);
-      MemberOffset offset = MemberOffset(i * width + Array::DataOffset(width).Int32Value());
-      visitor(array, element, offset, false);
-    }
-  }
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
 
   // Visits the header and field references of a data object.
   template <typename Visitor>
-  static void VisitOtherReferences(const Class* klass, const Object* obj,
+  static void VisitOtherReferences(const mirror::Class* klass, const mirror::Object* obj,
                                    const Visitor& visitor)
       SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
     return VisitInstanceFieldsReferences(klass, obj, visitor);
@@ -421,7 +336,7 @@
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Schedules an unmarked object for reference processing.
-  void DelayReferenceReferent(Object* reference)
+  void DelayReferenceReferent(mirror::Object* reference)
       SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
 
   // Recursively blackens objects on the mark stack.
@@ -433,25 +348,25 @@
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  void EnqueueFinalizerReferences(Object** ref)
+  void EnqueueFinalizerReferences(mirror::Object** ref)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  void PreserveSomeSoftReferences(Object** ref)
+  void PreserveSomeSoftReferences(mirror::Object** ref)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  void ClearWhiteReferences(Object** list)
+  void ClearWhiteReferences(mirror::Object** list)
       SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
 
-  void ProcessReferences(Object** soft_references, bool clear_soft_references,
-                         Object** weak_references,
-                         Object** finalizer_references,
-                         Object** phantom_references)
+  void ProcessReferences(mirror::Object** soft_references, bool clear_soft_references,
+                         mirror::Object** weak_references,
+                         mirror::Object** finalizer_references,
+                         mirror::Object** phantom_references)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  void SweepJniWeakGlobals(Heap::IsMarkedTester is_marked, void* arg)
+  void SweepJniWeakGlobals(IsMarkedTester is_marked, void* arg)
       SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
   // Whether or not we count how many of each type of object were scanned.
@@ -461,21 +376,21 @@
   SpaceBitmap* current_mark_bitmap_;
 
   // Cache java.lang.Class for optimization.
-  Class* java_lang_Class_;
+  mirror::Class* java_lang_Class_;
 
   ObjectStack* mark_stack_;
 
-  Object* finger_;
+  mirror::Object* finger_;
 
   // Immune range; every object inside the immune range is assumed to be marked.
-  Object* immune_begin_;
-  Object* immune_end_;
+  mirror::Object* immune_begin_;
+  mirror::Object* immune_end_;
 
-  Object* soft_reference_list_;
-  Object* weak_reference_list_;
-  Object* finalizer_reference_list_;
-  Object* phantom_reference_list_;
-  Object* cleared_reference_list_;
+  mirror::Object* soft_reference_list_;
+  mirror::Object* weak_reference_list_;
+  mirror::Object* finalizer_reference_list_;
+  mirror::Object* phantom_reference_list_;
+  mirror::Object* cleared_reference_list_;
 
   AtomicInteger freed_bytes_;
   AtomicInteger freed_objects_;
@@ -529,4 +444,4 @@
 
 }  // namespace art
 
-#endif  // ART_SRC_MARK_SWEEP_H_
+#endif  // ART_SRC_GC_MARK_SWEEP_H_
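The fast path moved out of VisitFieldsReferences above decodes a 32-bit reference-offset bitmap with CLZ, visiting one reference field per set bit and clearing each bit as it goes. A minimal standalone sketch of that walk, with an 8-byte header and 4-byte reference slots assumed in place of CLASS_OFFSET_FROM_CLZ (both values are illustrative, not taken from this change):

#include <cstdint>
#include <cstdio>

int main() {
  // Hypothetical reference-offset bitmap: the most significant bit corresponds
  // to the first reference slot in the object.
  uint32_t ref_offsets = 0x90000000u;  // Reference fields in slots 0 and 3.
  while (ref_offsets != 0) {
    const uint32_t right_shift = __builtin_clz(ref_offsets);  // Stands in for ART's CLZ.
    // Stand-in for CLASS_OFFSET_FROM_CLZ: assumed 8-byte object header
    // followed by 4-byte reference slots.
    const uint32_t field_offset = 8 + right_shift * 4;
    std::printf("visit reference field at offset %u\n", field_offset);
    ref_offsets &= ~(0x80000000u >> right_shift);  // Clear the visited bit.
  }
  return 0;
}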
diff --git a/src/gc/mod_union_table-inl.h b/src/gc/mod_union_table-inl.h
new file mode 100644
index 0000000..c1c69fb
--- /dev/null
+++ b/src/gc/mod_union_table-inl.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_GC_MOD_UNION_TABLE_INL_H_
+#define ART_SRC_GC_MOD_UNION_TABLE_INL_H_
+
+#include "mod_union_table.h"
+
+namespace art {
+
+template <typename Implementation>
+class ModUnionTableToZygoteAllocspace : public Implementation {
+ public:
+  ModUnionTableToZygoteAllocspace(Heap* heap) : Implementation(heap) {
+  }
+
+  bool AddReference(const mirror::Object* /* obj */, const mirror::Object* ref) {
+    const Spaces& spaces = Implementation::GetHeap()->GetSpaces();
+    for (Spaces::const_iterator it = spaces.begin(); it != spaces.end(); ++it) {
+      if ((*it)->Contains(ref)) {
+        return (*it)->IsAllocSpace();
+      }
+    }
+    // Assume it points to a large object.
+    // TODO: Check.
+    return true;
+  }
+};
+
+template <typename Implementation>
+class ModUnionTableToAllocspace : public Implementation {
+ public:
+  ModUnionTableToAllocspace(Heap* heap) : Implementation(heap) {
+  }
+
+  bool AddReference(const mirror::Object* /* obj */, const mirror::Object* ref) {
+    const Spaces& spaces = Implementation::GetHeap()->GetSpaces();
+    for (Spaces::const_iterator it = spaces.begin(); it != spaces.end(); ++it) {
+      if ((*it)->Contains(ref)) {
+        return (*it)->GetGcRetentionPolicy() == kGcRetentionPolicyAlwaysCollect;
+      }
+    }
+    // Assume it points to a large object.
+    // TODO: Check.
+    return true;
+  }
+};
+
+}  // namespace art
+
+#endif  // ART_SRC_GC_MOD_UNION_TABLE_INL_H_
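The two wrappers above contribute only an AddReference policy and inherit the rest of their behavior from the Implementation template parameter. A toy, self-contained sketch of that mixin shape (ToyTable and the int "heap" are placeholders; no real Heap or Space types are modeled):

#include <cstdio>

// Placeholder base standing in for a concrete mod-union table implementation.
class ToyTable {
 public:
  explicit ToyTable(int heap) : heap_(heap) {}
  int GetHeap() const { return heap_; }
 private:
  int heap_;
};

// Mirrors the shape of ModUnionTableToAllocspace<Implementation>: the wrapper
// layers a reference-filtering policy over whichever table it wraps.
template <typename Implementation>
class ToyToAllocspace : public Implementation {
 public:
  explicit ToyToAllocspace(int heap) : Implementation(heap) {}
  bool AddReference(const void* /* obj */, const void* ref) {
    // The real policy asks the heap which space contains ref; the sketch just
    // tracks non-null references.
    return ref != nullptr;
  }
};

int main() {
  ToyToAllocspace<ToyTable> table(7);
  int x = 0;
  std::printf("heap=%d track=%d\n", table.GetHeap(),
              table.AddReference(nullptr, &x));
  return 0;
}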
diff --git a/src/gc/mod_union_table.cc b/src/gc/mod_union_table.cc
index 8953c5a..da950bb 100644
--- a/src/gc/mod_union_table.cc
+++ b/src/gc/mod_union_table.cc
@@ -17,12 +17,22 @@
 #include "mod_union_table.h"
 
 #include "base/stl_util.h"
+#include "card_table-inl.h"
 #include "heap.h"
 #include "heap_bitmap.h"
 #include "mark_sweep.h"
+#include "mark_sweep-inl.h"
+#include "mirror/object-inl.h"
+#include "mirror/class-inl.h"
+#include "mirror/field-inl.h"
+#include "mirror/object_array-inl.h"
 #include "space.h"
+#include "space_bitmap-inl.h"
+#include "thread.h"
 #include "UniquePtr.h"
 
+using namespace art::mirror;
+
 namespace art {
 
 class MarkIfReachesAllocspaceVisitor {
@@ -260,7 +270,7 @@
   // TODO: Fixme when annotalysis works with visitors.
   void operator ()(const Object* obj, const Object* ref, const MemberOffset& /* offset */,
                      bool /* is_static */) const
-      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
     Heap* heap = mod_union_table_->GetHeap();
     if (ref != NULL && mod_union_table_->AddReference(obj, ref) &&
         references_.find(ref) == references_.end()) {
diff --git a/src/gc/mod_union_table.h b/src/gc/mod_union_table.h
index f3da41c..23c0a51 100644
--- a/src/gc/mod_union_table.h
+++ b/src/gc/mod_union_table.h
@@ -14,23 +14,30 @@
  * limitations under the License.
  */
 
-#ifndef ART_SRC_MOD_UNION_TABLE_H_
-#define ART_SRC_MOD_UNION_TABLE_H_
+#ifndef ART_SRC_GC_MOD_UNION_TABLE_H_
+#define ART_SRC_GC_MOD_UNION_TABLE_H_
 
-#include "heap.h"
+#include "globals.h"
 #include "safe_map.h"
-#include "space.h"
+
+#include <set>
+#include <vector>
 
 namespace art {
-
+namespace mirror {
+class Object;
+}  // namespace mirror
+class ContinuousSpace;
 class Heap;
 class HeapBitmap;
+class MarkSweep;
 class Space;
+class SpaceBitmap;
 
 // Base class
 class ModUnionTable {
  public:
-  typedef std::vector<const Object*> ReferenceArray;
+  typedef std::vector<const mirror::Object*> ReferenceArray;
   typedef std::set<byte*> ClearedCards;
 
   ModUnionTable(Heap* heap) : heap_(heap) {
@@ -118,7 +125,7 @@
   void Verify() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
   // Function that tells whether or not to add a reference to the table.
-  virtual bool AddReference(const Object* obj, const Object* ref) = 0;
+  virtual bool AddReference(const mirror::Object* obj, const mirror::Object* ref) = 0;
 
  protected:
   // Cleared card array, used to update the mod-union table.
@@ -155,44 +162,6 @@
   ClearedCards cleared_cards_;
 };
 
-template <typename Implementation>
-class ModUnionTableToZygoteAllocspace : public Implementation {
-public:
-  ModUnionTableToZygoteAllocspace(Heap* heap) : Implementation(heap) {
-  }
-
-  bool AddReference(const Object* /* obj */, const Object* ref) {
-    const Spaces& spaces = Implementation::GetHeap()->GetSpaces();
-    for (Spaces::const_iterator it = spaces.begin(); it != spaces.end(); ++it) {
-      if ((*it)->Contains(ref)) {
-        return (*it)->IsAllocSpace();
-      }
-    }
-    // Assume it points to a large object.
-    // TODO: Check.
-    return true;
-  }
-};
-
-template <typename Implementation>
-class ModUnionTableToAllocspace : public Implementation {
-public:
-  ModUnionTableToAllocspace(Heap* heap) : Implementation(heap) {
-  }
-
-  bool AddReference(const Object* /* obj */, const Object* ref) {
-    const Spaces& spaces = Implementation::GetHeap()->GetSpaces();
-    for (Spaces::const_iterator it = spaces.begin(); it != spaces.end(); ++it) {
-      if ((*it)->Contains(ref)) {
-        return (*it)->GetGcRetentionPolicy() == kGcRetentionPolicyAlwaysCollect;
-      }
-    }
-    // Assume it points to a large object.
-    // TODO: Check.
-    return true;
-  }
-};
-
 }  // namespace art
 
-#endif  // ART_SRC_MOD_UNION_TABLE_H_
+#endif  // ART_SRC_GC_MOD_UNION_TABLE_H_
diff --git a/src/gc/partial_mark_sweep.cc b/src/gc/partial_mark_sweep.cc
index 64f09ff..f9c1787 100644
--- a/src/gc/partial_mark_sweep.cc
+++ b/src/gc/partial_mark_sweep.cc
@@ -14,32 +14,38 @@
  * limitations under the License.
  */
 
+#include "partial_mark_sweep.h"
+
+#include "heap.h"
 #include "large_object_space.h"
 #include "partial_mark_sweep.h"
 #include "space.h"
+#include "thread.h"
 
 namespace art {
-  PartialMarkSweep::PartialMarkSweep(Heap* heap, bool is_concurrent)
-      : MarkSweep(heap, is_concurrent) {
-    cumulative_timings_.SetName(GetName());
-  }
 
-  PartialMarkSweep::~PartialMarkSweep() {
+PartialMarkSweep::PartialMarkSweep(Heap* heap, bool is_concurrent)
+    : MarkSweep(heap, is_concurrent) {
+  cumulative_timings_.SetName(GetName());
+}
 
-  }
+PartialMarkSweep::~PartialMarkSweep() {
 
-  void PartialMarkSweep::BindBitmaps() {
-    MarkSweep::BindBitmaps();
+}
 
-    Spaces& spaces = GetHeap()->GetSpaces();
-    WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
-    // For partial GCs we need to bind the bitmap of the zygote space so that all objects in the
-    // zygote space are viewed as marked.
-    for (Spaces::iterator it = spaces.begin(); it != spaces.end(); ++it) {
-      ContinuousSpace* space = *it;
-      if (space->GetGcRetentionPolicy() == kGcRetentionPolicyFullCollect) {
-        ImmuneSpace(space);
-      }
+void PartialMarkSweep::BindBitmaps() {
+  MarkSweep::BindBitmaps();
+
+  Spaces& spaces = GetHeap()->GetSpaces();
+  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
+  // For partial GCs we need to bind the bitmap of the zygote space so that all objects in the
+  // zygote space are viewed as marked.
+  for (Spaces::iterator it = spaces.begin(); it != spaces.end(); ++it) {
+    ContinuousSpace* space = *it;
+    if (space->GetGcRetentionPolicy() == kGcRetentionPolicyFullCollect) {
+      ImmuneSpace(space);
     }
   }
+}
+
 }  // namespace art
diff --git a/src/gc/partial_mark_sweep.h b/src/gc/partial_mark_sweep.h
index 80a1563..64c0bcd 100644
--- a/src/gc/partial_mark_sweep.h
+++ b/src/gc/partial_mark_sweep.h
@@ -14,27 +14,14 @@
  * limitations under the License.
  */
 
-#ifndef ART_SRC_PARTIAL_MARK_SWEEP_H_
-#define ART_SRC_PARTIAL_MARK_SWEEP_H_
+#ifndef ART_SRC_GC_PARTIAL_MARK_SWEEP_H_
+#define ART_SRC_GC_PARTIAL_MARK_SWEEP_H_
 
 #include "locks.h"
 #include "mark_sweep.h"
-#include "utils.h"
 
 namespace art {
 
-class Barrier;
-class CheckObjectVisitor;
-class Class;
-class Heap;
-class MarkIfReachesAllocspaceVisitor;
-class ModUnionClearCardVisitor;
-class ModUnionVisitor;
-class ModUnionTableBitmap;
-class Object;
-class TimingLogger;
-class MarkStackChunk;
-
 class PartialMarkSweep : public MarkSweep {
  public:
   virtual GcType GetGcType() const {
@@ -53,4 +40,4 @@
 
 }  // namespace art
 
-#endif  // ART_SRC_PARTIAL_MARK_SWEEP_H_
+#endif  // ART_SRC_GC_PARTIAL_MARK_SWEEP_H_
diff --git a/src/gc/space.cc b/src/gc/space.cc
index 04f932d..9db84f2 100644
--- a/src/gc/space.cc
+++ b/src/gc/space.cc
@@ -19,10 +19,16 @@
 #include "base/logging.h"
 #include "base/stl_util.h"
 #include "base/unix_file/fd_file.h"
+#include "card_table.h"
 #include "dlmalloc.h"
 #include "image.h"
+#include "mirror/array.h"
+#include "mirror/abstract_method.h"
 #include "os.h"
+#include "runtime.h"
 #include "space_bitmap.h"
+#include "space_bitmap-inl.h"
+#include "thread.h"
 #include "UniquePtr.h"
 #include "utils.h"
 
@@ -204,12 +210,12 @@
   mark_bitmap_->SetName(temp_name);
 }
 
-Object* DlMallocSpace::AllocWithoutGrowthLocked(size_t num_bytes) {
+mirror::Object* DlMallocSpace::AllocWithoutGrowthLocked(size_t num_bytes) {
   if (kDebugSpaces) {
     num_bytes += sizeof(word);
   }
 
-  Object* result = reinterpret_cast<Object*>(mspace_calloc(mspace_, 1, num_bytes));
+  mirror::Object* result = reinterpret_cast<mirror::Object*>(mspace_calloc(mspace_, 1, num_bytes));
   if (kDebugSpaces && result != NULL) {
     CHECK(Contains(result)) << "Allocation (" << reinterpret_cast<void*>(result)
         << ") not in bounds of allocation space " << *this;
@@ -225,18 +231,18 @@
   return result;
 }
 
-Object* DlMallocSpace::Alloc(Thread* self, size_t num_bytes) {
+mirror::Object* DlMallocSpace::Alloc(Thread* self, size_t num_bytes) {
   MutexLock mu(self, lock_);
   return AllocWithoutGrowthLocked(num_bytes);
 }
 
-Object* DlMallocSpace::AllocWithGrowth(Thread* self, size_t num_bytes) {
+mirror::Object* DlMallocSpace::AllocWithGrowth(Thread* self, size_t num_bytes) {
   MutexLock mu(self, lock_);
   // Grow as much as possible within the mspace.
   size_t max_allowed = Capacity();
   mspace_set_footprint_limit(mspace_, max_allowed);
   // Try the allocation.
-  Object* result = AllocWithoutGrowthLocked(num_bytes);
+  mirror::Object* result = AllocWithoutGrowthLocked(num_bytes);
   // Shrink back down as small as possible.
   size_t footprint = mspace_footprint(mspace_);
   mspace_set_footprint_limit(mspace_, footprint);
@@ -301,7 +307,7 @@
   return alloc_space;
 }
 
-size_t DlMallocSpace::Free(Thread* self, Object* ptr) {
+size_t DlMallocSpace::Free(Thread* self, mirror::Object* ptr) {
   MutexLock mu(self, lock_);
   if (kDebugSpaces) {
     CHECK(ptr != NULL);
@@ -317,13 +323,13 @@
   return bytes_freed;
 }
 
-size_t DlMallocSpace::FreeList(Thread* self, size_t num_ptrs, Object** ptrs) {
+size_t DlMallocSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
   DCHECK(ptrs != NULL);
 
   // Don't need the lock to calculate the size of the freed pointers.
   size_t bytes_freed = 0;
   for (size_t i = 0; i < num_ptrs; i++) {
-    Object* ptr = ptrs[i];
+    mirror::Object* ptr = ptrs[i];
     const size_t look_ahead = 8;
     if (kPrefetchDuringDlMallocFreeList && i + look_ahead < num_ptrs) {
       // The head of chunk for the allocation is sizeof(size_t) behind the allocation.
@@ -397,12 +403,12 @@
 }
 
 // Virtual functions can't get inlined.
-inline size_t DlMallocSpace::InternalAllocationSize(const Object* obj) {
+inline size_t DlMallocSpace::InternalAllocationSize(const mirror::Object* obj) {
   return mspace_usable_size(const_cast<void*>(reinterpret_cast<const void*>(obj))) +
       kChunkOverhead;
 }
 
-size_t DlMallocSpace::AllocationSize(const Object* obj) {
+size_t DlMallocSpace::AllocationSize(const mirror::Object* obj) {
   return InternalAllocationSize(obj);
 }
 
@@ -504,29 +510,29 @@
   DCHECK_EQ(0, memcmp(&image_header, map->Begin(), sizeof(ImageHeader)));
 
   Runtime* runtime = Runtime::Current();
-  Object* jni_stub_array = image_header.GetImageRoot(ImageHeader::kJniStubArray);
-  runtime->SetJniDlsymLookupStub(down_cast<ByteArray*>(jni_stub_array));
+  mirror::Object* jni_stub_array = image_header.GetImageRoot(ImageHeader::kJniStubArray);
+  runtime->SetJniDlsymLookupStub(down_cast<mirror::ByteArray*>(jni_stub_array));
 
-  Object* ame_stub_array = image_header.GetImageRoot(ImageHeader::kAbstractMethodErrorStubArray);
-  runtime->SetAbstractMethodErrorStubArray(down_cast<ByteArray*>(ame_stub_array));
+  mirror::Object* ame_stub_array = image_header.GetImageRoot(ImageHeader::kAbstractMethodErrorStubArray);
+  runtime->SetAbstractMethodErrorStubArray(down_cast<mirror::ByteArray*>(ame_stub_array));
 
-  Object* resolution_stub_array =
+  mirror::Object* resolution_stub_array =
       image_header.GetImageRoot(ImageHeader::kStaticResolutionStubArray);
   runtime->SetResolutionStubArray(
-      down_cast<ByteArray*>(resolution_stub_array), Runtime::kStaticMethod);
+      down_cast<mirror::ByteArray*>(resolution_stub_array), Runtime::kStaticMethod);
   resolution_stub_array = image_header.GetImageRoot(ImageHeader::kUnknownMethodResolutionStubArray);
   runtime->SetResolutionStubArray(
-      down_cast<ByteArray*>(resolution_stub_array), Runtime::kUnknownMethod);
+      down_cast<mirror::ByteArray*>(resolution_stub_array), Runtime::kUnknownMethod);
 
-  Object* resolution_method = image_header.GetImageRoot(ImageHeader::kResolutionMethod);
-  runtime->SetResolutionMethod(down_cast<AbstractMethod*>(resolution_method));
+  mirror::Object* resolution_method = image_header.GetImageRoot(ImageHeader::kResolutionMethod);
+  runtime->SetResolutionMethod(down_cast<mirror::AbstractMethod*>(resolution_method));
 
-  Object* callee_save_method = image_header.GetImageRoot(ImageHeader::kCalleeSaveMethod);
-  runtime->SetCalleeSaveMethod(down_cast<AbstractMethod*>(callee_save_method), Runtime::kSaveAll);
+  mirror::Object* callee_save_method = image_header.GetImageRoot(ImageHeader::kCalleeSaveMethod);
+  runtime->SetCalleeSaveMethod(down_cast<mirror::AbstractMethod*>(callee_save_method), Runtime::kSaveAll);
   callee_save_method = image_header.GetImageRoot(ImageHeader::kRefsOnlySaveMethod);
-  runtime->SetCalleeSaveMethod(down_cast<AbstractMethod*>(callee_save_method), Runtime::kRefsOnly);
+  runtime->SetCalleeSaveMethod(down_cast<mirror::AbstractMethod*>(callee_save_method), Runtime::kRefsOnly);
   callee_save_method = image_header.GetImageRoot(ImageHeader::kRefsAndArgsSaveMethod);
-  runtime->SetCalleeSaveMethod(down_cast<AbstractMethod*>(callee_save_method), Runtime::kRefsAndArgs);
+  runtime->SetCalleeSaveMethod(down_cast<mirror::AbstractMethod*>(callee_save_method), Runtime::kRefsAndArgs);
 
   ImageSpace* space = new ImageSpace(image_file_name, map.release());
   if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
@@ -548,7 +554,7 @@
   byte* end = End();
   while (current < end) {
     DCHECK_ALIGNED(current, kObjectAlignment);
-    const Object* obj = reinterpret_cast<const Object*>(current);
+    const mirror::Object* obj = reinterpret_cast<const mirror::Object*>(current);
     live_bitmap->Set(obj);
     current += RoundUp(obj->SizeOf(), kObjectAlignment);
   }
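DlMallocSpace::AllocWithGrowth above raises the mspace footprint limit to the full capacity, attempts the allocation, then clamps the limit back down to the bytes actually in use. A self-contained sketch of that grow-then-shrink pattern, with the mspace faked by a counter (real code calls mspace_set_footprint_limit and mspace_footprint):

#include <cstddef>
#include <cstdio>

// Fake mspace: just enough state to show the footprint-limit dance.
struct FakeMspace {
  size_t footprint;  // Bytes currently in use.
  size_t limit;      // Current footprint limit.
};

static void* AllocWithGrowth(FakeMspace* ms, size_t capacity, size_t num_bytes) {
  ms->limit = capacity;  // Grow as much as possible within the mspace.
  void* result = nullptr;
  if (ms->footprint + num_bytes <= ms->limit) {  // Try the allocation.
    ms->footprint += num_bytes;
    result = ms;  // Any non-null pointer will do for the sketch.
  }
  ms->limit = ms->footprint;  // Shrink back down as small as possible.
  return result;
}

int main() {
  FakeMspace ms = {0, 4 * 1024};
  void* p = AllocWithGrowth(&ms, 64 * 1024, 8 * 1024);
  std::printf("alloc %s, footprint=%zu, limit=%zu\n",
              p != nullptr ? "succeeded" : "failed", ms.footprint, ms.limit);
  return 0;
}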
diff --git a/src/gc/space.h b/src/gc/space.h
index 2ed4988..d2bcd53 100644
--- a/src/gc/space.h
+++ b/src/gc/space.h
@@ -31,10 +31,12 @@
 
 static const bool kDebugSpaces = kIsDebugBuild;
 
+namespace mirror {
+class Object;
+}  // namespace mirror
 class DlMallocSpace;
 class ImageSpace;
 class LargeObjectSpace;
-class Object;
 class SpaceBitmap;
 
 enum GcRetentionPolicy {
@@ -57,7 +59,7 @@
  public:
   virtual bool CanAllocateInto() const = 0;
   virtual bool IsCompactible() const = 0;
-  virtual bool Contains(const Object* obj) const = 0;
+  virtual bool Contains(const mirror::Object* obj) const = 0;
   virtual SpaceType GetType() const = 0;
   virtual GcRetentionPolicy GetGcRetentionPolicy() const = 0;
   virtual std::string GetName() const = 0;
@@ -108,16 +110,16 @@
   virtual uint64_t GetTotalObjectsAllocated() const = 0;
 
   // Allocate num_bytes without allowing growth.
-  virtual Object* Alloc(Thread* self, size_t num_bytes) = 0;
+  virtual mirror::Object* Alloc(Thread* self, size_t num_bytes) = 0;
 
   // Return the storage space required by obj.
-  virtual size_t AllocationSize(const Object* obj) = 0;
+  virtual size_t AllocationSize(const mirror::Object* obj) = 0;
 
   // Returns how many bytes were freed.
-  virtual size_t Free(Thread* self, Object* ptr) = 0;
+  virtual size_t Free(Thread* self, mirror::Object* ptr) = 0;
 
   // Returns how many bytes were freed.
-  virtual size_t FreeList(Thread* self, size_t num_ptrs, Object** ptrs) = 0;
+  virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) = 0;
 
  protected:
   AllocSpace() {}
@@ -149,12 +151,12 @@
   virtual SpaceBitmap* GetMarkBitmap() const = 0;
 
   // Is object within this space?
-  bool HasAddress(const Object* obj) const {
+  bool HasAddress(const mirror::Object* obj) const {
     const byte* byte_ptr = reinterpret_cast<const byte*>(obj);
     return Begin() <= byte_ptr && byte_ptr < End();
   }
 
-  virtual bool Contains(const Object* obj) const {
+  virtual bool Contains(const mirror::Object* obj) const {
     return HasAddress(obj);
   }
 
@@ -188,7 +190,7 @@
 class DiscontinuousSpace : public virtual Space {
  public:
   // Is object within this space?
-  virtual bool Contains(const Object* obj) const = 0;
+  virtual bool Contains(const mirror::Object* obj) const = 0;
 
   virtual std::string GetName() const {
     return name_;
@@ -267,15 +269,15 @@
                             size_t capacity, byte* requested_begin);
 
   // Allocate num_bytes without allowing the underlying mspace to grow.
-  virtual Object* AllocWithGrowth(Thread* self, size_t num_bytes);
+  virtual mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes);
 
   // Allocate num_bytes allowing the underlying mspace to grow.
-  virtual Object* Alloc(Thread* self, size_t num_bytes);
+  virtual mirror::Object* Alloc(Thread* self, size_t num_bytes);
 
   // Return the storage space required by obj.
-  virtual size_t AllocationSize(const Object* obj);
-  virtual size_t Free(Thread* self, Object* ptr);
-  virtual size_t FreeList(Thread* self, size_t num_ptrs, Object** ptrs);
+  virtual size_t AllocationSize(const mirror::Object* obj);
+  virtual size_t Free(Thread* self, mirror::Object* ptr);
+  virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs);
 
   void* MoreCore(intptr_t increment);
 
@@ -353,8 +355,8 @@
   }
 
  private:
-  size_t InternalAllocationSize(const Object* obj);
-  Object* AllocWithoutGrowthLocked(size_t num_bytes) EXCLUSIVE_LOCKS_REQUIRED(lock_);
+  size_t InternalAllocationSize(const mirror::Object* obj);
+  mirror::Object* AllocWithoutGrowthLocked(size_t num_bytes) EXCLUSIVE_LOCKS_REQUIRED(lock_);
 
   UniquePtr<SpaceBitmap> live_bitmap_;
   UniquePtr<SpaceBitmap> mark_bitmap_;
diff --git a/src/gc/space_bitmap-inl.h b/src/gc/space_bitmap-inl.h
new file mode 100644
index 0000000..e1fdd29
--- /dev/null
+++ b/src/gc/space_bitmap-inl.h
@@ -0,0 +1,141 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_GC_SPACE_BITMAP_INL_H_
+#define ART_SRC_GC_SPACE_BITMAP_INL_H_
+
+#include "base/logging.h"
+#include "cutils/atomic.h"
+
+namespace art {
+
+inline bool SpaceBitmap::AtomicTestAndSet(const mirror::Object* obj) {
+  uintptr_t addr = reinterpret_cast<uintptr_t>(obj);
+  DCHECK_GE(addr, heap_begin_);
+  const uintptr_t offset = addr - heap_begin_;
+  const size_t index = OffsetToIndex(offset);
+  const word mask = OffsetToMask(offset);
+  word* const address = &bitmap_begin_[index];
+  DCHECK_LT(index, bitmap_size_ / kWordSize) << " bitmap_size_ = " << bitmap_size_;
+  word old_word;
+  do {
+    old_word = *address;
+    // Fast path: The bit is already set.
+    if ((old_word & mask) != 0) {
+      return true;
+    }
+  } while (UNLIKELY(android_atomic_cas(old_word, old_word | mask, address) != 0));
+  return false;
+}
+
+inline bool SpaceBitmap::Test(const mirror::Object* obj) const {
+  uintptr_t addr = reinterpret_cast<uintptr_t>(obj);
+  DCHECK(HasAddress(obj)) << obj;
+  DCHECK(bitmap_begin_ != NULL);
+  DCHECK_GE(addr, heap_begin_);
+  const uintptr_t offset = addr - heap_begin_;
+  return (bitmap_begin_[OffsetToIndex(offset)] & OffsetToMask(offset)) != 0;
+}
+
+template <typename Visitor, typename FingerVisitor>
+void SpaceBitmap::VisitMarkedRange(uintptr_t visit_begin, uintptr_t visit_end,
+                                   const Visitor& visitor,
+                                   const FingerVisitor& finger_visitor) const {
+  DCHECK_LT(visit_begin, visit_end);
+
+  const size_t word_span = kAlignment * kBitsPerWord; // Equals IndexToOffset(1).
+  const size_t bit_index_start = (visit_begin - heap_begin_) / kAlignment;
+  const size_t bit_index_end = (visit_end - heap_begin_ - 1) / kAlignment;
+
+  size_t word_start = bit_index_start / kBitsPerWord;
+  size_t word_end = bit_index_end / kBitsPerWord;
+  DCHECK_LT(word_end * kWordSize, Size());
+
+  // Start with the word containing bit_index_start; its leading bits are trimmed just below.
+  size_t edge_word = bitmap_begin_[word_start];
+
+  // Handle bits on the left first as a special case
+  size_t left_bits = bit_index_start & (kBitsPerWord - 1);
+  if (left_bits != 0) {
+    edge_word &= (1 << (kBitsPerWord - left_bits)) - 1;
+  }
+
+  // If word_start == word_end then handle this case at the same place we handle the right edge.
+  if (edge_word != 0 && word_start < word_end) {
+    uintptr_t ptr_base = IndexToOffset(word_start) + heap_begin_;
+    finger_visitor(reinterpret_cast<void*>(ptr_base + word_span));
+    do {
+      const size_t shift = CLZ(edge_word);
+      mirror::Object* obj = reinterpret_cast<mirror::Object*>(ptr_base + shift * kAlignment);
+      visitor(obj);
+      edge_word ^= static_cast<size_t>(kWordHighBitMask) >> shift;
+    } while (edge_word != 0);
+  }
+  word_start++;
+
+  for (size_t i = word_start; i < word_end; i++) {
+    size_t w = bitmap_begin_[i];
+    if (w != 0) {
+      uintptr_t ptr_base = IndexToOffset(i) + heap_begin_;
+      finger_visitor(reinterpret_cast<void*>(ptr_base + word_span));
+      do {
+        const size_t shift = CLZ(w);
+        mirror::Object* obj = reinterpret_cast<mirror::Object*>(ptr_base + shift * kAlignment);
+        visitor(obj);
+        w ^= static_cast<size_t>(kWordHighBitMask) >> shift;
+      } while (w != 0);
+    }
+  }
+
+  // Handle the right edge, and also the left edge if both edges are on the same word.
+  size_t right_bits = bit_index_end & (kBitsPerWord - 1);
+
+  // If word_start == word_end then we need to use the word from which we removed the left bits.
+  if (word_start <= word_end) {
+    edge_word = bitmap_begin_[word_end];
+  }
+
+  // Bits that we trim off the right.
+  edge_word &= ~((static_cast<size_t>(kWordHighBitMask) >> right_bits) - 1);
+  uintptr_t ptr_base = IndexToOffset(word_end) + heap_begin_;
+  finger_visitor(reinterpret_cast<void*>(ptr_base + word_span));
+  while (edge_word != 0) {
+    const size_t shift = CLZ(edge_word);
+    mirror::Object* obj = reinterpret_cast<mirror::Object*>(ptr_base + shift * kAlignment);
+    visitor(obj);
+    edge_word ^= static_cast<size_t>(kWordHighBitMask) >> shift;
+  }
+}
+
+inline bool SpaceBitmap::Modify(const mirror::Object* obj, bool do_set) {
+  uintptr_t addr = reinterpret_cast<uintptr_t>(obj);
+  DCHECK_GE(addr, heap_begin_);
+  const uintptr_t offset = addr - heap_begin_;
+  const size_t index = OffsetToIndex(offset);
+  const word mask = OffsetToMask(offset);
+  DCHECK_LT(index, bitmap_size_ / kWordSize) << " bitmap_size_ = " << bitmap_size_;
+  word* address = &bitmap_begin_[index];
+  word old_word = *address;
+  if (do_set) {
+    *address = old_word | mask;
+  } else {
+    *address = old_word & ~mask;
+  }
+  return (old_word & mask) != 0;
+}
+
+}  // namespace art
+
+#endif  // ART_SRC_GC_SPACE_BITMAP_INL_H_
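Set, Clear, Test, and AtomicTestAndSet all hinge on the same address-to-bit mapping: each kAlignment bytes of heap map to one bit, stored most significant bit first within a bitmap word. A standalone sketch of that mapping (the word width, heap_begin, and slot number are arbitrary illustrative values):

#include <cstddef>
#include <cstdint>
#include <cstdio>

int main() {
  typedef uint32_t word;  // 32-bit words for this sketch.
  const size_t kAlignment = 8;
  const size_t kBitsPerWord = sizeof(word) * 8;
  const word kWordHighBitMask = static_cast<word>(1) << (kBitsPerWord - 1);

  const uintptr_t heap_begin = 0x10000000;
  word bitmap[4] = {0, 0, 0, 0};

  // Pick the 42nd kAlignment-sized slot; its bit lands in bitmap word 1.
  const uintptr_t obj = heap_begin + 41 * kAlignment;
  const uintptr_t offset = obj - heap_begin;
  const size_t index = offset / kAlignment / kBitsPerWord;  // OffsetToIndex.
  const word mask =
      kWordHighBitMask >> ((offset / kAlignment) % kBitsPerWord);  // OffsetToMask.

  const word old_word = bitmap[index];
  bitmap[index] = old_word | mask;  // Modify(obj, /* do_set */ true).
  std::printf("index=%zu mask=%#x previously_set=%d\n", index,
              static_cast<unsigned>(mask), (old_word & mask) != 0);
  return 0;
}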
diff --git a/src/gc/space_bitmap.cc b/src/gc/space_bitmap.cc
index 25fa672..d90c090 100644
--- a/src/gc/space_bitmap.cc
+++ b/src/gc/space_bitmap.cc
@@ -17,6 +17,11 @@
 #include "heap_bitmap.h"
 
 #include "base/logging.h"
+#include "mirror/class-inl.h"
+#include "mirror/field-inl.h"
+#include "mirror/object-inl.h"
+#include "mirror/object_array-inl.h"
+#include "space_bitmap-inl.h"
 #include "UniquePtr.h"
 #include "utils.h"
 
@@ -32,7 +37,7 @@
 
 void SpaceSetMap::Walk(SpaceBitmap::Callback* callback, void* arg) {
   for (Objects::iterator it = contained_.begin(); it != contained_.end(); ++it) {
-    callback(const_cast<Object*>(*it), arg);
+    callback(const_cast<mirror::Object*>(*it), arg);
   }
 }
 
@@ -98,7 +103,7 @@
       uintptr_t ptr_base = IndexToOffset(i) + heap_begin_;
       do {
         const size_t shift = CLZ(w);
-        Object* obj = reinterpret_cast<Object*>(ptr_base + shift * kAlignment);
+        mirror::Object* obj = reinterpret_cast<mirror::Object*>(ptr_base + shift * kAlignment);
         (*callback)(obj, arg);
         w ^= static_cast<size_t>(kWordHighBitMask) >> shift;
       } while (w != 0);
@@ -127,10 +132,10 @@
     return;
   }
 
-  // TODO: rewrite the callbacks to accept a std::vector<Object*> rather than a Object**?
+  // TODO: rewrite the callbacks to accept a std::vector<mirror::Object*> rather than a mirror::Object**?
   const size_t buffer_size = kWordSize * kBitsPerWord;
-  Object* pointer_buf[buffer_size];
-  Object** pb = &pointer_buf[0];
+  mirror::Object* pointer_buf[buffer_size];
+  mirror::Object** pb = &pointer_buf[0];
   size_t start = OffsetToIndex(sweep_begin - live_bitmap.heap_begin_);
   size_t end = OffsetToIndex(sweep_end - live_bitmap.heap_begin_ - 1);
   CHECK_LT(end, live_bitmap.Size() / kWordSize);
@@ -143,7 +148,7 @@
       do {
         const size_t shift = CLZ(garbage);
         garbage ^= static_cast<size_t>(kWordHighBitMask) >> shift;
-        *pb++ = reinterpret_cast<Object*>(ptr_base + shift * kAlignment);
+        *pb++ = reinterpret_cast<mirror::Object*>(ptr_base + shift * kAlignment);
       } while (garbage != 0);
       // Make sure that there are always enough slots available for an
       // entire word of one bits.
@@ -161,32 +166,32 @@
 }  // namespace art
 
 // Support needed for in order traversal
-#include "object.h"
+#include "mirror/object.h"
 #include "object_utils.h"
 
 namespace art {
 
-static void WalkFieldsInOrder(SpaceBitmap* visited, SpaceBitmap::Callback* callback, Object* obj,
+static void WalkFieldsInOrder(SpaceBitmap* visited, SpaceBitmap::Callback* callback, mirror::Object* obj,
                               void* arg);
 
 // Walk instance fields of the given Class. Separate function to allow recursion on the super
 // class.
-static void WalkInstanceFields(SpaceBitmap* visited, SpaceBitmap::Callback* callback, Object* obj,
-                               Class* klass, void* arg)
+static void WalkInstanceFields(SpaceBitmap* visited, SpaceBitmap::Callback* callback, mirror::Object* obj,
+                               mirror::Class* klass, void* arg)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   // Visit fields of parent classes first.
-  Class* super = klass->GetSuperClass();
+  mirror::Class* super = klass->GetSuperClass();
   if (super != NULL) {
     WalkInstanceFields(visited, callback, obj, super, arg);
   }
   // Walk instance fields
-  ObjectArray<Field>* fields = klass->GetIFields();
+  mirror::ObjectArray<mirror::Field>* fields = klass->GetIFields();
   if (fields != NULL) {
     for (int32_t i = 0; i < fields->GetLength(); i++) {
-      Field* field = fields->Get(i);
+      mirror::Field* field = fields->Get(i);
       FieldHelper fh(field);
       if (!fh.IsPrimitiveType()) {
-        Object* value = field->GetObj(obj);
+        mirror::Object* value = field->GetObj(obj);
         if (value != NULL) {
           WalkFieldsInOrder(visited, callback, value,  arg);
         }
@@ -196,7 +201,7 @@
 }
 
 // For an unvisited object, visit it then all its children found via fields.
-static void WalkFieldsInOrder(SpaceBitmap* visited, SpaceBitmap::Callback* callback, Object* obj,
+static void WalkFieldsInOrder(SpaceBitmap* visited, SpaceBitmap::Callback* callback, mirror::Object* obj,
                               void* arg)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   if (visited->Test(obj)) {
@@ -206,17 +211,17 @@
   (*callback)(obj, arg);
   visited->Set(obj);
   // Walk instance fields of all objects
-  Class* klass = obj->GetClass();
+  mirror::Class* klass = obj->GetClass();
   WalkInstanceFields(visited, callback, obj, klass, arg);
   // Walk static fields of a Class
   if (obj->IsClass()) {
-    ObjectArray<Field>* fields = klass->GetSFields();
+    mirror::ObjectArray<mirror::Field>* fields = klass->GetSFields();
     if (fields != NULL) {
       for (int32_t i = 0; i < fields->GetLength(); i++) {
-        Field* field = fields->Get(i);
+        mirror::Field* field = fields->Get(i);
         FieldHelper fh(field);
         if (!fh.IsPrimitiveType()) {
-          Object* value = field->GetObj(NULL);
+          mirror::Object* value = field->GetObj(NULL);
           if (value != NULL) {
             WalkFieldsInOrder(visited, callback, value, arg);
           }
@@ -225,10 +230,10 @@
     }
   } else if (obj->IsObjectArray()) {
     // Walk elements of an object array
-    ObjectArray<Object>* obj_array = obj->AsObjectArray<Object>();
+    mirror::ObjectArray<mirror::Object>* obj_array = obj->AsObjectArray<mirror::Object>();
     int32_t length = obj_array->GetLength();
     for (int32_t i = 0; i < length; i++) {
-      Object* value = obj_array->Get(i);
+      mirror::Object* value = obj_array->Get(i);
       if (value != NULL) {
         WalkFieldsInOrder(visited, callback, value, arg);
       }
@@ -251,7 +256,7 @@
       uintptr_t ptr_base = IndexToOffset(i) + heap_begin_;
       while (w != 0) {
         const size_t shift = CLZ(w);
-        Object* obj = reinterpret_cast<Object*>(ptr_base + shift * kAlignment);
+        mirror::Object* obj = reinterpret_cast<mirror::Object*>(ptr_base + shift * kAlignment);
         WalkFieldsInOrder(visited.get(), callback, obj, arg);
         w ^= static_cast<size_t>(kWordHighBitMask) >> shift;
       }
diff --git a/src/gc/space_bitmap.h b/src/gc/space_bitmap.h
index dd2f47d..6bc06d6 100644
--- a/src/gc/space_bitmap.h
+++ b/src/gc/space_bitmap.h
@@ -14,35 +14,33 @@
  * limitations under the License.
  */
 
-#ifndef ART_SRC_SPACE_BITMAP_H_
-#define ART_SRC_SPACE_BITMAP_H_
+#ifndef ART_SRC_GC_SPACE_BITMAP_H_
+#define ART_SRC_GC_SPACE_BITMAP_H_
+
+#include "locks.h"
+#include "globals.h"
+#include "mem_map.h"
+#include "UniquePtr.h"
 
 #include <limits.h>
 #include <set>
 #include <stdint.h>
 #include <vector>
 
-#include "base/logging.h"
-#include "cutils/atomic.h"
-#include "cutils/atomic-inline.h"
-#include "UniquePtr.h"
-#include "globals.h"
-#include "mem_map.h"
-#include "utils.h"
-
 namespace art {
-
+namespace mirror {
 class Object;
+}  // namespace mirror
 
 class SpaceBitmap {
  public:
   static const size_t kAlignment = 8;
 
-  typedef void Callback(Object* obj, void* arg);
+  typedef void Callback(mirror::Object* obj, void* arg);
 
-  typedef void ScanCallback(Object* obj, void* finger, void* arg);
+  typedef void ScanCallback(mirror::Object* obj, void* finger, void* arg);
 
-  typedef void SweepCallback(size_t ptr_count, Object** ptrs, void* arg);
+  typedef void SweepCallback(size_t ptr_count, mirror::Object** ptrs, void* arg);
 
   // Initialize a SpaceBitmap so that it points to a bitmap large enough to cover a heap at
   // heap_begin of heap_capacity bytes, where objects are guaranteed to be kAlignment-aligned.
@@ -66,44 +64,20 @@
     return static_cast<uintptr_t>(kWordHighBitMask) >> ((offset_ / kAlignment) % kBitsPerWord);
   }
 
-  inline bool Set(const Object* obj) {
+  inline bool Set(const mirror::Object* obj) {
     return Modify(obj, true);
   }
 
-  inline bool Clear(const Object* obj) {
+  inline bool Clear(const mirror::Object* obj) {
     return Modify(obj, false);
   }
 
   // Returns true if the object was previously marked.
-  inline bool AtomicTestAndSet(const Object* obj) {
-    uintptr_t addr = reinterpret_cast<uintptr_t>(obj);
-    DCHECK_GE(addr, heap_begin_);
-    const uintptr_t offset = addr - heap_begin_;
-    const size_t index = OffsetToIndex(offset);
-    const word mask = OffsetToMask(offset);
-    word* const address = &bitmap_begin_[index];
-    DCHECK_LT(index, bitmap_size_ / kWordSize) << " bitmap_size_ = " << bitmap_size_;
-    word old_word;
-    do {
-      old_word = *address;
-      // Fast path: The bit is already set.
-      if ((old_word & mask) != 0) {
-        return true;
-      }
-    } while (UNLIKELY(android_atomic_cas(old_word, old_word | mask, address) != 0));
-    return false;
-  }
+  bool AtomicTestAndSet(const mirror::Object* obj);
 
   void Clear();
 
-  inline bool Test(const Object* obj) const {
-    uintptr_t addr = reinterpret_cast<uintptr_t>(obj);
-    DCHECK(HasAddress(obj)) << obj;
-    DCHECK(bitmap_begin_ != NULL);
-    DCHECK_GE(addr, heap_begin_);
-    const uintptr_t offset = addr - heap_begin_;
-    return (bitmap_begin_[OffsetToIndex(offset)] & OffsetToMask(offset)) != 0;
-  }
+  bool Test(const mirror::Object* obj) const;
 
   // Return true iff <obj> is within the range of pointers that this bitmap could potentially cover,
   // even if a bit has not been set for it.
@@ -123,7 +97,7 @@
         : bitmap_(bitmap) {
     }
 
-    void operator ()(Object* obj) const {
+    void operator ()(mirror::Object* obj) const {
       bitmap_->Clear(obj);
     }
    private:
@@ -133,86 +107,21 @@
   template <typename Visitor>
   void VisitRange(uintptr_t visit_begin, uintptr_t visit_end, const Visitor& visitor) const {
     for (; visit_begin < visit_end; visit_begin += kAlignment ) {
-      visitor(reinterpret_cast<Object*>(visit_begin));
+      visitor(reinterpret_cast<mirror::Object*>(visit_begin));
     }
   }
 
   template <typename Visitor, typename FingerVisitor>
   void VisitMarkedRange(uintptr_t visit_begin, uintptr_t visit_end,
                         const Visitor& visitor, const FingerVisitor& finger_visitor) const
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
-    DCHECK_LT(visit_begin, visit_end);
-
-    const size_t word_span = kAlignment * kBitsPerWord; // Equals IndexToOffset(1).
-    const size_t bit_index_start = (visit_begin - heap_begin_) / kAlignment;
-    const size_t bit_index_end = (visit_end - heap_begin_ - 1) / kAlignment;
-
-    size_t word_start = bit_index_start / kBitsPerWord;
-    size_t word_end = bit_index_end / kBitsPerWord;
-    DCHECK_LT(word_end * kWordSize, Size());
-
-    // Trim off left_bits of left bits.
-    size_t edge_word = bitmap_begin_[word_start];
-
-    // Handle bits on the left first as a special case
-    size_t left_bits = bit_index_start & (kBitsPerWord - 1);
-    if (left_bits != 0) {
-      edge_word &= (1 << (kBitsPerWord - left_bits)) - 1;
-    }
-
-    // If word_start == word_end then handle this case at the same place we handle the right edge.
-    if (edge_word != 0 && word_start < word_end) {
-      uintptr_t ptr_base = IndexToOffset(word_start) + heap_begin_;
-      finger_visitor(reinterpret_cast<void*>(ptr_base + word_span));
-      do {
-        const size_t shift = CLZ(edge_word);
-        Object* obj = reinterpret_cast<Object*>(ptr_base + shift * kAlignment);
-        visitor(obj);
-        edge_word ^= static_cast<size_t>(kWordHighBitMask) >> shift;
-      } while (edge_word != 0);
-    }
-    word_start++;
-
-    for (size_t i = word_start; i < word_end; i++) {
-      size_t w = bitmap_begin_[i];
-      if (w != 0) {
-        uintptr_t ptr_base = IndexToOffset(i) + heap_begin_;
-        finger_visitor(reinterpret_cast<void*>(ptr_base + word_span));
-        do {
-          const size_t shift = CLZ(w);
-          Object* obj = reinterpret_cast<Object*>(ptr_base + shift * kAlignment);
-          visitor(obj);
-          w ^= static_cast<size_t>(kWordHighBitMask) >> shift;
-        } while (w != 0);
-      }
-    }
-
-    // Handle the right edge, and also the left edge if both edges are on the same word.
-    size_t right_bits = bit_index_end & (kBitsPerWord - 1);
-
-    // If word_start == word_end then we need to use the word which we removed the left bits.
-    if (word_start <= word_end) {
-      edge_word = bitmap_begin_[word_end];
-    }
-
-    // Bits that we trim off the right.
-    edge_word &= ~((static_cast<size_t>(kWordHighBitMask) >> right_bits) - 1);
-    uintptr_t ptr_base = IndexToOffset(word_end) + heap_begin_;
-    finger_visitor(reinterpret_cast<void*>(ptr_base + word_span));
-    while (edge_word != 0) {
-      const size_t shift = CLZ(edge_word);
-      Object* obj = reinterpret_cast<Object*>(ptr_base + shift * kAlignment);
-      visitor(obj);
-      edge_word ^= static_cast<size_t>(kWordHighBitMask) >> shift;
-    }
-  }
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void Walk(Callback* callback, void* arg)
       SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
   void InOrderWalk(Callback* callback, void* arg)
-      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
 
   static void SweepWalk(const SpaceBitmap& live,
                         const SpaceBitmap& mark,
@@ -251,7 +160,7 @@
   std::string GetName() const;
   void SetName(const std::string& name);
 
-  const void* GetObjectWordAddress(const Object* obj) const {
+  const void* GetObjectWordAddress(const mirror::Object* obj) const {
     uintptr_t addr = reinterpret_cast<uintptr_t>(obj);
     const uintptr_t offset = addr - heap_begin_;
     const size_t index = OffsetToIndex(offset);
@@ -265,22 +174,7 @@
         heap_begin_(reinterpret_cast<uintptr_t>(heap_begin)),
         name_(name) {}
 
-  inline bool Modify(const Object* obj, bool do_set) {
-    uintptr_t addr = reinterpret_cast<uintptr_t>(obj);
-    DCHECK_GE(addr, heap_begin_);
-    const uintptr_t offset = addr - heap_begin_;
-    const size_t index = OffsetToIndex(offset);
-    const word mask = OffsetToMask(offset);
-    DCHECK_LT(index, bitmap_size_ / kWordSize) << " bitmap_size_ = " << bitmap_size_;
-    word* address = &bitmap_begin_[index];
-    word old_word = *address;
-    if (do_set) {
-      *address = old_word | mask;
-    } else {
-      *address = old_word & ~mask;
-    }
-    return (old_word & mask) != 0;
-  }
+  bool Modify(const mirror::Object* obj, bool do_set);
 
   // Backing storage for bitmap.
   UniquePtr<MemMap> mem_map_;
@@ -302,17 +196,17 @@
 // Like a bitmap except it keeps track of objects using sets.
 class SpaceSetMap {
  public:
-  typedef std::set<const Object*> Objects;
+  typedef std::set<const mirror::Object*> Objects;
 
   bool IsEmpty() const {
     return contained_.empty();
   }
 
-  inline void Set(const Object* obj) {
+  inline void Set(const mirror::Object* obj) {
     contained_.insert(obj);
   }
 
-  inline void Clear(const Object* obj) {
+  inline void Clear(const mirror::Object* obj) {
     Objects::iterator found = contained_.find(obj);
     if (found != contained_.end()) {
       contained_.erase(found);
@@ -323,7 +217,7 @@
     contained_.clear();
   }
 
-  inline bool Test(const Object* obj) const {
+  inline bool Test(const mirror::Object* obj) const {
     return contained_.find(obj) != contained_.end();
   }
 
@@ -357,4 +251,4 @@
 
 }  // namespace art
 
-#endif  // ART_SRC_SPACE_BITMAP_H_
+#endif  // ART_SRC_GC_SPACE_BITMAP_H_
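AtomicTestAndSet, now declared here and defined in space_bitmap-inl.h, is the lock-free counterpart of Modify: it retries a compare-and-swap until either the bit is observed set (the fast path) or the swap succeeds. A sketch of that retry loop using std::atomic in place of android_atomic_cas, which returns 0 on success:

#include <atomic>
#include <cstdint>
#include <cstdio>

typedef uint32_t word;

static bool AtomicTestAndSet(std::atomic<word>* address, word mask) {
  word old_word = address->load();
  do {
    if ((old_word & mask) != 0) {
      return true;  // Bit already set: the object was previously marked.
    }
    // On failure, compare_exchange_weak reloads *address into old_word.
  } while (!address->compare_exchange_weak(old_word, old_word | mask));
  return false;
}

int main() {
  std::atomic<word> w(0);
  bool first = AtomicTestAndSet(&w, 0x80000000u);   // false: bit newly set.
  bool second = AtomicTestAndSet(&w, 0x80000000u);  // true: already marked.
  std::printf("first=%d second=%d\n", first, second);
  return 0;
}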
diff --git a/src/gc/space_bitmap_test.cc b/src/gc/space_bitmap_test.cc
index a2f1afc..5a829e4 100644
--- a/src/gc/space_bitmap_test.cc
+++ b/src/gc/space_bitmap_test.cc
@@ -19,6 +19,7 @@
 #include "common_test.h"
 #include "dlmalloc.h"
 #include "globals.h"
+#include "space_bitmap-inl.h"
 #include "UniquePtr.h"
 
 #include <stdint.h>
@@ -39,20 +40,20 @@
 
 class BitmapVerify {
  public:
-  BitmapVerify(SpaceBitmap* bitmap, const Object* begin, const Object* end)
+  BitmapVerify(SpaceBitmap* bitmap, const mirror::Object* begin, const mirror::Object* end)
     : bitmap_(bitmap),
       begin_(begin),
       end_(end) {}
 
-  void operator ()(const Object* obj) {
+  void operator ()(const mirror::Object* obj) {
     EXPECT_TRUE(obj >= begin_);
     EXPECT_TRUE(obj <= end_);
     EXPECT_TRUE(bitmap_->Test(obj) == ((reinterpret_cast<uintptr_t>(obj) & 0xF) != 0));
   }
 
   SpaceBitmap* bitmap_;
-  const Object* begin_;
-  const Object* end_;
+  const mirror::Object* begin_;
+  const mirror::Object* end_;
 };
 
 TEST_F(SpaceBitmapTest, ScanRange) {
@@ -65,7 +66,8 @@
 
   // Set all the odd bits in the first BitsPerWord * 3 to one.
   for (size_t j = 0; j < kBitsPerWord * 3; ++j) {
-    const Object* obj = reinterpret_cast<Object*>(heap_begin + j * SpaceBitmap::kAlignment);
+    const mirror::Object* obj =
+        reinterpret_cast<mirror::Object*>(heap_begin + j * SpaceBitmap::kAlignment);
     if (reinterpret_cast<uintptr_t>(obj) & 0xF) {
       space_bitmap->Set(obj);
     }
@@ -75,9 +77,11 @@
   // This handles all the cases, having runs which start and end on the same word, and different
   // words.
   for (size_t i = 0; i < static_cast<size_t>(kBitsPerWord); ++i) {
-    Object* start = reinterpret_cast<Object*>(heap_begin + i * SpaceBitmap::kAlignment);
+    mirror::Object* start =
+        reinterpret_cast<mirror::Object*>(heap_begin + i * SpaceBitmap::kAlignment);
     for (size_t j = 0; j < static_cast<size_t>(kBitsPerWord * 2); ++j) {
-      Object* end = reinterpret_cast<Object*>(heap_begin + (i + j) * SpaceBitmap::kAlignment);
+      mirror::Object* end =
+          reinterpret_cast<mirror::Object*>(heap_begin + (i + j) * SpaceBitmap::kAlignment);
       BitmapVerify(space_bitmap.get(), start, end);
     }
   }
diff --git a/src/gc/space_test.cc b/src/gc/space_test.cc
index 2e03eae..372ec77 100644
--- a/src/gc/space_test.cc
+++ b/src/gc/space_test.cc
@@ -83,23 +83,23 @@
     Thread* self = Thread::Current();
 
     // Succeeds, fits without adjusting the footprint limit.
-    Object* ptr1 = space->Alloc(self, 1 * MB);
+    mirror::Object* ptr1 = space->Alloc(self, 1 * MB);
     EXPECT_TRUE(ptr1 != NULL);
 
     // Fails, requires a higher footprint limit.
-    Object* ptr2 = space->Alloc(self, 8 * MB);
+    mirror::Object* ptr2 = space->Alloc(self, 8 * MB);
     EXPECT_TRUE(ptr2 == NULL);
 
     // Succeeds, adjusts the footprint.
-    Object* ptr3 = space->AllocWithGrowth(self, 8 * MB);
+    mirror::Object* ptr3 = space->AllocWithGrowth(self, 8 * MB);
     EXPECT_TRUE(ptr3 != NULL);
 
     // Fails, requires a higher footprint limit.
-    Object* ptr4 = space->Alloc(self, 8 * MB);
+    mirror::Object* ptr4 = space->Alloc(self, 8 * MB);
     EXPECT_TRUE(ptr4 == NULL);
 
     // Also fails, requires a higher allowed footprint.
-    Object* ptr5 = space->AllocWithGrowth(self, 8 * MB);
+    mirror::Object* ptr5 = space->AllocWithGrowth(self, 8 * MB);
     EXPECT_TRUE(ptr5 == NULL);
 
     // Release some memory.
@@ -151,23 +151,23 @@
   Runtime::Current()->GetHeap()->AddSpace(space);
 
   // Succeeds, fits without adjusting the footprint limit.
-  Object* ptr1 = space->Alloc(self, 1 * MB);
+  mirror::Object* ptr1 = space->Alloc(self, 1 * MB);
   EXPECT_TRUE(ptr1 != NULL);
 
   // Fails, requires a higher footprint limit.
-  Object* ptr2 = space->Alloc(self, 8 * MB);
+  mirror::Object* ptr2 = space->Alloc(self, 8 * MB);
   EXPECT_TRUE(ptr2 == NULL);
 
   // Succeeds, adjusts the footprint.
-  Object* ptr3 = space->AllocWithGrowth(self, 8 * MB);
+  mirror::Object* ptr3 = space->AllocWithGrowth(self, 8 * MB);
   EXPECT_TRUE(ptr3 != NULL);
 
   // Fails, requires a higher footprint limit.
-  Object* ptr4 = space->Alloc(self, 8 * MB);
+  mirror::Object* ptr4 = space->Alloc(self, 8 * MB);
   EXPECT_TRUE(ptr4 == NULL);
 
   // Also fails, requires a higher allowed footprint.
-  Object* ptr5 = space->AllocWithGrowth(self, 8 * MB);
+  mirror::Object* ptr5 = space->AllocWithGrowth(self, 8 * MB);
   EXPECT_TRUE(ptr5 == NULL);
 
   // Release some memory.
@@ -194,7 +194,7 @@
   Thread* self = Thread::Current();
 
   // Succeeds, fits without adjusting the max allowed footprint.
-  Object* lots_of_objects[1024];
+  mirror::Object* lots_of_objects[1024];
   for (size_t i = 0; i < arraysize(lots_of_objects); i++) {
     lots_of_objects[i] = space->Alloc(self, 16);
     EXPECT_TRUE(lots_of_objects[i] != NULL);
@@ -252,7 +252,7 @@
 
   // Fill the space with lots of small objects up to the growth limit
   size_t max_objects = (growth_limit / (object_size > 0 ? object_size : 8)) + 1;
-  UniquePtr<Object*[]> lots_of_objects(new Object*[max_objects]);
+  UniquePtr<mirror::Object*[]> lots_of_objects(new mirror::Object*[max_objects]);
   size_t last_object = 0;  // last object for which allocation succeeded
   size_t amount_allocated = 0;  // amount of space allocated
   Thread* self = Thread::Current();
@@ -269,7 +269,7 @@
           alloc_size = 8;
         }
       }
-      Object* object;
+      mirror::Object* object;
       if (round <= 1) {
         object = space->Alloc(self, alloc_size);
       } else {
@@ -326,7 +326,7 @@
 
     // Free some objects
     for (size_t i = 0; i < last_object; i += free_increment) {
-      Object* object = lots_of_objects.get()[i];
+      mirror::Object* object = lots_of_objects.get()[i];
       if (object == NULL) {
         continue;
       }
@@ -347,7 +347,7 @@
   }
 
   // All memory was released; try a large allocation to check that freed memory is being coalesced
-  Object* large_object;
+  mirror::Object* large_object;
   size_t three_quarters_space = (growth_limit / 2) + (growth_limit / 4);
   if (round <= 1) {
     large_object = space->Alloc(self, three_quarters_space);
diff --git a/src/gc/sticky_mark_sweep.cc b/src/gc/sticky_mark_sweep.cc
index 23196fd..988d4e7 100644
--- a/src/gc/sticky_mark_sweep.cc
+++ b/src/gc/sticky_mark_sweep.cc
@@ -14,47 +14,51 @@
  * limitations under the License.
  */
 
+#include "heap.h"
 #include "large_object_space.h"
 #include "space.h"
 #include "sticky_mark_sweep.h"
+#include "thread.h"
 
 namespace art {
-  StickyMarkSweep::StickyMarkSweep(Heap* heap, bool is_concurrent)
-      : PartialMarkSweep(heap, is_concurrent) {
-    cumulative_timings_.SetName(GetName());
-  }
 
-  StickyMarkSweep::~StickyMarkSweep() {
+StickyMarkSweep::StickyMarkSweep(Heap* heap, bool is_concurrent)
+    : PartialMarkSweep(heap, is_concurrent) {
+  cumulative_timings_.SetName(GetName());
+}
 
-  }
+StickyMarkSweep::~StickyMarkSweep() {
 
-  void StickyMarkSweep::BindBitmaps() {
-    PartialMarkSweep::BindBitmaps();
+}
 
-    Spaces& spaces = GetHeap()->GetSpaces();
-    WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
-    // For sticky GC, we want to bind the bitmaps of both the zygote space and the alloc space.
-    // This lets us start with the mark bitmap of the previous garbage collection as the current
-    // mark bitmap of the alloc space. After the sticky GC finishes, we then unbind the bitmaps,
-    // making it so that the live bitmap of the alloc space is contains the newly marked objects
-    // from the sticky GC.
-    for (Spaces::iterator it = spaces.begin(); it != spaces.end(); ++it) {
-      if ((*it)->GetGcRetentionPolicy() == kGcRetentionPolicyAlwaysCollect) {
-        BindLiveToMarkBitmap(*it);
-      }
+void StickyMarkSweep::BindBitmaps() {
+  PartialMarkSweep::BindBitmaps();
+
+  Spaces& spaces = GetHeap()->GetSpaces();
+  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
+  // For sticky GC, we want to bind the bitmaps of both the zygote space and the alloc space.
+  // This lets us start with the mark bitmap of the previous garbage collection as the current
+  // mark bitmap of the alloc space. After the sticky GC finishes, we then unbind the bitmaps,
+  // making it so that the live bitmap of the alloc space contains the newly marked objects
+  // from the sticky GC.
+  for (Spaces::iterator it = spaces.begin(); it != spaces.end(); ++it) {
+    if ((*it)->GetGcRetentionPolicy() == kGcRetentionPolicyAlwaysCollect) {
+      BindLiveToMarkBitmap(*it);
     }
-
-    GetHeap()->GetLargeObjectsSpace()->CopyLiveToMarked();
   }
 
-  void StickyMarkSweep::MarkReachableObjects() {
-    DisableFinger();
-    RecursiveMarkDirtyObjects(CardTable::kCardDirty - 1);
-  }
+  GetHeap()->GetLargeObjectsSpace()->CopyLiveToMarked();
+}
 
-  void StickyMarkSweep::Sweep(TimingLogger& timings, bool swap_bitmaps) {
-    ObjectStack* live_stack = GetHeap()->GetLiveStack();
-    SweepArray(timings_, live_stack, false);
-    timings_.AddSplit("SweepArray");
-  }
+void StickyMarkSweep::MarkReachableObjects() {
+  DisableFinger();
+  RecursiveMarkDirtyObjects(CardTable::kCardDirty - 1);
+}
+
+void StickyMarkSweep::Sweep(TimingLogger& timings, bool swap_bitmaps) {
+  ObjectStack* live_stack = GetHeap()->GetLiveStack();
+  SweepArray(timings_, live_stack, false);
+  timings_.AddSplit("SweepArray");
+}
+
 }  // namespace art
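MarkReachableObjects above passes CardTable::kCardDirty - 1 as the minimum age, so the sticky collector rescans both freshly dirtied cards and cards that aged by exactly one GC. A sketch of that minimum-age filter (the card values are assumptions for illustration; only the >= comparison mirrors the real scan):

#include <cstddef>
#include <cstdint>
#include <cstdio>

int main() {
  const uint8_t kCardClean = 0;
  const uint8_t kCardDirty = 0x70;             // Assumed value, illustrative only.
  const uint8_t minimum_age = kCardDirty - 1;  // What StickyMarkSweep passes.

  // A few fake cards: clean, freshly dirtied, aged one GC, and stale.
  const uint8_t cards[] = {kCardClean, kCardDirty, kCardDirty - 1, 0x10};
  for (size_t i = 0; i < sizeof(cards); ++i) {
    if (cards[i] >= minimum_age) {
      std::printf("rescan card %zu (value %#x)\n", i,
                  static_cast<unsigned>(cards[i]));
    }
  }
  return 0;
}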
diff --git a/src/gc/sticky_mark_sweep.h b/src/gc/sticky_mark_sweep.h
index 8396bbe..41ab0cc 100644
--- a/src/gc/sticky_mark_sweep.h
+++ b/src/gc/sticky_mark_sweep.h
@@ -14,28 +14,15 @@
  * limitations under the License.
  */
 
-#ifndef ART_SRC_STICKY_MARK_SWEEP_H_
-#define ART_SRC_STICKY_MARK_SWEEP_H_
+#ifndef ART_SRC_GC_STICKY_MARK_SWEEP_H_
+#define ART_SRC_GC_STICKY_MARK_SWEEP_H_
 
 #include "base/macros.h"
 #include "locks.h"
 #include "partial_mark_sweep.h"
-#include "utils.h"
 
 namespace art {
 
-class Barrier;
-class CheckObjectVisitor;
-class Class;
-class Heap;
-class MarkIfReachesAllocspaceVisitor;
-class ModUnionClearCardVisitor;
-class ModUnionVisitor;
-class ModUnionTableBitmap;
-class Object;
-class TimingLogger;
-class MarkStackChunk;
-
 class StickyMarkSweep : public PartialMarkSweep {
  public:
   virtual GcType GetGcType() const {
@@ -60,4 +47,4 @@
 
 }  // namespace art
 
-#endif  // ART_SRC_STICKY_MARK_SWEEP_H_
+#endif  // ART_SRC_GC_STICKY_MARK_SWEEP_H_