Add a garbage collection implementation.

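The collector is a stop-the-world mark-sweep: MarkSweep marks the
roots, recursively marks reachable objects with a bitmap walk and a
mark stack, processes reference objects, and sweeps unmarked objects
back to their spaces.  The heap now manages a vector of spaces,
tracks the number of bytes and objects allocated, and imports a
subset of Google's stl_util.h to delete the spaces on shutdown.
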
Change-Id: I47330dc14dd2ecee052b6a5165c6d48bd6c89dc4
diff --git a/src/heap.cc b/src/heap.cc
index 3955a31..851433b 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -2,17 +2,26 @@
 // Author: cshapiro@google.com (Carl Shapiro)
 
 #include "heap.h"
+
+#include <vector>
+
+#include "mark_sweep.h"
 #include "object.h"
 #include "space.h"
+#include "stl_util.h"
 
 namespace art {
 
-Space* Heap::space_ = NULL;
+std::vector<Space*> Heap::spaces_;
 
 size_t Heap::startup_size_ = 0;
 
 size_t Heap::maximum_size_ = 0;
 
+size_t Heap::num_bytes_allocated_ = 0;
+
+size_t Heap::num_objects_allocated_ = 0;
+
 bool Heap::is_gc_running_ = false;
 
 HeapBitmap* Heap::mark_bitmap_ = NULL;
@@ -20,13 +29,13 @@
 HeapBitmap* Heap::live_bitmap_ = NULL;
 
 bool Heap::Init(size_t startup_size, size_t maximum_size) {
-  space_ = Space::Create(startup_size, maximum_size);
-  if (space_ == NULL) {
+  Space* space = Space::Create(startup_size, maximum_size);
+  if (space == NULL) {
     return false;
   }
 
-  byte* base = space_->GetBase();
-  size_t num_bytes = space_->Size();
+  byte* base = space->GetBase();
+  size_t num_bytes = space->Size();
 
   // Allocate the initial live bitmap.
   scoped_ptr<HeapBitmap> live_bitmap(HeapBitmap::Create(base, num_bytes));
@@ -40,6 +49,7 @@
     return false;
   }
 
+  spaces_.push_back(space);
   startup_size_ = startup_size;
   maximum_size_ = maximum_size;
   live_bitmap_ = live_bitmap.release();
@@ -51,18 +61,62 @@
 }
 
 void Heap::Destroy() {
-  delete space_;
+  STLDeleteElements(&spaces_);
   delete mark_bitmap_;
   delete live_bitmap_;
 }
 
+Object* Heap::AllocObject(Class* klass) {
+  return AllocObject(klass, klass->object_size_);
+}
+
+Object* Heap::AllocObject(Class* klass, size_t num_bytes) {
+  Object* obj = Allocate(num_bytes);
+  if (obj != NULL) {
+    obj->klass_ = klass;
+  }
+  return obj;
+}
+
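+// Records a completed allocation: marks obj live and charges its actual
+// allocation size, including allocator overhead, to the heap statistics.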
+void Heap::RecordAllocation(Space* space, const Object* obj) {
+  size_t size = space->AllocationSize(obj);
+  DCHECK_NE(size, 0u);
+  num_bytes_allocated_ += size;
+  num_objects_allocated_ += 1;
+  live_bitmap_->Set(obj);
+}
+
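+// Records a completed free: clears obj from the live bitmap and deducts
+// its size from the statistics, clamping the counters at zero.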
+void Heap::RecordFree(Space* space, const Object* obj) {
+  size_t size = space->AllocationSize(obj);
+  DCHECK_NE(size, 0u);
+  if (size < num_bytes_allocated_) {
+    num_bytes_allocated_ -= size;
+  } else {
+    num_bytes_allocated_ = 0;
+  }
+  live_bitmap_->Clear(obj);
+  if (num_objects_allocated_ > 0) {
+    num_objects_allocated_ -= 1;
+  }
+}
+
 Object* Heap::Allocate(size_t size) {
+  CHECK_EQ(spaces_.size(), 1u);
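+  // Only a single space exists so far; allocate directly from it.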
+  Space* space = spaces_[0];
+  Object* obj = Allocate(space, size);
+  if (obj != NULL) {
+    RecordAllocation(space, obj);
+  }
+  return obj;
+}
+
+Object* Heap::Allocate(Space* space, size_t size) {
   // Fail impossible allocations.  TODO: collect soft references.
   if (size > maximum_size_) {
     return NULL;
   }
 
-  Object* ptr = space_->AllocWithoutGrowth(size);
+  Object* ptr = space->AllocWithoutGrowth(size);
   if (ptr != NULL) {
     return ptr;
   }
@@ -73,7 +127,7 @@
     // The GC is concurrently tracing the heap.  Release the heap
    // lock, wait for the GC to complete, and retry the allocation.
     WaitForConcurrentGcToComplete();
-    ptr = space_->AllocWithoutGrowth(size);
+    ptr = space->AllocWithoutGrowth(size);
     if (ptr != NULL) {
       return ptr;
     }
@@ -82,25 +136,22 @@
   // Another failure.  Our thread was starved or there may be too many
   // live objects.  Try a foreground GC.  This will have no effect if
   // the concurrent GC is already running.
-  CollectGarbage();
-  ptr = space_->AllocWithoutGrowth(size);
+  CollectGarbageInternal();
+  ptr = space->AllocWithoutGrowth(size);
   if (ptr != NULL) {
     return ptr;
   }
 
   // Even that didn't work;  this is an exceptional state.
   // Try harder, growing the heap if necessary.
-  ptr = space_->AllocWithGrowth(size);
+  ptr = space->AllocWithGrowth(size);
   if (ptr != NULL) {
     //size_t new_footprint = dvmHeapSourceGetIdealFootprint();
-    size_t new_footprint = space_->MaxAllowedFootprint();
-    //TODO: may want to grow a little bit more so that the amount of free
-    //      space is equal to the old free space + the utilization slop for
-    //      the new allocation.
-    // LOGI_HEAP("Grow heap (frag case) to "
-    //           "%zu.%03zuMB for %zu-byte allocation",
-    //           FRACTIONAL_MB(newHeapSize), size);
-    LOG(INFO) << "Grow heap (frag case) to " << new_footprint
+    size_t new_footprint = space->MaxAllowedFootprint();
+    // TODO: may want to grow a little bit more so that the amount of
+    //       free space is equal to the old free space + the
+    //       utilization slop for the new allocation.
+    LOG(INFO) << "Grow heap (frag case) to " << new_footprint / MB
               << "for " << size << "-byte allocation";
     return ptr;
   }
@@ -111,25 +162,20 @@
   // spec requires that all SoftReferences have been collected and
   // cleared before throwing an OOME.
 
-  //TODO: wait for the finalizers from the previous GC to finish
-  //collect_soft_refs:
+  // TODO: wait for the finalizers from the previous GC to finish
   LOG(INFO) << "Forcing collection of SoftReferences for "
             << size << "-byte allocation";
-  //gcForMalloc(true);
-  CollectGarbage();
-  ptr = space_->AllocWithGrowth(size);
+  CollectGarbageInternal();
+  ptr = space->AllocWithGrowth(size);
   if (ptr != NULL) {
     return ptr;
   }
-  //TODO: maybe wait for finalizers and try one last time
 
-  //LOGE_HEAP("Out of memory on a %zd-byte allocation.", size);
   LOG(ERROR) << "Out of memory on a " << size << " byte allocation";
 
-  //TODO: tell the HeapSource to dump its state
-  //dvmDumpThread(dvmThreadSelf(), false);
+  // TODO: tell the HeapSource to dump its state
+  // TODO: dump stack traces for all threads
 
-  // TODO: stack trace
   return NULL;
 }
 
@@ -145,9 +191,44 @@
 }
 
 void Heap::CollectGarbage() {
+  CollectGarbageInternal();
 }
 
 void Heap::CollectGarbageInternal() {
+  // TODO: check that heap lock is held
+
+  // TODO: Suspend all threads
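+  // The braces bound the collector's lifetime: ~MarkSweep releases the
+  // mark stack and clears the mark bitmap before GrowForUtilization runs.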
+  {
+    MarkSweep mark_sweep;
+
+    mark_sweep.Init();
+
+    mark_sweep.MarkRoots();
+
+    // Push marked roots onto the mark stack
+
+    // TODO: if concurrent
+    //   unlock heap
+    //   resume threads
+
+    mark_sweep.RecursiveMark();
+
+    // TODO: if concurrent
+    //   lock heap
+    //   suspend threads
+    //   re-mark root set
+    //   scan dirty objects
+
+    mark_sweep.ProcessReferences(false);
+
+    // TODO: swap bitmaps
+
+    mark_sweep.Sweep();
+  }
+
+  GrowForUtilization();
+
+  // TODO: Resume all threads
 }
 
 void Heap::WaitForConcurrentGcToComplete() {
@@ -157,7 +238,7 @@
 // heap footprint to match the target utilization ratio.  This should
 // only be called immediately after a full garbage collection.
 void Heap::GrowForUtilization() {
-  LOG(FATAL) << "Unimplemented";
+  LOG(ERROR) << "Unimplemented";
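+  // One plausible policy (an assumption, not implemented here): set the
+  // ideal footprint to num_bytes_allocated_ divided by a target
+  // utilization ratio, clamp it to maximum_size_, and grow the space.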
 }
 
 }  // namespace art
diff --git a/src/heap.h b/src/heap.h
index 3721604..4aff139 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -4,6 +4,8 @@
 #ifndef ART_SRC_HEAP_H_
 #define ART_SRC_HEAP_H_
 
+#include <vector>
+
 #include "globals.h"
 #include "object.h"
 #include "object_bitmap.h"
@@ -28,19 +30,14 @@
 
   static void Destroy();
 
-  static Object* AllocObject(Class* klass) {
-    return AllocObject(klass, klass->object_size_);
-  }
+  // Allocates and initializes storage for a class instance.
+  static Object* AllocObject(Class* klass);
 
-  static Object* AllocObject(Class* klass, size_t num_bytes) {
-    Object* obj = Allocate(num_bytes);
-    if (obj != NULL) {
-      obj->klass_ = klass;
-    }
-    return obj;
-  }
+  static Object* AllocObject(Class* klass, size_t num_bytes);
 
-  static Array* AllocArray(Class* array_class, size_t component_count, size_t component_size) {
+  static Array* AllocArray(Class* array_class,
+                           size_t component_count,
+                           size_t component_size) {
     size_t size = sizeof(Array) + component_count * component_size;
     Array* array = down_cast<Array*>(AllocObject(array_class, size));
     if (array != NULL) {
@@ -49,12 +46,17 @@
     return array;
   }
 
-  static ObjectArray* AllocObjectArray(Class* object_array_class, size_t length) {
-    return down_cast<ObjectArray*>(AllocArray(object_array_class, length, sizeof(uint32_t)));
+  static ObjectArray* AllocObjectArray(Class* object_array_class,
+                                       size_t length) {
+    return down_cast<ObjectArray*>(AllocArray(object_array_class,
+                                              length,
+                                              sizeof(uint32_t)));
   }
 
   static CharArray* AllocCharArray(Class* char_array_class, size_t length) {
-    return down_cast<CharArray*>(AllocArray(char_array_class, length, sizeof(uint16_t)));
+    return down_cast<CharArray*>(AllocArray(char_array_class,
+                                            length,
+                                            sizeof(uint16_t)));
   }
 
   static String* AllocString(Class* java_lang_String) {
@@ -75,9 +77,29 @@
     return lock_;
   }
 
- private:
+  static const std::vector<Space*>& GetSpaces() {
+    return spaces_;
+  }
 
+  static HeapBitmap* GetLiveBits() {
+    return live_bitmap_;
+  }
+
+  static HeapBitmap* GetMarkBits() {
+    return mark_bitmap_;
+  }
+
+  static size_t GetMaximumSize() {
+    return maximum_size_;
+  }
+
+ private:
+  // Allocates uninitialized storage.
   static Object* Allocate(size_t num_bytes);
+  static Object* Allocate(Space* space, size_t num_bytes);
+
+  static void RecordAllocation(Space* space, const Object* object);
+  static void RecordFree(Space* space, const Object* object);
 
   static void CollectGarbageInternal();
 
@@ -85,18 +107,29 @@
 
   static Mutex* lock_;
 
-  static Space* space_;
+  static std::vector<Space*> spaces_;
 
   static HeapBitmap* mark_bitmap_;
 
   static HeapBitmap* live_bitmap_;
 
+  // The startup size of the heap in bytes.
   static size_t startup_size_;
 
+  // The maximum size of the heap in bytes.
   static size_t maximum_size_;
 
+  // True while the garbage collector is running.
   static bool is_gc_running_;
 
+  // Number of bytes allocated.  Adjusted after each allocation and
+  // free.
+  static size_t num_bytes_allocated_;
+
+  // Number of objects allocated.  Adjusted after each allocation and
+  // free.
+  static size_t num_objects_allocated_;
+
   DISALLOW_IMPLICIT_CONSTRUCTORS(Heap);
 };
 
diff --git a/src/mark_stack.h b/src/mark_stack.h
index a3e19e9..6b71c84 100644
--- a/src/mark_stack.h
+++ b/src/mark_stack.h
@@ -13,7 +13,7 @@
 
 class MarkStack {
  public:
-  MarkStack* Create(size_t maximum_size);
+  static MarkStack* Create(size_t maximum_size);
 
   ~MarkStack();
 
diff --git a/src/mark_sweep.cc b/src/mark_sweep.cc
index d2e7b66..196c83c 100644
--- a/src/mark_sweep.cc
+++ b/src/mark_sweep.cc
@@ -3,10 +3,15 @@
 
 #include "mark_sweep.h"
 
+#include <climits>
+#include <vector>
+
+#include "heap.h"
 #include "logging.h"
 #include "macros.h"
 #include "mark_stack.h"
 #include "object.h"
+#include "space.h"
 #include "thread.h"
 
 #define CLZ(x) __builtin_clz(x)
@@ -19,6 +24,22 @@
 size_t MarkSweep::reference_pendingNext_offset_ = 0;  // TODO
 size_t MarkSweep::finalizer_reference_zombie_offset_ = 0;  // TODO
 
+bool MarkSweep::Init() {
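+  // Size the mark stack against the heap's maximum size so that, in the
+  // presumed worst case, every object in the heap can be pushed.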
+  mark_stack_ = MarkStack::Create(Heap::GetMaximumSize());
+  if (mark_stack_ == NULL) {
+    return false;
+  }
+
+  mark_bitmap_ = Heap::GetMarkBits();
+  live_bitmap_ = Heap::GetLiveBits();
+
+  // TODO: if concurrent, clear the card table.
+
+  // TODO: check that the mark bitmap is entirely clear.
+
+  return true;
+}
+
 void MarkSweep::MarkObject0(const Object* obj, bool check_finger) {
   DCHECK(obj != NULL);
   if (obj < condemned_) {
@@ -52,11 +73,59 @@
   LOG(FATAL) << "Unimplemented";
 }
 
-void MarkSweep::ReMarkRoots()
-{
+void MarkSweep::ScanBitmapCallback(Object* obj, void* finger, void* arg) {
+  MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
+  mark_sweep->finger_ = reinterpret_cast<Object*>(finger);
+  mark_sweep->ScanObject(obj);
+}
+
+// Populates the mark stack based on the set of marked objects and
+// recursively marks until the mark stack is emptied.
+void MarkSweep::RecursiveMark() {
+  void* arg = reinterpret_cast<void*>(this);
+  const std::vector<Space*>& spaces = Heap::GetSpaces();
+  for (size_t i = 0; i < spaces.size(); ++i) {
+    if (spaces[i]->IsCondemned()) {
+      uintptr_t base = reinterpret_cast<uintptr_t>(spaces[i]->GetBase());
+      uintptr_t limit = reinterpret_cast<uintptr_t>(spaces[i]->GetLimit());
+      mark_bitmap_->ScanWalk(base, limit, &MarkSweep::ScanBitmapCallback, arg);
+    }
+  }
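+  // With the finger at the highest address, every object marked from here
+  // on is pushed onto the mark stack and drained by ProcessMarkStack().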
+  finger_ = reinterpret_cast<Object*>(~static_cast<uintptr_t>(0));
+  ProcessMarkStack();
+}
+
+void MarkSweep::ReMarkRoots() {
   LOG(FATAL) << "Unimplemented";
 }
 
+void MarkSweep::SweepSystemWeaks() {
+  LOG(FATAL) << "Unimplemented";
+}
+
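+// Called by HeapBitmap::SweepWalk with batches of objects that are live
+// but unmarked; returns their storage to the owning space.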
+void MarkSweep::SweepCallback(size_t num_ptrs, void** ptrs, void* arg) {
+  // TODO: lock heap if concurrent
+  Space* space = static_cast<Space*>(arg);
+  for (size_t i = 0; i < num_ptrs; ++i) {
+    Object* obj = static_cast<Object*>(ptrs[i]);
+    space->Free(obj);
+  }
+  // TODO: unlock heap if concurrent
+}
+
+void MarkSweep::Sweep() {
+  const std::vector<Space*>& spaces = Heap::GetSpaces();
+  for (size_t i = 0; i < spaces.size(); ++i) {
+    if (spaces[i]->IsCondemned()) {
+      uintptr_t base = reinterpret_cast<uintptr_t>(spaces[i]->GetBase());
+      uintptr_t limit = reinterpret_cast<uintptr_t>(spaces[i]->GetLimit());
+      void* arg = static_cast<void*>(spaces[i]);
+      HeapBitmap::SweepWalk(*live_bitmap_, *mark_bitmap_, base, limit,
+                            &MarkSweep::SweepCallback, arg);
+    }
+  }
+}
+
 // Scans instance fields.
 void MarkSweep::ScanInstanceFields(const Object* obj) {
   DCHECK(obj != NULL);
@@ -91,12 +160,9 @@
 void MarkSweep::ScanStaticFields(const Class* klass) {
   DCHECK(klass != NULL);
   for (size_t i = 0; i < klass->NumStaticFields(); ++i) {
-    // char ch = clazz->sfields[i].signature[0];
     const StaticField* static_field = klass->GetStaticField(i);
     char ch = static_field->GetType();
     if (ch == '[' || ch == 'L') {
-      // Object *obj = clazz->sfields[i].value.l;
-      // markObject(obj, ctx);
       const Object* obj = static_field->GetObject();
       MarkObject(obj);
     }
@@ -186,7 +252,7 @@
 void MarkSweep::DelayReferenceReferent(Object* obj) {
   DCHECK(obj != NULL);
   DCHECK(obj->GetClass() != NULL);
-  //DCHECK(IS_CLASS_FLAG_SET(obj->clazz, CLASS_ISREFERENCE));
+  DCHECK(obj->IsReference());
   Object* pending = obj->GetFieldObject(reference_pendingNext_offset_);
   Object* referent = obj->GetFieldObject(reference_referent_offset_);
   if (pending == NULL && referent != NULL && !IsMarked(referent)) {
@@ -208,7 +274,7 @@
 // Scans the header and field references of a data object.  If the
 // scanned object is a reference subclass, it is scheduled for later
 // processing.
-void MarkSweep::ScanDataObject(const Object *obj) {
+void MarkSweep::ScanOther(const Object* obj) {
   DCHECK(obj != NULL);
   DCHECK(obj->GetClass() != NULL);
   MarkObject(obj->GetClass());
@@ -229,7 +295,7 @@
   } else if (obj->IsArray()) {
     ScanArray(obj);
   } else {
-    ScanDataObject(obj);
+    ScanOther(obj);
   }
 }
 
@@ -344,9 +410,7 @@
   DCHECK(*list == NULL);
 }
 
-/*
- * Process reference class instances and schedule finalizations.
- */
+// Process reference class instances and schedule finalizations.
 void MarkSweep::ProcessReferences(Object** soft_references, bool clear_soft,
                                   Object** weak_references,
                                   Object** finalizer_references,
@@ -403,6 +467,7 @@
 }
 
 MarkSweep::~MarkSweep() {
+  delete mark_stack_;
   mark_bitmap_->Clear();
 }
 
diff --git a/src/mark_sweep.h b/src/mark_sweep.h
index d0c0a44..570d90d 100644
--- a/src/mark_sweep.h
+++ b/src/mark_sweep.h
@@ -15,14 +15,31 @@
 
 class MarkSweep {
  public:
+  MarkSweep() :
+      mark_stack_(NULL), mark_bitmap_(NULL), live_bitmap_(NULL),
+      finger_(NULL), condemned_(NULL) {
+  }
+
   ~MarkSweep();
 
+  // Initializes internal structures.
+  bool Init();
+
   // Marks the root set at the start of a garbage collection.
   void MarkRoots();
 
+  // Builds a mark stack and recursively marks until it empties.
+  void RecursiveMark();
+
   // Remarks the root set after completing the concurrent mark.
   void ReMarkRoots();
 
+  void ProcessReferences(bool clear_soft_references) {
+    ProcessReferences(&soft_reference_list_, clear_soft_references,
+                      &weak_reference_list_,
+                      &finalizer_reference_list_,
+                      &phantom_reference_list_);
+  }
+
   // Sweeps unmarked objects to complete the garbage collection.
   void Sweep();
 
@@ -38,6 +55,10 @@
   // Yuck.
   void MarkObject0(const Object* obj, bool check_finger);
 
+  static void ScanBitmapCallback(Object* obj, void* finger, void* arg);
+
+  static void SweepCallback(size_t num_ptrs, void** ptrs, void* arg);
+
   // Blackens an object.
   void ScanObject(const Object* obj);
 
@@ -56,7 +77,7 @@
   // Grays references in an array.
   void ScanArray(const Object* obj);
 
-  void ScanDataObject(const Object* obj);
+  void ScanOther(const Object* obj);
 
   // Blackens objects grayed during a garbage collection.
   void ScanDirtyObjects();
@@ -90,16 +111,18 @@
 
   void ClearWhiteReferences(Object** list);
 
-  void ProcessReferences(Object** soft_references, bool clear_soft,
+  void ProcessReferences(Object** soft_references, bool clear_soft_references,
                          Object** weak_references,
                          Object** finalizer_references,
                          Object** phantom_references);
 
+  void SweepSystemWeaks();
+
   MarkStack* mark_stack_;
 
   HeapBitmap* mark_bitmap_;
 
-  // HeapBitmap* alloc_bitmap_;
+  HeapBitmap* live_bitmap_;
 
   Object* finger_;
 
diff --git a/src/space.cc b/src/space.cc
index 9e091f2..a0da45c 100644
--- a/src/space.cc
+++ b/src/space.cc
@@ -98,6 +98,10 @@
   return num_bytes;
 }
 
+size_t Space::AllocationSize(const Object* obj) {
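+  // mspace_usable_size() reports only the usable payload of the chunk, so
+  // add the boundary tag to charge the allocation's full footprint.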
+  return mspace_usable_size(mspace_, obj) + kChunkOverhead;
+}
+
 void Space::DontNeed(void* start, void* end, void* num_bytes) {
   start = (void*)RoundUp((uintptr_t)start, 4096);
   end = (void*)RoundDown((uintptr_t)end, 4096);
@@ -125,6 +129,7 @@
 }
 
 void Space::Grow(size_t new_size) {
+  LOG(FATAL) << "Unimplemented";
 }
 
 }  // namespace art
diff --git a/src/space.h b/src/space.h
index c61d91f..663efa5 100644
--- a/src/space.h
+++ b/src/space.h
@@ -34,11 +34,24 @@
     return base_;
   }
 
+  byte* GetLimit() {
+    return limit_;
+  }
+
   size_t Size() {
     return limit_ - base_;
   }
 
+  size_t AllocationSize(const Object* obj);
+
+  bool IsCondemned() {
+    return true;  // TODO
+  }
+
  private:
+  // The boundary tag overhead.
+  static const size_t kChunkOverhead = kWordSize;
+
   Space(size_t startup_size, size_t maximum_size) :
       mspace_(NULL),
       base_(NULL),
@@ -46,6 +59,7 @@
       maximum_size_(maximum_size) {
   }
 
+  // Initializes the space and underlying storage.
   bool Init();
 
   void* CreateMallocSpace(void* base, size_t startup_size,
@@ -63,6 +77,8 @@
 
   size_t maximum_size_;
 
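+  // Whether objects in this space may be reclaimed.  Currently unset;
+  // IsCondemned() hard-codes true above.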
+  bool is_condemned_;
+
   DISALLOW_IMPLICIT_CONSTRUCTORS(Space);
 };
 
diff --git a/src/stl_util.h b/src/stl_util.h
new file mode 100644
index 0000000..024f162
--- /dev/null
+++ b/src/stl_util.h
@@ -0,0 +1,47 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+
+#ifndef ART_SRC_STL_UTIL_H_
+#define ART_SRC_STL_UTIL_H_
+
+namespace art {
+
+// STLDeleteContainerPointers()
+//  For a range within a container of pointers, calls delete
+//  (non-array version) on these pointers.
+// NOTE: for these functions, we could just implement a DeleteObject
+// functor and then call for_each() on the range and functor, but this
+// requires us to pull in all of algorithm.h, which seems expensive.
+// For hash_[multi]set, it is important that this deletes behind the iterator
+// because the hash_set may call the hash function on the iterator when it is
+// advanced, which could result in the hash function trying to dereference a
+// stale pointer.
+template <class ForwardIterator>
+void STLDeleteContainerPointers(ForwardIterator begin,
+                                ForwardIterator end) {
+  while (begin != end) {
+    ForwardIterator temp = begin;
+    ++begin;
+    delete *temp;
+  }
+}
+
+// STLDeleteElements() deletes all the elements in an STL container and clears
+// the container.  This function is suitable for use with a vector, set,
+// hash_set, or any other STL container which defines sensible begin(), end(),
+// and clear() methods.
+//
+// If container is NULL, this function is a no-op.
+template <class T>
+void STLDeleteElements(T* container) {
+  if (!container) return;
+  STLDeleteContainerPointers(container->begin(), container->end());
+  container->clear();
+}
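+
+// Typical use, mirroring Heap::Destroy():
+//   std::vector<Space*> spaces;
+//   STLDeleteElements(&spaces);  // deletes each Space, then clears spaces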
+
+}  // namespace art
+
+#endif  // ART_SRC_STL_UTIL_H_