Make allocations report usable size.

Work-in-progress to allow arrays to fill usable size. Bug: 13028925.
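
The diff below threads a new trailing out-parameter through the space Alloc
calls (passed as nullptr where the caller does not need it). A minimal sketch
of the intended shape; the parameter name usable_size and the assertion are
assumed for illustration rather than taken verbatim from this change:

  // Sketch only: the allocator reports how many bytes the returned block can
  // actually hold, which may exceed the requested size due to size-class
  // rounding. Callers that do not care pass nullptr.
  mirror::Object* Alloc(Thread* self, size_t num_bytes,
                        size_t* bytes_allocated, size_t* usable_size);

  // Hypothetical call site: an array allocation could later grow its length
  // to fill the usable size instead of wasting the slack.
  size_t bytes_allocated = 0;
  size_t usable_size = 0;
  mirror::Object* obj = space->Alloc(self, num_bytes, &bytes_allocated, &usable_size);
  if (obj != nullptr) {
    DCHECK_LE(num_bytes, usable_size);  // Objects are never larger than their usable size.
  }
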
Use C++11's override keyword on GCC >= 4.7 to ensure that we override GC and
allocator methods.
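
The diff uses OVERRIDE and FINAL macros rather than the raw keywords so older
toolchains still build; a rough sketch of how such macros can be defined (the
macro names come from the diff, the exact compiler-version check is an
assumption):

  // Sketch only: expand to the C++11 keywords when the compiler supports
  // them (GCC >= 4.7 or Clang), otherwise expand to nothing.
  #if defined(__clang__) || \
      (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 7)))
  #define OVERRIDE override
  #define FINAL final
  #else
  #define OVERRIDE
  #define FINAL
  #endif
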
Move initial mirror::Class set up into a Functor so that all allocated objects
have non-zero sizes. Use this property to assert that all objects are never
larger than their usable size.
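
Passing the initial class set-up to the allocator as a functor means it runs
before the object is published, so a freshly allocated mirror::Class already
has a valid non-zero size. A hypothetical sketch of the shape (the visitor
name and methods are illustrative, not necessarily the actual ART API):

  // Illustrative only: a functor the heap invokes right after carving out
  // the raw allocation, while the memory is still thread-local.
  class InitializeClassVisitor {
   public:
    explicit InitializeClassVisitor(uint32_t class_size) : class_size_(class_size) {}

    void operator()(mirror::Object* obj, size_t usable_size) const {
      // Install the size first so the object never reports a zero size, then
      // check it fits in what the allocator actually handed back.
      obj->AsClass()->SetClassSize(class_size_);
      DCHECK_LE(obj->SizeOf(), usable_size);
    }

   private:
    const uint32_t class_size_;
  };
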
Other bits of GC-related clean-up: missing initialization, missing use of
const, hot methods in .cc files, "unimplemented" functions that fail at
runtime in header files, reduced header file includes, valgrind's space moved
into its own files, and a reduced number of array allocation routines.

Change-Id: Id5760041a2d7f94dcaf17ec760f6095ec75dadaa
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index 29fafd6..c55b2b2 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -64,16 +64,18 @@
 
   ~MarkSweep() {}
 
-  virtual void InitializePhase();
-  virtual bool IsConcurrent() const;
-  virtual bool HandleDirtyObjectsPhase() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
-  virtual void MarkingPhase() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  virtual void ReclaimPhase() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  virtual void FinishPhase() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  virtual void InitializePhase() OVERRIDE;
+  virtual void MarkingPhase() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  virtual bool HandleDirtyObjectsPhase() OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+  virtual void ReclaimPhase() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  virtual void FinishPhase() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   virtual void MarkReachableObjects()
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
-  virtual GcType GetGcType() const {
+
+  virtual bool IsConcurrent() const OVERRIDE;
+
+  virtual GcType GetGcType() const OVERRIDE {
     return kGcTypeFull;
   }
 
@@ -131,7 +133,7 @@
   void ProcessReferences(Thread* self)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  // Update and mark references from immune spaces.
+  // Update and mark references from immune spaces. Virtual as overridden by StickyMarkSweep.
   virtual void UpdateAndMarkModUnion()
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
@@ -140,7 +142,8 @@
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  // Sweeps unmarked objects to complete the garbage collection.
+  // Sweeps unmarked objects to complete the garbage collection. Virtual as by default it sweeps
+  // all allocation spaces. Partial and sticky GCs want to just sweep a subset of the heap.
   virtual void Sweep(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
   // Sweeps unmarked objects to complete the garbage collection.
@@ -232,7 +235,7 @@
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
   // Mark the vm thread roots.
-  virtual void MarkThreadRoots(Thread* self)
+  void MarkThreadRoots(Thread* self)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
diff --git a/runtime/gc/collector/partial_mark_sweep.h b/runtime/gc/collector/partial_mark_sweep.h
index 3b788f4..44ae9e9 100644
--- a/runtime/gc/collector/partial_mark_sweep.h
+++ b/runtime/gc/collector/partial_mark_sweep.h
@@ -26,7 +26,8 @@
 
 class PartialMarkSweep : public MarkSweep {
  public:
-  virtual GcType GetGcType() const {
+  // Virtual as overridden by StickyMarkSweep.
+  virtual GcType GetGcType() const OVERRIDE {
     return kGcTypePartial;
   }
 
@@ -35,8 +36,9 @@
 
  protected:
   // Bind the live bits to the mark bits of bitmaps for spaces that aren't collected for partial
-  // collections, ie the Zygote space. Also mark this space is immune.
-  virtual void BindBitmaps() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  // collections, i.e. the Zygote space. Also mark this space as immune. Virtual as overridden
+  // by StickyMarkSweep.
+  virtual void BindBitmaps() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
  private:
   DISALLOW_COPY_AND_ASSIGN(PartialMarkSweep);
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index d639db5..a4c9dea 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -133,13 +133,15 @@
       immune_end_(nullptr),
       is_large_object_space_immune_(false),
       to_space_(nullptr),
+      to_space_live_bitmap_(nullptr),
       from_space_(nullptr),
       self_(nullptr),
       generational_(generational),
       last_gc_to_space_end_(nullptr),
       bytes_promoted_(0),
       whole_heap_collection_(true),
-      whole_heap_collection_interval_counter_(0) {
+      whole_heap_collection_interval_counter_(0),
+      saved_bytes_(0) {
 }
 
 void SemiSpace::InitializePhase() {
@@ -263,7 +265,7 @@
     semi_space_->ScanObject(obj);
   }
  private:
-  SemiSpace* semi_space_;
+  SemiSpace* const semi_space_;
 };
 
 void SemiSpace::MarkReachableObjects() {
@@ -467,10 +469,10 @@
     // of an old generation.)
     size_t bytes_promoted;
     space::MallocSpace* promo_dest_space = GetHeap()->GetPrimaryFreeListSpace();
-    forward_address = promo_dest_space->Alloc(self_, object_size, &bytes_promoted);
+    forward_address = promo_dest_space->Alloc(self_, object_size, &bytes_promoted, nullptr);
     if (forward_address == nullptr) {
       // If out of space, fall back to the to-space.
-      forward_address = to_space_->Alloc(self_, object_size, &bytes_allocated);
+      forward_address = to_space_->Alloc(self_, object_size, &bytes_allocated, nullptr);
     } else {
       GetHeap()->num_bytes_allocated_.FetchAndAdd(bytes_promoted);
       bytes_promoted_ += bytes_promoted;
@@ -511,7 +513,7 @@
     DCHECK(forward_address != nullptr);
   } else {
     // If it's allocated after the last GC (younger), copy it to the to-space.
-    forward_address = to_space_->Alloc(self_, object_size, &bytes_allocated);
+    forward_address = to_space_->Alloc(self_, object_size, &bytes_allocated, nullptr);
   }
   // Copy over the object and add it to the mark stack since we still need to update its
   // references.
diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h
index ba97376..c164c5f 100644
--- a/runtime/gc/collector/semi_space.h
+++ b/runtime/gc/collector/semi_space.h
@@ -275,7 +275,7 @@
   // When true, the generational mode (promotion and the bump pointer
   // space only collection) is enabled. TODO: move these to a new file
   // as a new garbage collector?
-  bool generational_;
+  const bool generational_;
 
   // Used for the generational mode. the end/top of the bump
   // pointer space at the end of the last collection.
diff --git a/runtime/gc/collector/sticky_mark_sweep.cc b/runtime/gc/collector/sticky_mark_sweep.cc
index 9e3adb4..ce51ac5 100644
--- a/runtime/gc/collector/sticky_mark_sweep.cc
+++ b/runtime/gc/collector/sticky_mark_sweep.cc
@@ -59,11 +59,6 @@
   SweepArray(GetHeap()->GetLiveStack(), false);
 }
 
-void StickyMarkSweep::MarkThreadRoots(Thread* self) {
-  MarkRootsCheckpoint(self);
-}
-
-
 }  // namespace collector
 }  // namespace gc
 }  // namespace art
diff --git a/runtime/gc/collector/sticky_mark_sweep.h b/runtime/gc/collector/sticky_mark_sweep.h
index b675877..98f2b59 100644
--- a/runtime/gc/collector/sticky_mark_sweep.h
+++ b/runtime/gc/collector/sticky_mark_sweep.h
@@ -25,9 +25,9 @@
 namespace gc {
 namespace collector {
 
-class StickyMarkSweep : public PartialMarkSweep {
+class StickyMarkSweep FINAL : public PartialMarkSweep {
  public:
-  GcType GetGcType() const {
+  GcType GetGcType() const OVERRIDE {
     return kGcTypeSticky;
   }
 
@@ -37,21 +37,17 @@
  protected:
   // Bind the live bits to the mark bits of bitmaps for all spaces, all spaces other than the
   // alloc space will be marked as immune.
-  void BindBitmaps() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void BindBitmaps() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  void MarkReachableObjects()
+  void MarkReachableObjects() OVERRIDE
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
-  virtual void MarkThreadRoots(Thread* self)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
-
-  void Sweep(bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+  void Sweep(bool swap_bitmaps) OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
   // Don't need to do anything special here since we scan all the cards which may have references
   // to the newly allocated objects.
-  virtual void UpdateAndMarkModUnion() { }
+  void UpdateAndMarkModUnion() OVERRIDE { }
 
  private:
   DISALLOW_COPY_AND_ASSIGN(StickyMarkSweep);