Add runtime options for large object space

Adds the following two options:
Option -XX:LargeObjectSpace={freelist, map, disabled} selects the
large object space implementation, or disables it entirely.
Option -XX:LargeObjectThreshold=size specifies the minimum size at
which primitive arrays are allocated in the large object space.
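
For example, an illustrative invocation (the threshold value here is
hypothetical; the exact size syntax follows the runtime's usual -XX
option parsing):

  dalvikvm -XX:LargeObjectSpace=map -XX:LargeObjectThreshold=12288 ...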

Added handling in the collectors for the case where the large object
space is null (i.e. disabled).
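
The guard is the same in each collector, sketched from the hunks below:

  space::LargeObjectSpace* los = heap_->GetLargeObjectsSpace();
  if (los != nullptr) {
    // Only sweep the LOS when it exists; with the large object space
    // disabled, GetLargeObjectsSpace() returns null.
    RecordFreeLOS(los->Sweep(swap_bitmaps));
  }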

Fixed an error in the mem map large object space where we didn't use
the page-aligned size when recording bytes allocated; this resulted in
heaps appearing slightly smaller than they should be.
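
A minimal sketch of the accounting fix (names illustrative, not the
actual LargeObjectMapSpace code): the map space reserves whole pages
per object, so the page-aligned size is what must be recorded.

  // Assumed helpers: RoundUp() and kPageSize as used elsewhere in the runtime.
  size_t allocation_size = RoundUp(num_bytes, kPageSize);
  num_bytes_allocated_ += allocation_size;  // not the raw num_bytes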

Change-Id: I6f17c3534b59e7dc68cd375153e7a846799b3da4
diff --git a/runtime/gc/collector/mark_compact.cc b/runtime/gc/collector/mark_compact.cc
index 4044852..b3bed64 100644
--- a/runtime/gc/collector/mark_compact.cc
+++ b/runtime/gc/collector/mark_compact.cc
@@ -547,8 +547,11 @@
 }
 
 void MarkCompact::SweepLargeObjects(bool swap_bitmaps) {
-  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
-  RecordFreeLOS(heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps));
+  space::LargeObjectSpace* los = heap_->GetLargeObjectsSpace();
+  if (los != nullptr) {
+    TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
+    RecordFreeLOS(los->Sweep(swap_bitmaps));
+  }
 }
 
 // Process the "referent" field in a java.lang.ref.Reference.  If the referent has not yet been
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 95530be..930499a 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -374,7 +374,8 @@
     }
     space::LargeObjectSpace* large_object_space = mark_sweep_->GetHeap()->GetLargeObjectsSpace();
     if (UNLIKELY(obj == nullptr || !IsAligned<kPageSize>(obj) ||
-                 (kIsDebugBuild && !large_object_space->Contains(obj)))) {
+                 (kIsDebugBuild && large_object_space != nullptr &&
+                     !large_object_space->Contains(obj)))) {
       LOG(ERROR) << "Tried to mark " << obj << " not contained by any spaces";
       LOG(ERROR) << "Attempting see if it's a bad root";
       mark_sweep_->VerifyRoots();
@@ -481,7 +482,7 @@
   // See if the root is on any space bitmap.
   if (heap_->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == nullptr) {
     space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
-    if (!large_object_space->Contains(root)) {
+    if (large_object_space != nullptr && !large_object_space->Contains(root)) {
       LOG(ERROR) << "Found invalid root: " << root << " with type " << root_type;
       if (visitor != NULL) {
         LOG(ERROR) << visitor->DescribeLocation() << " in VReg: " << vreg;
@@ -1074,20 +1075,22 @@
   }
   // Handle the large object space.
   space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
-  accounting::LargeObjectBitmap* large_live_objects = large_object_space->GetLiveBitmap();
-  accounting::LargeObjectBitmap* large_mark_objects = large_object_space->GetMarkBitmap();
-  if (swap_bitmaps) {
-    std::swap(large_live_objects, large_mark_objects);
-  }
-  for (size_t i = 0; i < count; ++i) {
-    Object* obj = objects[i];
-    // Handle large objects.
-    if (kUseThreadLocalAllocationStack && obj == nullptr) {
-      continue;
+  if (large_object_space != nullptr) {
+    accounting::LargeObjectBitmap* large_live_objects = large_object_space->GetLiveBitmap();
+    accounting::LargeObjectBitmap* large_mark_objects = large_object_space->GetMarkBitmap();
+    if (swap_bitmaps) {
+      std::swap(large_live_objects, large_mark_objects);
     }
-    if (!large_mark_objects->Test(obj)) {
-      ++freed_los.objects;
-      freed_los.bytes += large_object_space->Free(self, obj);
+    for (size_t i = 0; i < count; ++i) {
+      Object* obj = objects[i];
+      // Handle large objects.
+      if (kUseThreadLocalAllocationStack && obj == nullptr) {
+        continue;
+      }
+      if (!large_mark_objects->Test(obj)) {
+        ++freed_los.objects;
+        freed_los.bytes += large_object_space->Free(self, obj);
+      }
     }
   }
   {
@@ -1125,8 +1128,11 @@
 }
 
 void MarkSweep::SweepLargeObjects(bool swap_bitmaps) {
-  TimingLogger::ScopedTiming split(__FUNCTION__, GetTimings());
-  RecordFreeLOS(heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps));
+  space::LargeObjectSpace* los = heap_->GetLargeObjectsSpace();
+  if (los != nullptr) {
+    TimingLogger::ScopedTiming split(__FUNCTION__, GetTimings());
+    RecordFreeLOS(los->Sweep(swap_bitmaps));
+  }
 }
 
 // Process the "referent" field in a java.lang.ref.Reference.  If the referent has not yet been
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index 8fb33ce..c8fa869 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -365,23 +365,23 @@
   }
 
   CHECK_EQ(is_large_object_space_immune_, collect_from_space_only_);
-  if (is_large_object_space_immune_) {
+  space::LargeObjectSpace* los = GetHeap()->GetLargeObjectsSpace();
+  if (is_large_object_space_immune_ && los != nullptr) {
     TimingLogger::ScopedTiming t("VisitLargeObjects", GetTimings());
     DCHECK(collect_from_space_only_);
     // Delay copying the live set to the marked set until here from
     // BindBitmaps() as the large objects on the allocation stack may
     // be newly added to the live set above in MarkAllocStackAsLive().
-    GetHeap()->GetLargeObjectsSpace()->CopyLiveToMarked();
+    los->CopyLiveToMarked();
 
     // When the large object space is immune, we need to scan the
     // large object space as roots as they contain references to their
     // classes (primitive array classes) that could move though they
     // don't contain any other references.
-    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
-    accounting::LargeObjectBitmap* large_live_bitmap = large_object_space->GetLiveBitmap();
+    accounting::LargeObjectBitmap* large_live_bitmap = los->GetLiveBitmap();
     SemiSpaceScanObjectVisitor visitor(this);
-    large_live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(large_object_space->Begin()),
-                                        reinterpret_cast<uintptr_t>(large_object_space->End()),
+    large_live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(los->Begin()),
+                                        reinterpret_cast<uintptr_t>(los->End()),
                                         visitor);
   }
   // Recursively process the mark stack.
@@ -655,8 +655,11 @@
 
 void SemiSpace::SweepLargeObjects(bool swap_bitmaps) {
   DCHECK(!is_large_object_space_immune_);
-  TimingLogger::ScopedTiming split("SweepLargeObjects", GetTimings());
-  RecordFreeLOS(heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps));
+  space::LargeObjectSpace* los = heap_->GetLargeObjectsSpace();
+  if (los != nullptr) {
+    TimingLogger::ScopedTiming split("SweepLargeObjects", GetTimings());
+    RecordFreeLOS(los->Sweep(swap_bitmaps));
+  }
 }
 
 // Process the "referent" field in a java.lang.ref.Reference.  If the referent has not yet been
@@ -751,6 +754,7 @@
   from_space_ = nullptr;
   CHECK(mark_stack_->IsEmpty());
   mark_stack_->Reset();
+  space::LargeObjectSpace* los = GetHeap()->GetLargeObjectsSpace();
   if (generational_) {
     // Decide whether to do a whole heap collection or a bump pointer
     // only space collection at the next collection by updating
@@ -762,7 +766,7 @@
       bytes_promoted_since_last_whole_heap_collection_ += bytes_promoted_;
       bool bytes_promoted_threshold_exceeded =
           bytes_promoted_since_last_whole_heap_collection_ >= kBytesPromotedThreshold;
-      uint64_t current_los_bytes_allocated = GetHeap()->GetLargeObjectsSpace()->GetBytesAllocated();
+      uint64_t current_los_bytes_allocated = los != nullptr ? los->GetBytesAllocated() : 0U;
       uint64_t last_los_bytes_allocated =
           large_object_bytes_allocated_at_last_whole_heap_collection_;
       bool large_object_bytes_threshold_exceeded =
@@ -775,7 +779,7 @@
       // Reset the counters.
       bytes_promoted_since_last_whole_heap_collection_ = bytes_promoted_;
       large_object_bytes_allocated_at_last_whole_heap_collection_ =
-          GetHeap()->GetLargeObjectsSpace()->GetBytesAllocated();
+          los != nullptr ? los->GetBytesAllocated() : 0U;
       collect_from_space_only_ = true;
     }
   }
diff --git a/runtime/gc/collector/sticky_mark_sweep.cc b/runtime/gc/collector/sticky_mark_sweep.cc
index 5a58446..4ed6abc 100644
--- a/runtime/gc/collector/sticky_mark_sweep.cc
+++ b/runtime/gc/collector/sticky_mark_sweep.cc
@@ -16,7 +16,7 @@
 
 #include "gc/heap.h"
 #include "gc/space/large_object_space.h"
-#include "gc/space/space.h"
+#include "gc/space/space-inl.h"
 #include "sticky_mark_sweep.h"
 #include "thread-inl.h"
 
@@ -32,7 +32,6 @@
 
 void StickyMarkSweep::BindBitmaps() {
   PartialMarkSweep::BindBitmaps();
-
   WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
   // For sticky GC, we want to bind the bitmaps of all spaces as the allocation stack lets us
   // know what was allocated since the last GC. A side-effect of binding the allocation space mark
@@ -44,7 +43,10 @@
       space->AsContinuousMemMapAllocSpace()->BindLiveToMarkBitmap();
     }
   }
-  GetHeap()->GetLargeObjectsSpace()->CopyLiveToMarked();
+  for (const auto& space : GetHeap()->GetDiscontinuousSpaces()) {
+    CHECK(space->IsLargeObjectSpace());
+    space->AsLargeObjectSpace()->CopyLiveToMarked();
+  }
 }
 
 void StickyMarkSweep::MarkReachableObjects() {