Add runtime options for large object space

Adds the following two options:
Option -XX:LargeObjectSpace={freelist,map,disabled} selects which large
object space implementation to use, or disables the space entirely.
Option -XX:LargeObjectThreshold=N specifies the minimum size for which
primitive arrays are allocated in the large object space.
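An illustrative invocation (the classpath and class name are hypothetical,
and the threshold value is arbitrary; N accepts the same k/m/g suffixes as
the other memory options, since it goes through ParseMemoryOption):

  dalvikvm -XX:LargeObjectSpace=freelist -XX:LargeObjectThreshold=16k \
      -cp /data/app.jar Main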

Added handling for a null large object space in the collectors.

Fixed an error in the mem map space where we didn't use the page-aligned
size of the LOS mem map for the bytes-allocated accounting; this resulted
in heaps appearing a bit smaller than they should be.
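A rough sketch of the discrepancy (illustrative numbers, assuming 4 KiB
pages; the variable names are made up):

  size_t requested = 9000;                           // caller's allocation size
  size_t mapped    = RoundUp(requested, kPageSize);  // 12288: what is actually mapped
  // Accounting with the requested size instead of the mapped size
  // undercounts by mapped - requested (3288 bytes here) per large object.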

Change-Id: I6f17c3534b59e7dc68cd375153e7a846799b3da4
diff --git a/runtime/gc/collector/mark_compact.cc b/runtime/gc/collector/mark_compact.cc
index 4044852..b3bed64 100644
--- a/runtime/gc/collector/mark_compact.cc
+++ b/runtime/gc/collector/mark_compact.cc
@@ -547,8 +547,11 @@
 }
 
 void MarkCompact::SweepLargeObjects(bool swap_bitmaps) {
-  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
-  RecordFreeLOS(heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps));
+  space::LargeObjectSpace* los = heap_->GetLargeObjectsSpace();
+  if (los != nullptr) {
+    TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
+    RecordFreeLOS(los->Sweep(swap_bitmaps));
+  }
 }
 
 // Process the "referent" field in a java.lang.ref.Reference.  If the referent has not yet been
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 95530be..930499a 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -374,7 +374,8 @@
     }
     space::LargeObjectSpace* large_object_space = mark_sweep_->GetHeap()->GetLargeObjectsSpace();
     if (UNLIKELY(obj == nullptr || !IsAligned<kPageSize>(obj) ||
-                 (kIsDebugBuild && !large_object_space->Contains(obj)))) {
+                 (kIsDebugBuild && large_object_space != nullptr &&
+                     !large_object_space->Contains(obj)))) {
       LOG(ERROR) << "Tried to mark " << obj << " not contained by any spaces";
       LOG(ERROR) << "Attempting see if it's a bad root";
       mark_sweep_->VerifyRoots();
@@ -481,7 +482,7 @@
   // See if the root is on any space bitmap.
   if (heap_->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == nullptr) {
     space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
-    if (!large_object_space->Contains(root)) {
+    if (large_object_space != nullptr && !large_object_space->Contains(root)) {
       LOG(ERROR) << "Found invalid root: " << root << " with type " << root_type;
       if (visitor != NULL) {
         LOG(ERROR) << visitor->DescribeLocation() << " in VReg: " << vreg;
@@ -1074,20 +1075,22 @@
   }
   // Handle the large object space.
   space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
-  accounting::LargeObjectBitmap* large_live_objects = large_object_space->GetLiveBitmap();
-  accounting::LargeObjectBitmap* large_mark_objects = large_object_space->GetMarkBitmap();
-  if (swap_bitmaps) {
-    std::swap(large_live_objects, large_mark_objects);
-  }
-  for (size_t i = 0; i < count; ++i) {
-    Object* obj = objects[i];
-    // Handle large objects.
-    if (kUseThreadLocalAllocationStack && obj == nullptr) {
-      continue;
+  if (large_object_space != nullptr) {
+    accounting::LargeObjectBitmap* large_live_objects = large_object_space->GetLiveBitmap();
+    accounting::LargeObjectBitmap* large_mark_objects = large_object_space->GetMarkBitmap();
+    if (swap_bitmaps) {
+      std::swap(large_live_objects, large_mark_objects);
     }
-    if (!large_mark_objects->Test(obj)) {
-      ++freed_los.objects;
-      freed_los.bytes += large_object_space->Free(self, obj);
+    for (size_t i = 0; i < count; ++i) {
+      Object* obj = objects[i];
+      // Handle large objects.
+      if (kUseThreadLocalAllocationStack && obj == nullptr) {
+        continue;
+      }
+      if (!large_mark_objects->Test(obj)) {
+        ++freed_los.objects;
+        freed_los.bytes += large_object_space->Free(self, obj);
+      }
     }
   }
   {
@@ -1125,8 +1128,11 @@
 }
 
 void MarkSweep::SweepLargeObjects(bool swap_bitmaps) {
-  TimingLogger::ScopedTiming split(__FUNCTION__, GetTimings());
-  RecordFreeLOS(heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps));
+  space::LargeObjectSpace* los = heap_->GetLargeObjectsSpace();
+  if (los != nullptr) {
+    TimingLogger::ScopedTiming split(__FUNCTION__, GetTimings());
+    RecordFreeLOS(los->Sweep(swap_bitmaps));
+  }
 }
 
 // Process the "referent" field in a java.lang.ref.Reference.  If the referent has not yet been
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index 8fb33ce..c8fa869 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -365,23 +365,23 @@
   }
 
   CHECK_EQ(is_large_object_space_immune_, collect_from_space_only_);
-  if (is_large_object_space_immune_) {
+  space::LargeObjectSpace* los = GetHeap()->GetLargeObjectsSpace();
+  if (is_large_object_space_immune_ && los != nullptr) {
     TimingLogger::ScopedTiming t("VisitLargeObjects", GetTimings());
     DCHECK(collect_from_space_only_);
     // Delay copying the live set to the marked set until here from
     // BindBitmaps() as the large objects on the allocation stack may
     // be newly added to the live set above in MarkAllocStackAsLive().
-    GetHeap()->GetLargeObjectsSpace()->CopyLiveToMarked();
+    los->CopyLiveToMarked();
 
     // When the large object space is immune, we need to scan the
     // large object space as roots as they contain references to their
     // classes (primitive array classes) that could move though they
     // don't contain any other references.
-    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
-    accounting::LargeObjectBitmap* large_live_bitmap = large_object_space->GetLiveBitmap();
+    accounting::LargeObjectBitmap* large_live_bitmap = los->GetLiveBitmap();
     SemiSpaceScanObjectVisitor visitor(this);
-    large_live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(large_object_space->Begin()),
-                                        reinterpret_cast<uintptr_t>(large_object_space->End()),
+    large_live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(los->Begin()),
+                                        reinterpret_cast<uintptr_t>(los->End()),
                                         visitor);
   }
   // Recursively process the mark stack.
@@ -655,8 +655,11 @@
 
 void SemiSpace::SweepLargeObjects(bool swap_bitmaps) {
   DCHECK(!is_large_object_space_immune_);
-  TimingLogger::ScopedTiming split("SweepLargeObjects", GetTimings());
-  RecordFreeLOS(heap_->GetLargeObjectsSpace()->Sweep(swap_bitmaps));
+  space::LargeObjectSpace* los = heap_->GetLargeObjectsSpace();
+  if (los != nullptr) {
+    TimingLogger::ScopedTiming split("SweepLargeObjects", GetTimings());
+    RecordFreeLOS(los->Sweep(swap_bitmaps));
+  }
 }
 
 // Process the "referent" field in a java.lang.ref.Reference.  If the referent has not yet been
@@ -751,6 +754,7 @@
   from_space_ = nullptr;
   CHECK(mark_stack_->IsEmpty());
   mark_stack_->Reset();
+  space::LargeObjectSpace* los = GetHeap()->GetLargeObjectsSpace();
   if (generational_) {
     // Decide whether to do a whole heap collection or a bump pointer
     // only space collection at the next collection by updating
@@ -762,7 +766,7 @@
       bytes_promoted_since_last_whole_heap_collection_ += bytes_promoted_;
       bool bytes_promoted_threshold_exceeded =
           bytes_promoted_since_last_whole_heap_collection_ >= kBytesPromotedThreshold;
-      uint64_t current_los_bytes_allocated = GetHeap()->GetLargeObjectsSpace()->GetBytesAllocated();
+      uint64_t current_los_bytes_allocated = los != nullptr ? los->GetBytesAllocated() : 0U;
       uint64_t last_los_bytes_allocated =
           large_object_bytes_allocated_at_last_whole_heap_collection_;
       bool large_object_bytes_threshold_exceeded =
@@ -775,7 +779,7 @@
       // Reset the counters.
       bytes_promoted_since_last_whole_heap_collection_ = bytes_promoted_;
       large_object_bytes_allocated_at_last_whole_heap_collection_ =
-          GetHeap()->GetLargeObjectsSpace()->GetBytesAllocated();
+          los != nullptr ? los->GetBytesAllocated() : 0U;
       collect_from_space_only_ = true;
     }
   }
diff --git a/runtime/gc/collector/sticky_mark_sweep.cc b/runtime/gc/collector/sticky_mark_sweep.cc
index 5a58446..4ed6abc 100644
--- a/runtime/gc/collector/sticky_mark_sweep.cc
+++ b/runtime/gc/collector/sticky_mark_sweep.cc
@@ -16,7 +16,7 @@
 
 #include "gc/heap.h"
 #include "gc/space/large_object_space.h"
-#include "gc/space/space.h"
+#include "gc/space/space-inl.h"
 #include "sticky_mark_sweep.h"
 #include "thread-inl.h"
 
@@ -32,7 +32,6 @@
 
 void StickyMarkSweep::BindBitmaps() {
   PartialMarkSweep::BindBitmaps();
-
   WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
   // For sticky GC, we want to bind the bitmaps of all spaces as the allocation stack lets us
   // know what was allocated since the last GC. A side-effect of binding the allocation space mark
@@ -44,7 +43,10 @@
       space->AsContinuousMemMapAllocSpace()->BindLiveToMarkBitmap();
     }
   }
-  GetHeap()->GetLargeObjectsSpace()->CopyLiveToMarked();
+  for (const auto& space : GetHeap()->GetDiscontinuousSpaces()) {
+    CHECK(space->IsLargeObjectSpace());
+    space->AsLargeObjectSpace()->CopyLiveToMarked();
+  }
 }
 
 void StickyMarkSweep::MarkReachableObjects() {
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index bb7da0d..3e3b964 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -83,13 +83,6 @@
 // relative to partial/full GC. This may be desirable since sticky GCs interfere less with mutator
 // threads (lower pauses, use less memory bandwidth).
 static constexpr double kStickyGcThroughputAdjustment = 1.0;
-// Whether or not we use the free list large object space. Only use it if USE_ART_LOW_4G_ALLOCATOR
-// since this means that we have to use the slow msync loop in MemMap::MapAnonymous.
-#if USE_ART_LOW_4G_ALLOCATOR
-static constexpr bool kUseFreeListSpaceForLOS = true;
-#else
-static constexpr bool kUseFreeListSpaceForLOS = false;
-#endif
 // Whether or not we compact the zygote in PreZygoteFork.
 static constexpr bool kCompactZygote = kMovingCollector;
 // How many reserve entries are at the end of the allocation stack, these are only needed if the
@@ -107,8 +100,9 @@
            double target_utilization, double foreground_heap_growth_multiplier,
            size_t capacity, size_t non_moving_space_capacity, const std::string& image_file_name,
            const InstructionSet image_instruction_set, CollectorType foreground_collector_type,
-           CollectorType background_collector_type, size_t parallel_gc_threads,
-           size_t conc_gc_threads, bool low_memory_mode,
+           CollectorType background_collector_type,
+           space::LargeObjectSpaceType large_object_space_type, size_t large_object_threshold,
+           size_t parallel_gc_threads, size_t conc_gc_threads, bool low_memory_mode,
            size_t long_pause_log_threshold, size_t long_gc_log_threshold,
            bool ignore_max_footprint, bool use_tlab,
            bool verify_pre_gc_heap, bool verify_pre_sweeping_heap, bool verify_post_gc_heap,
@@ -135,7 +129,7 @@
       ignore_max_footprint_(ignore_max_footprint),
       zygote_creation_lock_("zygote creation lock", kZygoteCreationLock),
       zygote_space_(nullptr),
-      large_object_threshold_(kDefaultLargeObjectThreshold),  // Starts out disabled.
+      large_object_threshold_(large_object_threshold),
       collector_type_running_(kCollectorTypeNone),
       last_gc_type_(collector::kGcTypeNone),
       next_gc_type_(collector::kGcTypePartial),
@@ -338,13 +332,21 @@
   CHECK(non_moving_space_ != nullptr);
   CHECK(!non_moving_space_->CanMoveObjects());
   // Allocate the large object space.
-  if (kUseFreeListSpaceForLOS) {
-    large_object_space_ = space::FreeListSpace::Create("large object space", nullptr, capacity_);
+  if (large_object_space_type == space::kLargeObjectSpaceTypeFreeList) {
+    large_object_space_ = space::FreeListSpace::Create("free list large object space", nullptr,
+                                                       capacity_);
+    CHECK(large_object_space_ != nullptr) << "Failed to create large object space";
+  } else if (large_object_space_type == space::kLargeObjectSpaceTypeMap) {
+    large_object_space_ = space::LargeObjectMapSpace::Create("mem map large object space");
+    CHECK(large_object_space_ != nullptr) << "Failed to create large object space";
   } else {
-    large_object_space_ = space::LargeObjectMapSpace::Create("large object space");
+    // Disable the large object space by making the cutoff excessively large.
+    large_object_threshold_ = std::numeric_limits<size_t>::max();
+    large_object_space_ = nullptr;
   }
-  CHECK(large_object_space_ != nullptr) << "Failed to create large object space";
-  AddSpace(large_object_space_);
+  if (large_object_space_ != nullptr) {
+    AddSpace(large_object_space_);
+  }
   // Compute heap capacity. Continuous spaces are sorted in order of Begin().
   CHECK(!continuous_spaces_.empty());
   // Relies on the spaces being sorted.
@@ -712,7 +714,8 @@
   CHECK(space1 != nullptr);
   CHECK(space2 != nullptr);
   MarkAllocStack(space1->GetLiveBitmap(), space2->GetLiveBitmap(),
-                 large_object_space_->GetLiveBitmap(), stack);
+                 (large_object_space_ != nullptr ? large_object_space_->GetLiveBitmap() : nullptr),
+                 stack);
 }
 
 void Heap::DeleteThreadPool() {
@@ -1002,7 +1005,10 @@
       total_alloc_space_size += malloc_space->Size();
     }
   }
-  total_alloc_space_allocated = GetBytesAllocated() - large_object_space_->GetBytesAllocated();
+  total_alloc_space_allocated = GetBytesAllocated();
+  if (large_object_space_ != nullptr) {
+    total_alloc_space_allocated -= large_object_space_->GetBytesAllocated();
+  }
   if (bump_pointer_space_ != nullptr) {
     total_alloc_space_allocated -= bump_pointer_space_->Size();
   }
@@ -2018,6 +2024,7 @@
       } else if (bitmap2->HasAddress(obj)) {
         bitmap2->Set(obj);
       } else {
+        DCHECK(large_objects != nullptr);
         large_objects->Set(obj);
       }
     }
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 351e1c6..faaea40 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -30,6 +30,7 @@
 #include "gc/collector/garbage_collector.h"
 #include "gc/collector/gc_type.h"
 #include "gc/collector_type.h"
+#include "gc/space/large_object_space.h"
 #include "globals.h"
 #include "gtest/gtest.h"
 #include "instruction_set.h"
@@ -129,8 +130,6 @@
  public:
   // If true, measure the total allocation time.
   static constexpr bool kMeasureAllocationTime = false;
-  // Primitive arrays larger than this size are put in the large object space.
-  static constexpr size_t kDefaultLargeObjectThreshold = 3 * kPageSize;
   static constexpr size_t kDefaultStartingSize = kPageSize;
   static constexpr size_t kDefaultInitialSize = 2 * MB;
   static constexpr size_t kDefaultMaximumSize = 256 * MB;
@@ -142,7 +141,17 @@
   static constexpr size_t kDefaultTLABSize = 256 * KB;
   static constexpr double kDefaultTargetUtilization = 0.5;
   static constexpr double kDefaultHeapGrowthMultiplier = 2.0;
-
+  // Primitive arrays larger than this size are put in the large object space.
+  static constexpr size_t kDefaultLargeObjectThreshold = 3 * kPageSize;
+  // Whether or not we use the free list large object space. Only use it if USE_ART_LOW_4G_ALLOCATOR
+  // since this means that we have to use the slow msync loop in MemMap::MapAnonymous.
+#if USE_ART_LOW_4G_ALLOCATOR
+  static constexpr space::LargeObjectSpaceType kDefaultLargeObjectSpaceType =
+      space::kLargeObjectSpaceTypeFreeList;
+#else
+  static constexpr space::LargeObjectSpaceType kDefaultLargeObjectSpaceType =
+      space::kLargeObjectSpaceTypeMap;
+#endif
   // Used so that we don't overflow the allocation time atomic integer.
   static constexpr size_t kTimeAdjust = 1024;
 
@@ -161,6 +170,7 @@
                 const std::string& original_image_file_name,
                 InstructionSet image_instruction_set,
                 CollectorType foreground_collector_type, CollectorType background_collector_type,
+                space::LargeObjectSpaceType large_object_space_type, size_t large_object_threshold,
                 size_t parallel_gc_threads, size_t conc_gc_threads, bool low_memory_mode,
                 size_t long_pause_threshold, size_t long_gc_threshold,
                 bool ignore_max_footprint, bool use_tlab,
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index 2a43712..dad5855 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -120,7 +120,7 @@
   mirror::Object* obj = reinterpret_cast<mirror::Object*>(mem_map->Begin());
   large_objects_.push_back(obj);
   mem_maps_.Put(obj, mem_map);
-  size_t allocation_size = mem_map->Size();
+  const size_t allocation_size = mem_map->BaseSize();
   DCHECK(bytes_allocated != nullptr);
   begin_ = std::min(begin_, reinterpret_cast<byte*>(obj));
   byte* obj_end = reinterpret_cast<byte*>(obj) + allocation_size;
@@ -145,8 +145,9 @@
     Runtime::Current()->GetHeap()->DumpSpaces(LOG(ERROR));
     LOG(FATAL) << "Attempted to free large object " << ptr << " which was not live";
   }
-  DCHECK_GE(num_bytes_allocated_, found->second->Size());
-  size_t allocation_size = found->second->Size();
+  const size_t map_size = found->second->BaseSize();
+  DCHECK_GE(num_bytes_allocated_, map_size);
+  size_t allocation_size = map_size;
   num_bytes_allocated_ -= allocation_size;
   --num_objects_allocated_;
   delete found->second;
@@ -158,7 +159,7 @@
   MutexLock mu(Thread::Current(), lock_);
   auto found = mem_maps_.find(obj);
   CHECK(found != mem_maps_.end()) << "Attempted to get size of a large object which is not live";
-  return found->second->Size();
+  return found->second->BaseSize();
 }
 
 size_t LargeObjectSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
diff --git a/runtime/gc/space/large_object_space.h b/runtime/gc/space/large_object_space.h
index 9d5e090..a63c5c0 100644
--- a/runtime/gc/space/large_object_space.h
+++ b/runtime/gc/space/large_object_space.h
@@ -31,6 +31,12 @@
 
 class AllocationInfo;
 
+enum LargeObjectSpaceType {
+  kLargeObjectSpaceTypeDisabled,
+  kLargeObjectSpaceTypeMap,
+  kLargeObjectSpaceTypeFreeList,
+};
+
 // Abstraction implemented by all large object spaces.
 class LargeObjectSpace : public DiscontinuousSpace, public AllocSpace {
  public:
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index 37e08a5..6b4f764 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -63,6 +63,8 @@
     heap_min_free_(gc::Heap::kDefaultMinFree),
     heap_max_free_(gc::Heap::kDefaultMaxFree),
     heap_non_moving_space_capacity_(gc::Heap::kDefaultNonMovingSpaceCapacity),
+    large_object_space_type_(gc::Heap::kDefaultLargeObjectSpaceType),
+    large_object_threshold_(gc::Heap::kDefaultLargeObjectThreshold),
     heap_target_utilization_(gc::Heap::kDefaultTargetUtilization),
     foreground_heap_growth_multiplier_(gc::Heap::kDefaultHeapGrowthMultiplier),
     parallel_gc_threads_(1),
@@ -452,6 +454,32 @@
       if (!ParseXGcOption(option)) {
         return false;
       }
+    } else if (StartsWith(option, "-XX:LargeObjectSpace=")) {
+      std::string substring;
+      if (!ParseStringAfterChar(option, '=', &substring)) {
+        return false;
+      }
+      if (substring == "disabled") {
+        large_object_space_type_ = gc::space::kLargeObjectSpaceTypeDisabled;
+      } else if (substring == "freelist") {
+        large_object_space_type_ = gc::space::kLargeObjectSpaceTypeFreeList;
+      } else if (substring == "map") {
+        large_object_space_type_ = gc::space::kLargeObjectSpaceTypeMap;
+      } else {
+        Usage("Unknown -XX:LargeObjectSpace= option %s\n", substring.c_str());
+        return false;
+      }
+    } else if (StartsWith(option, "-XX:LargeObjectThreshold=")) {
+      std::string substring;
+      if (!ParseStringAfterChar(option, '=', &substring)) {
+        return false;
+      }
+      size_t size = ParseMemoryOption(substring.c_str(), 1);
+      if (size == 0) {
+        Usage("Failed to parse memory option %s\n", option.c_str());
+        return false;
+      }
+      large_object_threshold_ = size;
     } else if (StartsWith(option, "-XX:BackgroundGC=")) {
       std::string substring;
       if (!ParseStringAfterChar(option, '=', &substring)) {
@@ -757,7 +785,6 @@
   UsageMessage(stream, "  -Xstacktracefile:<filename>\n");
   UsageMessage(stream, "  -Xgc:[no]preverify\n");
   UsageMessage(stream, "  -Xgc:[no]postverify\n");
-  UsageMessage(stream, "  -XX:+DisableExplicitGC\n");
   UsageMessage(stream, "  -XX:HeapGrowthLimit=N\n");
   UsageMessage(stream, "  -XX:HeapMinFree=N\n");
   UsageMessage(stream, "  -XX:HeapMaxFree=N\n");
@@ -774,6 +801,7 @@
   UsageMessage(stream, "  -Xgc:[no]postverify_rosalloc\n");
   UsageMessage(stream, "  -Xgc:[no]presweepingverify\n");
   UsageMessage(stream, "  -Ximage:filename\n");
+  UsageMessage(stream, "  -XX:+DisableExplicitGC\n");
   UsageMessage(stream, "  -XX:ParallelGCThreads=integervalue\n");
   UsageMessage(stream, "  -XX:ConcGCThreads=integervalue\n");
   UsageMessage(stream, "  -XX:MaxSpinsBeforeThinLockInflation=integervalue\n");
@@ -783,6 +811,8 @@
   UsageMessage(stream, "  -XX:IgnoreMaxFootprint\n");
   UsageMessage(stream, "  -XX:UseTLAB\n");
   UsageMessage(stream, "  -XX:BackgroundGC=none\n");
+  UsageMessage(stream, "  -XX:LargeObjectSpace={disabled,map,freelist}\n");
+  UsageMessage(stream, "  -XX:LargeObjectThreshold=N\n");
   UsageMessage(stream, "  -Xmethod-trace\n");
   UsageMessage(stream, "  -Xmethod-trace-file:filename");
   UsageMessage(stream, "  -Xmethod-trace-file-size:integervalue\n");
diff --git a/runtime/parsed_options.h b/runtime/parsed_options.h
index 3839e19..26a2f31 100644
--- a/runtime/parsed_options.h
+++ b/runtime/parsed_options.h
@@ -24,6 +24,7 @@
 
 #include "globals.h"
 #include "gc/collector_type.h"
+#include "gc/space/large_object_space.h"
 #include "instruction_set.h"
 #include "profiler_options.h"
 
@@ -72,6 +73,8 @@
   size_t heap_min_free_;
   size_t heap_max_free_;
   size_t heap_non_moving_space_capacity_;
+  gc::space::LargeObjectSpaceType large_object_space_type_;
+  size_t large_object_threshold_;
   double heap_target_utilization_;
   double foreground_heap_growth_multiplier_;
   unsigned int parallel_gc_threads_;
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index a1ea3cf..9b24bec 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -697,6 +697,8 @@
                        options->image_isa_,
                        options->collector_type_,
                        options->background_collector_type_,
+                       options->large_object_space_type_,
+                       options->large_object_threshold_,
                        options->parallel_gc_threads_,
                        options->conc_gc_threads_,
                        options->low_memory_mode_,