Fix memory usage regression and clean up collector-changing code.

Memory usage regressed because concurrent_start_bytes_ was not updated
when changing collectors.
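
ChangeCollector() now sets concurrent_gc_, rebuilds gc_plan_ and
recomputes the concurrent GC start threshold whenever the collector
type changes, roughly (see the heap.cc hunk below):

  if (concurrent_gc_) {
    concurrent_start_bytes_ = std::max(max_allowed_footprint_, kMinConcurrentRemainingBytes) -
        kMinConcurrentRemainingBytes;
  } else {
    concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
  }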

Bug: 12034247

Change-Id: I1c69e71cd2919e0d3bf75485a4ac0b0aeca59278
diff --git a/runtime/gc/collector_type.h b/runtime/gc/collector_type.h
index ba3cad6..06395cf 100644
--- a/runtime/gc/collector_type.h
+++ b/runtime/gc/collector_type.h
@@ -24,6 +24,8 @@
 
 // Which types of collections are able to be performed.
 enum CollectorType {
+  // No collector selected.
+  kCollectorTypeNone,
   // Non concurrent mark-sweep.
   kCollectorTypeMS,
   // Concurrent mark-sweep.
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index 6e9b04a..08ab6b8 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -93,7 +93,7 @@
   } else {
     DCHECK(!Dbg::IsAllocTrackingEnabled());
   }
-  if (AllocatorHasConcurrentGC(allocator)) {
+  if (concurrent_gc_) {
     CheckConcurrentGC(self, new_num_bytes_allocated, obj);
   }
   if (kIsDebugBuild) {
@@ -199,9 +199,11 @@
     if (!concurrent_gc_) {
       if (!grow) {
         return true;
-      } else {
-        max_allowed_footprint_ = new_footprint;
       }
+      // TODO: Grow for allocation is racy, fix it.
+      VLOG(heap) << "Growing heap from " << PrettySize(max_allowed_footprint_) << " to "
+          << PrettySize(new_footprint) << " for a " << PrettySize(alloc_size) << " allocation";
+      max_allowed_footprint_ = new_footprint;
     }
   }
   return false;
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 1e3689b..f92a821 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -75,12 +75,13 @@
 
 Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max_free,
            double target_utilization, size_t capacity, const std::string& image_file_name,
-           CollectorType collector_type, size_t parallel_gc_threads, size_t conc_gc_threads,
-           bool low_memory_mode, size_t long_pause_log_threshold, size_t long_gc_log_threshold,
-           bool ignore_max_footprint)
+           CollectorType post_zygote_collector_type, size_t parallel_gc_threads,
+           size_t conc_gc_threads, bool low_memory_mode, size_t long_pause_log_threshold,
+           size_t long_gc_log_threshold, bool ignore_max_footprint)
     : non_moving_space_(nullptr),
-      concurrent_gc_(collector_type == gc::kCollectorTypeCMS),
-      collector_type_(collector_type),
+      concurrent_gc_(false),
+      collector_type_(kCollectorTypeNone),
+      post_zygote_collector_type_(post_zygote_collector_type),
       parallel_gc_threads_(parallel_gc_threads),
       conc_gc_threads_(conc_gc_threads),
       low_memory_mode_(low_memory_mode),
@@ -109,8 +110,7 @@
       last_process_state_id_(NULL),
       // Initially assume we perceive jank in case the process state is never updated.
       process_state_(kProcessStateJankPerceptible),
-      concurrent_start_bytes_(concurrent_gc_ ? initial_size - kMinConcurrentRemainingBytes
-          :  std::numeric_limits<size_t>::max()),
+      concurrent_start_bytes_(std::numeric_limits<size_t>::max()),
       total_bytes_freed_ever_(0),
       total_objects_freed_ever_(0),
       num_bytes_allocated_(0),
@@ -155,8 +155,12 @@
   // If we aren't the zygote, switch to the default non zygote allocator. This may update the
   // entrypoints.
   if (!Runtime::Current()->IsZygote()) {
-    ChangeCollector(collector_type_);
+    ChangeCollector(post_zygote_collector_type_);
+  } else {
+    // We are the zygote; use bump pointer allocation and the semi-space collector.
+    ChangeCollector(kCollectorTypeSS);
   }
+
   live_bitmap_.reset(new accounting::HeapBitmap(this));
   mark_bitmap_.reset(new accounting::HeapBitmap(this));
   // Requested begin for the alloc space, to follow the mapped image and oat files
@@ -262,9 +266,6 @@
     garbage_collectors_.push_back(new collector::PartialMarkSweep(this, concurrent));
     garbage_collectors_.push_back(new collector::StickyMarkSweep(this, concurrent));
   }
-  gc_plan_.push_back(collector::kGcTypeSticky);
-  gc_plan_.push_back(collector::kGcTypePartial);
-  gc_plan_.push_back(collector::kGcTypeFull);
   if (kMovingCollector) {
     // TODO: Clean this up.
     semi_space_collector_ = new collector::SemiSpace(this);
@@ -1085,22 +1086,46 @@
 void Heap::CollectGarbage(bool clear_soft_references) {
   // Even if we waited for a GC we still need to do another GC since weaks allocated during the
   // last GC will not have necessarily been cleared.
-  CollectGarbageInternal(collector::kGcTypeFull, kGcCauseExplicit, clear_soft_references);
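+  // gc_plan_.back() is the most thorough GC type in the current plan (always kGcTypeFull).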
+  CollectGarbageInternal(gc_plan_.back(), kGcCauseExplicit, clear_soft_references);
 }
 
 void Heap::ChangeCollector(CollectorType collector_type) {
-  switch (collector_type) {
-    case kCollectorTypeSS: {
-      ChangeAllocator(kAllocatorTypeBumpPointer);
-      break;
+  // TODO: Only do this with all mutators suspended to avoid races.
+  if (collector_type != collector_type_) {
+    collector_type_ = collector_type;
+    gc_plan_.clear();
+    switch (collector_type_) {
+      case kCollectorTypeSS: {
+        concurrent_gc_ = false;
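+        // The semi-space collector only performs full (whole-heap) collections.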
+        gc_plan_.push_back(collector::kGcTypeFull);
+        ChangeAllocator(kAllocatorTypeBumpPointer);
+        break;
+      }
+      case kCollectorTypeMS: {
+        concurrent_gc_ = false;
+        gc_plan_.push_back(collector::kGcTypeSticky);
+        gc_plan_.push_back(collector::kGcTypePartial);
+        gc_plan_.push_back(collector::kGcTypeFull);
+        ChangeAllocator(kAllocatorTypeFreeList);
+        break;
+      }
+      case kCollectorTypeCMS: {
+        concurrent_gc_ = true;
+        gc_plan_.push_back(collector::kGcTypeSticky);
+        gc_plan_.push_back(collector::kGcTypePartial);
+        gc_plan_.push_back(collector::kGcTypeFull);
+        ChangeAllocator(kAllocatorTypeFreeList);
+        break;
+      }
+      default: {
+        LOG(FATAL) << "Unimplemented";
+      }
     }
-    case kCollectorTypeMS:
-      // Fall-through.
-    case kCollectorTypeCMS: {
-      ChangeAllocator(kAllocatorTypeFreeList);
-      break;
-    default:
-      LOG(FATAL) << "Unimplemented";
+    if (concurrent_gc_) {
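+      // Keep kMinConcurrentRemainingBytes of headroom below the footprint limit so the
+      // concurrent GC can start before the limit is reached; std::max avoids size_t underflow.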
+      concurrent_start_bytes_ =
+          std::max(max_allowed_footprint_, kMinConcurrentRemainingBytes) - kMinConcurrentRemainingBytes;
+    } else {
+      concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
     }
   }
 }
@@ -1119,8 +1144,8 @@
   // Trim the pages at the end of the non moving space.
   non_moving_space_->Trim();
   non_moving_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
-  // Change the allocator to the post zygote one.
-  ChangeCollector(collector_type_);
+  // Change the collector to the post-zygote one.
+  ChangeCollector(post_zygote_collector_type_);
   // TODO: Delete bump_pointer_space_ and temp_pointer_space_?
   if (semi_space_collector_ != nullptr) {
     // Create a new bump pointer space which we will compact into.
@@ -1295,7 +1320,7 @@
   } else {
     LOG(FATAL) << "Invalid current allocator " << current_allocator_;
   }
-  CHECK(collector != NULL)
+  CHECK(collector != nullptr)
       << "Could not find garbage collector with concurrent=" << concurrent_gc_
       << " and type=" << gc_type;
 
@@ -1876,7 +1901,7 @@
   }
   if (!ignore_max_footprint_) {
     SetIdealFootprint(target_size);
-    if (concurrent_gc_ && AllocatorHasConcurrentGC(current_allocator_)) {
+    if (concurrent_gc_) {
       // Calculate when to perform the next ConcurrentGC.
       // Calculate the estimated GC duration.
       double gc_duration_seconds = NsToMs(gc_duration) / 1000.0;
@@ -1962,7 +1987,6 @@
 void Heap::RequestConcurrentGC(Thread* self) {
   // Make sure that we can do a concurrent GC.
   Runtime* runtime = Runtime::Current();
-  DCHECK(concurrent_gc_);
   if (runtime == NULL || !runtime->IsFinishedStarting() || runtime->IsShuttingDown(self) ||
       self->IsHandlingStackOverflow()) {
     return;
@@ -2096,7 +2120,7 @@
       // finalizers released native managed allocations.
       UpdateMaxNativeFootprint();
     } else if (!IsGCRequestPending()) {
-      if (concurrent_gc_ && AllocatorHasConcurrentGC(current_allocator_)) {
+      if (concurrent_gc_) {
         RequestConcurrentGC(self);
       } else {
         CollectGarbageInternal(gc_type, kGcCauseForAlloc, false);
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 046fbac..3bff3f9 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -644,12 +644,14 @@
   // A mod-union table remembers all of the references from its space to other spaces.
   SafeMap<space::Space*, accounting::ModUnionTable*> mod_union_tables_;
 
-  // What kind of concurrency behavior is the runtime after? True for concurrent mark sweep GC,
-  // false for stop-the-world mark sweep.
-  const bool concurrent_gc_;
+  // Whether the current collector runs concurrently with the mutators. Currently true for
+  // concurrent mark sweep GC, false for other GC types.
+  bool concurrent_gc_;
 
   // The current collector type.
   CollectorType collector_type_;
+  // Which collector we will switch to after the zygote forks.
+  CollectorType post_zygote_collector_type_;
 
   // How many GC threads we may use for paused parts of garbage collection.
   const size_t parallel_gc_threads_;