Recycle mem-maps for collector transitions.

We now create spaces when we need them for collector transitions or
homogeneous compaction by recycling mem maps. Changed the bump
pointer space size to be as large as the heap capacity instead of
1/2 of the heap capacity as before. For GSS, bump pointer spaces are
currently fixed at 32 MB.

Changed GSS to have main space == non moving space since we don't
need to copy from the main space.

Fixes GC stress tests 074 and 096.
Also fixes the test 080 OOM throw with -Xmx2m for the GC stress
test; this was broken since it allocated a 4 MB array before
starting the OOM process.
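
Illustrative sketch, not part of the patch: the mem map recycling
pattern for the non moving -> moving transition, condensed from the
kCollectorTypeSS case in the heap.cc hunk below (CHECKs, mem map
protection and timings elided).

  // The backup malloc space gives up its mem map, which becomes the
  // bump pointer space that we compact into.
  std::unique_ptr<MemMap> mem_map(main_space_backup_->ReleaseMemMap());
  bump_pointer_space_ = space::BumpPointerSpace::CreateFromMemMap(
      "Bump pointer space", mem_map.release());
  AddSpace(bump_pointer_space_);
  Compact(bump_pointer_space_, main_space_, kGcCauseCollectorTransition);
  // The main space is now empty; recycle its mem map as the second
  // (temp) bump pointer space.
  mem_map.reset(main_space_->ReleaseMemMap());
  RemoveSpace(main_space_);
  temp_space_ = space::BumpPointerSpace::CreateFromMemMap(
      "Bump pointer space 2", mem_map.release());
  AddSpace(temp_space_);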

Bug: 14059466
Bug: 16406852
Change-Id: I62877cfa24ec944a6f34ffac30334f454a8002fd
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index c7c567f..8fb33ce 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -69,9 +69,9 @@
     } else if (space->GetLiveBitmap() != nullptr) {
       if (space == to_space_ || collect_from_space_only_) {
         if (collect_from_space_only_) {
-          // Bind the main free list space and the non-moving space to the immune space if a bump
-          // pointer space only collection.
-          CHECK(space == to_space_ || space == GetHeap()->GetPrimaryFreeListSpace() ||
+          // Bind the bitmaps of the main free list space and the non-moving space if we are
+          // doing a bump pointer space only collection.
+          CHECK(space == GetHeap()->GetPrimaryFreeListSpace() ||
                 space == GetHeap()->GetNonMovingSpace());
         }
         CHECK(space->IsContinuousMemMapAllocSpace());
@@ -222,7 +222,6 @@
   heap_->GetCardTable()->ClearCardTable();
   // Need to do this before the checkpoint since we don't want any threads to add references to
   // the live stack during the recursive mark.
-  t.NewTiming("SwapStacks");
   if (kUseThreadLocalAllocationStack) {
     TimingLogger::ScopedTiming t("RevokeAllThreadLocalAllocationStacks", GetTimings());
     heap_->RevokeAllThreadLocalAllocationStacks(self_);
@@ -492,7 +491,7 @@
       // If out of space, fall back to the to-space.
       forward_address = to_space_->AllocThreadUnsafe(self_, object_size, &bytes_allocated, nullptr);
       // No logic for marking the bitmap, so it must be null.
-      DCHECK(to_space_->GetLiveBitmap() == nullptr);
+      DCHECK(to_space_live_bitmap_ == nullptr);
     } else {
       bytes_promoted_ += bytes_allocated;
       // Dirty the card at the destination as it may contain
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 4e38335..48ae84d 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -96,6 +96,7 @@
 static const char* kDlMallocSpaceName[2] = {"main dlmalloc space", "main dlmalloc space 1"};
 static const char* kRosAllocSpaceName[2] = {"main rosalloc space", "main rosalloc space 1"};
 static const char* kMemMapSpaceName[2] = {"main space", "main space 1"};
+static constexpr size_t kGSSBumpPointerSpaceCapacity = 32 * MB;
 
 Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max_free,
            double target_utilization, double foreground_heap_growth_multiplier, size_t capacity,
@@ -179,16 +180,16 @@
       running_on_valgrind_(Runtime::Current()->RunningOnValgrind()),
       use_tlab_(use_tlab),
       main_space_backup_(nullptr),
-      min_interval_homogeneous_space_compaction_by_oom_(min_interval_homogeneous_space_compaction_by_oom),
+      min_interval_homogeneous_space_compaction_by_oom_(
+          min_interval_homogeneous_space_compaction_by_oom),
       last_time_homogeneous_space_compaction_by_oom_(NanoTime()),
       use_homogeneous_space_compaction_for_oom_(use_homogeneous_space_compaction_for_oom) {
   if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
     LOG(INFO) << "Heap() entering";
   }
-  const bool is_zygote = Runtime::Current()->IsZygote();
   // If we aren't the zygote, switch to the default non zygote allocator. This may update the
   // entrypoints.
-  if (!is_zygote) {
+  if (!Runtime::Current()->IsZygote()) {
     large_object_threshold_ = kDefaultLargeObjectThreshold;
     // Background compaction is currently not supported for command line runs.
     if (background_collector_type_ != foreground_collector_type_) {
@@ -197,7 +198,6 @@
     }
   }
   ChangeCollector(desired_collector_type_);
-
   live_bitmap_.reset(new accounting::HeapBitmap(this));
   mark_bitmap_.reset(new accounting::HeapBitmap(this));
   // Requested begin for the alloc space, to follow the mapped image and oat files
@@ -213,130 +213,117 @@
     CHECK_GT(oat_file_end_addr, image_space->End());
     requested_alloc_space_begin = AlignUp(oat_file_end_addr, kPageSize);
   }
-
   /*
   requested_alloc_space_begin ->     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
                                      +-  nonmoving space (kNonMovingSpaceCapacity) +-
                                      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
-                                     +-        main alloc space (capacity_)        +-
+                                     +-main alloc space / bump space 1 (capacity_) +-
                                      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
-                                     +-       main alloc space 1 (capacity_)       +-
+                                     +-????????????????????????????????????????????+-
+                                     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
+                                     +-main alloc space2 / bump space 2 (capacity_)+-
                                      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
   */
-  bool create_backup_main_space =
+  bool support_homogeneous_space_compaction =
       background_collector_type == gc::kCollectorTypeHomogeneousSpaceCompact ||
       use_homogeneous_space_compaction_for_oom;
-  if (is_zygote) {
-    // Reserve the address range before we create the non moving space to make sure bitmaps don't
-    // take it.
-    std::string error_str;
-    MemMap* main_space_map = MemMap::MapAnonymous(
-        kMemMapSpaceName[0], requested_alloc_space_begin + kNonMovingSpaceCapacity, capacity_,
-        PROT_READ | PROT_WRITE, true, &error_str);
-    CHECK(main_space_map != nullptr) << error_str;
-    MemMap* main_space_1_map = nullptr;
-    // Attempt to reserve an extra mem_map for homogeneous space compaction right after the main space map.
-    if (create_backup_main_space) {
-      main_space_1_map = MemMap::MapAnonymous(kMemMapSpaceName[1], main_space_map->End(), capacity_,
-                                               PROT_READ | PROT_WRITE, true, &error_str);
-      if (main_space_1_map == nullptr) {
-        LOG(WARNING) << "Failed to create map " <<  kMemMapSpaceName[1] << " with error "
-                     << error_str;
-      }
-    }
+  // We may use the same space as the main space for the non moving space if we don't need to
+  // compact from the main space.
+  // This is not the case if we support homogeneous compaction or have a moving background
+  // collector type.
+  const bool is_zygote = Runtime::Current()->IsZygote();
+  bool separate_non_moving_space = is_zygote ||
+      support_homogeneous_space_compaction || IsMovingGc(foreground_collector_type_) ||
+      IsMovingGc(background_collector_type_);
+  if (foreground_collector_type == kCollectorTypeGSS) {
+    separate_non_moving_space = false;
+  }
+  std::unique_ptr<MemMap> main_mem_map_1;
+  std::unique_ptr<MemMap> main_mem_map_2;
+  byte* request_begin = requested_alloc_space_begin;
+  if (request_begin != nullptr && separate_non_moving_space) {
+    request_begin += kNonMovingSpaceCapacity;
+  }
+  std::string error_str;
+  std::unique_ptr<MemMap> non_moving_space_mem_map;
+  if (separate_non_moving_space) {
+    // Reserve the non moving mem map before the other two since it needs to be at a specific
+    // address.
+    non_moving_space_mem_map.reset(
+        MemMap::MapAnonymous("non moving space", requested_alloc_space_begin,
+                             kNonMovingSpaceCapacity, PROT_READ | PROT_WRITE, true, &error_str));
+    CHECK(non_moving_space_mem_map != nullptr) << error_str;
+  }
+  // Attempt to create 2 mem maps at or after the requested begin.
+  main_mem_map_1.reset(MapAnonymousPreferredAddress(kMemMapSpaceName[0], request_begin, capacity_,
+                                                    PROT_READ | PROT_WRITE, &error_str));
+  CHECK(main_mem_map_1.get() != nullptr) << error_str;
+  if (support_homogeneous_space_compaction ||
+      background_collector_type_ == kCollectorTypeSS ||
+      foreground_collector_type_ == kCollectorTypeSS) {
+    main_mem_map_2.reset(MapAnonymousPreferredAddress(kMemMapSpaceName[1], main_mem_map_1->End(),
+                                                      capacity_, PROT_READ | PROT_WRITE,
+                                                      &error_str));
+    CHECK(main_mem_map_2.get() != nullptr) << error_str;
+  }
+  // Create the non moving space first so that bitmaps don't take up the address range.
+  if (separate_non_moving_space) {
     // Non moving space is always dlmalloc since we currently don't have support for multiple
     // active rosalloc spaces.
-    non_moving_space_ = space::DlMallocSpace::Create(
-        "zygote / non moving space", initial_size, kNonMovingSpaceCapacity,
-        kNonMovingSpaceCapacity, requested_alloc_space_begin, false);
+    const size_t size = non_moving_space_mem_map->Size();
+    non_moving_space_ = space::DlMallocSpace::CreateFromMemMap(
+        non_moving_space_mem_map.release(), "zygote / non moving space", initial_size,
+        initial_size, size, size, false);
     non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
-    CreateMainMallocSpace(main_space_map, initial_size, growth_limit_, capacity_);
-    if (main_space_1_map != nullptr) {
-      const char* name = kUseRosAlloc ? kRosAllocSpaceName[1] : kDlMallocSpaceName[1];
-      main_space_backup_ = CreateMallocSpaceFromMemMap(main_space_1_map, initial_size,
-                                                       growth_limit_, capacity_, name, true);
-    }
+    CHECK(non_moving_space_ != nullptr) << "Failed creating non moving space "
+        << requested_alloc_space_begin;
+    AddSpace(non_moving_space_);
+  }
+  // Create other spaces based on whether or not we have a moving GC.
+  if (IsMovingGc(foreground_collector_type_) && foreground_collector_type_ != kCollectorTypeGSS) {
+    // Create bump pointer spaces.
+    // We only create the bump pointer spaces if the foreground collector is a compacting GC.
+    // TODO: Place bump-pointer spaces somewhere to minimize size of card table.
+    bump_pointer_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 1",
+                                                                    main_mem_map_1.release());
+    CHECK(bump_pointer_space_ != nullptr) << "Failed to create bump pointer space";
+    AddSpace(bump_pointer_space_);
+    temp_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 2",
+                                                            main_mem_map_2.release());
+    CHECK(temp_space_ != nullptr) << "Failed to create bump pointer space";
+    AddSpace(temp_space_);
+    CHECK(separate_non_moving_space);
   } else {
-    std::string error_str;
-    byte* request_begin = requested_alloc_space_begin;
-    if (request_begin == nullptr) {
-      // Disable homogeneous space compaction since we don't have an image.
-      create_backup_main_space = false;
-    }
-    MemMap* main_space_1_map = nullptr;
-    if (create_backup_main_space) {
-      request_begin += kNonMovingSpaceCapacity;
-      // Attempt to reserve an extra mem_map for homogeneous space compaction right after the main space map.
-      main_space_1_map = MemMap::MapAnonymous(kMemMapSpaceName[1], request_begin + capacity_,
-                                               capacity_, PROT_READ | PROT_WRITE, true, &error_str);
-      if (main_space_1_map == nullptr) {
-        LOG(WARNING) << "Failed to create map " <<  kMemMapSpaceName[1] << " with error "
-                     << error_str;
-        request_begin = requested_alloc_space_begin;
-      }
-    }
-    MemMap* main_space_map = MemMap::MapAnonymous(kMemMapSpaceName[0], request_begin, capacity_,
-                                                  PROT_READ | PROT_WRITE, true, &error_str);
-    CHECK(main_space_map != nullptr) << error_str;
-    // Introduce a seperate non moving space.
-    if (main_space_1_map != nullptr) {
-      // Do this before creating the main malloc space to prevent bitmaps from being placed here.
-      non_moving_space_ = space::DlMallocSpace::Create(
-          "non moving space", kDefaultInitialSize, kNonMovingSpaceCapacity, kNonMovingSpaceCapacity,
-          requested_alloc_space_begin, false);
-      non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
-    }
-    // Create the main free list space, which doubles as the non moving space. We can do this since
-    // non zygote means that we won't have any background compaction.
-    CreateMainMallocSpace(main_space_map, initial_size, growth_limit_, capacity_);
-    if (main_space_1_map != nullptr) {
-      const char* name = kUseRosAlloc ? kRosAllocSpaceName[1] : kDlMallocSpaceName[1];
-      main_space_backup_ = CreateMallocSpaceFromMemMap(main_space_1_map, initial_size,
-                                                       growth_limit_, capacity_, name, true);
-      CHECK(main_space_backup_ != nullptr);
-    } else {
+    CreateMainMallocSpace(main_mem_map_1.release(), initial_size, growth_limit_, capacity_);
+    CHECK(main_space_ != nullptr);
+    AddSpace(main_space_);
+    if (!separate_non_moving_space) {
       non_moving_space_ = main_space_;
+      CHECK(!non_moving_space_->CanMoveObjects());
+    }
+    if (foreground_collector_type_ == kCollectorTypeGSS) {
+      CHECK_EQ(foreground_collector_type_, background_collector_type_);
+      // Create bump pointer spaces instead of a backup space.
+      main_mem_map_2.release();
+      bump_pointer_space_ = space::BumpPointerSpace::Create("Bump pointer space 1",
+                                                            kGSSBumpPointerSpaceCapacity, nullptr);
+      CHECK(bump_pointer_space_ != nullptr);
+      AddSpace(bump_pointer_space_);
+      temp_space_ = space::BumpPointerSpace::Create("Bump pointer space 2",
+                                                    kGSSBumpPointerSpaceCapacity, nullptr);
+      CHECK(temp_space_ != nullptr);
+      AddSpace(temp_space_);
+    } else if (main_mem_map_2.get() != nullptr) {
+      const char* name = kUseRosAlloc ? kRosAllocSpaceName[1] : kDlMallocSpaceName[1];
+      main_space_backup_.reset(CreateMallocSpaceFromMemMap(main_mem_map_2.release(), initial_size,
+                                                           growth_limit_, capacity_, name, true));
+      CHECK(main_space_backup_.get() != nullptr);
+      // Add the space so it's accounted for in heap_begin and heap_end.
+      AddSpace(main_space_backup_.get());
     }
   }
   CHECK(non_moving_space_ != nullptr);
-
-  // We need to create the bump pointer if the foreground collector is a compacting GC. We only
-  // create the bump pointer space if we are not a moving foreground collector but have a moving
-  // background collector since the heap transition code will create the temp space by recycling
-  // the bitmap from the main space.
-  if (kMovingCollector &&
-      (IsMovingGc(foreground_collector_type_) || IsMovingGc(background_collector_type_))) {
-    // TODO: Place bump-pointer spaces somewhere to minimize size of card table.
-    // Divide by 2 for a temporary fix for reducing virtual memory usage.
-    const size_t bump_pointer_space_capacity = capacity_ / 2;
-    bump_pointer_space_ = space::BumpPointerSpace::Create("Bump pointer space",
-                                                          bump_pointer_space_capacity, nullptr);
-    CHECK(bump_pointer_space_ != nullptr) << "Failed to create bump pointer space";
-    AddSpace(bump_pointer_space_);
-    temp_space_ = space::BumpPointerSpace::Create("Bump pointer space 2",
-                                                  bump_pointer_space_capacity, nullptr);
-    CHECK(temp_space_ != nullptr) << "Failed to create bump pointer space";
-    AddSpace(temp_space_);
-  }
-  if (non_moving_space_ != main_space_) {
-    AddSpace(non_moving_space_);
-  }
-  if (main_space_backup_ != nullptr) {
-    AddSpace(main_space_backup_);
-  } else {
-    const char* disable_msg = "Disabling homogenous space compact due to no backup main space";
-    if (background_collector_type_ == gc::kCollectorTypeHomogeneousSpaceCompact) {
-      background_collector_type_ = collector_type_;
-      LOG(WARNING) << disable_msg;
-    } else if (use_homogeneous_space_compaction_for_oom_) {
-      LOG(WARNING) << disable_msg;
-    }
-    use_homogeneous_space_compaction_for_oom_ = false;
-  }
-  if (main_space_ != nullptr) {
-    AddSpace(main_space_);
-  }
-
+  CHECK(!non_moving_space_->CanMoveObjects());
   // Allocate the large object space.
   if (kUseFreeListSpaceForLOS) {
     large_object_space_ = space::FreeListSpace::Create("large object space", nullptr, capacity_);
@@ -345,19 +332,19 @@
   }
   CHECK(large_object_space_ != nullptr) << "Failed to create large object space";
   AddSpace(large_object_space_);
-
   // Compute heap capacity. Continuous spaces are sorted in order of Begin().
   CHECK(!continuous_spaces_.empty());
-
   // Relies on the spaces being sorted.
   byte* heap_begin = continuous_spaces_.front()->Begin();
   byte* heap_end = continuous_spaces_.back()->Limit();
   size_t heap_capacity = heap_end - heap_begin;
-
+  // Remove the main backup space since having unused extra spaces slows down the GC.
+  if (main_space_backup_.get() != nullptr) {
+    RemoveSpace(main_space_backup_.get());
+  }
   // Allocate the card table.
   card_table_.reset(accounting::CardTable::Create(heap_begin, heap_capacity));
   CHECK(card_table_.get() != NULL) << "Failed to create card table";
-
   // Card cache for now since it makes it easier for us to update the references to the copying
   // spaces.
   accounting::ModUnionTable* mod_union_table =
@@ -365,17 +352,14 @@
                                                       GetImageSpace());
   CHECK(mod_union_table != nullptr) << "Failed to create image mod-union table";
   AddModUnionTable(mod_union_table);
-
   if (collector::SemiSpace::kUseRememberedSet && non_moving_space_ != main_space_) {
     accounting::RememberedSet* non_moving_space_rem_set =
         new accounting::RememberedSet("Non-moving space remembered set", this, non_moving_space_);
     CHECK(non_moving_space_rem_set != nullptr) << "Failed to create non-moving space remembered set";
     AddRememberedSet(non_moving_space_rem_set);
   }
-
-  // TODO: Count objects in the image space here.
+  // TODO: Count objects in the image space here?
   num_bytes_allocated_.StoreRelaxed(0);
-
   mark_stack_.reset(accounting::ObjectStack::Create("mark stack", kDefaultMarkStackSize,
                                                     kDefaultMarkStackSize));
   const size_t alloc_stack_capacity = max_allocation_stack_size_ + kAllocationStackReserveSize;
@@ -383,7 +367,6 @@
       "allocation stack", max_allocation_stack_size_, alloc_stack_capacity));
   live_stack_.reset(accounting::ObjectStack::Create(
       "live stack", max_allocation_stack_size_, alloc_stack_capacity));
-
   // It's still too early to take a lock because there are no threads yet, but we can create locks
   // now. We don't create it earlier to make it clear that you can't use locks during heap
   // initialization.
@@ -392,13 +375,11 @@
                                                 *gc_complete_lock_));
   heap_trim_request_lock_ = new Mutex("Heap trim request lock");
   last_gc_size_ = GetBytesAllocated();
-
   if (ignore_max_footprint_) {
     SetIdealFootprint(std::numeric_limits<size_t>::max());
     concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
   }
   CHECK_NE(max_allowed_footprint_, 0U);
-
   // Create our garbage collectors.
   for (size_t i = 0; i < 2; ++i) {
     const bool concurrent = i != 0;
@@ -417,26 +398,38 @@
     mark_compact_collector_ = new collector::MarkCompact(this);
     garbage_collectors_.push_back(mark_compact_collector_);
   }
-
-  if (GetImageSpace() != nullptr && main_space_ != nullptr) {
-    // Check that there's no gap between the image space and the main space so that the immune
-    // region won't break (eg. due to a large object allocated in the gap).
-    bool no_gap = MemMap::CheckNoGaps(GetImageSpace()->GetMemMap(), main_space_->GetMemMap());
+  if (GetImageSpace() != nullptr && non_moving_space_ != nullptr) {
+    // Check that there's no gap between the image space and the non moving space so that the
+    // immune region won't break (e.g. due to a large object allocated in the gap).
+    bool no_gap = MemMap::CheckNoGaps(GetImageSpace()->GetMemMap(),
+                                      non_moving_space_->GetMemMap());
     if (!no_gap) {
       MemMap::DumpMaps(LOG(ERROR));
       LOG(FATAL) << "There's a gap between the image space and the main space";
     }
   }
-
   if (running_on_valgrind_) {
     Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints();
   }
-
   if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
     LOG(INFO) << "Heap() exiting";
   }
 }
 
+MemMap* Heap::MapAnonymousPreferredAddress(const char* name, byte* request_begin, size_t capacity,
+                                           int prot_flags, std::string* out_error_str) {
+  while (true) {
+    MemMap* map = MemMap::MapAnonymous(name, request_begin, capacity, prot_flags, true,
+                                       out_error_str);
+    if (map != nullptr || request_begin == nullptr) {
+      return map;
+    }
+    // Retry a second time with no specified request begin.
+    request_begin = nullptr;
+  }
+  return nullptr;
+}
+
 space::MallocSpace* Heap::CreateMallocSpaceFromMemMap(MemMap* mem_map, size_t initial_size,
                                                       size_t growth_limit, size_t capacity,
                                                       const char* name, bool can_move_objects) {
@@ -474,7 +467,8 @@
   if (kCompactZygote && Runtime::Current()->IsZygote() && !can_move_objects) {
     // After the zygote we want this to be false if we don't have background compaction enabled so
     // that getting primitive array elements is faster.
-    can_move_objects = !have_zygote_space_;
+    // We never have homogeneous compaction with GSS and don't need a space with movable objects.
+    can_move_objects = !have_zygote_space_ && foreground_collector_type_ != kCollectorTypeGSS;
   }
   if (collector::SemiSpace::kUseRememberedSet && main_space_ != nullptr) {
     RemoveRememberedSet(main_space_);
@@ -899,12 +893,15 @@
       << " free bytes";
   // If the allocation failed due to fragmentation, print out the largest continuous allocation.
   if (total_bytes_free >= byte_count) {
-    space::MallocSpace* space = nullptr;
+    space::AllocSpace* space = nullptr;
     if (allocator_type == kAllocatorTypeNonMoving) {
       space = non_moving_space_;
     } else if (allocator_type == kAllocatorTypeRosAlloc ||
                allocator_type == kAllocatorTypeDlMalloc) {
       space = main_space_;
+    } else if (allocator_type == kAllocatorTypeBumpPointer ||
+               allocator_type == kAllocatorTypeTLAB) {
+      space = bump_pointer_space_;
     }
     if (space != nullptr) {
       space->LogFragmentationAllocFailure(oss, byte_count);
@@ -1512,15 +1509,18 @@
   tl->SuspendAll();
   uint64_t start_time = NanoTime();
   // Launch compaction.
-  space::MallocSpace* to_space = main_space_backup_;
+  space::MallocSpace* to_space = main_space_backup_.release();
   space::MallocSpace* from_space = main_space_;
   to_space->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
   const uint64_t space_size_before_compaction = from_space->Size();
+  AddSpace(to_space);
   Compact(to_space, from_space, kGcCauseHomogeneousSpaceCompact);
   // Leave as prot read so that we can still run ROSAlloc verification on this space.
   from_space->GetMemMap()->Protect(PROT_READ);
   const uint64_t space_size_after_compaction = to_space->Size();
-  std::swap(main_space_, main_space_backup_);
+  main_space_ = to_space;
+  main_space_backup_.reset(from_space);
+  RemoveSpace(from_space);
   SetSpaceAsDefault(main_space_);  // Set as default to reset the proper dlmalloc space.
   // Update performed homogeneous space compaction count.
   count_performed_homogeneous_space_compaction_++;
@@ -1587,17 +1587,27 @@
   }
   tl->SuspendAll();
   switch (collector_type) {
-    case kCollectorTypeSS:
-      // Fall-through.
-    case kCollectorTypeGSS: {
+    case kCollectorTypeSS: {
       if (!IsMovingGc(collector_type_)) {
+        // Create the bump pointer space from the backup space.
+        CHECK(main_space_backup_ != nullptr);
+        std::unique_ptr<MemMap> mem_map(main_space_backup_->ReleaseMemMap());
         // We are transitioning from non moving GC -> moving GC, since we copied from the bump
         // pointer space last transition it will be protected.
-        bump_pointer_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
+        CHECK(mem_map != nullptr);
+        mem_map->Protect(PROT_READ | PROT_WRITE);
+        bump_pointer_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space",
+                                                                        mem_map.release());
+        AddSpace(bump_pointer_space_);
         Compact(bump_pointer_space_, main_space_, kGcCauseCollectorTransition);
+        // Use the now empty main space mem map for the bump pointer temp space.
+        mem_map.reset(main_space_->ReleaseMemMap());
         // Remove the main space so that we don't try to trim it, this doesn't work for debug
         // builds since RosAlloc attempts to read the magic number from a protected page.
         RemoveSpace(main_space_);
+        temp_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 2",
+                                                                mem_map.release());
+        AddSpace(temp_space_);
       }
       break;
     }
@@ -1605,10 +1615,25 @@
       // Fall through.
     case kCollectorTypeCMS: {
       if (IsMovingGc(collector_type_)) {
+        CHECK(temp_space_ != nullptr);
+        std::unique_ptr<MemMap> mem_map(temp_space_->ReleaseMemMap());
+        RemoveSpace(temp_space_);
+        temp_space_ = nullptr;
+        CreateMainMallocSpace(mem_map.get(), kDefaultInitialSize, mem_map->Size(),
+                              mem_map->Size());
+        mem_map.release();
         // Compact to the main space from the bump pointer space, don't need to swap semispaces.
         AddSpace(main_space_);
         main_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
         Compact(main_space_, bump_pointer_space_, kGcCauseCollectorTransition);
+        mem_map.reset(bump_pointer_space_->ReleaseMemMap());
+        RemoveSpace(bump_pointer_space_);
+        bump_pointer_space_ = nullptr;
+        const char* name = kUseRosAlloc ? kRosAllocSpaceName[1] : kDlMallocSpaceName[1];
+        main_space_backup_.reset(CreateMallocSpaceFromMemMap(mem_map.get(), kDefaultInitialSize,
+                                                             mem_map->Size(), mem_map->Size(),
+                                                             name, true));
+        mem_map.release();
       }
       break;
     }
@@ -1811,6 +1836,7 @@
   // there.
   non_moving_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
   // Change the collector to the post zygote one.
+  bool same_space = non_moving_space_ == main_space_;
   if (kCompactZygote) {
     DCHECK(semi_space_collector_ != nullptr);
     // Temporarily disable rosalloc verification because the zygote
@@ -1877,6 +1903,11 @@
   space::ZygoteSpace* zygote_space = old_alloc_space->CreateZygoteSpace("alloc space",
                                                                         low_memory_mode_,
                                                                         &non_moving_space_);
+  CHECK(!non_moving_space_->CanMoveObjects());
+  if (same_space) {
+    main_space_ = non_moving_space_;
+    SetSpaceAsDefault(main_space_);
+  }
   delete old_alloc_space;
   CHECK(zygote_space != nullptr) << "Failed creating zygote space";
   AddSpace(zygote_space);
@@ -2178,7 +2209,7 @@
         LOG(ERROR) << "Object " << obj << " class(" << obj->GetClass() << ") not a heap address";
       }
 
-      // Attmept to find the class inside of the recently freed objects.
+      // Attempt to find the class inside of the recently freed objects.
       space::ContinuousSpace* ref_space = heap_->FindContinuousSpaceFromObject(ref, true);
       if (ref_space != nullptr && ref_space->IsMallocSpace()) {
         space::MallocSpace* space = ref_space->AsMallocSpace();
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 0da113f..56c6d6d 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -444,8 +444,7 @@
                                                               bool fail_ok) const;
   space::Space* FindSpaceFromObject(const mirror::Object*, bool fail_ok) const;
 
-  void DumpForSigQuit(std::ostream& os);
-
+  void DumpForSigQuit(std::ostream& os) EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Do a pending heap transition or trim.
   void DoPendingTransitionOrTrim() LOCKS_EXCLUDED(heap_trim_request_lock_);
@@ -594,8 +593,13 @@
 
   void FinishGC(Thread* self, collector::GcType gc_type) LOCKS_EXCLUDED(gc_complete_lock_);
 
+  // Create a mem map with a preferred base address.
+  static MemMap* MapAnonymousPreferredAddress(const char* name, byte* request_begin,
+                                              size_t capacity, int prot_flags,
+                                              std::string* out_error_str);
+
   bool SupportHSpaceCompaction() const {
-    // Returns true if we can do hspace compaction.
+    // Returns true if we can do hspace compaction
     return main_space_backup_ != nullptr;
   }
 
@@ -1007,7 +1011,8 @@
   const bool use_tlab_;
 
   // Pointer to the space which becomes the new main space when we do homogeneous space compaction.
-  space::MallocSpace* main_space_backup_;
+  // Use unique_ptr since the space is only added during the homogeneous compaction phase.
+  std::unique_ptr<space::MallocSpace> main_space_backup_;
 
   // Minimal interval allowed between two homogeneous space compactions caused by OOM.
   uint64_t min_interval_homogeneous_space_compaction_by_oom_;
diff --git a/runtime/gc/space/bump_pointer_space.cc b/runtime/gc/space/bump_pointer_space.cc
index 8b35692..fb6bbac 100644
--- a/runtime/gc/space/bump_pointer_space.cc
+++ b/runtime/gc/space/bump_pointer_space.cc
@@ -258,6 +258,14 @@
   return true;
 }
 
+void BumpPointerSpace::LogFragmentationAllocFailure(std::ostream& os,
+                                                    size_t /* failed_alloc_bytes */) {
+  size_t max_contiguous_allocation = Limit() - End();
+  os << "; failed due to fragmentation (largest possible contiguous allocation "
+     << max_contiguous_allocation << " bytes)";
+  // Caller's job to print failed_alloc_bytes.
+}
+
 }  // namespace space
 }  // namespace gc
 }  // namespace art
diff --git a/runtime/gc/space/bump_pointer_space.h b/runtime/gc/space/bump_pointer_space.h
index feee34f..71b15ba 100644
--- a/runtime/gc/space/bump_pointer_space.h
+++ b/runtime/gc/space/bump_pointer_space.h
@@ -151,6 +151,9 @@
     bytes_allocated_.FetchAndSubSequentiallyConsistent(bytes);
   }
 
+  void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
   // Object alignment within the space.
   static constexpr size_t kAlignment = 8;
 
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index abae8ff..d5a03c6 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -431,6 +431,11 @@
   return scc.freed;
 }
 
+void LargeObjectSpace::LogFragmentationAllocFailure(std::ostream& /*os*/,
+                                                    size_t /*failed_alloc_bytes*/) {
+  UNIMPLEMENTED(FATAL);
+}
+
 }  // namespace space
 }  // namespace gc
 }  // namespace art
diff --git a/runtime/gc/space/large_object_space.h b/runtime/gc/space/large_object_space.h
index 01982d0..b1c20ca 100644
--- a/runtime/gc/space/large_object_space.h
+++ b/runtime/gc/space/large_object_space.h
@@ -89,6 +89,9 @@
     return end_;
   }
 
+  void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
  protected:
   explicit LargeObjectSpace(const std::string& name, byte* begin, byte* end);
 
diff --git a/runtime/gc/space/malloc_space.h b/runtime/gc/space/malloc_space.h
index 6f49fbf..a52b92b 100644
--- a/runtime/gc/space/malloc_space.h
+++ b/runtime/gc/space/malloc_space.h
@@ -133,8 +133,6 @@
     return can_move_objects_;
   }
 
-  virtual void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) = 0;
-
  protected:
   MallocSpace(const std::string& name, MemMap* mem_map, byte* begin, byte* end,
               byte* limit, size_t growth_limit, bool create_bitmaps, bool can_move_objects,
diff --git a/runtime/gc/space/space.h b/runtime/gc/space/space.h
index 71c8eb5..523d4fe 100644
--- a/runtime/gc/space/space.h
+++ b/runtime/gc/space/space.h
@@ -223,6 +223,8 @@
   // threads, if the alloc space implementation uses any.
   virtual void RevokeAllThreadLocalBuffers() = 0;
 
+  virtual void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) = 0;
+
  protected:
   struct SweepCallbackContext {
     SweepCallbackContext(bool swap_bitmaps, space::Space* space);
diff --git a/runtime/gc/space/zygote_space.cc b/runtime/gc/space/zygote_space.cc
index fb3a12e..51d84f5 100644
--- a/runtime/gc/space/zygote_space.cc
+++ b/runtime/gc/space/zygote_space.cc
@@ -77,25 +77,30 @@
 
 mirror::Object* ZygoteSpace::Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                    size_t* usable_size) {
-  LOG(FATAL) << "Unimplemented";
+  UNIMPLEMENTED(FATAL);
   return nullptr;
 }
 
 size_t ZygoteSpace::AllocationSize(mirror::Object* obj, size_t* usable_size) {
-  LOG(FATAL) << "Unimplemented";
+  UNIMPLEMENTED(FATAL);
   return 0;
 }
 
 size_t ZygoteSpace::Free(Thread* self, mirror::Object* ptr) {
-  LOG(FATAL) << "Unimplemented";
+  UNIMPLEMENTED(FATAL);
   return 0;
 }
 
 size_t ZygoteSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
-  LOG(FATAL) << "Unimplemented";
+  UNIMPLEMENTED(FATAL);
   return 0;
 }
 
+void ZygoteSpace::LogFragmentationAllocFailure(std::ostream& /*os*/,
+                                               size_t /*failed_alloc_bytes*/) {
+  UNIMPLEMENTED(FATAL);
+}
+
 void ZygoteSpace::SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg) {
   SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
   DCHECK(context->space->IsZygoteSpace());
diff --git a/runtime/gc/space/zygote_space.h b/runtime/gc/space/zygote_space.h
index 5d5fe76..0cf4bb1 100644
--- a/runtime/gc/space/zygote_space.h
+++ b/runtime/gc/space/zygote_space.h
@@ -74,6 +74,9 @@
     return false;
   }
 
+  void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
  protected:
   virtual accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() {
     return &SweepCallback;