Revert "Revert "Upgrade to 5.0.71.48"" DO NOT MERGE

This reverts commit f2e3994fa5148cc3d9946666f0b0596290192b0e
and updates the x64 makefile so that the revert no longer breaks
the x64 build.

FPIIM-449

Change-Id: Ib83e35bfbae6af627451c926a9650ec57c045605
(cherry picked from commit 109988c7ccb6f3fd1a58574fa3dfb88beaef6632)
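
Among other things, the diff below folds NewSpace's inline allocation
observers into a generic allocation-observer mechanism on Space, paused
and resumed through the new PauseAllocationObserversScope. A minimal
usage sketch of that RAII pattern (the function and its body are
placeholders, not code from this change):

    // Observers are paused for the dynamic extent of the scope object.
    void DoWorkWithoutObservers(Heap* heap) {
      PauseAllocationObserversScope pause(heap);
      // Allocations here do not fire AllocationStep on any space's
      // observers.
    }  // Destructor resumes observers on all spaces.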
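
The change also retires the scan-on-scavenge flag in favor of explicit
per-chunk remembered sets (old_to_new_slots_ and old_to_old_slots_),
allocated as one SlotSet per page covered by the chunk. The sizing in
AllocateSlotSet is a plain ceiling division; a self-contained sketch of
that arithmetic (kPageSize is a stand-in constant, not Page::kPageSize):

    #include <cstddef>

    const size_t kPageSize = 1u << 20;  // stand-in page size

    // ceil(size / kPageSize): a chunk of 3 pages plus one byte needs 4
    // slot sets, since the trailing partial page still needs coverage.
    size_t SlotSetsForChunk(size_t size) {
      return (size + kPageSize - 1) / kPageSize;
    }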
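
Finally, FreeList's four named lists (small_list_ through huge_list_)
become an array indexed by FreeListCategoryType, so the per-list
boilerplate in Reset, RepairLists, SumFreeLists, and friends collapses
into loops over categories. A reduced sketch of the shape of that
refactoring (the struct members and the summed field are illustrative
only):

    #include <cstddef>

    enum FreeListCategoryType {
      kSmall, kMedium, kLarge, kHuge,
      kNumberOfCategories,
      kFirstCategory = kSmall
    };

    struct FreeListCategory {
      size_t SumFreeList() { return available_; }
      size_t available_ = 0;
    };

    struct FreeList {
      FreeListCategory category_[kNumberOfCategories];
      // One loop replaces four hand-written per-list statements.
      size_t SumFreeLists() {
        size_t sum = 0;
        for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
          sum += category_[i].SumFreeList();
        }
        return sum;
      }
    };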
diff --git a/src/heap/spaces.cc b/src/heap/spaces.cc
index 90d252a..6b98fc1 100644
--- a/src/heap/spaces.cc
+++ b/src/heap/spaces.cc
@@ -7,6 +7,7 @@
 #include "src/base/bits.h"
 #include "src/base/platform/platform.h"
 #include "src/full-codegen/full-codegen.h"
+#include "src/heap/slot-set.h"
 #include "src/heap/slots-buffer.h"
 #include "src/macro-assembler.h"
 #include "src/msan.h"
@@ -35,7 +36,7 @@
          owner == page->heap()->code_space());
   Initialize(reinterpret_cast<PagedSpace*>(owner), page->area_start(),
              page->area_end(), kOnePageOnly);
-  DCHECK(page->WasSwept() || page->SweepingCompleted());
+  DCHECK(page->SweepingDone());
 }
 
 
@@ -66,10 +67,24 @@
       cur_page);
   cur_addr_ = cur_page->area_start();
   cur_end_ = cur_page->area_end();
-  DCHECK(cur_page->WasSwept() || cur_page->SweepingCompleted());
+  DCHECK(cur_page->SweepingDone());
   return true;
 }
 
+PauseAllocationObserversScope::PauseAllocationObserversScope(Heap* heap)
+    : heap_(heap) {
+  AllSpaces spaces(heap_);
+  for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
+    space->PauseAllocationObservers();
+  }
+}
+
+PauseAllocationObserversScope::~PauseAllocationObserversScope() {
+  AllSpaces spaces(heap_);
+  for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
+    space->ResumeAllocationObservers();
+  }
+}
 
 // -----------------------------------------------------------------------------
 // CodeRange
@@ -427,8 +442,7 @@
 
   MemoryChunk* chunk =
       MemoryChunk::Initialize(heap, start, Page::kPageSize, area_start,
-                              area_end, NOT_EXECUTABLE, semi_space);
-  chunk->initialize_scan_on_scavenge(true);
+                              area_end, NOT_EXECUTABLE, semi_space, nullptr);
   bool in_to_space = (semi_space->id() != kFromSpace);
   chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
                              : MemoryChunk::IN_FROM_SPACE);
@@ -449,10 +463,10 @@
   SetFlags(0, ~0);
 }
 
-
 MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
                                      Address area_start, Address area_end,
-                                     Executability executable, Space* owner) {
+                                     Executability executable, Space* owner,
+                                     base::VirtualMemory* reservation) {
   MemoryChunk* chunk = FromAddress(base);
 
   DCHECK(base == chunk->address());
@@ -464,23 +478,20 @@
   chunk->flags_ = 0;
   chunk->set_owner(owner);
   chunk->InitializeReservedMemory();
-  chunk->slots_buffer_ = NULL;
-  chunk->skip_list_ = NULL;
+  chunk->slots_buffer_ = nullptr;
+  chunk->old_to_new_slots_ = nullptr;
+  chunk->old_to_old_slots_ = nullptr;
+  chunk->skip_list_ = nullptr;
   chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
   chunk->progress_bar_ = 0;
   chunk->high_water_mark_.SetValue(static_cast<intptr_t>(area_start - base));
-  chunk->parallel_sweeping_state().SetValue(kSweepingDone);
+  chunk->concurrent_sweeping_state().SetValue(kSweepingDone);
   chunk->parallel_compaction_state().SetValue(kCompactingDone);
-  chunk->mutex_ = NULL;
-  chunk->available_in_small_free_list_ = 0;
-  chunk->available_in_medium_free_list_ = 0;
-  chunk->available_in_large_free_list_ = 0;
-  chunk->available_in_huge_free_list_ = 0;
-  chunk->non_available_small_blocks_ = 0;
+  chunk->mutex_ = nullptr;
+  chunk->available_in_free_list_ = 0;
+  chunk->wasted_memory_ = 0;
   chunk->ResetLiveBytes();
   Bitmap::Clear(chunk);
-  chunk->initialize_scan_on_scavenge(false);
-  chunk->SetFlag(WAS_SWEPT);
   chunk->set_next_chunk(nullptr);
   chunk->set_prev_chunk(nullptr);
 
@@ -491,6 +502,10 @@
     chunk->SetFlag(IS_EXECUTABLE);
   }
 
+  if (reservation != nullptr) {
+    chunk->reservation_.TakeControl(reservation);
+  }
+
   return chunk;
 }
 
@@ -692,19 +707,14 @@
     PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
   }
 
-  MemoryChunk* result = MemoryChunk::Initialize(
-      heap, base, chunk_size, area_start, area_end, executable, owner);
-  result->set_reserved_memory(&reservation);
-  return result;
+  return MemoryChunk::Initialize(heap, base, chunk_size, area_start, area_end,
+                                 executable, owner, &reservation);
 }
 
 
 void Page::ResetFreeListStatistics() {
-  non_available_small_blocks_ = 0;
-  available_in_small_free_list_ = 0;
-  available_in_medium_free_list_ = 0;
-  available_in_large_free_list_ = 0;
-  available_in_huge_free_list_ = 0;
+  wasted_memory_ = 0;
+  available_in_free_list_ = 0;
 }
 
 
@@ -921,21 +931,46 @@
 // -----------------------------------------------------------------------------
 // MemoryChunk implementation
 
-void MemoryChunk::IncrementLiveBytesFromMutator(HeapObject* object, int by) {
-  MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
-  if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->WasSwept()) {
-    static_cast<PagedSpace*>(chunk->owner())->Allocate(by);
-  }
-  chunk->IncrementLiveBytes(by);
-}
-
-
 void MemoryChunk::ReleaseAllocatedMemory() {
   delete slots_buffer_;
+  slots_buffer_ = nullptr;
   delete skip_list_;
+  skip_list_ = nullptr;
   delete mutex_;
+  mutex_ = nullptr;
+  ReleaseOldToNewSlots();
+  ReleaseOldToOldSlots();
 }
 
+static SlotSet* AllocateSlotSet(size_t size, Address page_start) {
+  size_t pages = (size + Page::kPageSize - 1) / Page::kPageSize;
+  DCHECK(pages > 0);
+  SlotSet* slot_set = new SlotSet[pages];
+  for (size_t i = 0; i < pages; i++) {
+    slot_set[i].SetPageStart(page_start + i * Page::kPageSize);
+  }
+  return slot_set;
+}
+
+void MemoryChunk::AllocateOldToNewSlots() {
+  DCHECK(nullptr == old_to_new_slots_);
+  old_to_new_slots_ = AllocateSlotSet(size_, address());
+}
+
+void MemoryChunk::ReleaseOldToNewSlots() {
+  delete[] old_to_new_slots_;
+  old_to_new_slots_ = nullptr;
+}
+
+void MemoryChunk::AllocateOldToOldSlots() {
+  DCHECK(nullptr == old_to_old_slots_);
+  old_to_old_slots_ = AllocateSlotSet(size_, address());
+}
+
+void MemoryChunk::ReleaseOldToOldSlots() {
+  delete[] old_to_old_slots_;
+  old_to_old_slots_ = nullptr;
+}
 
 // -----------------------------------------------------------------------------
 // PagedSpace implementation
@@ -949,12 +984,18 @@
 STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::MAP_SPACE) ==
               ObjectSpace::kObjectSpaceMapSpace);
 
+void Space::AllocationStep(Address soon_object, int size) {
+  if (!allocation_observers_paused_) {
+    for (int i = 0; i < allocation_observers_->length(); ++i) {
+      AllocationObserver* o = (*allocation_observers_)[i];
+      o->AllocationStep(size, soon_object, size);
+    }
+  }
+}
 
 PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
                        Executability executable)
-    : Space(heap, space, executable),
-      free_list_(this),
-      end_of_unswept_pages_(NULL) {
+    : Space(heap, space, executable), free_list_(this) {
   area_size_ = MemoryAllocator::PageAreaSize(space);
   accounting_stats_.Clear();
 
@@ -987,52 +1028,6 @@
 }
 
 
-FreeSpace* PagedSpace::TryRemoveMemory(intptr_t size_in_bytes) {
-  FreeSpace* free_space = free_list()->TryRemoveMemory(size_in_bytes);
-  if (free_space != nullptr) {
-    accounting_stats_.DecreaseCapacity(free_space->size());
-  }
-  return free_space;
-}
-
-
-void PagedSpace::DivideUponCompactionSpaces(CompactionSpaceCollection** other,
-                                            int num, intptr_t limit) {
-  DCHECK_GT(num, 0);
-  DCHECK(other != nullptr);
-
-  if (limit == 0) limit = std::numeric_limits<intptr_t>::max();
-
-  EmptyAllocationInfo();
-
-  bool memory_available = true;
-  bool spaces_need_memory = true;
-  FreeSpace* node = nullptr;
-  CompactionSpace* current_space = nullptr;
-  // Iterate over spaces and memory as long as we have memory and there are
-  // spaces in need of some.
-  while (memory_available && spaces_need_memory) {
-    spaces_need_memory = false;
-    // Round-robin over all spaces.
-    for (int i = 0; i < num; i++) {
-      current_space = other[i]->Get(identity());
-      if (current_space->free_list()->Available() < limit) {
-        // Space has not reached its limit. Try to get some memory.
-        spaces_need_memory = true;
-        node = TryRemoveMemory(limit - current_space->free_list()->Available());
-        if (node != nullptr) {
-          CHECK(current_space->identity() == identity());
-          current_space->AddMemory(node->address(), node->size());
-        } else {
-          memory_available = false;
-          break;
-        }
-      }
-    }
-  }
-}
-
-
 void PagedSpace::RefillFreeList() {
   MarkCompactCollector* collector = heap()->mark_compact_collector();
   FreeList* free_list = nullptr;
@@ -1075,7 +1070,6 @@
   }
 }
 
-
 void PagedSpace::MoveOverFreeMemory(PagedSpace* other) {
   DCHECK(identity() == other->identity());
   // Destroy the linear allocation space of {other}. This is needed to
@@ -1109,8 +1103,6 @@
   DCHECK(other->top() == nullptr);
   DCHECK(other->limit() == nullptr);
 
-  DCHECK(other->end_of_unswept_pages_ == nullptr);
-
   AccountCommitted(other->CommittedMemory());
 
   // Move over pages.
@@ -1136,8 +1128,7 @@
   return size;
 }
 
-
-bool PagedSpace::ContainsSafe(Address addr) {
+bool PagedSpace::ContainsSlow(Address addr) {
   Page* p = Page::FromAddress(addr);
   PageIterator iterator(this);
   while (iterator.has_next()) {
@@ -1229,21 +1220,16 @@
 }
 
 
-void PagedSpace::ReleasePage(Page* page) {
+void PagedSpace::ReleasePage(Page* page, bool evict_free_list_items) {
   DCHECK(page->LiveBytes() == 0);
   DCHECK(AreaSize() == page->area_size());
 
-  if (page->WasSwept()) {
+  if (evict_free_list_items) {
     intptr_t size = free_list_.EvictFreeListItems(page);
     accounting_stats_.AllocateBytes(size);
     DCHECK_EQ(AreaSize(), static_cast<int>(size));
   }
 
-  if (page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE)) {
-    heap()->decrement_scan_on_scavenge_pages();
-    page->ClearFlag(MemoryChunk::SCAN_ON_SCAVENGE);
-  }
-
   DCHECK(!free_list_.ContainsPageFreeListItems(page));
 
   if (Page::FromAllocationTop(allocation_info_.top()) == page) {
@@ -1279,7 +1265,7 @@
     if (page == Page::FromAllocationTop(allocation_info_.top())) {
       allocation_pointer_found_in_space = true;
     }
-    CHECK(page->WasSwept());
+    CHECK(page->SweepingDone());
     HeapObjectIterator it(page);
     Address end_of_previous_object = page->area_start();
     Address top = page->area_end();
@@ -1327,8 +1313,6 @@
   // this chunk must be a power of two and it must be aligned to its size.
   int initial_semispace_capacity = heap()->InitialSemiSpaceSize();
 
-  int target_semispace_capacity = heap()->TargetSemiSpaceSize();
-
   size_t size = 2 * reserved_semispace_capacity;
   Address base = heap()->isolate()->memory_allocator()->ReserveAlignedMemory(
       size, size, &reservation_);
@@ -1357,19 +1341,15 @@
   DCHECK(IsAddressAligned(chunk_base_, 2 * reserved_semispace_capacity, 0));
 
   to_space_.SetUp(chunk_base_, initial_semispace_capacity,
-                  target_semispace_capacity, maximum_semispace_capacity);
+                  maximum_semispace_capacity);
   from_space_.SetUp(chunk_base_ + reserved_semispace_capacity,
-                    initial_semispace_capacity, target_semispace_capacity,
-                    maximum_semispace_capacity);
+                    initial_semispace_capacity, maximum_semispace_capacity);
   if (!to_space_.Commit()) {
     return false;
   }
   DCHECK(!from_space_.is_committed());  // No need to use memory yet.
 
   start_ = chunk_base_;
-  address_mask_ = ~(2 * reserved_semispace_capacity - 1);
-  object_mask_ = address_mask_ | kHeapObjectTagMask;
-  object_expected_ = reinterpret_cast<uintptr_t>(start_) | kHeapObjectTag;
 
   ResetAllocationInfo();
 
@@ -1416,7 +1396,7 @@
     if (!from_space_.GrowTo(new_capacity)) {
       // If we managed to grow to-space but couldn't grow from-space,
       // attempt to shrink to-space.
-      if (!to_space_.ShrinkTo(from_space_.TotalCapacity())) {
+      if (!to_space_.ShrinkTo(from_space_.current_capacity())) {
         // We are in an inconsistent state because we could not
         // commit/uncommit memory from new space.
         CHECK(false);
@@ -1427,36 +1407,6 @@
 }
 
 
-bool NewSpace::GrowOnePage() {
-  if (TotalCapacity() == MaximumCapacity()) return false;
-  int new_capacity = static_cast<int>(TotalCapacity()) + Page::kPageSize;
-  if (to_space_.GrowTo(new_capacity)) {
-    // Only grow from space if we managed to grow to-space and the from space
-    // is actually committed.
-    if (from_space_.is_committed()) {
-      if (!from_space_.GrowTo(new_capacity)) {
-        // If we managed to grow to-space but couldn't grow from-space,
-        // attempt to shrink to-space.
-        if (!to_space_.ShrinkTo(from_space_.TotalCapacity())) {
-          // We are in an inconsistent state because we could not
-          // commit/uncommit memory from new space.
-          CHECK(false);
-        }
-        return false;
-      }
-    } else {
-      if (!from_space_.SetTotalCapacity(new_capacity)) {
-        // Can't really happen, but better safe than sorry.
-        CHECK(false);
-      }
-    }
-    DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
-    return true;
-  }
-  return false;
-}
-
-
 void NewSpace::Shrink() {
   int new_capacity = Max(InitialTotalCapacity(), 2 * SizeAsInt());
   int rounded_new_capacity = RoundUp(new_capacity, Page::kPageSize);
@@ -1467,7 +1417,7 @@
     if (!from_space_.ShrinkTo(rounded_new_capacity)) {
       // If we managed to shrink to-space but couldn't shrink from
       // space, attempt to grow to-space again.
-      if (!to_space_.GrowTo(from_space_.TotalCapacity())) {
+      if (!to_space_.GrowTo(from_space_.current_capacity())) {
         // We are in an inconsistent state because we could not
         // commit/uncommit memory from new space.
         CHECK(false);
@@ -1547,8 +1497,7 @@
     Address high = to_space_.page_high();
     Address new_top = allocation_info_.top() + size_in_bytes;
     allocation_info_.set_limit(Min(new_top, high));
-  } else if (inline_allocation_observers_paused_ ||
-             top_on_previous_step_ == 0) {
+  } else if (allocation_observers_paused_ || top_on_previous_step_ == 0) {
     // Normal limit is the end of the current page.
     allocation_info_.set_limit(to_space_.page_high());
   } else {
@@ -1564,29 +1513,10 @@
 
 bool NewSpace::AddFreshPage() {
   Address top = allocation_info_.top();
-  if (NewSpacePage::IsAtStart(top)) {
-    // The current page is already empty. Don't try to make another.
-
-    // We should only get here if someone asks to allocate more
-    // than what can be stored in a single page.
-    // TODO(gc): Change the limit on new-space allocation to prevent this
-    // from happening (all such allocations should go directly to LOSpace).
-    return false;
-  }
+  DCHECK(!NewSpacePage::IsAtStart(top));
   if (!to_space_.AdvancePage()) {
-    // Check if we reached the target capacity yet. If not, try to commit a page
-    // and continue.
-    if ((to_space_.TotalCapacity() < to_space_.TargetCapacity()) &&
-        GrowOnePage()) {
-      if (!to_space_.AdvancePage()) {
-        // It doesn't make sense that we managed to commit a page, but can't use
-        // it.
-        CHECK(false);
-      }
-    } else {
-      // Failed to get a new page in to-space.
-      return false;
-    }
+    // No more pages left to advance.
+    return false;
   }
 
   // Clear remainder of current page.
@@ -1648,9 +1578,9 @@
 
 
 void NewSpace::StartNextInlineAllocationStep() {
-  if (!inline_allocation_observers_paused_) {
+  if (!allocation_observers_paused_) {
     top_on_previous_step_ =
-        inline_allocation_observers_.length() ? allocation_info_.top() : 0;
+        allocation_observers_->length() ? allocation_info_.top() : 0;
     UpdateInlineAllocationLimit(0);
   }
 }
@@ -1658,44 +1588,36 @@
 
 intptr_t NewSpace::GetNextInlineAllocationStepSize() {
   intptr_t next_step = 0;
-  for (int i = 0; i < inline_allocation_observers_.length(); ++i) {
-    InlineAllocationObserver* o = inline_allocation_observers_[i];
+  for (int i = 0; i < allocation_observers_->length(); ++i) {
+    AllocationObserver* o = (*allocation_observers_)[i];
     next_step = next_step ? Min(next_step, o->bytes_to_next_step())
                           : o->bytes_to_next_step();
   }
-  DCHECK(inline_allocation_observers_.length() == 0 || next_step != 0);
+  DCHECK(allocation_observers_->length() == 0 || next_step != 0);
   return next_step;
 }
 
-
-void NewSpace::AddInlineAllocationObserver(InlineAllocationObserver* observer) {
-  inline_allocation_observers_.Add(observer);
+void NewSpace::AddAllocationObserver(AllocationObserver* observer) {
+  Space::AddAllocationObserver(observer);
   StartNextInlineAllocationStep();
 }
 
-
-void NewSpace::RemoveInlineAllocationObserver(
-    InlineAllocationObserver* observer) {
-  bool removed = inline_allocation_observers_.RemoveElement(observer);
-  // Only used in assertion. Suppress unused variable warning.
-  static_cast<void>(removed);
-  DCHECK(removed);
+void NewSpace::RemoveAllocationObserver(AllocationObserver* observer) {
+  Space::RemoveAllocationObserver(observer);
   StartNextInlineAllocationStep();
 }
 
-
-void NewSpace::PauseInlineAllocationObservers() {
+void NewSpace::PauseAllocationObservers() {
   // Do a step to account for memory allocated so far.
   InlineAllocationStep(top(), top(), nullptr, 0);
-  inline_allocation_observers_paused_ = true;
+  Space::PauseAllocationObservers();
   top_on_previous_step_ = 0;
   UpdateInlineAllocationLimit(0);
 }
 
-
-void NewSpace::ResumeInlineAllocationObservers() {
+void NewSpace::ResumeAllocationObservers() {
   DCHECK(top_on_previous_step_ == 0);
-  inline_allocation_observers_paused_ = false;
+  Space::ResumeAllocationObservers();
   StartNextInlineAllocationStep();
 }
 
@@ -1704,9 +1626,9 @@
                                     Address soon_object, size_t size) {
   if (top_on_previous_step_) {
     int bytes_allocated = static_cast<int>(top - top_on_previous_step_);
-    for (int i = 0; i < inline_allocation_observers_.length(); ++i) {
-      inline_allocation_observers_[i]->InlineAllocationStep(bytes_allocated,
-                                                            soon_object, size);
+    for (int i = 0; i < allocation_observers_->length(); ++i) {
+      (*allocation_observers_)[i]->AllocationStep(bytes_allocated, soon_object,
+                                                  size);
     }
     top_on_previous_step_ = new_top;
   }
@@ -1771,68 +1693,56 @@
 // -----------------------------------------------------------------------------
 // SemiSpace implementation
 
-void SemiSpace::SetUp(Address start, int initial_capacity, int target_capacity,
+void SemiSpace::SetUp(Address start, int initial_capacity,
                       int maximum_capacity) {
-  // Creates a space in the young generation. The constructor does not
-  // allocate memory from the OS.  A SemiSpace is given a contiguous chunk of
-  // memory of size 'capacity' when set up, and does not grow or shrink
-  // otherwise.  In the mark-compact collector, the memory region of the from
-  // space is used as the marking stack. It requires contiguous memory
-  // addresses.
-  DCHECK(maximum_capacity >= Page::kPageSize);
-  DCHECK(initial_capacity <= target_capacity);
-  DCHECK(target_capacity <= maximum_capacity);
-  initial_total_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
-  total_capacity_ = initial_capacity;
-  target_capacity_ = RoundDown(target_capacity, Page::kPageSize);
-  maximum_total_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
+  DCHECK_GE(maximum_capacity, Page::kPageSize);
+  minimum_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
+  current_capacity_ = minimum_capacity_;
+  maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
   committed_ = false;
   start_ = start;
-  address_mask_ = ~(maximum_capacity - 1);
-  object_mask_ = address_mask_ | kHeapObjectTagMask;
-  object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
   age_mark_ = start_ + NewSpacePage::kObjectStartOffset;
 }
 
 
 void SemiSpace::TearDown() {
-  start_ = NULL;
-  total_capacity_ = 0;
+  start_ = nullptr;
+  current_capacity_ = 0;
 }
 
 
 bool SemiSpace::Commit() {
   DCHECK(!is_committed());
-  int pages = total_capacity_ / Page::kPageSize;
   if (!heap()->isolate()->memory_allocator()->CommitBlock(
-          start_, total_capacity_, executable())) {
+          start_, current_capacity_, executable())) {
     return false;
   }
-  AccountCommitted(total_capacity_);
+  AccountCommitted(current_capacity_);
 
   NewSpacePage* current = anchor();
-  for (int i = 0; i < pages; i++) {
+  const int num_pages = current_capacity_ / Page::kPageSize;
+  for (int i = 0; i < num_pages; i++) {
     NewSpacePage* new_page =
         NewSpacePage::Initialize(heap(), start_ + i * Page::kPageSize, this);
     new_page->InsertAfter(current);
     current = new_page;
   }
-
-  SetCapacity(total_capacity_);
-  committed_ = true;
   Reset();
+
+  set_current_capacity(current_capacity_);
+  committed_ = true;
   return true;
 }
 
 
 bool SemiSpace::Uncommit() {
   DCHECK(is_committed());
-  Address start = start_ + maximum_total_capacity_ - total_capacity_;
-  if (!heap()->isolate()->memory_allocator()->UncommitBlock(start,
-                                                            total_capacity_)) {
+  Address start = start_ + maximum_capacity_ - current_capacity_;
+  if (!heap()->isolate()->memory_allocator()->UncommitBlock(
+          start, current_capacity_)) {
     return false;
   }
-  AccountUncommitted(total_capacity_);
+  AccountUncommitted(current_capacity_);
 
   anchor()->set_next_page(anchor());
   anchor()->set_prev_page(anchor());
@@ -1857,23 +1767,23 @@
   if (!is_committed()) {
     if (!Commit()) return false;
   }
-  DCHECK((new_capacity & Page::kPageAlignmentMask) == 0);
-  DCHECK(new_capacity <= maximum_total_capacity_);
-  DCHECK(new_capacity > total_capacity_);
-  int pages_before = total_capacity_ / Page::kPageSize;
+  DCHECK_EQ(new_capacity & Page::kPageAlignmentMask, 0);
+  DCHECK_LE(new_capacity, maximum_capacity_);
+  DCHECK_GT(new_capacity, current_capacity_);
+  int pages_before = current_capacity_ / Page::kPageSize;
   int pages_after = new_capacity / Page::kPageSize;
 
-  size_t delta = new_capacity - total_capacity_;
+  size_t delta = new_capacity - current_capacity_;
 
   DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
   if (!heap()->isolate()->memory_allocator()->CommitBlock(
-          start_ + total_capacity_, delta, executable())) {
+          start_ + current_capacity_, delta, executable())) {
     return false;
   }
   AccountCommitted(static_cast<intptr_t>(delta));
-  SetCapacity(new_capacity);
+  set_current_capacity(new_capacity);
   NewSpacePage* last_page = anchor()->prev_page();
-  DCHECK(last_page != anchor());
+  DCHECK_NE(last_page, anchor());
   for (int i = pages_before; i < pages_after; i++) {
     Address page_address = start_ + i * Page::kPageSize;
     NewSpacePage* new_page =
@@ -1890,11 +1800,11 @@
 
 
 bool SemiSpace::ShrinkTo(int new_capacity) {
-  DCHECK((new_capacity & Page::kPageAlignmentMask) == 0);
-  DCHECK(new_capacity >= initial_total_capacity_);
-  DCHECK(new_capacity < total_capacity_);
+  DCHECK_EQ(new_capacity & Page::kPageAlignmentMask, 0);
+  DCHECK_GE(new_capacity, minimum_capacity_);
+  DCHECK_LT(new_capacity, current_capacity_);
   if (is_committed()) {
-    size_t delta = total_capacity_ - new_capacity;
+    size_t delta = current_capacity_ - new_capacity;
     DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
 
     MemoryAllocator* allocator = heap()->isolate()->memory_allocator();
@@ -1911,37 +1821,23 @@
     DCHECK((current_page_ >= first_page()) && (current_page_ <= new_last_page));
   }
 
-  SetCapacity(new_capacity);
+  set_current_capacity(new_capacity);
 
   return true;
 }
 
-
-bool SemiSpace::SetTotalCapacity(int new_capacity) {
-  CHECK(!is_committed());
-  if (new_capacity >= initial_total_capacity_ &&
-      new_capacity <= maximum_total_capacity_) {
-    total_capacity_ = new_capacity;
-    return true;
-  }
-  return false;
-}
-
-
-void SemiSpace::FlipPages(intptr_t flags, intptr_t mask) {
+void SemiSpace::FixPagesFlags(intptr_t flags, intptr_t mask) {
   anchor_.set_owner(this);
-  // Fixup back-pointers to anchor. Address of anchor changes
-  // when we swap.
+  // Fixup back-pointers to anchor. Address of anchor changes when we swap.
   anchor_.prev_page()->set_next_page(&anchor_);
   anchor_.next_page()->set_prev_page(&anchor_);
 
-  bool becomes_to_space = (id_ == kFromSpace);
-  id_ = becomes_to_space ? kToSpace : kFromSpace;
-  NewSpacePage* page = anchor_.next_page();
-  while (page != &anchor_) {
+  NewSpacePageIterator it(this);
+  while (it.has_next()) {
+    NewSpacePage* page = it.next();
     page->set_owner(this);
     page->SetFlags(flags, mask);
-    if (becomes_to_space) {
+    if (id_ == kToSpace) {
       page->ClearFlag(MemoryChunk::IN_FROM_SPACE);
       page->SetFlag(MemoryChunk::IN_TO_SPACE);
       page->ClearFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
@@ -1950,48 +1846,42 @@
       page->SetFlag(MemoryChunk::IN_FROM_SPACE);
       page->ClearFlag(MemoryChunk::IN_TO_SPACE);
     }
-    DCHECK(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
     DCHECK(page->IsFlagSet(MemoryChunk::IN_TO_SPACE) ||
            page->IsFlagSet(MemoryChunk::IN_FROM_SPACE));
-    page = page->next_page();
   }
 }
 
 
 void SemiSpace::Reset() {
-  DCHECK(anchor_.next_page() != &anchor_);
+  DCHECK_NE(anchor_.next_page(), &anchor_);
   current_page_ = anchor_.next_page();
 }
 
 
 void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
   // We won't be swapping semispaces without data in them.
-  DCHECK(from->anchor_.next_page() != &from->anchor_);
-  DCHECK(to->anchor_.next_page() != &to->anchor_);
+  DCHECK_NE(from->anchor_.next_page(), &from->anchor_);
+  DCHECK_NE(to->anchor_.next_page(), &to->anchor_);
 
-  // Swap bits.
-  SemiSpace tmp = *from;
-  *from = *to;
-  *to = tmp;
+  intptr_t saved_to_space_flags = to->current_page()->GetFlags();
 
-  // Fixup back-pointers to the page list anchor now that its address
-  // has changed.
-  // Swap to/from-space bits on pages.
-  // Copy GC flags from old active space (from-space) to new (to-space).
-  intptr_t flags = from->current_page()->GetFlags();
-  to->FlipPages(flags, NewSpacePage::kCopyOnFlipFlagsMask);
+  // We swap all properties but id_.
+  std::swap(from->current_capacity_, to->current_capacity_);
+  std::swap(from->maximum_capacity_, to->maximum_capacity_);
+  std::swap(from->minimum_capacity_, to->minimum_capacity_);
+  std::swap(from->start_, to->start_);
+  std::swap(from->age_mark_, to->age_mark_);
+  std::swap(from->committed_, to->committed_);
+  std::swap(from->anchor_, to->anchor_);
+  std::swap(from->current_page_, to->current_page_);
 
-  from->FlipPages(0, 0);
-}
-
-
-void SemiSpace::SetCapacity(int new_capacity) {
-  total_capacity_ = new_capacity;
+  to->FixPagesFlags(saved_to_space_flags, NewSpacePage::kCopyOnFlipFlagsMask);
+  from->FixPagesFlags(0, 0);
 }
 
 
 void SemiSpace::set_age_mark(Address mark) {
-  DCHECK(NewSpacePage::FromLimit(mark)->semi_space() == this);
+  DCHECK_EQ(NewSpacePage::FromLimit(mark)->semi_space(), this);
   age_mark_ = mark;
   // Mark all pages up to the one containing mark.
   NewSpacePageIterator it(space_start(), mark);
@@ -2011,7 +1901,7 @@
   NewSpacePage* page = anchor_.next_page();
   CHECK(anchor_.semi_space() == this);
   while (page != &anchor_) {
-    CHECK(page->semi_space() == this);
+    CHECK_EQ(page->semi_space(), this);
     CHECK(page->InNewSpace());
     CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE
                                         : MemoryChunk::IN_TO_SPACE));
@@ -2030,8 +1920,7 @@
       // TODO(gc): Check that the live_bytes_count_ field matches the
       // black marking on the page (if we make it match in new-space).
     }
-    CHECK(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
-    CHECK(page->prev_page()->next_page() == page);
+    CHECK_EQ(page->prev_page()->next_page(), page);
     page = page->next_page();
   }
 }
@@ -2048,7 +1937,7 @@
   // or end address is on a later page in the linked list of
   // semi-space pages.
   if (page == end_page) {
-    CHECK(start <= end);
+    CHECK_LE(start, end);
   } else {
     while (page != end_page) {
       page = page->next_page();
@@ -2313,8 +2202,7 @@
     }
     prev_node = cur_node;
   }
-  DCHECK_EQ(p->available_in_free_list(type_), sum);
-  p->add_available_in_free_list(type_, -sum);
+  p->add_available_in_free_list(-sum);
   available_ -= sum;
   return sum;
 }
@@ -2337,7 +2225,7 @@
   Page* page = Page::FromAddress(node->address());
   while ((node != nullptr) && !page->CanAllocate()) {
     available_ -= node->size();
-    page->add_available_in_free_list(type_, -(node->Size()));
+    page->add_available_in_free_list(-(node->Size()));
     node = node->next();
   }
 
@@ -2392,7 +2280,7 @@
       }
       // For evacuation candidates we continue.
       if (!page_for_node->CanAllocate()) {
-        page_for_node->add_available_in_free_list(type_, -size);
+        page_for_node->add_available_in_free_list(-size);
         continue;
       }
       // Otherwise we have a large enough node and can return.
@@ -2429,14 +2317,10 @@
   }
 }
 
-
-FreeList::FreeList(PagedSpace* owner)
-    : owner_(owner),
-      wasted_bytes_(0),
-      small_list_(this, kSmall),
-      medium_list_(this, kMedium),
-      large_list_(this, kLarge),
-      huge_list_(this, kHuge) {
+FreeList::FreeList(PagedSpace* owner) : owner_(owner), wasted_bytes_(0) {
+  for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
+    category_[i].Initialize(this, static_cast<FreeListCategoryType>(i));
+  }
   Reset();
 }
 
@@ -2456,10 +2340,10 @@
   wasted_bytes_ += wasted_bytes;
   other->wasted_bytes_ = 0;
 
-  usable_bytes += small_list_.Concatenate(other->GetFreeListCategory(kSmall));
-  usable_bytes += medium_list_.Concatenate(other->GetFreeListCategory(kMedium));
-  usable_bytes += large_list_.Concatenate(other->GetFreeListCategory(kLarge));
-  usable_bytes += huge_list_.Concatenate(other->GetFreeListCategory(kHuge));
+  for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
+    usable_bytes += category_[i].Concatenate(
+        other->GetFreeListCategory(static_cast<FreeListCategoryType>(i)));
+  }
 
   if (!other->owner()->is_local()) other->mutex()->Unlock();
   if (!owner()->is_local()) mutex_.Unlock();
@@ -2468,10 +2352,9 @@
 
 
 void FreeList::Reset() {
-  small_list_.Reset();
-  medium_list_.Reset();
-  large_list_.Reset();
-  huge_list_.Reset();
+  for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
+    category_[i].Reset();
+  }
   ResetStats();
 }
 
@@ -2485,7 +2368,7 @@
 
   // Early return to drop too-small blocks on the floor.
   if (size_in_bytes <= kSmallListMin) {
-    page->add_non_available_small_blocks(size_in_bytes);
+    page->add_wasted_memory(size_in_bytes);
     wasted_bytes_ += size_in_bytes;
     return size_in_bytes;
   }
@@ -2493,19 +2376,9 @@
   FreeSpace* free_space = FreeSpace::cast(HeapObject::FromAddress(start));
   // Insert other blocks at the head of a free list of the appropriate
   // magnitude.
-  if (size_in_bytes <= kSmallListMax) {
-    small_list_.Free(free_space, size_in_bytes);
-    page->add_available_in_small_free_list(size_in_bytes);
-  } else if (size_in_bytes <= kMediumListMax) {
-    medium_list_.Free(free_space, size_in_bytes);
-    page->add_available_in_medium_free_list(size_in_bytes);
-  } else if (size_in_bytes <= kLargeListMax) {
-    large_list_.Free(free_space, size_in_bytes);
-    page->add_available_in_large_free_list(size_in_bytes);
-  } else {
-    huge_list_.Free(free_space, size_in_bytes);
-    page->add_available_in_huge_free_list(size_in_bytes);
-  }
+  FreeListCategoryType type = SelectFreeListCategoryType(size_in_bytes);
+  category_[type].Free(free_space, size_in_bytes);
+  page->add_available_in_free_list(size_in_bytes);
 
   DCHECK(IsVeryLong() || Available() == SumFreeLists());
   return 0;
@@ -2516,7 +2389,7 @@
   FreeSpace* node = GetFreeListCategory(category)->PickNodeFromList(node_size);
   if (node != nullptr) {
     Page::FromAddress(node->address())
-        ->add_available_in_free_list(category, -(*node_size));
+        ->add_available_in_free_list(-(*node_size));
     DCHECK(IsVeryLong() || Available() == SumFreeLists());
   }
   return node;
@@ -2527,50 +2400,37 @@
   FreeSpace* node = nullptr;
   Page* page = nullptr;
 
-  if (size_in_bytes <= kSmallAllocationMax) {
-    node = FindNodeIn(kSmall, node_size);
+  // First try the allocation fast path: try to allocate the minimum element
+  // size of a free list category. This operation is constant time.
+  FreeListCategoryType type =
+      SelectFastAllocationFreeListCategoryType(size_in_bytes);
+  for (int i = type; i < kHuge; i++) {
+    node = FindNodeIn(static_cast<FreeListCategoryType>(i), node_size);
     if (node != nullptr) return node;
   }
 
-  if (size_in_bytes <= kMediumAllocationMax) {
-    node = FindNodeIn(kMedium, node_size);
-    if (node != nullptr) return node;
-  }
-
-  if (size_in_bytes <= kLargeAllocationMax) {
-    node = FindNodeIn(kLarge, node_size);
-    if (node != nullptr) return node;
-  }
-
-  node = huge_list_.SearchForNodeInList(size_in_bytes, node_size);
+  // Next search the huge list for free list nodes. This takes linear time in
+  // the number of huge elements.
+  node = category_[kHuge].SearchForNodeInList(size_in_bytes, node_size);
   if (node != nullptr) {
     page = Page::FromAddress(node->address());
-    page->add_available_in_large_free_list(-(*node_size));
+    page->add_available_in_free_list(-(*node_size));
     DCHECK(IsVeryLong() || Available() == SumFreeLists());
     return node;
   }
 
-  if (size_in_bytes <= kSmallListMax) {
-    node = small_list_.PickNodeFromList(size_in_bytes, node_size);
-    if (node != NULL) {
-      DCHECK(size_in_bytes <= *node_size);
-      page = Page::FromAddress(node->address());
-      page->add_available_in_small_free_list(-(*node_size));
-    }
-  } else if (size_in_bytes <= kMediumListMax) {
-    node = medium_list_.PickNodeFromList(size_in_bytes, node_size);
-    if (node != NULL) {
-      DCHECK(size_in_bytes <= *node_size);
-      page = Page::FromAddress(node->address());
-      page->add_available_in_medium_free_list(-(*node_size));
-    }
-  } else if (size_in_bytes <= kLargeListMax) {
-    node = large_list_.PickNodeFromList(size_in_bytes, node_size);
-    if (node != NULL) {
-      DCHECK(size_in_bytes <= *node_size);
-      page = Page::FromAddress(node->address());
-      page->add_available_in_large_free_list(-(*node_size));
-    }
+  // We need a huge block of memory, but we didn't find anything in the huge
+  // list.
+  if (type == kHuge) return nullptr;
+
+  // Now search the best fitting free list for a node that has at least the
+  // requested size. This takes linear time in the number of elements.
+  type = SelectFreeListCategoryType(size_in_bytes);
+  node = category_[type].PickNodeFromList(size_in_bytes, node_size);
+  if (node != nullptr) {
+    DCHECK(size_in_bytes <= *node_size);
+    page = Page::FromAddress(node->address());
+    page->add_available_in_free_list(-(*node_size));
   }
 
   DCHECK(IsVeryLong() || Available() == SumFreeLists());
@@ -2633,6 +2493,7 @@
   int new_node_size = 0;
   FreeSpace* new_node = FindNodeFor(size_in_bytes, &new_node_size);
   if (new_node == nullptr) return nullptr;
+  owner_->AllocationStep(new_node->address(), size_in_bytes);
 
   int bytes_left = new_node_size - size_in_bytes;
   DCHECK(bytes_left >= 0);
@@ -2683,29 +2544,30 @@
 
 
 intptr_t FreeList::EvictFreeListItems(Page* p) {
-  intptr_t sum = huge_list_.EvictFreeListItemsInList(p);
+  intptr_t sum = category_[kHuge].EvictFreeListItemsInList(p);
   if (sum < p->area_size()) {
-    sum += small_list_.EvictFreeListItemsInList(p) +
-           medium_list_.EvictFreeListItemsInList(p) +
-           large_list_.EvictFreeListItemsInList(p);
+    for (int i = kFirstCategory; i <= kLarge; i++) {
+      sum += category_[i].EvictFreeListItemsInList(p);
+    }
   }
   return sum;
 }
 
 
 bool FreeList::ContainsPageFreeListItems(Page* p) {
-  return huge_list_.EvictFreeListItemsInList(p) ||
-         small_list_.EvictFreeListItemsInList(p) ||
-         medium_list_.EvictFreeListItemsInList(p) ||
-         large_list_.EvictFreeListItemsInList(p);
+  for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
+    if (category_[i].EvictFreeListItemsInList(p)) {
+      return true;
+    }
+  }
+  return false;
 }
 
 
 void FreeList::RepairLists(Heap* heap) {
-  small_list_.RepairFreeList(heap);
-  medium_list_.RepairFreeList(heap);
-  large_list_.RepairFreeList(heap);
-  huge_list_.RepairFreeList(heap);
+  for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
+    category_[i].RepairFreeList(heap);
+  }
 }
 
 
@@ -2740,8 +2602,12 @@
 
 
 bool FreeList::IsVeryLong() {
-  return small_list_.IsVeryLong() || medium_list_.IsVeryLong() ||
-         large_list_.IsVeryLong() || huge_list_.IsVeryLong();
+  for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
+    if (category_[i].IsVeryLong()) {
+      return true;
+    }
+  }
+  return false;
 }
 
 
@@ -2749,10 +2615,10 @@
 // on the free list, so it should not be called if FreeListLength returns
 // kVeryLongFreeList.
 intptr_t FreeList::SumFreeLists() {
-  intptr_t sum = small_list_.SumFreeList();
-  sum += medium_list_.SumFreeList();
-  sum += large_list_.SumFreeList();
-  sum += huge_list_.SumFreeList();
+  intptr_t sum = 0;
+  for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
+    sum += category_[i].SumFreeList();
+  }
   return sum;
 }
 #endif
@@ -2791,7 +2657,7 @@
   PageIterator iterator(this);
   while (iterator.has_next()) {
     Page* page = iterator.next();
-    int size = static_cast<int>(page->non_available_small_blocks());
+    int size = static_cast<int>(page->wasted_memory());
     if (size == 0) continue;
     Address address = page->OffsetToAddress(Page::kPageSize - size);
     heap()->CreateFillerObjectAt(address, size);
@@ -2837,6 +2703,8 @@
 
 
 HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
+  const int kMaxPagesToSweep = 1;
+
   // Allocation in this space has failed.
 
   MarkCompactCollector* collector = heap()->mark_compact_collector();
@@ -2851,10 +2719,13 @@
     if (object != NULL) return object;
 
     // If sweeping is still in progress try to sweep pages on the main thread.
-    collector->SweepInParallel(heap()->paged_space(identity()), size_in_bytes);
+    int max_freed = collector->SweepInParallel(heap()->paged_space(identity()),
+                                               size_in_bytes, kMaxPagesToSweep);
     RefillFreeList();
-    object = free_list_.Allocate(size_in_bytes);
-    if (object != nullptr) return object;
+    if (max_freed >= size_in_bytes) {
+      object = free_list_.Allocate(size_in_bytes);
+      if (object != nullptr) return object;
+    }
   }
 
   // Free list allocation failed and there is no next page.  Fail if we have
@@ -3139,6 +3010,7 @@
   }
 
   heap()->incremental_marking()->OldSpaceStep(object_size);
+  AllocationStep(object->address(), object_size);
   return object;
 }
 
@@ -3252,11 +3124,6 @@
 }
 
 
-bool LargeObjectSpace::Contains(Address address) {
-  return FindPage(address) != NULL;
-}
-
-
 #ifdef VERIFY_HEAP
 // We do not assume that the large object iterator works, because it depends
 // on the invariants we are checking during verification.