Merge V8 5.2.361.47 DO NOT MERGE

https://chromium.googlesource.com/v8/v8/+/5.2.361.47

FPIIM-449

Change-Id: Ibec421b85a9b88cb3a432ada642e469fe7e78346
(cherry picked from commit bcf72ee8e3b26f1d0726869c7ddb3921c68b09a8)

diff --git a/src/heap/spaces.cc b/src/heap/spaces.cc
index a0a3752..e517c45 100644
--- a/src/heap/spaces.cc
+++ b/src/heap/spaces.cc
@@ -6,11 +6,13 @@
 
 #include "src/base/bits.h"
 #include "src/base/platform/platform.h"
+#include "src/base/platform/semaphore.h"
 #include "src/full-codegen/full-codegen.h"
 #include "src/heap/slot-set.h"
 #include "src/macro-assembler.h"
 #include "src/msan.h"
 #include "src/snapshot/snapshot.h"
+#include "src/v8.h"
 
 namespace v8 {
 namespace internal {
@@ -62,8 +64,10 @@
   }
   cur_page = cur_page->next_page();
   if (cur_page == space_->anchor()) return false;
-  cur_page->heap()->mark_compact_collector()->SweepOrWaitUntilSweepingCompleted(
-      cur_page);
+  cur_page->heap()
+      ->mark_compact_collector()
+      ->sweeper()
+      .SweepOrWaitUntilSweepingCompleted(cur_page);
   cur_addr_ = cur_page->area_start();
   cur_end_ = cur_page->area_end();
   DCHECK(cur_page->SweepingDone());
@@ -222,7 +226,7 @@
   *allocated = current.size;
   DCHECK(*allocated <= current.size);
   DCHECK(IsAddressAligned(current.start, MemoryChunk::kAlignment));
-  if (!isolate_->memory_allocator()->CommitExecutableMemory(
+  if (!isolate_->heap()->memory_allocator()->CommitExecutableMemory(
           code_range_, current.start, commit_size, *allocated)) {
     *allocated = 0;
     ReleaseBlock(&current);
@@ -233,7 +237,8 @@
 
 
 bool CodeRange::CommitRawMemory(Address start, size_t length) {
-  return isolate_->memory_allocator()->CommitMemory(start, length, EXECUTABLE);
+  return isolate_->heap()->memory_allocator()->CommitMemory(start, length,
+                                                            EXECUTABLE);
 }
 
 
@@ -294,15 +299,17 @@
 
 MemoryAllocator::MemoryAllocator(Isolate* isolate)
     : isolate_(isolate),
+      code_range_(nullptr),
       capacity_(0),
       capacity_executable_(0),
       size_(0),
       size_executable_(0),
       lowest_ever_allocated_(reinterpret_cast<void*>(-1)),
-      highest_ever_allocated_(reinterpret_cast<void*>(0)) {}
+      highest_ever_allocated_(reinterpret_cast<void*>(0)),
+      unmapper_(this) {}
 
-
-bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) {
+bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable,
+                            intptr_t code_range_size) {
   capacity_ = RoundUp(capacity, Page::kPageSize);
   capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
   DCHECK_GE(capacity_, capacity_executable_);
@@ -310,21 +317,84 @@
   size_ = 0;
   size_executable_ = 0;
 
+  code_range_ = new CodeRange(isolate_);
+  if (!code_range_->SetUp(static_cast<size_t>(code_range_size))) return false;
+
   return true;
 }
 
 
 void MemoryAllocator::TearDown() {
-  for (MemoryChunk* chunk : chunk_pool_) {
+  unmapper()->WaitUntilCompleted();
+
+  MemoryChunk* chunk = nullptr;
+  while ((chunk = unmapper()->TryGetPooledMemoryChunkSafe()) != nullptr) {
     FreeMemory(reinterpret_cast<Address>(chunk), MemoryChunk::kPageSize,
                NOT_EXECUTABLE);
   }
+
   // Check that spaces were torn down before MemoryAllocator.
   DCHECK_EQ(size_.Value(), 0);
   // TODO(gc) this will be true again when we fix FreeMemory.
   // DCHECK(size_executable_ == 0);
   capacity_ = 0;
   capacity_executable_ = 0;
+
+  if (last_chunk_.IsReserved()) {
+    last_chunk_.Release();
+  }
+
+  delete code_range_;
+  code_range_ = nullptr;
+}
+
+class MemoryAllocator::Unmapper::UnmapFreeMemoryTask : public v8::Task {
+ public:
+  explicit UnmapFreeMemoryTask(Unmapper* unmapper) : unmapper_(unmapper) {}
+
+ private:
+  // v8::Task overrides.
+  void Run() override {
+    unmapper_->PerformFreeMemoryOnQueuedChunks();
+    unmapper_->pending_unmapping_tasks_semaphore_.Signal();
+  }
+
+  Unmapper* unmapper_;
+  DISALLOW_COPY_AND_ASSIGN(UnmapFreeMemoryTask);
+};
+
+void MemoryAllocator::Unmapper::FreeQueuedChunks() {
+  if (FLAG_concurrent_sweeping) {
+    V8::GetCurrentPlatform()->CallOnBackgroundThread(
+        new UnmapFreeMemoryTask(this), v8::Platform::kShortRunningTask);
+    concurrent_unmapping_tasks_active_++;
+  } else {
+    PerformFreeMemoryOnQueuedChunks();
+  }
+}
+
+bool MemoryAllocator::Unmapper::WaitUntilCompleted() {
+  bool waited = false;
+  while (concurrent_unmapping_tasks_active_ > 0) {
+    pending_unmapping_tasks_semaphore_.Wait();
+    concurrent_unmapping_tasks_active_--;
+    waited = true;
+  }
+  return waited;
+}
+
+void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks() {
+  MemoryChunk* chunk = nullptr;
+  // Regular chunks.
+  while ((chunk = GetMemoryChunkSafe<kRegular>()) != nullptr) {
+    bool pooled = chunk->IsFlagSet(MemoryChunk::POOLED);
+    allocator_->PerformFreeMemory(chunk);
+    if (pooled) AddMemoryChunkSafe<kPooled>(chunk);
+  }
+  // Non-regular chunks.
+  while ((chunk = GetMemoryChunkSafe<kNonRegular>()) != nullptr) {
+    allocator_->PerformFreeMemory(chunk);
+  }
 }
 
 bool MemoryAllocator::CommitMemory(Address base, size_t size,
@@ -342,12 +412,10 @@
                                  Executability executable) {
   // TODO(gc) make code_range part of memory allocator?
   // Code which is part of the code-range does not have its own VirtualMemory.
-  DCHECK(isolate_->code_range() == NULL ||
-         !isolate_->code_range()->contains(
-             static_cast<Address>(reservation->address())));
-  DCHECK(executable == NOT_EXECUTABLE || isolate_->code_range() == NULL ||
-         !isolate_->code_range()->valid() ||
-         reservation->size() <= Page::kPageSize);
+  DCHECK(code_range() == NULL ||
+         !code_range()->contains(static_cast<Address>(reservation->address())));
+  DCHECK(executable == NOT_EXECUTABLE || code_range() == NULL ||
+         !code_range()->valid() || reservation->size() <= Page::kPageSize);
 
   reservation->Release();
 }
@@ -356,20 +424,19 @@
 void MemoryAllocator::FreeMemory(Address base, size_t size,
                                  Executability executable) {
   // TODO(gc) make code_range part of memory allocator?
-  if (isolate_->code_range() != NULL &&
-      isolate_->code_range()->contains(static_cast<Address>(base))) {
+  if (code_range() != NULL &&
+      code_range()->contains(static_cast<Address>(base))) {
     DCHECK(executable == EXECUTABLE);
-    isolate_->code_range()->FreeRawMemory(base, size);
+    code_range()->FreeRawMemory(base, size);
   } else {
-    DCHECK(executable == NOT_EXECUTABLE || isolate_->code_range() == NULL ||
-           !isolate_->code_range()->valid());
+    DCHECK(executable == NOT_EXECUTABLE || code_range() == NULL ||
+           !code_range()->valid());
     bool result = base::VirtualMemory::ReleaseRegion(base, size);
     USE(result);
     DCHECK(result);
   }
 }
 
-
 Address MemoryAllocator::ReserveAlignedMemory(size_t size, size_t alignment,
                                               base::VirtualMemory* controller) {
   base::VirtualMemory reservation(size, alignment);
@@ -382,7 +449,6 @@
   return base;
 }
 
-
 Address MemoryAllocator::AllocateAlignedMemory(
     size_t reserve_size, size_t commit_size, size_t alignment,
     Executability executable, base::VirtualMemory* controller) {
@@ -415,20 +481,12 @@
   return base;
 }
 
-
-void Page::InitializeAsAnchor(PagedSpace* owner) {
-  set_owner(owner);
-  set_prev_page(this);
-  set_next_page(this);
-}
-
-void NewSpacePage::InitializeAsAnchor(SemiSpace* semi_space) {
-  set_owner(semi_space);
+void Page::InitializeAsAnchor(Space* space) {
+  set_owner(space);
   set_next_chunk(this);
   set_prev_chunk(this);
-  // Flags marks this invalid page as not being in new-space.
-  // All real new-space pages will be in new-space.
   SetFlags(0, ~0);
+  SetFlag(ANCHOR);
 }
 
 MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
@@ -496,19 +554,19 @@
     if (reservation_.IsReserved()) {
       Executability executable =
           IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
-      if (!heap()->isolate()->memory_allocator()->CommitMemory(start, length,
-                                                               executable)) {
+      if (!heap()->memory_allocator()->CommitMemory(start, length,
+                                                    executable)) {
         return false;
       }
     } else {
-      CodeRange* code_range = heap_->isolate()->code_range();
+      CodeRange* code_range = heap_->memory_allocator()->code_range();
       DCHECK(code_range != NULL && code_range->valid() &&
              IsFlagSet(IS_EXECUTABLE));
       if (!code_range->CommitRawMemory(start, length)) return false;
     }
 
     if (Heap::ShouldZapGarbage()) {
-      heap_->isolate()->memory_allocator()->ZapBlock(start, length);
+      heap_->memory_allocator()->ZapBlock(start, length);
     }
   } else if (commit_size < committed_size) {
     DCHECK(commit_size > 0);
@@ -518,7 +576,7 @@
     if (reservation_.IsReserved()) {
       if (!reservation_.Uncommit(start, length)) return false;
     } else {
-      CodeRange* code_range = heap_->isolate()->code_range();
+      CodeRange* code_range = heap_->memory_allocator()->code_range();
       DCHECK(code_range != NULL && code_range->valid() &&
              IsFlagSet(IS_EXECUTABLE));
       if (!code_range->UncommitRawMemory(start, length)) return false;
@@ -614,13 +672,13 @@
 #ifdef V8_TARGET_ARCH_MIPS64
     // Use code range only for large object space on mips64 to keep address
     // range within 256-MB memory region.
-    if (isolate_->code_range() != NULL && isolate_->code_range()->valid() &&
+    if (code_range() != NULL && code_range()->valid() &&
         reserve_area_size > CodePageAreaSize()) {
 #else
-    if (isolate_->code_range() != NULL && isolate_->code_range()->valid()) {
+    if (code_range() != NULL && code_range()->valid()) {
 #endif
-      base = isolate_->code_range()->AllocateRawMemory(chunk_size, commit_size,
-                                                       &chunk_size);
+      base =
+          code_range()->AllocateRawMemory(chunk_size, commit_size, &chunk_size);
       DCHECK(
           IsAligned(reinterpret_cast<intptr_t>(base), MemoryChunk::kAlignment));
       if (base == NULL) return NULL;
@@ -674,6 +732,23 @@
     PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
   }
 
+  // We cannot use the last chunk in the address space because we would
+  // overflow when comparing top and limit if this chunk is used for a
+  // linear allocation area.
+  if ((reinterpret_cast<uintptr_t>(base) + chunk_size) == 0u) {
+    CHECK(!last_chunk_.IsReserved());
+    last_chunk_.TakeControl(&reservation);
+    UncommitBlock(reinterpret_cast<Address>(last_chunk_.address()),
+                  last_chunk_.size());
+    size_.Increment(-static_cast<intptr_t>(chunk_size));
+    if (executable == EXECUTABLE) {
+      size_executable_.Increment(-static_cast<intptr_t>(chunk_size));
+    }
+    CHECK(last_chunk_.IsReserved());
+    return AllocateChunk(reserve_area_size, commit_area_size, executable,
+                         owner);
+  }
+
   return MemoryChunk::Initialize(heap, base, chunk_size, area_start, area_end,
                                  executable, owner, &reservation);
 }
@@ -684,20 +759,6 @@
   available_in_free_list_ = 0;
 }
 
-LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
-                                              Space* owner,
-                                              Executability executable) {
-  MemoryChunk* chunk =
-      AllocateChunk(object_size, object_size, executable, owner);
-  if (chunk == NULL) return NULL;
-  if (executable && chunk->size() > LargePage::kMaxCodePageSize) {
-    STATIC_ASSERT(LargePage::kMaxCodePageSize <= TypedSlotSet::kMaxOffset);
-    FATAL("Code page is too large.");
-  }
-  return LargePage::Initialize(isolate_->heap(), chunk);
-}
-
-
 void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
   DCHECK(!chunk->IsFlagSet(MemoryChunk::PRE_FREED));
   LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
@@ -735,36 +796,52 @@
   chunk->ReleaseAllocatedMemory();
 
   base::VirtualMemory* reservation = chunk->reserved_memory();
-  if (reservation->IsReserved()) {
-    FreeMemory(reservation, chunk->executable());
+  if (chunk->IsFlagSet(MemoryChunk::POOLED)) {
+    UncommitBlock(reinterpret_cast<Address>(chunk), MemoryChunk::kPageSize);
   } else {
-    FreeMemory(chunk->address(), chunk->size(), chunk->executable());
+    if (reservation->IsReserved()) {
+      FreeMemory(reservation, chunk->executable());
+    } else {
+      FreeMemory(chunk->address(), chunk->size(), chunk->executable());
+    }
   }
 }
 
-template <MemoryAllocator::AllocationMode mode>
+template <MemoryAllocator::FreeMode mode>
 void MemoryAllocator::Free(MemoryChunk* chunk) {
-  if (mode == kRegular) {
-    PreFreeMemory(chunk);
-    PerformFreeMemory(chunk);
-  } else {
-    DCHECK_EQ(mode, kPooled);
-    FreePooled(chunk);
+  switch (mode) {
+    case kFull:
+      PreFreeMemory(chunk);
+      PerformFreeMemory(chunk);
+      break;
+    case kPooledAndQueue:
+      DCHECK_EQ(chunk->size(), static_cast<size_t>(MemoryChunk::kPageSize));
+      DCHECK_EQ(chunk->executable(), NOT_EXECUTABLE);
+      chunk->SetFlag(MemoryChunk::POOLED);
+    // Fall through to kPreFreeAndQueue.
+    case kPreFreeAndQueue:
+      PreFreeMemory(chunk);
+      // The chunks added to this queue will be freed by a concurrent thread.
+      unmapper()->AddMemoryChunkSafe(chunk);
+      break;
+    default:
+      UNREACHABLE();
   }
 }
 
-template void MemoryAllocator::Free<MemoryAllocator::kRegular>(
+template void MemoryAllocator::Free<MemoryAllocator::kFull>(MemoryChunk* chunk);
+
+template void MemoryAllocator::Free<MemoryAllocator::kPreFreeAndQueue>(
     MemoryChunk* chunk);
 
-template void MemoryAllocator::Free<MemoryAllocator::kPooled>(
+template void MemoryAllocator::Free<MemoryAllocator::kPooledAndQueue>(
     MemoryChunk* chunk);
 
-template <typename PageType, MemoryAllocator::AllocationMode mode,
-          typename SpaceType>
-PageType* MemoryAllocator::AllocatePage(intptr_t size, SpaceType* owner,
-                                        Executability executable) {
+template <MemoryAllocator::AllocationMode alloc_mode, typename SpaceType>
+Page* MemoryAllocator::AllocatePage(intptr_t size, SpaceType* owner,
+                                    Executability executable) {
   MemoryChunk* chunk = nullptr;
-  if (mode == kPooled) {
+  if (alloc_mode == kPooled) {
     DCHECK_EQ(size, static_cast<intptr_t>(MemoryChunk::kAllocatableMemory));
     DCHECK_EQ(executable, NOT_EXECUTABLE);
     chunk = AllocatePagePooled(owner);
@@ -773,22 +850,32 @@
     chunk = AllocateChunk(size, size, executable, owner);
   }
   if (chunk == nullptr) return nullptr;
-  return PageType::Initialize(isolate_->heap(), chunk, executable, owner);
+  return Page::Initialize(isolate_->heap(), chunk, executable, owner);
 }
 
-template Page* MemoryAllocator::AllocatePage<Page, MemoryAllocator::kRegular,
-                                             PagedSpace>(intptr_t, PagedSpace*,
-                                                         Executability);
+template Page*
+MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, PagedSpace>(
+    intptr_t size, PagedSpace* owner, Executability executable);
+template Page*
+MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, SemiSpace>(
+    intptr_t size, SemiSpace* owner, Executability executable);
+template Page*
+MemoryAllocator::AllocatePage<MemoryAllocator::kPooled, SemiSpace>(
+    intptr_t size, SemiSpace* owner, Executability executable);
 
-template NewSpacePage* MemoryAllocator::AllocatePage<
-    NewSpacePage, MemoryAllocator::kPooled, SemiSpace>(intptr_t, SemiSpace*,
-                                                       Executability);
+LargePage* MemoryAllocator::AllocateLargePage(intptr_t size,
+                                              LargeObjectSpace* owner,
+                                              Executability executable) {
+  MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);
+  if (chunk == nullptr) return nullptr;
+  return LargePage::Initialize(isolate_->heap(), chunk, executable, owner);
+}
 
 template <typename SpaceType>
 MemoryChunk* MemoryAllocator::AllocatePagePooled(SpaceType* owner) {
-  if (chunk_pool_.is_empty()) return nullptr;
+  MemoryChunk* chunk = unmapper()->TryGetPooledMemoryChunkSafe();
+  if (chunk == nullptr) return nullptr;
   const int size = MemoryChunk::kPageSize;
-  MemoryChunk* chunk = chunk_pool_.RemoveLast();
   const Address start = reinterpret_cast<Address>(chunk);
   const Address area_start = start + MemoryChunk::kObjectStartOffset;
   const Address area_end = start + size;
@@ -802,18 +889,6 @@
   return chunk;
 }
 
-void MemoryAllocator::FreePooled(MemoryChunk* chunk) {
-  DCHECK_EQ(chunk->size(), static_cast<size_t>(MemoryChunk::kPageSize));
-  DCHECK_EQ(chunk->executable(), NOT_EXECUTABLE);
-  chunk_pool_.Add(chunk);
-  intptr_t chunk_size = static_cast<intptr_t>(chunk->size());
-  if (chunk->executable() == EXECUTABLE) {
-    size_executable_.Increment(-chunk_size);
-  }
-  size_.Increment(-chunk_size);
-  UncommitBlock(reinterpret_cast<Address>(chunk), MemoryChunk::kPageSize);
-}
-
 bool MemoryAllocator::CommitBlock(Address start, size_t size,
                                   Executability executable) {
   if (!CommitMemory(start, size, executable)) return false;
@@ -890,10 +965,7 @@
 void MemoryAllocator::ReportStatistics() {
   intptr_t size = Size();
   float pct = static_cast<float>(capacity_ - size) / capacity_;
-  PrintF("  capacity: %" V8_PTR_PREFIX
-         "d"
-         ", used: %" V8_PTR_PREFIX
-         "d"
+  PrintF("  capacity: %" V8PRIdPTR ", used: %" V8PRIdPTR
          ", available: %%%d\n\n",
          capacity_, size, static_cast<int>(pct * 100));
 }
@@ -959,12 +1031,16 @@
 // MemoryChunk implementation
 
 void MemoryChunk::ReleaseAllocatedMemory() {
-  delete skip_list_;
-  skip_list_ = nullptr;
-  delete mutex_;
-  mutex_ = nullptr;
-  ReleaseOldToNewSlots();
-  ReleaseOldToOldSlots();
+  if (skip_list_ != nullptr) {
+    delete skip_list_;
+    skip_list_ = nullptr;
+  }
+  if (mutex_ != nullptr) {
+    delete mutex_;
+    mutex_ = nullptr;
+  }
+  if (old_to_new_slots_ != nullptr) ReleaseOldToNewSlots();
+  if (old_to_old_slots_ != nullptr) ReleaseOldToOldSlots();
 }
 
 static SlotSet* AllocateSlotSet(size_t size, Address page_start) {
@@ -1029,13 +1105,11 @@
 
 PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
                        Executability executable)
-    : Space(heap, space, executable), free_list_(this) {
+    : Space(heap, space, executable), anchor_(this), free_list_(this) {
   area_size_ = MemoryAllocator::PageAreaSize(space);
   accounting_stats_.Clear();
 
   allocation_info_.Reset(nullptr, nullptr);
-
-  anchor_.InitializeAsAnchor(this);
 }
 
 
@@ -1048,7 +1122,7 @@
 void PagedSpace::TearDown() {
   PageIterator iterator(this);
   while (iterator.has_next()) {
-    heap()->isolate()->memory_allocator()->Free(iterator.next());
+    heap()->memory_allocator()->Free<MemoryAllocator::kFull>(iterator.next());
   }
   anchor_.set_next_page(&anchor_);
   anchor_.set_prev_page(&anchor_);
@@ -1063,17 +1137,14 @@
     return;
   }
   MarkCompactCollector* collector = heap()->mark_compact_collector();
-  List<Page*>* swept_pages = collector->swept_pages(identity());
   intptr_t added = 0;
   {
-    base::LockGuard<base::Mutex> guard(collector->swept_pages_mutex());
-    for (int i = swept_pages->length() - 1; i >= 0; --i) {
-      Page* p = (*swept_pages)[i];
+    Page* p = nullptr;
+    while ((p = collector->sweeper().GetSweptPageSafe(this)) != nullptr) {
       // Only during compaction pages can actually change ownership. This is
       // safe because there exists no other competing action on the page links
       // during compaction.
       if (is_local() && (p->owner() != this)) {
-        if (added > kCompactionMemoryWanted) break;
         base::LockGuard<base::Mutex> guard(
             reinterpret_cast<PagedSpace*>(p->owner())->mutex());
         p->Unlink();
@@ -1082,7 +1153,7 @@
       }
       added += RelinkFreeListCategories(p);
       added += p->wasted_memory();
-      swept_pages->Remove(i);
+      if (is_local() && (added > kCompactionMemoryWanted)) break;
     }
   }
   accounting_stats_.IncreaseCapacity(added);
@@ -1163,30 +1234,16 @@
 }
 
 
-bool PagedSpace::CanExpand(size_t size) {
-  DCHECK(heap()->mark_compact_collector()->is_compacting() ||
-         Capacity() <= heap()->MaxOldGenerationSize());
-
-  // Are we going to exceed capacity for this space? At this point we can be
-  // way over the maximum size because of AlwaysAllocate scopes and large
-  // objects.
-  if (!heap()->CanExpandOldGeneration(static_cast<int>(size))) return false;
-
-  return true;
-}
-
-
 bool PagedSpace::Expand() {
-  intptr_t size = AreaSize();
+  int size = AreaSize();
   if (snapshotable() && !HasPages()) {
     size = Snapshot::SizeOfFirstPage(heap()->isolate(), identity());
   }
 
-  if (!CanExpand(size)) return false;
+  if (!heap()->CanExpandOldGeneration(size)) return false;
 
-  Page* p = heap()->isolate()->memory_allocator()->AllocatePage<Page>(
-      size, this, executable());
-  if (p == NULL) return false;
+  Page* p = heap()->memory_allocator()->AllocatePage(size, this, executable());
+  if (p == nullptr) return false;
 
   AccountCommitted(static_cast<intptr_t>(p->size()));
 
@@ -1244,7 +1301,7 @@
   free_list_.EvictFreeListItems(page);
   DCHECK(!free_list_.ContainsPageFreeListItems(page));
 
-  if (Page::FromAllocationTop(allocation_info_.top()) == page) {
+  if (Page::FromAllocationAreaAddress(allocation_info_.top()) == page) {
     allocation_info_.Reset(nullptr, nullptr);
   }
 
@@ -1255,7 +1312,7 @@
   }
 
   AccountUncommitted(static_cast<intptr_t>(page->size()));
-  heap()->QueueMemoryChunkForFree(page);
+  heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
 
   DCHECK(Capacity() > 0);
   accounting_stats_.ShrinkSpace(AreaSize());
@@ -1273,7 +1330,7 @@
   while (page_iterator.has_next()) {
     Page* page = page_iterator.next();
     CHECK(page->owner() == this);
-    if (page == Page::FromAllocationTop(allocation_info_.top())) {
+    if (page == Page::FromAllocationAreaAddress(allocation_info_.top())) {
       allocation_pointer_found_in_space = true;
     }
     CHECK(page->SweepingDone());
@@ -1492,14 +1549,14 @@
 
 bool NewSpace::AddFreshPage() {
   Address top = allocation_info_.top();
-  DCHECK(!NewSpacePage::IsAtStart(top));
+  DCHECK(!Page::IsAtObjectStart(top));
   if (!to_space_.AdvancePage()) {
     // No more pages left to advance.
     return false;
   }
 
   // Clear remainder of current page.
-  Address limit = NewSpacePage::FromLimit(top)->area_end();
+  Address limit = Page::FromAllocationAreaAddress(top)->area_end();
   if (heap()->gc_state() == Heap::SCAVENGE) {
     heap()->promotion_queue()->SetNewLimit(limit);
   }
@@ -1526,7 +1583,7 @@
   int filler_size = Heap::GetFillToAlign(old_top, alignment);
   int aligned_size_in_bytes = size_in_bytes + filler_size;
 
-  if (old_top + aligned_size_in_bytes >= high) {
+  if (old_top + aligned_size_in_bytes > high) {
     // Not enough room in the page, try to allocate a new one.
     if (!AddFreshPage()) {
       return false;
@@ -1537,10 +1594,9 @@
     old_top = allocation_info_.top();
     high = to_space_.page_high();
     filler_size = Heap::GetFillToAlign(old_top, alignment);
-    aligned_size_in_bytes = size_in_bytes + filler_size;
   }
 
-  DCHECK(old_top + aligned_size_in_bytes < high);
+  DCHECK(old_top + aligned_size_in_bytes <= high);
 
   if (allocation_info_.limit() < high) {
     // Either the limit has been lowered because linear allocation was disabled
@@ -1626,9 +1682,9 @@
   CHECK_EQ(current, to_space_.space_start());
 
   while (current != top()) {
-    if (!NewSpacePage::IsAtEnd(current)) {
+    if (!Page::IsAlignedToPageSize(current)) {
       // The allocation pointer should not be in the middle of an object.
-      CHECK(!NewSpacePage::FromLimit(current)->ContainsLimit(top()) ||
+      CHECK(!Page::FromAllocationAreaAddress(current)->ContainsLimit(top()) ||
             current < top());
 
       HeapObject* object = HeapObject::FromAddress(current);
@@ -1654,7 +1710,7 @@
       current += size;
     } else {
       // At end of page, switch to next page.
-      NewSpacePage* page = NewSpacePage::FromLimit(current)->next_page();
+      Page* page = Page::FromAllocationAreaAddress(current)->next_page();
       // Next page should be valid.
       CHECK(!page->is_anchor());
       current = page->area_start();
@@ -1690,15 +1746,12 @@
 
 bool SemiSpace::Commit() {
   DCHECK(!is_committed());
-  NewSpacePage* current = anchor();
+  Page* current = anchor();
   const int num_pages = current_capacity_ / Page::kPageSize;
   for (int pages_added = 0; pages_added < num_pages; pages_added++) {
-    NewSpacePage* new_page =
-        heap()
-            ->isolate()
-            ->memory_allocator()
-            ->AllocatePage<NewSpacePage, MemoryAllocator::kPooled>(
-                NewSpacePage::kAllocatableMemory, this, executable());
+    Page* new_page =
+        heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
+            Page::kAllocatableMemory, this, executable());
     if (new_page == nullptr) {
       RewindPages(current, pages_added);
       return false;
@@ -1720,13 +1773,14 @@
   DCHECK(is_committed());
   NewSpacePageIterator it(this);
   while (it.has_next()) {
-    heap()->isolate()->memory_allocator()->Free<MemoryAllocator::kPooled>(
+    heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(
         it.next());
   }
   anchor()->set_next_page(anchor());
   anchor()->set_prev_page(anchor());
   AccountUncommitted(current_capacity_);
   committed_ = false;
+  heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
   return true;
 }
 
@@ -1746,21 +1800,18 @@
   if (!is_committed()) {
     if (!Commit()) return false;
   }
-  DCHECK_EQ(new_capacity & NewSpacePage::kPageAlignmentMask, 0);
+  DCHECK_EQ(new_capacity & Page::kPageAlignmentMask, 0);
   DCHECK_LE(new_capacity, maximum_capacity_);
   DCHECK_GT(new_capacity, current_capacity_);
   const int delta = new_capacity - current_capacity_;
   DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
-  int delta_pages = delta / NewSpacePage::kPageSize;
-  NewSpacePage* last_page = anchor()->prev_page();
+  const int delta_pages = delta / Page::kPageSize;
+  Page* last_page = anchor()->prev_page();
   DCHECK_NE(last_page, anchor());
   for (int pages_added = 0; pages_added < delta_pages; pages_added++) {
-    NewSpacePage* new_page =
-        heap()
-            ->isolate()
-            ->memory_allocator()
-            ->AllocatePage<NewSpacePage, MemoryAllocator::kPooled>(
-                NewSpacePage::kAllocatableMemory, this, executable());
+    Page* new_page =
+        heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
+            Page::kAllocatableMemory, this, executable());
     if (new_page == nullptr) {
       RewindPages(last_page, pages_added);
       return false;
@@ -1768,8 +1819,7 @@
     new_page->InsertAfter(last_page);
     Bitmap::Clear(new_page);
     // Duplicate the flags that was set on the old page.
-    new_page->SetFlags(last_page->GetFlags(),
-                       NewSpacePage::kCopyOnFlipFlagsMask);
+    new_page->SetFlags(last_page->GetFlags(), Page::kCopyOnFlipFlagsMask);
     last_page = new_page;
   }
   AccountCommitted(static_cast<intptr_t>(delta));
@@ -1777,9 +1827,9 @@
   return true;
 }
 
-void SemiSpace::RewindPages(NewSpacePage* start, int num_pages) {
-  NewSpacePage* new_last_page = nullptr;
-  NewSpacePage* last_page = start;
+void SemiSpace::RewindPages(Page* start, int num_pages) {
+  Page* new_last_page = nullptr;
+  Page* last_page = start;
   while (num_pages > 0) {
     DCHECK_NE(last_page, anchor());
     new_last_page = last_page->prev_page();
@@ -1791,25 +1841,26 @@
 }
 
 bool SemiSpace::ShrinkTo(int new_capacity) {
-  DCHECK_EQ(new_capacity & NewSpacePage::kPageAlignmentMask, 0);
+  DCHECK_EQ(new_capacity & Page::kPageAlignmentMask, 0);
   DCHECK_GE(new_capacity, minimum_capacity_);
   DCHECK_LT(new_capacity, current_capacity_);
   if (is_committed()) {
     const int delta = current_capacity_ - new_capacity;
     DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
-    int delta_pages = delta / NewSpacePage::kPageSize;
-    NewSpacePage* new_last_page;
-    NewSpacePage* last_page;
+    int delta_pages = delta / Page::kPageSize;
+    Page* new_last_page;
+    Page* last_page;
     while (delta_pages > 0) {
       last_page = anchor()->prev_page();
       new_last_page = last_page->prev_page();
       new_last_page->set_next_page(anchor());
       anchor()->set_prev_page(new_last_page);
-      heap()->isolate()->memory_allocator()->Free<MemoryAllocator::kPooled>(
+      heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(
           last_page);
       delta_pages--;
     }
     AccountUncommitted(static_cast<intptr_t>(delta));
+    heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
   }
   current_capacity_ = new_capacity;
   return true;
@@ -1817,13 +1868,12 @@
 
 void SemiSpace::FixPagesFlags(intptr_t flags, intptr_t mask) {
   anchor_.set_owner(this);
-  // Fixup back-pointers to anchor. Address of anchor changes when we swap.
   anchor_.prev_page()->set_next_page(&anchor_);
   anchor_.next_page()->set_prev_page(&anchor_);
 
   NewSpacePageIterator it(this);
   while (it.has_next()) {
-    NewSpacePage* page = it.next();
+    Page* page = it.next();
     page->set_owner(this);
     page->SetFlags(flags, mask);
     if (id_ == kToSpace) {
@@ -1846,6 +1896,22 @@
   current_page_ = anchor_.next_page();
 }
 
+bool SemiSpace::ReplaceWithEmptyPage(Page* old_page) {
+  // TODO(mlippautz): We do not have to get a new page here when the semispace
+  // is uncommitted later on.
+  Page* new_page = heap()->memory_allocator()->AllocatePage(
+      Page::kAllocatableMemory, this, executable());
+  if (new_page == nullptr) return false;
+  Bitmap::Clear(new_page);
+  new_page->SetFlags(old_page->GetFlags(), Page::kCopyAllFlags);
+  new_page->set_next_page(old_page->next_page());
+  new_page->set_prev_page(old_page->prev_page());
+  old_page->next_page()->set_prev_page(new_page);
+  old_page->prev_page()->set_next_page(new_page);
+  heap()->CreateFillerObjectAt(new_page->area_start(), new_page->area_size(),
+                               ClearRecordedSlots::kNo);
+  return true;
+}
 
 void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
   // We won't be swapping semispaces without data in them.
@@ -1863,13 +1929,13 @@
   std::swap(from->anchor_, to->anchor_);
   std::swap(from->current_page_, to->current_page_);
 
-  to->FixPagesFlags(saved_to_space_flags, NewSpacePage::kCopyOnFlipFlagsMask);
+  to->FixPagesFlags(saved_to_space_flags, Page::kCopyOnFlipFlagsMask);
   from->FixPagesFlags(0, 0);
 }
 
 
 void SemiSpace::set_age_mark(Address mark) {
-  DCHECK_EQ(NewSpacePage::FromLimit(mark)->semi_space(), this);
+  DCHECK_EQ(Page::FromAllocationAreaAddress(mark)->owner(), this);
   age_mark_ = mark;
   // Mark all pages up to the one containing mark.
   NewSpacePageIterator it(space_start(), mark);
@@ -1886,10 +1952,10 @@
 #ifdef VERIFY_HEAP
 void SemiSpace::Verify() {
   bool is_from_space = (id_ == kFromSpace);
-  NewSpacePage* page = anchor_.next_page();
-  CHECK(anchor_.semi_space() == this);
+  Page* page = anchor_.next_page();
+  CHECK(anchor_.owner() == this);
   while (page != &anchor_) {
-    CHECK_EQ(page->semi_space(), this);
+    CHECK_EQ(page->owner(), this);
     CHECK(page->InNewSpace());
     CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE
                                         : MemoryChunk::IN_TO_SPACE));
@@ -1917,10 +1983,10 @@
 #ifdef DEBUG
 void SemiSpace::AssertValidRange(Address start, Address end) {
   // Addresses belong to same semi-space
-  NewSpacePage* page = NewSpacePage::FromLimit(start);
-  NewSpacePage* end_page = NewSpacePage::FromLimit(end);
-  SemiSpace* space = page->semi_space();
-  CHECK_EQ(space, end_page->semi_space());
+  Page* page = Page::FromAllocationAreaAddress(start);
+  Page* end_page = Page::FromAllocationAreaAddress(end);
+  SemiSpace* space = reinterpret_cast<SemiSpace*>(page->owner());
+  CHECK_EQ(space, end_page->owner());
   // Start address is before end address, either on same page,
   // or end address is on a later page in the linked list of
   // semi-space pages.
@@ -2087,9 +2153,7 @@
 #ifdef DEBUG
   if (FLAG_heap_stats) {
     float pct = static_cast<float>(Available()) / TotalCapacity();
-    PrintF("  capacity: %" V8_PTR_PREFIX
-           "d"
-           ", available: %" V8_PTR_PREFIX "d, %%%d\n",
+    PrintF("  capacity: %" V8PRIdPTR ", available: %" V8PRIdPTR ", %%%d\n",
            TotalCapacity(), Available(), static_cast<int>(pct * 100));
     PrintF("\n  Object Histogram:\n");
     for (int i = 0; i <= LAST_TYPE; i++) {
@@ -2382,7 +2446,6 @@
   int new_node_size = 0;
   FreeSpace* new_node = FindNodeFor(size_in_bytes, &new_node_size);
   if (new_node == nullptr) return nullptr;
-  owner_->AllocationStep(new_node->address(), size_in_bytes);
 
   int bytes_left = new_node_size - size_in_bytes;
   DCHECK(bytes_left >= 0);
@@ -2409,7 +2472,8 @@
     // Keep the linear allocation area empty if requested to do so, just
     // return area back to the free list instead.
     owner_->Free(new_node->address() + size_in_bytes, bytes_left);
-    DCHECK(owner_->top() == NULL && owner_->limit() == NULL);
+    owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
+                           new_node->address() + size_in_bytes);
   } else if (bytes_left > kThreshold &&
              owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
              FLAG_incremental_marking) {
@@ -2421,13 +2485,16 @@
                  new_node_size - size_in_bytes - linear_size);
     owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
                            new_node->address() + size_in_bytes + linear_size);
-  } else if (bytes_left > 0) {
+  } else {
+    DCHECK(bytes_left >= 0);
     // Normally we give the rest of the node to the allocator as its new
     // linear allocation area.
     owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
                            new_node->address() + new_node_size);
   }
 
+  owner_->AllocationStep(new_node->address(), size_in_bytes);
+
   return new_node;
 }
 
@@ -2596,7 +2663,7 @@
 void PagedSpace::EvictEvacuationCandidatesFromLinearAllocationArea() {
   if (allocation_info_.top() >= allocation_info_.limit()) return;
 
-  if (!Page::FromAllocationTop(allocation_info_.top())->CanAllocate()) {
+  if (!Page::FromAllocationAreaAddress(allocation_info_.top())->CanAllocate()) {
     // Create filler object to keep page iterable if it was iterable.
     int remaining =
         static_cast<int>(allocation_info_.limit() - allocation_info_.top());
@@ -2648,8 +2715,8 @@
     if (object != NULL) return object;
 
     // If sweeping is still in progress try to sweep pages on the main thread.
-    int max_freed = collector->SweepInParallel(heap()->paged_space(identity()),
-                                               size_in_bytes, kMaxPagesToSweep);
+    int max_freed = collector->sweeper().ParallelSweepSpace(
+        identity(), size_in_bytes, kMaxPagesToSweep);
     RefillFreeList();
     if (max_freed >= size_in_bytes) {
       object = free_list_.Allocate(size_in_bytes);
@@ -2815,11 +2882,8 @@
 
 void PagedSpace::ReportStatistics() {
   int pct = static_cast<int>(Available() * 100 / Capacity());
-  PrintF("  capacity: %" V8_PTR_PREFIX
-         "d"
-         ", waste: %" V8_PTR_PREFIX
-         "d"
-         ", available: %" V8_PTR_PREFIX "d, %%%d\n",
+  PrintF("  capacity: %" V8PRIdPTR ", waste: %" V8PRIdPTR
+         ", available: %" V8PRIdPTR ", %%%d\n",
          Capacity(), Waste(), Available(), pct);
 
   if (heap()->mark_compact_collector()->sweeping_in_progress()) {
@@ -2892,9 +2956,9 @@
     LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", page->address()));
 
     ObjectSpace space = static_cast<ObjectSpace>(1 << identity());
-    heap()->isolate()->memory_allocator()->PerformAllocationCallback(
+    heap()->memory_allocator()->PerformAllocationCallback(
         space, kAllocationActionFree, page->size());
-    heap()->isolate()->memory_allocator()->Free(page);
+    heap()->memory_allocator()->Free<MemoryAllocator::kFull>(page);
   }
   SetUp();
 }
@@ -2908,7 +2972,7 @@
     return AllocationResult::Retry(identity());
   }
 
-  LargePage* page = heap()->isolate()->memory_allocator()->AllocateLargePage(
+  LargePage* page = heap()->memory_allocator()->AllocateLargePage(
       object_size, this, executable);
   if (page == NULL) return AllocationResult::Retry(identity());
   DCHECK(page->area_size() >= object_size);
@@ -2977,7 +3041,7 @@
   if (e != NULL) {
     DCHECK(e->value != NULL);
     LargePage* page = reinterpret_cast<LargePage*>(e->value);
-    DCHECK(page->is_valid());
+    DCHECK(LargePage::IsValid(page));
     if (page->Contains(a)) {
       return page;
     }
@@ -3037,7 +3101,7 @@
                           static_cast<uint32_t>(key));
       }
 
-      heap()->QueueMemoryChunkForFree(page);
+      heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
     }
   }
 }
@@ -3115,7 +3179,7 @@
 
 
 void LargeObjectSpace::ReportStatistics() {
-  PrintF("  size: %" V8_PTR_PREFIX "d\n", size_);
+  PrintF("  size: %" V8PRIdPTR "\n", size_);
   int num_objects = 0;
   ClearHistograms(heap()->isolate());
   LargeObjectIterator it(this);
@@ -3126,7 +3190,7 @@
 
   PrintF(
       "  number of objects %d, "
-      "size of objects %" V8_PTR_PREFIX "d\n",
+      "size of objects %" V8PRIdPTR "\n",
       num_objects, objects_size_);
   if (num_objects > 0) ReportHistogram(heap()->isolate(), false);
 }