Merge V8 5.3.332.45.  DO NOT MERGE

Test: Manual

FPIIM-449

Change-Id: Id3254828b068abdea3cb10442e0172a8c9a98e03
(cherry picked from commit 13e2dadd00298019ed862f2b2fc5068bba730bcf)
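
The bulk of this merge is the ArrayBufferTracker rework: the heap-global
tracker is replaced by static entry points that operate on per-page
LocalArrayBufferTracker instances, with access guarded by the page mutex.
A minimal sketch of the new calling convention, using only names from the
diff below (the surrounding Heap*/JSArrayBuffer*/Page* values are assumed
to come from the caller, so this is illustrative rather than verbatim):

  // A buffer is tracked on the page that holds it; registration takes
  // the page lock internally and reports the external memory via the
  // embedder API.
  ArrayBufferTracker::RegisterNew(heap, buffer);

  // Sweeping frees dead buffers page by page; evacuation moves tracker
  // entries along with forwarded buffers to their target pages.
  ArrayBufferTracker::FreeDead(page);  // caller holds the page lock
  ArrayBufferTracker::ProcessBuffers(
      page, ArrayBufferTracker::kUpdateForwardedKeepOthers);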
diff --git a/src/heap/array-buffer-tracker-inl.h b/src/heap/array-buffer-tracker-inl.h
new file mode 100644
index 0000000..a176744
--- /dev/null
+++ b/src/heap/array-buffer-tracker-inl.h
@@ -0,0 +1,67 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/conversions-inl.h"
+#include "src/heap/array-buffer-tracker.h"
+#include "src/heap/heap.h"
+#include "src/objects.h"
+
+namespace v8 {
+namespace internal {
+
+void ArrayBufferTracker::RegisterNew(Heap* heap, JSArrayBuffer* buffer) {
+  void* data = buffer->backing_store();
+  if (!data) return;
+
+  size_t length = NumberToSize(heap->isolate(), buffer->byte_length());
+  Page* page = Page::FromAddress(buffer->address());
+  {
+    base::LockGuard<base::Mutex> guard(page->mutex());
+    LocalArrayBufferTracker* tracker = page->local_tracker();
+    if (tracker == nullptr) {
+      page->AllocateLocalTracker();
+      tracker = page->local_tracker();
+    }
+    DCHECK_NOT_NULL(tracker);
+    tracker->Add(buffer, std::make_pair(data, length));
+  }
+  // We may go over the limit of externally allocated memory here. We call the
+  // api function to trigger a GC in this case.
+  reinterpret_cast<v8::Isolate*>(heap->isolate())
+      ->AdjustAmountOfExternalAllocatedMemory(length);
+}
+
+void ArrayBufferTracker::Unregister(Heap* heap, JSArrayBuffer* buffer) {
+  void* data = buffer->backing_store();
+  if (!data) return;
+
+  Page* page = Page::FromAddress(buffer->address());
+  size_t length = 0;
+  {
+    base::LockGuard<base::Mutex> guard(page->mutex());
+    LocalArrayBufferTracker* tracker = page->local_tracker();
+    DCHECK_NOT_NULL(tracker);
+    length = tracker->Remove(buffer).second;
+  }
+  heap->update_external_memory(-static_cast<intptr_t>(length));
+}
+
+void LocalArrayBufferTracker::Add(Key key, const Value& value) {
+  auto ret = array_buffers_.insert(std::make_pair(key, value));
+  USE(ret);
+  // Check that we indeed inserted a new value and did not overwrite an existing
+  // one (which would be a bug).
+  DCHECK(ret.second);
+}
+
+LocalArrayBufferTracker::Value LocalArrayBufferTracker::Remove(Key key) {
+  TrackingMap::iterator it = array_buffers_.find(key);
+  DCHECK(it != array_buffers_.end());
+  Value value = it->second;
+  array_buffers_.erase(it);
+  return value;
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/heap/array-buffer-tracker.cc b/src/heap/array-buffer-tracker.cc
index 6e389c1..a870b35 100644
--- a/src/heap/array-buffer-tracker.cc
+++ b/src/heap/array-buffer-tracker.cc
@@ -3,138 +3,133 @@
 // found in the LICENSE file.
 
 #include "src/heap/array-buffer-tracker.h"
+#include "src/heap/array-buffer-tracker-inl.h"
 #include "src/heap/heap.h"
-#include "src/isolate.h"
-#include "src/objects.h"
-#include "src/objects-inl.h"
-#include "src/v8.h"
 
 namespace v8 {
 namespace internal {
 
-ArrayBufferTracker::~ArrayBufferTracker() {
-  Isolate* isolate = heap()->isolate();
+LocalArrayBufferTracker::~LocalArrayBufferTracker() {
+  CHECK(array_buffers_.empty());
+}
+
+template <LocalArrayBufferTracker::FreeMode free_mode>
+void LocalArrayBufferTracker::Free() {
   size_t freed_memory = 0;
-  for (auto& buffer : live_array_buffers_) {
-    isolate->array_buffer_allocator()->Free(buffer.first, buffer.second);
-    freed_memory += buffer.second;
-  }
-  for (auto& buffer : live_array_buffers_for_scavenge_) {
-    isolate->array_buffer_allocator()->Free(buffer.first, buffer.second);
-    freed_memory += buffer.second;
-  }
-  live_array_buffers_.clear();
-  live_array_buffers_for_scavenge_.clear();
-  not_yet_discovered_array_buffers_.clear();
-  not_yet_discovered_array_buffers_for_scavenge_.clear();
-
-  if (freed_memory > 0) {
-    heap()->update_amount_of_external_allocated_memory(
-        -static_cast<int64_t>(freed_memory));
-  }
-}
-
-
-void ArrayBufferTracker::RegisterNew(JSArrayBuffer* buffer) {
-  void* data = buffer->backing_store();
-  if (!data) return;
-
-  bool in_new_space = heap()->InNewSpace(buffer);
-  size_t length = NumberToSize(heap()->isolate(), buffer->byte_length());
-  if (in_new_space) {
-    live_array_buffers_for_scavenge_[data] = length;
-  } else {
-    live_array_buffers_[data] = length;
-  }
-
-  // We may go over the limit of externally allocated memory here. We call the
-  // api function to trigger a GC in this case.
-  reinterpret_cast<v8::Isolate*>(heap()->isolate())
-      ->AdjustAmountOfExternalAllocatedMemory(length);
-}
-
-
-void ArrayBufferTracker::Unregister(JSArrayBuffer* buffer) {
-  void* data = buffer->backing_store();
-  if (!data) return;
-
-  bool in_new_space = heap()->InNewSpace(buffer);
-  std::map<void*, size_t>* live_buffers =
-      in_new_space ? &live_array_buffers_for_scavenge_ : &live_array_buffers_;
-  std::map<void*, size_t>* not_yet_discovered_buffers =
-      in_new_space ? &not_yet_discovered_array_buffers_for_scavenge_
-                   : &not_yet_discovered_array_buffers_;
-
-  DCHECK(live_buffers->count(data) > 0);
-
-  size_t length = (*live_buffers)[data];
-  live_buffers->erase(data);
-  not_yet_discovered_buffers->erase(data);
-
-  heap()->update_amount_of_external_allocated_memory(
-      -static_cast<int64_t>(length));
-}
-
-
-void ArrayBufferTracker::MarkLive(JSArrayBuffer* buffer) {
-  base::LockGuard<base::Mutex> guard(&mutex_);
-  void* data = buffer->backing_store();
-
-  // ArrayBuffer might be in the middle of being constructed.
-  if (data == heap()->undefined_value()) return;
-  if (heap()->InNewSpace(buffer)) {
-    not_yet_discovered_array_buffers_for_scavenge_.erase(data);
-  } else {
-    not_yet_discovered_array_buffers_.erase(data);
-  }
-}
-
-
-void ArrayBufferTracker::FreeDead(bool from_scavenge) {
-  size_t freed_memory = 0;
-  Isolate* isolate = heap()->isolate();
-  for (auto& buffer : not_yet_discovered_array_buffers_for_scavenge_) {
-    isolate->array_buffer_allocator()->Free(buffer.first, buffer.second);
-    freed_memory += buffer.second;
-    live_array_buffers_for_scavenge_.erase(buffer.first);
-  }
-
-  if (!from_scavenge) {
-    for (auto& buffer : not_yet_discovered_array_buffers_) {
-      isolate->array_buffer_allocator()->Free(buffer.first, buffer.second);
-      freed_memory += buffer.second;
-      live_array_buffers_.erase(buffer.first);
+  for (TrackingMap::iterator it = array_buffers_.begin();
+       it != array_buffers_.end();) {
+    if ((free_mode == kFreeAll) ||
+        Marking::IsWhite(Marking::MarkBitFrom(it->first))) {
+      heap_->isolate()->array_buffer_allocator()->Free(it->second.first,
+                                                       it->second.second);
+      freed_memory += it->second.second;
+      it = array_buffers_.erase(it);
+    } else {
+      it++;
     }
   }
-
-  not_yet_discovered_array_buffers_for_scavenge_ =
-      live_array_buffers_for_scavenge_;
-  if (!from_scavenge) not_yet_discovered_array_buffers_ = live_array_buffers_;
-
-  // Do not call through the api as this code is triggered while doing a GC.
-  heap()->update_amount_of_external_allocated_memory(
-      -static_cast<int64_t>(freed_memory));
+  if (freed_memory > 0) {
+    heap_->update_external_memory_concurrently_freed(
+        static_cast<intptr_t>(freed_memory));
+  }
 }
 
-
-void ArrayBufferTracker::PrepareDiscoveryInNewSpace() {
-  not_yet_discovered_array_buffers_for_scavenge_ =
-      live_array_buffers_for_scavenge_;
+template <typename Callback>
+void LocalArrayBufferTracker::Process(Callback callback) {
+  JSArrayBuffer* new_buffer = nullptr;
+  size_t freed_memory = 0;
+  for (TrackingMap::iterator it = array_buffers_.begin();
+       it != array_buffers_.end();) {
+    const CallbackResult result = callback(it->first, &new_buffer);
+    if (result == kKeepEntry) {
+      it++;
+    } else if (result == kUpdateEntry) {
+      DCHECK_NOT_NULL(new_buffer);
+      Page* target_page = Page::FromAddress(new_buffer->address());
+      // We need to lock the target page because we cannot guarantee
+      // exclusive access to new space pages.
+      if (target_page->InNewSpace()) target_page->mutex()->Lock();
+      LocalArrayBufferTracker* tracker = target_page->local_tracker();
+      if (tracker == nullptr) {
+        target_page->AllocateLocalTracker();
+        tracker = target_page->local_tracker();
+      }
+      DCHECK_NOT_NULL(tracker);
+      tracker->Add(new_buffer, it->second);
+      if (target_page->InNewSpace()) target_page->mutex()->Unlock();
+      it = array_buffers_.erase(it);
+    } else if (result == kRemoveEntry) {
+      heap_->isolate()->array_buffer_allocator()->Free(it->second.first,
+                                                       it->second.second);
+      freed_memory += it->second.second;
+      it = array_buffers_.erase(it);
+    } else {
+      UNREACHABLE();
+    }
+  }
+  if (freed_memory > 0) {
+    heap_->update_external_memory_concurrently_freed(
+        static_cast<intptr_t>(freed_memory));
+  }
 }
 
+void ArrayBufferTracker::FreeDeadInNewSpace(Heap* heap) {
+  DCHECK_EQ(heap->gc_state(), Heap::HeapState::SCAVENGE);
+  for (Page* page : NewSpacePageRange(heap->new_space()->FromSpaceStart(),
+                                      heap->new_space()->FromSpaceEnd())) {
+    bool empty = ProcessBuffers(page, kUpdateForwardedRemoveOthers);
+    CHECK(empty);
+  }
+  heap->account_external_memory_concurrently_freed();
+}
 
-void ArrayBufferTracker::Promote(JSArrayBuffer* buffer) {
-  base::LockGuard<base::Mutex> guard(&mutex_);
+void ArrayBufferTracker::FreeDead(Page* page) {
+  // Callers must ensure that the page lock is held.
+  LocalArrayBufferTracker* tracker = page->local_tracker();
+  if (tracker == nullptr) return;
+  DCHECK(!page->SweepingDone());
+  tracker->Free<LocalArrayBufferTracker::kFreeDead>();
+  if (tracker->IsEmpty()) {
+    page->ReleaseLocalTracker();
+  }
+}
 
-  if (buffer->is_external()) return;
-  void* data = buffer->backing_store();
-  if (!data) return;
-  // ArrayBuffer might be in the middle of being constructed.
-  if (data == heap()->undefined_value()) return;
-  DCHECK(live_array_buffers_for_scavenge_.count(data) > 0);
-  live_array_buffers_[data] = live_array_buffers_for_scavenge_[data];
-  live_array_buffers_for_scavenge_.erase(data);
-  not_yet_discovered_array_buffers_for_scavenge_.erase(data);
+void ArrayBufferTracker::FreeAll(Page* page) {
+  LocalArrayBufferTracker* tracker = page->local_tracker();
+  if (tracker == nullptr) return;
+  tracker->Free<LocalArrayBufferTracker::kFreeAll>();
+  if (tracker->IsEmpty()) {
+    page->ReleaseLocalTracker();
+  }
+}
+
+bool ArrayBufferTracker::ProcessBuffers(Page* page, ProcessingMode mode) {
+  LocalArrayBufferTracker* tracker = page->local_tracker();
+  if (tracker == nullptr) return true;
+
+  DCHECK(page->SweepingDone());
+  tracker->Process(
+      [mode](JSArrayBuffer* old_buffer, JSArrayBuffer** new_buffer) {
+        MapWord map_word = old_buffer->map_word();
+        if (map_word.IsForwardingAddress()) {
+          *new_buffer = JSArrayBuffer::cast(map_word.ToForwardingAddress());
+          return LocalArrayBufferTracker::kUpdateEntry;
+        }
+        return mode == kUpdateForwardedKeepOthers
+                   ? LocalArrayBufferTracker::kKeepEntry
+                   : LocalArrayBufferTracker::kRemoveEntry;
+      });
+  return tracker->IsEmpty();
+}
+
+bool ArrayBufferTracker::IsTracked(JSArrayBuffer* buffer) {
+  Page* page = Page::FromAddress(buffer->address());
+  {
+    base::LockGuard<base::Mutex> guard(page->mutex());
+    LocalArrayBufferTracker* tracker = page->local_tracker();
+    if (tracker == nullptr) return false;
+    return tracker->IsTracked(buffer);
+  }
 }
 
 }  // namespace internal
diff --git a/src/heap/array-buffer-tracker.h b/src/heap/array-buffer-tracker.h
index 6130003..b015aa0 100644
--- a/src/heap/array-buffer-tracker.h
+++ b/src/heap/array-buffer-tracker.h
@@ -5,71 +5,97 @@
 #ifndef V8_HEAP_ARRAY_BUFFER_TRACKER_H_
 #define V8_HEAP_ARRAY_BUFFER_TRACKER_H_
 
-#include <map>
+#include <unordered_map>
 
+#include "src/allocation.h"
 #include "src/base/platform/mutex.h"
 #include "src/globals.h"
 
 namespace v8 {
 namespace internal {
 
-// Forward declarations.
 class Heap;
 class JSArrayBuffer;
+class Page;
 
-class ArrayBufferTracker {
+class ArrayBufferTracker : public AllStatic {
  public:
-  explicit ArrayBufferTracker(Heap* heap) : heap_(heap) {}
-  ~ArrayBufferTracker();
-
-  inline Heap* heap() { return heap_; }
+  enum ProcessingMode {
+    kUpdateForwardedRemoveOthers,
+    kUpdateForwardedKeepOthers,
+  };
 
   // The following methods are used to track raw C++ pointers to externally
   // allocated memory used as backing store in live array buffers.
 
-  // A new ArrayBuffer was created with |data| as backing store.
-  void RegisterNew(JSArrayBuffer* buffer);
+  // Register/unregister a new JSArrayBuffer |buffer| for tracking. Guards all
+  // access to the tracker by taking the page lock for the corresponding page.
+  inline static void RegisterNew(Heap* heap, JSArrayBuffer* buffer);
+  inline static void Unregister(Heap* heap, JSArrayBuffer* buffer);
 
-  // The backing store |data| is no longer owned by V8.
-  void Unregister(JSArrayBuffer* buffer);
+  // Frees all backing store pointers for dead JSArrayBuffers in new space.
+  // Does not take any locks and can only be called during Scavenge.
+  static void FreeDeadInNewSpace(Heap* heap);
 
-  // A live ArrayBuffer was discovered during marking/scavenge.
-  void MarkLive(JSArrayBuffer* buffer);
+  // Frees all backing store pointers for dead JSArrayBuffers on a given page.
+  // Requires marking information to be present. Requires the page lock to be
+  // taken by the caller.
+  static void FreeDead(Page* page);
 
-  // Frees all backing store pointers that weren't discovered in the previous
-  // marking or scavenge phase.
-  void FreeDead(bool from_scavenge);
+  // Frees all remaining, live or dead, array buffers on a page. Only useful
+  // during tear down.
+  static void FreeAll(Page* page);
 
-  // Prepare for a new scavenge phase. A new marking phase is implicitly
-  // prepared by finishing the previous one.
-  void PrepareDiscoveryInNewSpace();
+  // Processes all array buffers on a given page. |mode| specifies the action
+  // to perform on the buffers. Returns whether the tracker is empty or not.
+  static bool ProcessBuffers(Page* page, ProcessingMode mode);
 
-  // An ArrayBuffer moved from new space to old space.
-  void Promote(JSArrayBuffer* buffer);
+  // Returns whether a buffer is currently tracked.
+  static bool IsTracked(JSArrayBuffer* buffer);
+};
+
+// LocalArrayBufferTracker tracks internalized array buffers.
+//
+// Never use directly but instead always call through |ArrayBufferTracker|.
+class LocalArrayBufferTracker {
+ public:
+  typedef std::pair<void*, size_t> Value;
+  typedef JSArrayBuffer* Key;
+
+  enum CallbackResult { kKeepEntry, kUpdateEntry, kRemoveEntry };
+  enum FreeMode { kFreeDead, kFreeAll };
+
+  explicit LocalArrayBufferTracker(Heap* heap) : heap_(heap) {}
+  ~LocalArrayBufferTracker();
+
+  inline void Add(Key key, const Value& value);
+  inline Value Remove(Key key);
+
+  // Frees up array buffers determined by |free_mode|.
+  template <FreeMode free_mode>
+  void Free();
+
+  // Processes buffers one by one. The CallbackResult of the callback decides
+  // what action to take on the buffer.
+  //
+  // Callback should be of type:
+  //   CallbackResult fn(JSArrayBuffer* buffer, JSArrayBuffer** new_buffer);
+  template <typename Callback>
+  inline void Process(Callback callback);
+
+  bool IsEmpty() { return array_buffers_.empty(); }
+
+  bool IsTracked(Key key) {
+    return array_buffers_.find(key) != array_buffers_.end();
+  }
 
  private:
-  base::Mutex mutex_;
+  typedef std::unordered_map<Key, Value> TrackingMap;
+
   Heap* heap_;
-
-  // |live_array_buffers_| maps externally allocated memory used as backing
-  // store for ArrayBuffers to the length of the respective memory blocks.
-  //
-  // At the beginning of mark/compact, |not_yet_discovered_array_buffers_| is
-  // a copy of |live_array_buffers_| and we remove pointers as we discover live
-  // ArrayBuffer objects during marking. At the end of mark/compact, the
-  // remaining memory blocks can be freed.
-  std::map<void*, size_t> live_array_buffers_;
-  std::map<void*, size_t> not_yet_discovered_array_buffers_;
-
-  // To be able to free memory held by ArrayBuffers during scavenge as well, we
-  // have a separate list of allocated memory held by ArrayBuffers in new space.
-  //
-  // Since mark/compact also evacuates the new space, all pointers in the
-  // |live_array_buffers_for_scavenge_| list are also in the
-  // |live_array_buffers_| list.
-  std::map<void*, size_t> live_array_buffers_for_scavenge_;
-  std::map<void*, size_t> not_yet_discovered_array_buffers_for_scavenge_;
+  TrackingMap array_buffers_;
 };
+
 }  // namespace internal
 }  // namespace v8
 #endif  // V8_HEAP_ARRAY_BUFFER_TRACKER_H_
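
Note the asymmetric locking contract above: RegisterNew/Unregister take
the page lock themselves, while FreeDead expects the caller to hold it. A
hedged sketch of a sweeper-side caller under that assumption (|page| and
the enclosing sweep loop come from the caller's context):

  // Sketch: the sweeper owns the page mutex for the whole sweep, so
  // FreeDead can walk and free dead entries without extra locking.
  base::LockGuard<base::Mutex> guard(page->mutex());
  ArrayBufferTracker::FreeDead(page);  // requires marking info on |page|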
diff --git a/src/heap/gc-tracer.cc b/src/heap/gc-tracer.cc
index 4bae0a4..2b1f06a 100644
--- a/src/heap/gc-tracer.cc
+++ b/src/heap/gc-tracer.cc
@@ -135,6 +135,26 @@
   previous_ = previous_incremental_mark_compactor_event_ = current_;
 }
 
+void GCTracer::ResetForTesting() {
+  cumulative_incremental_marking_steps_ = 0.0;
+  cumulative_incremental_marking_bytes_ = 0.0;
+  cumulative_incremental_marking_duration_ = 0.0;
+  cumulative_pure_incremental_marking_duration_ = 0.0;
+  longest_incremental_marking_step_ = 0.0;
+  cumulative_incremental_marking_finalization_steps_ = 0.0;
+  cumulative_incremental_marking_finalization_duration_ = 0.0;
+  longest_incremental_marking_finalization_step_ = 0.0;
+  cumulative_marking_duration_ = 0.0;
+  cumulative_sweeping_duration_ = 0.0;
+  allocation_time_ms_ = 0.0;
+  new_space_allocation_counter_bytes_ = 0.0;
+  old_generation_allocation_counter_bytes_ = 0.0;
+  allocation_duration_since_gc_ = 0.0;
+  new_space_allocation_in_bytes_since_gc_ = 0.0;
+  old_generation_allocation_in_bytes_since_gc_ = 0.0;
+  combined_mark_compact_speed_cache_ = 0.0;
+  start_counter_ = 0;
+}
 
 void GCTracer::Start(GarbageCollector collector, const char* gc_reason,
                      const char* collector_reason) {
diff --git a/src/heap/gc-tracer.h b/src/heap/gc-tracer.h
index a657f15..ed07750 100644
--- a/src/heap/gc-tracer.h
+++ b/src/heap/gc-tracer.h
@@ -372,6 +372,8 @@
   static double AverageSpeed(const RingBuffer<BytesAndDuration>& buffer,
                              const BytesAndDuration& initial, double time_ms);
 
+  void ResetForTesting();
+
  private:
   // Print one detailed trace line in name=value format.
   // TODO(ernstm): Move to Heap.
diff --git a/src/heap/heap-inl.h b/src/heap/heap-inl.h
index f9c9235..d6c509e 100644
--- a/src/heap/heap-inl.h
+++ b/src/heap/heap-inl.h
@@ -393,14 +393,30 @@
   return OldGenerationSpaceAvailable() < 0;
 }
 
-
+template <PromotionMode promotion_mode>
 bool Heap::ShouldBePromoted(Address old_address, int object_size) {
   Page* page = Page::FromAddress(old_address);
   Address age_mark = new_space_.age_mark();
+
+  if (promotion_mode == PROMOTE_MARKED) {
+    MarkBit mark_bit = Marking::MarkBitFrom(old_address);
+    if (!Marking::IsWhite(mark_bit)) {
+      return true;
+    }
+  }
+
   return page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) &&
          (!page->ContainsLimit(age_mark) || old_address < age_mark);
 }
 
+PromotionMode Heap::CurrentPromotionMode() {
+  if (incremental_marking()->IsMarking()) {
+    return PROMOTE_MARKED;
+  } else {
+    return DEFAULT_PROMOTION;
+  }
+}
+
 void Heap::RecordWrite(Object* object, int offset, Object* o) {
   if (!InNewSpace(o) || !object->IsHeapObject() || InNewSpace(object)) {
     return;
@@ -460,6 +476,31 @@
             static_cast<size_t>(byte_size / kPointerSize));
 }
 
+bool Heap::PurgeLeftTrimmedObject(Object** object) {
+  HeapObject* current = reinterpret_cast<HeapObject*>(*object);
+  const MapWord map_word = current->map_word();
+  if (current->IsFiller() && !map_word.IsForwardingAddress()) {
+#ifdef DEBUG
+    // We need to find a FixedArrayBase map after walking the fillers.
+    while (current->IsFiller()) {
+      Address next = reinterpret_cast<Address>(current);
+      if (current->map() == one_pointer_filler_map()) {
+        next += kPointerSize;
+      } else if (current->map() == two_pointer_filler_map()) {
+        next += 2 * kPointerSize;
+      } else {
+        next += current->Size();
+      }
+      current = reinterpret_cast<HeapObject*>(next);
+    }
+    DCHECK(current->IsFixedArrayBase());
+#endif  // DEBUG
+    *object = nullptr;
+    return true;
+  }
+  return false;
+}
+
 template <Heap::FindMementoMode mode>
 AllocationMemento* Heap::FindAllocationMemento(HeapObject* object) {
   // Check if there is potentially a memento behind the object. If
@@ -510,7 +551,7 @@
 
 template <Heap::UpdateAllocationSiteMode mode>
 void Heap::UpdateAllocationSite(HeapObject* object,
-                                HashMap* pretenuring_feedback) {
+                                base::HashMap* pretenuring_feedback) {
   DCHECK(InFromSpace(object));
   if (!FLAG_allocation_site_pretenuring ||
       !AllocationSite::CanTrack(object->map()->instance_type()))
@@ -538,7 +579,7 @@
     // to dereference the allocation site and rather have to postpone all checks
     // till actually merging the data.
     Address key = memento_candidate->GetAllocationSiteUnchecked();
-    HashMap::Entry* e =
+    base::HashMap::Entry* e =
         pretenuring_feedback->LookupOrInsert(key, ObjectHash(key));
     DCHECK(e != nullptr);
     (*bit_cast<intptr_t*>(&e->value))++;
@@ -596,12 +637,12 @@
   for (int i = 0; i < new_space_strings_.length(); ++i) {
     Object* obj = Object::cast(new_space_strings_[i]);
     DCHECK(heap_->InNewSpace(obj));
-    DCHECK(obj != heap_->the_hole_value());
+    DCHECK(!obj->IsTheHole(heap_->isolate()));
   }
   for (int i = 0; i < old_space_strings_.length(); ++i) {
     Object* obj = Object::cast(old_space_strings_[i]);
     DCHECK(!heap_->InNewSpace(obj));
-    DCHECK(obj != heap_->the_hole_value());
+    DCHECK(!obj->IsTheHole(heap_->isolate()));
   }
 #endif
 }
@@ -710,6 +751,17 @@
   set_interpreter_entry_return_pc_offset(Smi::FromInt(pc_offset));
 }
 
+int Heap::GetNextTemplateSerialNumber() {
+  int next_serial_number = next_template_serial_number()->value() + 1;
+  set_next_template_serial_number(Smi::FromInt(next_serial_number));
+  return next_serial_number;
+}
+
+void Heap::SetSerializedTemplates(FixedArray* templates) {
+  DCHECK_EQ(empty_fixed_array(), serialized_templates());
+  set_serialized_templates(templates);
+}
+
 AlwaysAllocateScope::AlwaysAllocateScope(Isolate* isolate)
     : heap_(isolate->heap()) {
   heap_->always_allocate_scope_count_.Increment(1);
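
The promotion-mode machinery above feeds the scavenger changes in heap.cc
below: CurrentPromotionMode() returns PROMOTE_MARKED while incremental
marking is running, and ShouldBePromoted<PROMOTE_MARKED> then promotes any
object whose mark bit is already non-white. A condensed sketch of the
combined decision (|below_age_mark| is a hypothetical stand-in for the
pre-existing age-mark heuristic, not verbatim code):

  PromotionMode mode = heap->incremental_marking()->IsMarking()
                           ? PROMOTE_MARKED
                           : DEFAULT_PROMOTION;
  // Promote already-marked objects during incremental marking, and
  // otherwise fall back to the age-mark rule.
  bool promote = (mode == PROMOTE_MARKED &&
                  !Marking::IsWhite(Marking::MarkBitFrom(old_address))) ||
                 below_age_mark;  // hypothetical stand-in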
diff --git a/src/heap/heap.cc b/src/heap/heap.cc
index c8f1557..c59a8d3 100644
--- a/src/heap/heap.cc
+++ b/src/heap/heap.cc
@@ -17,7 +17,7 @@
 #include "src/debug/debug.h"
 #include "src/deoptimizer.h"
 #include "src/global-handles.h"
-#include "src/heap/array-buffer-tracker.h"
+#include "src/heap/array-buffer-tracker-inl.h"
 #include "src/heap/gc-idle-time-handler.h"
 #include "src/heap/gc-tracer.h"
 #include "src/heap/incremental-marking.h"
@@ -32,7 +32,6 @@
 #include "src/heap/scavenger-inl.h"
 #include "src/heap/store-buffer.h"
 #include "src/interpreter/interpreter.h"
-#include "src/profiler/cpu-profiler.h"
 #include "src/regexp/jsregexp.h"
 #include "src/runtime-profiler.h"
 #include "src/snapshot/natives.h"
@@ -69,8 +68,9 @@
 };
 
 Heap::Heap()
-    : amount_of_external_allocated_memory_(0),
-      amount_of_external_allocated_memory_at_last_global_gc_(0),
+    : external_memory_(0),
+      external_memory_limit_(kExternalAllocationLimit),
+      external_memory_at_last_mark_compact_(0),
       isolate_(nullptr),
       code_range_size_(0),
       // semispace_size_ should be a power of 2 and old_generation_size_ should
@@ -160,7 +160,6 @@
       gc_callbacks_depth_(0),
       deserialization_complete_(false),
       strong_roots_list_(NULL),
-      array_buffer_tracker_(NULL),
       heap_iterator_depth_(0),
       force_oom_(false) {
 // Allow build-time customization of the max semispace size. Building
@@ -385,9 +384,8 @@
                          ", committed: %6" V8PRIdPTR " KB\n",
                this->SizeOfObjects() / KB, this->Available() / KB,
                this->CommittedMemory() / KB);
-  PrintIsolate(
-      isolate_, "External memory reported: %6" V8PRIdPTR " KB\n",
-      static_cast<intptr_t>(amount_of_external_allocated_memory_ / KB));
+  PrintIsolate(isolate_, "External memory reported: %6" V8PRIdPTR " KB\n",
+               static_cast<intptr_t>(external_memory_ / KB));
   PrintIsolate(isolate_, "Total time spent in GC  : %.1f ms\n",
                total_gc_time_ms_);
 }
@@ -502,11 +500,10 @@
   }
 }
 
-
 void Heap::MergeAllocationSitePretenuringFeedback(
-    const HashMap& local_pretenuring_feedback) {
+    const base::HashMap& local_pretenuring_feedback) {
   AllocationSite* site = nullptr;
-  for (HashMap::Entry* local_entry = local_pretenuring_feedback.Start();
+  for (base::HashMap::Entry* local_entry = local_pretenuring_feedback.Start();
        local_entry != nullptr;
        local_entry = local_pretenuring_feedback.Next(local_entry)) {
     site = reinterpret_cast<AllocationSite*>(local_entry->key);
@@ -535,8 +532,8 @@
 class Heap::PretenuringScope {
  public:
   explicit PretenuringScope(Heap* heap) : heap_(heap) {
-    heap_->global_pretenuring_feedback_ =
-        new HashMap(HashMap::PointersMatch, kInitialFeedbackCapacity);
+    heap_->global_pretenuring_feedback_ = new base::HashMap(
+        base::HashMap::PointersMatch, kInitialFeedbackCapacity);
   }
 
   ~PretenuringScope() {
@@ -562,7 +559,7 @@
 
     // Step 1: Digest feedback for recorded allocation sites.
     bool maximum_size_scavenge = MaximumSizeScavenge();
-    for (HashMap::Entry* e = global_pretenuring_feedback_->Start();
+    for (base::HashMap::Entry* e = global_pretenuring_feedback_->Start();
          e != nullptr; e = global_pretenuring_feedback_->Next(e)) {
       allocation_sites++;
       site = reinterpret_cast<AllocationSite*>(e->key);
@@ -1111,9 +1108,11 @@
     // Visit all HeapObject pointers in [start, end).
     for (Object** p = start; p < end; p++) {
       if ((*p)->IsHeapObject()) {
+        HeapObject* object = HeapObject::cast(*p);
+        Isolate* isolate = object->GetIsolate();
         // Check that the string is actually internalized.
-        CHECK((*p)->IsTheHole() || (*p)->IsUndefined() ||
-              (*p)->IsInternalizedString());
+        CHECK(object->IsTheHole(isolate) || object->IsUndefined(isolate) ||
+              object->IsInternalizedString());
       }
     }
   }
@@ -1212,12 +1211,12 @@
   }
 
   Object* context = native_contexts_list();
-  while (!context->IsUndefined()) {
+  while (!context->IsUndefined(isolate())) {
     // GC can happen when the context is not fully initialized,
     // so the cache can be undefined.
     Object* cache =
         Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX);
-    if (!cache->IsUndefined()) {
+    if (!cache->IsUndefined(isolate())) {
       NormalizedMapCache::cast(cache)->Clear();
     }
     context = Context::cast(context)->next_context_link();
@@ -1340,8 +1339,8 @@
   intptr_t old_gen_size = PromotedSpaceSizeOfObjects();
   if (collector == MARK_COMPACTOR) {
     // Register the amount of external allocated memory.
-    amount_of_external_allocated_memory_at_last_global_gc_ =
-        amount_of_external_allocated_memory_;
+    external_memory_at_last_mark_compact_ = external_memory_;
+    external_memory_limit_ = external_memory_ + kExternalAllocationLimit;
     SetOldGenerationAllocationLimit(old_gen_size, gc_speed, mutator_speed);
   } else if (HasLowYoungGenerationAllocationRate() &&
              old_generation_size_configured_) {
@@ -1612,6 +1611,8 @@
   // Pause the inline allocation steps.
   PauseAllocationObserversScope pause_observers(this);
 
+  mark_compact_collector()->sweeper().EnsureNewSpaceCompleted();
+
 #ifdef VERIFY_HEAP
   if (FLAG_verify_heap) VerifyNonPointerSpacePointers(this);
 #endif
@@ -1626,7 +1627,12 @@
 
   scavenge_collector_->SelectScavengingVisitorsTable();
 
-  array_buffer_tracker()->PrepareDiscoveryInNewSpace();
+  if (UsingEmbedderHeapTracer()) {
+    // Register found wrappers with embedder so it can add them to its marking
+    // deque and correctly manage the case when v8 scavenger collects the
+    // wrappers by either keeping wrappables alive, or cleaning marking deque.
+    mark_compact_collector()->RegisterWrappersWithEmbedderHeapTracer();
+  }
 
   // Flip the semispaces.  After flipping, to space is empty, from space has
   // live objects.
@@ -1653,6 +1659,7 @@
   Address new_space_front = new_space_.ToSpaceStart();
   promotion_queue_.Initialize();
 
+  PromotionMode promotion_mode = CurrentPromotionMode();
   ScavengeVisitor scavenge_visitor(this);
 
   if (FLAG_scavenge_reclaim_unmodified_objects) {
@@ -1669,8 +1676,21 @@
   {
     // Copy objects reachable from the old generation.
     TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_OLD_TO_NEW_POINTERS);
-    RememberedSet<OLD_TO_NEW>::IterateWithWrapper(this,
-                                                  Scavenger::ScavengeObject);
+    RememberedSet<OLD_TO_NEW>::Iterate(this, [this](Address addr) {
+      return Scavenger::CheckAndScavengeObject(this, addr);
+    });
+
+    RememberedSet<OLD_TO_NEW>::IterateTyped(
+        this, [this](SlotType type, Address host_addr, Address addr) {
+          return UpdateTypedSlotHelper::UpdateTypedSlot(
+              isolate(), type, addr, [this](Object** addr) {
+                // We expect that objects referenced by code are long living.
+                // If we do not force promotion, then we need to clear
+                // old_to_new slots in dead code objects after mark-compact.
+                return Scavenger::CheckAndScavengeObject(
+                    this, reinterpret_cast<Address>(addr));
+              });
+        });
   }
 
   {
@@ -1692,7 +1712,8 @@
 
   {
     TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SEMISPACE);
-    new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
+    new_space_front =
+        DoScavenge(&scavenge_visitor, new_space_front, promotion_mode);
   }
 
   if (FLAG_scavenge_reclaim_unmodified_objects) {
@@ -1701,12 +1722,14 @@
 
     isolate()->global_handles()->IterateNewSpaceWeakUnmodifiedRoots(
         &scavenge_visitor);
-    new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
+    new_space_front =
+        DoScavenge(&scavenge_visitor, new_space_front, promotion_mode);
   } else {
     TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_OBJECT_GROUPS);
     while (isolate()->global_handles()->IterateObjectGroups(
         &scavenge_visitor, &IsUnscavengedHeapObject)) {
-      new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
+      new_space_front =
+          DoScavenge(&scavenge_visitor, new_space_front, promotion_mode);
     }
     isolate()->global_handles()->RemoveObjectGroups();
     isolate()->global_handles()->RemoveImplicitRefGroups();
@@ -1716,7 +1739,8 @@
 
     isolate()->global_handles()->IterateNewSpaceWeakIndependentRoots(
         &scavenge_visitor);
-    new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
+    new_space_front =
+        DoScavenge(&scavenge_visitor, new_space_front, promotion_mode);
   }
 
   UpdateNewSpaceReferencesInExternalStringTable(
@@ -1734,7 +1758,7 @@
   // Set age mark.
   new_space_.set_age_mark(new_space_.top());
 
-  array_buffer_tracker()->FreeDead(true);
+  ArrayBufferTracker::FreeDeadInNewSpace(this);
 
   // Update how much has survived scavenge.
   IncrementYoungSurvivorsCounter(static_cast<int>(
@@ -1898,9 +1922,9 @@
   external_string_table_.Iterate(&external_string_table_visitor);
 }
 
-
 Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
-                         Address new_space_front) {
+                         Address new_space_front,
+                         PromotionMode promotion_mode) {
   do {
     SemiSpace::AssertValidRange(new_space_front, new_space_.top());
     // The addresses new_space_front and new_space_.top() define a
@@ -1909,8 +1933,14 @@
     while (new_space_front != new_space_.top()) {
       if (!Page::IsAlignedToPageSize(new_space_front)) {
         HeapObject* object = HeapObject::FromAddress(new_space_front);
-        new_space_front +=
-            StaticScavengeVisitor::IterateBody(object->map(), object);
+        if (promotion_mode == PROMOTE_MARKED) {
+          new_space_front += StaticScavengeVisitor<PROMOTE_MARKED>::IterateBody(
+              object->map(), object);
+        } else {
+          new_space_front +=
+              StaticScavengeVisitor<DEFAULT_PROMOTION>::IterateBody(
+                  object->map(), object);
+        }
       } else {
         new_space_front = Page::FromAllocationAreaAddress(new_space_front)
                               ->next_page()
@@ -2014,12 +2044,12 @@
 
 
 void Heap::RegisterNewArrayBuffer(JSArrayBuffer* buffer) {
-  return array_buffer_tracker()->RegisterNew(buffer);
+  ArrayBufferTracker::RegisterNew(this, buffer);
 }
 
 
 void Heap::UnregisterArrayBuffer(JSArrayBuffer* buffer) {
-  return array_buffer_tracker()->Unregister(buffer);
+  ArrayBufferTracker::Unregister(this, buffer);
 }
 
 
@@ -2306,8 +2336,9 @@
     }
 
     {  // Create a separate external one byte string map for native sources.
-      AllocationResult allocation = AllocateMap(EXTERNAL_ONE_BYTE_STRING_TYPE,
-                                                ExternalOneByteString::kSize);
+      AllocationResult allocation =
+          AllocateMap(SHORT_EXTERNAL_ONE_BYTE_STRING_TYPE,
+                      ExternalOneByteString::kShortSize);
       if (!allocation.To(&obj)) return false;
       Map* map = Map::cast(obj);
       map->SetConstructorFunctionIndex(Context::STRING_FUNCTION_INDEX);
@@ -2794,6 +2825,20 @@
   }
 
   {
+    Handle<FixedArray> empty_literals_array =
+        factory->NewFixedArray(1, TENURED);
+    empty_literals_array->set(0, *factory->empty_fixed_array());
+    set_empty_literals_array(*empty_literals_array);
+  }
+
+  {
+    Handle<FixedArray> empty_sloppy_arguments_elements =
+        factory->NewFixedArray(2, TENURED);
+    empty_sloppy_arguments_elements->set_map(sloppy_arguments_elements_map());
+    set_empty_sloppy_arguments_elements(*empty_sloppy_arguments_elements);
+  }
+
+  {
     Handle<WeakCell> cell = factory->NewWeakCell(factory->undefined_value());
     set_empty_weak_cell(*cell);
     cell->clear();
@@ -2825,6 +2870,7 @@
 
   // Handling of script id generation is in Heap::NextScriptId().
   set_last_script_id(Smi::FromInt(v8::UnboundScript::kNoScriptId));
+  set_next_template_serial_number(Smi::FromInt(0));
 
   // Allocate the empty script.
   Handle<Script> script = factory->NewScript(factory->empty_string());
@@ -2851,6 +2897,8 @@
       handle(Smi::FromInt(Isolate::kArrayProtectorValid), isolate()));
   set_species_protector(*species_cell);
 
+  set_serialized_templates(empty_fixed_array());
+
   set_weak_stack_trace_list(Smi::FromInt(0));
 
   set_noscript_shared_function_infos(Smi::FromInt(0));
@@ -2888,6 +2936,7 @@
     case kRetainedMapsRootIndex:
     case kNoScriptSharedFunctionInfosRootIndex:
     case kWeakStackTraceListRootIndex:
+    case kSerializedTemplatesRootIndex:
 // Smi values
 #define SMI_ENTRY(type, name, Name) case k##Name##RootIndex:
       SMI_ROOT_LIST(SMI_ENTRY)
@@ -3073,14 +3122,8 @@
 
   if (lo_space()->Contains(object)) return false;
 
-  Page* page = Page::FromAddress(address);
-  // We can move the object start if:
-  // (1) the object is not in old space,
-  // (2) the page of the object was already swept,
-  // (3) the page was already concurrently swept. This case is an optimization
-  // for concurrent sweeping. The WasSwept predicate for concurrently swept
-  // pages is set after sweeping all pages.
-  return !InOldSpace(object) || page->SweepingDone();
+  // We can move the object start if the page was already swept.
+  return Page::FromAddress(address)->SweepingDone();
 }
 
 
@@ -3105,6 +3148,7 @@
 
 FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
                                          int elements_to_trim) {
+  CHECK_NOT_NULL(object);
   DCHECK(!object->IsFixedTypedArrayBase());
   DCHECK(!object->IsByteArray());
   const int element_size = object->IsFixedArray() ? kPointerSize : kDoubleSize;
@@ -3323,8 +3367,7 @@
   result->set_map_no_write_barrier(code_map());
   Code* code = Code::cast(result);
   DCHECK(IsAligned(bit_cast<intptr_t>(code->address()), kCodeAlignment));
-  DCHECK(memory_allocator()->code_range() == NULL ||
-         !memory_allocator()->code_range()->valid() ||
+  DCHECK(!memory_allocator()->code_range()->valid() ||
          memory_allocator()->code_range()->contains(code->address()) ||
          object_size <= code_space()->AreaSize());
   code->set_gc_metadata(Smi::FromInt(0));
@@ -3350,8 +3393,7 @@
 
   // Relocate the copy.
   DCHECK(IsAligned(bit_cast<intptr_t>(new_code->address()), kCodeAlignment));
-  DCHECK(memory_allocator()->code_range() == NULL ||
-         !memory_allocator()->code_range()->valid() ||
+  DCHECK(!memory_allocator()->code_range()->valid() ||
          memory_allocator()->code_range()->contains(code->address()) ||
          obj_size <= code_space()->AreaSize());
   new_code->Relocate(new_addr - old_addr);
@@ -3382,60 +3424,6 @@
   return copy;
 }
 
-AllocationResult Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
-  // Allocate ByteArray before the Code object, so that we do not risk
-  // leaving uninitialized Code object (and breaking the heap).
-  ByteArray* reloc_info_array = nullptr;
-  {
-    AllocationResult allocation =
-        AllocateByteArray(reloc_info.length(), TENURED);
-    if (!allocation.To(&reloc_info_array)) return allocation;
-  }
-
-  int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);
-
-  int new_obj_size = Code::SizeFor(new_body_size);
-
-  Address old_addr = code->address();
-
-  size_t relocation_offset =
-      static_cast<size_t>(code->instruction_end() - old_addr);
-
-  HeapObject* result = nullptr;
-  AllocationResult allocation = AllocateRaw(new_obj_size, CODE_SPACE);
-  if (!allocation.To(&result)) return allocation;
-
-  // Copy code object.
-  Address new_addr = result->address();
-
-  // Copy header and instructions.
-  CopyBytes(new_addr, old_addr, relocation_offset);
-
-  Code* new_code = Code::cast(result);
-  new_code->set_relocation_info(reloc_info_array);
-
-  // Copy patched rinfo.
-  CopyBytes(new_code->relocation_start(), reloc_info.start(),
-            static_cast<size_t>(reloc_info.length()));
-
-  // Relocate the copy.
-  DCHECK(IsAligned(bit_cast<intptr_t>(new_code->address()), kCodeAlignment));
-  DCHECK(memory_allocator()->code_range() == NULL ||
-         !memory_allocator()->code_range()->valid() ||
-         memory_allocator()->code_range()->contains(code->address()) ||
-         new_obj_size <= code_space()->AreaSize());
-
-  new_code->Relocate(new_addr - old_addr);
-  // We have to iterate over the object and process its pointers when
-  // black allocation is on.
-  incremental_marking()->IterateBlackObject(new_code);
-#ifdef VERIFY_HEAP
-  if (FLAG_verify_heap) code->ObjectVerify();
-#endif
-  return new_code;
-}
-
-
 void Heap::InitializeAllocationMemento(AllocationMemento* memento,
                                        AllocationSite* allocation_site) {
   memento->set_map_no_write_barrier(allocation_memento_map());
@@ -3533,7 +3521,8 @@
   // Initialize the JSObject.
   InitializeJSObjectFromMap(js_obj, properties, map);
   DCHECK(js_obj->HasFastElements() || js_obj->HasFixedTypedArrayElements() ||
-         js_obj->HasFastStringWrapperElements());
+         js_obj->HasFastStringWrapperElements() ||
+         js_obj->HasFastArgumentsElements());
   return js_obj;
 }
 
@@ -3559,10 +3548,11 @@
   // Make the clone.
   Map* map = source->map();
 
-  // We can only clone regexps, normal objects, api objects or arrays. Copying
-  // anything else will break invariants.
+  // We can only clone regexps, normal objects, api objects, errors or arrays.
+  // Copying anything else will break invariants.
   CHECK(map->instance_type() == JS_REGEXP_TYPE ||
         map->instance_type() == JS_OBJECT_TYPE ||
+        map->instance_type() == JS_ERROR_TYPE ||
         map->instance_type() == JS_ARRAY_TYPE ||
         map->instance_type() == JS_API_OBJECT_TYPE ||
         map->instance_type() == JS_SPECIAL_API_OBJECT_TYPE);
@@ -4420,8 +4410,36 @@
 }
 
 void Heap::CollectGarbageOnMemoryPressure(const char* source) {
+  const int kGarbageThresholdInBytes = 8 * MB;
+  const double kGarbageThresholdAsFractionOfTotalMemory = 0.1;
+  // This constant is the maximum response time in the RAIL performance model.
+  const double kMaxMemoryPressurePauseMs = 100;
+
+  double start = MonotonicallyIncreasingTimeInMs();
   CollectAllGarbage(kReduceMemoryFootprintMask | kAbortIncrementalMarkingMask,
-                    source);
+                    source, kGCCallbackFlagCollectAllAvailableGarbage);
+  double end = MonotonicallyIncreasingTimeInMs();
+
+  // Estimate how much memory we can free.
+  int64_t potential_garbage =
+      (CommittedMemory() - SizeOfObjects()) + external_memory_;
+  // If we can potentially free a large amount of memory, then start a GC
+  // right away instead of waiting for the memory reducer.
+  if (potential_garbage >= kGarbageThresholdInBytes &&
+      potential_garbage >=
+          CommittedMemory() * kGarbageThresholdAsFractionOfTotalMemory) {
+    // If we spent less than half of the time budget, then perform a full GC.
+    // Otherwise, start incremental marking.
+    if (end - start < kMaxMemoryPressurePauseMs / 2) {
+      CollectAllGarbage(
+          kReduceMemoryFootprintMask | kAbortIncrementalMarkingMask, source,
+          kGCCallbackFlagCollectAllAvailableGarbage);
+    } else {
+      if (FLAG_incremental_marking && incremental_marking()->IsStopped()) {
+        StartIdleIncrementalMarking();
+      }
+    }
+  }
 }
 
 void Heap::MemoryPressureNotification(MemoryPressureLevel level,
@@ -4444,6 +4462,15 @@
   }
 }
 
+void Heap::CollectCodeStatistics() {
+  PagedSpace::ResetCodeAndMetadataStatistics(isolate());
+  // We do not look for code in new space or map space. If code somehow
+  // ends up in those spaces, we would miss it here.
+  code_space_->CollectCodeStatistics();
+  old_space_->CollectCodeStatistics();
+  lo_space_->CollectCodeStatistics();
+}
+
 #ifdef DEBUG
 
 void Heap::Print() {
@@ -4458,11 +4485,7 @@
 
 void Heap::ReportCodeStatistics(const char* title) {
   PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
-  PagedSpace::ResetCodeStatistics(isolate());
-  // We do not look for code in new space, map space, or old space.  If code
-  // somehow ends up in those spaces, we would miss it here.
-  code_space_->CollectCodeStatistics();
-  lo_space_->CollectCodeStatistics();
+  CollectCodeStatistics();
   PagedSpace::ReportCodeStatistics(isolate());
 }
 
@@ -4632,10 +4655,8 @@
 
 void Heap::ZapFromSpace() {
   if (!new_space_.IsFromSpaceCommitted()) return;
-  NewSpacePageIterator it(new_space_.FromSpaceStart(),
-                          new_space_.FromSpaceEnd());
-  while (it.has_next()) {
-    Page* page = it.next();
+  for (Page* page : NewSpacePageRange(new_space_.FromSpaceStart(),
+                                      new_space_.FromSpaceEnd())) {
     for (Address cursor = page->area_start(), limit = page->area_end();
          cursor < limit; cursor += kPointerSize) {
       Memory::Address_at(cursor) = kFromSpaceZapValue;
@@ -4759,49 +4780,6 @@
   v->Synchronize(VisitorSynchronization::kSmiRootList);
 }
 
-// We cannot avoid stale handles to left-trimmed objects, but can only make
-// sure all handles still needed are updated. Filter out a stale pointer
-// and clear the slot to allow post processing of handles (needed because
-// the sweeper might actually free the underlying page).
-class FixStaleLeftTrimmedHandlesVisitor : public ObjectVisitor {
- public:
-  explicit FixStaleLeftTrimmedHandlesVisitor(Heap* heap) : heap_(heap) {
-    USE(heap_);
-  }
-
-  void VisitPointer(Object** p) override { FixHandle(p); }
-
-  void VisitPointers(Object** start, Object** end) override {
-    for (Object** p = start; p < end; p++) FixHandle(p);
-  }
-
- private:
-  inline void FixHandle(Object** p) {
-    HeapObject* current = reinterpret_cast<HeapObject*>(*p);
-    if (!current->IsHeapObject()) return;
-    const MapWord map_word = current->map_word();
-    if (!map_word.IsForwardingAddress() && current->IsFiller()) {
-#ifdef DEBUG
-      // We need to find a FixedArrayBase map after walking the fillers.
-      while (current->IsFiller()) {
-        Address next = reinterpret_cast<Address>(current);
-        if (current->map() == heap_->one_pointer_filler_map()) {
-          next += kPointerSize;
-        } else if (current->map() == heap_->two_pointer_filler_map()) {
-          next += 2 * kPointerSize;
-        } else {
-          next += current->Size();
-        }
-        current = reinterpret_cast<HeapObject*>(next);
-      }
-      DCHECK(current->IsFixedArrayBase());
-#endif  // DEBUG
-      *p = nullptr;
-    }
-  }
-
-  Heap* heap_;
-};
 
 void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
   v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
@@ -4817,13 +4795,13 @@
   v->Synchronize(VisitorSynchronization::kTop);
   Relocatable::Iterate(isolate_, v);
   v->Synchronize(VisitorSynchronization::kRelocatable);
+  isolate_->debug()->Iterate(v);
+  v->Synchronize(VisitorSynchronization::kDebug);
 
   isolate_->compilation_cache()->Iterate(v);
   v->Synchronize(VisitorSynchronization::kCompilationCache);
 
   // Iterate over local handles in handle scopes.
-  FixStaleLeftTrimmedHandlesVisitor left_trim_visitor(this);
-  isolate_->handle_scope_implementer()->Iterate(&left_trim_visitor);
   isolate_->handle_scope_implementer()->Iterate(v);
   isolate_->IterateDeferredHandles(v);
   v->Synchronize(VisitorSynchronization::kHandleScope);
@@ -5064,11 +5042,8 @@
 
 
 int64_t Heap::PromotedExternalMemorySize() {
-  if (amount_of_external_allocated_memory_ <=
-      amount_of_external_allocated_memory_at_last_global_gc_)
-    return 0;
-  return amount_of_external_allocated_memory_ -
-         amount_of_external_allocated_memory_at_last_global_gc_;
+  if (external_memory_ <= external_memory_at_last_mark_compact_) return 0;
+  return external_memory_ - external_memory_at_last_mark_compact_;
 }
 
 
@@ -5242,7 +5217,8 @@
 
 static void InitializeGCOnce() {
   Scavenger::Initialize();
-  StaticScavengeVisitor::Initialize();
+  StaticScavengeVisitor<DEFAULT_PROMOTION>::Initialize();
+  StaticScavengeVisitor<PROMOTE_MARKED>::Initialize();
   MarkCompactCollector::Initialize();
 }
 
@@ -5335,8 +5311,6 @@
 
   scavenge_job_ = new ScavengeJob();
 
-  array_buffer_tracker_ = new ArrayBufferTracker(this);
-
   LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
   LOG(isolate_, IntPtrTEvent("heap-available", Available()));
 
@@ -5399,8 +5373,9 @@
   // All pages right after bootstrapping must be marked as never-evacuate.
   PagedSpaces spaces(this);
   for (PagedSpace* s = spaces.next(); s != NULL; s = spaces.next()) {
-    PageIterator it(s);
-    while (it.has_next()) CHECK(it.next()->NeverEvacuate());
+    for (Page* p : *s) {
+      CHECK(p->NeverEvacuate());
+    }
   }
 #endif  // DEBUG
 }
@@ -5496,9 +5471,6 @@
   delete scavenge_job_;
   scavenge_job_ = nullptr;
 
-  delete array_buffer_tracker_;
-  array_buffer_tracker_ = nullptr;
-
   isolate_->global_handles()->TearDown();
 
   external_string_table_.TearDown();
@@ -5605,6 +5577,33 @@
   return DependentCode::cast(empty_fixed_array());
 }
 
+namespace {
+void CompactWeakFixedArray(Object* object) {
+  if (object->IsWeakFixedArray()) {
+    WeakFixedArray* array = WeakFixedArray::cast(object);
+    array->Compact<WeakFixedArray::NullCallback>();
+  }
+}
+}  // anonymous namespace
+
+void Heap::CompactWeakFixedArrays() {
+  // Find known WeakFixedArrays and compact them.
+  HeapIterator iterator(this);
+  for (HeapObject* o = iterator.next(); o != NULL; o = iterator.next()) {
+    if (o->IsPrototypeInfo()) {
+      Object* prototype_users = PrototypeInfo::cast(o)->prototype_users();
+      if (prototype_users->IsWeakFixedArray()) {
+        WeakFixedArray* array = WeakFixedArray::cast(prototype_users);
+        array->Compact<JSObject::PrototypeRegistryCompactionCallback>();
+      }
+    } else if (o->IsScript()) {
+      CompactWeakFixedArray(Script::cast(o)->shared_function_infos());
+    }
+  }
+  CompactWeakFixedArray(noscript_shared_function_infos());
+  CompactWeakFixedArray(script_list());
+  CompactWeakFixedArray(weak_stack_trace_list());
+}
 
 void Heap::AddRetainedMap(Handle<Map> map) {
   Handle<WeakCell> cell = Map::WeakCellForMap(map);
@@ -5932,14 +5931,14 @@
   // No iterator means we are done.
   if (object_iterator_ == nullptr) return nullptr;
 
-  if (HeapObject* obj = object_iterator_->next_object()) {
+  if (HeapObject* obj = object_iterator_->Next()) {
     // If the current iterator has more objects we are fine.
     return obj;
   } else {
     // Go though the spaces looking for one that has objects.
     while (space_iterator_->has_next()) {
       object_iterator_ = space_iterator_->next();
-      if (HeapObject* obj = object_iterator_->next_object()) {
+      if (HeapObject* obj = object_iterator_->Next()) {
         return obj;
       }
     }
@@ -6231,8 +6230,9 @@
 
 void Heap::ExternalStringTable::CleanUp() {
   int last = 0;
+  Isolate* isolate = heap_->isolate();
   for (int i = 0; i < new_space_strings_.length(); ++i) {
-    if (new_space_strings_[i] == heap_->the_hole_value()) {
+    if (new_space_strings_[i]->IsTheHole(isolate)) {
       continue;
     }
     DCHECK(new_space_strings_[i]->IsExternalString());
@@ -6247,7 +6247,7 @@
 
   last = 0;
   for (int i = 0; i < old_space_strings_.length(); ++i) {
-    if (old_space_strings_[i] == heap_->the_hole_value()) {
+    if (old_space_strings_[i]->IsTheHole(isolate)) {
       continue;
     }
     DCHECK(old_space_strings_[i]->IsExternalString());
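
The renamed external-memory fields tie into the tracker's concurrent
freeing path: background sweepers only accumulate freed byte counts via
update_external_memory_concurrently_freed(), and the main thread folds the
total back into external_memory_ with
account_external_memory_concurrently_freed(), as FreeDeadInNewSpace does
above. A sketch of that flow, with names taken from this diff (the thread
split is an assumption stated here, not spelled out by the patch):

  // Background sweeper: record freed backing-store bytes without touching
  // external_memory_ directly.
  heap->update_external_memory_concurrently_freed(
      static_cast<intptr_t>(freed_bytes));

  // Main thread, GC epilogue: apply the accumulated delta.
  heap->account_external_memory_concurrently_freed();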
diff --git a/src/heap/heap.h b/src/heap/heap.h
index 8fdb64a..ed1e652 100644
--- a/src/heap/heap.h
+++ b/src/heap/heap.h
@@ -28,68 +28,72 @@
 
 // Defines all the roots in Heap.
 #define STRONG_ROOT_LIST(V)                                                    \
-  V(Map, byte_array_map, ByteArrayMap)                                         \
+  /* Cluster the most popular ones in a few cache lines here at the top.    */ \
+  /* The first 32 entries are most often used in the startup snapshot and   */ \
+  /* can use a shorter representation in the serialization format.          */ \
   V(Map, free_space_map, FreeSpaceMap)                                         \
   V(Map, one_pointer_filler_map, OnePointerFillerMap)                          \
   V(Map, two_pointer_filler_map, TwoPointerFillerMap)                          \
-  /* Cluster the most popular ones in a few cache lines here at the top.    */ \
+  V(Oddball, uninitialized_value, UninitializedValue)                          \
   V(Oddball, undefined_value, UndefinedValue)                                  \
   V(Oddball, the_hole_value, TheHoleValue)                                     \
   V(Oddball, null_value, NullValue)                                            \
   V(Oddball, true_value, TrueValue)                                            \
   V(Oddball, false_value, FalseValue)                                          \
   V(String, empty_string, empty_string)                                        \
-  V(Oddball, uninitialized_value, UninitializedValue)                          \
-  V(Map, cell_map, CellMap)                                                    \
-  V(Map, global_property_cell_map, GlobalPropertyCellMap)                      \
-  V(Map, shared_function_info_map, SharedFunctionInfoMap)                      \
   V(Map, meta_map, MetaMap)                                                    \
-  V(Map, heap_number_map, HeapNumberMap)                                       \
-  V(Map, mutable_heap_number_map, MutableHeapNumberMap)                        \
-  V(Map, float32x4_map, Float32x4Map)                                          \
-  V(Map, int32x4_map, Int32x4Map)                                              \
-  V(Map, uint32x4_map, Uint32x4Map)                                            \
-  V(Map, bool32x4_map, Bool32x4Map)                                            \
-  V(Map, int16x8_map, Int16x8Map)                                              \
-  V(Map, uint16x8_map, Uint16x8Map)                                            \
-  V(Map, bool16x8_map, Bool16x8Map)                                            \
-  V(Map, int8x16_map, Int8x16Map)                                              \
-  V(Map, uint8x16_map, Uint8x16Map)                                            \
-  V(Map, bool8x16_map, Bool8x16Map)                                            \
-  V(Map, native_context_map, NativeContextMap)                                 \
+  V(Map, byte_array_map, ByteArrayMap)                                         \
   V(Map, fixed_array_map, FixedArrayMap)                                       \
-  V(Map, code_map, CodeMap)                                                    \
-  V(Map, scope_info_map, ScopeInfoMap)                                         \
   V(Map, fixed_cow_array_map, FixedCOWArrayMap)                                \
-  V(Map, fixed_double_array_map, FixedDoubleArrayMap)                          \
-  V(Map, weak_cell_map, WeakCellMap)                                           \
-  V(Map, transition_array_map, TransitionArrayMap)                             \
+  V(Map, hash_table_map, HashTableMap)                                         \
+  V(Map, symbol_map, SymbolMap)                                                \
   V(Map, one_byte_string_map, OneByteStringMap)                                \
   V(Map, one_byte_internalized_string_map, OneByteInternalizedStringMap)       \
+  V(Map, scope_info_map, ScopeInfoMap)                                         \
+  V(Map, shared_function_info_map, SharedFunctionInfoMap)                      \
+  V(Map, code_map, CodeMap)                                                    \
   V(Map, function_context_map, FunctionContextMap)                             \
+  V(Map, cell_map, CellMap)                                                    \
+  V(Map, weak_cell_map, WeakCellMap)                                           \
+  V(Map, global_property_cell_map, GlobalPropertyCellMap)                      \
+  V(Map, foreign_map, ForeignMap)                                              \
+  V(Map, heap_number_map, HeapNumberMap)                                       \
+  V(Map, transition_array_map, TransitionArrayMap)                             \
+  V(FixedArray, empty_literals_array, EmptyLiteralsArray)                      \
   V(FixedArray, empty_fixed_array, EmptyFixedArray)                            \
-  V(ByteArray, empty_byte_array, EmptyByteArray)                               \
+  V(FixedArray, cleared_optimized_code_map, ClearedOptimizedCodeMap)           \
   V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray)             \
+  /* Entries beyond the first 32                                            */ \
   /* The roots above this line should be boring from a GC point of view.    */ \
   /* This means they are never in new space and never on a page that is     */ \
   /* being compacted.                                                       */ \
+  /* Oddballs */                                                               \
   V(Oddball, no_interceptor_result_sentinel, NoInterceptorResultSentinel)      \
   V(Oddball, arguments_marker, ArgumentsMarker)                                \
   V(Oddball, exception, Exception)                                             \
   V(Oddball, termination_exception, TerminationException)                      \
   V(Oddball, optimized_out, OptimizedOut)                                      \
   V(Oddball, stale_register, StaleRegister)                                    \
-  V(FixedArray, number_string_cache, NumberStringCache)                        \
-  V(Object, instanceof_cache_function, InstanceofCacheFunction)                \
-  V(Object, instanceof_cache_map, InstanceofCacheMap)                          \
-  V(Object, instanceof_cache_answer, InstanceofCacheAnswer)                    \
-  V(FixedArray, single_character_string_cache, SingleCharacterStringCache)     \
-  V(FixedArray, string_split_cache, StringSplitCache)                          \
-  V(FixedArray, regexp_multiple_cache, RegExpMultipleCache)                    \
-  V(Smi, hash_seed, HashSeed)                                                  \
-  V(Map, hash_table_map, HashTableMap)                                         \
+  /* Context maps */                                                           \
+  V(Map, native_context_map, NativeContextMap)                                 \
+  V(Map, module_context_map, ModuleContextMap)                                 \
+  V(Map, script_context_map, ScriptContextMap)                                 \
+  V(Map, block_context_map, BlockContextMap)                                   \
+  V(Map, catch_context_map, CatchContextMap)                                   \
+  V(Map, with_context_map, WithContextMap)                                     \
+  V(Map, debug_evaluate_context_map, DebugEvaluateContextMap)                  \
+  V(Map, script_context_table_map, ScriptContextTableMap)                      \
+  /* Maps */                                                                   \
+  V(Map, fixed_double_array_map, FixedDoubleArrayMap)                          \
+  V(Map, mutable_heap_number_map, MutableHeapNumberMap)                        \
   V(Map, ordered_hash_table_map, OrderedHashTableMap)                          \
-  V(Map, symbol_map, SymbolMap)                                                \
+  V(Map, sloppy_arguments_elements_map, SloppyArgumentsElementsMap)            \
+  V(Map, message_object_map, JSMessageObjectMap)                               \
+  V(Map, neander_map, NeanderMap)                                              \
+  V(Map, external_map, ExternalMap)                                            \
+  V(Map, bytecode_array_map, BytecodeArrayMap)                                 \
+  /* String maps */                                                            \
+  V(Map, native_source_string_map, NativeSourceStringMap)                      \
   V(Map, string_map, StringMap)                                                \
   V(Map, cons_one_byte_string_map, ConsOneByteStringMap)                       \
   V(Map, cons_string_map, ConsStringMap)                                       \
@@ -99,7 +103,6 @@
   V(Map, external_string_with_one_byte_data_map,                               \
     ExternalStringWithOneByteDataMap)                                          \
   V(Map, external_one_byte_string_map, ExternalOneByteStringMap)               \
-  V(Map, native_source_string_map, NativeSourceStringMap)                      \
   V(Map, short_external_string_map, ShortExternalStringMap)                    \
   V(Map, short_external_string_with_one_byte_data_map,                         \
     ShortExternalStringWithOneByteDataMap)                                     \
@@ -116,6 +119,7 @@
   V(Map, short_external_one_byte_internalized_string_map,                      \
     ShortExternalOneByteInternalizedStringMap)                                 \
   V(Map, short_external_one_byte_string_map, ShortExternalOneByteStringMap)    \
+  /* Array element maps */                                                     \
   V(Map, fixed_uint8_array_map, FixedUint8ArrayMap)                            \
   V(Map, fixed_int8_array_map, FixedInt8ArrayMap)                              \
   V(Map, fixed_uint16_array_map, FixedUint16ArrayMap)                          \
@@ -125,6 +129,18 @@
   V(Map, fixed_float32_array_map, FixedFloat32ArrayMap)                        \
   V(Map, fixed_float64_array_map, FixedFloat64ArrayMap)                        \
   V(Map, fixed_uint8_clamped_array_map, FixedUint8ClampedArrayMap)             \
+  V(Map, float32x4_map, Float32x4Map)                                          \
+  V(Map, int32x4_map, Int32x4Map)                                              \
+  V(Map, uint32x4_map, Uint32x4Map)                                            \
+  V(Map, bool32x4_map, Bool32x4Map)                                            \
+  V(Map, int16x8_map, Int16x8Map)                                              \
+  V(Map, uint16x8_map, Uint16x8Map)                                            \
+  V(Map, bool16x8_map, Bool16x8Map)                                            \
+  V(Map, int8x16_map, Int8x16Map)                                              \
+  V(Map, uint8x16_map, Uint8x16Map)                                            \
+  V(Map, bool8x16_map, Bool8x16Map)                                            \
+  /* Canonical empty values */                                                 \
+  V(ByteArray, empty_byte_array, EmptyByteArray)                               \
   V(FixedTypedArrayBase, empty_fixed_uint8_array, EmptyFixedUint8Array)        \
   V(FixedTypedArrayBase, empty_fixed_int8_array, EmptyFixedInt8Array)          \
   V(FixedTypedArrayBase, empty_fixed_uint16_array, EmptyFixedUint16Array)      \
@@ -135,14 +151,57 @@
   V(FixedTypedArrayBase, empty_fixed_float64_array, EmptyFixedFloat64Array)    \
   V(FixedTypedArrayBase, empty_fixed_uint8_clamped_array,                      \
     EmptyFixedUint8ClampedArray)                                               \
-  V(Map, sloppy_arguments_elements_map, SloppyArgumentsElementsMap)            \
-  V(Map, catch_context_map, CatchContextMap)                                   \
-  V(Map, with_context_map, WithContextMap)                                     \
-  V(Map, debug_evaluate_context_map, DebugEvaluateContextMap)                  \
-  V(Map, block_context_map, BlockContextMap)                                   \
-  V(Map, module_context_map, ModuleContextMap)                                 \
-  V(Map, script_context_map, ScriptContextMap)                                 \
-  V(Map, script_context_table_map, ScriptContextTableMap)                      \
+  V(Script, empty_script, EmptyScript)                                         \
+  V(Cell, undefined_cell, UndefinedCell)                                       \
+  V(FixedArray, empty_sloppy_arguments_elements, EmptySloppyArgumentsElements) \
+  V(SeededNumberDictionary, empty_slow_element_dictionary,                     \
+    EmptySlowElementDictionary)                                                \
+  V(TypeFeedbackVector, dummy_vector, DummyVector)                             \
+  V(PropertyCell, empty_property_cell, EmptyPropertyCell)                      \
+  V(WeakCell, empty_weak_cell, EmptyWeakCell)                                  \
+  /* Protectors */                                                             \
+  V(PropertyCell, array_protector, ArrayProtector)                             \
+  V(Cell, is_concat_spreadable_protector, IsConcatSpreadableProtector)         \
+  V(PropertyCell, has_instance_protector, HasInstanceProtector)                \
+  V(Cell, species_protector, SpeciesProtector)                                 \
+  /* Special numbers */                                                        \
+  V(HeapNumber, nan_value, NanValue)                                           \
+  V(HeapNumber, infinity_value, InfinityValue)                                 \
+  V(HeapNumber, minus_zero_value, MinusZeroValue)                              \
+  V(HeapNumber, minus_infinity_value, MinusInfinityValue)                      \
+  /* Caches */                                                                 \
+  V(FixedArray, number_string_cache, NumberStringCache)                        \
+  V(FixedArray, single_character_string_cache, SingleCharacterStringCache)     \
+  V(FixedArray, string_split_cache, StringSplitCache)                          \
+  V(FixedArray, regexp_multiple_cache, RegExpMultipleCache)                    \
+  V(Object, instanceof_cache_function, InstanceofCacheFunction)                \
+  V(Object, instanceof_cache_map, InstanceofCacheMap)                          \
+  V(Object, instanceof_cache_answer, InstanceofCacheAnswer)                    \
+  V(FixedArray, natives_source_cache, NativesSourceCache)                      \
+  V(FixedArray, experimental_natives_source_cache,                             \
+    ExperimentalNativesSourceCache)                                            \
+  V(FixedArray, extra_natives_source_cache, ExtraNativesSourceCache)           \
+  V(FixedArray, experimental_extra_natives_source_cache,                       \
+    ExperimentalExtraNativesSourceCache)                                       \
+  /* Lists and dictionaries */                                                 \
+  V(NameDictionary, intrinsic_function_names, IntrinsicFunctionNames)          \
+  V(NameDictionary, empty_properties_dictionary, EmptyPropertiesDictionary)    \
+  V(Object, symbol_registry, SymbolRegistry)                                   \
+  V(Object, script_list, ScriptList)                                           \
+  V(UnseededNumberDictionary, code_stubs, CodeStubs)                           \
+  V(FixedArray, materialized_objects, MaterializedObjects)                     \
+  V(FixedArray, microtask_queue, MicrotaskQueue)                               \
+  V(FixedArray, detached_contexts, DetachedContexts)                           \
+  V(ArrayList, retained_maps, RetainedMaps)                                    \
+  V(WeakHashTable, weak_object_to_code_table, WeakObjectToCodeTable)           \
+  V(Object, weak_stack_trace_list, WeakStackTraceList)                         \
+  V(Object, noscript_shared_function_infos, NoScriptSharedFunctionInfos)       \
+  V(FixedArray, serialized_templates, SerializedTemplates)                     \
+  /* Configured values */                                                      \
+  V(JSObject, message_listeners, MessageListeners)                             \
+  V(Code, js_entry_code, JsEntryCode)                                          \
+  V(Code, js_construct_entry_code, JsConstructEntryCode)                       \
+  /* Oddball maps */                                                           \
   V(Map, undefined_map, UndefinedMap)                                          \
   V(Map, the_hole_map, TheHoleMap)                                             \
   V(Map, null_map, NullMap)                                                    \
@@ -153,59 +212,21 @@
   V(Map, exception_map, ExceptionMap)                                          \
   V(Map, termination_exception_map, TerminationExceptionMap)                   \
   V(Map, optimized_out_map, OptimizedOutMap)                                   \
-  V(Map, stale_register_map, StaleRegisterMap)                                 \
-  V(Map, message_object_map, JSMessageObjectMap)                               \
-  V(Map, foreign_map, ForeignMap)                                              \
-  V(Map, neander_map, NeanderMap)                                              \
-  V(Map, external_map, ExternalMap)                                            \
-  V(HeapNumber, nan_value, NanValue)                                           \
-  V(HeapNumber, infinity_value, InfinityValue)                                 \
-  V(HeapNumber, minus_zero_value, MinusZeroValue)                              \
-  V(HeapNumber, minus_infinity_value, MinusInfinityValue)                      \
-  V(JSObject, message_listeners, MessageListeners)                             \
-  V(UnseededNumberDictionary, code_stubs, CodeStubs)                           \
-  V(Code, js_entry_code, JsEntryCode)                                          \
-  V(Code, js_construct_entry_code, JsConstructEntryCode)                       \
-  V(FixedArray, natives_source_cache, NativesSourceCache)                      \
-  V(FixedArray, experimental_natives_source_cache,                             \
-    ExperimentalNativesSourceCache)                                            \
-  V(FixedArray, extra_natives_source_cache, ExtraNativesSourceCache)           \
-  V(FixedArray, experimental_extra_natives_source_cache,                       \
-    ExperimentalExtraNativesSourceCache)                                       \
-  V(Script, empty_script, EmptyScript)                                         \
-  V(NameDictionary, intrinsic_function_names, IntrinsicFunctionNames)          \
-  V(NameDictionary, empty_properties_dictionary, EmptyPropertiesDictionary)    \
-  V(Cell, undefined_cell, UndefinedCell)                                       \
-  V(Object, symbol_registry, SymbolRegistry)                                   \
-  V(Object, script_list, ScriptList)                                           \
-  V(SeededNumberDictionary, empty_slow_element_dictionary,                     \
-    EmptySlowElementDictionary)                                                \
-  V(FixedArray, materialized_objects, MaterializedObjects)                     \
-  V(FixedArray, microtask_queue, MicrotaskQueue)                               \
-  V(TypeFeedbackVector, dummy_vector, DummyVector)                             \
-  V(FixedArray, cleared_optimized_code_map, ClearedOptimizedCodeMap)           \
-  V(FixedArray, detached_contexts, DetachedContexts)                           \
-  V(ArrayList, retained_maps, RetainedMaps)                                    \
-  V(WeakHashTable, weak_object_to_code_table, WeakObjectToCodeTable)           \
-  V(PropertyCell, array_protector, ArrayProtector)                             \
-  V(Cell, is_concat_spreadable_protector, IsConcatSpreadableProtector)         \
-  V(PropertyCell, empty_property_cell, EmptyPropertyCell)                      \
-  V(Object, weak_stack_trace_list, WeakStackTraceList)                         \
-  V(Object, noscript_shared_function_infos, NoScriptSharedFunctionInfos)       \
-  V(Map, bytecode_array_map, BytecodeArrayMap)                                 \
-  V(WeakCell, empty_weak_cell, EmptyWeakCell)                                  \
-  V(PropertyCell, has_instance_protector, HasInstanceProtector)                \
-  V(Cell, species_protector, SpeciesProtector)
+  V(Map, stale_register_map, StaleRegisterMap)
 
 // Entries in this list are limited to Smis and are not visited during GC.
-#define SMI_ROOT_LIST(V)                                                   \
-  V(Smi, stack_limit, StackLimit)                                          \
-  V(Smi, real_stack_limit, RealStackLimit)                                 \
-  V(Smi, last_script_id, LastScriptId)                                     \
-  V(Smi, arguments_adaptor_deopt_pc_offset, ArgumentsAdaptorDeoptPCOffset) \
-  V(Smi, construct_stub_deopt_pc_offset, ConstructStubDeoptPCOffset)       \
-  V(Smi, getter_stub_deopt_pc_offset, GetterStubDeoptPCOffset)             \
-  V(Smi, setter_stub_deopt_pc_offset, SetterStubDeoptPCOffset)             \
+#define SMI_ROOT_LIST(V)                                                       \
+  V(Smi, stack_limit, StackLimit)                                              \
+  V(Smi, real_stack_limit, RealStackLimit)                                     \
+  V(Smi, last_script_id, LastScriptId)                                         \
+  V(Smi, hash_seed, HashSeed)                                                  \
+  /* To distinguish the function templates, so that we can find them in the */ \
+  /* function cache of the native context. */                                  \
+  V(Smi, next_template_serial_number, NextTemplateSerialNumber)                \
+  V(Smi, arguments_adaptor_deopt_pc_offset, ArgumentsAdaptorDeoptPCOffset)     \
+  V(Smi, construct_stub_deopt_pc_offset, ConstructStubDeoptPCOffset)           \
+  V(Smi, getter_stub_deopt_pc_offset, GetterStubDeoptPCOffset)                 \
+  V(Smi, setter_stub_deopt_pc_offset, SetterStubDeoptPCOffset)                 \
   V(Smi, interpreter_entry_return_pc_offset, InterpreterEntryReturnPCOffset)
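Note on the lists being reshuffled above: STRONG_ROOT_LIST and SMI_ROOT_LIST are X-macros, so each V(type, name, CamelName) entry expands through a caller-supplied V into field declarations, accessors, or enum values, and the ordering above is therefore load-bearing for the generated layout. A minimal sketch of the pattern; DemoHeap and DEMO_ROOT_LIST are illustrative stand-ins, not part of V8:

#define DEMO_ROOT_LIST(V) \
  V(int, stack_limit)     \
  V(int, last_script_id)

struct DemoHeap {
// Expand one accessor per list entry.
#define DEMO_GETTER(type, name) \
  type name() const { return name##_; }
  DEMO_ROOT_LIST(DEMO_GETTER)
#undef DEMO_GETTER

// Expand one backing field per list entry.
#define DEMO_FIELD(type, name) type name##_ = 0;
  DEMO_ROOT_LIST(DEMO_FIELD)
#undef DEMO_FIELD
};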
 
 #define ROOT_LIST(V)  \
@@ -301,6 +322,8 @@
 class ScavengeJob;
 class WeakObjectRetainer;
 
+enum PromotionMode { PROMOTE_MARKED, DEFAULT_PROMOTION };
+
 typedef void (*ObjectSlotCallback)(HeapObject** from, HeapObject* to);
 
 // A queue of objects promoted during scavenge. Each object is accompanied
@@ -603,6 +626,12 @@
   // stored on the map to facilitate fast dispatch for {StaticVisitorBase}.
   static int GetStaticVisitorIdForMap(Map* map);
 
+  // We cannot avoid stale handles to left-trimmed objects, but can only make
+  // sure all handles still needed are updated. Filter out a stale pointer
+  // and clear the slot to allow post processing of handles (needed because
+  // the sweeper might actually free the underlying page).
+  inline bool PurgeLeftTrimmedObject(Object** object);
+
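For context on the new predicate: left-trimming moves an array's payload start forward and stamps a filler object over the vacated words, so a root slot recorded before the trim may now point at filler. A conceptual sketch only, with hypothetical names; the real check inspects the filler map:

// before trim:  slot --> [ FixedArray, length N ]
// after trim :  slot --> [ filler ][ FixedArray, length N-k ]
bool PurgeLeftTrimmedDemo(void** slot, bool points_at_filler) {
  if (points_at_filler) {
    *slot = nullptr;  // Clear so handle post-processing can skip it.
    return true;      // Caller stops visiting this stale slot.
  }
  return false;
}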
   // Notifies the heap that is ok to start marking or other activities that
   // should not happen during deserialization.
   void NotifyDeserializationComplete();
@@ -774,8 +803,11 @@
 
   // An object should be promoted if the object has survived a
   // scavenge operation.
+  template <PromotionMode promotion_mode>
   inline bool ShouldBePromoted(Address old_address, int object_size);
 
+  inline PromotionMode CurrentPromotionMode();
+
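The promotion predicate is now templatized on PromotionMode, so the policy is chosen at compile time instead of branching per object on the scavenge fast path. A hedged sketch of the dispatch shape; the policy bodies here are illustrative, not the real heuristics:

enum DemoPromotionMode { PROMOTE_MARKED, DEFAULT_PROMOTION };

template <DemoPromotionMode mode>
bool ShouldBePromotedDemo(bool is_marked, bool survived_scavenge) {
  // Illustrative policy: PROMOTE_MARKED eagerly promotes objects that
  // are already marked; the default promotes only scavenge survivors.
  if (mode == PROMOTE_MARKED && is_marked) return true;
  return survived_scavenge;
}

// A call site picks the mode once, e.g. from CurrentPromotionMode():
//   ShouldBePromotedDemo<DEFAULT_PROMOTION>(marked, survived);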
   void ClearNormalizedMapCaches();
 
   void IncrementDeferredCount(v8::Isolate::UseCounterFeature feature);
@@ -795,6 +827,9 @@
   inline void SetGetterStubDeoptPCOffset(int pc_offset);
   inline void SetSetterStubDeoptPCOffset(int pc_offset);
   inline void SetInterpreterEntryReturnPCOffset(int pc_offset);
+  inline int GetNextTemplateSerialNumber();
+
+  inline void SetSerializedTemplates(FixedArray* templates);
 
   // For post mortem debugging.
   void RememberUnmappedPage(Address page, bool compacted);
@@ -807,12 +842,16 @@
     global_ic_age_ = (global_ic_age_ + 1) & SharedFunctionInfo::ICAgeBits::kMax;
   }
 
-  int64_t amount_of_external_allocated_memory() {
-    return amount_of_external_allocated_memory_;
+  int64_t external_memory() { return external_memory_; }
+  void update_external_memory(int64_t delta) { external_memory_ += delta; }
+
+  void update_external_memory_concurrently_freed(intptr_t freed) {
+    external_memory_concurrently_freed_.Increment(freed);
   }
 
-  void update_amount_of_external_allocated_memory(int64_t delta) {
-    amount_of_external_allocated_memory_ += delta;
+  void account_external_memory_concurrently_freed() {
+    external_memory_ -= external_memory_concurrently_freed_.Value();
+    external_memory_concurrently_freed_.SetValue(0);
   }
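The renamed accessors split external-memory accounting into a plain main-thread counter plus an atomic side channel for memory freed on sweeper threads, which is folded back in at a safe point. A self-contained sketch of the same scheme, with std::atomic standing in for base::AtomicNumber:

#include <atomic>
#include <cstdint>

class ExternalMemoryDemo {
 public:
  // Main thread: buffers registered or unregistered through the API.
  void update(int64_t delta) { external_memory_ += delta; }

  // Sweeper threads: backing stores freed concurrently.
  void update_concurrently_freed(intptr_t freed) {
    concurrently_freed_.fetch_add(freed, std::memory_order_relaxed);
  }

  // Main thread, at a safe point (e.g. GC prologue): fold it back in.
  void account_concurrently_freed() {
    external_memory_ -= concurrently_freed_.exchange(0);
  }

 private:
  int64_t external_memory_ = 0;
  std::atomic<intptr_t> concurrently_freed_{0};
};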
 
   void DeoptMarkedAllocationSites();
@@ -826,6 +865,8 @@
 
   DependentCode* LookupWeakObjectToCodeDependency(Handle<HeapObject> obj);
 
+  void CompactWeakFixedArrays();
+
   void AddRetainedMap(Handle<Map> map);
 
   // This event is triggered after successful allocation of a new object made
@@ -1168,6 +1209,13 @@
                          const char** object_sub_type);
 
   // ===========================================================================
+  // Code statistics. ==========================================================
+  // ===========================================================================
+
+  // Collect code (Code and BytecodeArray objects) statistics.
+  void CollectCodeStatistics();
+
+  // ===========================================================================
   // GC statistics. ============================================================
   // ===========================================================================
 
@@ -1342,10 +1390,6 @@
   void RegisterNewArrayBuffer(JSArrayBuffer* buffer);
   void UnregisterArrayBuffer(JSArrayBuffer* buffer);
 
-  inline ArrayBufferTracker* array_buffer_tracker() {
-    return array_buffer_tracker_;
-  }
-
   // ===========================================================================
   // Allocation site tracking. =================================================
   // ===========================================================================
@@ -1357,7 +1401,7 @@
   // value) is cached on the local pretenuring feedback.
   template <UpdateAllocationSiteMode mode>
   inline void UpdateAllocationSite(HeapObject* object,
-                                   HashMap* pretenuring_feedback);
+                                   base::HashMap* pretenuring_feedback);
 
   // Removes an entry from the global pretenuring storage.
   inline void RemoveAllocationSitePretenuringFeedback(AllocationSite* site);
@@ -1366,7 +1410,7 @@
   // method needs to be called after evacuation, as allocation sites may be
   // evacuated and this method resolves forward pointers accordingly.
   void MergeAllocationSitePretenuringFeedback(
-      const HashMap& local_pretenuring_feedback);
+      const base::HashMap& local_pretenuring_feedback);
 
 // =============================================================================
 
@@ -1683,7 +1727,8 @@
   // Performs a minor collection in new generation.
   void Scavenge();
 
-  Address DoScavenge(ObjectVisitor* scavenge_visitor, Address new_space_front);
+  Address DoScavenge(ObjectVisitor* scavenge_visitor, Address new_space_front,
+                     PromotionMode promotion_mode);
 
   void UpdateNewSpaceReferencesInExternalStringTable(
       ExternalStringTableUpdaterCallback updater_func);
@@ -1808,11 +1853,6 @@
   AllocateBytecodeArray(int length, const byte* raw_bytecodes, int frame_size,
                         int parameter_count, FixedArray* constant_pool);
 
-  // Copy the code and scope info part of the code object, but insert
-  // the provided data as the relocation information.
-  MUST_USE_RESULT AllocationResult CopyCode(Code* code,
-                                            Vector<byte> reloc_info);
-
   MUST_USE_RESULT AllocationResult CopyCode(Code* code);
 
   MUST_USE_RESULT AllocationResult
@@ -1972,12 +2012,17 @@
 
   void set_force_oom(bool value) { force_oom_ = value; }
 
-  // The amount of external memory registered through the API kept alive
-  // by global handles
-  int64_t amount_of_external_allocated_memory_;
+  // The amount of external memory registered through the API.
+  int64_t external_memory_;
 
-  // Caches the amount of external memory registered at the last global gc.
-  int64_t amount_of_external_allocated_memory_at_last_global_gc_;
+  // The limit at which to trigger memory pressure from the API.
+  int64_t external_memory_limit_;
+
+  // Caches the amount of external memory registered at the last MC.
+  int64_t external_memory_at_last_mark_compact_;
+
+  // The amount of memory that has been freed concurrently.
+  base::AtomicNumber<intptr_t> external_memory_concurrently_freed_;
 
   // This can be calculated directly from a pointer to the heap; however, it is
   // more expedient to get at the isolate directly from within Heap methods.
@@ -2184,7 +2229,7 @@
   // storage is only alive temporary during a GC. The invariant is that all
   // pointers in this map are already fixed, i.e., they do not point to
   // forwarding pointers.
-  HashMap* global_pretenuring_feedback_;
+  base::HashMap* global_pretenuring_feedback_;
 
   char trace_ring_buffer_[kTraceRingBufferSize];
   // If it's not full then the data is from 0 to ring_buffer_end_.  If it's
@@ -2217,8 +2262,6 @@
 
   StrongRootsList* strong_roots_list_;
 
-  ArrayBufferTracker* array_buffer_tracker_;
-
   // The depth of HeapIterator nestings.
   int heap_iterator_depth_;
 
@@ -2236,7 +2279,7 @@
   friend class MarkCompactCollector;
   friend class MarkCompactMarkingVisitor;
   friend class NewSpace;
-  friend class ObjectStatsVisitor;
+  friend class ObjectStatsCollector;
   friend class Page;
   friend class Scavenger;
   friend class StoreBuffer;
diff --git a/src/heap/incremental-marking.cc b/src/heap/incremental-marking.cc
index c250b90..f578d43 100644
--- a/src/heap/incremental-marking.cc
+++ b/src/heap/incremental-marking.cc
@@ -10,8 +10,9 @@
 #include "src/heap/gc-idle-time-handler.h"
 #include "src/heap/gc-tracer.h"
 #include "src/heap/mark-compact-inl.h"
-#include "src/heap/objects-visiting.h"
+#include "src/heap/object-stats.h"
 #include "src/heap/objects-visiting-inl.h"
+#include "src/heap/objects-visiting.h"
 #include "src/tracing/trace-event.h"
 #include "src/v8.h"
 
@@ -175,6 +176,9 @@
     table_.Register(kVisitFixedArray, &VisitFixedArrayIncremental);
     table_.Register(kVisitNativeContext, &VisitNativeContextIncremental);
     table_.Register(kVisitJSRegExp, &VisitJSRegExp);
+    if (FLAG_track_gc_object_stats) {
+      IncrementalMarkingObjectStatsVisitor::Initialize(&table_);
+    }
   }
 
   static const int kProgressBarScanningChunk = 32 * 1024;
@@ -231,7 +235,7 @@
     // Note that GC can happen when the context is not fully initialized,
     // so the cache can be undefined.
     Object* cache = context->get(Context::NORMALIZED_MAP_CACHE_INDEX);
-    if (!cache->IsUndefined()) {
+    if (!cache->IsUndefined(map->GetIsolate())) {
       MarkObjectGreyDoNotEnqueue(cache);
     }
     VisitNativeContext(map, context);
@@ -341,9 +345,7 @@
 
 void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
     PagedSpace* space) {
-  PageIterator it(space);
-  while (it.has_next()) {
-    Page* p = it.next();
+  for (Page* p : *space) {
     SetOldSpacePageFlags(p, false, false);
   }
 }
@@ -351,9 +353,7 @@
 
 void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
     NewSpace* space) {
-  NewSpacePageIterator it(space);
-  while (it.has_next()) {
-    Page* p = it.next();
+  for (Page* p : *space) {
     SetNewSpacePageFlags(p, false);
   }
 }
@@ -365,27 +365,21 @@
   DeactivateIncrementalWriteBarrierForSpace(heap_->code_space());
   DeactivateIncrementalWriteBarrierForSpace(heap_->new_space());
 
-  LargePage* lop = heap_->lo_space()->first_page();
-  while (LargePage::IsValid(lop)) {
+  for (LargePage* lop : *heap_->lo_space()) {
     SetOldSpacePageFlags(lop, false, false);
-    lop = lop->next_page();
   }
 }
 
 
 void IncrementalMarking::ActivateIncrementalWriteBarrier(PagedSpace* space) {
-  PageIterator it(space);
-  while (it.has_next()) {
-    Page* p = it.next();
+  for (Page* p : *space) {
     SetOldSpacePageFlags(p, true, is_compacting_);
   }
 }
 
 
 void IncrementalMarking::ActivateIncrementalWriteBarrier(NewSpace* space) {
-  NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
-  while (it.has_next()) {
-    Page* p = it.next();
+  for (Page* p : *space) {
     SetNewSpacePageFlags(p, true);
   }
 }
@@ -397,10 +391,8 @@
   ActivateIncrementalWriteBarrier(heap_->code_space());
   ActivateIncrementalWriteBarrier(heap_->new_space());
 
-  LargePage* lop = heap_->lo_space()->first_page();
-  while (LargePage::IsValid(lop)) {
+  for (LargePage* lop : *heap_->lo_space()) {
     SetOldSpacePageFlags(lop, true, is_compacting_);
-    lop = lop->next_page();
   }
 }
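These hunks replace explicit PageIterator/NewSpacePageIterator loops with range-based for over the space, which only requires begin()/end() yielding Page*. A minimal sketch of that iteration protocol; DemoPage and DemoSpace are stand-ins, not the real space classes:

struct DemoPage { DemoPage* next = nullptr; };

class DemoPageIterator {
 public:
  explicit DemoPageIterator(DemoPage* p) : p_(p) {}
  DemoPage* operator*() const { return p_; }
  DemoPageIterator& operator++() { p_ = p_->next; return *this; }
  bool operator!=(const DemoPageIterator& other) const {
    return p_ != other.p_;
  }
 private:
  DemoPage* p_;
};

class DemoSpace {
 public:
  explicit DemoSpace(DemoPage* first) : first_(first) {}
  DemoPageIterator begin() { return DemoPageIterator(first_); }
  DemoPageIterator end() { return DemoPageIterator(nullptr); }
 private:
  DemoPage* first_;
};

// Usage mirrors the loops above: for (DemoPage* p : space) { ... }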
 
@@ -469,9 +461,10 @@
   UnseededNumberDictionary* stubs = heap->code_stubs();
 
   int capacity = stubs->Capacity();
+  Isolate* isolate = heap->isolate();
   for (int i = 0; i < capacity; i++) {
     Object* k = stubs->KeyAt(i);
-    if (stubs->IsKey(k)) {
+    if (stubs->IsKey(isolate, k)) {
       uint32_t key = NumberToUint32(k);
 
       if (CodeStub::MajorKeyFromKey(key) == CodeStub::RecordWrite) {
@@ -537,6 +530,10 @@
 
   state_ = MARKING;
 
+  if (heap_->UsingEmbedderHeapTracer()) {
+    heap_->mark_compact_collector()->embedder_heap_tracer()->TracePrologue();
+  }
+
   RecordWriteStub::Mode mode = is_compacting_
                                    ? RecordWriteStub::INCREMENTAL_COMPACTION
                                    : RecordWriteStub::INCREMENTAL;
@@ -930,12 +927,12 @@
   }
 
   Object* context = heap_->native_contexts_list();
-  while (!context->IsUndefined()) {
+  while (!context->IsUndefined(heap_->isolate())) {
     // GC can happen when the context is not fully initialized,
     // so the cache can be undefined.
     HeapObject* cache = HeapObject::cast(
         Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX));
-    if (!cache->IsUndefined()) {
+    if (!cache->IsUndefined(heap_->isolate())) {
       MarkBit mark_bit = Marking::MarkBitFrom(cache);
       if (Marking::IsGrey(mark_bit)) {
         Marking::GreyToBlack(mark_bit);
diff --git a/src/heap/incremental-marking.h b/src/heap/incremental-marking.h
index 9c5a3b5..5f5a1de 100644
--- a/src/heap/incremental-marking.h
+++ b/src/heap/incremental-marking.h
@@ -215,6 +215,8 @@
 
   bool black_allocation() { return black_allocation_; }
 
+  void StartBlackAllocationForTesting() { StartBlackAllocation(); }
+
  private:
   class Observer : public AllocationObserver {
    public:
diff --git a/src/heap/mark-compact-inl.h b/src/heap/mark-compact-inl.h
index 455f443..8ecdd62 100644
--- a/src/heap/mark-compact-inl.h
+++ b/src/heap/mark-compact-inl.h
@@ -79,7 +79,7 @@
 
 void CodeFlusher::AddCandidate(JSFunction* function) {
   DCHECK(function->code() == function->shared()->code());
-  if (function->next_function_link()->IsUndefined()) {
+  if (function->next_function_link()->IsUndefined(isolate_)) {
     SetNextCandidate(function, jsfunction_candidates_head_);
     jsfunction_candidates_head_ = function;
   }
@@ -105,7 +105,7 @@
 
 
 void CodeFlusher::ClearNextCandidate(JSFunction* candidate, Object* undefined) {
-  DCHECK(undefined->IsUndefined());
+  DCHECK(undefined->IsUndefined(candidate->GetIsolate()));
   candidate->set_next_function_link(undefined, SKIP_WRITE_BARRIER);
 }
 
diff --git a/src/heap/mark-compact.cc b/src/heap/mark-compact.cc
index b2ae93d..f9a55df 100644
--- a/src/heap/mark-compact.cc
+++ b/src/heap/mark-compact.cc
@@ -25,7 +25,6 @@
 #include "src/heap/spaces-inl.h"
 #include "src/ic/ic.h"
 #include "src/ic/stub-cache.h"
-#include "src/profiler/cpu-profiler.h"
 #include "src/utils-inl.h"
 #include "src/v8.h"
 
@@ -132,13 +131,14 @@
 
 static void VerifyMarking(NewSpace* space) {
   Address end = space->top();
-  NewSpacePageIterator it(space->bottom(), end);
   // The bottom position is at the start of its page. Allows us to use
   // page->area_start() as start of range on all pages.
   CHECK_EQ(space->bottom(), Page::FromAddress(space->bottom())->area_start());
-  while (it.has_next()) {
-    Page* page = it.next();
-    Address limit = it.has_next() ? page->area_end() : end;
+
+  NewSpacePageRange range(space->bottom(), end);
+  for (auto it = range.begin(); it != range.end();) {
+    Page* page = *(it++);
+    Address limit = it != range.end() ? page->area_end() : end;
     CHECK(limit == end || !page->Contains(end));
     VerifyMarking(space->heap(), page->area_start(), limit);
   }
@@ -146,10 +146,7 @@
 
 
 static void VerifyMarking(PagedSpace* space) {
-  PageIterator it(space);
-
-  while (it.has_next()) {
-    Page* p = it.next();
+  for (Page* p : *space) {
     if (p->IsFlagSet(Page::BLACK_PAGE)) {
       VerifyMarkingBlackPage(space->heap(), p);
     } else {
@@ -205,13 +202,12 @@
 
 
 static void VerifyEvacuation(NewSpace* space) {
-  NewSpacePageIterator it(space->bottom(), space->top());
   VerifyEvacuationVisitor visitor;
-
-  while (it.has_next()) {
-    Page* page = it.next();
+  NewSpacePageRange range(space->bottom(), space->top());
+  for (auto it = range.begin(); it != range.end();) {
+    Page* page = *(it++);
     Address current = page->area_start();
-    Address limit = it.has_next() ? page->area_end() : space->top();
+    Address limit = it != range.end() ? page->area_end() : space->top();
     CHECK(limit == space->top() || !page->Contains(space->top()));
     while (current < limit) {
       HeapObject* object = HeapObject::FromAddress(current);
@@ -226,10 +222,7 @@
   if (FLAG_use_allocation_folding && (space == heap->old_space())) {
     return;
   }
-  PageIterator it(space);
-
-  while (it.has_next()) {
-    Page* p = it.next();
+  for (Page* p : *space) {
     if (p->IsEvacuationCandidate()) continue;
     VerifyEvacuation(p);
   }
@@ -361,10 +354,7 @@
 
 #ifdef VERIFY_HEAP
 void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
-  PageIterator it(space);
-
-  while (it.has_next()) {
-    Page* p = it.next();
+  for (Page* p : *space) {
     CHECK(p->markbits()->IsClean());
     CHECK_EQ(0, p->LiveBytes());
   }
@@ -372,10 +362,7 @@
 
 
 void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
-  NewSpacePageIterator it(space->bottom(), space->top());
-
-  while (it.has_next()) {
-    Page* p = it.next();
+  for (Page* p : NewSpacePageRange(space->bottom(), space->top())) {
     CHECK(p->markbits()->IsClean());
     CHECK_EQ(0, p->LiveBytes());
   }
@@ -420,10 +407,7 @@
 
 
 static void ClearMarkbitsInPagedSpace(PagedSpace* space) {
-  PageIterator it(space);
-
-  while (it.has_next()) {
-    Page* p = it.next();
+  for (Page* p : *space) {
     Bitmap::Clear(p);
     if (p->IsFlagSet(Page::BLACK_PAGE)) {
       p->ClearFlag(Page::BLACK_PAGE);
@@ -433,10 +417,8 @@
 
 
 static void ClearMarkbitsInNewSpace(NewSpace* space) {
-  NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
-
-  while (it.has_next()) {
-    Bitmap::Clear(it.next());
+  for (Page* page : *space) {
+    Bitmap::Clear(page);
   }
 }
 
@@ -472,13 +454,13 @@
  private:
   // v8::Task overrides.
   void Run() override {
-    DCHECK_GE(space_to_start_, FIRST_PAGED_SPACE);
+    DCHECK_GE(space_to_start_, FIRST_SPACE);
     DCHECK_LE(space_to_start_, LAST_PAGED_SPACE);
-    const int offset = space_to_start_ - FIRST_PAGED_SPACE;
-    const int num_spaces = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
+    const int offset = space_to_start_ - FIRST_SPACE;
+    const int num_spaces = LAST_PAGED_SPACE - FIRST_SPACE + 1;
     for (int i = 0; i < num_spaces; i++) {
-      const int space_id = FIRST_PAGED_SPACE + ((i + offset) % num_spaces);
-      DCHECK_GE(space_id, FIRST_PAGED_SPACE);
+      const int space_id = FIRST_SPACE + ((i + offset) % num_spaces);
+      DCHECK_GE(space_id, FIRST_SPACE);
       DCHECK_LE(space_id, LAST_PAGED_SPACE);
       sweeper_->ParallelSweepSpace(static_cast<AllocationSpace>(space_id), 0);
     }
@@ -508,7 +490,7 @@
 
 void MarkCompactCollector::Sweeper::StartSweepingHelper(
     AllocationSpace space_to_start) {
-  num_sweeping_tasks_++;
+  num_sweeping_tasks_.Increment(1);
   V8::GetCurrentPlatform()->CallOnBackgroundThread(
       new SweeperTask(this, &pending_sweeper_tasks_semaphore_, space_to_start),
       v8::Platform::kShortRunningTask);
@@ -516,9 +498,8 @@
 
 void MarkCompactCollector::Sweeper::SweepOrWaitUntilSweepingCompleted(
     Page* page) {
-  PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner());
   if (!page->SweepingDone()) {
-    ParallelSweepPage(page, owner);
+    ParallelSweepPage(page, page->owner()->identity());
     if (!page->SweepingDone()) {
       // We were not able to sweep that page, i.e., a concurrent
       // sweeper thread currently owns this page. Wait for the sweeper
@@ -555,18 +536,31 @@
   }
 
   if (FLAG_concurrent_sweeping) {
-    while (num_sweeping_tasks_ > 0) {
+    while (num_sweeping_tasks_.Value() > 0) {
       pending_sweeper_tasks_semaphore_.Wait();
-      num_sweeping_tasks_--;
+      num_sweeping_tasks_.Increment(-1);
     }
   }
 
-  ForAllSweepingSpaces(
-      [this](AllocationSpace space) { DCHECK(sweeping_list_[space].empty()); });
+  ForAllSweepingSpaces([this](AllocationSpace space) {
+    if (space == NEW_SPACE) {
+      swept_list_[NEW_SPACE].Clear();
+    }
+    DCHECK(sweeping_list_[space].empty());
+  });
   late_pages_ = false;
   sweeping_in_progress_ = false;
 }
 
+void MarkCompactCollector::Sweeper::EnsureNewSpaceCompleted() {
+  if (!sweeping_in_progress_) return;
+  if (!FLAG_concurrent_sweeping || !IsSweepingCompleted()) {
+    for (Page* p : *heap_->new_space()) {
+      SweepOrWaitUntilSweepingCompleted(p);
+    }
+  }
+}
+
 void MarkCompactCollector::EnsureSweepingCompleted() {
   if (!sweeper().sweeping_in_progress()) return;
 
@@ -583,12 +577,11 @@
 }
 
 bool MarkCompactCollector::Sweeper::IsSweepingCompleted() {
-  if (!pending_sweeper_tasks_semaphore_.WaitFor(
-          base::TimeDelta::FromSeconds(0))) {
-    return false;
+  while (pending_sweeper_tasks_semaphore_.WaitFor(
+      base::TimeDelta::FromSeconds(0))) {
+    num_sweeping_tasks_.Increment(-1);
   }
-  pending_sweeper_tasks_semaphore_.Signal();
-  return true;
+  return num_sweeping_tasks_.Value() == 0;
 }
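IsSweepingCompleted now drains the semaphore with a zero timeout, decrementing the atomic task counter once per completed signal, and reports completion only when the counter reaches zero, so a signal consumed here is no longer lost to the blocking wait above. A sketch under the assumption that std::counting_semaphore can stand in for base::Semaphore:

#include <atomic>
#include <chrono>
#include <semaphore>

bool IsSweepingCompletedDemo(std::counting_semaphore<>& pending_tasks,
                             std::atomic<int>& num_sweeping_tasks) {
  // Consume one pending signal per finished sweeper task.
  while (pending_tasks.try_acquire_for(std::chrono::seconds(0))) {
    num_sweeping_tasks.fetch_sub(1);
  }
  return num_sweeping_tasks.load() == 0;
}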
 
 void Marking::TransferMark(Heap* heap, Address old_start, Address new_start) {
@@ -703,9 +696,7 @@
   std::vector<LiveBytesPagePair> pages;
   pages.reserve(number_of_pages);
 
-  PageIterator it(space);
-  while (it.has_next()) {
-    Page* p = it.next();
+  for (Page* p : *space) {
     if (p->NeverEvacuate()) continue;
     if (p->IsFlagSet(Page::BLACK_PAGE)) continue;
     // Invariant: Evacuation candidates are just created when marking is
@@ -858,9 +849,22 @@
     AbortWeakCells();
     AbortTransitionArrays();
     AbortCompaction();
+    if (heap_->UsingEmbedderHeapTracer()) {
+      heap_->mark_compact_collector()->embedder_heap_tracer()->AbortTracing();
+    }
     was_marked_incrementally_ = false;
   }
 
+  if (!was_marked_incrementally_) {
+    if (heap_->UsingEmbedderHeapTracer()) {
+      heap_->mark_compact_collector()->embedder_heap_tracer()->TracePrologue();
+    }
+  }
+
+  if (UsingEmbedderHeapTracer()) {
+    embedder_heap_tracer()->EnterFinalPause();
+  }
+
   // Don't start compaction if we are in the middle of incremental
   // marking cycle. We did not collect any slots.
   if (!FLAG_never_compact && !was_marked_incrementally_) {
@@ -872,6 +876,7 @@
        space = spaces.next()) {
     space->PrepareForMarkCompact();
   }
+  heap()->account_external_memory_concurrently_freed();
 
 #ifdef VERIFY_HEAP
   if (!was_marked_incrementally_ && FLAG_verify_heap) {
@@ -1074,7 +1079,7 @@
 
 
 void CodeFlusher::EvictCandidate(JSFunction* function) {
-  DCHECK(!function->next_function_link()->IsUndefined());
+  DCHECK(!function->next_function_link()->IsUndefined(isolate_));
   Object* undefined = isolate_->heap()->undefined_value();
 
   // Make sure previous flushing decisions are revisited.
@@ -1299,7 +1304,7 @@
   table_.Register(kVisitJSRegExp, &VisitRegExpAndFlushCode);
 
   if (FLAG_track_gc_object_stats) {
-    ObjectStatsVisitor::Initialize(&table_);
+    MarkCompactObjectStatsVisitor::Initialize(&table_);
   }
 }
 
@@ -1408,6 +1413,8 @@
 
     HeapObject* object = HeapObject::cast(*p);
 
+    if (collector_->heap()->PurgeLeftTrimmedObject(p)) return;
+
     MarkBit mark_bit = Marking::MarkBitFrom(object);
     if (Marking::IsBlackOrGrey(mark_bit)) return;
 
@@ -1535,6 +1542,9 @@
 
 class RecordMigratedSlotVisitor final : public ObjectVisitor {
  public:
+  explicit RecordMigratedSlotVisitor(MarkCompactCollector* collector)
+      : collector_(collector) {}
+
   inline void VisitPointer(Object** p) final {
     RecordMigratedSlot(*p, reinterpret_cast<Address>(p));
   }
@@ -1550,10 +1560,59 @@
     Address code_entry = Memory::Address_at(code_entry_slot);
     if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
       RememberedSet<OLD_TO_OLD>::InsertTyped(Page::FromAddress(code_entry_slot),
-                                             CODE_ENTRY_SLOT, code_entry_slot);
+                                             nullptr, CODE_ENTRY_SLOT,
+                                             code_entry_slot);
     }
   }
 
+  inline void VisitCodeTarget(RelocInfo* rinfo) final {
+    DCHECK(RelocInfo::IsCodeTarget(rinfo->rmode()));
+    Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+    Code* host = rinfo->host();
+    collector_->RecordRelocSlot(host, rinfo, target);
+  }
+
+  inline void VisitDebugTarget(RelocInfo* rinfo) final {
+    DCHECK(RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
+           rinfo->IsPatchedDebugBreakSlotSequence());
+    Code* target = Code::GetCodeFromTargetAddress(rinfo->debug_call_address());
+    Code* host = rinfo->host();
+    collector_->RecordRelocSlot(host, rinfo, target);
+  }
+
+  inline void VisitEmbeddedPointer(RelocInfo* rinfo) final {
+    DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
+    HeapObject* object = HeapObject::cast(rinfo->target_object());
+    Code* host = rinfo->host();
+    collector_->RecordRelocSlot(host, rinfo, object);
+  }
+
+  inline void VisitCell(RelocInfo* rinfo) final {
+    DCHECK(rinfo->rmode() == RelocInfo::CELL);
+    Cell* cell = rinfo->target_cell();
+    Code* host = rinfo->host();
+    collector_->RecordRelocSlot(host, rinfo, cell);
+  }
+
+  // Entries that will never move.
+  inline void VisitCodeAgeSequence(RelocInfo* rinfo) final {
+    DCHECK(RelocInfo::IsCodeAgeSequence(rinfo->rmode()));
+    Code* stub = rinfo->code_age_stub();
+    USE(stub);
+    DCHECK(!Page::FromAddress(stub->address())->IsEvacuationCandidate());
+  }
+
+  // Entries that are skipped for recording.
+  inline void VisitExternalReference(RelocInfo* rinfo) final {}
+  inline void VisitExternalReference(Address* p) final {}
+  inline void VisitRuntimeEntry(RelocInfo* rinfo) final {}
+  inline void VisitExternalOneByteString(
+      v8::String::ExternalOneByteStringResource** resource) final {}
+  inline void VisitExternalTwoByteString(
+      v8::String::ExternalStringResource** resource) final {}
+  inline void VisitInternalReference(RelocInfo* rinfo) final {}
+  inline void VisitEmbedderReference(Object** p, uint16_t class_id) final {}
+
  private:
   inline void RecordMigratedSlot(Object* value, Address slot) {
     if (value->IsHeapObject()) {
@@ -1565,6 +1624,8 @@
       }
     }
   }
+
+  MarkCompactCollector* collector_;
 };
 
 class MarkCompactCollector::HeapObjectVisitor {
@@ -1582,12 +1643,15 @@
       : heap_(heap),
         compaction_spaces_(compaction_spaces),
         profiling_(
-            heap->isolate()->cpu_profiler()->is_profiling() ||
+            heap->isolate()->is_profiling() ||
             heap->isolate()->logger()->is_logging_code_events() ||
             heap->isolate()->heap_profiler()->is_tracking_object_moves()) {}
 
   inline bool TryEvacuateObject(PagedSpace* target_space, HeapObject* object,
                                 HeapObject** target_object) {
+#ifdef VERIFY_HEAP
+    if (AbortCompactionForTesting(object)) return false;
+#endif  // VERIFY_HEAP
     int size = object->Size();
     AllocationAlignment alignment = object->RequiredAlignment();
     AllocationResult allocation = target_space->AllocateRaw(size, alignment);
@@ -1622,7 +1686,7 @@
         PROFILE(heap_->isolate(),
                 CodeMoveEvent(AbstractCode::cast(src), dst_addr));
       }
-      RecordMigratedSlotVisitor visitor;
+      RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector());
       dst->IterateBodyFast(dst->map()->instance_type(), size, &visitor);
     } else if (dest == CODE_SPACE) {
       DCHECK_CODEOBJECT_SIZE(size, heap_->code_space());
@@ -1631,9 +1695,9 @@
                 CodeMoveEvent(AbstractCode::cast(src), dst_addr));
       }
       heap_->CopyBlock(dst_addr, src_addr, size);
-      RememberedSet<OLD_TO_OLD>::InsertTyped(Page::FromAddress(dst_addr),
-                                             RELOCATED_CODE_OBJECT, dst_addr);
       Code::cast(dst)->Relocate(dst_addr - src_addr);
+      RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector());
+      dst->IterateBodyFast(dst->map()->instance_type(), size, &visitor);
     } else {
       DCHECK_OBJECT_SIZE(size);
       DCHECK(dest == NEW_SPACE);
@@ -1645,6 +1709,26 @@
     Memory::Address_at(src_addr) = dst_addr;
   }
 
+#ifdef VERIFY_HEAP
+  bool AbortCompactionForTesting(HeapObject* object) {
+    if (FLAG_stress_compaction) {
+      const uintptr_t mask = static_cast<uintptr_t>(FLAG_random_seed) &
+                             Page::kPageAlignmentMask & ~kPointerAlignmentMask;
+      if ((reinterpret_cast<uintptr_t>(object->address()) &
+           Page::kPageAlignmentMask) == mask) {
+        Page* page = Page::FromAddress(object->address());
+        if (page->IsFlagSet(Page::COMPACTION_WAS_ABORTED_FOR_TESTING)) {
+          page->ClearFlag(Page::COMPACTION_WAS_ABORTED_FOR_TESTING);
+        } else {
+          page->SetFlag(Page::COMPACTION_WAS_ABORTED_FOR_TESTING);
+          return true;
+        }
+      }
+    }
+    return false;
+  }
+#endif  // VERIFY_HEAP
+
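AbortCompactionForTesting derives one abort address per page from FLAG_random_seed: masking with kPageAlignmentMask yields an offset valid in every page, and clearing the pointer-alignment bits keeps it object-aligned. A worked sketch with example constants; the real values come from Page and the pointer-alignment globals:

#include <cstdint>

constexpr uintptr_t kDemoPageAlignmentMask =
    (uintptr_t{1} << 19) - 1;  // Example: 512 KB pages.
constexpr uintptr_t kDemoPointerAlignmentMask = sizeof(void*) - 1;

constexpr uintptr_t DemoAbortMask(uintptr_t random_seed) {
  return random_seed & kDemoPageAlignmentMask & ~kDemoPointerAlignmentMask;
}

// An object whose page offset equals DemoAbortMask(seed) has its
// evacuation aborted on every other visit, toggling the
// COMPACTION_WAS_ABORTED_FOR_TESTING flag on its page in between.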
   Heap* heap_;
   CompactionSpaceCollection* compaction_spaces_;
   bool profiling_;
@@ -1658,7 +1742,7 @@
 
   explicit EvacuateNewSpaceVisitor(Heap* heap,
                                    CompactionSpaceCollection* compaction_spaces,
-                                   HashMap* local_pretenuring_feedback)
+                                   base::HashMap* local_pretenuring_feedback)
       : EvacuateVisitorBase(heap, compaction_spaces),
         buffer_(LocalAllocationBuffer::InvalidBuffer()),
         space_to_allocate_(NEW_SPACE),
@@ -1671,23 +1755,15 @@
                                                local_pretenuring_feedback_);
     int size = object->Size();
     HeapObject* target_object = nullptr;
-    if (heap_->ShouldBePromoted(object->address(), size) &&
+    if (heap_->ShouldBePromoted<DEFAULT_PROMOTION>(object->address(), size) &&
         TryEvacuateObject(compaction_spaces_->Get(OLD_SPACE), object,
                           &target_object)) {
-      // If we end up needing more special cases, we should factor this out.
-      if (V8_UNLIKELY(target_object->IsJSArrayBuffer())) {
-        heap_->array_buffer_tracker()->Promote(
-            JSArrayBuffer::cast(target_object));
-      }
       promoted_size_ += size;
       return true;
     }
     HeapObject* target = nullptr;
     AllocationSpace space = AllocateTargetObject(object, &target);
     MigrateObject(HeapObject::cast(target), object, size, space);
-    if (V8_UNLIKELY(target->IsJSArrayBuffer())) {
-      heap_->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(target));
-    }
     semispace_copied_size_ += size;
     return true;
   }
@@ -1706,6 +1782,7 @@
     const int size = old_object->Size();
     AllocationAlignment alignment = old_object->RequiredAlignment();
     AllocationResult allocation;
+    AllocationSpace space_allocated_in = space_to_allocate_;
     if (space_to_allocate_ == NEW_SPACE) {
       if (size > kMaxLabObjectSize) {
         allocation =
@@ -1716,11 +1793,12 @@
     }
     if (allocation.IsRetry() || (space_to_allocate_ == OLD_SPACE)) {
       allocation = AllocateInOldSpace(size, alignment);
+      space_allocated_in = OLD_SPACE;
     }
     bool ok = allocation.To(target_object);
     DCHECK(ok);
     USE(ok);
-    return space_to_allocate_;
+    return space_allocated_in;
   }
 
   inline bool NewLocalAllocationBuffer() {
@@ -1795,36 +1873,44 @@
   AllocationSpace space_to_allocate_;
   intptr_t promoted_size_;
   intptr_t semispace_copied_size_;
-  HashMap* local_pretenuring_feedback_;
+  base::HashMap* local_pretenuring_feedback_;
 };
 
 class MarkCompactCollector::EvacuateNewSpacePageVisitor final
     : public MarkCompactCollector::HeapObjectVisitor {
  public:
-  EvacuateNewSpacePageVisitor() : promoted_size_(0) {}
+  explicit EvacuateNewSpacePageVisitor(Heap* heap)
+      : heap_(heap), promoted_size_(0), semispace_copied_size_(0) {}
 
-  static void TryMoveToOldSpace(Page* page, PagedSpace* owner) {
-    if (page->heap()->new_space()->ReplaceWithEmptyPage(page)) {
-      Page* new_page = Page::ConvertNewToOld(page, owner);
-      new_page->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
-    }
+  static void MoveToOldSpace(Page* page, PagedSpace* owner) {
+    page->Unlink();
+    Page* new_page = Page::ConvertNewToOld(page, owner);
+    new_page->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
+  }
+
+  static void MoveToToSpace(Page* page) {
+    page->heap()->new_space()->MovePageFromSpaceToSpace(page);
+    page->SetFlag(Page::PAGE_NEW_NEW_PROMOTION);
   }
 
   inline bool Visit(HeapObject* object) {
-    if (V8_UNLIKELY(object->IsJSArrayBuffer())) {
-      object->GetHeap()->array_buffer_tracker()->Promote(
-          JSArrayBuffer::cast(object));
-    }
-    RecordMigratedSlotVisitor visitor;
+    RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector());
     object->IterateBodyFast(&visitor);
     promoted_size_ += object->Size();
     return true;
   }
 
   intptr_t promoted_size() { return promoted_size_; }
+  intptr_t semispace_copied_size() { return semispace_copied_size_; }
+
+  void account_semispace_copied(intptr_t copied) {
+    semispace_copied_size_ += copied;
+  }
 
  private:
+  Heap* heap_;
   intptr_t promoted_size_;
+  intptr_t semispace_copied_size_;
 };
 
 class MarkCompactCollector::EvacuateOldSpaceVisitor final
@@ -1849,30 +1935,20 @@
 class MarkCompactCollector::EvacuateRecordOnlyVisitor final
     : public MarkCompactCollector::HeapObjectVisitor {
  public:
-  explicit EvacuateRecordOnlyVisitor(AllocationSpace space) : space_(space) {}
+  explicit EvacuateRecordOnlyVisitor(Heap* heap) : heap_(heap) {}
 
   inline bool Visit(HeapObject* object) {
-    if (space_ == OLD_SPACE) {
-      RecordMigratedSlotVisitor visitor;
-      object->IterateBody(&visitor);
-    } else {
-      DCHECK_EQ(space_, CODE_SPACE);
-      // Add a typed slot for the whole code object.
-      RememberedSet<OLD_TO_OLD>::InsertTyped(
-          Page::FromAddress(object->address()), RELOCATED_CODE_OBJECT,
-          object->address());
-    }
+    RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector());
+    object->IterateBody(&visitor);
     return true;
   }
 
  private:
-  AllocationSpace space_;
+  Heap* heap_;
 };
 
 void MarkCompactCollector::DiscoverGreyObjectsInSpace(PagedSpace* space) {
-  PageIterator it(space);
-  while (it.has_next()) {
-    Page* p = it.next();
+  for (Page* p : *space) {
     if (!p->IsFlagSet(Page::BLACK_PAGE)) {
       DiscoverGreyObjectsOnPage(p);
     }
@@ -1883,9 +1959,7 @@
 
 void MarkCompactCollector::DiscoverGreyObjectsInNewSpace() {
   NewSpace* space = heap()->new_space();
-  NewSpacePageIterator it(space->bottom(), space->top());
-  while (it.has_next()) {
-    Page* page = it.next();
+  for (Page* page : NewSpacePageRange(space->bottom(), space->top())) {
     DiscoverGreyObjectsOnPage(page);
     if (marking_deque()->IsFull()) return;
   }
@@ -2052,8 +2126,10 @@
   bool work_to_do = true;
   while (work_to_do) {
     if (UsingEmbedderHeapTracer()) {
-      embedder_heap_tracer()->TraceWrappersFrom(wrappers_to_trace_);
-      wrappers_to_trace_.clear();
+      RegisterWrappersWithEmbedderHeapTracer();
+      embedder_heap_tracer()->AdvanceTracing(
+          0, EmbedderHeapTracer::AdvanceTracingActions(
+                 EmbedderHeapTracer::ForceCompletionAction::FORCE_COMPLETION));
     }
     if (!only_process_harmony_weak_collections) {
       isolate()->global_handles()->IterateObjectGroups(
@@ -2170,6 +2246,15 @@
   embedder_heap_tracer_ = tracer;
 }
 
+void MarkCompactCollector::RegisterWrappersWithEmbedderHeapTracer() {
+  DCHECK(UsingEmbedderHeapTracer());
+  if (wrappers_to_trace_.empty()) {
+    return;
+  }
+  embedder_heap_tracer()->RegisterV8References(wrappers_to_trace_);
+  wrappers_to_trace_.clear();
+}
+
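The collector now drives the embedder tracer in phases: TracePrologue when marking starts, RegisterV8References to hand over the batched wrapper pairs, AdvanceTracing to run embedder marking, EnterFinalPause before the atomic pause, and AbortTracing on cancellation. A hedged sketch of an embedder-side counterpart, mirroring only the calls visible in this patch; the exact v8::EmbedderHeapTracer signatures in this version may differ:

#include <utility>
#include <vector>

class DemoEmbedderTracer {
 public:
  void TracePrologue() { wrappers_.clear(); }

  void RegisterV8References(
      const std::vector<std::pair<void*, void*>>& refs) {
    wrappers_.insert(wrappers_.end(), refs.begin(), refs.end());
  }

  bool AdvanceTracing(double deadline_ms) {
    // Trace the embedder graph reachable from the registered wrappers;
    // a real tracer would respect deadline_ms unless forced to finish.
    wrappers_.clear();
    return true;  // Tracing done.
  }

  void EnterFinalPause() {}  // Last chance to mark before the pause.
  void AbortTracing() { wrappers_.clear(); }

 private:
  std::vector<std::pair<void*, void*>> wrappers_;
};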
 void MarkCompactCollector::TracePossibleWrapper(JSObject* js_object) {
   DCHECK(js_object->WasConstructedFromApiFunction());
   if (js_object->GetInternalFieldCount() >= 2 &&
@@ -2177,7 +2262,7 @@
       js_object->GetInternalField(0) != heap_->undefined_value() &&
       js_object->GetInternalField(1) != heap_->undefined_value()) {
     DCHECK(reinterpret_cast<intptr_t>(js_object->GetInternalField(0)) % 2 == 0);
-    wrappers_to_trace().push_back(std::pair<void*, void*>(
+    wrappers_to_trace_.push_back(std::pair<void*, void*>(
         reinterpret_cast<void*>(js_object->GetInternalField(0)),
         reinterpret_cast<void*>(js_object->GetInternalField(1))));
   }
@@ -2210,6 +2295,10 @@
     } else {
       // Abort any pending incremental activities e.g. incremental sweeping.
       incremental_marking->Stop();
+      if (FLAG_track_gc_object_stats) {
+        // Clear object stats collected during incremental marking.
+        heap()->object_stats_->ClearObjectStats();
+      }
       if (marking_deque_.in_use()) {
         marking_deque_.Uninitialize(true);
       }
@@ -2246,10 +2335,6 @@
     {
       TRACE_GC(heap()->tracer(),
                GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERAL);
-      if (UsingEmbedderHeapTracer()) {
-        embedder_heap_tracer()->TracePrologue();
-        ProcessMarkingDeque();
-      }
       ProcessEphemeralMarking(&root_visitor, false);
     }
 
@@ -2378,7 +2463,7 @@
   for (uint32_t i = 0; i < capacity; i++) {
     uint32_t key_index = table->EntryToIndex(i);
     Object* key = table->get(key_index);
-    if (!table->IsKey(key)) continue;
+    if (!table->IsKey(isolate, key)) continue;
     uint32_t value_index = table->EntryToValueIndex(i);
     Object* value = table->get(value_index);
     DCHECK(key->IsWeakCell());
@@ -2750,128 +2835,20 @@
         slot_type = OBJECT_SLOT;
       }
     }
-    RememberedSet<OLD_TO_OLD>::InsertTyped(source_page, slot_type, addr);
+    RememberedSet<OLD_TO_OLD>::InsertTyped(
+        source_page, reinterpret_cast<Address>(host), slot_type, addr);
   }
 }
 
-static inline void UpdateTypedSlot(Isolate* isolate, ObjectVisitor* v,
-                                   SlotType slot_type, Address addr) {
-  switch (slot_type) {
-    case CODE_TARGET_SLOT: {
-      RelocInfo rinfo(isolate, addr, RelocInfo::CODE_TARGET, 0, NULL);
-      rinfo.Visit(isolate, v);
-      break;
-    }
-    case CELL_TARGET_SLOT: {
-      RelocInfo rinfo(isolate, addr, RelocInfo::CELL, 0, NULL);
-      rinfo.Visit(isolate, v);
-      break;
-    }
-    case CODE_ENTRY_SLOT: {
-      v->VisitCodeEntry(addr);
-      break;
-    }
-    case RELOCATED_CODE_OBJECT: {
-      HeapObject* obj = HeapObject::FromAddress(addr);
-      Code::BodyDescriptor::IterateBody(obj, v);
-      break;
-    }
-    case DEBUG_TARGET_SLOT: {
-      RelocInfo rinfo(isolate, addr, RelocInfo::DEBUG_BREAK_SLOT_AT_POSITION, 0,
-                      NULL);
-      if (rinfo.IsPatchedDebugBreakSlotSequence()) rinfo.Visit(isolate, v);
-      break;
-    }
-    case EMBEDDED_OBJECT_SLOT: {
-      RelocInfo rinfo(isolate, addr, RelocInfo::EMBEDDED_OBJECT, 0, NULL);
-      rinfo.Visit(isolate, v);
-      break;
-    }
-    case OBJECT_SLOT: {
-      v->VisitPointer(reinterpret_cast<Object**>(addr));
-      break;
-    }
-    default:
-      UNREACHABLE();
-      break;
-  }
-}
+static inline SlotCallbackResult UpdateSlot(Object** slot) {
+  Object* obj = reinterpret_cast<Object*>(
+      base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
 
-
-// Visitor for updating pointers from live objects in old spaces to new space.
-// It does not expect to encounter pointers to dead objects.
-class PointersUpdatingVisitor : public ObjectVisitor {
- public:
-  explicit PointersUpdatingVisitor(Heap* heap) : heap_(heap) {}
-
-  void VisitPointer(Object** p) override { UpdatePointer(p); }
-
-  void VisitPointers(Object** start, Object** end) override {
-    for (Object** p = start; p < end; p++) UpdatePointer(p);
-  }
-
-  void VisitCell(RelocInfo* rinfo) override {
-    DCHECK(rinfo->rmode() == RelocInfo::CELL);
-    Object* cell = rinfo->target_cell();
-    Object* old_cell = cell;
-    VisitPointer(&cell);
-    if (cell != old_cell) {
-      rinfo->set_target_cell(reinterpret_cast<Cell*>(cell));
-    }
-  }
-
-  void VisitEmbeddedPointer(RelocInfo* rinfo) override {
-    DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
-    Object* target = rinfo->target_object();
-    Object* old_target = target;
-    VisitPointer(&target);
-    // Avoid unnecessary changes that might unnecessary flush the instruction
-    // cache.
-    if (target != old_target) {
-      rinfo->set_target_object(target);
-    }
-  }
-
-  void VisitCodeTarget(RelocInfo* rinfo) override {
-    DCHECK(RelocInfo::IsCodeTarget(rinfo->rmode()));
-    Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
-    Object* old_target = target;
-    VisitPointer(&target);
-    if (target != old_target) {
-      rinfo->set_target_address(Code::cast(target)->instruction_start());
-    }
-  }
-
-  void VisitCodeAgeSequence(RelocInfo* rinfo) override {
-    DCHECK(RelocInfo::IsCodeAgeSequence(rinfo->rmode()));
-    Object* stub = rinfo->code_age_stub();
-    DCHECK(stub != NULL);
-    VisitPointer(&stub);
-    if (stub != rinfo->code_age_stub()) {
-      rinfo->set_code_age_stub(Code::cast(stub));
-    }
-  }
-
-  void VisitDebugTarget(RelocInfo* rinfo) override {
-    DCHECK(RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
-           rinfo->IsPatchedDebugBreakSlotSequence());
-    Object* target =
-        Code::GetCodeFromTargetAddress(rinfo->debug_call_address());
-    VisitPointer(&target);
-    rinfo->set_debug_call_address(Code::cast(target)->instruction_start());
-  }
-
-  static inline void UpdateSlot(Heap* heap, Object** slot) {
-    Object* obj = reinterpret_cast<Object*>(
-        base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
-
-    if (!obj->IsHeapObject()) return;
-
+  if (obj->IsHeapObject()) {
     HeapObject* heap_obj = HeapObject::cast(obj);
-
     MapWord map_word = heap_obj->map_word();
     if (map_word.IsForwardingAddress()) {
-      DCHECK(heap->InFromSpace(heap_obj) ||
+      DCHECK(heap_obj->GetHeap()->InFromSpace(heap_obj) ||
              MarkCompactCollector::IsOnEvacuationCandidate(heap_obj) ||
              Page::FromAddress(heap_obj->address())
                  ->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
@@ -2880,15 +2857,42 @@
           reinterpret_cast<base::AtomicWord*>(slot),
           reinterpret_cast<base::AtomicWord>(obj),
           reinterpret_cast<base::AtomicWord>(target));
-      DCHECK(!heap->InFromSpace(target) &&
+      DCHECK(!heap_obj->GetHeap()->InFromSpace(target) &&
              !MarkCompactCollector::IsOnEvacuationCandidate(target));
     }
   }
+  return REMOVE_SLOT;
+}
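
The lock-free update above follows a read/forward/compare-and-swap shape. A minimal stand-alone sketch of that shape, assuming invented stand-in types and std::atomic in place of V8's base atomics:

#include <atomic>

struct Obj {
  // In V8 the map word of an evacuated object doubles as its forwarding
  // address; this explicit field is a simplifying assumption.
  Obj* forwarded = nullptr;
};

void UpdateSlotSketch(std::atomic<Obj*>* slot) {
  Obj* obj = slot->load(std::memory_order_relaxed);
  if (obj == nullptr || obj->forwarded == nullptr) return;
  Obj* target = obj->forwarded;
  // Write the forwarded address only if no other thread has updated the
  // slot since we read it, mirroring the compare-and-swap above.
  slot->compare_exchange_strong(obj, target, std::memory_order_release,
                                std::memory_order_relaxed);
}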
 
- private:
-  inline void UpdatePointer(Object** p) { UpdateSlot(heap_, p); }
+// Visitor for updating pointers from live objects in old spaces to new space.
+// It does not expect to encounter pointers to dead objects.
+class PointersUpdatingVisitor : public ObjectVisitor {
+ public:
+  void VisitPointer(Object** p) override { UpdateSlot(p); }
 
-  Heap* heap_;
+  void VisitPointers(Object** start, Object** end) override {
+    for (Object** p = start; p < end; p++) UpdateSlot(p);
+  }
+
+  void VisitCell(RelocInfo* rinfo) override {
+    UpdateTypedSlotHelper::UpdateCell(rinfo, UpdateSlot);
+  }
+
+  void VisitEmbeddedPointer(RelocInfo* rinfo) override {
+    UpdateTypedSlotHelper::UpdateEmbeddedPointer(rinfo, UpdateSlot);
+  }
+
+  void VisitCodeTarget(RelocInfo* rinfo) override {
+    UpdateTypedSlotHelper::UpdateCodeTarget(rinfo, UpdateSlot);
+  }
+
+  void VisitCodeEntry(Address entry_address) override {
+    UpdateTypedSlotHelper::UpdateCodeEntry(entry_address, UpdateSlot);
+  }
+
+  void VisitDebugTarget(RelocInfo* rinfo) override {
+    UpdateTypedSlotHelper::UpdateDebugTarget(rinfo, UpdateSlot);
+  }
 };
 
 static String* UpdateReferenceInExternalStringTableEntry(Heap* heap,
@@ -3033,21 +3037,34 @@
 
 void MarkCompactCollector::EvacuateNewSpacePrologue() {
   NewSpace* new_space = heap()->new_space();
-  NewSpacePageIterator it(new_space->bottom(), new_space->top());
   // Append the list of new space pages to be processed.
-  while (it.has_next()) {
-    newspace_evacuation_candidates_.Add(it.next());
+  for (Page* p : NewSpacePageRange(new_space->bottom(), new_space->top())) {
+    newspace_evacuation_candidates_.Add(p);
   }
   new_space->Flip();
   new_space->ResetAllocationInfo();
 }
 
-void MarkCompactCollector::EvacuateNewSpaceEpilogue() {
-  newspace_evacuation_candidates_.Rewind(0);
-}
-
 class MarkCompactCollector::Evacuator : public Malloced {
  public:
+  enum EvacuationMode {
+    kObjectsNewToOld,
+    kPageNewToOld,
+    kObjectsOldToOld,
+    kPageNewToNew,
+  };
+
+  static inline EvacuationMode ComputeEvacuationMode(MemoryChunk* chunk) {
+    // Note: The order of checks is important in this function.
+    if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION))
+      return kPageNewToOld;
+    if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_NEW_PROMOTION))
+      return kPageNewToNew;
+    if (chunk->InNewSpace()) return kObjectsNewToOld;
+    DCHECK(chunk->IsEvacuationCandidate());
+    return kObjectsOldToOld;
+  }
+
   // NewSpacePages with more live bytes than this threshold qualify for fast
   // evacuation.
   static int PageEvacuationThreshold() {
@@ -3059,11 +3076,11 @@
   explicit Evacuator(MarkCompactCollector* collector)
       : collector_(collector),
         compaction_spaces_(collector->heap()),
-        local_pretenuring_feedback_(HashMap::PointersMatch,
+        local_pretenuring_feedback_(base::HashMap::PointersMatch,
                                     kInitialLocalPretenuringFeedbackCapacity),
         new_space_visitor_(collector->heap(), &compaction_spaces_,
                            &local_pretenuring_feedback_),
-        new_space_page_visitor(),
+        new_space_page_visitor(collector->heap()),
         old_space_visitor_(collector->heap(), &compaction_spaces_),
         duration_(0.0),
         bytes_compacted_(0) {}
@@ -3077,38 +3094,20 @@
   CompactionSpaceCollection* compaction_spaces() { return &compaction_spaces_; }
 
  private:
-  enum EvacuationMode {
-    kObjectsNewToOld,
-    kPageNewToOld,
-    kObjectsOldToOld,
-  };
-
   static const int kInitialLocalPretenuringFeedbackCapacity = 256;
 
   inline Heap* heap() { return collector_->heap(); }
 
-  inline EvacuationMode ComputeEvacuationMode(MemoryChunk* chunk) {
-    // Note: The order of checks is important in this function.
-    if (chunk->InNewSpace()) return kObjectsNewToOld;
-    if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION))
-      return kPageNewToOld;
-    DCHECK(chunk->IsEvacuationCandidate());
-    return kObjectsOldToOld;
-  }
-
   void ReportCompactionProgress(double duration, intptr_t bytes_compacted) {
     duration_ += duration;
     bytes_compacted_ += bytes_compacted;
   }
 
-  template <IterationMode mode, class Visitor>
-  inline bool EvacuateSinglePage(Page* p, Visitor* visitor);
-
   MarkCompactCollector* collector_;
 
   // Locally cached collector data.
   CompactionSpaceCollection compaction_spaces_;
-  HashMap local_pretenuring_feedback_;
+  base::HashMap local_pretenuring_feedback_;
 
   // Visitors for the corresponding spaces.
   EvacuateNewSpaceVisitor new_space_visitor_;
@@ -3120,75 +3119,78 @@
   intptr_t bytes_compacted_;
 };
 
-template <MarkCompactCollector::IterationMode mode, class Visitor>
-bool MarkCompactCollector::Evacuator::EvacuateSinglePage(Page* p,
-                                                         Visitor* visitor) {
+bool MarkCompactCollector::Evacuator::EvacuatePage(Page* page) {
   bool success = false;
-  DCHECK(p->IsEvacuationCandidate() || p->InNewSpace() ||
-         p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION));
-  int saved_live_bytes = p->LiveBytes();
-  double evacuation_time;
+  DCHECK(page->SweepingDone());
+  int saved_live_bytes = page->LiveBytes();
+  double evacuation_time = 0.0;
+  Heap* heap = page->heap();
   {
-    AlwaysAllocateScope always_allocate(heap()->isolate());
+    AlwaysAllocateScope always_allocate(heap->isolate());
     TimedScope timed_scope(&evacuation_time);
-    success = collector_->VisitLiveObjects<Visitor>(p, visitor, mode);
+    switch (ComputeEvacuationMode(page)) {
+      case kObjectsNewToOld:
+        success = collector_->VisitLiveObjects(page, &new_space_visitor_,
+                                               kClearMarkbits);
+        ArrayBufferTracker::ProcessBuffers(
+            page, ArrayBufferTracker::kUpdateForwardedRemoveOthers);
+        DCHECK(success);
+        break;
+      case kPageNewToOld:
+        success = collector_->VisitLiveObjects(page, &new_space_page_visitor,
+                                               kKeepMarking);
+        // ArrayBufferTracker will be updated during sweeping.
+        DCHECK(success);
+        break;
+      case kPageNewToNew:
+        new_space_page_visitor.account_semispace_copied(page->LiveBytes());
+        // ArrayBufferTracker will be updated during sweeping.
+        success = true;
+        break;
+      case kObjectsOldToOld:
+        success = collector_->VisitLiveObjects(page, &old_space_visitor_,
+                                               kClearMarkbits);
+        if (!success) {
+          // Aborted compaction page. We have to record slots here, since we
+          // might not have recorded them in the first place.
+          // Note: We mark the page as aborted here to be able to record slots
+          // for code objects in |RecordMigratedSlotVisitor|.
+          page->SetFlag(Page::COMPACTION_WAS_ABORTED);
+          EvacuateRecordOnlyVisitor record_visitor(collector_->heap());
+          success =
+              collector_->VisitLiveObjects(page, &record_visitor, kKeepMarking);
+          ArrayBufferTracker::ProcessBuffers(
+              page, ArrayBufferTracker::kUpdateForwardedKeepOthers);
+          DCHECK(success);
+          // We need to return failure here to indicate that we want this page
+          // added to the sweeper.
+          success = false;
+        } else {
+          ArrayBufferTracker::ProcessBuffers(
+              page, ArrayBufferTracker::kUpdateForwardedRemoveOthers);
+        }
+        break;
+      default:
+        UNREACHABLE();
+    }
   }
+  ReportCompactionProgress(evacuation_time, saved_live_bytes);
   if (FLAG_trace_evacuation) {
-    const char age_mark_tag =
-        !p->InNewSpace()
-            ? 'x'
-            : !p->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)
-                  ? '>'
-                  : !p->ContainsLimit(heap()->new_space()->age_mark()) ? '<'
-                                                                       : '#';
-    PrintIsolate(heap()->isolate(),
-                 "evacuation[%p]: page=%p new_space=%d age_mark_tag=%c "
-                 "page_evacuation=%d executable=%d live_bytes=%d time=%f\n",
-                 this, p, p->InNewSpace(), age_mark_tag,
-                 p->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION),
-                 p->IsFlagSet(MemoryChunk::IS_EXECUTABLE), saved_live_bytes,
-                 evacuation_time);
-  }
-  if (success) {
-    ReportCompactionProgress(evacuation_time, saved_live_bytes);
+    PrintIsolate(heap->isolate(),
+                 "evacuation[%p]: page=%p new_space=%d "
+                 "page_evacuation=%d executable=%d contains_age_mark=%d "
+                 "live_bytes=%d time=%f\n",
+                 static_cast<void*>(this), static_cast<void*>(page),
+                 page->InNewSpace(),
+                 page->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION) ||
+                     page->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION),
+                 page->IsFlagSet(MemoryChunk::IS_EXECUTABLE),
+                 page->Contains(heap->new_space()->age_mark()),
+                 saved_live_bytes, evacuation_time);
   }
   return success;
 }
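
The two ArrayBufferTracker::ProcessBuffers modes used above differ only in how unforwarded buffers are treated. A self-contained model of that difference, with invented stand-in types (the real tracker is page-local and mutex-protected):

#include <cstddef>
#include <map>

struct BufferSketch { BufferSketch* forwarded = nullptr; };
enum ProcessingMode { kUpdateForwardedRemoveOthers, kUpdateForwardedKeepOthers };

void ProcessBuffersSketch(std::map<BufferSketch*, size_t>* tracked,
                          ProcessingMode mode) {
  std::map<BufferSketch*, size_t> surviving;
  for (const auto& entry : *tracked) {
    if (entry.first->forwarded != nullptr) {
      // Evacuated buffer: re-key the tracking entry to its new location.
      surviving[entry.first->forwarded] = entry.second;
    } else if (mode == kUpdateForwardedKeepOthers) {
      // Aborted compaction page: the object stayed in place, keep tracking.
      surviving.insert(entry);
    }
    // Otherwise the buffer is dead and its backing store can be released.
  }
  tracked->swap(surviving);
}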
 
-bool MarkCompactCollector::Evacuator::EvacuatePage(Page* page) {
-  bool result = false;
-  DCHECK(page->SweepingDone());
-  switch (ComputeEvacuationMode(page)) {
-    case kObjectsNewToOld:
-      result = EvacuateSinglePage<kClearMarkbits>(page, &new_space_visitor_);
-      DCHECK(result);
-      USE(result);
-      break;
-    case kPageNewToOld:
-      result = EvacuateSinglePage<kKeepMarking>(page, &new_space_page_visitor);
-      DCHECK(result);
-      USE(result);
-      break;
-    case kObjectsOldToOld:
-      result = EvacuateSinglePage<kClearMarkbits>(page, &old_space_visitor_);
-      if (!result) {
-        // Aborted compaction page. We can record slots here to have them
-        // processed in parallel later on.
-        EvacuateRecordOnlyVisitor record_visitor(page->owner()->identity());
-        result = EvacuateSinglePage<kKeepMarking>(page, &record_visitor);
-        DCHECK(result);
-        USE(result);
-        // We need to return failure here to indicate that we want this page
-        // added to the sweeper.
-        return false;
-      }
-      break;
-    default:
-      UNREACHABLE();
-  }
-  return result;
-}
-
 void MarkCompactCollector::Evacuator::Finalize() {
   heap()->old_space()->MergeCompactionSpace(compaction_spaces_.Get(OLD_SPACE));
   heap()->code_space()->MergeCompactionSpace(
@@ -3197,11 +3199,13 @@
   heap()->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size() +
                                        new_space_page_visitor.promoted_size());
   heap()->IncrementSemiSpaceCopiedObjectSize(
-      new_space_visitor_.semispace_copied_size());
+      new_space_visitor_.semispace_copied_size() +
+      new_space_page_visitor.semispace_copied_size());
   heap()->IncrementYoungSurvivorsCounter(
       new_space_visitor_.promoted_size() +
       new_space_visitor_.semispace_copied_size() +
-      new_space_page_visitor.promoted_size());
+      new_space_page_visitor.promoted_size() +
+      new_space_page_visitor.semispace_copied_size());
   heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_);
 }
 
@@ -3249,31 +3253,33 @@
 
   static void FinalizePageSequentially(Heap* heap, MemoryChunk* chunk,
                                        bool success, PerPageData data) {
-    if (chunk->InNewSpace()) {
-      DCHECK(success);
-    } else if (chunk->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
-      DCHECK(success);
-      Page* p = static_cast<Page*>(chunk);
-      p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
-      p->ForAllFreeListCategories(
-          [](FreeListCategory* category) { DCHECK(!category->is_linked()); });
-      heap->mark_compact_collector()->sweeper().AddLatePage(
-          p->owner()->identity(), p);
-    } else {
-      Page* p = static_cast<Page*>(chunk);
-      if (success) {
-        DCHECK(p->IsEvacuationCandidate());
-        DCHECK(p->SweepingDone());
-        p->Unlink();
-      } else {
-        // We have partially compacted the page, i.e., some objects may have
-        // moved, others are still in place.
-        p->SetFlag(Page::COMPACTION_WAS_ABORTED);
-        p->ClearEvacuationCandidate();
-        // Slots have already been recorded so we just need to add it to the
-        // sweeper.
-        *data += 1;
-      }
+    using Evacuator = MarkCompactCollector::Evacuator;
+    Page* p = static_cast<Page*>(chunk);
+    switch (Evacuator::ComputeEvacuationMode(p)) {
+      case Evacuator::kPageNewToOld:
+        break;
+      case Evacuator::kPageNewToNew:
+        DCHECK(success);
+        break;
+      case Evacuator::kObjectsNewToOld:
+        DCHECK(success);
+        break;
+      case Evacuator::kObjectsOldToOld:
+        if (success) {
+          DCHECK(p->IsEvacuationCandidate());
+          DCHECK(p->SweepingDone());
+          p->Unlink();
+        } else {
+          // We have partially compacted the page, i.e., some objects may have
+          // moved, others are still in place.
+          p->ClearEvacuationCandidate();
+          // Slots have already been recorded so we just need to add it to the
+          // sweeper, which will happen after updating pointers.
+          *data += 1;
+        }
+        break;
+      default:
+        UNREACHABLE();
     }
   }
 };
@@ -3295,10 +3301,14 @@
     live_bytes += page->LiveBytes();
     if (!page->NeverEvacuate() &&
         (page->LiveBytes() > Evacuator::PageEvacuationThreshold()) &&
-        page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) &&
         !page->Contains(age_mark)) {
-      EvacuateNewSpacePageVisitor::TryMoveToOldSpace(page, heap()->old_space());
+      if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) {
+        EvacuateNewSpacePageVisitor::MoveToOldSpace(page, heap()->old_space());
+      } else {
+        EvacuateNewSpacePageVisitor::MoveToToSpace(page);
+      }
     }
+
     job.AddPage(page, &abandoned_pages);
   }
   DCHECK_GE(job.NumberOfPages(), 1);
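
The page selection above combines the live-byte threshold with the age-mark checks. A hypothetical distillation of the decision (names and signature invented for illustration):

enum class PagePromotion { kNone, kNewToOld, kNewToNew };

PagePromotion ChoosePromotion(int live_bytes, int evacuation_threshold,
                              bool never_evacuate, bool contains_age_mark,
                              bool below_age_mark) {
  // Only densely live pages that do not hold the age mark are moved as a
  // whole; everything else is evacuated object by object.
  if (never_evacuate || live_bytes <= evacuation_threshold ||
      contains_age_mark) {
    return PagePromotion::kNone;
  }
  // Pages entirely below the age mark have already survived a scavenge and
  // are promoted to old space; younger pages stay in new space via a page
  // move within the semispaces.
  return below_age_mark ? PagePromotion::kNewToOld : PagePromotion::kNewToNew;
}
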
@@ -3352,16 +3362,21 @@
 template <MarkCompactCollector::Sweeper::SweepingMode sweeping_mode,
           MarkCompactCollector::Sweeper::SweepingParallelism parallelism,
           MarkCompactCollector::Sweeper::SkipListRebuildingMode skip_list_mode,
+          MarkCompactCollector::Sweeper::FreeListRebuildingMode free_list_mode,
           MarkCompactCollector::Sweeper::FreeSpaceTreatmentMode free_space_mode>
 int MarkCompactCollector::Sweeper::RawSweep(PagedSpace* space, Page* p,
                                             ObjectVisitor* v) {
   DCHECK(!p->IsEvacuationCandidate() && !p->SweepingDone());
   DCHECK(!p->IsFlagSet(Page::BLACK_PAGE));
-  DCHECK_EQ(skip_list_mode == REBUILD_SKIP_LIST,
-            space->identity() == CODE_SPACE);
+  DCHECK((space == nullptr) || (space->identity() != CODE_SPACE) ||
+         (skip_list_mode == REBUILD_SKIP_LIST));
   DCHECK((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST));
   DCHECK(parallelism == SWEEP_ON_MAIN_THREAD || sweeping_mode == SWEEP_ONLY);
 
+  // Before we sweep objects on the page, we free dead array buffers, which
+  // requires valid mark bits.
+  ArrayBufferTracker::FreeDead(p);
+
   Address free_start = p->area_start();
   DCHECK(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
 
@@ -3387,8 +3402,13 @@
       if (free_space_mode == ZAP_FREE_SPACE) {
         memset(free_start, 0xcc, size);
       }
-      freed_bytes = space->UnaccountedFree(free_start, size);
-      max_freed_bytes = Max(freed_bytes, max_freed_bytes);
+      if (free_list_mode == REBUILD_FREE_LIST) {
+        freed_bytes = space->UnaccountedFree(free_start, size);
+        max_freed_bytes = Max(freed_bytes, max_freed_bytes);
+      } else {
+        p->heap()->CreateFillerObjectAt(free_start, size,
+                                        ClearRecordedSlots::kNo);
+      }
     }
     Map* map = object->synchronized_map();
     int size = object->SizeFromMap(map);
@@ -3415,10 +3435,16 @@
     if (free_space_mode == ZAP_FREE_SPACE) {
       memset(free_start, 0xcc, size);
     }
-    freed_bytes = space->UnaccountedFree(free_start, size);
-    max_freed_bytes = Max(freed_bytes, max_freed_bytes);
+    if (free_list_mode == REBUILD_FREE_LIST) {
+      freed_bytes = space->UnaccountedFree(free_start, size);
+      max_freed_bytes = Max(freed_bytes, max_freed_bytes);
+    } else {
+      p->heap()->CreateFillerObjectAt(free_start, size,
+                                      ClearRecordedSlots::kNo);
+    }
   }
   p->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
+  if (free_list_mode == IGNORE_FREE_LIST) return 0;
   return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes));
 }
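
The new FreeListRebuildingMode template parameter selects between the two treatments of dead ranges shown in this hunk. A compact stand-alone sketch of that choice, with stand-in types and CreateFillerObjectAt modeled by zeroing the range:

#include <cstddef>
#include <cstring>

enum FreeListRebuildingModeSketch { kRebuildFreeList, kIgnoreFreeList };

template <FreeListRebuildingModeSketch mode>
size_t HandleDeadRange(char* start, size_t size, size_t* unaccounted_free) {
  if (mode == kRebuildFreeList) {
    // Old/code/map space: the range is handed back to the free list so it
    // can be allocated from again.
    *unaccounted_free += size;
    return size;
  }
  // New space: the range must stay iterable but is never reused, so only a
  // filler object is written over it.
  std::memset(start, 0, size);
  return 0;
}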
 
@@ -3438,6 +3464,7 @@
     Address start = code->instruction_start();
     Address end = code->address() + code->Size();
     RememberedSet<OLD_TO_OLD>::RemoveRangeTyped(page, start, end);
+    RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(page, start, end);
   }
 }
 
@@ -3533,12 +3560,15 @@
 
     EvacuateNewSpacePrologue();
     EvacuatePagesInParallel();
-    EvacuateNewSpaceEpilogue();
     heap()->new_space()->set_age_mark(heap()->new_space()->top());
   }
 
   UpdatePointersAfterEvacuation();
 
+  if (!heap()->new_space()->Rebalance()) {
+    FatalProcessOutOfMemory("NewSpace::Rebalance");
+  }
+
   // Give pages that are queued to be freed back to the OS. Note that filtering
   // slots only handles old space (for unboxed doubles), and thus map space can
   // still contain stale pointers. We only free the chunks after pointer updates
@@ -3548,6 +3578,19 @@
   {
     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_CLEAN_UP);
 
+    for (Page* p : newspace_evacuation_candidates_) {
+      if (p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
+        p->ClearFlag(Page::PAGE_NEW_NEW_PROMOTION);
+        sweeper().AddLatePage(p->owner()->identity(), p);
+      } else if (p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
+        p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
+        p->ForAllFreeListCategories(
+            [](FreeListCategory* category) { DCHECK(!category->is_linked()); });
+        sweeper().AddLatePage(p->owner()->identity(), p);
+      }
+    }
+    newspace_evacuation_candidates_.Rewind(0);
+
     for (Page* p : evacuation_candidates_) {
       // Important: skip list should be cleared only after roots were updated
       // because root iteration traverses the stack and might have to find
@@ -3560,11 +3603,6 @@
       }
     }
 
-    // EvacuateNewSpaceAndCandidates iterates over new space objects and for
-    // ArrayBuffers either re-registers them as live or promotes them. This is
-    // needed to properly free them.
-    heap()->array_buffer_tracker()->FreeDead(false);
-
     // Deallocate evacuated candidate pages.
     ReleaseEvacuationCandidates();
   }
@@ -3580,12 +3618,12 @@
 class PointerUpdateJobTraits {
  public:
   typedef int PerPageData;  // Per page data is not used in this job.
-  typedef PointersUpdatingVisitor* PerTaskData;
+  typedef int PerTaskData;  // Per task data is not used in this job.
 
-  static bool ProcessPageInParallel(Heap* heap, PerTaskData visitor,
-                                    MemoryChunk* chunk, PerPageData) {
+  static bool ProcessPageInParallel(Heap* heap, PerTaskData, MemoryChunk* chunk,
+                                    PerPageData) {
     UpdateUntypedPointers(heap, chunk);
-    UpdateTypedPointers(heap, chunk, visitor);
+    UpdateTypedPointers(heap, chunk);
     return true;
   }
   static const bool NeedSequentialFinalization = false;
@@ -3595,37 +3633,71 @@
  private:
   static void UpdateUntypedPointers(Heap* heap, MemoryChunk* chunk) {
     if (direction == OLD_TO_NEW) {
-      RememberedSet<OLD_TO_NEW>::IterateWithWrapper(heap, chunk,
-                                                    UpdateOldToNewSlot);
+      RememberedSet<OLD_TO_NEW>::Iterate(chunk, [heap, chunk](Address slot) {
+        return CheckAndUpdateOldToNewSlot(heap, slot);
+      });
     } else {
-      RememberedSet<OLD_TO_OLD>::Iterate(chunk, [heap](Address slot) {
-        PointersUpdatingVisitor::UpdateSlot(heap,
-                                            reinterpret_cast<Object**>(slot));
-        return REMOVE_SLOT;
+      RememberedSet<OLD_TO_OLD>::Iterate(chunk, [](Address slot) {
+        return UpdateSlot(reinterpret_cast<Object**>(slot));
       });
     }
   }
 
-  static void UpdateTypedPointers(Heap* heap, MemoryChunk* chunk,
-                                  PointersUpdatingVisitor* visitor) {
+  static void UpdateTypedPointers(Heap* heap, MemoryChunk* chunk) {
     if (direction == OLD_TO_OLD) {
       Isolate* isolate = heap->isolate();
       RememberedSet<OLD_TO_OLD>::IterateTyped(
-          chunk, [isolate, visitor](SlotType type, Address slot) {
-            UpdateTypedSlot(isolate, visitor, type, slot);
-            return REMOVE_SLOT;
+          chunk, [isolate](SlotType type, Address host_addr, Address slot) {
+            return UpdateTypedSlotHelper::UpdateTypedSlot(isolate, type, slot,
+                                                          UpdateSlot);
+          });
+    } else {
+      Isolate* isolate = heap->isolate();
+      RememberedSet<OLD_TO_NEW>::IterateTyped(
+          chunk,
+          [isolate, heap](SlotType type, Address host_addr, Address slot) {
+            return UpdateTypedSlotHelper::UpdateTypedSlot(
+                isolate, type, slot, [heap](Object** slot) {
+                  return CheckAndUpdateOldToNewSlot(
+                      heap, reinterpret_cast<Address>(slot));
+                });
           });
     }
   }
 
-  static void UpdateOldToNewSlot(HeapObject** address, HeapObject* object) {
-    MapWord map_word = object->map_word();
-    // There could still be stale pointers in large object space, map space,
-    // and old space for pages that have been promoted.
-    if (map_word.IsForwardingAddress()) {
-      // Update the corresponding slot.
-      *address = map_word.ToForwardingAddress();
+  static SlotCallbackResult CheckAndUpdateOldToNewSlot(Heap* heap,
+                                                       Address slot_address) {
+    Object** slot = reinterpret_cast<Object**>(slot_address);
+    if (heap->InFromSpace(*slot)) {
+      HeapObject* heap_object = reinterpret_cast<HeapObject*>(*slot);
+      DCHECK(heap_object->IsHeapObject());
+      MapWord map_word = heap_object->map_word();
+      // There could still be stale pointers in large object space, map space,
+      // and old space for pages that have been promoted.
+      if (map_word.IsForwardingAddress()) {
+        // Update the corresponding slot.
+        *slot = map_word.ToForwardingAddress();
+      }
+      // If the object was in from space before the callback and is in to
+      // space afterwards, it is still live.
+      // Unfortunately, we do not know anything about the slot itself; it
+      // could be inside a just-freed free-space object.
+      if (heap->InToSpace(*slot)) {
+        return KEEP_SLOT;
+      }
+    } else if (heap->InToSpace(*slot)) {
+      DCHECK(Page::FromAddress(reinterpret_cast<HeapObject*>(*slot)->address())
+                 ->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION));
+      // Slots can be in "to" space after a page has been moved. Since there is
+      // no forwarding information present we need to check the markbits to
+      // determine liveness.
+      if (Marking::IsBlack(
+              Marking::MarkBitFrom(reinterpret_cast<HeapObject*>(*slot))))
+        return KEEP_SLOT;
+    } else {
+      DCHECK(!heap->InNewSpace(*slot));
     }
+    return REMOVE_SLOT;
   }
 };
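
For reference, the three-way classification in CheckAndUpdateOldToNewSlot condenses to a small decision function. The following sketch uses boolean stand-ins for the heap queries above:

enum SlotDecision { kKeepSlot, kRemoveSlot };

SlotDecision ClassifyOldToNewSlot(bool in_from_space, bool in_to_space_after,
                                  bool marked_black) {
  if (in_from_space) {
    // The slot was rewritten through the forwarding word; it survives only
    // if it still points into to-space afterwards.
    return in_to_space_after ? kKeepSlot : kRemoveSlot;
  }
  if (in_to_space_after) {
    // New->new promoted page: no forwarding words exist, so liveness comes
    // from the mark bits instead.
    return marked_black ? kKeepSlot : kRemoveSlot;
  }
  // Old-space target (or stale slot): drop the entry.
  return kRemoveSlot;
}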
 
@@ -3642,10 +3714,9 @@
       heap, heap->isolate()->cancelable_task_manager(), semaphore);
   RememberedSet<direction>::IterateMemoryChunks(
       heap, [&job](MemoryChunk* chunk) { job.AddPage(chunk, 0); });
-  PointersUpdatingVisitor visitor(heap);
   int num_pages = job.NumberOfPages();
   int num_tasks = NumberOfPointerUpdateTasks(num_pages);
-  job.Run(num_tasks, [&visitor](int i) { return &visitor; });
+  job.Run(num_tasks, [](int i) { return 0; });
 }
 
 class ToSpacePointerUpdateJobTraits {
@@ -3655,6 +3726,24 @@
 
   static bool ProcessPageInParallel(Heap* heap, PerTaskData visitor,
                                     MemoryChunk* chunk, PerPageData limits) {
+    if (chunk->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
+      // New->new promoted pages contain garbage so they require iteration
+      // using markbits.
+      ProcessPageInParallelVisitLive(heap, visitor, chunk, limits);
+    } else {
+      ProcessPageInParallelVisitAll(heap, visitor, chunk, limits);
+    }
+    return true;
+  }
+
+  static const bool NeedSequentialFinalization = false;
+  static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) {
+  }
+
+ private:
+  static void ProcessPageInParallelVisitAll(Heap* heap, PerTaskData visitor,
+                                            MemoryChunk* chunk,
+                                            PerPageData limits) {
     for (Address cur = limits.first; cur < limits.second;) {
       HeapObject* object = HeapObject::FromAddress(cur);
       Map* map = object->map();
@@ -3662,10 +3751,18 @@
       object->IterateBody(map->instance_type(), size, visitor);
       cur += size;
     }
-    return true;
   }
-  static const bool NeedSequentialFinalization = false;
-  static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) {
+
+  static void ProcessPageInParallelVisitLive(Heap* heap, PerTaskData visitor,
+                                             MemoryChunk* chunk,
+                                             PerPageData limits) {
+    LiveObjectIterator<kBlackObjects> it(chunk);
+    HeapObject* object = NULL;
+    while ((object = it.Next()) != NULL) {
+      Map* map = object->map();
+      int size = object->SizeFromMap(map);
+      object->IterateBody(map->instance_type(), size, visitor);
+    }
   }
 };
 
@@ -3674,15 +3771,13 @@
       heap, heap->isolate()->cancelable_task_manager(), semaphore);
   Address space_start = heap->new_space()->bottom();
   Address space_end = heap->new_space()->top();
-  NewSpacePageIterator it(space_start, space_end);
-  while (it.has_next()) {
-    Page* page = it.next();
+  for (Page* page : NewSpacePageRange(space_start, space_end)) {
     Address start =
         page->Contains(space_start) ? space_start : page->area_start();
     Address end = page->Contains(space_end) ? space_end : page->area_end();
     job.AddPage(page, std::make_pair(start, end));
   }
-  PointersUpdatingVisitor visitor(heap);
+  PointersUpdatingVisitor visitor;
   int num_tasks = FLAG_parallel_pointer_update ? job.NumberOfPages() : 1;
   job.Run(num_tasks, [&visitor](int i) { return &visitor; });
 }
@@ -3690,7 +3785,7 @@
 void MarkCompactCollector::UpdatePointersAfterEvacuation() {
   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS);
 
-  PointersUpdatingVisitor updating_visitor(heap());
+  PointersUpdatingVisitor updating_visitor;
 
   {
     TRACE_GC(heap()->tracer(),
@@ -3741,7 +3836,7 @@
   int pages_freed = 0;
   Page* page = nullptr;
   while ((page = GetSweepingPageSafe(identity)) != nullptr) {
-    int freed = ParallelSweepPage(page, heap_->paged_space(identity));
+    int freed = ParallelSweepPage(page, identity);
     pages_freed += 1;
     DCHECK_GE(freed, 0);
     max_freed = Max(max_freed, freed);
@@ -3753,7 +3848,7 @@
 }
 
 int MarkCompactCollector::Sweeper::ParallelSweepPage(Page* page,
-                                                     PagedSpace* space) {
+                                                     AllocationSpace identity) {
   int max_freed = 0;
   if (page->mutex()->TryLock()) {
     // If this page was already swept in the meantime, we can return here.
@@ -3762,19 +3857,25 @@
       return 0;
     }
     page->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
-    if (space->identity() == OLD_SPACE) {
+    if (identity == NEW_SPACE) {
+      RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
+               IGNORE_FREE_LIST, IGNORE_FREE_SPACE>(nullptr, page, nullptr);
+    } else if (identity == OLD_SPACE) {
       max_freed = RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
-                           IGNORE_FREE_SPACE>(space, page, NULL);
-    } else if (space->identity() == CODE_SPACE) {
+                           REBUILD_FREE_LIST, IGNORE_FREE_SPACE>(
+          heap_->paged_space(identity), page, nullptr);
+    } else if (identity == CODE_SPACE) {
       max_freed = RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, REBUILD_SKIP_LIST,
-                           IGNORE_FREE_SPACE>(space, page, NULL);
+                           REBUILD_FREE_LIST, IGNORE_FREE_SPACE>(
+          heap_->paged_space(identity), page, nullptr);
     } else {
       max_freed = RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
-                           IGNORE_FREE_SPACE>(space, page, NULL);
+                           REBUILD_FREE_LIST, IGNORE_FREE_SPACE>(
+          heap_->paged_space(identity), page, nullptr);
     }
     {
       base::LockGuard<base::Mutex> guard(&mutex_);
-      swept_list_[space->identity()].Add(page);
+      swept_list_[identity].Add(page);
     }
     page->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
     page->mutex()->Unlock();
@@ -3800,7 +3901,8 @@
                                                          Page* page) {
   page->concurrent_sweeping_state().SetValue(Page::kSweepingPending);
   int to_sweep = page->area_size() - page->LiveBytes();
-  heap_->paged_space(space)->accounting_stats_.ShrinkSpace(to_sweep);
+  if (space != NEW_SPACE)
+    heap_->paged_space(space)->accounting_stats_.ShrinkSpace(to_sweep);
 }
 
 Page* MarkCompactCollector::Sweeper::GetSweepingPageSafe(
@@ -3821,15 +3923,15 @@
 }
 
 void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
+  Address space_top = space->top();
   space->ClearStats();
 
-  PageIterator it(space);
-
   int will_be_swept = 0;
   bool unused_page_present = false;
 
-  while (it.has_next()) {
-    Page* p = it.next();
+  // Loop needs to support deletion if live bytes == 0 for a page.
+  for (auto it = space->begin(); it != space->end();) {
+    Page* p = *(it++);
     DCHECK(p->SweepingDone());
 
     if (p->IsEvacuationCandidate()) {
@@ -3844,7 +3946,15 @@
       Bitmap::Clear(p);
       p->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
       p->ClearFlag(Page::BLACK_PAGE);
-      // TODO(hpayer): Free unused memory of last black page.
+      // Area above the high watermark is free.
+      Address free_start = p->HighWaterMark();
+      // Check if the space top was in this page, which means that the
+      // high watermark is not up-to-date.
+      if (free_start < space_top && space_top <= p->area_end()) {
+        free_start = space_top;
+      }
+      int size = static_cast<int>(p->area_end() - free_start);
+      space->Free(free_start, size);
       continue;
     }
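
The free-start computation above replaces the earlier TODO about trimming black pages. A worked stand-alone version of the arithmetic, assuming plain integer addresses:

#include <cstdint>

// Returns the first free byte on a black page: normally the page's high
// watermark, unless the space's allocation top still lies on this page, in
// which case the watermark is stale and the top wins.
uintptr_t FreeStartOnBlackPage(uintptr_t high_water_mark, uintptr_t space_top,
                               uintptr_t area_end) {
  uintptr_t free_start = high_water_mark;
  if (free_start < space_top && space_top <= area_end) {
    free_start = space_top;
  }
  return free_start;  // The freed size is area_end - free_start.
}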
 
@@ -3855,8 +3965,8 @@
       // testing this is fine.
       p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
       Sweeper::RawSweep<Sweeper::SWEEP_ONLY, Sweeper::SWEEP_ON_MAIN_THREAD,
-                        Sweeper::IGNORE_SKIP_LIST, Sweeper::IGNORE_FREE_SPACE>(
-          space, p, nullptr);
+                        Sweeper::IGNORE_SKIP_LIST, Sweeper::IGNORE_FREE_LIST,
+                        Sweeper::IGNORE_FREE_SPACE>(space, p, nullptr);
       continue;
     }
 
@@ -3864,8 +3974,10 @@
     if (p->LiveBytes() == 0) {
       if (unused_page_present) {
         if (FLAG_gc_verbose) {
-          PrintIsolate(isolate(), "sweeping: released page: %p", p);
+          PrintIsolate(isolate(), "sweeping: released page: %p",
+                       static_cast<void*>(p));
         }
+        ArrayBufferTracker::FreeAll(p);
         space->ReleasePage(p);
         continue;
       }
@@ -3936,7 +4048,10 @@
   Page* source_page = Page::FromAddress(reinterpret_cast<Address>(host));
   if (target_page->IsEvacuationCandidate() &&
       !ShouldSkipEvacuationSlotRecording(host)) {
-    RememberedSet<OLD_TO_OLD>::InsertTyped(source_page, CODE_ENTRY_SLOT, slot);
+    // TODO(ulan): remove this check after investigating crbug.com/414964.
+    CHECK(target->IsCode());
+    RememberedSet<OLD_TO_OLD>::InsertTyped(
+        source_page, reinterpret_cast<Address>(host), CODE_ENTRY_SLOT, slot);
   }
 }
 
diff --git a/src/heap/mark-compact.h b/src/heap/mark-compact.h
index d6adb03..07b289e 100644
--- a/src/heap/mark-compact.h
+++ b/src/heap/mark-compact.h
@@ -408,6 +408,7 @@
 
     enum SweepingMode { SWEEP_ONLY, SWEEP_AND_VISIT_LIVE_OBJECTS };
     enum SkipListRebuildingMode { REBUILD_SKIP_LIST, IGNORE_SKIP_LIST };
+    enum FreeListRebuildingMode { REBUILD_FREE_LIST, IGNORE_FREE_LIST };
     enum FreeSpaceTreatmentMode { IGNORE_FREE_SPACE, ZAP_FREE_SPACE };
     enum SweepingParallelism { SWEEP_ON_MAIN_THREAD, SWEEP_IN_PARALLEL };
 
@@ -416,6 +417,7 @@
 
     template <SweepingMode sweeping_mode, SweepingParallelism parallelism,
               SkipListRebuildingMode skip_list_mode,
+              FreeListRebuildingMode free_list_mode,
               FreeSpaceTreatmentMode free_space_mode>
     static int RawSweep(PagedSpace* space, Page* p, ObjectVisitor* v);
 
@@ -434,11 +436,12 @@
 
     int ParallelSweepSpace(AllocationSpace identity, int required_freed_bytes,
                            int max_pages = 0);
-    int ParallelSweepPage(Page* page, PagedSpace* space);
+    int ParallelSweepPage(Page* page, AllocationSpace identity);
 
     void StartSweeping();
     void StartSweepingHelper(AllocationSpace space_to_start);
     void EnsureCompleted();
+    void EnsureNewSpaceCompleted();
     bool IsSweepingCompleted();
     void SweepOrWaitUntilSweepingCompleted(Page* page);
 
@@ -467,7 +470,7 @@
     SweepingList sweeping_list_[kAllocationSpaces];
     bool sweeping_in_progress_;
     bool late_pages_;
-    int num_sweeping_tasks_;
+    base::AtomicNumber<intptr_t> num_sweeping_tasks_;
   };
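
The switch above from a plain int to base::AtomicNumber<intptr_t> lets sweeper tasks update the counter without holding the collector's mutex. A rough standard-library equivalent (an assumption for illustration; V8 uses its own atomics wrapper, not std::atomic):

#include <atomic>
#include <cstdint>

class SweepingTaskCounter {
 public:
  void OnTaskStarted() { num_sweeping_tasks_.fetch_add(1); }
  void OnTaskCompleted() { num_sweeping_tasks_.fetch_sub(1); }
  // Safe to poll from the main thread while tasks run concurrently.
  bool AllTasksDone() const { return num_sweeping_tasks_.load() == 0; }

 private:
  std::atomic<intptr_t> num_sweeping_tasks_{0};
};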
 
   enum IterationMode {
@@ -613,9 +616,7 @@
 
   Sweeper& sweeper() { return sweeper_; }
 
-  std::vector<std::pair<void*, void*>>& wrappers_to_trace() {
-    return wrappers_to_trace_;
-  }
+  void RegisterWrappersWithEmbedderHeapTracer();
 
   void SetEmbedderHeapTracer(EmbedderHeapTracer* tracer);
 
@@ -793,7 +794,6 @@
   void SweepSpaces();
 
   void EvacuateNewSpacePrologue();
-  void EvacuateNewSpaceEpilogue();
 
   void EvacuatePagesInParallel();
 
diff --git a/src/heap/object-stats.cc b/src/heap/object-stats.cc
index 0198c6b..e7d90b3 100644
--- a/src/heap/object-stats.cc
+++ b/src/heap/object-stats.cc
@@ -134,8 +134,7 @@
 
 Isolate* ObjectStats::isolate() { return heap()->isolate(); }
 
-
-void ObjectStatsVisitor::CountFixedArray(
+void ObjectStatsCollector::CountFixedArray(
     FixedArrayBase* fixed_array, FixedArraySubInstanceType fast_type,
     FixedArraySubInstanceType dictionary_type) {
   Heap* heap = fixed_array->map()->GetHeap();
@@ -152,12 +151,32 @@
   }
 }
 
+void ObjectStatsCollector::CollectStatistics(StaticVisitorBase::VisitorId id,
+                                             Map* map, HeapObject* obj) {
+  // Record any type specific statistics here.
+  switch (id) {
+    case StaticVisitorBase::kVisitMap:
+      RecordMapStats(map, obj);
+      break;
+    case StaticVisitorBase::kVisitCode:
+      RecordCodeStats(map, obj);
+      break;
+    case StaticVisitorBase::kVisitSharedFunctionInfo:
+      RecordSharedFunctionInfoStats(map, obj);
+      break;
+    case StaticVisitorBase::kVisitFixedArray:
+      RecordFixedArrayStats(map, obj);
+      break;
+    default:
+      break;
+  }
 
-void ObjectStatsVisitor::VisitBase(VisitorId id, Map* map, HeapObject* obj) {
   Heap* heap = map->GetHeap();
   int object_size = obj->Size();
   heap->object_stats_->RecordObjectStats(map->instance_type(), object_size);
-  table_.GetVisitorById(id)(map, obj);
+}
+
+void ObjectStatsCollector::CollectFixedArrayStatistics(HeapObject* obj) {
   if (obj->IsJSObject()) {
     JSObject* object = JSObject::cast(obj);
     CountFixedArray(object->elements(), DICTIONARY_ELEMENTS_SUB_TYPE,
@@ -167,16 +186,7 @@
   }
 }
 
-
-template <ObjectStatsVisitor::VisitorId id>
-void ObjectStatsVisitor::Visit(Map* map, HeapObject* obj) {
-  VisitBase(id, map, obj);
-}
-
-
-template <>
-void ObjectStatsVisitor::Visit<ObjectStatsVisitor::kVisitMap>(Map* map,
-                                                              HeapObject* obj) {
+void ObjectStatsCollector::RecordMapStats(Map* map, HeapObject* obj) {
   Heap* heap = map->GetHeap();
   Map* map_obj = Map::cast(obj);
   DCHECK(map->instance_type() == MAP_TYPE);
@@ -187,54 +197,42 @@
                                                       fixed_array_size);
   }
   if (map_obj->has_code_cache()) {
-    FixedArray* cache = FixedArray::cast(map_obj->code_cache());
+    FixedArray* cache = map_obj->code_cache();
     heap->object_stats_->RecordFixedArraySubTypeStats(MAP_CODE_CACHE_SUB_TYPE,
                                                       cache->Size());
   }
-  VisitBase(kVisitMap, map, obj);
 }
 
-
-template <>
-void ObjectStatsVisitor::Visit<ObjectStatsVisitor::kVisitCode>(
-    Map* map, HeapObject* obj) {
+void ObjectStatsCollector::RecordCodeStats(Map* map, HeapObject* obj) {
   Heap* heap = map->GetHeap();
   int object_size = obj->Size();
   DCHECK(map->instance_type() == CODE_TYPE);
   Code* code_obj = Code::cast(obj);
   heap->object_stats_->RecordCodeSubTypeStats(code_obj->kind(),
                                               code_obj->GetAge(), object_size);
-  VisitBase(kVisitCode, map, obj);
 }
 
-
-template <>
-void ObjectStatsVisitor::Visit<ObjectStatsVisitor::kVisitSharedFunctionInfo>(
-    Map* map, HeapObject* obj) {
+void ObjectStatsCollector::RecordSharedFunctionInfoStats(Map* map,
+                                                         HeapObject* obj) {
   Heap* heap = map->GetHeap();
   SharedFunctionInfo* sfi = SharedFunctionInfo::cast(obj);
   if (sfi->scope_info() != heap->empty_fixed_array()) {
     heap->object_stats_->RecordFixedArraySubTypeStats(
         SCOPE_INFO_SUB_TYPE, FixedArray::cast(sfi->scope_info())->Size());
   }
-  VisitBase(kVisitSharedFunctionInfo, map, obj);
 }
 
-
-template <>
-void ObjectStatsVisitor::Visit<ObjectStatsVisitor::kVisitFixedArray>(
-    Map* map, HeapObject* obj) {
+void ObjectStatsCollector::RecordFixedArrayStats(Map* map, HeapObject* obj) {
   Heap* heap = map->GetHeap();
   FixedArray* fixed_array = FixedArray::cast(obj);
   if (fixed_array == heap->string_table()) {
     heap->object_stats_->RecordFixedArraySubTypeStats(STRING_TABLE_SUB_TYPE,
                                                       fixed_array->Size());
   }
-  VisitBase(kVisitFixedArray, map, obj);
 }
 
-
-void ObjectStatsVisitor::Initialize(VisitorDispatchTable<Callback>* original) {
+void MarkCompactObjectStatsVisitor::Initialize(
+    VisitorDispatchTable<Callback>* original) {
   // Copy the original visitor table to make call-through possible. After we
   // preserved a copy locally, we patch the original table to call us.
   table_.CopyFrom(original);
@@ -243,5 +241,29 @@
 #undef COUNT_FUNCTION
 }
 
+template <MarkCompactObjectStatsVisitor::VisitorId id>
+void MarkCompactObjectStatsVisitor::Visit(Map* map, HeapObject* obj) {
+  ObjectStatsCollector::CollectStatistics(id, map, obj);
+  table_.GetVisitorById(id)(map, obj);
+  ObjectStatsCollector::CollectFixedArrayStatistics(obj);
+}
+
+void IncrementalMarkingObjectStatsVisitor::Initialize(
+    VisitorDispatchTable<Callback>* original) {
+  // Copy the original visitor table to make call-through possible. After we
+  // preserved a copy locally, we patch the original table to call us.
+  table_.CopyFrom(original);
+#define COUNT_FUNCTION(id) original->Register(kVisit##id, Visit<kVisit##id>);
+  VISITOR_ID_LIST(COUNT_FUNCTION)
+#undef COUNT_FUNCTION
+}
+
+template <IncrementalMarkingObjectStatsVisitor::VisitorId id>
+void IncrementalMarkingObjectStatsVisitor::Visit(Map* map, HeapObject* obj) {
+  ObjectStatsCollector::CollectStatistics(id, map, obj);
+  table_.GetVisitorById(id)(map, obj);
+  ObjectStatsCollector::CollectFixedArrayStatistics(obj);
+}
+
 }  // namespace internal
 }  // namespace v8
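
Both new visitor classes follow the same pattern: copy the original dispatch table, patch the shared table to point at a wrapper, and have the wrapper record statistics around the call-through. A self-contained model of that pattern with invented types:

#include <functional>
#include <unordered_map>

struct HeapObjectSketch {};
using Callback = std::function<void(HeapObjectSketch*)>;

void CollectStatistics(int id, HeapObjectSketch*) { /* per-type counters */ }
void CollectFixedArrayStatistics(HeapObjectSketch*) { /* element counters */ }

// Replaces the entry for |id| with a wrapper that records stats and then
// calls through to the previously registered visitor.
void PatchWithStatsWrapper(std::unordered_map<int, Callback>* table, int id) {
  Callback original = (*table)[id];
  (*table)[id] = [id, original](HeapObjectSketch* obj) {
    CollectStatistics(id, obj);
    if (original) original(obj);
    CollectFixedArrayStatistics(obj);
  };
}
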
diff --git a/src/heap/object-stats.h b/src/heap/object-stats.h
index e2dcfaa..ce0a317 100644
--- a/src/heap/object-stats.h
+++ b/src/heap/object-stats.h
@@ -81,16 +81,34 @@
   size_t object_sizes_last_time_[OBJECT_STATS_COUNT];
 };
 
-
-class ObjectStatsVisitor : public StaticMarkingVisitor<ObjectStatsVisitor> {
+class ObjectStatsCollector {
  public:
-  static void Initialize(VisitorDispatchTable<Callback>* original);
-
-  static void VisitBase(VisitorId id, Map* map, HeapObject* obj);
+  static void CollectStatistics(StaticVisitorBase::VisitorId id, Map* map,
+                                HeapObject* obj);
+  static void CollectFixedArrayStatistics(HeapObject* obj);
 
   static void CountFixedArray(FixedArrayBase* fixed_array,
                               FixedArraySubInstanceType fast_type,
                               FixedArraySubInstanceType dictionary_type);
+  static void RecordMapStats(Map* map, HeapObject* obj);
+  static void RecordCodeStats(Map* map, HeapObject* obj);
+  static void RecordSharedFunctionInfoStats(Map* map, HeapObject* obj);
+  static void RecordFixedArrayStats(Map* map, HeapObject* obj);
+};
+
+class MarkCompactObjectStatsVisitor
+    : public StaticMarkingVisitor<MarkCompactObjectStatsVisitor> {
+ public:
+  static void Initialize(VisitorDispatchTable<Callback>* original);
+
+  template <VisitorId id>
+  static inline void Visit(Map* map, HeapObject* obj);
+};
+
+class IncrementalMarkingObjectStatsVisitor
+    : public StaticMarkingVisitor<IncrementalMarkingObjectStatsVisitor> {
+ public:
+  static void Initialize(VisitorDispatchTable<Callback>* original);
 
   template <VisitorId id>
   static inline void Visit(Map* map, HeapObject* obj);
diff --git a/src/heap/objects-visiting-inl.h b/src/heap/objects-visiting-inl.h
index 4373451..6d26ad0 100644
--- a/src/heap/objects-visiting-inl.h
+++ b/src/heap/objects-visiting-inl.h
@@ -77,7 +77,10 @@
       &FlexibleBodyVisitor<StaticVisitor, JSFunction::BodyDescriptorWeakCode,
                            int>::Visit);
 
-  table_.Register(kVisitJSArrayBuffer, &VisitJSArrayBuffer);
+  table_.Register(
+      kVisitJSArrayBuffer,
+      &FlexibleBodyVisitor<StaticVisitor, JSArrayBuffer::BodyDescriptor,
+                           int>::Visit);
 
   table_.Register(kVisitFreeSpace, &VisitFreeSpace);
 
@@ -99,21 +102,6 @@
                                           kVisitStructGeneric>();
 }
 
-
-template <typename StaticVisitor>
-int StaticNewSpaceVisitor<StaticVisitor>::VisitJSArrayBuffer(
-    Map* map, HeapObject* object) {
-  typedef FlexibleBodyVisitor<StaticVisitor, JSArrayBuffer::BodyDescriptor, int>
-      JSArrayBufferBodyVisitor;
-
-  if (!JSArrayBuffer::cast(object)->is_external()) {
-    Heap* heap = map->GetHeap();
-    heap->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(object));
-  }
-  return JSArrayBufferBodyVisitor::Visit(map, object);
-}
-
-
 template <typename StaticVisitor>
 int StaticNewSpaceVisitor<StaticVisitor>::VisitBytecodeArray(
     Map* map, HeapObject* object) {
@@ -185,7 +173,10 @@
 
   table_.Register(kVisitJSFunction, &VisitJSFunction);
 
-  table_.Register(kVisitJSArrayBuffer, &VisitJSArrayBuffer);
+  table_.Register(
+      kVisitJSArrayBuffer,
+      &FlexibleBodyVisitor<StaticVisitor, JSArrayBuffer::BodyDescriptor,
+                           void>::Visit);
 
   // Registration for kVisitJSRegExp is done by StaticVisitor.
 
@@ -385,7 +376,7 @@
   }
   // Enqueue the array in linked list of encountered transition arrays if it is
   // not already in the list.
-  if (array->next_link()->IsUndefined()) {
+  if (array->next_link()->IsUndefined(heap->isolate())) {
     Heap* heap = map->GetHeap();
     array->set_next_link(heap->encountered_transition_arrays(),
                          UPDATE_WEAK_WRITE_BARRIER);
@@ -459,9 +450,6 @@
   if (shared->ic_age() != heap->global_ic_age()) {
     shared->ResetForNewContext(heap->global_ic_age());
   }
-  if (FLAG_cleanup_code_caches_at_gc) {
-    shared->ClearTypeFeedbackInfoAtGCTime();
-  }
   if (FLAG_flush_optimized_code_cache) {
     if (!shared->OptimizedCodeMapIsCleared()) {
       // Always flush the optimized code map if requested by flag.
@@ -492,6 +480,9 @@
                                                           HeapObject* object) {
   Heap* heap = map->GetHeap();
   JSFunction* function = JSFunction::cast(object);
+  if (FLAG_cleanup_code_caches_at_gc) {
+    function->ClearTypeFeedbackInfoAtGCTime();
+  }
   MarkCompactCollector* collector = heap->mark_compact_collector();
   if (collector->is_code_flushing_enabled()) {
     if (IsFlushable(heap, function)) {
@@ -520,24 +511,6 @@
   JSObjectVisitor::Visit(map, object);
 }
 
-
-template <typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitJSArrayBuffer(
-    Map* map, HeapObject* object) {
-  Heap* heap = map->GetHeap();
-
-  typedef FlexibleBodyVisitor<StaticVisitor, JSArrayBuffer::BodyDescriptor,
-                              void> JSArrayBufferBodyVisitor;
-
-  JSArrayBufferBodyVisitor::Visit(map, object);
-
-  if (!JSArrayBuffer::cast(object)->is_external() &&
-      !heap->InNewSpace(object)) {
-    heap->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(object));
-  }
-}
-
-
 template <typename StaticVisitor>
 void StaticMarkingVisitor<StaticVisitor>::VisitBytecodeArray(
     Map* map, HeapObject* object) {
@@ -647,9 +620,10 @@
     return false;
   }
 
-  // We do not (yet?) flush code for generator functions, because we don't know
-  // if there are still live activations (generator objects) on the heap.
-  if (shared_info->is_generator()) {
+  // We do not (yet?) flush code for generator or async functions, because we
+  // don't know if there are still live activations (generator objects) on
+  // the heap.
+  if (shared_info->is_resumable()) {
     return false;
   }
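
With array-buffer bookkeeping moved out of marking, both visitors in this file can register a plain FlexibleBodyVisitor for kVisitJSArrayBuffer instead of a specialized function. A stand-alone model of registering a templated body visitor in a dispatch table (all names invented):

struct MapSketch {};
struct HeapObjectSketch {};

// Stand-in for FlexibleBodyVisitor<StaticVisitor, BodyDescriptor, int>.
template <typename BodyDescriptor>
struct BodyVisitorSketch {
  static int Visit(MapSketch*, HeapObjectSketch*) {
    // A real body visitor would iterate the pointer fields described by
    // BodyDescriptor; here we only report the object size.
    return BodyDescriptor::kSize;
  }
};

struct ArrayBufferBodySketch { static const int kSize = 64; };

using VisitCallback = int (*)(MapSketch*, HeapObjectSketch*);

void RegisterSketch(VisitCallback* table, int id) {
  table[id] = &BodyVisitorSketch<ArrayBufferBodySketch>::Visit;
}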
 
diff --git a/src/heap/objects-visiting.cc b/src/heap/objects-visiting.cc
index dfde574..83e2e1c 100644
--- a/src/heap/objects-visiting.cc
+++ b/src/heap/objects-visiting.cc
@@ -103,6 +103,8 @@
       return kVisitJSArrayBuffer;
 
     case JS_OBJECT_TYPE:
+    case JS_ERROR_TYPE:
+    case JS_ARGUMENTS_TYPE:
     case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
     case JS_GENERATOR_OBJECT_TYPE:
     case JS_MODULE_TYPE:
@@ -212,7 +214,7 @@
         }
       }
       // Retained object is new tail.
-      DCHECK(!retained->IsUndefined());
+      DCHECK(!retained->IsUndefined(heap->isolate()));
       candidate = reinterpret_cast<T*>(retained);
       tail = candidate;
 
diff --git a/src/heap/objects-visiting.h b/src/heap/objects-visiting.h
index 4be40cd..303db0e 100644
--- a/src/heap/objects-visiting.h
+++ b/src/heap/objects-visiting.h
@@ -300,7 +300,6 @@
     return FreeSpace::cast(object)->size();
   }
 
-  INLINE(static int VisitJSArrayBuffer(Map* map, HeapObject* object));
   INLINE(static int VisitBytecodeArray(Map* map, HeapObject* object));
 
   class DataObjectVisitor {
@@ -379,7 +378,6 @@
   INLINE(static void VisitWeakCollection(Map* map, HeapObject* object));
   INLINE(static void VisitJSFunction(Map* map, HeapObject* object));
   INLINE(static void VisitJSRegExp(Map* map, HeapObject* object));
-  INLINE(static void VisitJSArrayBuffer(Map* map, HeapObject* object));
   INLINE(static void VisitNativeContext(Map* map, HeapObject* object));
   INLINE(static void VisitBytecodeArray(Map* map, HeapObject* object));
 
diff --git a/src/heap/remembered-set.cc b/src/heap/remembered-set.cc
index 403c99b..0bc5e6e 100644
--- a/src/heap/remembered-set.cc
+++ b/src/heap/remembered-set.cc
@@ -16,10 +16,7 @@
 template <PointerDirection direction>
 void RememberedSet<direction>::ClearInvalidSlots(Heap* heap) {
   STATIC_ASSERT(direction == OLD_TO_NEW);
-  PageIterator it(heap->old_space());
-  MemoryChunk* chunk;
-  while (it.has_next()) {
-    chunk = it.next();
+  for (MemoryChunk* chunk : *heap->old_space()) {
     SlotSet* slots = GetSlotSet(chunk);
     if (slots != nullptr) {
       slots->Iterate([heap, chunk](Address addr) {
diff --git a/src/heap/remembered-set.h b/src/heap/remembered-set.h
index 45408bf..339748c 100644
--- a/src/heap/remembered-set.h
+++ b/src/heap/remembered-set.h
@@ -5,6 +5,7 @@
 #ifndef V8_REMEMBERED_SET_H
 #define V8_REMEMBERED_SET_H
 
+#include "src/assembler.h"
 #include "src/heap/heap.h"
 #include "src/heap/slot-set.h"
 #include "src/heap/spaces.h"
@@ -14,6 +15,7 @@
 
 enum PointerDirection { OLD_TO_OLD, OLD_TO_NEW };
 
+// TODO(ulan): Investigate performance of de-templatizing this class.
 template <PointerDirection direction>
 class RememberedSet {
  public:
@@ -67,9 +69,7 @@
   // The callback should take (MemoryChunk* chunk) and return void.
   template <typename Callback>
   static void IterateMemoryChunks(Heap* heap, Callback callback) {
-    MemoryChunkIterator it(heap, direction == OLD_TO_OLD
-                                     ? MemoryChunkIterator::ALL
-                                     : MemoryChunkIterator::ALL_BUT_CODE_SPACE);
+    MemoryChunkIterator it(heap);
     MemoryChunk* chunk;
     while ((chunk = it.next()) != nullptr) {
       SlotSet* slots = GetSlotSet(chunk);
@@ -98,62 +98,58 @@
     }
   }
 
-  // Iterates and filters the remembered set with the given callback.
-  // The callback should take (HeapObject** slot, HeapObject* target) and
-  // update the slot.
-  // A special wrapper takes care of filtering the slots based on their values.
-  // For OLD_TO_NEW case: slots that do not point to the ToSpace after
-  // callback invocation will be removed from the set.
-  template <typename Callback>
-  static void IterateWithWrapper(Heap* heap, Callback callback) {
-    Iterate(heap, [heap, callback](Address addr) {
-      return Wrapper(heap, addr, callback);
-    });
-  }
-
-  template <typename Callback>
-  static void IterateWithWrapper(Heap* heap, MemoryChunk* chunk,
-                                 Callback callback) {
-    Iterate(chunk, [heap, callback](Address addr) {
-      return Wrapper(heap, addr, callback);
-    });
-  }
-
   // Given a page and a typed slot in that page, this function adds the slot
   // to the remembered set.
-  static void InsertTyped(Page* page, SlotType slot_type, Address slot_addr) {
-    STATIC_ASSERT(direction == OLD_TO_OLD);
-    TypedSlotSet* slot_set = page->typed_old_to_old_slots();
+  static void InsertTyped(Page* page, Address host_addr, SlotType slot_type,
+                          Address slot_addr) {
+    TypedSlotSet* slot_set = GetTypedSlotSet(page);
     if (slot_set == nullptr) {
-      page->AllocateTypedOldToOldSlots();
-      slot_set = page->typed_old_to_old_slots();
+      AllocateTypedSlotSet(page);
+      slot_set = GetTypedSlotSet(page);
+    }
+    if (host_addr == nullptr) {
+      host_addr = page->address();
     }
     uintptr_t offset = slot_addr - page->address();
+    uintptr_t host_offset = host_addr - page->address();
     DCHECK_LT(offset, static_cast<uintptr_t>(TypedSlotSet::kMaxOffset));
-    slot_set->Insert(slot_type, static_cast<uint32_t>(offset));
+    DCHECK_LT(host_offset, static_cast<uintptr_t>(TypedSlotSet::kMaxOffset));
+    slot_set->Insert(slot_type, static_cast<uint32_t>(host_offset),
+                     static_cast<uint32_t>(offset));
   }
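
InsertTyped above now stores two 32-bit offsets per entry, one for the slot and one for its host object. A worked sketch of that encoding, with invented addresses and types:

#include <cassert>
#include <cstdint>

struct TypedSlotEntry {
  uint32_t host_offset;
  uint32_t slot_offset;
};

TypedSlotEntry EncodeTypedSlot(uintptr_t page_address, uintptr_t host_addr,
                               uintptr_t slot_addr) {
  if (host_addr == 0) host_addr = page_address;  // mirrors the nullptr case
  uintptr_t host_offset = host_addr - page_address;
  uintptr_t slot_offset = slot_addr - page_address;
  assert(host_offset <= UINT32_MAX && slot_offset <= UINT32_MAX);
  return {static_cast<uint32_t>(host_offset),
          static_cast<uint32_t>(slot_offset)};
}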
 
   // Given a page and a range of typed slots in that page, this function removes
   // the slots from the remembered set.
   static void RemoveRangeTyped(Page* page, Address start, Address end) {
-    TypedSlotSet* slots = page->typed_old_to_old_slots();
+    TypedSlotSet* slots = GetTypedSlotSet(page);
     if (slots != nullptr) {
-      slots->Iterate([start, end](SlotType slot_type, Address slot_addr) {
+      slots->Iterate([start, end](SlotType slot_type, Address host_addr,
+                                  Address slot_addr) {
         return start <= slot_addr && slot_addr < end ? REMOVE_SLOT : KEEP_SLOT;
       });
     }
   }
 
+  // Iterates and filters the remembered set with the given callback.
+  // The callback should take (SlotType slot_type, Address host_addr,
+  // Address slot_addr) and return SlotCallbackResult.
+  template <typename Callback>
+  static void IterateTyped(Heap* heap, Callback callback) {
+    IterateMemoryChunks(heap, [callback](MemoryChunk* chunk) {
+      IterateTyped(chunk, callback);
+    });
+  }
+
   // Iterates and filters typed old to old pointers in the given memory chunk
   // with the given callback. The callback should take (SlotType slot_type,
   // Address host_addr, Address slot_addr) and return SlotCallbackResult.
   template <typename Callback>
   static void IterateTyped(MemoryChunk* chunk, Callback callback) {
-    TypedSlotSet* slots = chunk->typed_old_to_old_slots();
+    TypedSlotSet* slots = GetTypedSlotSet(chunk);
     if (slots != nullptr) {
       int new_count = slots->Iterate(callback);
       if (new_count == 0) {
-        chunk->ReleaseTypedOldToOldSlots();
+        ReleaseTypedSlotSet(chunk);
       }
     }
   }
@@ -161,7 +157,7 @@
   // Clear all old to old slots from the remembered set.
   static void ClearAll(Heap* heap) {
     STATIC_ASSERT(direction == OLD_TO_OLD);
-    MemoryChunkIterator it(heap, MemoryChunkIterator::ALL);
+    MemoryChunkIterator it(heap);
     MemoryChunk* chunk;
     while ((chunk = it.next()) != nullptr) {
       chunk->ReleaseOldToOldSlots();
@@ -190,7 +186,7 @@
     if (direction == OLD_TO_OLD) {
       return chunk->typed_old_to_old_slots();
     } else {
-      return nullptr;
+      return chunk->typed_old_to_new_slots();
     }
   }
 
@@ -202,6 +198,14 @@
     }
   }
 
+  static void ReleaseTypedSlotSet(MemoryChunk* chunk) {
+    if (direction == OLD_TO_OLD) {
+      chunk->ReleaseTypedOldToOldSlots();
+    } else {
+      chunk->ReleaseTypedOldToNewSlots();
+    }
+  }
+
   static SlotSet* AllocateSlotSet(MemoryChunk* chunk) {
     if (direction == OLD_TO_OLD) {
       chunk->AllocateOldToOldSlots();
@@ -212,33 +216,135 @@
     }
   }
 
-  template <typename Callback>
-  static SlotCallbackResult Wrapper(Heap* heap, Address slot_address,
-                                    Callback slot_callback) {
-    STATIC_ASSERT(direction == OLD_TO_NEW);
-    Object** slot = reinterpret_cast<Object**>(slot_address);
-    Object* object = *slot;
-    if (heap->InFromSpace(object)) {
-      HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
-      DCHECK(heap_object->IsHeapObject());
-      slot_callback(reinterpret_cast<HeapObject**>(slot), heap_object);
-      object = *slot;
-      // If the object was in from space before and is after executing the
-      // callback in to space, the object is still live.
-      // Unfortunately, we do not know about the slot. It could be in a
-      // just freed free space object.
-      if (heap->InToSpace(object)) {
-        return KEEP_SLOT;
-      }
+  static TypedSlotSet* AllocateTypedSlotSet(MemoryChunk* chunk) {
+    if (direction == OLD_TO_OLD) {
+      chunk->AllocateTypedOldToOldSlots();
+      return chunk->typed_old_to_old_slots();
     } else {
-      DCHECK(!heap->InNewSpace(object));
+      chunk->AllocateTypedOldToNewSlots();
+      return chunk->typed_old_to_new_slots();
     }
-    return REMOVE_SLOT;
   }
 
   static bool IsValidSlot(Heap* heap, MemoryChunk* chunk, Object** slot);
 };
 
+class UpdateTypedSlotHelper {
+ public:
+  // Updates a cell slot using an untyped slot callback.
+  // The callback accepts Object** and returns SlotCallbackResult.
+  template <typename Callback>
+  static SlotCallbackResult UpdateCell(RelocInfo* rinfo, Callback callback) {
+    DCHECK(rinfo->rmode() == RelocInfo::CELL);
+    Object* cell = rinfo->target_cell();
+    Object* old_cell = cell;
+    SlotCallbackResult result = callback(&cell);
+    if (cell != old_cell) {
+      rinfo->set_target_cell(reinterpret_cast<Cell*>(cell));
+    }
+    return result;
+  }
+
+  // Updates a code entry slot using an untyped slot callback.
+  // The callback accepts Object** and returns SlotCallbackResult.
+  template <typename Callback>
+  static SlotCallbackResult UpdateCodeEntry(Address entry_address,
+                                            Callback callback) {
+    Object* code = Code::GetObjectFromEntryAddress(entry_address);
+    Object* old_code = code;
+    SlotCallbackResult result = callback(&code);
+    if (code != old_code) {
+      Memory::Address_at(entry_address) =
+          reinterpret_cast<Code*>(code)->entry();
+    }
+    return result;
+  }
+
+  // Updates a code target slot using an untyped slot callback.
+  // The callback accepts Object** and returns SlotCallbackResult.
+  template <typename Callback>
+  static SlotCallbackResult UpdateCodeTarget(RelocInfo* rinfo,
+                                             Callback callback) {
+    DCHECK(RelocInfo::IsCodeTarget(rinfo->rmode()));
+    Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+    Object* old_target = target;
+    SlotCallbackResult result = callback(&target);
+    if (target != old_target) {
+      rinfo->set_target_address(Code::cast(target)->instruction_start());
+    }
+    return result;
+  }
+
+  // Updates an embedded pointer slot using an untyped slot callback.
+  // The callback accepts Object** and returns SlotCallbackResult.
+  template <typename Callback>
+  static SlotCallbackResult UpdateEmbeddedPointer(RelocInfo* rinfo,
+                                                  Callback callback) {
+    DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
+    Object* target = rinfo->target_object();
+    Object* old_target = target;
+    SlotCallbackResult result = callback(&target);
+    if (target != old_target) {
+      rinfo->set_target_object(target);
+    }
+    return result;
+  }
+
+  // Updates a debug target slot using an untyped slot callback.
+  // The callback accepts Object** and returns SlotCallbackResult.
+  template <typename Callback>
+  static SlotCallbackResult UpdateDebugTarget(RelocInfo* rinfo,
+                                              Callback callback) {
+    DCHECK(RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
+           rinfo->IsPatchedDebugBreakSlotSequence());
+    Object* target =
+        Code::GetCodeFromTargetAddress(rinfo->debug_call_address());
+    SlotCallbackResult result = callback(&target);
+    rinfo->set_debug_call_address(Code::cast(target)->instruction_start());
+    return result;
+  }
+
+  // Updates a typed slot using an untyped slot callback.
+  // The callback accepts Object** and returns SlotCallbackResult.
+  template <typename Callback>
+  static SlotCallbackResult UpdateTypedSlot(Isolate* isolate,
+                                            SlotType slot_type, Address addr,
+                                            Callback callback) {
+    switch (slot_type) {
+      case CODE_TARGET_SLOT: {
+        RelocInfo rinfo(isolate, addr, RelocInfo::CODE_TARGET, 0, NULL);
+        return UpdateCodeTarget(&rinfo, callback);
+      }
+      case CELL_TARGET_SLOT: {
+        RelocInfo rinfo(isolate, addr, RelocInfo::CELL, 0, NULL);
+        return UpdateCell(&rinfo, callback);
+      }
+      case CODE_ENTRY_SLOT: {
+        return UpdateCodeEntry(addr, callback);
+      }
+      case DEBUG_TARGET_SLOT: {
+        RelocInfo rinfo(isolate, addr, RelocInfo::DEBUG_BREAK_SLOT_AT_POSITION,
+                        0, NULL);
+        if (rinfo.IsPatchedDebugBreakSlotSequence()) {
+          return UpdateDebugTarget(&rinfo, callback);
+        }
+        return REMOVE_SLOT;
+      }
+      case EMBEDDED_OBJECT_SLOT: {
+        RelocInfo rinfo(isolate, addr, RelocInfo::EMBEDDED_OBJECT, 0, NULL);
+        return UpdateEmbeddedPointer(&rinfo, callback);
+      }
+      case OBJECT_SLOT: {
+        return callback(reinterpret_cast<Object**>(addr));
+      }
+      case NUMBER_OF_SLOT_TYPES:
+        break;
+    }
+    UNREACHABLE();
+    return REMOVE_SLOT;
+  }
+};
+
 }  // namespace internal
 }  // namespace v8
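
A minimal sketch of how the helper above is meant to be driven: the typed
remembered-set iteration added earlier in this patch supplies (SlotType, host
address, slot address), and UpdateTypedSlot translates the typed slot into an
untyped Object** before invoking the callback. The caller below is assumed for
illustration only and is not part of this change.

// Hypothetical caller; the inner lambda is the untyped callback described in
// the comments above (accepts Object**, returns SlotCallbackResult).
RememberedSet<OLD_TO_OLD>::IterateTyped(
    heap, [isolate](SlotType slot_type, Address host_addr, Address slot_addr) {
      return UpdateTypedSlotHelper::UpdateTypedSlot(
          isolate, slot_type, slot_addr, [](Object** slot) {
            // Placeholder policy: a real updater would rewrite *slot and
            // choose between KEEP_SLOT and REMOVE_SLOT.
            return REMOVE_SLOT;
          });
    });
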
 
diff --git a/src/heap/scavenger-inl.h b/src/heap/scavenger-inl.h
index b8fd1c8..0b6a0f4 100644
--- a/src/heap/scavenger-inl.h
+++ b/src/heap/scavenger-inl.h
@@ -37,10 +37,35 @@
   return ScavengeObjectSlow(p, object);
 }
 
+SlotCallbackResult Scavenger::CheckAndScavengeObject(Heap* heap,
+                                                     Address slot_address) {
+  Object** slot = reinterpret_cast<Object**>(slot_address);
+  Object* object = *slot;
+  if (heap->InFromSpace(object)) {
+    HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
+    DCHECK(heap_object->IsHeapObject());
+
+    ScavengeObject(reinterpret_cast<HeapObject**>(slot), heap_object);
+
+    object = *slot;
+    // If the object was in from-space before the scavenge and is in to-space
+    // afterwards, it is still live. Unfortunately, we know nothing about the
+    // slot itself; it could be located inside a just-freed free-space object.
+    if (heap->InToSpace(object)) {
+      return KEEP_SLOT;
+    }
+  } else {
+    DCHECK(!heap->InNewSpace(object));
+  }
+  return REMOVE_SLOT;
+}
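
CheckAndScavengeObject factors the slot filtering out of the removed
RememberedSet::Wrapper: scavenge the referent if it sits in from-space, then
keep the slot only when the forwarded object landed in to-space. A hedged
sketch of the intended call site, assuming an untyped
RememberedSet<OLD_TO_NEW>::Iterate whose callback receives the recorded slot
address:

// Assumed wiring; the Iterate entry point and its callback signature are an
// assumption for illustration.
RememberedSet<OLD_TO_NEW>::Iterate(heap, [heap](Address slot_addr) {
  return Scavenger::CheckAndScavengeObject(heap, slot_addr);
});
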
 
 // static
-void StaticScavengeVisitor::VisitPointer(Heap* heap, HeapObject* obj,
-                                         Object** p) {
+template <PromotionMode promotion_mode>
+void StaticScavengeVisitor<promotion_mode>::VisitPointer(Heap* heap,
+                                                         HeapObject* obj,
+                                                         Object** p) {
   Object* object = *p;
   if (!heap->InNewSpace(object)) return;
   Scavenger::ScavengeObject(reinterpret_cast<HeapObject**>(p),
diff --git a/src/heap/scavenger.cc b/src/heap/scavenger.cc
index 456d8a4..9b8bfc2 100644
--- a/src/heap/scavenger.cc
+++ b/src/heap/scavenger.cc
@@ -10,7 +10,6 @@
 #include "src/heap/scavenger-inl.h"
 #include "src/isolate.h"
 #include "src/log.h"
-#include "src/profiler/cpu-profiler.h"
 
 namespace v8 {
 namespace internal {
@@ -23,8 +22,7 @@
 
 enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS };
 
-
-template <MarksHandling marks_handling,
+template <MarksHandling marks_handling, PromotionMode promotion_mode,
           LoggingAndProfiling logging_and_profiling_mode>
 class ScavengingVisitor : public StaticVisitorBase {
  public:
@@ -37,7 +35,8 @@
     table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray);
     table_.Register(kVisitFixedTypedArray, &EvacuateFixedTypedArray);
     table_.Register(kVisitFixedFloat64Array, &EvacuateFixedFloat64Array);
-    table_.Register(kVisitJSArrayBuffer, &EvacuateJSArrayBuffer);
+    table_.Register(kVisitJSArrayBuffer,
+                    &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
 
     table_.Register(
         kVisitNativeContext,
@@ -200,7 +199,6 @@
     return false;
   }
 
-
   template <ObjectContents object_contents, AllocationAlignment alignment>
   static inline void EvacuateObject(Map* map, HeapObject** slot,
                                     HeapObject* object, int object_size) {
@@ -208,7 +206,8 @@
     SLOW_DCHECK(object->Size() == object_size);
     Heap* heap = map->GetHeap();
 
-    if (!heap->ShouldBePromoted(object->address(), object_size)) {
+    if (!heap->ShouldBePromoted<promotion_mode>(object->address(),
+                                                object_size)) {
       // A semi-space copy may fail due to fragmentation. In that case, we
       // try to promote the object.
       if (SemiSpaceCopyObject<alignment>(map, slot, object, object_size)) {
@@ -220,14 +219,15 @@
                                                   object_size)) {
       return;
     }
-
+    if (promotion_mode == PROMOTE_MARKED) {
+      FatalProcessOutOfMemory("Scavenger: promoting marked\n");
+    }
     // If promotion failed, we try to copy the object to the other semi-space
     if (SemiSpaceCopyObject<alignment>(map, slot, object, object_size)) return;
 
     FatalProcessOutOfMemory("Scavenger: semi-space copy\n");
   }
 
-
   static inline void EvacuateJSFunction(Map* map, HeapObject** slot,
                                         HeapObject* object) {
     ObjectEvacuationStrategy<POINTER_OBJECT>::Visit(map, slot, object);
@@ -252,7 +252,6 @@
     }
   }
 
-
   static inline void EvacuateFixedArray(Map* map, HeapObject** slot,
                                         HeapObject* object) {
     int length = reinterpret_cast<FixedArray*>(object)->synchronized_length();
@@ -261,7 +260,6 @@
                                                  object_size);
   }
 
-
   static inline void EvacuateFixedDoubleArray(Map* map, HeapObject** slot,
                                               HeapObject* object) {
     int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
@@ -269,7 +267,6 @@
     EvacuateObject<DATA_OBJECT, kDoubleAligned>(map, slot, object, object_size);
   }
 
-
   static inline void EvacuateFixedTypedArray(Map* map, HeapObject** slot,
                                              HeapObject* object) {
     int object_size = reinterpret_cast<FixedTypedArrayBase*>(object)->size();
@@ -277,7 +274,6 @@
                                                  object_size);
   }
 
-
   static inline void EvacuateFixedFloat64Array(Map* map, HeapObject** slot,
                                                HeapObject* object) {
     int object_size = reinterpret_cast<FixedFloat64Array*>(object)->size();
@@ -285,28 +281,12 @@
                                                    object_size);
   }
 
-
-  static inline void EvacuateJSArrayBuffer(Map* map, HeapObject** slot,
-                                           HeapObject* object) {
-    ObjectEvacuationStrategy<POINTER_OBJECT>::Visit(map, slot, object);
-
-    Heap* heap = map->GetHeap();
-    MapWord map_word = object->map_word();
-    DCHECK(map_word.IsForwardingAddress());
-    HeapObject* target = map_word.ToForwardingAddress();
-    if (!heap->InNewSpace(target)) {
-      heap->array_buffer_tracker()->Promote(JSArrayBuffer::cast(target));
-    }
-  }
-
-
   static inline void EvacuateByteArray(Map* map, HeapObject** slot,
                                        HeapObject* object) {
     int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
     EvacuateObject<DATA_OBJECT, kWordAligned>(map, slot, object, object_size);
   }
 
-
   static inline void EvacuateSeqOneByteString(Map* map, HeapObject** slot,
                                               HeapObject* object) {
     int object_size = SeqOneByteString::cast(object)
@@ -314,7 +294,6 @@
     EvacuateObject<DATA_OBJECT, kWordAligned>(map, slot, object, object_size);
   }
 
-
   static inline void EvacuateSeqTwoByteString(Map* map, HeapObject** slot,
                                               HeapObject* object) {
     int object_size = SeqTwoByteString::cast(object)
@@ -322,7 +301,6 @@
     EvacuateObject<DATA_OBJECT, kWordAligned>(map, slot, object, object_size);
   }
 
-
   static inline void EvacuateShortcutCandidate(Map* map, HeapObject** slot,
                                                HeapObject* object) {
     DCHECK(IsShortcutCandidate(map->instance_type()));
@@ -380,21 +358,21 @@
   static VisitorDispatchTable<ScavengingCallback> table_;
 };
 
-
-template <MarksHandling marks_handling,
+template <MarksHandling marks_handling, PromotionMode promotion_mode,
           LoggingAndProfiling logging_and_profiling_mode>
-VisitorDispatchTable<ScavengingCallback>
-    ScavengingVisitor<marks_handling, logging_and_profiling_mode>::table_;
-
+VisitorDispatchTable<ScavengingCallback> ScavengingVisitor<
+    marks_handling, promotion_mode, logging_and_profiling_mode>::table_;
 
 // static
 void Scavenger::Initialize() {
-  ScavengingVisitor<TRANSFER_MARKS,
+  ScavengingVisitor<TRANSFER_MARKS, PROMOTE_MARKED,
                     LOGGING_AND_PROFILING_DISABLED>::Initialize();
-  ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::Initialize();
-  ScavengingVisitor<TRANSFER_MARKS,
+  ScavengingVisitor<IGNORE_MARKS, DEFAULT_PROMOTION,
+                    LOGGING_AND_PROFILING_DISABLED>::Initialize();
+  ScavengingVisitor<TRANSFER_MARKS, PROMOTE_MARKED,
                     LOGGING_AND_PROFILING_ENABLED>::Initialize();
-  ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::Initialize();
+  ScavengingVisitor<IGNORE_MARKS, DEFAULT_PROMOTION,
+                    LOGGING_AND_PROFILING_ENABLED>::Initialize();
 }
 
 
@@ -412,28 +390,28 @@
 void Scavenger::SelectScavengingVisitorsTable() {
   bool logging_and_profiling =
       FLAG_verify_predictable || isolate()->logger()->is_logging() ||
-      isolate()->cpu_profiler()->is_profiling() ||
+      isolate()->is_profiling() ||
       (isolate()->heap_profiler() != NULL &&
        isolate()->heap_profiler()->is_tracking_object_moves());
 
   if (!heap()->incremental_marking()->IsMarking()) {
     if (!logging_and_profiling) {
       scavenging_visitors_table_.CopyFrom(
-          ScavengingVisitor<IGNORE_MARKS,
+          ScavengingVisitor<IGNORE_MARKS, DEFAULT_PROMOTION,
                             LOGGING_AND_PROFILING_DISABLED>::GetTable());
     } else {
       scavenging_visitors_table_.CopyFrom(
-          ScavengingVisitor<IGNORE_MARKS,
+          ScavengingVisitor<IGNORE_MARKS, DEFAULT_PROMOTION,
                             LOGGING_AND_PROFILING_ENABLED>::GetTable());
     }
   } else {
     if (!logging_and_profiling) {
       scavenging_visitors_table_.CopyFrom(
-          ScavengingVisitor<TRANSFER_MARKS,
+          ScavengingVisitor<TRANSFER_MARKS, PROMOTE_MARKED,
                             LOGGING_AND_PROFILING_DISABLED>::GetTable());
     } else {
       scavenging_visitors_table_.CopyFrom(
-          ScavengingVisitor<TRANSFER_MARKS,
+          ScavengingVisitor<TRANSFER_MARKS, PROMOTE_MARKED,
                             LOGGING_AND_PROFILING_ENABLED>::GetTable());
     }
 
@@ -467,6 +445,8 @@
   Object* object = *p;
   if (!heap_->InNewSpace(object)) return;
 
+  if (heap_->PurgeLeftTrimmedObject(p)) return;
+
   Scavenger::ScavengeObject(reinterpret_cast<HeapObject**>(p),
                             reinterpret_cast<HeapObject*>(object));
 }
diff --git a/src/heap/scavenger.h b/src/heap/scavenger.h
index 5d0abf4..f2213b8 100644
--- a/src/heap/scavenger.h
+++ b/src/heap/scavenger.h
@@ -6,6 +6,7 @@
 #define V8_HEAP_SCAVENGER_H_
 
 #include "src/heap/objects-visiting.h"
+#include "src/heap/slot-set.h"
 
 namespace v8 {
 namespace internal {
@@ -25,6 +26,8 @@
   // ensure the precondition that the object is (a) a heap object and (b) in
   // the heap's from space.
   static inline void ScavengeObject(HeapObject** p, HeapObject* object);
+  static inline SlotCallbackResult CheckAndScavengeObject(Heap* heap,
+                                                          Address slot_address);
 
   // Slow part of {ScavengeObject} above.
   static void ScavengeObjectSlow(HeapObject** p, HeapObject* object);
@@ -60,8 +63,9 @@
 
 // Helper class for turning the scavenger into an object visitor that is also
 // filtering out non-HeapObjects and objects which do not reside in new space.
+template <PromotionMode promotion_mode>
 class StaticScavengeVisitor
-    : public StaticNewSpaceVisitor<StaticScavengeVisitor> {
+    : public StaticNewSpaceVisitor<StaticScavengeVisitor<promotion_mode>> {
  public:
   static inline void VisitPointer(Heap* heap, HeapObject* object, Object** p);
 };
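
Since StaticScavengeVisitor is now templated on the promotion mode, call sites
must name a concrete instantiation. A brief sketch (the chosen mode is
illustrative):

// Hypothetical call site; DEFAULT_PROMOTION is one of the two modes used in
// scavenger.cc above.
StaticScavengeVisitor<DEFAULT_PROMOTION>::VisitPointer(heap, object, slot);
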
diff --git a/src/heap/slot-set.h b/src/heap/slot-set.h
index e55ffe9..2fac50f 100644
--- a/src/heap/slot-set.h
+++ b/src/heap/slot-set.h
@@ -217,7 +217,6 @@
 enum SlotType {
   EMBEDDED_OBJECT_SLOT,
   OBJECT_SLOT,
-  RELOCATED_CODE_OBJECT,
   CELL_TARGET_SLOT,
   CODE_TARGET_SLOT,
   CODE_ENTRY_SLOT,
@@ -234,7 +233,30 @@
 // typed slots contain V8 internal pointers that are not directly exposed to JS.
 class TypedSlotSet {
  public:
-  typedef uint32_t TypedSlot;
+  struct TypedSlot {
+    TypedSlot() : type_and_offset_(0), host_offset_(0) {}
+
+    TypedSlot(SlotType type, uint32_t host_offset, uint32_t offset)
+        : type_and_offset_(TypeField::encode(type) |
+                           OffsetField::encode(offset)),
+          host_offset_(host_offset) {}
+
+    bool operator==(const TypedSlot& other) const {
+      return type_and_offset_ == other.type_and_offset_ &&
+             host_offset_ == other.host_offset_;
+    }
+
+    bool operator!=(const TypedSlot& other) const { return !(*this == other); }
+
+    SlotType type() const { return TypeField::decode(type_and_offset_); }
+
+    uint32_t offset() const { return OffsetField::decode(type_and_offset_); }
+
+    uint32_t host_offset() const { return host_offset_; }
+
+    uint32_t type_and_offset_;
+    uint32_t host_offset_;
+  };
   static const int kMaxOffset = 1 << 29;
 
   explicit TypedSlotSet(Address page_start) : page_start_(page_start) {
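
TypedSlot packs the slot's type and page offset into a single 32-bit word: the
OffsetField/TypeField bit fields declared at the bottom of this class give the
offset the low 29 bits and the type the top 3, which is also why kMaxOffset is
1 << 29. The host offset gets its own word. A worked encode/decode with
illustrative values:

// By-hand equivalent of the BitField<int, 0, 29> / BitField<SlotType, 29, 3>
// packing; values are illustrative.
uint32_t offset = 256;  // slot at page_start_ + 256
uint32_t encoded = (static_cast<uint32_t>(CODE_TARGET_SLOT) << 29) | offset;
SlotType type = static_cast<SlotType>(encoded >> 29);  // CODE_TARGET_SLOT
uint32_t decoded = encoded & ((1u << 29) - 1);         // 256
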
@@ -251,8 +273,8 @@
   }
 
   // The offsets specify a slot at address page_start_ + offset whose host
   // object starts at page_start_ + host_offset.
-  void Insert(SlotType type, int offset) {
-    TypedSlot slot = ToTypedSlot(type, offset);
+  void Insert(SlotType type, uint32_t host_offset, uint32_t offset) {
+    TypedSlot slot(type, host_offset, offset);
     if (!chunk_->AddSlot(slot)) {
       chunk_ = new Chunk(chunk_, NextCapacity(chunk_->capacity));
       bool added = chunk_->AddSlot(slot);
@@ -273,7 +295,7 @@
   template <typename Callback>
   int Iterate(Callback callback) {
     STATIC_ASSERT(NUMBER_OF_SLOT_TYPES < 8);
-    const TypedSlot kRemovedSlot = TypeField::encode(NUMBER_OF_SLOT_TYPES);
+    const TypedSlot kRemovedSlot(NUMBER_OF_SLOT_TYPES, 0, 0);
     Chunk* chunk = chunk_;
     int new_count = 0;
     while (chunk != nullptr) {
@@ -282,9 +304,10 @@
       for (int i = 0; i < count; i++) {
         TypedSlot slot = buffer[i];
         if (slot != kRemovedSlot) {
-          SlotType type = TypeField::decode(slot);
-          Address addr = page_start_ + OffsetField::decode(slot);
-          if (callback(type, addr) == KEEP_SLOT) {
+          SlotType type = slot.type();
+          Address addr = page_start_ + slot.offset();
+          Address host_addr = page_start_ + slot.host_offset();
+          if (callback(type, host_addr, addr) == KEEP_SLOT) {
             new_count++;
           } else {
             buffer[i] = kRemovedSlot;
@@ -304,10 +327,6 @@
     return Min(kMaxBufferSize, capacity * 2);
   }
 
-  static TypedSlot ToTypedSlot(SlotType type, int offset) {
-    return TypeField::encode(type) | OffsetField::encode(offset);
-  }
-
   class OffsetField : public BitField<int, 0, 29> {};
   class TypeField : public BitField<SlotType, 29, 3> {};
 
diff --git a/src/heap/spaces-inl.h b/src/heap/spaces-inl.h
index f9e40bb..dbf3fff 100644
--- a/src/heap/spaces-inl.h
+++ b/src/heap/spaces-inl.h
@@ -15,6 +15,24 @@
 namespace v8 {
 namespace internal {
 
+template <class PAGE_TYPE>
+PageIteratorImpl<PAGE_TYPE>& PageIteratorImpl<PAGE_TYPE>::operator++() {
+  p_ = p_->next_page();
+  return *this;
+}
+
+template <class PAGE_TYPE>
+PageIteratorImpl<PAGE_TYPE> PageIteratorImpl<PAGE_TYPE>::operator++(int) {
+  PageIteratorImpl<PAGE_TYPE> tmp(*this);
+  operator++();
+  return tmp;
+}
+
+NewSpacePageRange::NewSpacePageRange(Address start, Address limit)
+    : range_(Page::FromAddress(start),
+             Page::FromAllocationAreaAddress(limit)->next_page()) {
+  SemiSpace::AssertValidRange(start, limit);
+}
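
With operator++ and the page ranges in place, the hand-rolled
has_next()/next() loops removed throughout this patch collapse into
range-based for loops, for example (mirroring SemiSpace::set_age_mark further
down):

for (Page* p : NewSpacePageRange(start, limit)) {
  p->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
}
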
 
 // -----------------------------------------------------------------------------
 // Bitmap
@@ -31,25 +49,6 @@
     bitmap->cells()[i] = 0xffffffff;
 }
 
-// -----------------------------------------------------------------------------
-// PageIterator
-
-PageIterator::PageIterator(PagedSpace* space)
-    : space_(space),
-      prev_page_(&space->anchor_),
-      next_page_(prev_page_->next_page()) {}
-
-
-bool PageIterator::has_next() { return next_page_ != &space_->anchor_; }
-
-
-Page* PageIterator::next() {
-  DCHECK(has_next());
-  prev_page_ = next_page_;
-  next_page_ = next_page_->next_page();
-  return prev_page_;
-}
-
 
 // -----------------------------------------------------------------------------
 // SemiSpaceIterator
@@ -72,56 +71,17 @@
   return nullptr;
 }
 
-
-HeapObject* SemiSpaceIterator::next_object() { return Next(); }
-
-
-// -----------------------------------------------------------------------------
-// NewSpacePageIterator
-
-NewSpacePageIterator::NewSpacePageIterator(NewSpace* space)
-    : prev_page_(Page::FromAddress(space->ToSpaceStart())->prev_page()),
-      next_page_(Page::FromAddress(space->ToSpaceStart())),
-      last_page_(Page::FromAllocationAreaAddress(space->ToSpaceEnd())) {}
-
-NewSpacePageIterator::NewSpacePageIterator(SemiSpace* space)
-    : prev_page_(space->anchor()),
-      next_page_(prev_page_->next_page()),
-      last_page_(prev_page_->prev_page()) {}
-
-NewSpacePageIterator::NewSpacePageIterator(Address start, Address limit)
-    : prev_page_(Page::FromAddress(start)->prev_page()),
-      next_page_(Page::FromAddress(start)),
-      last_page_(Page::FromAllocationAreaAddress(limit)) {
-  SemiSpace::AssertValidRange(start, limit);
-}
-
-
-bool NewSpacePageIterator::has_next() { return prev_page_ != last_page_; }
-
-Page* NewSpacePageIterator::next() {
-  DCHECK(has_next());
-  prev_page_ = next_page_;
-  next_page_ = next_page_->next_page();
-  return prev_page_;
-}
-
-
 // -----------------------------------------------------------------------------
 // HeapObjectIterator
 
 HeapObject* HeapObjectIterator::Next() {
   do {
     HeapObject* next_obj = FromCurrentPage();
-    if (next_obj != NULL) return next_obj;
+    if (next_obj != nullptr) return next_obj;
   } while (AdvanceToNextPage());
-  return NULL;
+  return nullptr;
 }
 
-
-HeapObject* HeapObjectIterator::next_object() { return Next(); }
-
-
 HeapObject* HeapObjectIterator::FromCurrentPage() {
   while (cur_addr_ != cur_end_) {
     if (cur_addr_ == space_->top() && cur_addr_ != space_->limit()) {
@@ -129,15 +89,9 @@
       continue;
     }
     HeapObject* obj = HeapObject::FromAddress(cur_addr_);
-    int obj_size = obj->Size();
+    const int obj_size = obj->Size();
     cur_addr_ += obj_size;
-    DCHECK(cur_addr_ <= cur_end_);
-    // TODO(hpayer): Remove the debugging code.
-    if (cur_addr_ > cur_end_) {
-      space_->heap()->isolate()->PushStackTraceAndDie(0xaaaaaaaa, obj, NULL,
-                                                      obj_size);
-    }
-
+    DCHECK_LE(cur_addr_, cur_end_);
     if (!obj->IsFiller()) {
       if (obj->IsCode()) {
         DCHECK_EQ(space_, space_->heap()->code_space());
@@ -148,21 +102,7 @@
       return obj;
     }
   }
-  return NULL;
-}
-
-// -----------------------------------------------------------------------------
-// LargePageIterator
-
-LargePageIterator::LargePageIterator(LargeObjectSpace* space)
-    : next_page_(space->first_page()) {}
-
-LargePage* LargePageIterator::next() {
-  LargePage* result = next_page_;
-  if (next_page_ != nullptr) {
-    next_page_ = next_page_->next_page();
-  }
-  return result;
+  return nullptr;
 }
 
 // -----------------------------------------------------------------------------
@@ -209,9 +149,8 @@
 }
 
 bool SemiSpace::ContainsSlow(Address a) {
-  NewSpacePageIterator it(this);
-  while (it.has_next()) {
-    if (it.next() == MemoryChunk::FromAddress(a)) return true;
+  for (Page* p : *this) {
+    if (p == MemoryChunk::FromAddress(a)) return true;
   }
   return false;
 }
@@ -260,6 +199,7 @@
                                        : MemoryChunk::IN_TO_SPACE));
   Page* page = static_cast<Page*>(chunk);
   heap->incremental_marking()->SetNewSpacePageFlags(page);
+  page->AllocateLocalTracker();
   return page;
 }
 
@@ -270,7 +210,6 @@
 Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
                        PagedSpace* owner) {
   Page* page = reinterpret_cast<Page*>(chunk);
-  page->mutex_ = new base::Mutex();
   DCHECK(page->area_size() <= kAllocatableMemory);
   DCHECK(chunk->owner() == owner);
 
@@ -311,8 +250,8 @@
 
 void MemoryChunk::ResetLiveBytes() {
   if (FLAG_trace_live_bytes) {
-    PrintIsolate(heap()->isolate(), "live-bytes: reset page=%p %d->0\n", this,
-                 live_byte_count_);
+    PrintIsolate(heap()->isolate(), "live-bytes: reset page=%p %d->0\n",
+                 static_cast<void*>(this), live_byte_count_);
   }
   live_byte_count_ = 0;
 }
@@ -320,9 +259,9 @@
 void MemoryChunk::IncrementLiveBytes(int by) {
   if (IsFlagSet(BLACK_PAGE)) return;
   if (FLAG_trace_live_bytes) {
-    PrintIsolate(heap()->isolate(),
-                 "live-bytes: update page=%p delta=%d %d->%d\n", this, by,
-                 live_byte_count_, live_byte_count_ + by);
+    PrintIsolate(
+        heap()->isolate(), "live-bytes: update page=%p delta=%d %d->%d\n",
+        static_cast<void*>(this), by, live_byte_count_, live_byte_count_ + by);
   }
   live_byte_count_ += by;
   DCHECK_GE(live_byte_count_, 0);
@@ -382,6 +321,7 @@
 }
 
 void Page::MarkNeverAllocateForTesting() {
+  DCHECK(this->owner()->identity() != NEW_SPACE);
   DCHECK(!IsFlagSet(NEVER_ALLOCATE_ON_PAGE));
   SetFlag(NEVER_ALLOCATE_ON_PAGE);
   reinterpret_cast<PagedSpace*>(owner())->free_list()->EvictFreeListItems(this);
@@ -404,42 +344,34 @@
   InitializeFreeListCategories();
 }
 
-MemoryChunkIterator::MemoryChunkIterator(Heap* heap, Mode mode)
-    : state_(kOldSpaceState),
-      mode_(mode),
-      old_iterator_(heap->old_space()),
-      code_iterator_(heap->code_space()),
-      map_iterator_(heap->map_space()),
-      lo_iterator_(heap->lo_space()) {}
+MemoryChunkIterator::MemoryChunkIterator(Heap* heap)
+    : heap_(heap),
+      state_(kOldSpaceState),
+      old_iterator_(heap->old_space()->begin()),
+      code_iterator_(heap->code_space()->begin()),
+      map_iterator_(heap->map_space()->begin()),
+      lo_iterator_(heap->lo_space()->begin()) {}
 
 MemoryChunk* MemoryChunkIterator::next() {
   switch (state_) {
     case kOldSpaceState: {
-      if (old_iterator_.has_next()) {
-        return old_iterator_.next();
-      }
+      if (old_iterator_ != heap_->old_space()->end()) return *(old_iterator_++);
       state_ = kMapState;
       // Fall through.
     }
     case kMapState: {
-      if (mode_ != ALL_BUT_MAP_SPACE && map_iterator_.has_next()) {
-        return map_iterator_.next();
-      }
+      if (map_iterator_ != heap_->map_space()->end()) return *(map_iterator_++);
       state_ = kCodeState;
       // Fall through.
     }
     case kCodeState: {
-      if (mode_ != ALL_BUT_CODE_SPACE && code_iterator_.has_next()) {
-        return code_iterator_.next();
-      }
+      if (code_iterator_ != heap_->code_space()->end())
+        return *(code_iterator_++);
       state_ = kLargeObjectState;
       // Fall through.
     }
     case kLargeObjectState: {
-      MemoryChunk* answer = lo_iterator_.next();
-      if (answer != nullptr) {
-        return answer;
-      }
+      if (lo_iterator_ != heap_->lo_space()->end()) return *(lo_iterator_++);
       state_ = kFinishedState;
       // Fall through.
     }
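
next() is a small fall-through state machine: each case drains one space's
page iterator and then advances state_, so a single call can roll over from
one space into the next. Callers keep the loop shape already used by
ClearAll() earlier in this patch:

MemoryChunkIterator it(heap);
MemoryChunk* chunk;
while ((chunk = it.next()) != nullptr) {
  // ... process |chunk| ...
}
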
diff --git a/src/heap/spaces.cc b/src/heap/spaces.cc
index e517c45..1dcd044 100644
--- a/src/heap/spaces.cc
+++ b/src/heap/spaces.cc
@@ -4,10 +4,13 @@
 
 #include "src/heap/spaces.h"
 
+#include <utility>
+
 #include "src/base/bits.h"
 #include "src/base/platform/platform.h"
 #include "src/base/platform/semaphore.h"
 #include "src/full-codegen/full-codegen.h"
+#include "src/heap/array-buffer-tracker.h"
 #include "src/heap/slot-set.h"
 #include "src/macro-assembler.h"
 #include "src/msan.h"
@@ -21,50 +24,34 @@
 // ----------------------------------------------------------------------------
 // HeapObjectIterator
 
-HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
-  // You can't actually iterate over the anchor page.  It is not a real page,
-  // just an anchor for the double linked page list.  Initialize as if we have
-  // reached the end of the anchor page, then the first iteration will move on
-  // to the first page.
-  Initialize(space, NULL, NULL, kAllPagesInSpace);
-}
+HeapObjectIterator::HeapObjectIterator(PagedSpace* space)
+    : cur_addr_(nullptr),
+      cur_end_(nullptr),
+      space_(space),
+      page_range_(space->anchor()->next_page(), space->anchor()),
+      current_page_(page_range_.begin()) {}
 
-
-HeapObjectIterator::HeapObjectIterator(Page* page) {
+HeapObjectIterator::HeapObjectIterator(Page* page)
+    : cur_addr_(nullptr),
+      cur_end_(nullptr),
+      space_(reinterpret_cast<PagedSpace*>(page->owner())),
+      page_range_(page),
+      current_page_(page_range_.begin()) {
+#ifdef DEBUG
   Space* owner = page->owner();
   DCHECK(owner == page->heap()->old_space() ||
          owner == page->heap()->map_space() ||
          owner == page->heap()->code_space());
-  Initialize(reinterpret_cast<PagedSpace*>(owner), page->area_start(),
-             page->area_end(), kOnePageOnly);
-  DCHECK(page->SweepingDone());
+#endif  // DEBUG
 }
 
-
-void HeapObjectIterator::Initialize(PagedSpace* space, Address cur, Address end,
-                                    HeapObjectIterator::PageMode mode) {
-  space_ = space;
-  cur_addr_ = cur;
-  cur_end_ = end;
-  page_mode_ = mode;
-}
-
-
 // We have hit the end of the current page and should advance to the next
 // block of objects.
 bool HeapObjectIterator::AdvanceToNextPage() {
-  DCHECK(cur_addr_ == cur_end_);
-  if (page_mode_ == kOnePageOnly) return false;
-  Page* cur_page;
-  if (cur_addr_ == NULL) {
-    cur_page = space_->anchor();
-  } else {
-    cur_page = Page::FromAddress(cur_addr_ - 1);
-    DCHECK(cur_addr_ == cur_page->area_end());
-  }
-  cur_page = cur_page->next_page();
-  if (cur_page == space_->anchor()) return false;
-  cur_page->heap()
+  DCHECK_EQ(cur_addr_, cur_end_);
+  if (current_page_ == page_range_.end()) return false;
+  Page* cur_page = *(current_page_++);
+  space_->heap()
       ->mark_compact_collector()
       ->sweeper()
       .SweepOrWaitUntilSweepingCompleted(cur_page);
@@ -119,15 +106,16 @@
     requested = kMinimumCodeRangeSize;
   }
 
+  const size_t reserved_area =
+      kReservedCodeRangePages * base::OS::CommitPageSize();
+  if (requested < (kMaximalCodeRangeSize - reserved_area))
+    requested += reserved_area;
+
   DCHECK(!kRequiresCodeRange || requested <= kMaximalCodeRangeSize);
-#ifdef V8_TARGET_ARCH_MIPS64
-  // To use pseudo-relative jumps such as j/jal instructions which have 28-bit
-  // encoded immediate, the addresses have to be in range of 256Mb aligned
-  // region.
-  code_range_ = new base::VirtualMemory(requested, kMaximalCodeRangeSize);
-#else
-  code_range_ = new base::VirtualMemory(requested);
-#endif
+
+  code_range_ = new base::VirtualMemory(
+      requested, Max(kCodeRangeAreaAlignment,
+                     static_cast<size_t>(base::OS::AllocateAlignment())));
   CHECK(code_range_ != NULL);
   if (!code_range_->IsReserved()) {
     delete code_range_;
@@ -141,18 +129,16 @@
 
   // On some platforms, specifically Win64, we need to reserve some pages at
   // the beginning of an executable space.
-  if (kReservedCodeRangePages) {
-    if (!code_range_->Commit(
-            base, kReservedCodeRangePages * base::OS::CommitPageSize(), true)) {
+  if (reserved_area > 0) {
+    if (!code_range_->Commit(base, reserved_area, true)) {
       delete code_range_;
       code_range_ = NULL;
       return false;
     }
-    base += kReservedCodeRangePages * base::OS::CommitPageSize();
+    base += reserved_area;
   }
   Address aligned_base = RoundUp(base, MemoryChunk::kAlignment);
-  size_t size = code_range_->size() - (aligned_base - base) -
-                kReservedCodeRangePages * base::OS::CommitPageSize();
+  size_t size = code_range_->size() - (aligned_base - base) - reserved_area;
   allocation_list_.Add(FreeBlock(aligned_base, size));
   current_allocation_block_index_ = 0;
 
@@ -364,6 +350,7 @@
 };
 
 void MemoryAllocator::Unmapper::FreeQueuedChunks() {
+  ReconsiderDelayedChunks();
   if (FLAG_concurrent_sweeping) {
     V8::GetCurrentPlatform()->CallOnBackgroundThread(
         new UnmapFreeMemoryTask(this), v8::Platform::kShortRunningTask);
@@ -397,6 +384,24 @@
   }
 }
 
+void MemoryAllocator::Unmapper::ReconsiderDelayedChunks() {
+  std::list<MemoryChunk*> delayed_chunks(std::move(delayed_regular_chunks_));
+  // |delayed_chunks| was move-constructed from it, so the member list should
+  // now be empty.
+  DCHECK(delayed_regular_chunks_.empty());
+  for (auto it = delayed_chunks.begin(); it != delayed_chunks.end(); ++it) {
+    AddMemoryChunkSafe<kRegular>(*it);
+  }
+}
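
The move-construction is what makes re-adding safe: the member list is emptied
up front, so AddMemoryChunkSafe may append to it again while the local copy is
walked. A stand-alone model of the pattern (types simplified to placeholders):

#include <list>

std::list<int> pending;  // stands in for delayed_regular_chunks_

void Reconsider() {
  std::list<int> local(std::move(pending));  // |pending| is now empty
  for (int chunk : local) {
    // Re-offering may legitimately push the chunk back onto |pending|.
    pending.push_back(chunk);
  }
}
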
+
+bool MemoryAllocator::CanFreeMemoryChunk(MemoryChunk* chunk) {
+  MarkCompactCollector* mc = isolate_->heap()->mark_compact_collector();
+  // We cannot free memory chunks in new space while the sweeper is running
+  // since a sweeper thread might be stuck right before trying to lock the
+  // corresponding page.
+  return !chunk->InNewSpace() || (mc == nullptr) ||
+         mc->sweeper().IsSweepingCompleted();
+}
+
 bool MemoryAllocator::CommitMemory(Address base, size_t size,
                                    Executability executable) {
   if (!base::VirtualMemory::CommitRegion(base, size,
@@ -414,8 +419,8 @@
   // Code which is part of the code-range does not have its own VirtualMemory.
   DCHECK(code_range() == NULL ||
          !code_range()->contains(static_cast<Address>(reservation->address())));
-  DCHECK(executable == NOT_EXECUTABLE || code_range() == NULL ||
-         !code_range()->valid() || reservation->size() <= Page::kPageSize);
+  DCHECK(executable == NOT_EXECUTABLE || !code_range()->valid() ||
+         reservation->size() <= Page::kPageSize);
 
   reservation->Release();
 }
@@ -429,8 +434,7 @@
     DCHECK(executable == EXECUTABLE);
     code_range()->FreeRawMemory(base, size);
   } else {
-    DCHECK(executable == NOT_EXECUTABLE || code_range() == NULL ||
-           !code_range()->valid());
+    DCHECK(executable == NOT_EXECUTABLE || !code_range()->valid());
     bool result = base::VirtualMemory::ReleaseRegion(base, size);
     USE(result);
     DCHECK(result);
@@ -506,19 +510,21 @@
   chunk->InitializeReservedMemory();
   chunk->old_to_new_slots_ = nullptr;
   chunk->old_to_old_slots_ = nullptr;
+  chunk->typed_old_to_new_slots_ = nullptr;
   chunk->typed_old_to_old_slots_ = nullptr;
   chunk->skip_list_ = nullptr;
   chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
   chunk->progress_bar_ = 0;
   chunk->high_water_mark_.SetValue(static_cast<intptr_t>(area_start - base));
   chunk->concurrent_sweeping_state().SetValue(kSweepingDone);
-  chunk->mutex_ = nullptr;
+  chunk->mutex_ = new base::Mutex();
   chunk->available_in_free_list_ = 0;
   chunk->wasted_memory_ = 0;
   chunk->ResetLiveBytes();
   Bitmap::Clear(chunk);
   chunk->set_next_chunk(nullptr);
   chunk->set_prev_chunk(nullptr);
+  chunk->local_tracker_ = nullptr;
 
   DCHECK(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset);
   DCHECK(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset);
@@ -560,8 +566,7 @@
       }
     } else {
       CodeRange* code_range = heap_->memory_allocator()->code_range();
-      DCHECK(code_range != NULL && code_range->valid() &&
-             IsFlagSet(IS_EXECUTABLE));
+      DCHECK(code_range->valid() && IsFlagSet(IS_EXECUTABLE));
       if (!code_range->CommitRawMemory(start, length)) return false;
     }
 
@@ -577,8 +582,7 @@
       if (!reservation_.Uncommit(start, length)) return false;
     } else {
       CodeRange* code_range = heap_->memory_allocator()->code_range();
-      DCHECK(code_range != NULL && code_range->valid() &&
-             IsFlagSet(IS_EXECUTABLE));
+      DCHECK(code_range->valid() && IsFlagSet(IS_EXECUTABLE));
       if (!code_range->UncommitRawMemory(start, length)) return false;
     }
   }
@@ -672,10 +676,9 @@
 #ifdef V8_TARGET_ARCH_MIPS64
     // Use code range only for large object space on mips64 to keep address
     // range within 256-MB memory region.
-    if (code_range() != NULL && code_range()->valid() &&
-        reserve_area_size > CodePageAreaSize()) {
+    if (code_range()->valid() && reserve_area_size > CodePageAreaSize()) {
 #else
-    if (code_range() != NULL && code_range()->valid()) {
+    if (code_range()->valid()) {
 #endif
       base =
           code_range()->AllocateRawMemory(chunk_size, commit_size, &chunk_size);
@@ -727,10 +730,6 @@
       static_cast<int>(chunk_size));
 
   LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size));
-  if (owner != NULL) {
-    ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
-    PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
-  }
 
   // We cannot use the last chunk in the address space because we would
   // overflow when comparing top and limit if this chunk is used for a
@@ -762,11 +761,6 @@
 void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
   DCHECK(!chunk->IsFlagSet(MemoryChunk::PRE_FREED));
   LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
-  if (chunk->owner() != NULL) {
-    ObjectSpace space =
-        static_cast<ObjectSpace>(1 << chunk->owner()->identity());
-    PerformAllocationCallback(space, kAllocationActionFree, chunk->size());
-  }
 
   isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk),
                                          chunk->IsEvacuationCandidate());
@@ -915,52 +909,6 @@
   }
 }
 
-
-void MemoryAllocator::PerformAllocationCallback(ObjectSpace space,
-                                                AllocationAction action,
-                                                size_t size) {
-  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
-    MemoryAllocationCallbackRegistration registration =
-        memory_allocation_callbacks_[i];
-    if ((registration.space & space) == space &&
-        (registration.action & action) == action)
-      registration.callback(space, action, static_cast<int>(size));
-  }
-}
-
-
-bool MemoryAllocator::MemoryAllocationCallbackRegistered(
-    MemoryAllocationCallback callback) {
-  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
-    if (memory_allocation_callbacks_[i].callback == callback) return true;
-  }
-  return false;
-}
-
-
-void MemoryAllocator::AddMemoryAllocationCallback(
-    MemoryAllocationCallback callback, ObjectSpace space,
-    AllocationAction action) {
-  DCHECK(callback != NULL);
-  MemoryAllocationCallbackRegistration registration(callback, space, action);
-  DCHECK(!MemoryAllocator::MemoryAllocationCallbackRegistered(callback));
-  return memory_allocation_callbacks_.Add(registration);
-}
-
-
-void MemoryAllocator::RemoveMemoryAllocationCallback(
-    MemoryAllocationCallback callback) {
-  DCHECK(callback != NULL);
-  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
-    if (memory_allocation_callbacks_[i].callback == callback) {
-      memory_allocation_callbacks_.Remove(i);
-      return;
-    }
-  }
-  UNREACHABLE();
-}
-
-
 #ifdef DEBUG
 void MemoryAllocator::ReportStatistics() {
   intptr_t size = Size();
@@ -1041,6 +989,9 @@
   }
   if (old_to_new_slots_ != nullptr) ReleaseOldToNewSlots();
   if (old_to_old_slots_ != nullptr) ReleaseOldToOldSlots();
+  if (typed_old_to_new_slots_ != nullptr) ReleaseTypedOldToNewSlots();
+  if (typed_old_to_old_slots_ != nullptr) ReleaseTypedOldToOldSlots();
+  if (local_tracker_ != nullptr) ReleaseLocalTracker();
 }
 
 static SlotSet* AllocateSlotSet(size_t size, Address page_start) {
@@ -1073,6 +1024,16 @@
   old_to_old_slots_ = nullptr;
 }
 
+void MemoryChunk::AllocateTypedOldToNewSlots() {
+  DCHECK(nullptr == typed_old_to_new_slots_);
+  typed_old_to_new_slots_ = new TypedSlotSet(address());
+}
+
+void MemoryChunk::ReleaseTypedOldToNewSlots() {
+  delete typed_old_to_new_slots_;
+  typed_old_to_new_slots_ = nullptr;
+}
+
 void MemoryChunk::AllocateTypedOldToOldSlots() {
   DCHECK(nullptr == typed_old_to_old_slots_);
   typed_old_to_old_slots_ = new TypedSlotSet(address());
@@ -1082,6 +1043,18 @@
   delete typed_old_to_old_slots_;
   typed_old_to_old_slots_ = nullptr;
 }
+
+void MemoryChunk::AllocateLocalTracker() {
+  DCHECK_NULL(local_tracker_);
+  local_tracker_ = new LocalArrayBufferTracker(heap());
+}
+
+void MemoryChunk::ReleaseLocalTracker() {
+  DCHECK_NOT_NULL(local_tracker_);
+  delete local_tracker_;
+  local_tracker_ = nullptr;
+}
+
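
The new typed old-to-new set follows the same allocate-on-demand,
release-when-empty lifecycle as the other per-chunk sets. A hedged usage
sketch (the offsets are illustrative):

// Assumed call pattern, mirroring the Allocate*/Release* pairs above.
if (chunk->typed_old_to_new_slots() == nullptr) {
  chunk->AllocateTypedOldToNewSlots();
}
chunk->typed_old_to_new_slots()->Insert(CODE_TARGET_SLOT, host_offset, offset);
// ... later, once iteration reports no remaining live slots:
chunk->ReleaseTypedOldToNewSlots();
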
 // -----------------------------------------------------------------------------
 // PagedSpace implementation
 
@@ -1120,9 +1093,10 @@
 
 
 void PagedSpace::TearDown() {
-  PageIterator iterator(this);
-  while (iterator.has_next()) {
-    heap()->memory_allocator()->Free<MemoryAllocator::kFull>(iterator.next());
+  for (auto it = begin(); it != end();) {
+    Page* page = *(it++);  // Will be erased.
+    ArrayBufferTracker::FreeAll(page);
+    heap()->memory_allocator()->Free<MemoryAllocator::kFull>(page);
   }
   anchor_.set_next_page(&anchor_);
   anchor_.set_prev_page(&anchor_);
@@ -1178,10 +1152,8 @@
   AccountCommitted(other->CommittedMemory());
 
   // Move over pages.
-  PageIterator it(other);
-  Page* p = nullptr;
-  while (it.has_next()) {
-    p = it.next();
+  for (auto it = other->begin(); it != other->end();) {
+    Page* p = *(it++);
 
     // Relinking requires the category to be unlinked.
     other->UnlinkFreeListCategories(p);
@@ -1198,18 +1170,16 @@
   if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
   MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
   size_t size = 0;
-  PageIterator it(this);
-  while (it.has_next()) {
-    size += it.next()->CommittedPhysicalMemory();
+  for (Page* page : *this) {
+    size += page->CommittedPhysicalMemory();
   }
   return size;
 }
 
 bool PagedSpace::ContainsSlow(Address addr) {
   Page* p = Page::FromAddress(addr);
-  PageIterator iterator(this);
-  while (iterator.has_next()) {
-    if (iterator.next() == p) return true;
+  for (Page* page : *this) {
+    if (page == p) return true;
   }
   return false;
 }
@@ -1233,7 +1203,6 @@
   return Smi::FromInt(0);
 }
 
-
 bool PagedSpace::Expand() {
   int size = AreaSize();
   if (snapshotable() && !HasPages()) {
@@ -1257,7 +1226,8 @@
     Bitmap::SetAllBits(p);
     p->SetFlag(Page::BLACK_PAGE);
     if (FLAG_trace_incremental_marking) {
-      PrintIsolate(heap()->isolate(), "Added black page %p\n", p);
+      PrintIsolate(heap()->isolate(), "Added black page %p\n",
+                   static_cast<void*>(p));
     }
   }
 
@@ -1270,20 +1240,17 @@
 
 
 int PagedSpace::CountTotalPages() {
-  PageIterator it(this);
   int count = 0;
-  while (it.has_next()) {
-    it.next();
+  for (Page* page : *this) {
     count++;
+    USE(page);
   }
   return count;
 }
 
 
 void PagedSpace::ResetFreeListStatistics() {
-  PageIterator page_iterator(this);
-  while (page_iterator.has_next()) {
-    Page* page = page_iterator.next();
+  for (Page* page : *this) {
     page->ResetFreeListStatistics();
   }
 }
@@ -1326,9 +1293,7 @@
 void PagedSpace::Verify(ObjectVisitor* visitor) {
   bool allocation_pointer_found_in_space =
       (allocation_info_.top() == allocation_info_.limit());
-  PageIterator page_iterator(this);
-  while (page_iterator.has_next()) {
-    Page* page = page_iterator.next();
+  for (Page* page : *this) {
     CHECK(page->owner() == this);
     if (page == Page::FromAllocationAreaAddress(allocation_info_.top())) {
       allocation_pointer_found_in_space = true;
@@ -1415,7 +1380,6 @@
   from_space_.TearDown();
 }
 
-
 void NewSpace::Flip() { SemiSpace::Swap(&from_space_, &to_space_); }
 
 
@@ -1461,6 +1425,48 @@
   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
 }
 
+bool NewSpace::Rebalance() {
+  CHECK(heap()->promotion_queue()->is_empty());
+  // Order here is important to make use of the page pool.
+  return to_space_.EnsureCurrentCapacity() &&
+         from_space_.EnsureCurrentCapacity();
+}
+
+bool SemiSpace::EnsureCurrentCapacity() {
+  if (is_committed()) {
+    const int expected_pages = current_capacity_ / Page::kPageSize;
+    int actual_pages = 0;
+    Page* current_page = anchor()->next_page();
+    while (current_page != anchor()) {
+      actual_pages++;
+      current_page = current_page->next_page();
+      if (actual_pages > expected_pages) {
+        Page* to_remove = current_page->prev_page();
+        // Make sure we don't overtake the actual top pointer.
+        CHECK_NE(to_remove, current_page_);
+        to_remove->Unlink();
+        heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(
+            to_remove);
+      }
+    }
+    while (actual_pages < expected_pages) {
+      actual_pages++;
+      current_page =
+          heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
+              Page::kAllocatableMemory, this, executable());
+      if (current_page == nullptr) return false;
+      DCHECK_NOT_NULL(current_page);
+      current_page->InsertAfter(anchor());
+      Bitmap::Clear(current_page);
+      current_page->SetFlags(anchor()->prev_page()->GetFlags(),
+                             Page::kCopyAllFlags);
+      heap()->CreateFillerObjectAt(current_page->area_start(),
+                                   current_page->area_size(),
+                                   ClearRecordedSlots::kNo);
+    }
+  }
+  return true;
+}
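
expected_pages is a plain division of the semispace capacity by the page size;
surplus pages are unlinked and freed, missing ones are allocated from the
pool. A quick worked example with illustrative numbers:

const int current_capacity = 2 * 1024 * 1024;             // 2 MB semispace
const int page_size = 512 * 1024;                         // 512 KB pages
const int expected_pages = current_capacity / page_size;  // == 4
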
 
 void LocalAllocationBuffer::Close() {
   if (IsValid()) {
@@ -1517,11 +1523,9 @@
   Address old_top = allocation_info_.top();
   to_space_.Reset();
   UpdateAllocationInfo();
-  pages_used_ = 0;
   // Clear all mark-bits in the to-space.
-  NewSpacePageIterator it(&to_space_);
-  while (it.has_next()) {
-    Bitmap::Clear(it.next());
+  for (Page* p : to_space_) {
+    Bitmap::Clear(p);
   }
   InlineAllocationStep(old_top, allocation_info_.top(), nullptr, 0);
 }
@@ -1563,7 +1567,6 @@
 
   int remaining_in_page = static_cast<int>(limit - top);
   heap()->CreateFillerObjectAt(top, remaining_in_page, ClearRecordedSlots::kNo);
-  pages_used_++;
   UpdateAllocationInfo();
 
   return true;
@@ -1739,7 +1742,12 @@
 
 void SemiSpace::TearDown() {
   // Properly uncommit memory to keep the allocator counters in sync.
-  if (is_committed()) Uncommit();
+  if (is_committed()) {
+    for (Page* p : *this) {
+      ArrayBufferTracker::FreeAll(p);
+    }
+    Uncommit();
+  }
   current_capacity_ = maximum_capacity_ = 0;
 }
 
@@ -1771,10 +1779,9 @@
 
 bool SemiSpace::Uncommit() {
   DCHECK(is_committed());
-  NewSpacePageIterator it(this);
-  while (it.has_next()) {
-    heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(
-        it.next());
+  for (auto it = begin(); it != end();) {
+    Page* p = *(it++);
+    heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(p);
   }
   anchor()->set_next_page(anchor());
   anchor()->set_prev_page(anchor());
@@ -1788,9 +1795,8 @@
 size_t SemiSpace::CommittedPhysicalMemory() {
   if (!is_committed()) return 0;
   size_t size = 0;
-  NewSpacePageIterator it(this);
-  while (it.has_next()) {
-    size += it.next()->CommittedPhysicalMemory();
+  for (Page* p : *this) {
+    size += p->CommittedPhysicalMemory();
   }
   return size;
 }
@@ -1871,9 +1877,7 @@
   anchor_.prev_page()->set_next_page(&anchor_);
   anchor_.next_page()->set_prev_page(&anchor_);
 
-  NewSpacePageIterator it(this);
-  while (it.has_next()) {
-    Page* page = it.next();
+  for (Page* page : *this) {
     page->set_owner(this);
     page->SetFlags(flags, mask);
     if (id_ == kToSpace) {
@@ -1894,23 +1898,21 @@
 void SemiSpace::Reset() {
   DCHECK_NE(anchor_.next_page(), &anchor_);
   current_page_ = anchor_.next_page();
+  pages_used_ = 0;
 }
 
-bool SemiSpace::ReplaceWithEmptyPage(Page* old_page) {
-  // TODO(mlippautz): We do not have to get a new page here when the semispace
-  // is uncommitted later on.
-  Page* new_page = heap()->memory_allocator()->AllocatePage(
-      Page::kAllocatableMemory, this, executable());
-  if (new_page == nullptr) return false;
-  Bitmap::Clear(new_page);
-  new_page->SetFlags(old_page->GetFlags(), Page::kCopyAllFlags);
-  new_page->set_next_page(old_page->next_page());
-  new_page->set_prev_page(old_page->prev_page());
-  old_page->next_page()->set_prev_page(new_page);
-  old_page->prev_page()->set_next_page(new_page);
-  heap()->CreateFillerObjectAt(new_page->area_start(), new_page->area_size(),
-                               ClearRecordedSlots::kNo);
-  return true;
+void SemiSpace::RemovePage(Page* page) {
+  if (current_page_ == page) {
+    current_page_ = page->prev_page();
+  }
+  page->Unlink();
+}
+
+void SemiSpace::PrependPage(Page* page) {
+  page->SetFlags(current_page()->GetFlags(), Page::kCopyAllFlags);
+  page->set_owner(this);
+  page->InsertAfter(anchor());
+  pages_used_++;
 }
 
 void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
@@ -1938,9 +1940,8 @@
   DCHECK_EQ(Page::FromAllocationAreaAddress(mark)->owner(), this);
   age_mark_ = mark;
   // Mark all pages up to the one containing mark.
-  NewSpacePageIterator it(space_start(), mark);
-  while (it.has_next()) {
-    it.next()->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
+  for (Page* p : NewSpacePageRange(space_start(), mark)) {
+    p->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
   }
 }
 
@@ -2016,7 +2017,6 @@
   limit_ = end;
 }
 
-
 #ifdef DEBUG
 // heap_histograms is shared, always clear it before using it.
 static void ClearHistograms(Isolate* isolate) {
@@ -2034,25 +2034,22 @@
 
 
 static void ClearCodeKindStatistics(int* code_kind_statistics) {
-  for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
+  for (int i = 0; i < AbstractCode::NUMBER_OF_KINDS; i++) {
     code_kind_statistics[i] = 0;
   }
 }
-
-
 static void ReportCodeKindStatistics(int* code_kind_statistics) {
   PrintF("\n   Code kind histograms: \n");
-  for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
+  for (int i = 0; i < AbstractCode::NUMBER_OF_KINDS; i++) {
     if (code_kind_statistics[i] > 0) {
       PrintF("     %-20s: %10d bytes\n",
-             Code::Kind2String(static_cast<Code::Kind>(i)),
+             AbstractCode::Kind2String(static_cast<AbstractCode::Kind>(i)),
              code_kind_statistics[i]);
     }
   }
   PrintF("\n");
 }
 
-
 static int CollectHistogramInfo(HeapObject* obj) {
   Isolate* isolate = obj->GetIsolate();
   InstanceType type = obj->map()->instance_type();
@@ -2562,10 +2559,11 @@
 
 void FreeList::PrintCategories(FreeListCategoryType type) {
   FreeListCategoryIterator it(this, type);
-  PrintF("FreeList[%p, top=%p, %d] ", this, categories_[type], type);
+  PrintF("FreeList[%p, top=%p, %d] ", static_cast<void*>(this),
+         static_cast<void*>(categories_[type]), type);
   while (it.HasNext()) {
     FreeListCategory* current = it.Next();
-    PrintF("%p -> ", current);
+    PrintF("%p -> ", static_cast<void*>(current));
   }
   PrintF("null\n");
 }
@@ -2649,9 +2647,7 @@
   free_list_.RepairLists(heap());
   // Each page may have a small free space that is not tracked by a free list.
   // Update the maps for those free space objects.
-  PageIterator iterator(this);
-  while (iterator.has_next()) {
-    Page* page = iterator.next();
+  for (Page* page : *this) {
     int size = static_cast<int>(page->wasted_memory());
     if (size == 0) continue;
     Address address = page->OffsetToAddress(Page::kPageSize - size);
@@ -2748,12 +2744,15 @@
   return SweepAndRetryAllocation(size_in_bytes);
 }
 
-
 #ifdef DEBUG
 void PagedSpace::ReportCodeStatistics(Isolate* isolate) {
   CommentStatistic* comments_statistics =
       isolate->paged_space_comments_statistics();
   ReportCodeKindStatistics(isolate->code_kind_statistics());
+  PrintF("Code size including metadata    : %10d bytes\n",
+         isolate->code_and_metadata_size());
+  PrintF("Bytecode size including metadata: %10d bytes\n",
+         isolate->bytecode_and_metadata_size());
   PrintF(
       "Code comment statistics (\"   [ comment-txt   :    size/   "
       "count  (average)\"):\n");
@@ -2767,7 +2766,6 @@
   PrintF("\n");
 }
 
-
 void PagedSpace::ResetCodeStatistics(Isolate* isolate) {
   CommentStatistic* comments_statistics =
       isolate->paged_space_comments_statistics();
@@ -2843,40 +2841,28 @@
   EnterComment(isolate, comment_txt, flat_delta);
 }
 
-
-// Collects code size statistics:
-// - by code kind
-// - by code comment
-void PagedSpace::CollectCodeStatistics() {
-  Isolate* isolate = heap()->isolate();
-  HeapObjectIterator obj_it(this);
-  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
-    if (obj->IsAbstractCode()) {
-      AbstractCode* code = AbstractCode::cast(obj);
-      isolate->code_kind_statistics()[code->kind()] += code->Size();
-    }
-    if (obj->IsCode()) {
-      // TODO(mythria): Also enable this for BytecodeArray when it supports
-      // RelocInformation.
-      Code* code = Code::cast(obj);
-      RelocIterator it(code);
-      int delta = 0;
-      const byte* prev_pc = code->instruction_start();
-      while (!it.done()) {
-        if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
-          delta += static_cast<int>(it.rinfo()->pc() - prev_pc);
-          CollectCommentStatistics(isolate, &it);
-          prev_pc = it.rinfo()->pc();
-        }
-        it.next();
-      }
-
-      DCHECK(code->instruction_start() <= prev_pc &&
-             prev_pc <= code->instruction_end());
-      delta += static_cast<int>(code->instruction_end() - prev_pc);
-      EnterComment(isolate, "NoComment", delta);
-    }
+// Collects code comment statistics
+static void CollectCodeCommentStatistics(HeapObject* obj, Isolate* isolate) {
+  if (!obj->IsCode()) {
+    return;
   }
+  Code* code = Code::cast(obj);
+  RelocIterator it(code);
+  int delta = 0;
+  const byte* prev_pc = code->instruction_start();
+  while (!it.done()) {
+    if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
+      delta += static_cast<int>(it.rinfo()->pc() - prev_pc);
+      CollectCommentStatistics(isolate, &it);
+      prev_pc = it.rinfo()->pc();
+    }
+    it.next();
+  }
+
+  DCHECK(code->instruction_start() <= prev_pc &&
+         prev_pc <= code->instruction_end());
+  delta += static_cast<int>(code->instruction_end() - prev_pc);
+  EnterComment(isolate, "NoComment", delta);
 }
 
 
@@ -2897,6 +2883,44 @@
 }
 #endif
 
+static void RecordCodeSizeIncludingMetadata(AbstractCode* abstract_code,
+                                            Isolate* isolate) {
+  int size = abstract_code->SizeIncludingMetadata();
+  if (abstract_code->IsCode()) {
+    size += isolate->code_and_metadata_size();
+    isolate->set_code_and_metadata_size(size);
+  } else {
+    size += isolate->bytecode_and_metadata_size();
+    isolate->set_bytecode_and_metadata_size(size);
+  }
+}
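
The get-then-set pair is simply a running per-isolate total. For the Code
branch it is equivalent to the single statement below (a restatement, not a
change; the bytecode branch is symmetric):

isolate->set_code_and_metadata_size(isolate->code_and_metadata_size() +
                                    abstract_code->SizeIncludingMetadata());
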
+
+// Collects code size statistics:
+// - code and metadata size
+// - by code kind (only in debug mode)
+// - by code comment (only in debug mode)
+void PagedSpace::CollectCodeStatistics() {
+  Isolate* isolate = heap()->isolate();
+  HeapObjectIterator obj_it(this);
+  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
+    if (obj->IsAbstractCode()) {
+      AbstractCode* code = AbstractCode::cast(obj);
+      RecordCodeSizeIncludingMetadata(code, isolate);
+#ifdef DEBUG
+      isolate->code_kind_statistics()[code->kind()] += code->Size();
+      CollectCodeCommentStatistics(obj, isolate);
+#endif
+    }
+  }
+}
+
+void PagedSpace::ResetCodeAndMetadataStatistics(Isolate* isolate) {
+  isolate->set_code_and_metadata_size(0);
+  isolate->set_bytecode_and_metadata_size(0);
+#ifdef DEBUG
+  ResetCodeStatistics(isolate);
+#endif
+}
 
 // -----------------------------------------------------------------------------
 // MapSpace implementation
@@ -2926,15 +2950,13 @@
 // -----------------------------------------------------------------------------
 // LargeObjectSpace
 
-
 LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id)
     : Space(heap, id, NOT_EXECUTABLE),  // Managed on a per-allocation basis
       first_page_(NULL),
       size_(0),
       page_count_(0),
       objects_size_(0),
-      chunk_map_(HashMap::PointersMatch, 1024) {}
-
+      chunk_map_(base::HashMap::PointersMatch, 1024) {}
 
 LargeObjectSpace::~LargeObjectSpace() {}
 
@@ -2954,10 +2976,6 @@
     LargePage* page = first_page_;
     first_page_ = first_page_->next_page();
     LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", page->address()));
-
-    ObjectSpace space = static_cast<ObjectSpace>(1 << identity());
-    heap()->memory_allocator()->PerformAllocationCallback(
-        space, kAllocationActionFree, page->size());
     heap()->memory_allocator()->Free<MemoryAllocator::kFull>(page);
   }
   SetUp();
@@ -2989,7 +3007,7 @@
   uintptr_t base = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment;
   uintptr_t limit = base + (page->size() - 1) / MemoryChunk::kAlignment;
   for (uintptr_t key = base; key <= limit; key++) {
-    HashMap::Entry* entry = chunk_map_.LookupOrInsert(
+    base::HashMap::Entry* entry = chunk_map_.LookupOrInsert(
         reinterpret_cast<void*>(key), static_cast<uint32_t>(key));
     DCHECK(entry != NULL);
     entry->value = page;
@@ -3013,14 +3031,10 @@
 
 
 size_t LargeObjectSpace::CommittedPhysicalMemory() {
-  if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
-  size_t size = 0;
-  LargePage* current = first_page_;
-  while (current != NULL) {
-    size += current->CommittedPhysicalMemory();
-    current = current->next_page();
-  }
-  return size;
+  // On platforms that provide lazy committing of memory, this over-accounts
+  // the physically committed memory. There is currently no easy way to
+  // support precise accounting of committed memory in large object space.
+  return CommittedMemory();
 }
 
 
@@ -3036,8 +3050,8 @@
 
 LargePage* LargeObjectSpace::FindPage(Address a) {
   uintptr_t key = reinterpret_cast<uintptr_t>(a) / MemoryChunk::kAlignment;
-  HashMap::Entry* e = chunk_map_.Lookup(reinterpret_cast<void*>(key),
-                                        static_cast<uint32_t>(key));
+  base::HashMap::Entry* e = chunk_map_.Lookup(reinterpret_cast<void*>(key),
+                                              static_cast<uint32_t>(key));
   if (e != NULL) {
     DCHECK(e->value != NULL);
     LargePage* page = reinterpret_cast<LargePage*>(e->value);
@@ -3167,6 +3181,20 @@
 }
 #endif
 
+void LargeObjectSpace::CollectCodeStatistics() {
+  Isolate* isolate = heap()->isolate();
+  LargeObjectIterator obj_it(this);
+  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
+    if (obj->IsAbstractCode()) {
+      AbstractCode* code = AbstractCode::cast(obj);
+      RecordCodeSizeIncludingMetadata(code, isolate);
+#ifdef DEBUG
+      isolate->code_kind_statistics()[code->kind()] += code->Size();
+      CollectCodeCommentStatistics(obj, isolate);
+#endif
+    }
+  }
+}
 
 #ifdef DEBUG
 void LargeObjectSpace::Print() {
@@ -3196,21 +3224,9 @@
 }
 
 
-void LargeObjectSpace::CollectCodeStatistics() {
-  Isolate* isolate = heap()->isolate();
-  LargeObjectIterator obj_it(this);
-  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
-    if (obj->IsAbstractCode()) {
-      AbstractCode* code = AbstractCode::cast(obj);
-      isolate->code_kind_statistics()[code->kind()] += code->Size();
-    }
-  }
-}
-
-
 void Page::Print() {
   // Make a best-effort to print the objects in the page.
-  PrintF("Page@%p in %s\n", this->address(),
+  PrintF("Page@%p in %s\n", static_cast<void*>(this->address()),
          AllocationSpaceName(this->owner()->identity()));
   printf(" --------------------------------------\n");
   HeapObjectIterator objects(this);
diff --git a/src/heap/spaces.h b/src/heap/spaces.h
index 67e9aae..04c89a8 100644
--- a/src/heap/spaces.h
+++ b/src/heap/spaces.h
@@ -11,9 +11,9 @@
 #include "src/base/atomic-utils.h"
 #include "src/base/atomicops.h"
 #include "src/base/bits.h"
+#include "src/base/hashmap.h"
 #include "src/base/platform/mutex.h"
 #include "src/flags.h"
-#include "src/hashmap.h"
 #include "src/list.h"
 #include "src/objects.h"
 #include "src/utils.h"
@@ -27,6 +27,7 @@
 class CompactionSpaceCollection;
 class FreeList;
 class Isolate;
+class LocalArrayBufferTracker;
 class MemoryAllocator;
 class MemoryChunk;
 class Page;
@@ -424,6 +425,10 @@
     // from new to old space during evacuation.
     PAGE_NEW_OLD_PROMOTION,
 
+    // |PAGE_NEW_NEW_PROMOTION|: A page tagged with this flag has been moved
+    // within the new space during evacuation.
+    PAGE_NEW_NEW_PROMOTION,
+
     // A black page has all mark bits set to 1 (black). A black page currently
     // cannot be iterated because it is not swept. Moreover live bytes are also
     // not updated.
@@ -450,6 +455,11 @@
     //   has been aborted and needs special handling by the sweeper.
     COMPACTION_WAS_ABORTED,
 
+    // |COMPACTION_WAS_ABORTED_FOR_TESTING|: During stress testing, evacuation
+    // of pages is sometimes aborted. The flag is used to avoid repeatedly
+    // triggering an abort on the same page.
+    COMPACTION_WAS_ABORTED_FOR_TESTING,
+
     // |ANCHOR|: Flag is set if page is an anchor.
     ANCHOR,
 
@@ -509,6 +519,7 @@
   static const size_t kWriteBarrierCounterOffset =
       kOldToNewSlotsOffset + kPointerSize  // SlotSet* old_to_new_slots_;
       + kPointerSize                       // SlotSet* old_to_old_slots_;
+      + kPointerSize   // TypedSlotSet* typed_old_to_new_slots_;
       + kPointerSize   // TypedSlotSet* typed_old_to_old_slots_;
       + kPointerSize;  // SkipList* skip_list_;
 
@@ -522,7 +533,8 @@
       + kPointerSize      // AtomicValue next_chunk_
       + kPointerSize      // AtomicValue prev_chunk_
       // FreeListCategory categories_[kNumberOfCategories]
-      + FreeListCategory::kSize * kNumberOfCategories;
+      + FreeListCategory::kSize * kNumberOfCategories +
+      kPointerSize;  // LocalArrayBufferTracker* local_tracker_;
 
   // We add some more space to the computed header size to account for missing
   // alignment requirements in our computation.
@@ -625,16 +637,24 @@
 
   inline SlotSet* old_to_new_slots() { return old_to_new_slots_; }
   inline SlotSet* old_to_old_slots() { return old_to_old_slots_; }
+  inline TypedSlotSet* typed_old_to_new_slots() {
+    return typed_old_to_new_slots_;
+  }
   inline TypedSlotSet* typed_old_to_old_slots() {
     return typed_old_to_old_slots_;
   }
+  inline LocalArrayBufferTracker* local_tracker() { return local_tracker_; }
 
   void AllocateOldToNewSlots();
   void ReleaseOldToNewSlots();
   void AllocateOldToOldSlots();
   void ReleaseOldToOldSlots();
+  void AllocateTypedOldToNewSlots();
+  void ReleaseTypedOldToNewSlots();
   void AllocateTypedOldToOldSlots();
   void ReleaseTypedOldToOldSlots();
+  void AllocateLocalTracker();
+  void ReleaseLocalTracker();
 
   Address area_start() { return area_start_; }
   Address area_end() { return area_end_; }
@@ -645,6 +665,8 @@
   // Approximate amount of physical memory committed for this chunk.
   size_t CommittedPhysicalMemory() { return high_water_mark_.Value(); }
 
+  Address HighWaterMark() { return address() + high_water_mark_.Value(); }
+
   int progress_bar() {
     DCHECK(IsFlagSet(HAS_PROGRESS_BAR));
     return progress_bar_;
@@ -707,7 +729,8 @@
   }
 
   bool ShouldSkipEvacuationSlotRecording() {
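+    // Pages on which compaction was aborted keep their objects in place, so
+    // slot recording must not be skipped for them despite the flags in
+    // kSkipEvacuationSlotsRecordingMask.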
-    return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0;
+    return ((flags_ & kSkipEvacuationSlotsRecordingMask) != 0) &&
+           !IsFlagSet(COMPACTION_WAS_ABORTED);
   }
 
   Executability executable() {
@@ -792,6 +815,7 @@
   // is ceil(size() / kPageSize).
   SlotSet* old_to_new_slots_;
   SlotSet* old_to_old_slots_;
+  TypedSlotSet* typed_old_to_new_slots_;
   TypedSlotSet* typed_old_to_old_slots_;
 
   SkipList* skip_list_;
@@ -817,6 +841,8 @@
 
   FreeListCategory categories_[kNumberOfCategories];
 
+  LocalArrayBufferTracker* local_tracker_;
+
  private:
   void InitializeReservedMemory() { reservation_.Reset(); }
 
@@ -1154,15 +1180,6 @@
   void FreeRawMemory(Address buf, size_t length);
 
  private:
-  // Frees the range of virtual memory, and frees the data structures used to
-  // manage it.
-  void TearDown();
-
-  Isolate* isolate_;
-
-  // The reserved range of virtual memory that all code objects are put in.
-  base::VirtualMemory* code_range_;
-  // Plain old data class, just a struct plus a constructor.
   class FreeBlock {
    public:
     FreeBlock() : start(0), size(0) {}
@@ -1181,6 +1198,26 @@
     size_t size;
   };
 
+  // Frees the range of virtual memory, and frees the data structures used to
+  // manage it.
+  void TearDown();
+
+  // Finds a block on the allocation list that contains at least the
+  // requested amount of memory.  If none is found, sorts and merges
+  // the existing free memory blocks, and searches again.
+  // If none can be found, returns false.
+  bool GetNextAllocationBlock(size_t requested);
+  // Compares the start addresses of two free blocks.
+  static int CompareFreeBlockAddress(const FreeBlock* left,
+                                     const FreeBlock* right);
+  bool ReserveBlock(const size_t requested_size, FreeBlock* block);
+  void ReleaseBlock(const FreeBlock* block);
+
+  Isolate* isolate_;
+
+  // The reserved range of virtual memory that all code objects are put in.
+  base::VirtualMemory* code_range_;
+
   // The global mutex guards free_list_ and allocation_list_ as GC threads may
   // access both lists concurrently to the main thread.
   base::Mutex code_range_mutex_;
@@ -1195,17 +1232,6 @@
   List<FreeBlock> allocation_list_;
   int current_allocation_block_index_;
 
-  // Finds a block on the allocation list that contains at least the
-  // requested amount of memory.  If none is found, sorts and merges
-  // the existing free memory blocks, and searches again.
-  // If none can be found, returns false.
-  bool GetNextAllocationBlock(size_t requested);
-  // Compares the start addresses of two free blocks.
-  static int CompareFreeBlockAddress(const FreeBlock* left,
-                                     const FreeBlock* right);
-  bool ReserveBlock(const size_t requested_size, FreeBlock* block);
-  void ReleaseBlock(const FreeBlock* block);
-
   DISALLOW_COPY_AND_ASSIGN(CodeRange);
 };
 
@@ -1321,7 +1347,12 @@
     template <ChunkQueueType type>
     void AddMemoryChunkSafe(MemoryChunk* chunk) {
       base::LockGuard<base::Mutex> guard(&mutex_);
-      chunks_[type].push_back(chunk);
+      if (type != kRegular || allocator_->CanFreeMemoryChunk(chunk)) {
+        chunks_[type].push_back(chunk);
+      } else {
+        DCHECK_EQ(type, kRegular);
+        delayed_regular_chunks_.push_back(chunk);
+      }
     }
 
     template <ChunkQueueType type>
@@ -1333,11 +1364,16 @@
       return chunk;
     }
 
+    void ReconsiderDelayedChunks();
     void PerformFreeMemoryOnQueuedChunks();
 
     base::Mutex mutex_;
     MemoryAllocator* allocator_;
     std::list<MemoryChunk*> chunks_[kNumberOfChunkQueues];
+    // Delayed chunks cannot be processed in the current unmapping cycle because
+    // of dependencies such as an active sweeper.
+    // See MemoryAllocator::CanFreeMemoryChunk.
+    std::list<MemoryChunk*> delayed_regular_chunks_;
     base::Semaphore pending_unmapping_tasks_semaphore_;
     intptr_t concurrent_unmapping_tasks_active_;
 
@@ -1376,6 +1412,8 @@
   template <MemoryAllocator::FreeMode mode = kFull>
   void Free(MemoryChunk* chunk);
 
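+  // Returns whether |chunk| can be freed right away; if not, the Unmapper
+  // queues it as a delayed chunk (see delayed_regular_chunks_ above) until
+  // its dependencies, such as an active sweeper, are resolved.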
+  bool CanFreeMemoryChunk(MemoryChunk* chunk);
+
   // Returns allocated spaces in bytes.
   intptr_t Size() { return size_.Value(); }
 
@@ -1446,16 +1484,6 @@
   // filling it up with a recognizable non-NULL bit pattern.
   void ZapBlock(Address start, size_t size);
 
-  void PerformAllocationCallback(ObjectSpace space, AllocationAction action,
-                                 size_t size);
-
-  void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
-                                   ObjectSpace space, AllocationAction action);
-
-  void RemoveMemoryAllocationCallback(MemoryAllocationCallback callback);
-
-  bool MemoryAllocationCallbackRegistered(MemoryAllocationCallback callback);
-
   static int CodePageGuardStartOffset();
 
   static int CodePageGuardSize();
@@ -1516,19 +1544,6 @@
   base::AtomicValue<void*> lowest_ever_allocated_;
   base::AtomicValue<void*> highest_ever_allocated_;
 
-  struct MemoryAllocationCallbackRegistration {
-    MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,
-                                         ObjectSpace space,
-                                         AllocationAction action)
-        : callback(callback), space(space), action(action) {}
-    MemoryAllocationCallback callback;
-    ObjectSpace space;
-    AllocationAction action;
-  };
-
-  // A List of callback that are triggered when memory is allocated or free'd
-  List<MemoryAllocationCallbackRegistration> memory_allocation_callbacks_;
-
   // Initializes pages in a chunk. Returns the first page address.
   // This function and GetChunkId() are provided for the mark-compact
   // collector to rebuild page headers in the from space, which is
@@ -1569,10 +1584,44 @@
 class ObjectIterator : public Malloced {
  public:
   virtual ~ObjectIterator() {}
-
-  virtual HeapObject* next_object() = 0;
+  virtual HeapObject* Next() = 0;
 };
 
+template <class PAGE_TYPE>
+class PageIteratorImpl
+    : public std::iterator<std::forward_iterator_tag, PAGE_TYPE> {
+ public:
+  explicit PageIteratorImpl(PAGE_TYPE* p) : p_(p) {}
+  PageIteratorImpl(const PageIteratorImpl<PAGE_TYPE>& other) : p_(other.p_) {}
+  PAGE_TYPE* operator*() { return p_; }
+  bool operator==(const PageIteratorImpl<PAGE_TYPE>& rhs) {
+    return rhs.p_ == p_;
+  }
+  bool operator!=(const PageIteratorImpl<PAGE_TYPE>& rhs) {
+    return rhs.p_ != p_;
+  }
+  inline PageIteratorImpl<PAGE_TYPE>& operator++();
+  inline PageIteratorImpl<PAGE_TYPE> operator++(int);
+
+ private:
+  PAGE_TYPE* p_;
+};
+
+typedef PageIteratorImpl<Page> PageIterator;
+typedef PageIteratorImpl<LargePage> LargePageIterator;
+
+class PageRange {
+ public:
+  typedef PageIterator iterator;
+  PageRange(Page* begin, Page* end) : begin_(begin), end_(end) {}
+  explicit PageRange(Page* page) : PageRange(page, page->next_page()) {}
+  iterator begin() { return iterator(begin_); }
+  iterator end() { return iterator(end_); }
+
+ private:
+  Page* begin_;
+  Page* end_;
+};
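+// PageRange together with PageIteratorImpl enables range-based iteration over
+// pages, e.g. (sketch): for (Page* p : PageRange(page)) { ... }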
 
 // -----------------------------------------------------------------------------
 // Heap object iterator in new/old/map spaces.
@@ -1591,18 +1640,10 @@
 
   // Advance to the next object, skipping free spaces and other fillers and
   // skipping the special garbage section of which there is one per space.
-  // Returns NULL when the iteration has ended.
-  inline HeapObject* Next();
-  inline HeapObject* next_object() override;
+  // Returns nullptr when the iteration has ended.
+  inline HeapObject* Next() override;
 
  private:
-  enum PageMode { kOnePageOnly, kAllPagesInSpace };
-
-  Address cur_addr_;              // Current iteration point.
-  Address cur_end_;               // End iteration point.
-  PagedSpace* space_;
-  PageMode page_mode_;
-
   // Fast (inlined) path of next().
   inline HeapObject* FromCurrentPage();
 
@@ -1610,28 +1651,11 @@
   // iteration has ended.
   bool AdvanceToNextPage();
 
-  // Initializes fields.
-  inline void Initialize(PagedSpace* owner, Address start, Address end,
-                         PageMode mode);
-};
-
-
-// -----------------------------------------------------------------------------
-// A PageIterator iterates the pages in a paged space.
-
-class PageIterator BASE_EMBEDDED {
- public:
-  explicit inline PageIterator(PagedSpace* space);
-
-  inline bool has_next();
-  inline Page* next();
-
- private:
+  Address cur_addr_;  // Current iteration point.
+  Address cur_end_;   // End iteration point.
   PagedSpace* space_;
-  Page* prev_page_;  // Previous page returned.
-  // Next page that will be returned.  Cached here so that we can use this
-  // iterator for operations that deallocate pages.
-  Page* next_page_;
+  PageRange page_range_;
+  PageRange::iterator current_page_;
 };
 
 
@@ -2083,8 +2107,21 @@
   AllocationInfo allocation_info_;
 };
 
+class NewSpacePageRange {
+ public:
+  typedef PageRange::iterator iterator;
+  inline NewSpacePageRange(Address start, Address limit);
+  iterator begin() { return range_.begin(); }
+  iterator end() { return range_.end(); }
+
+ private:
+  PageRange range_;
+};
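+// Spans the new-space pages covering the address range [start, limit).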
+
 class PagedSpace : public Space {
  public:
+  typedef PageIterator iterator;
+
   static const intptr_t kCompactionMemoryWanted = 500 * KB;
 
   // Creates a space with an id.
@@ -2236,6 +2273,12 @@
   // The dummy page that anchors the linked list of pages.
   Page* anchor() { return &anchor_; }
 
+  // Collect code size related statistics
+  void CollectCodeStatistics();
+
+  // Reset code size related statistics
+  static void ResetCodeAndMetadataStatistics(Isolate* isolate);
+
 #ifdef VERIFY_HEAP
   // Verify integrity of this space.
   virtual void Verify(ObjectVisitor* visitor);
@@ -2253,7 +2296,6 @@
   void ReportStatistics();
 
   // Report code object related statistics
-  void CollectCodeStatistics();
   static void ReportCodeStatistics(Isolate* isolate);
   static void ResetCodeStatistics(Isolate* isolate);
 #endif
@@ -2288,6 +2330,9 @@
   inline void UnlinkFreeListCategories(Page* page);
   inline intptr_t RelinkFreeListCategories(Page* page);
 
+  iterator begin() { return iterator(anchor_.next_page()); }
+  iterator end() { return iterator(&anchor_); }
+
  protected:
   // PagedSpaces that should be included in snapshots have different, i.e.,
   // smaller, initial pages.
@@ -2342,7 +2387,6 @@
 
   friend class IncrementalMarking;
   friend class MarkCompactCollector;
-  friend class PageIterator;
 
   // Used in cctest.
   friend class HeapTester;
@@ -2393,6 +2437,8 @@
 // space as a marking stack when tracing live objects.
 class SemiSpace : public Space {
  public:
+  typedef PageIterator iterator;
+
   static void Swap(SemiSpace* from, SemiSpace* to);
 
   SemiSpace(Heap* heap, SemiSpaceId semispace)
@@ -2404,7 +2450,8 @@
         committed_(false),
         id_(semispace),
         anchor_(this),
-        current_page_(nullptr) {}
+        current_page_(nullptr),
+        pages_used_(0) {}
 
   inline bool Contains(HeapObject* o);
   inline bool Contains(Object* o);
@@ -2427,6 +2474,8 @@
   // than the current capacity.
   bool ShrinkTo(int new_capacity);
 
+  bool EnsureCurrentCapacity();
+
   // Returns the start address of the first page of the space.
   Address space_start() {
     DCHECK_NE(anchor_.next_page(), anchor());
@@ -2435,6 +2484,7 @@
 
   Page* first_page() { return anchor_.next_page(); }
   Page* current_page() { return current_page_; }
+  int pages_used() { return pages_used_; }
 
   // Returns one past the end address of the space.
   Address space_end() { return anchor_.prev_page()->area_end(); }
@@ -2447,15 +2497,23 @@
 
   bool AdvancePage() {
     Page* next_page = current_page_->next_page();
-    if (next_page == anchor()) return false;
+    // We cannot expand if we reached the maximum number of pages already. Note
+    // that this check must already account for the next page, as we could
+    // potentially fill the whole page after advancing.
+    const bool reached_max_pages = (pages_used_ + 1) == max_pages();
+    if (next_page == anchor() || reached_max_pages) {
+      return false;
+    }
     current_page_ = next_page;
+    pages_used_++;
     return true;
   }
 
   // Resets the space to using the first page.
   void Reset();
 
-  bool ReplaceWithEmptyPage(Page* page);
+  void RemovePage(Page* page);
+  void PrependPage(Page* page);
 
   // Age mark accessors.
   Address age_mark() { return age_mark_; }
@@ -2505,10 +2563,14 @@
   virtual void Verify();
 #endif
 
+  iterator begin() { return iterator(anchor_.next_page()); }
+  iterator end() { return iterator(anchor()); }
+
  private:
   void RewindPages(Page* start, int num_pages);
 
   inline Page* anchor() { return &anchor_; }
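+  // Number of pages that fit into the currently committed capacity; used by
+  // AdvancePage() to bound page usage.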
+  inline int max_pages() { return current_capacity_ / Page::kPageSize; }
 
   // Copies the flags into the masked positions on all pages in the space.
   void FixPagesFlags(intptr_t flags, intptr_t flag_mask);
@@ -2516,7 +2578,8 @@
   // The currently committed space capacity.
   int current_capacity_;
 
-  // The maximum capacity that can be used by this space.
+  // The maximum capacity that can be used by this space. A space cannot grow
+  // beyond that size.
   int maximum_capacity_;
 
   // The minimum capacity for the space. A space cannot shrink below this size.
@@ -2530,9 +2593,10 @@
 
   Page anchor_;
   Page* current_page_;
+  int pages_used_;
 
+  friend class NewSpace;
   friend class SemiSpaceIterator;
-  friend class NewSpacePageIterator;
 };
 
 
@@ -2546,10 +2610,7 @@
   // Create an iterator over the allocated objects in the given to-space.
   explicit SemiSpaceIterator(NewSpace* space);
 
-  inline HeapObject* Next();
-
-  // Implementation of the ObjectIterator functions.
-  inline HeapObject* next_object() override;
+  inline HeapObject* Next() override;
 
  private:
   void Initialize(Address start, Address end);
@@ -2560,35 +2621,6 @@
   Address limit_;
 };
 
-
-// -----------------------------------------------------------------------------
-// A PageIterator iterates the pages in a semi-space.
-class NewSpacePageIterator BASE_EMBEDDED {
- public:
-  // Make an iterator that runs over all pages in to-space.
-  explicit inline NewSpacePageIterator(NewSpace* space);
-
-  // Make an iterator that runs over all pages in the given semispace,
-  // even those not used in allocation.
-  explicit inline NewSpacePageIterator(SemiSpace* space);
-
-  // Make iterator that iterates from the page containing start
-  // to the page that contains limit in the same semispace.
-  inline NewSpacePageIterator(Address start, Address limit);
-
-  inline bool has_next();
-  inline Page* next();
-
- private:
-  Page* prev_page_;  // Previous page returned.
-  // Next page that will be returned.  Cached here so that we can use this
-  // iterator for operations that deallocate pages.
-  Page* next_page_;
-  // Last page returned.
-  Page* last_page_;
-};
-
-
 // -----------------------------------------------------------------------------
 // The young generation space.
 //
@@ -2597,12 +2629,13 @@
 
 class NewSpace : public Space {
  public:
+  typedef PageIterator iterator;
+
   explicit NewSpace(Heap* heap)
       : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
         to_space_(heap, kToSpace),
         from_space_(heap, kFromSpace),
         reservation_(),
-        pages_used_(0),
         top_on_previous_step_(0),
         allocated_histogram_(nullptr),
         promoted_histogram_(nullptr) {}
@@ -2634,7 +2667,7 @@
 
   // Return the allocated bytes in the active semispace.
   intptr_t Size() override {
-    return pages_used_ * Page::kAllocatableMemory +
+    return to_space_.pages_used() * Page::kAllocatableMemory +
            static_cast<int>(top() - to_space_.page_low());
   }
 
@@ -2711,12 +2744,14 @@
     return static_cast<size_t>(allocated);
   }
 
-  bool ReplaceWithEmptyPage(Page* page) {
-    // This method is called after flipping the semispace.
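+  // Moves |page| from the from-space page list to the to-space page list
+  // without copying its contents; used when a page is promoted within new
+  // space during evacuation (see PAGE_NEW_NEW_PROMOTION).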
+  void MovePageFromSpaceToSpace(Page* page) {
     DCHECK(page->InFromSpace());
-    return from_space_.ReplaceWithEmptyPage(page);
+    from_space_.RemovePage(page);
+    to_space_.PrependPage(page);
   }
 
+  bool Rebalance();
+
   // Return the maximum capacity of a semispace.
   int MaximumCapacity() {
     DCHECK(to_space_.maximum_capacity() == from_space_.maximum_capacity());
@@ -2859,6 +2894,9 @@
   void PauseAllocationObservers() override;
   void ResumeAllocationObservers() override;
 
+  iterator begin() { return to_space_.begin(); }
+  iterator end() { return to_space_.end(); }
+
  private:
   // Update allocation info to match the current to-space page.
   void UpdateAllocationInfo();
@@ -2869,7 +2907,6 @@
   SemiSpace to_space_;
   SemiSpace from_space_;
   base::VirtualMemory reservation_;
-  int pages_used_;
 
   // Allocation pointer and limit for normal allocation and allocation during
   // mark-compact collection.
@@ -3003,6 +3040,8 @@
 
 class LargeObjectSpace : public Space {
  public:
+  typedef LargePageIterator iterator;
+
   LargeObjectSpace(Heap* heap, AllocationSpace id);
   virtual ~LargeObjectSpace();
 
@@ -3061,6 +3100,12 @@
 
   LargePage* first_page() { return first_page_; }
 
+  // Collect code statistics.
+  void CollectCodeStatistics();
+
+  iterator begin() { return iterator(first_page_); }
+  iterator end() { return iterator(nullptr); }
+
 #ifdef VERIFY_HEAP
   virtual void Verify();
 #endif
@@ -3068,7 +3113,6 @@
 #ifdef DEBUG
   void Print() override;
   void ReportStatistics();
-  void CollectCodeStatistics();
 #endif
 
  private:
@@ -3078,7 +3122,7 @@
   int page_count_;         // number of chunks
   intptr_t objects_size_;  // size of objects
   // Map MemoryChunk::kAlignment-aligned chunks to large pages covering them
-  HashMap chunk_map_;
+  base::HashMap chunk_map_;
 
   friend class LargeObjectIterator;
 };
@@ -3088,31 +3132,17 @@
  public:
   explicit LargeObjectIterator(LargeObjectSpace* space);
 
-  HeapObject* Next();
-
-  // implementation of ObjectIterator.
-  virtual HeapObject* next_object() { return Next(); }
+  HeapObject* Next() override;
 
  private:
   LargePage* current_;
 };
 
-class LargePageIterator BASE_EMBEDDED {
- public:
-  explicit inline LargePageIterator(LargeObjectSpace* space);
-
-  inline LargePage* next();
-
- private:
-  LargePage* next_page_;
-};
-
 // Iterates over the chunks (pages and large object pages) that can contain
 // pointers to new space or to evacuation candidates.
 class MemoryChunkIterator BASE_EMBEDDED {
  public:
-  enum Mode { ALL, ALL_BUT_MAP_SPACE, ALL_BUT_CODE_SPACE };
-  inline explicit MemoryChunkIterator(Heap* heap, Mode mode);
+  inline explicit MemoryChunkIterator(Heap* heap);
 
   // Return NULL when the iterator is done.
   inline MemoryChunk* next();
@@ -3125,8 +3155,8 @@
     kLargeObjectState,
     kFinishedState
   };
+  Heap* heap_;
   State state_;
-  const Mode mode_;
   PageIterator old_iterator_;
   PageIterator code_iterator_;
   PageIterator map_iterator_;