Upgrade V8 to version 4.9.385.28

https://chromium.googlesource.com/v8/v8/+/4.9.385.28

FPIIM-449

Change-Id: I4b2e74289d4bf3667f2f3dc8aa2e541f63e26eb4
diff --git a/src/heap/OWNERS b/src/heap/OWNERS
new file mode 100644
index 0000000..32da1ec
--- /dev/null
+++ b/src/heap/OWNERS
@@ -0,0 +1,7 @@
+set noparent
+
+hpayer@chromium.org
+jochen@chromium.org
+mlippautz@chromium.org
+mstarzinger@chromium.org
+ulan@chromium.org
diff --git a/src/heap/array-buffer-tracker.cc b/src/heap/array-buffer-tracker.cc
new file mode 100644
index 0000000..bbe3c6b
--- /dev/null
+++ b/src/heap/array-buffer-tracker.cc
@@ -0,0 +1,138 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/array-buffer-tracker.h"
+#include "src/heap/heap.h"
+#include "src/isolate.h"
+#include "src/objects.h"
+#include "src/objects-inl.h"
+#include "src/v8.h"
+
+namespace v8 {
+namespace internal {
+
+ArrayBufferTracker::~ArrayBufferTracker() {
+  Isolate* isolate = heap()->isolate();
+  size_t freed_memory = 0;
+  for (auto& buffer : live_array_buffers_) {
+    isolate->array_buffer_allocator()->Free(buffer.first, buffer.second);
+    freed_memory += buffer.second;
+  }
+  for (auto& buffer : live_array_buffers_for_scavenge_) {
+    isolate->array_buffer_allocator()->Free(buffer.first, buffer.second);
+    freed_memory += buffer.second;
+  }
+  live_array_buffers_.clear();
+  live_array_buffers_for_scavenge_.clear();
+  not_yet_discovered_array_buffers_.clear();
+  not_yet_discovered_array_buffers_for_scavenge_.clear();
+
+  if (freed_memory > 0) {
+    heap()->update_amount_of_external_allocated_memory(
+        -static_cast<int64_t>(freed_memory));
+  }
+}
+
+
+void ArrayBufferTracker::RegisterNew(JSArrayBuffer* buffer) {
+  void* data = buffer->backing_store();
+  if (!data) return;
+
+  bool in_new_space = heap()->InNewSpace(buffer);
+  size_t length = NumberToSize(heap()->isolate(), buffer->byte_length());
+  if (in_new_space) {
+    live_array_buffers_for_scavenge_[data] = length;
+  } else {
+    live_array_buffers_[data] = length;
+  }
+
+  // We may go over the limit of externally allocated memory here. We call the
+  // api function to trigger a GC in this case.
+  reinterpret_cast<v8::Isolate*>(heap()->isolate())
+      ->AdjustAmountOfExternalAllocatedMemory(length);
+}
+
+
+void ArrayBufferTracker::Unregister(JSArrayBuffer* buffer) {
+  void* data = buffer->backing_store();
+  if (!data) return;
+
+  bool in_new_space = heap()->InNewSpace(buffer);
+  std::map<void*, size_t>* live_buffers =
+      in_new_space ? &live_array_buffers_for_scavenge_ : &live_array_buffers_;
+  std::map<void*, size_t>* not_yet_discovered_buffers =
+      in_new_space ? &not_yet_discovered_array_buffers_for_scavenge_
+                   : &not_yet_discovered_array_buffers_;
+
+  DCHECK(live_buffers->count(data) > 0);
+
+  size_t length = (*live_buffers)[data];
+  live_buffers->erase(data);
+  not_yet_discovered_buffers->erase(data);
+
+  heap()->update_amount_of_external_allocated_memory(
+      -static_cast<int64_t>(length));
+}
+
+
+void ArrayBufferTracker::MarkLive(JSArrayBuffer* buffer) {
+  void* data = buffer->backing_store();
+
+  // ArrayBuffer might be in the middle of being constructed.
+  if (data == heap()->undefined_value()) return;
+  if (heap()->InNewSpace(buffer)) {
+    not_yet_discovered_array_buffers_for_scavenge_.erase(data);
+  } else {
+    not_yet_discovered_array_buffers_.erase(data);
+  }
+}
+
+
+void ArrayBufferTracker::FreeDead(bool from_scavenge) {
+  size_t freed_memory = 0;
+  Isolate* isolate = heap()->isolate();
+  for (auto& buffer : not_yet_discovered_array_buffers_for_scavenge_) {
+    isolate->array_buffer_allocator()->Free(buffer.first, buffer.second);
+    freed_memory += buffer.second;
+    live_array_buffers_for_scavenge_.erase(buffer.first);
+  }
+
+  if (!from_scavenge) {
+    for (auto& buffer : not_yet_discovered_array_buffers_) {
+      isolate->array_buffer_allocator()->Free(buffer.first, buffer.second);
+      freed_memory += buffer.second;
+      live_array_buffers_.erase(buffer.first);
+    }
+  }
+
+  not_yet_discovered_array_buffers_for_scavenge_ =
+      live_array_buffers_for_scavenge_;
+  if (!from_scavenge) not_yet_discovered_array_buffers_ = live_array_buffers_;
+
+  // Do not call through the api as this code is triggered while doing a GC.
+  heap()->update_amount_of_external_allocated_memory(
+      -static_cast<int64_t>(freed_memory));
+}
+
+
+void ArrayBufferTracker::PrepareDiscoveryInNewSpace() {
+  not_yet_discovered_array_buffers_for_scavenge_ =
+      live_array_buffers_for_scavenge_;
+}
+
+
+void ArrayBufferTracker::Promote(JSArrayBuffer* buffer) {
+  if (buffer->is_external()) return;
+  void* data = buffer->backing_store();
+  if (!data) return;
+  // ArrayBuffer might be in the middle of being constructed.
+  if (data == heap()->undefined_value()) return;
+  DCHECK(live_array_buffers_for_scavenge_.count(data) > 0);
+  live_array_buffers_[data] = live_array_buffers_for_scavenge_[data];
+  live_array_buffers_for_scavenge_.erase(data);
+  not_yet_discovered_array_buffers_for_scavenge_.erase(data);
+}
+
+}  // namespace internal
+}  // namespace v8
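
FreeDead() and MarkLive() above implement a simple two-map protocol: at the start of a collection the not-yet-discovered map is a copy of the live map, discovery erases entries, and whatever remains afterwards is freed and dropped from the live map. A rough standalone model of that bookkeeping, using plain std::map and malloc/free instead of the V8 heap types (all names here are illustrative):

#include <cstdio>
#include <cstdlib>
#include <map>

// Toy stand-ins for the tracker's maps: backing-store pointer -> byte length.
static std::map<void*, size_t> live_buffers;
static std::map<void*, size_t> not_yet_discovered;

void RegisterNew(void* data, size_t length) { live_buffers[data] = length; }

// Called whenever marking/scavenging visits a still-reachable buffer.
void MarkLive(void* data) { not_yet_discovered.erase(data); }

// Frees every backing store that was not discovered, then resets the
// not-yet-discovered set to the current live set for the next cycle.
void FreeDead() {
  size_t freed = 0;
  for (auto& entry : not_yet_discovered) {
    std::free(entry.first);
    freed += entry.second;
    live_buffers.erase(entry.first);
  }
  not_yet_discovered = live_buffers;
  std::printf("freed %zu bytes\n", freed);
}

int main() {
  void* a = std::malloc(16);
  void* b = std::malloc(32);
  RegisterNew(a, 16);
  RegisterNew(b, 32);
  not_yet_discovered = live_buffers;  // start of a collection cycle
  MarkLive(a);                        // only `a` turned out to be reachable
  FreeDead();                         // frees `b` and prints "freed 32 bytes"
  std::free(a);
  return 0;
}
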
diff --git a/src/heap/array-buffer-tracker.h b/src/heap/array-buffer-tracker.h
new file mode 100644
index 0000000..7ba22fb
--- /dev/null
+++ b/src/heap/array-buffer-tracker.h
@@ -0,0 +1,73 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_ARRAY_BUFFER_TRACKER_H_
+#define V8_HEAP_ARRAY_BUFFER_TRACKER_H_
+
+#include <map>
+
+#include "src/globals.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class Heap;
+class JSArrayBuffer;
+
+class ArrayBufferTracker {
+ public:
+  explicit ArrayBufferTracker(Heap* heap) : heap_(heap) {}
+  ~ArrayBufferTracker();
+
+  inline Heap* heap() { return heap_; }
+
+  // The following methods are used to track raw C++ pointers to externally
+  // allocated memory used as backing store in live array buffers.
+
+  // A new ArrayBuffer was created with |data| as backing store.
+  void RegisterNew(JSArrayBuffer* buffer);
+
+  // The backing store |data| is no longer owned by V8.
+  void Unregister(JSArrayBuffer* buffer);
+
+  // A live ArrayBuffer was discovered during marking/scavenge.
+  void MarkLive(JSArrayBuffer* buffer);
+
+  // Frees all backing store pointers that weren't discovered in the previous
+  // marking or scavenge phase.
+  void FreeDead(bool from_scavenge);
+
+  // Prepare for a new scavenge phase. A new marking phase is implicitly
+  // prepared by finishing the previous one.
+  void PrepareDiscoveryInNewSpace();
+
+  // An ArrayBuffer moved from new space to old space.
+  void Promote(JSArrayBuffer* buffer);
+
+ private:
+  Heap* heap_;
+
+  // |live_array_buffers_| maps externally allocated memory used as backing
+  // store for ArrayBuffers to the length of the respective memory blocks.
+  //
+  // At the beginning of mark/compact, |not_yet_discovered_array_buffers_| is
+  // a copy of |live_array_buffers_| and we remove pointers as we discover live
+  // ArrayBuffer objects during marking. At the end of mark/compact, the
+  // remaining memory blocks can be freed.
+  std::map<void*, size_t> live_array_buffers_;
+  std::map<void*, size_t> not_yet_discovered_array_buffers_;
+
+  // To be able to free memory held by ArrayBuffers during scavenge as well, we
+  // have a separate list of allocated memory held by ArrayBuffers in new space.
+  //
+  // Since mark/compact also evacuates the new space, all pointers in the
+  // |live_array_buffers_for_scavenge_| list are also in the
+  // |live_array_buffers_| list.
+  std::map<void*, size_t> live_array_buffers_for_scavenge_;
+  std::map<void*, size_t> not_yet_discovered_array_buffers_for_scavenge_;
+};
+}  // namespace internal
+}  // namespace v8
+#endif  // V8_HEAP_ARRAY_BUFFER_TRACKER_H_
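
The split into a new-space pair and an old-generation pair of maps is what makes FreeDead(from_scavenge) and Promote() work: a scavenge may only sweep the new-space maps, a full GC sweeps both, and a buffer that survives into old space has its entry carried over by Promote(). A compact sketch of just that hand-off, again with plain maps and made-up names rather than the V8 classes:

#include <cstddef>
#include <cstdio>
#include <map>

struct ToyTracker {
  std::map<void*, size_t> live_old, not_discovered_old;
  std::map<void*, size_t> live_new, not_discovered_new;

  void RegisterNew(void* data, size_t len, bool in_new_space) {
    (in_new_space ? live_new : live_old)[data] = len;
  }

  // A buffer that survived a scavenge and moved to old space: carry its
  // length over to the old-generation map, drop it from the new-space maps.
  void Promote(void* data) {
    live_old[data] = live_new[data];
    live_new.erase(data);
    not_discovered_new.erase(data);
  }

  // from_scavenge == true sweeps only the new-space maps; a full GC sweeps both.
  size_t FreeDead(bool from_scavenge) {
    size_t freed = 0;
    for (auto& e : not_discovered_new) { freed += e.second; live_new.erase(e.first); }
    if (!from_scavenge) {
      for (auto& e : not_discovered_old) { freed += e.second; live_old.erase(e.first); }
      not_discovered_old = live_old;
    }
    not_discovered_new = live_new;
    return freed;  // the real tracker hands these bytes back to the allocator
  }
};

int main() {
  ToyTracker t;
  int a, b;
  t.RegisterNew(&a, 8, true);         // young buffer
  t.RegisterNew(&b, 8, true);         // young buffer
  t.not_discovered_new = t.live_new;  // PrepareDiscoveryInNewSpace()
  t.Promote(&a);                      // `a` survived and moved to old space
  std::printf("scavenge freed %zu bytes\n", t.FreeDead(true));  // frees only `b`
  return 0;
}
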
diff --git a/src/heap/gc-idle-time-handler.cc b/src/heap/gc-idle-time-handler.cc
index ff2a559..4e6e608 100644
--- a/src/heap/gc-idle-time-handler.cc
+++ b/src/heap/gc-idle-time-handler.cc
@@ -3,6 +3,8 @@
 // found in the LICENSE file.
 
 #include "src/heap/gc-idle-time-handler.h"
+
+#include "src/flags.h"
 #include "src/heap/gc-tracer.h"
 #include "src/utils.h"
 
@@ -12,10 +14,8 @@
 const double GCIdleTimeHandler::kConservativeTimeRatio = 0.9;
 const size_t GCIdleTimeHandler::kMaxMarkCompactTimeInMs = 1000;
 const size_t GCIdleTimeHandler::kMaxFinalIncrementalMarkCompactTimeInMs = 1000;
-const size_t GCIdleTimeHandler::kMinTimeForFinalizeSweeping = 100;
-const int GCIdleTimeHandler::kMaxMarkCompactsInIdleRound = 7;
-const int GCIdleTimeHandler::kIdleScavengeThreshold = 5;
 const double GCIdleTimeHandler::kHighContextDisposalRate = 100;
+const size_t GCIdleTimeHandler::kMinTimeForOverApproximatingWeakClosureInMs = 1;
 
 
 void GCIdleTimeAction::Print() {
@@ -26,42 +26,24 @@
     case DO_NOTHING:
       PrintF("no action");
       break;
-    case DO_INCREMENTAL_MARKING:
-      PrintF("incremental marking with step %" V8_PTR_PREFIX "d / ms",
-             parameter);
+    case DO_INCREMENTAL_STEP:
+      PrintF("incremental step");
       if (additional_work) {
         PrintF("; finalized marking");
       }
       break;
-    case DO_SCAVENGE:
-      PrintF("scavenge");
-      break;
     case DO_FULL_GC:
       PrintF("full GC");
       break;
-    case DO_FINALIZE_SWEEPING:
-      PrintF("finalize sweeping");
-      break;
   }
 }
 
 
-void GCIdleTimeHandler::HeapState::Print() {
+void GCIdleTimeHeapState::Print() {
   PrintF("contexts_disposed=%d ", contexts_disposed);
   PrintF("contexts_disposal_rate=%f ", contexts_disposal_rate);
   PrintF("size_of_objects=%" V8_PTR_PREFIX "d ", size_of_objects);
   PrintF("incremental_marking_stopped=%d ", incremental_marking_stopped);
-  PrintF("can_start_incremental_marking=%d ", can_start_incremental_marking);
-  PrintF("sweeping_in_progress=%d ", sweeping_in_progress);
-  PrintF("mark_compact_speed=%" V8_PTR_PREFIX "d ",
-         mark_compact_speed_in_bytes_per_ms);
-  PrintF("incremental_marking_speed=%" V8_PTR_PREFIX "d ",
-         incremental_marking_speed_in_bytes_per_ms);
-  PrintF("scavenge_speed=%" V8_PTR_PREFIX "d ", scavenge_speed_in_bytes_per_ms);
-  PrintF("new_space_size=%" V8_PTR_PREFIX "d ", used_new_space_size);
-  PrintF("new_space_capacity=%" V8_PTR_PREFIX "d ", new_space_capacity);
-  PrintF("new_space_allocation_throughput=%" V8_PTR_PREFIX "d",
-         new_space_allocation_throughput_in_bytes_per_ms);
 }
 
 
@@ -111,51 +93,13 @@
 }
 
 
-bool GCIdleTimeHandler::ShouldDoScavenge(
-    size_t idle_time_in_ms, size_t new_space_size, size_t used_new_space_size,
-    size_t scavenge_speed_in_bytes_per_ms,
-    size_t new_space_allocation_throughput_in_bytes_per_ms) {
-  size_t new_space_allocation_limit =
-      kMaxFrameRenderingIdleTime * scavenge_speed_in_bytes_per_ms;
-
-  // If the limit is larger than the new space size, then scavenging used to be
-  // really fast. We can take advantage of the whole new space.
-  if (new_space_allocation_limit > new_space_size) {
-    new_space_allocation_limit = new_space_size;
-  }
-
-  // We do not know the allocation throughput before the first Scavenge.
-  // TODO(hpayer): Estimate allocation throughput before the first Scavenge.
-  if (new_space_allocation_throughput_in_bytes_per_ms == 0) {
-    new_space_allocation_limit =
-        static_cast<size_t>(new_space_size * kConservativeTimeRatio);
-  } else {
-    // We have to trigger scavenge before we reach the end of new space.
-    new_space_allocation_limit -=
-        new_space_allocation_throughput_in_bytes_per_ms *
-        kMaxFrameRenderingIdleTime;
-  }
-
-  if (scavenge_speed_in_bytes_per_ms == 0) {
-    scavenge_speed_in_bytes_per_ms = kInitialConservativeScavengeSpeed;
-  }
-
-  if (new_space_allocation_limit <= used_new_space_size) {
-    if (used_new_space_size / scavenge_speed_in_bytes_per_ms <=
-        idle_time_in_ms) {
-      return true;
-    }
-  }
-  return false;
-}
-
-
 bool GCIdleTimeHandler::ShouldDoMarkCompact(
     size_t idle_time_in_ms, size_t size_of_objects,
     size_t mark_compact_speed_in_bytes_per_ms) {
-  return idle_time_in_ms >=
-         EstimateMarkCompactTime(size_of_objects,
-                                 mark_compact_speed_in_bytes_per_ms);
+  return idle_time_in_ms >= kMaxScheduledIdleTime &&
+         idle_time_in_ms >=
+             EstimateMarkCompactTime(size_of_objects,
+                                     mark_compact_speed_in_bytes_per_ms);
 }
 
 
@@ -176,29 +120,41 @@
 }
 
 
+bool GCIdleTimeHandler::ShouldDoOverApproximateWeakClosure(
+    size_t idle_time_in_ms) {
+  // TODO(jochen): Estimate the time it will take to build the object groups.
+  return idle_time_in_ms >= kMinTimeForOverApproximatingWeakClosureInMs;
+}
+
+
+GCIdleTimeAction GCIdleTimeHandler::NothingOrDone(double idle_time_in_ms) {
+  if (idle_time_in_ms >= kMinBackgroundIdleTime) {
+    return GCIdleTimeAction::Nothing();
+  }
+  if (idle_times_which_made_no_progress_ >= kMaxNoProgressIdleTimes) {
+    return GCIdleTimeAction::Done();
+  } else {
+    idle_times_which_made_no_progress_++;
+    return GCIdleTimeAction::Nothing();
+  }
+}
+
+
 // The following logic is implemented by the controller:
 // (1) If we don't have any idle time, do nothing, unless a context was
 // disposed, incremental marking is stopped, and the heap is small. Then do
 // a full GC.
-// (2) If the new space is almost full and we can affort a Scavenge or if the
-// next Scavenge will very likely take long, then a Scavenge is performed.
-// (3) If there is currently no MarkCompact idle round going on, we start a
-// new idle round if enough garbage was created. Otherwise we do not perform
-// garbage collection to keep system utilization low.
-// (4) If incremental marking is done, we perform a full garbage collection
-// if  we are allowed to still do full garbage collections during this idle
-// round or if we are not allowed to start incremental marking. Otherwise we
-// do not perform garbage collection to keep system utilization low.
-// (5) If sweeping is in progress and we received a large enough idle time
+// (2) If the context disposal rate is high and we cannot perform a full GC,
+// we do nothing until the context disposal rate becomes lower.
+// (3) If the new space is almost full and we can afford a scavenge or if the
+// next scavenge will very likely take long, then a scavenge is performed.
+// (4) If sweeping is in progress and we received a large enough idle time
 // request, we finalize sweeping here.
-// (6) If incremental marking is in progress, we perform a marking step. Note,
+// (5) If incremental marking is in progress, we perform a marking step. Note
 // that this currently may trigger a full garbage collection.
 GCIdleTimeAction GCIdleTimeHandler::Compute(double idle_time_in_ms,
-                                            HeapState heap_state) {
+                                            GCIdleTimeHeapState heap_state) {
   if (static_cast<int>(idle_time_in_ms) <= 0) {
-    if (heap_state.contexts_disposed > 0) {
-      StartIdleRound();
-    }
     if (heap_state.incremental_marking_stopped) {
       if (ShouldDoContextDisposalMarkCompact(
               heap_state.contexts_disposed,
@@ -209,57 +165,20 @@
     return GCIdleTimeAction::Nothing();
   }
 
-  if (ShouldDoScavenge(
-          static_cast<size_t>(idle_time_in_ms), heap_state.new_space_capacity,
-          heap_state.used_new_space_size,
-          heap_state.scavenge_speed_in_bytes_per_ms,
-          heap_state.new_space_allocation_throughput_in_bytes_per_ms)) {
-    return GCIdleTimeAction::Scavenge();
+  // We are in a context disposal GC scenario. Don't do anything if we do not
+  // get the right idle signal.
+  if (ShouldDoContextDisposalMarkCompact(heap_state.contexts_disposed,
+                                         heap_state.contexts_disposal_rate)) {
+    return NothingOrDone(idle_time_in_ms);
   }
 
-  if (IsMarkCompactIdleRoundFinished()) {
-    if (EnoughGarbageSinceLastIdleRound()) {
-      StartIdleRound();
-    } else {
-      return GCIdleTimeAction::Done();
-    }
+  if (!FLAG_incremental_marking || heap_state.incremental_marking_stopped) {
+    return GCIdleTimeAction::Done();
   }
 
-  if (heap_state.incremental_marking_stopped) {
-    if (ShouldDoMarkCompact(static_cast<size_t>(idle_time_in_ms),
-                            heap_state.size_of_objects,
-                            heap_state.mark_compact_speed_in_bytes_per_ms)) {
-      // If there are no more than two GCs left in this idle round and we are
-      // allowed to do a full GC, then make those GCs full in order to compact
-      // the code space.
-      // TODO(ulan): Once we enable code compaction for incremental marking, we
-      // can get rid of this special case and always start incremental marking.
-      int remaining_mark_sweeps =
-          kMaxMarkCompactsInIdleRound - mark_compacts_since_idle_round_started_;
-      if (static_cast<size_t>(idle_time_in_ms) > kMaxFrameRenderingIdleTime &&
-          (remaining_mark_sweeps <= 2 ||
-           !heap_state.can_start_incremental_marking)) {
-        return GCIdleTimeAction::FullGC();
-      }
-    }
-    if (!heap_state.can_start_incremental_marking) {
-      return GCIdleTimeAction::Nothing();
-    }
-  }
-  // TODO(hpayer): Estimate finalize sweeping time.
-  if (heap_state.sweeping_in_progress &&
-      static_cast<size_t>(idle_time_in_ms) >= kMinTimeForFinalizeSweeping) {
-    return GCIdleTimeAction::FinalizeSweeping();
-  }
+  return GCIdleTimeAction::IncrementalStep();
+}
 
-  if (heap_state.incremental_marking_stopped &&
-      !heap_state.can_start_incremental_marking) {
-    return GCIdleTimeAction::Nothing();
-  }
-  size_t step_size = EstimateMarkingStepSize(
-      static_cast<size_t>(kIncrementalMarkingStepTimeInMs),
-      heap_state.incremental_marking_speed_in_bytes_per_ms);
-  return GCIdleTimeAction::IncrementalMarking(step_size);
-}
-}
-}
+
+}  // namespace internal
+}  // namespace v8
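
The rewritten Compute() boils the old idle-round machinery down to a short decision ladder over the trimmed GCIdleTimeHeapState. A condensed standalone restatement of that ladder is shown below; the heap-state struct, the flag constant and the context-disposal predicate are simplified stand-ins, not the V8 definitions:

#include <cstdio>

enum class Action { kDone, kNothing, kFullGC, kIncrementalStep };

struct HeapState {               // trimmed-down stand-in for GCIdleTimeHeapState
  int contexts_disposed = 0;
  double contexts_disposal_rate = 0;
  bool incremental_marking_stopped = false;
};

constexpr double kHighContextDisposalRate = 100;
constexpr bool kIncrementalMarkingEnabled = true;  // stands in for FLAG_incremental_marking

// Simplified predicate: a context was disposed and contexts are being disposed
// at a high rate (few ms per disposal); not the exact V8 check.
bool ShouldDoContextDisposalMarkCompact(const HeapState& s) {
  return s.contexts_disposed > 0 &&
         s.contexts_disposal_rate < kHighContextDisposalRate;
}

Action Compute(double idle_time_in_ms, const HeapState& s) {
  // (1) No idle time: only a context-disposal full GC is worth doing.
  if (static_cast<int>(idle_time_in_ms) <= 0) {
    if (s.incremental_marking_stopped && ShouldDoContextDisposalMarkCompact(s))
      return Action::kFullGC;
    return Action::kNothing;
  }
  // (2) Context disposal pending but this is not the right idle signal: wait.
  // (The real handler returns Nothing a bounded number of times, then Done.)
  if (ShouldDoContextDisposalMarkCompact(s)) return Action::kNothing;
  // (3) No incremental marking to help with: tell the scheduler we are done.
  if (!kIncrementalMarkingEnabled || s.incremental_marking_stopped)
    return Action::kDone;
  // (4) Otherwise spend the idle time on an incremental marking step.
  return Action::kIncrementalStep;
}

int main() {
  HeapState s;
  std::printf("%d\n", static_cast<int>(Compute(10, s)));  // 3: incremental step
  s.incremental_marking_stopped = true;
  std::printf("%d\n", static_cast<int>(Compute(10, s)));  // 0: done
  return 0;
}
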
diff --git a/src/heap/gc-idle-time-handler.h b/src/heap/gc-idle-time-handler.h
index 4dd190b..74ef1b1 100644
--- a/src/heap/gc-idle-time-handler.h
+++ b/src/heap/gc-idle-time-handler.h
@@ -13,10 +13,8 @@
 enum GCIdleTimeActionType {
   DONE,
   DO_NOTHING,
-  DO_INCREMENTAL_MARKING,
-  DO_SCAVENGE,
+  DO_INCREMENTAL_STEP,
   DO_FULL_GC,
-  DO_FINALIZE_SWEEPING
 };
 
 
@@ -25,7 +23,6 @@
   static GCIdleTimeAction Done() {
     GCIdleTimeAction result;
     result.type = DONE;
-    result.parameter = 0;
     result.additional_work = false;
     return result;
   }
@@ -33,23 +30,13 @@
   static GCIdleTimeAction Nothing() {
     GCIdleTimeAction result;
     result.type = DO_NOTHING;
-    result.parameter = 0;
     result.additional_work = false;
     return result;
   }
 
-  static GCIdleTimeAction IncrementalMarking(intptr_t step_size) {
+  static GCIdleTimeAction IncrementalStep() {
     GCIdleTimeAction result;
-    result.type = DO_INCREMENTAL_MARKING;
-    result.parameter = step_size;
-    result.additional_work = false;
-    return result;
-  }
-
-  static GCIdleTimeAction Scavenge() {
-    GCIdleTimeAction result;
-    result.type = DO_SCAVENGE;
-    result.parameter = 0;
+    result.type = DO_INCREMENTAL_STEP;
     result.additional_work = false;
     return result;
   }
@@ -57,15 +44,6 @@
   static GCIdleTimeAction FullGC() {
     GCIdleTimeAction result;
     result.type = DO_FULL_GC;
-    result.parameter = 0;
-    result.additional_work = false;
-    return result;
-  }
-
-  static GCIdleTimeAction FinalizeSweeping() {
-    GCIdleTimeAction result;
-    result.type = DO_FINALIZE_SWEEPING;
-    result.parameter = 0;
     result.additional_work = false;
     return result;
   }
@@ -73,12 +51,20 @@
   void Print();
 
   GCIdleTimeActionType type;
-  intptr_t parameter;
   bool additional_work;
 };
 
 
-class GCTracer;
+class GCIdleTimeHeapState {
+ public:
+  void Print();
+
+  int contexts_disposed;
+  double contexts_disposal_rate;
+  size_t size_of_objects;
+  bool incremental_marking_stopped;
+};
+
 
 // The idle time handler makes decisions about which garbage collection
 // operations are executing during IdleNotification.
@@ -111,23 +97,18 @@
   // EstimateFinalIncrementalMarkCompactTime.
   static const size_t kMaxFinalIncrementalMarkCompactTimeInMs;
 
-  // Minimum time to finalize sweeping phase. The main thread may wait for
-  // sweeper threads.
-  static const size_t kMinTimeForFinalizeSweeping;
+  // This is the maximum scheduled idle time. Note that it can be more than
+  // 16.66 ms when there is currently no rendering going on.
+  static const size_t kMaxScheduledIdleTime = 50;
 
-  // Number of idle mark-compact events, after which idle handler will finish
-  // idle round.
-  static const int kMaxMarkCompactsInIdleRound;
+  // The maximum idle time when frames are rendered is 16.66ms.
+  static const size_t kMaxFrameRenderingIdleTime = 17;
 
-  // Number of scavenges that will trigger start of new idle round.
-  static const int kIdleScavengeThreshold;
+  static const int kMinBackgroundIdleTime = 900;
 
-  // That is the maximum idle time we will have during frame rendering.
-  static const size_t kMaxFrameRenderingIdleTime = 16;
-
-  // If we haven't recorded any scavenger events yet, we use a conservative
-  // lower bound for the scavenger speed.
-  static const size_t kInitialConservativeScavengeSpeed = 100 * KB;
+  // An allocation throughput below kLowAllocationThroughput bytes/ms is
+  // considered low.
+  static const size_t kLowAllocationThroughput = 1000;
 
   // If contexts are disposed at a higher rate a full gc is triggered.
   static const double kHighContextDisposalRate;
@@ -135,42 +116,19 @@
   // Incremental marking step time.
   static const size_t kIncrementalMarkingStepTimeInMs = 1;
 
-  class HeapState {
-   public:
-    void Print();
+  static const size_t kMinTimeForOverApproximatingWeakClosureInMs;
 
-    int contexts_disposed;
-    double contexts_disposal_rate;
-    size_t size_of_objects;
-    bool incremental_marking_stopped;
-    bool can_start_incremental_marking;
-    bool sweeping_in_progress;
-    size_t mark_compact_speed_in_bytes_per_ms;
-    size_t incremental_marking_speed_in_bytes_per_ms;
-    size_t final_incremental_mark_compact_speed_in_bytes_per_ms;
-    size_t scavenge_speed_in_bytes_per_ms;
-    size_t used_new_space_size;
-    size_t new_space_capacity;
-    size_t new_space_allocation_throughput_in_bytes_per_ms;
-  };
+  // Number of times we will return a Nothing action in the current mode
+  // despite having idle time available before returning a Done action, to
+  // ensure we don't keep scheduling idle tasks and making no progress.
+  static const int kMaxNoProgressIdleTimes = 10;
 
-  GCIdleTimeHandler()
-      : mark_compacts_since_idle_round_started_(0),
-        scavenges_since_last_idle_round_(0) {}
+  GCIdleTimeHandler() : idle_times_which_made_no_progress_(0) {}
 
-  GCIdleTimeAction Compute(double idle_time_in_ms, HeapState heap_state);
+  GCIdleTimeAction Compute(double idle_time_in_ms,
+                           GCIdleTimeHeapState heap_state);
 
-  void NotifyIdleMarkCompact() {
-    if (mark_compacts_since_idle_round_started_ < kMaxMarkCompactsInIdleRound) {
-      ++mark_compacts_since_idle_round_started_;
-      if (mark_compacts_since_idle_round_started_ ==
-          kMaxMarkCompactsInIdleRound) {
-        scavenges_since_last_idle_round_ = 0;
-      }
-    }
-  }
-
-  void NotifyScavenge() { ++scavenges_since_last_idle_round_; }
+  void ResetNoProgressCounter() { idle_times_which_made_no_progress_ = 0; }
 
   static size_t EstimateMarkingStepSize(size_t idle_time_in_ms,
                                         size_t marking_speed_in_bytes_per_ms);
@@ -192,23 +150,13 @@
       size_t idle_time_in_ms, size_t size_of_objects,
       size_t final_incremental_mark_compact_speed_in_bytes_per_ms);
 
-  static bool ShouldDoScavenge(
-      size_t idle_time_in_ms, size_t new_space_size, size_t used_new_space_size,
-      size_t scavenger_speed_in_bytes_per_ms,
-      size_t new_space_allocation_throughput_in_bytes_per_ms);
+  static bool ShouldDoOverApproximateWeakClosure(size_t idle_time_in_ms);
 
  private:
-  void StartIdleRound() { mark_compacts_since_idle_round_started_ = 0; }
-  bool IsMarkCompactIdleRoundFinished() {
-    return mark_compacts_since_idle_round_started_ ==
-           kMaxMarkCompactsInIdleRound;
-  }
-  bool EnoughGarbageSinceLastIdleRound() {
-    return scavenges_since_last_idle_round_ >= kIdleScavengeThreshold;
-  }
+  GCIdleTimeAction NothingOrDone(double idle_time_in_ms);
 
-  int mark_compacts_since_idle_round_started_;
-  int scavenges_since_last_idle_round_;
+  // Idle notifications with no progress.
+  int idle_times_which_made_no_progress_;
 
   DISALLOW_COPY_AND_ASSIGN(GCIdleTimeHandler);
 };
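
kMaxNoProgressIdleTimes together with idle_times_which_made_no_progress_ gives the handler a bounded back-off: short idle notifications that cannot be put to use are tolerated only a fixed number of times before Done is reported and the embedder stops posting idle tasks, while long background idle times never count against the budget. A self-contained sketch of that counter, with illustrative class and method names:

#include <cstdio>

enum class Action { kNothing, kDone };

class NoProgressBackoff {
 public:
  // Mirrors the shape of GCIdleTimeHandler::NothingOrDone(): long background
  // idle times are always tolerated, short ones only kMaxNoProgressIdleTimes
  // times in a row before we give up and report Done.
  Action OnUselessIdleTime(double idle_time_in_ms) {
    if (idle_time_in_ms >= kMinBackgroundIdleTime) return Action::kNothing;
    if (no_progress_count_ >= kMaxNoProgressIdleTimes) return Action::kDone;
    ++no_progress_count_;
    return Action::kNothing;
  }

  // Called once real GC work happens, like ResetNoProgressCounter() above.
  void ResetNoProgressCounter() { no_progress_count_ = 0; }

 private:
  static constexpr int kMinBackgroundIdleTime = 900;  // ms, as in the header above
  static constexpr int kMaxNoProgressIdleTimes = 10;
  int no_progress_count_ = 0;
};

int main() {
  NoProgressBackoff backoff;
  int wasted = 0;
  while (backoff.OnUselessIdleTime(16.6) == Action::kNothing) ++wasted;
  std::printf("reported Done after %d wasted idle notifications\n", wasted);  // 10
  return 0;
}
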
diff --git a/src/heap/gc-tracer.cc b/src/heap/gc-tracer.cc
index a35872d..ec1ad65 100644
--- a/src/heap/gc-tracer.cc
+++ b/src/heap/gc-tracer.cc
@@ -2,10 +2,12 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "src/v8.h"
-
 #include "src/heap/gc-tracer.h"
 
+#include "src/counters.h"
+#include "src/heap/heap-inl.h"
+#include "src/isolate.h"
+
 namespace v8 {
 namespace internal {
 
@@ -19,8 +21,21 @@
 }
 
 
+GCTracer::Scope::Scope(GCTracer* tracer, ScopeId scope)
+    : tracer_(tracer), scope_(scope) {
+  start_time_ = tracer_->heap_->MonotonicallyIncreasingTimeInMs();
+}
+
+
+GCTracer::Scope::~Scope() {
+  DCHECK(scope_ < NUMBER_OF_SCOPES);  // scope_ is unsigned.
+  tracer_->current_.scopes[scope_] +=
+      tracer_->heap_->MonotonicallyIncreasingTimeInMs() - start_time_;
+}
+
+
 GCTracer::AllocationEvent::AllocationEvent(double duration,
-                                           intptr_t allocation_in_bytes) {
+                                           size_t allocation_in_bytes) {
   duration_ = duration;
   allocation_in_bytes_ = allocation_in_bytes;
 }
@@ -31,8 +46,8 @@
 }
 
 
-GCTracer::SurvivalEvent::SurvivalEvent(double survival_rate) {
-  survival_rate_ = survival_rate;
+GCTracer::SurvivalEvent::SurvivalEvent(double promotion_ratio) {
+  promotion_ratio_ = promotion_ratio;
 }
 
 
@@ -43,6 +58,7 @@
       collector_reason(collector_reason),
       start_time(0.0),
       end_time(0.0),
+      reduce_memory(false),
       start_object_size(0),
       end_object_size(0),
       start_memory_size(0),
@@ -97,12 +113,21 @@
       cumulative_incremental_marking_duration_(0.0),
       cumulative_pure_incremental_marking_duration_(0.0),
       longest_incremental_marking_step_(0.0),
+      cumulative_incremental_marking_finalization_steps_(0),
+      cumulative_incremental_marking_finalization_duration_(0.0),
+      longest_incremental_marking_finalization_step_(0.0),
       cumulative_marking_duration_(0.0),
       cumulative_sweeping_duration_(0.0),
-      new_space_top_after_gc_(0),
+      allocation_time_ms_(0.0),
+      new_space_allocation_counter_bytes_(0),
+      old_generation_allocation_counter_bytes_(0),
+      allocation_duration_since_gc_(0.0),
+      new_space_allocation_in_bytes_since_gc_(0),
+      old_generation_allocation_in_bytes_since_gc_(0),
+      combined_mark_compact_speed_cache_(0.0),
       start_counter_(0) {
   current_ = Event(Event::START, NULL, NULL);
-  current_.end_time = base::OS::TimeCurrentMillis();
+  current_.end_time = heap_->MonotonicallyIncreasingTimeInMs();
   previous_ = previous_incremental_mark_compactor_event_ = current_;
 }
 
@@ -113,13 +138,9 @@
   if (start_counter_ != 1) return;
 
   previous_ = current_;
-  double start_time = base::OS::TimeCurrentMillis();
-  if (new_space_top_after_gc_ != 0) {
-    AddNewSpaceAllocationTime(
-        start_time - previous_.end_time,
-        reinterpret_cast<intptr_t>((heap_->new_space()->top()) -
-                                   new_space_top_after_gc_));
-  }
+  double start_time = heap_->MonotonicallyIncreasingTimeInMs();
+  SampleAllocation(start_time, heap_->NewSpaceAllocationCounter(),
+                   heap_->OldGenerationAllocationCounter());
   if (current_.type == Event::INCREMENTAL_MARK_COMPACTOR)
     previous_incremental_mark_compactor_event_ = current_;
 
@@ -134,6 +155,7 @@
     }
   }
 
+  current_.reduce_memory = heap_->ShouldReduceMemory();
   current_.start_time = start_time;
   current_.start_object_size = heap_->SizeOfObjects();
   current_.start_memory_size = heap_->isolate()->memory_allocator()->Size();
@@ -154,17 +176,21 @@
   for (int i = 0; i < Scope::NUMBER_OF_SCOPES; i++) {
     current_.scopes[i] = 0;
   }
+  int committed_memory = static_cast<int>(heap_->CommittedMemory() / KB);
+  int used_memory = static_cast<int>(current_.start_object_size / KB);
+  heap_->isolate()->counters()->aggregated_memory_heap_committed()->AddSample(
+      start_time, committed_memory);
+  heap_->isolate()->counters()->aggregated_memory_heap_used()->AddSample(
+      start_time, used_memory);
 }
 
 
 void GCTracer::Stop(GarbageCollector collector) {
   start_counter_--;
   if (start_counter_ != 0) {
-    if (FLAG_trace_gc) {
-      PrintF("[Finished reentrant %s during %s.]\n",
-             collector == SCAVENGER ? "Scavenge" : "Mark-sweep",
-             current_.TypeName(false));
-    }
+    Output("[Finished reentrant %s during %s.]\n",
+           collector == SCAVENGER ? "Scavenge" : "Mark-sweep",
+           current_.TypeName(false));
     return;
   }
 
@@ -174,12 +200,20 @@
           (current_.type == Event::MARK_COMPACTOR ||
            current_.type == Event::INCREMENTAL_MARK_COMPACTOR)));
 
-  current_.end_time = base::OS::TimeCurrentMillis();
+  current_.end_time = heap_->MonotonicallyIncreasingTimeInMs();
   current_.end_object_size = heap_->SizeOfObjects();
   current_.end_memory_size = heap_->isolate()->memory_allocator()->Size();
   current_.end_holes_size = CountTotalHolesSize(heap_);
-  new_space_top_after_gc_ =
-      reinterpret_cast<intptr_t>(heap_->new_space()->top());
+  current_.survived_new_space_object_size = heap_->SurvivedNewSpaceObjectSize();
+
+  AddAllocation(current_.end_time);
+
+  int committed_memory = static_cast<int>(heap_->CommittedMemory() / KB);
+  int used_memory = static_cast<int>(current_.end_object_size / KB);
+  heap_->isolate()->counters()->aggregated_memory_heap_committed()->AddSample(
+      current_.end_time, committed_memory);
+  heap_->isolate()->counters()->aggregated_memory_heap_used()->AddSample(
+      current_.end_time, used_memory);
 
   if (current_.type == Event::SCAVENGER) {
     current_.incremental_marking_steps =
@@ -214,18 +248,18 @@
             .cumulative_pure_incremental_marking_duration;
     longest_incremental_marking_step_ = 0.0;
     incremental_mark_compactor_events_.push_front(current_);
+    combined_mark_compact_speed_cache_ = 0.0;
   } else {
     DCHECK(current_.incremental_marking_bytes == 0);
     DCHECK(current_.incremental_marking_duration == 0);
     DCHECK(current_.pure_incremental_marking_duration == 0);
-    DCHECK(longest_incremental_marking_step_ == 0.0);
+    longest_incremental_marking_step_ = 0.0;
     mark_compactor_events_.push_front(current_);
+    combined_mark_compact_speed_cache_ = 0.0;
   }
 
   // TODO(ernstm): move the code below out of GCTracer.
 
-  if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
-
   double duration = current_.end_time - current_.start_time;
   double spent_in_mutator = Max(current_.start_time - previous_.end_time, 0.0);
 
@@ -235,20 +269,58 @@
   if (current_.type == Event::SCAVENGER && FLAG_trace_gc_ignore_scavenger)
     return;
 
-  if (FLAG_trace_gc) {
-    if (FLAG_trace_gc_nvp)
-      PrintNVP();
-    else
-      Print();
+  if (FLAG_trace_gc_nvp)
+    PrintNVP();
+  else
+    Print();
 
+  if (FLAG_trace_gc) {
     heap_->PrintShortHeapStatistics();
   }
+
+  longest_incremental_marking_finalization_step_ = 0.0;
+  cumulative_incremental_marking_finalization_steps_ = 0;
+  cumulative_incremental_marking_finalization_duration_ = 0.0;
 }
 
 
-void GCTracer::AddNewSpaceAllocationTime(double duration,
-                                         intptr_t allocation_in_bytes) {
-  allocation_events_.push_front(AllocationEvent(duration, allocation_in_bytes));
+void GCTracer::SampleAllocation(double current_ms,
+                                size_t new_space_counter_bytes,
+                                size_t old_generation_counter_bytes) {
+  if (allocation_time_ms_ == 0) {
+    // It is the first sample.
+    allocation_time_ms_ = current_ms;
+    new_space_allocation_counter_bytes_ = new_space_counter_bytes;
+    old_generation_allocation_counter_bytes_ = old_generation_counter_bytes;
+    return;
+  }
+  // This assumes that counters are unsigned integers so that the subtraction
+  // below works even if the new counter is less than the old counter.
+  size_t new_space_allocated_bytes =
+      new_space_counter_bytes - new_space_allocation_counter_bytes_;
+  size_t old_generation_allocated_bytes =
+      old_generation_counter_bytes - old_generation_allocation_counter_bytes_;
+  double duration = current_ms - allocation_time_ms_;
+  allocation_time_ms_ = current_ms;
+  new_space_allocation_counter_bytes_ = new_space_counter_bytes;
+  old_generation_allocation_counter_bytes_ = old_generation_counter_bytes;
+  allocation_duration_since_gc_ += duration;
+  new_space_allocation_in_bytes_since_gc_ += new_space_allocated_bytes;
+  old_generation_allocation_in_bytes_since_gc_ +=
+      old_generation_allocated_bytes;
+}
+
+
+void GCTracer::AddAllocation(double current_ms) {
+  allocation_time_ms_ = current_ms;
+  new_space_allocation_events_.push_front(AllocationEvent(
+      allocation_duration_since_gc_, new_space_allocation_in_bytes_since_gc_));
+  old_generation_allocation_events_.push_front(
+      AllocationEvent(allocation_duration_since_gc_,
+                      old_generation_allocation_in_bytes_since_gc_));
+  allocation_duration_since_gc_ = 0;
+  new_space_allocation_in_bytes_since_gc_ = 0;
+  old_generation_allocation_in_bytes_since_gc_ = 0;
 }
 
 
@@ -257,8 +329,15 @@
 }
 
 
-void GCTracer::AddSurvivalRate(double survival_rate) {
-  survival_events_.push_front(SurvivalEvent(survival_rate));
+void GCTracer::AddCompactionEvent(double duration,
+                                  intptr_t live_bytes_compacted) {
+  compaction_events_.push_front(
+      CompactionEvent(duration, live_bytes_compacted));
+}
+
+
+void GCTracer::AddSurvivalRatio(double promotion_ratio) {
+  survival_events_.push_front(SurvivalEvent(promotion_ratio));
 }
 
 
@@ -275,29 +354,59 @@
 }
 
 
-void GCTracer::Print() const {
-  PrintPID("%8.0f ms: ", heap_->isolate()->time_millis_since_init());
+void GCTracer::AddIncrementalMarkingFinalizationStep(double duration) {
+  cumulative_incremental_marking_finalization_steps_++;
+  cumulative_incremental_marking_finalization_duration_ += duration;
+  longest_incremental_marking_finalization_step_ =
+      Max(longest_incremental_marking_finalization_step_, duration);
+}
 
-  PrintF("%s %.1f (%.1f) -> %.1f (%.1f) MB, ", current_.TypeName(false),
+
+void GCTracer::Output(const char* format, ...) const {
+  if (FLAG_trace_gc) {
+    va_list arguments;
+    va_start(arguments, format);
+    base::OS::VPrint(format, arguments);
+    va_end(arguments);
+  }
+
+  const int kBufferSize = 256;
+  char raw_buffer[kBufferSize];
+  Vector<char> buffer(raw_buffer, kBufferSize);
+  va_list arguments2;
+  va_start(arguments2, format);
+  VSNPrintF(buffer, format, arguments2);
+  va_end(arguments2);
+
+  heap_->AddToRingBuffer(buffer.start());
+}
+
+
+void GCTracer::Print() const {
+  if (FLAG_trace_gc) {
+    PrintIsolate(heap_->isolate(), "");
+  }
+  Output("%8.0f ms: ", heap_->isolate()->time_millis_since_init());
+
+  Output("%s %.1f (%.1f) -> %.1f (%.1f) MB, ", current_.TypeName(false),
          static_cast<double>(current_.start_object_size) / MB,
          static_cast<double>(current_.start_memory_size) / MB,
          static_cast<double>(current_.end_object_size) / MB,
          static_cast<double>(current_.end_memory_size) / MB);
 
   int external_time = static_cast<int>(current_.scopes[Scope::EXTERNAL]);
-  if (external_time > 0) PrintF("%d / ", external_time);
-
   double duration = current_.end_time - current_.start_time;
-  PrintF("%.1f ms", duration);
+  Output("%.1f / %d ms", duration, external_time);
+
   if (current_.type == Event::SCAVENGER) {
     if (current_.incremental_marking_steps > 0) {
-      PrintF(" (+ %.1f ms in %d steps since last GC)",
+      Output(" (+ %.1f ms in %d steps since last GC)",
              current_.incremental_marking_duration,
              current_.incremental_marking_steps);
     }
   } else {
     if (current_.incremental_marking_steps > 0) {
-      PrintF(
+      Output(
           " (+ %.1f ms in %d steps since start of marking, "
           "biggest step %.1f ms)",
           current_.incremental_marking_duration,
@@ -307,92 +416,220 @@
   }
 
   if (current_.gc_reason != NULL) {
-    PrintF(" [%s]", current_.gc_reason);
+    Output(" [%s]", current_.gc_reason);
   }
 
   if (current_.collector_reason != NULL) {
-    PrintF(" [%s]", current_.collector_reason);
+    Output(" [%s]", current_.collector_reason);
   }
 
-  PrintF(".\n");
+  Output(".\n");
 }
 
 
 void GCTracer::PrintNVP() const {
-  PrintPID("%8.0f ms: ", heap_->isolate()->time_millis_since_init());
-
   double duration = current_.end_time - current_.start_time;
   double spent_in_mutator = current_.start_time - previous_.end_time;
-
-  PrintF("pause=%.1f ", duration);
-  PrintF("mutator=%.1f ", spent_in_mutator);
-  PrintF("gc=%s ", current_.TypeName(true));
-
-  PrintF("external=%.1f ", current_.scopes[Scope::EXTERNAL]);
-  PrintF("mark=%.1f ", current_.scopes[Scope::MC_MARK]);
-  PrintF("sweep=%.2f ", current_.scopes[Scope::MC_SWEEP]);
-  PrintF("sweepns=%.2f ", current_.scopes[Scope::MC_SWEEP_NEWSPACE]);
-  PrintF("sweepos=%.2f ", current_.scopes[Scope::MC_SWEEP_OLDSPACE]);
-  PrintF("sweepcode=%.2f ", current_.scopes[Scope::MC_SWEEP_CODE]);
-  PrintF("sweepcell=%.2f ", current_.scopes[Scope::MC_SWEEP_CELL]);
-  PrintF("sweepmap=%.2f ", current_.scopes[Scope::MC_SWEEP_MAP]);
-  PrintF("evacuate=%.1f ", current_.scopes[Scope::MC_EVACUATE_PAGES]);
-  PrintF("new_new=%.1f ",
-         current_.scopes[Scope::MC_UPDATE_NEW_TO_NEW_POINTERS]);
-  PrintF("root_new=%.1f ",
-         current_.scopes[Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS]);
-  PrintF("old_new=%.1f ",
-         current_.scopes[Scope::MC_UPDATE_OLD_TO_NEW_POINTERS]);
-  PrintF("compaction_ptrs=%.1f ",
-         current_.scopes[Scope::MC_UPDATE_POINTERS_TO_EVACUATED]);
-  PrintF("intracompaction_ptrs=%.1f ",
-         current_.scopes[Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED]);
-  PrintF("misc_compaction=%.1f ",
-         current_.scopes[Scope::MC_UPDATE_MISC_POINTERS]);
-  PrintF("weak_closure=%.1f ", current_.scopes[Scope::MC_WEAKCLOSURE]);
-  PrintF("weakcollection_process=%.1f ",
-         current_.scopes[Scope::MC_WEAKCOLLECTION_PROCESS]);
-  PrintF("weakcollection_clear=%.1f ",
-         current_.scopes[Scope::MC_WEAKCOLLECTION_CLEAR]);
-  PrintF("weakcollection_abort=%.1f ",
-         current_.scopes[Scope::MC_WEAKCOLLECTION_ABORT]);
-
-  PrintF("total_size_before=%" V8_PTR_PREFIX "d ", current_.start_object_size);
-  PrintF("total_size_after=%" V8_PTR_PREFIX "d ", current_.end_object_size);
-  PrintF("holes_size_before=%" V8_PTR_PREFIX "d ", current_.start_holes_size);
-  PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", current_.end_holes_size);
-
   intptr_t allocated_since_last_gc =
       current_.start_object_size - previous_.end_object_size;
-  PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc);
-  PrintF("promoted=%" V8_PTR_PREFIX "d ", heap_->promoted_objects_size_);
-  PrintF("semi_space_copied=%" V8_PTR_PREFIX "d ",
-         heap_->semi_space_copied_object_size_);
-  PrintF("nodes_died_in_new=%d ", heap_->nodes_died_in_new_space_);
-  PrintF("nodes_copied_in_new=%d ", heap_->nodes_copied_in_new_space_);
-  PrintF("nodes_promoted=%d ", heap_->nodes_promoted_);
-  PrintF("promotion_ratio=%.1f%% ", heap_->promotion_ratio_);
-  PrintF("promotion_rate=%.1f%% ", heap_->promotion_rate_);
-  PrintF("semi_space_copy_rate=%.1f%% ", heap_->semi_space_copied_rate_);
-  PrintF("average_survival_rate%.1f%% ", AverageSurvivalRate());
-  PrintF("new_space_allocation_throughput=%" V8_PTR_PREFIX "d ",
-         NewSpaceAllocationThroughputInBytesPerMillisecond());
-  PrintF("context_disposal_rate=%.1f ", ContextDisposalRateInMilliseconds());
 
-  if (current_.type == Event::SCAVENGER) {
-    PrintF("steps_count=%d ", current_.incremental_marking_steps);
-    PrintF("steps_took=%.1f ", current_.incremental_marking_duration);
-    PrintF("scavenge_throughput=%" V8_PTR_PREFIX "d ",
-           ScavengeSpeedInBytesPerMillisecond());
-  } else {
-    PrintF("steps_count=%d ", current_.incremental_marking_steps);
-    PrintF("steps_took=%.1f ", current_.incremental_marking_duration);
-    PrintF("longest_step=%.1f ", current_.longest_incremental_marking_step);
-    PrintF("incremental_marking_throughput=%" V8_PTR_PREFIX "d ",
-           IncrementalMarkingSpeedInBytesPerMillisecond());
+  switch (current_.type) {
+    case Event::SCAVENGER:
+      PrintIsolate(heap_->isolate(),
+                   "%8.0f ms: "
+                   "pause=%.1f "
+                   "mutator=%.1f "
+                   "gc=%s "
+                   "reduce_memory=%d "
+                   "scavenge=%.2f "
+                   "old_new=%.2f "
+                   "weak=%.2f "
+                   "roots=%.2f "
+                   "code=%.2f "
+                   "semispace=%.2f "
+                   "object_groups=%.2f "
+                   "steps_count=%d "
+                   "steps_took=%.1f "
+                   "scavenge_throughput=%" V8_PTR_PREFIX
+                   "d "
+                   "total_size_before=%" V8_PTR_PREFIX
+                   "d "
+                   "total_size_after=%" V8_PTR_PREFIX
+                   "d "
+                   "holes_size_before=%" V8_PTR_PREFIX
+                   "d "
+                   "holes_size_after=%" V8_PTR_PREFIX
+                   "d "
+                   "allocated=%" V8_PTR_PREFIX
+                   "d "
+                   "promoted=%" V8_PTR_PREFIX
+                   "d "
+                   "semi_space_copied=%" V8_PTR_PREFIX
+                   "d "
+                   "nodes_died_in_new=%d "
+                   "nodes_copied_in_new=%d "
+                   "nodes_promoted=%d "
+                   "promotion_ratio=%.1f%% "
+                   "average_survival_ratio=%.1f%% "
+                   "promotion_rate=%.1f%% "
+                   "semi_space_copy_rate=%.1f%% "
+                   "new_space_allocation_throughput=%" V8_PTR_PREFIX
+                   "d "
+                   "context_disposal_rate=%.1f\n",
+                   heap_->isolate()->time_millis_since_init(), duration,
+                   spent_in_mutator, current_.TypeName(true),
+                   current_.reduce_memory,
+                   current_.scopes[Scope::SCAVENGER_SCAVENGE],
+                   current_.scopes[Scope::SCAVENGER_OLD_TO_NEW_POINTERS],
+                   current_.scopes[Scope::SCAVENGER_WEAK],
+                   current_.scopes[Scope::SCAVENGER_ROOTS],
+                   current_.scopes[Scope::SCAVENGER_CODE_FLUSH_CANDIDATES],
+                   current_.scopes[Scope::SCAVENGER_SEMISPACE],
+                   current_.scopes[Scope::SCAVENGER_OBJECT_GROUPS],
+                   current_.incremental_marking_steps,
+                   current_.incremental_marking_duration,
+                   ScavengeSpeedInBytesPerMillisecond(),
+                   current_.start_object_size, current_.end_object_size,
+                   current_.start_holes_size, current_.end_holes_size,
+                   allocated_since_last_gc, heap_->promoted_objects_size(),
+                   heap_->semi_space_copied_object_size(),
+                   heap_->nodes_died_in_new_space_,
+                   heap_->nodes_copied_in_new_space_, heap_->nodes_promoted_,
+                   heap_->promotion_ratio_, AverageSurvivalRatio(),
+                   heap_->promotion_rate_, heap_->semi_space_copied_rate_,
+                   NewSpaceAllocationThroughputInBytesPerMillisecond(),
+                   ContextDisposalRateInMilliseconds());
+      break;
+    case Event::MARK_COMPACTOR:
+    case Event::INCREMENTAL_MARK_COMPACTOR:
+      PrintIsolate(
+          heap_->isolate(),
+          "%8.0f ms: "
+          "pause=%.1f "
+          "mutator=%.1f "
+          "gc=%s "
+          "reduce_memory=%d "
+          "external=%.1f "
+          "clear=%.1f "
+          "clear.code_flush=%.1f "
+          "clear.dependent_code=%.1f "
+          "clear.global_handles=%.1f "
+          "clear.maps=%.1f "
+          "clear.slots_buffer=%.1f "
+          "clear.store_buffer=%.1f "
+          "clear.string_table=%.1f "
+          "clear.weak_cells=%.1f "
+          "clear.weak_collections=%.1f "
+          "clear.weak_lists=%.1f "
+          "evacuate=%.1f "
+          "evacuate.candidates=%.1f "
+          "evacuate.clean_up=%.1f "
+          "evacuate.new_space=%.1f "
+          "evacuate.update_pointers=%.1f "
+          "evacuate.update_pointers.between_evacuated=%.1f "
+          "evacuate.update_pointers.to_evacuated=%.1f "
+          "evacuate.update_pointers.to_new=%.1f "
+          "evacuate.update_pointers.weak=%.1f "
+          "finish=%.1f "
+          "mark=%.1f "
+          "mark.finish_incremental=%.1f "
+          "mark.prepare_code_flush=%.1f "
+          "mark.roots=%.1f "
+          "mark.weak_closure=%.1f "
+          "sweep=%.1f "
+          "sweep.code=%.1f "
+          "sweep.map=%.1f "
+          "sweep.old=%.1f "
+          "incremental_finalize=%.1f "
+          "steps_count=%d "
+          "steps_took=%.1f "
+          "longest_step=%.1f "
+          "finalization_steps_count=%d "
+          "finalization_steps_took=%.1f "
+          "finalization_longest_step=%.1f "
+          "incremental_marking_throughput=%" V8_PTR_PREFIX
+          "d "
+          "total_size_before=%" V8_PTR_PREFIX
+          "d "
+          "total_size_after=%" V8_PTR_PREFIX
+          "d "
+          "holes_size_before=%" V8_PTR_PREFIX
+          "d "
+          "holes_size_after=%" V8_PTR_PREFIX
+          "d "
+          "allocated=%" V8_PTR_PREFIX
+          "d "
+          "promoted=%" V8_PTR_PREFIX
+          "d "
+          "semi_space_copied=%" V8_PTR_PREFIX
+          "d "
+          "nodes_died_in_new=%d "
+          "nodes_copied_in_new=%d "
+          "nodes_promoted=%d "
+          "promotion_ratio=%.1f%% "
+          "average_survival_ratio=%.1f%% "
+          "promotion_rate=%.1f%% "
+          "semi_space_copy_rate=%.1f%% "
+          "new_space_allocation_throughput=%" V8_PTR_PREFIX
+          "d "
+          "context_disposal_rate=%.1f "
+          "compaction_speed=%" V8_PTR_PREFIX "d\n",
+          heap_->isolate()->time_millis_since_init(), duration,
+          spent_in_mutator, current_.TypeName(true), current_.reduce_memory,
+          current_.scopes[Scope::EXTERNAL], current_.scopes[Scope::MC_CLEAR],
+          current_.scopes[Scope::MC_CLEAR_CODE_FLUSH],
+          current_.scopes[Scope::MC_CLEAR_DEPENDENT_CODE],
+          current_.scopes[Scope::MC_CLEAR_GLOBAL_HANDLES],
+          current_.scopes[Scope::MC_CLEAR_MAPS],
+          current_.scopes[Scope::MC_CLEAR_SLOTS_BUFFER],
+          current_.scopes[Scope::MC_CLEAR_STORE_BUFFER],
+          current_.scopes[Scope::MC_CLEAR_STRING_TABLE],
+          current_.scopes[Scope::MC_CLEAR_WEAK_CELLS],
+          current_.scopes[Scope::MC_CLEAR_WEAK_COLLECTIONS],
+          current_.scopes[Scope::MC_CLEAR_WEAK_LISTS],
+          current_.scopes[Scope::MC_EVACUATE],
+          current_.scopes[Scope::MC_EVACUATE_CANDIDATES],
+          current_.scopes[Scope::MC_EVACUATE_CLEAN_UP],
+          current_.scopes[Scope::MC_EVACUATE_NEW_SPACE],
+          current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS],
+          current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS_BETWEEN_EVACUATED],
+          current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED],
+          current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW],
+          current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK],
+          current_.scopes[Scope::MC_FINISH], current_.scopes[Scope::MC_MARK],
+          current_.scopes[Scope::MC_MARK_FINISH_INCREMENTAL],
+          current_.scopes[Scope::MC_MARK_PREPARE_CODE_FLUSH],
+          current_.scopes[Scope::MC_MARK_ROOTS],
+          current_.scopes[Scope::MC_MARK_WEAK_CLOSURE],
+          current_.scopes[Scope::MC_SWEEP],
+          current_.scopes[Scope::MC_SWEEP_CODE],
+          current_.scopes[Scope::MC_SWEEP_MAP],
+          current_.scopes[Scope::MC_SWEEP_OLD],
+          current_.scopes[Scope::MC_INCREMENTAL_FINALIZE],
+          current_.incremental_marking_steps,
+          current_.incremental_marking_duration,
+          current_.longest_incremental_marking_step,
+          cumulative_incremental_marking_finalization_steps_,
+          cumulative_incremental_marking_finalization_duration_,
+          longest_incremental_marking_finalization_step_,
+          IncrementalMarkingSpeedInBytesPerMillisecond(),
+          current_.start_object_size, current_.end_object_size,
+          current_.start_holes_size, current_.end_holes_size,
+          allocated_since_last_gc, heap_->promoted_objects_size(),
+          heap_->semi_space_copied_object_size(),
+          heap_->nodes_died_in_new_space_, heap_->nodes_copied_in_new_space_,
+          heap_->nodes_promoted_, heap_->promotion_ratio_,
+          AverageSurvivalRatio(), heap_->promotion_rate_,
+          heap_->semi_space_copied_rate_,
+          NewSpaceAllocationThroughputInBytesPerMillisecond(),
+          ContextDisposalRateInMilliseconds(),
+          CompactionSpeedInBytesPerMillisecond());
+      break;
+    case Event::START:
+      break;
+    default:
+      UNREACHABLE();
   }
-
-  PrintF("\n");
 }
 
 
@@ -484,24 +721,43 @@
   }
 
   if (durations == 0.0) return 0;
-
-  return static_cast<intptr_t>(bytes / durations);
+  // Make sure the result is at least 1.
+  return Max<size_t>(static_cast<size_t>(bytes / durations + 0.5), 1);
 }
 
 
-intptr_t GCTracer::ScavengeSpeedInBytesPerMillisecond() const {
+intptr_t GCTracer::ScavengeSpeedInBytesPerMillisecond(
+    ScavengeSpeedMode mode) const {
   intptr_t bytes = 0;
   double durations = 0.0;
   EventBuffer::const_iterator iter = scavenger_events_.begin();
   while (iter != scavenger_events_.end()) {
-    bytes += iter->new_space_object_size;
+    bytes += mode == kForAllObjects ? iter->new_space_object_size
+                                    : iter->survived_new_space_object_size;
     durations += iter->end_time - iter->start_time;
     ++iter;
   }
 
   if (durations == 0.0) return 0;
+  // Make sure the result is at least 1.
+  return Max<size_t>(static_cast<size_t>(bytes / durations + 0.5), 1);
+}
 
-  return static_cast<intptr_t>(bytes / durations);
+
+intptr_t GCTracer::CompactionSpeedInBytesPerMillisecond() const {
+  if (compaction_events_.size() == 0) return 0;
+  intptr_t bytes = 0;
+  double durations = 0.0;
+  CompactionEventBuffer::const_iterator iter = compaction_events_.begin();
+  while (iter != compaction_events_.end()) {
+    bytes += iter->live_bytes_compacted;
+    durations += iter->duration;
+    ++iter;
+  }
+
+  if (durations == 0.0) return 0;
+  // Make sure the result is at least 1.
+  return Max<intptr_t>(static_cast<intptr_t>(bytes / durations + 0.5), 1);
 }
 
 
@@ -516,8 +772,8 @@
   }
 
   if (durations == 0.0) return 0;
-
-  return static_cast<intptr_t>(bytes / durations);
+  // Make sure the result is at least 1.
+  return Max<size_t>(static_cast<size_t>(bytes / durations + 0.5), 1);
 }
 
 
@@ -533,31 +789,96 @@
   }
 
   if (durations == 0.0) return 0;
-
-  return static_cast<intptr_t>(bytes / durations);
+  // Make sure the result is at least 1.
+  return Max<size_t>(static_cast<size_t>(bytes / durations + 0.5), 1);
 }
 
 
-intptr_t GCTracer::NewSpaceAllocationThroughputInBytesPerMillisecond() const {
-  intptr_t bytes = 0;
-  double durations = 0.0;
-  AllocationEventBuffer::const_iterator iter = allocation_events_.begin();
-  while (iter != allocation_events_.end()) {
+double GCTracer::CombinedMarkCompactSpeedInBytesPerMillisecond() {
+  if (combined_mark_compact_speed_cache_ > 0)
+    return combined_mark_compact_speed_cache_;
+  const double kMinimumMarkingSpeed = 0.5;
+  double speed1 =
+      static_cast<double>(IncrementalMarkingSpeedInBytesPerMillisecond());
+  double speed2 = static_cast<double>(
+      FinalIncrementalMarkCompactSpeedInBytesPerMillisecond());
+  if (speed1 < kMinimumMarkingSpeed || speed2 < kMinimumMarkingSpeed) {
+    // No data for the incremental marking speed.
+    // Return the non-incremental mark-compact speed.
+    combined_mark_compact_speed_cache_ =
+        static_cast<double>(MarkCompactSpeedInBytesPerMillisecond());
+  } else {
+    // Combine the speed of incremental step and the speed of the final step.
+    // 1 / (1 / speed1 + 1 / speed2) = speed1 * speed2 / (speed1 + speed2).
+    combined_mark_compact_speed_cache_ = speed1 * speed2 / (speed1 + speed2);
+  }
+  return combined_mark_compact_speed_cache_;
+}
+
+
+size_t GCTracer::NewSpaceAllocationThroughputInBytesPerMillisecond(
+    double time_ms) const {
+  size_t bytes = new_space_allocation_in_bytes_since_gc_;
+  double durations = allocation_duration_since_gc_;
+  AllocationEventBuffer::const_iterator iter =
+      new_space_allocation_events_.begin();
+  const size_t max_bytes = static_cast<size_t>(-1);
+  while (iter != new_space_allocation_events_.end() &&
+         bytes < max_bytes - bytes && (time_ms == 0 || durations < time_ms)) {
     bytes += iter->allocation_in_bytes_;
     durations += iter->duration_;
     ++iter;
   }
 
   if (durations == 0.0) return 0;
+  // Make sure the result is at least 1.
+  return Max<size_t>(static_cast<size_t>(bytes / durations + 0.5), 1);
+}
 
-  return static_cast<intptr_t>(bytes / durations);
+
+size_t GCTracer::OldGenerationAllocationThroughputInBytesPerMillisecond(
+    double time_ms) const {
+  size_t bytes = old_generation_allocation_in_bytes_since_gc_;
+  double durations = allocation_duration_since_gc_;
+  AllocationEventBuffer::const_iterator iter =
+      old_generation_allocation_events_.begin();
+  const size_t max_bytes = static_cast<size_t>(-1);
+  while (iter != old_generation_allocation_events_.end() &&
+         bytes < max_bytes - bytes && (time_ms == 0 || durations < time_ms)) {
+    bytes += iter->allocation_in_bytes_;
+    durations += iter->duration_;
+    ++iter;
+  }
+
+  if (durations == 0.0) return 0;
+  // Make sure the result is at least 1.
+  return Max<size_t>(static_cast<size_t>(bytes / durations + 0.5), 1);
+}
+
+
+size_t GCTracer::AllocationThroughputInBytesPerMillisecond(
+    double time_ms) const {
+  return NewSpaceAllocationThroughputInBytesPerMillisecond(time_ms) +
+         OldGenerationAllocationThroughputInBytesPerMillisecond(time_ms);
+}
+
+
+size_t GCTracer::CurrentAllocationThroughputInBytesPerMillisecond() const {
+  return AllocationThroughputInBytesPerMillisecond(kThroughputTimeFrameMs);
+}
+
+
+size_t GCTracer::CurrentOldGenerationAllocationThroughputInBytesPerMillisecond()
+    const {
+  return OldGenerationAllocationThroughputInBytesPerMillisecond(
+      kThroughputTimeFrameMs);
 }
 
 
 double GCTracer::ContextDisposalRateInMilliseconds() const {
   if (context_disposal_events_.size() < kRingBufferMaxSize) return 0.0;
 
-  double begin = base::OS::TimeCurrentMillis();
+  double begin = heap_->MonotonicallyIncreasingTimeInMs();
   double end = 0.0;
   ContextDisposalEventBuffer::const_iterator iter =
       context_disposal_events_.begin();
@@ -570,13 +891,13 @@
 }
 
 
-double GCTracer::AverageSurvivalRate() const {
+double GCTracer::AverageSurvivalRatio() const {
   if (survival_events_.size() == 0) return 0.0;
 
   double sum_of_rates = 0.0;
   SurvivalEventBuffer::const_iterator iter = survival_events_.begin();
   while (iter != survival_events_.end()) {
-    sum_of_rates += iter->survival_rate_;
+    sum_of_rates += iter->promotion_ratio_;
     ++iter;
   }
 
@@ -590,5 +911,5 @@
 
 
 void GCTracer::ResetSurvivalEvents() { survival_events_.reset(); }
-}
-}  // namespace v8::internal
+}  // namespace internal
+}  // namespace v8
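
CombinedMarkCompactSpeedInBytesPerMillisecond() merges the incremental marking speed and the final-step speed as speed1 * speed2 / (speed1 + speed2), i.e. the throughput of running both phases back to back over the same number of bytes. A short numeric check of that identity (the sample speeds are made up):

#include <cstdio>

// For N bytes, incremental marking takes N/speed1 ms and the final step takes
// N/speed2 ms, so the combined speed is N / (N/speed1 + N/speed2)
// = speed1 * speed2 / (speed1 + speed2).
double CombinedSpeed(double speed1, double speed2) {
  return speed1 * speed2 / (speed1 + speed2);
}

int main() {
  const double incremental = 200.0;  // bytes/ms, made-up sample value
  const double final_step = 600.0;   // bytes/ms, made-up sample value
  std::printf("combined: %.0f bytes/ms\n", CombinedSpeed(incremental, final_step));  // 150

  // Sanity check against the time-based definition for a fixed amount of work.
  const double bytes = 6000.0;
  const double total_ms = bytes / incremental + bytes / final_step;  // 30 + 10 = 40 ms
  std::printf("direct:   %.0f bytes/ms\n", bytes / total_ms);        // also 150
  return 0;
}
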
diff --git a/src/heap/gc-tracer.h b/src/heap/gc-tracer.h
index 528eb52..e8ec168 100644
--- a/src/heap/gc-tracer.h
+++ b/src/heap/gc-tracer.h
@@ -6,6 +6,7 @@
 #define V8_HEAP_GC_TRACER_H_
 
 #include "src/base/platform/platform.h"
+#include "src/globals.h"
 
 namespace v8 {
 namespace internal {
@@ -85,6 +86,9 @@
 };
 
 
+enum ScavengeSpeedMode { kForAllObjects, kForSurvivedObjects };
+
+
 // GCTracer collects and prints ONE line after each garbage collector
 // invocation IFF --trace_gc is used.
 // TODO(ernstm): Unit tests.
@@ -94,37 +98,49 @@
    public:
     enum ScopeId {
       EXTERNAL,
+      MC_CLEAR,
+      MC_CLEAR_CODE_FLUSH,
+      MC_CLEAR_DEPENDENT_CODE,
+      MC_CLEAR_GLOBAL_HANDLES,
+      MC_CLEAR_MAPS,
+      MC_CLEAR_SLOTS_BUFFER,
+      MC_CLEAR_STORE_BUFFER,
+      MC_CLEAR_STRING_TABLE,
+      MC_CLEAR_WEAK_CELLS,
+      MC_CLEAR_WEAK_COLLECTIONS,
+      MC_CLEAR_WEAK_LISTS,
+      MC_EVACUATE,
+      MC_EVACUATE_CANDIDATES,
+      MC_EVACUATE_CLEAN_UP,
+      MC_EVACUATE_NEW_SPACE,
+      MC_EVACUATE_UPDATE_POINTERS,
+      MC_EVACUATE_UPDATE_POINTERS_BETWEEN_EVACUATED,
+      MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED,
+      MC_EVACUATE_UPDATE_POINTERS_TO_NEW,
+      MC_EVACUATE_UPDATE_POINTERS_WEAK,
+      MC_FINISH,
+      MC_INCREMENTAL_FINALIZE,
       MC_MARK,
+      MC_MARK_FINISH_INCREMENTAL,
+      MC_MARK_PREPARE_CODE_FLUSH,
+      MC_MARK_ROOTS,
+      MC_MARK_WEAK_CLOSURE,
       MC_SWEEP,
-      MC_SWEEP_NEWSPACE,
-      MC_SWEEP_OLDSPACE,
       MC_SWEEP_CODE,
-      MC_SWEEP_CELL,
       MC_SWEEP_MAP,
-      MC_EVACUATE_PAGES,
-      MC_UPDATE_NEW_TO_NEW_POINTERS,
-      MC_UPDATE_ROOT_TO_NEW_POINTERS,
-      MC_UPDATE_OLD_TO_NEW_POINTERS,
-      MC_UPDATE_POINTERS_TO_EVACUATED,
-      MC_UPDATE_POINTERS_BETWEEN_EVACUATED,
-      MC_UPDATE_MISC_POINTERS,
-      MC_WEAKCLOSURE,
-      MC_WEAKCOLLECTION_PROCESS,
-      MC_WEAKCOLLECTION_CLEAR,
-      MC_WEAKCOLLECTION_ABORT,
-      MC_FLUSH_CODE,
+      MC_SWEEP_OLD,
+      SCAVENGER_CODE_FLUSH_CANDIDATES,
+      SCAVENGER_OBJECT_GROUPS,
+      SCAVENGER_OLD_TO_NEW_POINTERS,
+      SCAVENGER_ROOTS,
+      SCAVENGER_SCAVENGE,
+      SCAVENGER_SEMISPACE,
+      SCAVENGER_WEAK,
       NUMBER_OF_SCOPES
     };
 
-    Scope(GCTracer* tracer, ScopeId scope) : tracer_(tracer), scope_(scope) {
-      start_time_ = base::OS::TimeCurrentMillis();
-    }
-
-    ~Scope() {
-      DCHECK(scope_ < NUMBER_OF_SCOPES);  // scope_ is unsigned.
-      tracer_->current_.scopes[scope_] +=
-          base::OS::TimeCurrentMillis() - start_time_;
-    }
+    Scope(GCTracer* tracer, ScopeId scope);
+    ~Scope();
 
    private:
     GCTracer* tracer_;
@@ -140,15 +156,27 @@
     // Default constructor leaves the event uninitialized.
     AllocationEvent() {}
 
-    AllocationEvent(double duration, intptr_t allocation_in_bytes);
+    AllocationEvent(double duration, size_t allocation_in_bytes);
 
-    // Time spent in the mutator during the end of the last garbage collection
-    // to the beginning of the next garbage collection.
+    // Time spent in the mutator from the end of the last sample to the
+    // beginning of the next sample.
     double duration_;
 
-    // Memory allocated in the new space during the end of the last garbage
-    // collection to the beginning of the next garbage collection.
-    intptr_t allocation_in_bytes_;
+    // Memory allocated in the new space from the end of the last sample to
+    // the beginning of the next sample.
+    size_t allocation_in_bytes_;
+  };
+
+
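+  // Duration and number of live bytes compacted for a single compaction.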
+  class CompactionEvent {
+   public:
+    CompactionEvent() : duration(0), live_bytes_compacted(0) {}
+
+    CompactionEvent(double duration, intptr_t live_bytes_compacted)
+        : duration(duration), live_bytes_compacted(live_bytes_compacted) {}
+
+    double duration;
+    intptr_t live_bytes_compacted;
   };
 
 
@@ -169,9 +197,9 @@
     // Default constructor leaves the event uninitialized.
     SurvivalEvent() {}
 
-    explicit SurvivalEvent(double survival_rate);
+    explicit SurvivalEvent(double survival_ratio);
 
-    double survival_rate_;
+    double promotion_ratio_;
   };
 
 
@@ -204,6 +232,9 @@
     // Timestamp set in the destructor.
     double end_time;
 
+    // Memory reduction flag set.
+    bool reduce_memory;
+
     // Size of objects in heap set in constructor.
     intptr_t start_object_size;
 
@@ -226,6 +257,8 @@
 
     // Size of new space objects in constructor.
     intptr_t new_space_object_size;
+    // Size of survived new space objects set in destructor.
+    intptr_t survived_new_space_object_size;
 
     // Number of incremental marking steps since creation of tracer.
     // (value at start of event)
@@ -283,8 +316,12 @@
   typedef RingBuffer<ContextDisposalEvent, kRingBufferMaxSize>
       ContextDisposalEventBuffer;
 
+  typedef RingBuffer<CompactionEvent, kRingBufferMaxSize> CompactionEventBuffer;
+
   typedef RingBuffer<SurvivalEvent, kRingBufferMaxSize> SurvivalEventBuffer;
 
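+  // Time window, in milliseconds, used by the Current*Throughput accessors
+  // below.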
+  static const int kThroughputTimeFrameMs = 5000;
+
   explicit GCTracer(Heap* heap);
 
   // Start collecting data.
@@ -294,16 +331,24 @@
   // Stop collecting data and print results.
   void Stop(GarbageCollector collector);
 
-  // Log an allocation throughput event.
-  void AddNewSpaceAllocationTime(double duration, intptr_t allocation_in_bytes);
+  // Sample and accumulate bytes allocated since the last GC.
+  void SampleAllocation(double current_ms, size_t new_space_counter_bytes,
+                        size_t old_generation_counter_bytes);
+
+  // Log the accumulated new space allocation bytes.
+  void AddAllocation(double current_ms);
 
   void AddContextDisposalTime(double time);
 
-  void AddSurvivalRate(double survival_rate);
+  void AddCompactionEvent(double duration, intptr_t live_bytes_compacted);
+
+  void AddSurvivalRatio(double survival_ratio);
 
   // Log an incremental marking step.
   void AddIncrementalMarkingStep(double duration, intptr_t bytes);
 
+  void AddIncrementalMarkingFinalizationStep(double duration);
+
   // Log time spent in marking.
   void AddMarkingTime(double duration) {
     cumulative_marking_duration_ += duration;
@@ -366,7 +411,12 @@
 
   // Compute the average scavenge speed in bytes/millisecond.
   // Returns 0 if no events have been recorded.
-  intptr_t ScavengeSpeedInBytesPerMillisecond() const;
+  intptr_t ScavengeSpeedInBytesPerMillisecond(
+      ScavengeSpeedMode mode = kForAllObjects) const;
+
+  // Compute the average compaction speed in bytes/millisecond.
+  // Returns 0 if not enough events have been recorded.
+  intptr_t CompactionSpeedInBytesPerMillisecond() const;
 
   // Compute the average mark-sweep speed in bytes/millisecond.
   // Returns 0 if no events have been recorded.
@@ -377,9 +427,35 @@
   // Returns 0 if no events have been recorded.
   intptr_t FinalIncrementalMarkCompactSpeedInBytesPerMillisecond() const;
 
+  // Compute the overall mark compact speed including incremental steps
+  // and the final mark-compact step.
+  double CombinedMarkCompactSpeedInBytesPerMillisecond();
+
   // Allocation throughput in the new space in bytes/millisecond.
-  // Returns 0 if no events have been recorded.
-  intptr_t NewSpaceAllocationThroughputInBytesPerMillisecond() const;
+  // Returns 0 if no allocation events have been recorded.
+  size_t NewSpaceAllocationThroughputInBytesPerMillisecond(
+      double time_ms = 0) const;
+
+  // Allocation throughput in the old generation in bytes/millisecond in the
+  // last time_ms milliseconds.
+  // Returns 0 if no allocation events have been recorded.
+  size_t OldGenerationAllocationThroughputInBytesPerMillisecond(
+      double time_ms = 0) const;
+
+  // Allocation throughput in heap in bytes/millisecond in the last time_ms
+  // milliseconds.
+  // Returns 0 if no allocation events have been recorded.
+  size_t AllocationThroughputInBytesPerMillisecond(double time_ms) const;
+
+  // Allocation throughput in heap in bytes/millisecond in the last
+  // kThroughputTimeFrameMs milliseconds.
+  // Returns 0 if no allocation events have been recorded.
+  size_t CurrentAllocationThroughputInBytesPerMillisecond() const;
+
+  // Allocation throughput in the old generation in bytes/millisecond in the
+  // last kThroughputTimeFrameMs milliseconds.
+  // Returns 0 if no allocation events have been recorded.
+  size_t CurrentOldGenerationAllocationThroughputInBytesPerMillisecond() const;
 
   // Computes the context disposal rate in milliseconds. It takes the time
   // frame of the first recorded context disposal to the current time and
@@ -387,10 +463,10 @@
   // Returns 0 if no events have been recorded.
   double ContextDisposalRateInMilliseconds() const;
 
-  // Computes the average survival rate based on the last recorded survival
+  // Computes the average survival ratio based on the last recorded survival
   // events.
   // Returns 0 if no events have been recorded.
-  double AverageSurvivalRate() const;
+  double AverageSurvivalRatio() const;
 
   // Returns true if at least one survival event was recorded.
   bool SurvivalEventsRecorded() const;
@@ -407,6 +483,10 @@
   // TODO(ernstm): Move to Heap.
   void Print() const;
 
+  // Prints a line and also adds it to the heap's ring buffer so that
+  // it can be included in later crash dumps.
+  void Output(const char* format, ...) const;
+
   // Compute the mean duration of the events in the given ring buffer.
   double MeanDuration(const EventBuffer& events) const;
 
@@ -419,6 +499,9 @@
     cumulative_incremental_marking_duration_ = 0;
     cumulative_pure_incremental_marking_duration_ = 0;
     longest_incremental_marking_step_ = 0;
+    cumulative_incremental_marking_finalization_steps_ = 0;
+    cumulative_incremental_marking_finalization_duration_ = 0;
+    longest_incremental_marking_finalization_step_ = 0;
     cumulative_marking_duration_ = 0;
     cumulative_sweeping_duration_ = 0;
   }
@@ -446,11 +529,15 @@
   EventBuffer incremental_mark_compactor_events_;
 
   // RingBuffer for allocation events.
-  AllocationEventBuffer allocation_events_;
+  AllocationEventBuffer new_space_allocation_events_;
+  AllocationEventBuffer old_generation_allocation_events_;
 
   // RingBuffer for context disposal events.
   ContextDisposalEventBuffer context_disposal_events_;
 
+  // RingBuffer for compaction events.
+  CompactionEventBuffer compaction_events_;
+
   // RingBuffer for survival events.
   SurvivalEventBuffer survival_events_;
 
@@ -471,6 +558,17 @@
   // Longest incremental marking step since start of marking.
   double longest_incremental_marking_step_;
 
+  // Cumulative number of incremental marking finalization steps since creation
+  // of tracer.
+  int cumulative_incremental_marking_finalization_steps_;
+
+  // Cumulative duration of incremental marking finalization steps since
+  // creation of tracer.
+  double cumulative_incremental_marking_finalization_duration_;
+
+  // Longest incremental marking finalization step since start of marking.
+  double longest_incremental_marking_finalization_step_;
+
   // Total marking time.
   // This timer is precise when run with --print-cumulative-gc-stat
   double cumulative_marking_duration_;
@@ -484,16 +582,24 @@
   // all sweeping operations performed on the main thread.
   double cumulative_sweeping_duration_;
 
-  // Holds the new space top pointer recorded at the end of the last garbage
-  // collection.
-  intptr_t new_space_top_after_gc_;
+  // Timestamp and allocation counter at the last sampled allocation event.
+  double allocation_time_ms_;
+  size_t new_space_allocation_counter_bytes_;
+  size_t old_generation_allocation_counter_bytes_;
+
+  // Accumulated duration and allocated bytes since the last GC.
+  double allocation_duration_since_gc_;
+  size_t new_space_allocation_in_bytes_since_gc_;
+  size_t old_generation_allocation_in_bytes_since_gc_;
+
+  double combined_mark_compact_speed_cache_;
 
   // Counts how many tracers were started without stopping.
   int start_counter_;
 
   DISALLOW_COPY_AND_ASSIGN(GCTracer);
 };
-}
-}  // namespace v8::internal
+}  // namespace internal
+}  // namespace v8
 
 #endif  // V8_HEAP_GC_TRACER_H_
diff --git a/src/heap/heap-inl.h b/src/heap/heap-inl.h
index 549ecbc..a723b3b 100644
--- a/src/heap/heap-inl.h
+++ b/src/heap/heap-inl.h
@@ -8,15 +8,19 @@
 #include <cmath>
 
 #include "src/base/platform/platform.h"
-#include "src/cpu-profiler.h"
+#include "src/counters.h"
 #include "src/heap/heap.h"
+#include "src/heap/incremental-marking-inl.h"
+#include "src/heap/mark-compact.h"
+#include "src/heap/spaces-inl.h"
 #include "src/heap/store-buffer.h"
 #include "src/heap/store-buffer-inl.h"
-#include "src/heap-profiler.h"
 #include "src/isolate.h"
 #include "src/list-inl.h"
+#include "src/log.h"
 #include "src/msan.h"
-#include "src/objects.h"
+#include "src/objects-inl.h"
+#include "src/type-feedback-vector-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -27,13 +31,6 @@
     return;
   }
 
-  if (NewSpacePage::IsAtStart(reinterpret_cast<Address>(rear_))) {
-    NewSpacePage* rear_page =
-        NewSpacePage::FromAddress(reinterpret_cast<Address>(rear_));
-    DCHECK(!rear_page->prev_page()->is_anchor());
-    rear_ = reinterpret_cast<intptr_t*>(rear_page->prev_page()->area_end());
-  }
-
   if ((rear_ - 2) < limit_) {
     RelocateQueueHead();
     emergency_stack_->Add(Entry(target, size));
@@ -50,6 +47,45 @@
 }
 
 
+#define ROOT_ACCESSOR(type, name, camel_name) \
+  type* Heap::name() { return type::cast(roots_[k##camel_name##RootIndex]); }
+ROOT_LIST(ROOT_ACCESSOR)
+#undef ROOT_ACCESSOR
+
+#define STRUCT_MAP_ACCESSOR(NAME, Name, name) \
+  Map* Heap::name##_map() { return Map::cast(roots_[k##Name##MapRootIndex]); }
+STRUCT_LIST(STRUCT_MAP_ACCESSOR)
+#undef STRUCT_MAP_ACCESSOR
+
+#define STRING_ACCESSOR(name, str) \
+  String* Heap::name() { return String::cast(roots_[k##name##RootIndex]); }
+INTERNALIZED_STRING_LIST(STRING_ACCESSOR)
+#undef STRING_ACCESSOR
+
+#define SYMBOL_ACCESSOR(name) \
+  Symbol* Heap::name() { return Symbol::cast(roots_[k##name##RootIndex]); }
+PRIVATE_SYMBOL_LIST(SYMBOL_ACCESSOR)
+#undef SYMBOL_ACCESSOR
+
+#define SYMBOL_ACCESSOR(name, description) \
+  Symbol* Heap::name() { return Symbol::cast(roots_[k##name##RootIndex]); }
+PUBLIC_SYMBOL_LIST(SYMBOL_ACCESSOR)
+WELL_KNOWN_SYMBOL_LIST(SYMBOL_ACCESSOR)
+#undef SYMBOL_ACCESSOR
+
+#define ROOT_ACCESSOR(type, name, camel_name)                                 \
+  void Heap::set_##name(type* value) {                                        \
+    /* The deserializer makes use of the fact that these common roots are */  \
+    /* never in new space and never on a page that is being compacted.    */  \
+    DCHECK(!deserialization_complete() ||                                     \
+           RootCanBeWrittenAfterInitialization(k##camel_name##RootIndex));    \
+    DCHECK(k##camel_name##RootIndex >= kOldSpaceRoots || !InNewSpace(value)); \
+    roots_[k##camel_name##RootIndex] = value;                                 \
+  }
+ROOT_LIST(ROOT_ACCESSOR)
+#undef ROOT_ACCESSOR
+
+
 template <>
 bool inline Heap::IsOneByte(Vector<const char> str, int chars) {
   // TODO(dcarney): incorporate Latin-1 check when Latin-1 is supported?
@@ -89,12 +125,11 @@
   // Compute map and object size.
   Map* map = one_byte_internalized_string_map();
   int size = SeqOneByteString::SizeFor(str.length());
-  AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, TENURED);
 
   // Allocate string.
-  HeapObject* result;
+  HeapObject* result = nullptr;
   {
-    AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
+    AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
     if (!allocation.To(&result)) return allocation;
   }
 
@@ -121,12 +156,11 @@
   // Compute map and object size.
   Map* map = internalized_string_map();
   int size = SeqTwoByteString::SizeFor(str.length());
-  AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, TENURED);
 
   // Allocate string.
-  HeapObject* result;
+  HeapObject* result = nullptr;
   {
-    AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
+    AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
     if (!allocation.To(&result)) return allocation;
   }
 
@@ -157,19 +191,13 @@
 }
 
 
-AllocationResult Heap::CopyConstantPoolArray(ConstantPoolArray* src) {
-  if (src->length() == 0) return src;
-  return CopyConstantPoolArrayWithMap(src, src->map());
-}
-
-
 AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationSpace space,
-                                   AllocationSpace retry_space) {
+                                   AllocationAlignment alignment) {
   DCHECK(AllowHandleAllocation::IsAllowed());
   DCHECK(AllowHeapAllocation::IsAllowed());
   DCHECK(gc_state_ == NOT_IN_GC);
 #ifdef DEBUG
-  if (FLAG_gc_interval >= 0 && AllowAllocationFailure::IsAllowed(isolate_) &&
+  if (FLAG_gc_interval >= 0 && !always_allocate() &&
       Heap::allocation_timeout_-- <= 0) {
     return AllocationResult::Retry(space);
   }
@@ -177,13 +205,14 @@
   isolate_->counters()->objs_since_last_young()->Increment();
 #endif
 
-  HeapObject* object;
+  bool large_object = size_in_bytes > Page::kMaxRegularHeapObjectSize;
+  HeapObject* object = nullptr;
   AllocationResult allocation;
   if (NEW_SPACE == space) {
-    allocation = new_space_.AllocateRaw(size_in_bytes);
-    if (always_allocate() && allocation.IsRetry() && retry_space != NEW_SPACE) {
-      space = retry_space;
+    if (large_object) {
+      space = LO_SPACE;
     } else {
+      allocation = new_space_.AllocateRaw(size_in_bytes, alignment);
       if (allocation.To(&object)) {
         OnAllocationEvent(object, size_in_bytes);
       }
@@ -191,26 +220,27 @@
     }
   }
 
-  if (OLD_POINTER_SPACE == space) {
-    allocation = old_pointer_space_->AllocateRaw(size_in_bytes);
-  } else if (OLD_DATA_SPACE == space) {
-    allocation = old_data_space_->AllocateRaw(size_in_bytes);
+  // Here we only allocate in the old generation.
+  if (OLD_SPACE == space) {
+    if (large_object) {
+      allocation = lo_space_->AllocateRaw(size_in_bytes, NOT_EXECUTABLE);
+    } else {
+      allocation = old_space_->AllocateRaw(size_in_bytes, alignment);
+    }
   } else if (CODE_SPACE == space) {
     if (size_in_bytes <= code_space()->AreaSize()) {
-      allocation = code_space_->AllocateRaw(size_in_bytes);
+      allocation = code_space_->AllocateRawUnaligned(size_in_bytes);
     } else {
-      // Large code objects are allocated in large object space.
       allocation = lo_space_->AllocateRaw(size_in_bytes, EXECUTABLE);
     }
   } else if (LO_SPACE == space) {
+    DCHECK(large_object);
     allocation = lo_space_->AllocateRaw(size_in_bytes, NOT_EXECUTABLE);
-  } else if (CELL_SPACE == space) {
-    allocation = cell_space_->AllocateRaw(size_in_bytes);
-  } else if (PROPERTY_CELL_SPACE == space) {
-    allocation = property_cell_space_->AllocateRaw(size_in_bytes);
+  } else if (MAP_SPACE == space) {
+    allocation = map_space_->AllocateRawUnaligned(size_in_bytes);
   } else {
-    DCHECK(MAP_SPACE == space);
-    allocation = map_space_->AllocateRaw(size_in_bytes);
+    // NEW_SPACE is not allowed here.
+    UNREACHABLE();
   }
   if (allocation.To(&object)) {
     OnAllocationEvent(object, size_in_bytes);
@@ -229,16 +259,23 @@
 
   if (FLAG_verify_predictable) {
     ++allocations_count_;
+    // Advance synthetic time by making a time request.
+    MonotonicallyIncreasingTimeInMs();
 
     UpdateAllocationsHash(object);
     UpdateAllocationsHash(size_in_bytes);
 
-    if ((FLAG_dump_allocations_digest_at_alloc > 0) &&
-        (--dump_allocations_hash_countdown_ == 0)) {
-      dump_allocations_hash_countdown_ = FLAG_dump_allocations_digest_at_alloc;
+    if (allocations_count_ % FLAG_dump_allocations_digest_at_alloc == 0) {
       PrintAlloctionsHash();
     }
   }
+
+  if (FLAG_trace_allocation_stack_interval > 0) {
+    if (!FLAG_verify_predictable) ++allocations_count_;
+    if (allocations_count_ % FLAG_trace_allocation_stack_interval == 0) {
+      isolate()->PrintStack(stdout, Isolate::kPrintStackConcise);
+    }
+  }
 }
 
 
@@ -249,25 +286,21 @@
     heap_profiler->ObjectMoveEvent(source->address(), target->address(),
                                    size_in_bytes);
   }
-
-  if (isolate_->logger()->is_logging_code_events() ||
-      isolate_->cpu_profiler()->is_profiling()) {
-    if (target->IsSharedFunctionInfo()) {
-      PROFILE(isolate_, SharedFunctionInfoMoveEvent(source->address(),
-                                                    target->address()));
-    }
+  if (target->IsSharedFunctionInfo()) {
+    LOG_CODE_EVENT(isolate_, SharedFunctionInfoMoveEvent(source->address(),
+                                                         target->address()));
   }
 
   if (FLAG_verify_predictable) {
     ++allocations_count_;
+    // Advance synthetic time by making a time request.
+    MonotonicallyIncreasingTimeInMs();
 
     UpdateAllocationsHash(source);
     UpdateAllocationsHash(target);
     UpdateAllocationsHash(size_in_bytes);
 
-    if ((FLAG_dump_allocations_digest_at_alloc > 0) &&
-        (--dump_allocations_hash_countdown_ == 0)) {
-      dump_allocations_hash_countdown_ = FLAG_dump_allocations_digest_at_alloc;
+    if (allocations_count_ % FLAG_dump_allocations_digest_at_alloc == 0) {
       PrintAlloctionsHash();
     }
   }
@@ -298,9 +331,8 @@
 }
 
 
-void Heap::PrintAlloctionsHash() {
-  uint32_t hash = StringHasher::GetHashCore(raw_allocations_hash_);
-  PrintF("\n### Allocations = %u, hash = 0x%08x\n", allocations_count_, hash);
+void Heap::RegisterExternalString(String* string) {
+  external_string_table_.AddString(string);
 }
 
 
@@ -341,23 +373,11 @@
 }
 
 
-bool Heap::InOldPointerSpace(Address address) {
-  return old_pointer_space_->Contains(address);
-}
+bool Heap::InOldSpace(Address address) { return old_space_->Contains(address); }
 
 
-bool Heap::InOldPointerSpace(Object* object) {
-  return InOldPointerSpace(reinterpret_cast<Address>(object));
-}
-
-
-bool Heap::InOldDataSpace(Address address) {
-  return old_data_space_->Contains(address);
-}
-
-
-bool Heap::InOldDataSpace(Object* object) {
-  return InOldDataSpace(reinterpret_cast<Address>(object));
+bool Heap::InOldSpace(Object* object) {
+  return InOldSpace(reinterpret_cast<Address>(object));
 }
 
 
@@ -389,53 +409,16 @@
 }
 
 
-OldSpace* Heap::TargetSpace(HeapObject* object) {
-  InstanceType type = object->map()->instance_type();
-  AllocationSpace space = TargetSpaceId(type);
-  return (space == OLD_POINTER_SPACE) ? old_pointer_space_ : old_data_space_;
-}
-
-
-AllocationSpace Heap::TargetSpaceId(InstanceType type) {
-  // Heap numbers and sequential strings are promoted to old data space, all
-  // other object types are promoted to old pointer space.  We do not use
-  // object->IsHeapNumber() and object->IsSeqString() because we already
-  // know that object has the heap object tag.
-
-  // These objects are never allocated in new space.
-  DCHECK(type != MAP_TYPE);
-  DCHECK(type != CODE_TYPE);
-  DCHECK(type != ODDBALL_TYPE);
-  DCHECK(type != CELL_TYPE);
-  DCHECK(type != PROPERTY_CELL_TYPE);
-
-  if (type <= LAST_NAME_TYPE) {
-    if (type == SYMBOL_TYPE) return OLD_POINTER_SPACE;
-    DCHECK(type < FIRST_NONSTRING_TYPE);
-    // There are four string representations: sequential strings, external
-    // strings, cons strings, and sliced strings.
-    // Only the latter two contain non-map-word pointers to heap objects.
-    return ((type & kIsIndirectStringMask) == kIsIndirectStringTag)
-               ? OLD_POINTER_SPACE
-               : OLD_DATA_SPACE;
-  } else {
-    return (type <= LAST_DATA_TYPE) ? OLD_DATA_SPACE : OLD_POINTER_SPACE;
-  }
-}
-
-
 bool Heap::AllowedToBeMigrated(HeapObject* obj, AllocationSpace dst) {
   // Object migration is governed by the following rules:
   //
-  // 1) Objects in new-space can be migrated to one of the old spaces
+  // 1) Objects in new-space can be migrated to the old space
   //    that matches their target space or they stay in new-space.
   // 2) Objects in old-space stay in the same space when migrating.
   // 3) Fillers (two or more words) can migrate due to left-trimming of
-  //    fixed arrays in new-space, old-data-space and old-pointer-space.
+  //    fixed arrays in new-space or old space.
   // 4) Fillers (one word) can never migrate, they are skipped by
   //    incremental marking explicitly to prevent invalid pattern.
-  // 5) Short external strings can end up in old pointer space when a cons
-  //    string in old pointer space is made external (String::MakeExternal).
   //
   // Since this function is used for debugging only, we do not place
   // asserts here, but check everything explicitly.
@@ -445,17 +428,13 @@
   AllocationSpace src = chunk->owner()->identity();
   switch (src) {
     case NEW_SPACE:
-      return dst == src || dst == TargetSpaceId(type);
-    case OLD_POINTER_SPACE:
-      return dst == src && (dst == TargetSpaceId(type) || obj->IsFiller() ||
-                            obj->IsExternalString());
-    case OLD_DATA_SPACE:
-      return dst == src && dst == TargetSpaceId(type);
+      return dst == src || dst == OLD_SPACE;
+    case OLD_SPACE:
+      return dst == src &&
+             (dst == OLD_SPACE || obj->IsFiller() || obj->IsExternalString());
     case CODE_SPACE:
       return dst == src && type == CODE_TYPE;
     case MAP_SPACE:
-    case CELL_SPACE:
-    case PROPERTY_CELL_SPACE:
     case LO_SPACE:
       return false;
   }
@@ -489,9 +468,6 @@
 }
 
 
-void Heap::ScavengePointer(HeapObject** p) { ScavengeObject(p, *p); }
-
-
 AllocationMemento* Heap::FindAllocationMemento(HeapObject* object) {
   // Check if there is potentially a memento behind the object. If
   // the last word of the memento is on another page we return
@@ -522,7 +498,7 @@
   Address top = NewSpaceTop();
   DCHECK(memento_address == top ||
          memento_address + HeapObject::kHeaderSize <= top ||
-         !NewSpacePage::OnSamePage(memento_address, top));
+         !NewSpacePage::OnSamePage(memento_address, top - 1));
   if (memento_address == top) return NULL;
 
   AllocationMemento* memento = AllocationMemento::cast(candidate);
@@ -531,48 +507,39 @@
 }
 
 
-void Heap::UpdateAllocationSiteFeedback(HeapObject* object,
-                                        ScratchpadSlotMode mode) {
-  Heap* heap = object->GetHeap();
-  DCHECK(heap->InFromSpace(object));
-
+void Heap::UpdateAllocationSite(HeapObject* object,
+                                HashMap* pretenuring_feedback) {
+  DCHECK(InFromSpace(object));
   if (!FLAG_allocation_site_pretenuring ||
       !AllocationSite::CanTrack(object->map()->instance_type()))
     return;
+  AllocationMemento* memento = FindAllocationMemento(object);
+  if (memento == nullptr) return;
 
-  AllocationMemento* memento = heap->FindAllocationMemento(object);
-  if (memento == NULL) return;
+  AllocationSite* key = memento->GetAllocationSite();
+  DCHECK(!key->IsZombie());
 
-  if (memento->GetAllocationSite()->IncrementMementoFoundCount()) {
-    heap->AddAllocationSiteToScratchpad(memento->GetAllocationSite(), mode);
+  if (pretenuring_feedback == global_pretenuring_feedback_) {
+    // For inserting into the global pretenuring storage we first need to
+    // increment the memento found count on the allocation site.
+    if (key->IncrementMementoFoundCount()) {
+      global_pretenuring_feedback_->LookupOrInsert(
+          key, static_cast<uint32_t>(bit_cast<uintptr_t>(key)));
+    }
+  } else {
+    // Any pretenuring storage other than the global one is used as a cache,
+    // where the count is later merged into the allocation site.
+    HashMap::Entry* e = pretenuring_feedback->LookupOrInsert(
+        key, static_cast<uint32_t>(bit_cast<uintptr_t>(key)));
+    DCHECK(e != nullptr);
+    (*bit_cast<intptr_t*>(&e->value))++;
   }
 }
 
 
-void Heap::ScavengeObject(HeapObject** p, HeapObject* object) {
-  DCHECK(object->GetIsolate()->heap()->InFromSpace(object));
-
-  // We use the first word (where the map pointer usually is) of a heap
-  // object to record the forwarding pointer.  A forwarding pointer can
-  // point to an old space, the code space, or the to space of the new
-  // generation.
-  MapWord first_word = object->map_word();
-
-  // If the first word is a forwarding address, the object has already been
-  // copied.
-  if (first_word.IsForwardingAddress()) {
-    HeapObject* dest = first_word.ToForwardingAddress();
-    DCHECK(object->GetIsolate()->heap()->InFromSpace(*p));
-    *p = dest;
-    return;
-  }
-
-  UpdateAllocationSiteFeedback(object, IGNORE_SCRATCHPAD_SLOT);
-
-  // AllocationMementos are unrooted and shouldn't survive a scavenge
-  DCHECK(object->map() != object->GetHeap()->allocation_memento_map());
-  // Call the slow part of scavenge object.
-  return ScavengeObjectSlow(p, object);
+void Heap::RemoveAllocationSitePretenuringFeedback(AllocationSite* site) {
+  global_pretenuring_feedback_->Remove(
+      site, static_cast<uint32_t>(bit_cast<uintptr_t>(site)));
 }
 
 
@@ -591,55 +558,7 @@
 }
 
 
-// Calls the FUNCTION_CALL function and retries it up to three times
-// to guarantee that any allocations performed during the call will
-// succeed if there's enough memory.
-
-// Warning: Do not use the identifiers __object__, __maybe_object__ or
-// __scope__ in a call to this macro.
-
-#define RETURN_OBJECT_UNLESS_RETRY(ISOLATE, RETURN_VALUE) \
-  if (__allocation__.To(&__object__)) {                   \
-    DCHECK(__object__ != (ISOLATE)->heap()->exception()); \
-    RETURN_VALUE;                                         \
-  }
-
-#define CALL_AND_RETRY(ISOLATE, FUNCTION_CALL, RETURN_VALUE, RETURN_EMPTY)    \
-  do {                                                                        \
-    AllocationResult __allocation__ = FUNCTION_CALL;                          \
-    Object* __object__ = NULL;                                                \
-    RETURN_OBJECT_UNLESS_RETRY(ISOLATE, RETURN_VALUE)                         \
-    (ISOLATE)->heap()->CollectGarbage(__allocation__.RetrySpace(),            \
-                                      "allocation failure");                  \
-    __allocation__ = FUNCTION_CALL;                                           \
-    RETURN_OBJECT_UNLESS_RETRY(ISOLATE, RETURN_VALUE)                         \
-    (ISOLATE)->counters()->gc_last_resort_from_handles()->Increment();        \
-    (ISOLATE)->heap()->CollectAllAvailableGarbage("last resort gc");          \
-    {                                                                         \
-      AlwaysAllocateScope __scope__(ISOLATE);                                 \
-      __allocation__ = FUNCTION_CALL;                                         \
-    }                                                                         \
-    RETURN_OBJECT_UNLESS_RETRY(ISOLATE, RETURN_VALUE)                         \
-    /* TODO(1181417): Fix this. */                                            \
-    v8::internal::Heap::FatalProcessOutOfMemory("CALL_AND_RETRY_LAST", true); \
-    RETURN_EMPTY;                                                             \
-  } while (false)
-
-#define CALL_AND_RETRY_OR_DIE(ISOLATE, FUNCTION_CALL, RETURN_VALUE, \
-                              RETURN_EMPTY)                         \
-  CALL_AND_RETRY(ISOLATE, FUNCTION_CALL, RETURN_VALUE, RETURN_EMPTY)
-
-#define CALL_HEAP_FUNCTION(ISOLATE, FUNCTION_CALL, TYPE)                      \
-  CALL_AND_RETRY_OR_DIE(ISOLATE, FUNCTION_CALL,                               \
-                        return Handle<TYPE>(TYPE::cast(__object__), ISOLATE), \
-                        return Handle<TYPE>())
-
-
-#define CALL_HEAP_FUNCTION_VOID(ISOLATE, FUNCTION_CALL) \
-  CALL_AND_RETRY_OR_DIE(ISOLATE, FUNCTION_CALL, return, return)
-
-
-void ExternalStringTable::AddString(String* string) {
+void Heap::ExternalStringTable::AddString(String* string) {
   DCHECK(string->IsExternalString());
   if (heap_->InNewSpace(string)) {
     new_space_strings_.Add(string);
@@ -649,7 +568,7 @@
 }
 
 
-void ExternalStringTable::Iterate(ObjectVisitor* v) {
+void Heap::ExternalStringTable::Iterate(ObjectVisitor* v) {
   if (!new_space_strings_.is_empty()) {
     Object** start = &new_space_strings_[0];
     v->VisitPointers(start, start + new_space_strings_.length());
@@ -663,7 +582,7 @@
 
 // Verify() is inline to avoid ifdef-s around its calls in release
 // mode.
-void ExternalStringTable::Verify() {
+void Heap::ExternalStringTable::Verify() {
 #ifdef DEBUG
   for (int i = 0; i < new_space_strings_.length(); ++i) {
     Object* obj = Object::cast(new_space_strings_[i]);
@@ -679,14 +598,14 @@
 }
 
 
-void ExternalStringTable::AddOldString(String* string) {
+void Heap::ExternalStringTable::AddOldString(String* string) {
   DCHECK(string->IsExternalString());
   DCHECK(!heap_->InNewSpace(string));
   old_space_strings_.Add(string);
 }
 
 
-void ExternalStringTable::ShrinkNewStrings(int position) {
+void Heap::ExternalStringTable::ShrinkNewStrings(int position) {
   new_space_strings_.Rewind(position);
 #ifdef VERIFY_HEAP
   if (FLAG_verify_heap) {
@@ -696,6 +615,27 @@
 }
 
 
+int DescriptorLookupCache::Lookup(Map* source, Name* name) {
+  if (!name->IsUniqueName()) return kAbsent;
+  int index = Hash(source, name);
+  Key& key = keys_[index];
+  if ((key.source == source) && (key.name == name)) return results_[index];
+  return kAbsent;
+}
+
+
+void DescriptorLookupCache::Update(Map* source, Name* name, int result) {
+  DCHECK(result != kAbsent);
+  if (name->IsUniqueName()) {
+    int index = Hash(source, name);
+    Key& key = keys_[index];
+    key.source = source;
+    key.name = name;
+    results_[index] = result;
+  }
+}
+
+
 void Heap::ClearInstanceofCache() {
   set_instanceof_cache_function(Smi::FromInt(0));
 }
@@ -712,47 +652,57 @@
 }
 
 
+uint32_t Heap::HashSeed() {
+  uint32_t seed = static_cast<uint32_t>(hash_seed()->value());
+  DCHECK(FLAG_randomize_hashes || seed == 0);
+  return seed;
+}
+
+
+int Heap::NextScriptId() {
+  int last_id = last_script_id()->value();
+  if (last_id == Smi::kMaxValue) {
+    last_id = 1;
+  } else {
+    last_id++;
+  }
+  set_last_script_id(Smi::FromInt(last_id));
+  return last_id;
+}
+
+
+void Heap::SetArgumentsAdaptorDeoptPCOffset(int pc_offset) {
+  DCHECK(arguments_adaptor_deopt_pc_offset() == Smi::FromInt(0));
+  set_arguments_adaptor_deopt_pc_offset(Smi::FromInt(pc_offset));
+}
+
+
+void Heap::SetConstructStubDeoptPCOffset(int pc_offset) {
+  DCHECK(construct_stub_deopt_pc_offset() == Smi::FromInt(0));
+  set_construct_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
+}
+
+
+void Heap::SetGetterStubDeoptPCOffset(int pc_offset) {
+  DCHECK(getter_stub_deopt_pc_offset() == Smi::FromInt(0));
+  set_getter_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
+}
+
+
+void Heap::SetSetterStubDeoptPCOffset(int pc_offset) {
+  DCHECK(setter_stub_deopt_pc_offset() == Smi::FromInt(0));
+  set_setter_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
+}
+
+
 AlwaysAllocateScope::AlwaysAllocateScope(Isolate* isolate)
-    : heap_(isolate->heap()), daf_(isolate) {
-  // We shouldn't hit any nested scopes, because that requires
-  // non-handle code to call handle code. The code still works but
-  // performance will degrade, so we want to catch this situation
-  // in debug mode.
-  DCHECK(heap_->always_allocate_scope_depth_ == 0);
-  heap_->always_allocate_scope_depth_++;
+    : heap_(isolate->heap()) {
+  heap_->always_allocate_scope_count_.Increment(1);
 }
 
 
 AlwaysAllocateScope::~AlwaysAllocateScope() {
-  heap_->always_allocate_scope_depth_--;
-  DCHECK(heap_->always_allocate_scope_depth_ == 0);
-}
-
-
-#ifdef VERIFY_HEAP
-NoWeakObjectVerificationScope::NoWeakObjectVerificationScope() {
-  Isolate* isolate = Isolate::Current();
-  isolate->heap()->no_weak_object_verification_scope_depth_++;
-}
-
-
-NoWeakObjectVerificationScope::~NoWeakObjectVerificationScope() {
-  Isolate* isolate = Isolate::Current();
-  isolate->heap()->no_weak_object_verification_scope_depth_--;
-}
-#endif
-
-
-GCCallbacksScope::GCCallbacksScope(Heap* heap) : heap_(heap) {
-  heap_->gc_callbacks_depth_++;
-}
-
-
-GCCallbacksScope::~GCCallbacksScope() { heap_->gc_callbacks_depth_--; }
-
-
-bool GCCallbacksScope::CheckReenter() {
-  return heap_->gc_callbacks_depth_ == 1;
+  heap_->always_allocate_scope_count_.Increment(-1);
 }
 
 
@@ -772,7 +722,7 @@
     CHECK((*current)->IsSmi());
   }
 }
-}
-}  // namespace v8::internal
+}  // namespace internal
+}  // namespace v8
 
 #endif  // V8_HEAP_HEAP_INL_H_
diff --git a/src/heap/heap.cc b/src/heap/heap.cc
index 0b817e4..84b3c79 100644
--- a/src/heap/heap.cc
+++ b/src/heap/heap.cc
@@ -2,10 +2,11 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "src/v8.h"
+#include "src/heap/heap.h"
 
 #include "src/accessors.h"
 #include "src/api.h"
+#include "src/ast/scopeinfo.h"
 #include "src/base/bits.h"
 #include "src/base/once.h"
 #include "src/base/utils/random-number-generator.h"
@@ -13,44 +14,59 @@
 #include "src/codegen.h"
 #include "src/compilation-cache.h"
 #include "src/conversions.h"
-#include "src/cpu-profiler.h"
-#include "src/debug.h"
+#include "src/debug/debug.h"
 #include "src/deoptimizer.h"
 #include "src/global-handles.h"
+#include "src/heap/array-buffer-tracker.h"
 #include "src/heap/gc-idle-time-handler.h"
+#include "src/heap/gc-tracer.h"
 #include "src/heap/incremental-marking.h"
+#include "src/heap/mark-compact-inl.h"
 #include "src/heap/mark-compact.h"
+#include "src/heap/memory-reducer.h"
+#include "src/heap/object-stats.h"
 #include "src/heap/objects-visiting-inl.h"
 #include "src/heap/objects-visiting.h"
+#include "src/heap/scavenge-job.h"
+#include "src/heap/scavenger-inl.h"
 #include "src/heap/store-buffer.h"
-#include "src/heap-profiler.h"
-#include "src/isolate-inl.h"
-#include "src/natives.h"
+#include "src/interpreter/interpreter.h"
+#include "src/profiler/cpu-profiler.h"
+#include "src/regexp/jsregexp.h"
 #include "src/runtime-profiler.h"
-#include "src/scopeinfo.h"
-#include "src/serialize.h"
-#include "src/snapshot.h"
+#include "src/snapshot/natives.h"
+#include "src/snapshot/serialize.h"
+#include "src/snapshot/snapshot.h"
+#include "src/type-feedback-vector.h"
 #include "src/utils.h"
+#include "src/v8.h"
 #include "src/v8threads.h"
 #include "src/vm-state-inl.h"
 
-#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
-#include "src/regexp-macro-assembler.h"          // NOLINT
-#include "src/arm/regexp-macro-assembler-arm.h"  // NOLINT
-#endif
-#if V8_TARGET_ARCH_MIPS && !V8_INTERPRETED_REGEXP
-#include "src/regexp-macro-assembler.h"            // NOLINT
-#include "src/mips/regexp-macro-assembler-mips.h"  // NOLINT
-#endif
-#if V8_TARGET_ARCH_MIPS64 && !V8_INTERPRETED_REGEXP
-#include "src/regexp-macro-assembler.h"
-#include "src/mips64/regexp-macro-assembler-mips64.h"
-#endif
-
 namespace v8 {
 namespace internal {
 
 
+struct Heap::StrongRootsList {
+  Object** start;
+  Object** end;
+  StrongRootsList* next;
+};
+
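+// Inline allocation observer that asks the heap to schedule an idle scavenge
+// check after each observed allocation step.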
+class IdleScavengeObserver : public InlineAllocationObserver {
+ public:
+  IdleScavengeObserver(Heap& heap, intptr_t step_size)
+      : InlineAllocationObserver(step_size), heap_(heap) {}
+
+  void Step(int bytes_allocated, Address, size_t) override {
+    heap_.ScheduleIdleScavengeIfNeeded(bytes_allocated);
+  }
+
+ private:
+  Heap& heap_;
+};
+
+
 Heap::Heap()
     : amount_of_external_allocated_memory_(0),
       amount_of_external_allocated_memory_at_last_global_gc_(0),
@@ -63,7 +79,8 @@
       initial_semispace_size_(Page::kPageSize),
       target_semispace_size_(Page::kPageSize),
       max_old_generation_size_(700ul * (kPointerSize / 4) * MB),
-      initial_old_generation_size_(max_old_generation_size_ / 2),
+      initial_old_generation_size_(max_old_generation_size_ /
+                                   kInitalOldGenerationLimitFactor),
       old_generation_size_configured_(false),
       max_executable_size_(256ul * (kPointerSize / 4) * MB),
       // Variables set based on semispace_size_ and old_generation_size_ in
@@ -73,40 +90,33 @@
       maximum_committed_(0),
       survived_since_last_expansion_(0),
       survived_last_scavenge_(0),
-      sweep_generation_(0),
-      always_allocate_scope_depth_(0),
+      always_allocate_scope_count_(0),
       contexts_disposed_(0),
+      number_of_disposed_maps_(0),
       global_ic_age_(0),
-      flush_monomorphic_ics_(false),
       scan_on_scavenge_pages_(0),
       new_space_(this),
-      old_pointer_space_(NULL),
-      old_data_space_(NULL),
+      old_space_(NULL),
       code_space_(NULL),
       map_space_(NULL),
-      cell_space_(NULL),
-      property_cell_space_(NULL),
       lo_space_(NULL),
       gc_state_(NOT_IN_GC),
       gc_post_processing_depth_(0),
       allocations_count_(0),
       raw_allocations_hash_(0),
-      dump_allocations_hash_countdown_(FLAG_dump_allocations_digest_at_alloc),
       ms_count_(0),
       gc_count_(0),
       remembered_unmapped_pages_index_(0),
-      unflattened_strings_length_(0),
 #ifdef DEBUG
       allocation_timeout_(0),
 #endif  // DEBUG
       old_generation_allocation_limit_(initial_old_generation_size_),
       old_gen_exhausted_(false),
+      optimize_for_memory_usage_(false),
       inline_allocation_disabled_(false),
       store_buffer_rebuilder_(store_buffer()),
-      hidden_string_(NULL),
-      gc_safe_size_of_old_object_(NULL),
       total_regexp_code_generated_(0),
-      tracer_(this),
+      tracer_(nullptr),
       high_survival_rate_period_length_(0),
       promoted_objects_size_(0),
       promotion_ratio_(0),
@@ -124,24 +134,39 @@
       marking_time_(0.0),
       sweeping_time_(0.0),
       last_idle_notification_time_(0.0),
-      mark_compact_collector_(this),
+      last_gc_time_(0.0),
+      scavenge_collector_(nullptr),
+      mark_compact_collector_(nullptr),
       store_buffer_(this),
-      marking_(this),
-      incremental_marking_(this),
-      gc_count_at_last_idle_gc_(0),
+      incremental_marking_(nullptr),
+      gc_idle_time_handler_(nullptr),
+      memory_reducer_(nullptr),
+      object_stats_(nullptr),
+      scavenge_job_(nullptr),
+      idle_scavenge_observer_(nullptr),
       full_codegen_bytes_generated_(0),
       crankshaft_codegen_bytes_generated_(0),
+      new_space_allocation_counter_(0),
+      old_generation_allocation_counter_(0),
+      old_generation_size_at_last_gc_(0),
       gcs_since_last_deopt_(0),
-#ifdef VERIFY_HEAP
-      no_weak_object_verification_scope_depth_(0),
-#endif
-      allocation_sites_scratchpad_length_(0),
+      global_pretenuring_feedback_(nullptr),
+      ring_buffer_full_(false),
+      ring_buffer_end_(0),
       promotion_queue_(this),
       configured_(false),
+      current_gc_flags_(Heap::kNoGCFlags),
+      current_gc_callback_flags_(GCCallbackFlags::kNoGCCallbackFlags),
       external_string_table_(this),
       chunks_queued_for_free_(NULL),
+      concurrent_unmapping_tasks_active_(0),
+      pending_unmapping_tasks_semaphore_(0),
       gc_callbacks_depth_(0),
-      deserialization_complete_(false) {
+      deserialization_complete_(false),
+      strong_roots_list_(NULL),
+      array_buffer_tracker_(NULL),
+      heap_iterator_depth_(0),
+      force_oom_(false) {
 // Allow build-time customization of the max semispace size. Building
 // V8 with snapshots and a non-default max semispace size is much
 // easier if you can define it as part of the build environment.
@@ -150,39 +175,40 @@
 #endif
 
   // Ensure old_generation_size_ is a multiple of kPageSize.
-  DCHECK(MB >= Page::kPageSize);
+  DCHECK((max_old_generation_size_ & (Page::kPageSize - 1)) == 0);
 
   memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
   set_native_contexts_list(NULL);
-  set_array_buffers_list(Smi::FromInt(0));
   set_allocation_sites_list(Smi::FromInt(0));
   set_encountered_weak_collections(Smi::FromInt(0));
   set_encountered_weak_cells(Smi::FromInt(0));
+  set_encountered_transition_arrays(Smi::FromInt(0));
   // Put a dummy entry in the remembered pages so we can find the list the
   // minidump even if there are no real unmapped pages.
   RememberUnmappedPage(NULL, false);
-
-  ClearObjectStats(true);
 }
 
 
 intptr_t Heap::Capacity() {
   if (!HasBeenSetUp()) return 0;
 
-  return new_space_.Capacity() + old_pointer_space_->Capacity() +
-         old_data_space_->Capacity() + code_space_->Capacity() +
-         map_space_->Capacity() + cell_space_->Capacity() +
-         property_cell_space_->Capacity();
+  return new_space_.Capacity() + old_space_->Capacity() +
+         code_space_->Capacity() + map_space_->Capacity();
+}
+
+
+intptr_t Heap::CommittedOldGenerationMemory() {
+  if (!HasBeenSetUp()) return 0;
+
+  return old_space_->CommittedMemory() + code_space_->CommittedMemory() +
+         map_space_->CommittedMemory() + lo_space_->Size();
 }
 
 
 intptr_t Heap::CommittedMemory() {
   if (!HasBeenSetUp()) return 0;
 
-  return new_space_.CommittedMemory() + old_pointer_space_->CommittedMemory() +
-         old_data_space_->CommittedMemory() + code_space_->CommittedMemory() +
-         map_space_->CommittedMemory() + cell_space_->CommittedMemory() +
-         property_cell_space_->CommittedMemory() + lo_space_->Size();
+  return new_space_.CommittedMemory() + CommittedOldGenerationMemory();
 }
 
 
@@ -190,12 +216,9 @@
   if (!HasBeenSetUp()) return 0;
 
   return new_space_.CommittedPhysicalMemory() +
-         old_pointer_space_->CommittedPhysicalMemory() +
-         old_data_space_->CommittedPhysicalMemory() +
+         old_space_->CommittedPhysicalMemory() +
          code_space_->CommittedPhysicalMemory() +
          map_space_->CommittedPhysicalMemory() +
-         cell_space_->CommittedPhysicalMemory() +
-         property_cell_space_->CommittedPhysicalMemory() +
          lo_space_->CommittedPhysicalMemory();
 }
 
@@ -220,25 +243,18 @@
 intptr_t Heap::Available() {
   if (!HasBeenSetUp()) return 0;
 
-  return new_space_.Available() + old_pointer_space_->Available() +
-         old_data_space_->Available() + code_space_->Available() +
-         map_space_->Available() + cell_space_->Available() +
-         property_cell_space_->Available();
+  intptr_t total = 0;
+  AllSpaces spaces(this);
+  for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
+    total += space->Available();
+  }
+  return total;
 }
 
 
 bool Heap::HasBeenSetUp() {
-  return old_pointer_space_ != NULL && old_data_space_ != NULL &&
-         code_space_ != NULL && map_space_ != NULL && cell_space_ != NULL &&
-         property_cell_space_ != NULL && lo_space_ != NULL;
-}
-
-
-int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
-  if (IntrusiveMarking::IsMarked(object)) {
-    return IntrusiveMarking::SizeOfMarkedObject(object);
-  }
-  return object->SizeFromMap(object->map());
+  return old_space_ != NULL && code_space_ != NULL && map_space_ != NULL &&
+         lo_space_ != NULL;
 }
 
 
@@ -321,80 +337,58 @@
 
 void Heap::PrintShortHeapStatistics() {
   if (!FLAG_trace_gc_verbose) return;
-  PrintPID("Memory allocator,   used: %6" V8_PTR_PREFIX
-           "d KB"
-           ", available: %6" V8_PTR_PREFIX "d KB\n",
-           isolate_->memory_allocator()->Size() / KB,
-           isolate_->memory_allocator()->Available() / KB);
-  PrintPID("New space,          used: %6" V8_PTR_PREFIX
-           "d KB"
-           ", available: %6" V8_PTR_PREFIX
-           "d KB"
-           ", committed: %6" V8_PTR_PREFIX "d KB\n",
-           new_space_.Size() / KB, new_space_.Available() / KB,
-           new_space_.CommittedMemory() / KB);
-  PrintPID("Old pointers,       used: %6" V8_PTR_PREFIX
-           "d KB"
-           ", available: %6" V8_PTR_PREFIX
-           "d KB"
-           ", committed: %6" V8_PTR_PREFIX "d KB\n",
-           old_pointer_space_->SizeOfObjects() / KB,
-           old_pointer_space_->Available() / KB,
-           old_pointer_space_->CommittedMemory() / KB);
-  PrintPID("Old data space,     used: %6" V8_PTR_PREFIX
-           "d KB"
-           ", available: %6" V8_PTR_PREFIX
-           "d KB"
-           ", committed: %6" V8_PTR_PREFIX "d KB\n",
-           old_data_space_->SizeOfObjects() / KB,
-           old_data_space_->Available() / KB,
-           old_data_space_->CommittedMemory() / KB);
-  PrintPID("Code space,         used: %6" V8_PTR_PREFIX
-           "d KB"
-           ", available: %6" V8_PTR_PREFIX
-           "d KB"
-           ", committed: %6" V8_PTR_PREFIX "d KB\n",
-           code_space_->SizeOfObjects() / KB, code_space_->Available() / KB,
-           code_space_->CommittedMemory() / KB);
-  PrintPID("Map space,          used: %6" V8_PTR_PREFIX
-           "d KB"
-           ", available: %6" V8_PTR_PREFIX
-           "d KB"
-           ", committed: %6" V8_PTR_PREFIX "d KB\n",
-           map_space_->SizeOfObjects() / KB, map_space_->Available() / KB,
-           map_space_->CommittedMemory() / KB);
-  PrintPID("Cell space,         used: %6" V8_PTR_PREFIX
-           "d KB"
-           ", available: %6" V8_PTR_PREFIX
-           "d KB"
-           ", committed: %6" V8_PTR_PREFIX "d KB\n",
-           cell_space_->SizeOfObjects() / KB, cell_space_->Available() / KB,
-           cell_space_->CommittedMemory() / KB);
-  PrintPID("PropertyCell space, used: %6" V8_PTR_PREFIX
-           "d KB"
-           ", available: %6" V8_PTR_PREFIX
-           "d KB"
-           ", committed: %6" V8_PTR_PREFIX "d KB\n",
-           property_cell_space_->SizeOfObjects() / KB,
-           property_cell_space_->Available() / KB,
-           property_cell_space_->CommittedMemory() / KB);
-  PrintPID("Large object space, used: %6" V8_PTR_PREFIX
-           "d KB"
-           ", available: %6" V8_PTR_PREFIX
-           "d KB"
-           ", committed: %6" V8_PTR_PREFIX "d KB\n",
-           lo_space_->SizeOfObjects() / KB, lo_space_->Available() / KB,
-           lo_space_->CommittedMemory() / KB);
-  PrintPID("All spaces,         used: %6" V8_PTR_PREFIX
-           "d KB"
-           ", available: %6" V8_PTR_PREFIX
-           "d KB"
-           ", committed: %6" V8_PTR_PREFIX "d KB\n",
-           this->SizeOfObjects() / KB, this->Available() / KB,
-           this->CommittedMemory() / KB);
-  PrintPID("External memory reported: %6" V8_PTR_PREFIX "d KB\n",
-           static_cast<intptr_t>(amount_of_external_allocated_memory_ / KB));
-  PrintPID("Total time spent in GC  : %.1f ms\n", total_gc_time_ms_);
+  PrintIsolate(isolate_, "Memory allocator,   used: %6" V8_PTR_PREFIX
+                         "d KB"
+                         ", available: %6" V8_PTR_PREFIX "d KB\n",
+               isolate_->memory_allocator()->Size() / KB,
+               isolate_->memory_allocator()->Available() / KB);
+  PrintIsolate(isolate_, "New space,          used: %6" V8_PTR_PREFIX
+                         "d KB"
+                         ", available: %6" V8_PTR_PREFIX
+                         "d KB"
+                         ", committed: %6" V8_PTR_PREFIX "d KB\n",
+               new_space_.Size() / KB, new_space_.Available() / KB,
+               new_space_.CommittedMemory() / KB);
+  PrintIsolate(isolate_, "Old space,          used: %6" V8_PTR_PREFIX
+                         "d KB"
+                         ", available: %6" V8_PTR_PREFIX
+                         "d KB"
+                         ", committed: %6" V8_PTR_PREFIX "d KB\n",
+               old_space_->SizeOfObjects() / KB, old_space_->Available() / KB,
+               old_space_->CommittedMemory() / KB);
+  PrintIsolate(isolate_, "Code space,         used: %6" V8_PTR_PREFIX
+                         "d KB"
+                         ", available: %6" V8_PTR_PREFIX
+                         "d KB"
+                         ", committed: %6" V8_PTR_PREFIX "d KB\n",
+               code_space_->SizeOfObjects() / KB, code_space_->Available() / KB,
+               code_space_->CommittedMemory() / KB);
+  PrintIsolate(isolate_, "Map space,          used: %6" V8_PTR_PREFIX
+                         "d KB"
+                         ", available: %6" V8_PTR_PREFIX
+                         "d KB"
+                         ", committed: %6" V8_PTR_PREFIX "d KB\n",
+               map_space_->SizeOfObjects() / KB, map_space_->Available() / KB,
+               map_space_->CommittedMemory() / KB);
+  PrintIsolate(isolate_, "Large object space, used: %6" V8_PTR_PREFIX
+                         "d KB"
+                         ", available: %6" V8_PTR_PREFIX
+                         "d KB"
+                         ", committed: %6" V8_PTR_PREFIX "d KB\n",
+               lo_space_->SizeOfObjects() / KB, lo_space_->Available() / KB,
+               lo_space_->CommittedMemory() / KB);
+  PrintIsolate(isolate_, "All spaces,         used: %6" V8_PTR_PREFIX
+                         "d KB"
+                         ", available: %6" V8_PTR_PREFIX
+                         "d KB"
+                         ", committed: %6" V8_PTR_PREFIX "d KB\n",
+               this->SizeOfObjects() / KB, this->Available() / KB,
+               this->CommittedMemory() / KB);
+  PrintIsolate(
+      isolate_, "External memory reported: %6" V8_PTR_PREFIX "d KB\n",
+      static_cast<intptr_t>(amount_of_external_allocated_memory_ / KB));
+  PrintIsolate(isolate_, "Total time spent in GC  : %.1f ms\n",
+               total_gc_time_ms_);
 }
 
 
@@ -413,19 +407,27 @@
 #else
   if (FLAG_log_gc) new_space_.ReportStatistics();
 #endif  // DEBUG
+  for (int i = 0; i < static_cast<int>(v8::Isolate::kUseCounterFeatureCount);
+       ++i) {
+    int count = deferred_counters_[i];
+    deferred_counters_[i] = 0;
+    while (count > 0) {
+      count--;
+      isolate()->CountUsage(static_cast<v8::Isolate::UseCounterFeature>(i));
+    }
+  }
+}
+
+
+void Heap::IncrementDeferredCount(v8::Isolate::UseCounterFeature feature) {
+  deferred_counters_[feature]++;
 }
 
 
 void Heap::GarbageCollectionPrologue() {
   {
     AllowHeapAllocation for_the_first_part_of_prologue;
-    ClearJSFunctionResultCaches();
     gc_count_++;
-    unflattened_strings_length_ = 0;
-
-    if (FLAG_flush_code && FLAG_flush_code_incrementally) {
-      mark_compact_collector()->EnableCodeFlushing(true);
-    }
 
 #ifdef VERIFY_HEAP
     if (FLAG_verify_heap) {
@@ -455,7 +457,7 @@
   store_buffer()->GCPrologue();
 
   if (isolate()->concurrent_osr_enabled()) {
-    isolate()->optimizing_compiler_thread()->AgeBufferedOsrJobs();
+    isolate()->optimizing_compile_dispatcher()->AgeBufferedOsrJobs();
   }
 
   if (new_space_.IsAtMaximumCapacity()) {
@@ -464,6 +466,7 @@
     maximum_size_scavenges_ = 0;
   }
   CheckNewSpaceExpansionCriteria();
+  UpdateNewSpaceAllocationCounter();
 }
 
 
@@ -477,30 +480,80 @@
 }
 
 
-void Heap::ClearAllICsByKind(Code::Kind kind) {
-  HeapObjectIterator it(code_space());
+const char* Heap::GetSpaceName(int idx) {
+  switch (idx) {
+    case NEW_SPACE:
+      return "new_space";
+    case OLD_SPACE:
+      return "old_space";
+    case MAP_SPACE:
+      return "map_space";
+    case CODE_SPACE:
+      return "code_space";
+    case LO_SPACE:
+      return "large_object_space";
+    default:
+      UNREACHABLE();
+  }
+  return nullptr;
+}
 
-  for (Object* object = it.Next(); object != NULL; object = it.Next()) {
-    Code* code = Code::cast(object);
-    Code::Kind current_kind = code->kind();
-    if (current_kind == Code::FUNCTION ||
-        current_kind == Code::OPTIMIZED_FUNCTION) {
-      code->ClearInlineCaches(kind);
+
+void Heap::RepairFreeListsAfterDeserialization() {
+  PagedSpaces spaces(this);
+  for (PagedSpace* space = spaces.next(); space != NULL;
+       space = spaces.next()) {
+    space->RepairFreeListsAfterDeserialization();
+  }
+}
+
+
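+// Folds locally collected pretenuring feedback into the heap's global table,
+// following forwarding pointers for allocation sites that have been moved.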
+void Heap::MergeAllocationSitePretenuringFeedback(
+    const HashMap& local_pretenuring_feedback) {
+  AllocationSite* site = nullptr;
+  for (HashMap::Entry* local_entry = local_pretenuring_feedback.Start();
+       local_entry != nullptr;
+       local_entry = local_pretenuring_feedback.Next(local_entry)) {
+    site = reinterpret_cast<AllocationSite*>(local_entry->key);
+    MapWord map_word = site->map_word();
+    if (map_word.IsForwardingAddress()) {
+      site = AllocationSite::cast(map_word.ToForwardingAddress());
+    }
+    DCHECK(site->IsAllocationSite());
+    int value =
+        static_cast<int>(reinterpret_cast<intptr_t>(local_entry->value));
+    DCHECK_GT(value, 0);
+
+    {
+      // TODO(mlippautz): For parallel processing we need synchronization here.
+      if (site->IncrementMementoFoundCount(value)) {
+        global_pretenuring_feedback_->LookupOrInsert(
+            site, static_cast<uint32_t>(bit_cast<uintptr_t>(site)));
+      }
     }
   }
 }
 
 
-void Heap::RepairFreeListsAfterBoot() {
-  PagedSpaces spaces(this);
-  for (PagedSpace* space = spaces.next(); space != NULL;
-       space = spaces.next()) {
-    space->RepairFreeListsAfterBoot();
+class Heap::PretenuringScope {
+ public:
+  explicit PretenuringScope(Heap* heap) : heap_(heap) {
+    heap_->global_pretenuring_feedback_ =
+        new HashMap(HashMap::PointersMatch, kInitialFeedbackCapacity);
   }
-}
+
+  ~PretenuringScope() {
+    delete heap_->global_pretenuring_feedback_;
+    heap_->global_pretenuring_feedback_ = nullptr;
+  }
+
+ private:
+  Heap* heap_;
+};
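
PretenuringScope owns the global feedback table for the duration of a collection, and MergeAllocationSitePretenuringFeedback above folds a local feedback table into it, resolving forwarding addresses first. A rough sketch of the merge step, with std::unordered_map standing in for V8's HashMap (Site and the function name are hypothetical):

#include <unordered_map>

// Hypothetical stand-in for AllocationSite; only the found-count matters here.
struct Site {
  int memento_found_count = 0;
};

// Merge local feedback into the global table, mirroring the loop above: bump
// the per-site count and make sure the site is registered globally so that
// ProcessPretenuringFeedback can digest it later.
void MergeFeedback(std::unordered_map<Site*, int>* global,
                   const std::unordered_map<Site*, int>& local) {
  for (const auto& entry : local) {
    Site* site = entry.first;
    site->memento_found_count += entry.second;
    (*global)[site] += entry.second;
  }
}
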
 
 
 void Heap::ProcessPretenuringFeedback() {
+  bool trigger_deoptimization = false;
   if (FLAG_allocation_site_pretenuring) {
     int tenure_decisions = 0;
     int dont_tenure_decisions = 0;
@@ -508,49 +561,43 @@
     int allocation_sites = 0;
     int active_allocation_sites = 0;
 
-    // If the scratchpad overflowed, we have to iterate over the allocation
-    // sites list.
-    // TODO(hpayer): We iterate over the whole list of allocation sites when
-    // we grew to the maximum semi-space size to deopt maybe tenured
-    // allocation sites. We could hold the maybe tenured allocation sites
-    // in a seperate data structure if this is a performance problem.
-    bool deopt_maybe_tenured = DeoptMaybeTenuredAllocationSites();
-    bool use_scratchpad =
-        allocation_sites_scratchpad_length_ < kAllocationSiteScratchpadSize &&
-        !deopt_maybe_tenured;
+    AllocationSite* site = nullptr;
 
-    int i = 0;
-    Object* list_element = allocation_sites_list();
-    bool trigger_deoptimization = false;
+    // Step 1: Digest feedback for recorded allocation sites.
     bool maximum_size_scavenge = MaximumSizeScavenge();
-    while (use_scratchpad ? i < allocation_sites_scratchpad_length_
-                          : list_element->IsAllocationSite()) {
-      AllocationSite* site =
-          use_scratchpad
-              ? AllocationSite::cast(allocation_sites_scratchpad()->get(i))
-              : AllocationSite::cast(list_element);
-      allocation_mementos_found += site->memento_found_count();
-      if (site->memento_found_count() > 0) {
-        active_allocation_sites++;
-        if (site->DigestPretenuringFeedback(maximum_size_scavenge)) {
-          trigger_deoptimization = true;
-        }
-        if (site->GetPretenureMode() == TENURED) {
-          tenure_decisions++;
-        } else {
-          dont_tenure_decisions++;
-        }
-        allocation_sites++;
-      }
-
-      if (deopt_maybe_tenured && site->IsMaybeTenure()) {
-        site->set_deopt_dependent_code(true);
+    for (HashMap::Entry* e = global_pretenuring_feedback_->Start();
+         e != nullptr; e = global_pretenuring_feedback_->Next(e)) {
+      site = reinterpret_cast<AllocationSite*>(e->key);
+      int found_count = site->memento_found_count();
+      // The fact that we have an entry in the storage means that we've found
+      // the site at least once.
+      DCHECK_GT(found_count, 0);
+      DCHECK(site->IsAllocationSite());
+      allocation_sites++;
+      active_allocation_sites++;
+      allocation_mementos_found += found_count;
+      if (site->DigestPretenuringFeedback(maximum_size_scavenge)) {
         trigger_deoptimization = true;
       }
-
-      if (use_scratchpad) {
-        i++;
+      if (site->GetPretenureMode() == TENURED) {
+        tenure_decisions++;
       } else {
+        dont_tenure_decisions++;
+      }
+    }
+
+    // Step 2: Deopt maybe tenured allocation sites if necessary.
+    bool deopt_maybe_tenured = DeoptMaybeTenuredAllocationSites();
+    if (deopt_maybe_tenured) {
+      Object* list_element = allocation_sites_list();
+      while (list_element->IsAllocationSite()) {
+        site = AllocationSite::cast(list_element);
+        DCHECK(site->IsAllocationSite());
+        allocation_sites++;
+        if (site->IsMaybeTenure()) {
+          site->set_deopt_dependent_code(true);
+          trigger_deoptimization = true;
+        }
         list_element = site->weak_next();
       }
     }
@@ -559,18 +606,16 @@
       isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
     }
 
-    FlushAllocationSitesScratchpad();
-
     if (FLAG_trace_pretenuring_statistics &&
         (allocation_mementos_found > 0 || tenure_decisions > 0 ||
          dont_tenure_decisions > 0)) {
-      PrintF(
-          "GC: (mode, #visited allocation sites, #active allocation sites, "
-          "#mementos, #tenure decisions, #donttenure decisions) "
-          "(%s, %d, %d, %d, %d, %d)\n",
-          use_scratchpad ? "use scratchpad" : "use list", allocation_sites,
-          active_allocation_sites, allocation_mementos_found, tenure_decisions,
-          dont_tenure_decisions);
+      PrintIsolate(isolate(),
+                   "pretenuring: deopt_maybe_tenured=%d visited_sites=%d "
+                   "active_sites=%d "
+                   "mementos=%d tenured=%d not_tenured=%d\n",
+                   deopt_maybe_tenured ? 1 : 0, allocation_sites,
+                   active_allocation_sites, allocation_mementos_found,
+                   tenure_decisions, dont_tenure_decisions);
     }
   }
 }
@@ -578,8 +623,7 @@
 
 void Heap::DeoptMarkedAllocationSites() {
   // TODO(hpayer): If iterating over the allocation sites list becomes a
-  // performance issue, use a cache heap data structure instead (similar to the
-  // allocation sites scratchpad).
+  // performance issue, use a cache data structure in heap instead.
   Object* list_element = allocation_sites_list();
   while (list_element->IsAllocationSite()) {
     AllocationSite* site = AllocationSite::cast(list_element);
@@ -602,9 +646,6 @@
     ZapFromSpace();
   }
 
-  // Process pretenuring feedback and update allocation sites.
-  ProcessPretenuringFeedback();
-
 #ifdef VERIFY_HEAP
   if (FLAG_verify_heap) {
     Verify();
@@ -618,6 +659,7 @@
   if (FLAG_print_handles) PrintHandles();
   if (FLAG_gc_verbose) Print();
   if (FLAG_code_stats) ReportCodeStatistics("After GC");
+  if (FLAG_check_handle_count) CheckHandleCount();
 #endif
   if (FLAG_deopt_every_n_garbage_collections > 0) {
     // TODO(jkummerow/ulan/jarin): This is not safe! We can't assume that
@@ -652,23 +694,13 @@
 
     isolate_->counters()->heap_fraction_new_space()->AddSample(static_cast<int>(
         (new_space()->CommittedMemory() * 100.0) / CommittedMemory()));
-    isolate_->counters()->heap_fraction_old_pointer_space()->AddSample(
-        static_cast<int>((old_pointer_space()->CommittedMemory() * 100.0) /
-                         CommittedMemory()));
-    isolate_->counters()->heap_fraction_old_data_space()->AddSample(
-        static_cast<int>((old_data_space()->CommittedMemory() * 100.0) /
-                         CommittedMemory()));
+    isolate_->counters()->heap_fraction_old_space()->AddSample(static_cast<int>(
+        (old_space()->CommittedMemory() * 100.0) / CommittedMemory()));
     isolate_->counters()->heap_fraction_code_space()->AddSample(
         static_cast<int>((code_space()->CommittedMemory() * 100.0) /
                          CommittedMemory()));
     isolate_->counters()->heap_fraction_map_space()->AddSample(static_cast<int>(
         (map_space()->CommittedMemory() * 100.0) / CommittedMemory()));
-    isolate_->counters()->heap_fraction_cell_space()->AddSample(
-        static_cast<int>((cell_space()->CommittedMemory() * 100.0) /
-                         CommittedMemory()));
-    isolate_->counters()->heap_fraction_property_cell_space()->AddSample(
-        static_cast<int>((property_cell_space()->CommittedMemory() * 100.0) /
-                         CommittedMemory()));
     isolate_->counters()->heap_fraction_lo_space()->AddSample(static_cast<int>(
         (lo_space()->CommittedMemory() * 100.0) / CommittedMemory()));
 
@@ -678,12 +710,6 @@
         static_cast<int>(SizeOfObjects() / KB));
     isolate_->counters()->heap_sample_map_space_committed()->AddSample(
         static_cast<int>(map_space()->CommittedMemory() / KB));
-    isolate_->counters()->heap_sample_cell_space_committed()->AddSample(
-        static_cast<int>(cell_space()->CommittedMemory() / KB));
-    isolate_->counters()
-        ->heap_sample_property_cell_space_committed()
-        ->AddSample(
-            static_cast<int>(property_cell_space()->CommittedMemory() / KB));
     isolate_->counters()->heap_sample_code_space_committed()->AddSample(
         static_cast<int>(code_space()->CommittedMemory() / KB));
 
@@ -710,12 +736,9 @@
   UPDATE_FRAGMENTATION_FOR_SPACE(space)
 
   UPDATE_COUNTERS_FOR_SPACE(new_space)
-  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_pointer_space)
-  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_data_space)
+  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_space)
   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space)
   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
-  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(cell_space)
-  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(property_cell_space)
   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space)
 #undef UPDATE_COUNTERS_FOR_SPACE
 #undef UPDATE_FRAGMENTATION_FOR_SPACE
@@ -728,6 +751,113 @@
   // Remember the last top pointer so that we can later find out
   // whether we allocated in new space since the last GC.
   new_space_top_after_last_gc_ = new_space()->top();
+  last_gc_time_ = MonotonicallyIncreasingTimeInMs();
+
+  ReduceNewSpaceSize();
+}
+
+
+void Heap::PreprocessStackTraces() {
+  WeakFixedArray::Iterator iterator(weak_stack_trace_list());
+  FixedArray* elements;
+  while ((elements = iterator.Next<FixedArray>())) {
+    for (int j = 1; j < elements->length(); j += 4) {
+      Object* maybe_code = elements->get(j + 2);
+      // If GC happens while adding a stack trace to the weak fixed array,
+      // which has been copied into a larger backing store, we may run into
+      // a stack trace that has already been preprocessed. Guard against this.
+      if (!maybe_code->IsCode()) break;
+      Code* code = Code::cast(maybe_code);
+      int offset = Smi::cast(elements->get(j + 3))->value();
+      Address pc = code->address() + offset;
+      int pos = code->SourcePosition(pc);
+      elements->set(j + 2, Smi::FromInt(pos));
+    }
+  }
+  // We must not compact the weak fixed list here, as we may have been in the
+  // middle of writing to it when the GC was triggered. Instead, we reset the
+  // root value.
+  set_weak_stack_trace_list(Smi::FromInt(0));
+}
+
+
+class GCCallbacksScope {
+ public:
+  explicit GCCallbacksScope(Heap* heap) : heap_(heap) {
+    heap_->gc_callbacks_depth_++;
+  }
+  ~GCCallbacksScope() { heap_->gc_callbacks_depth_--; }
+
+  bool CheckReenter() { return heap_->gc_callbacks_depth_ == 1; }
+
+ private:
+  Heap* heap_;
+};
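
GCCallbacksScope is a depth counter: embedder prologue/epilogue callbacks are only invoked when CheckReenter() observes depth one, so a GC triggered from inside a callback does not re-enter them. The same idiom in isolation (names are hypothetical):

// Standalone sketch of the depth-guard idiom used by GCCallbacksScope above.
class ReentrancyGuard {
 public:
  explicit ReentrancyGuard(int* depth) : depth_(depth) { ++(*depth_); }
  ~ReentrancyGuard() { --(*depth_); }

  // True only for the outermost scope; nested invocations skip the callbacks.
  bool IsOutermost() const { return *depth_ == 1; }

 private:
  int* depth_;
};
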
+
+
+void Heap::HandleGCRequest() {
+  if (incremental_marking()->request_type() ==
+      IncrementalMarking::COMPLETE_MARKING) {
+    CollectAllGarbage(current_gc_flags_, "GC interrupt",
+                      current_gc_callback_flags_);
+  } else if (incremental_marking()->IsMarking() &&
+             !incremental_marking()->finalize_marking_completed()) {
+    FinalizeIncrementalMarking("GC interrupt: finalize incremental marking");
+  }
+}
+
+
+void Heap::ScheduleIdleScavengeIfNeeded(int bytes_allocated) {
+  scavenge_job_->ScheduleIdleTaskIfNeeded(this, bytes_allocated);
+}
+
+
+void Heap::FinalizeIncrementalMarking(const char* gc_reason) {
+  if (FLAG_trace_incremental_marking) {
+    PrintF("[IncrementalMarking] (%s).\n", gc_reason);
+  }
+
+  GCTracer::Scope gc_scope(tracer(), GCTracer::Scope::MC_INCREMENTAL_FINALIZE);
+  HistogramTimerScope incremental_marking_scope(
+      isolate()->counters()->gc_incremental_marking_finalize());
+
+  {
+    GCCallbacksScope scope(this);
+    if (scope.CheckReenter()) {
+      AllowHeapAllocation allow_allocation;
+      GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
+      VMState<EXTERNAL> state(isolate_);
+      HandleScope handle_scope(isolate_);
+      CallGCPrologueCallbacks(kGCTypeIncrementalMarking, kNoGCCallbackFlags);
+    }
+  }
+  incremental_marking()->FinalizeIncrementally();
+  {
+    GCCallbacksScope scope(this);
+    if (scope.CheckReenter()) {
+      AllowHeapAllocation allow_allocation;
+      GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
+      VMState<EXTERNAL> state(isolate_);
+      HandleScope handle_scope(isolate_);
+      CallGCEpilogueCallbacks(kGCTypeIncrementalMarking, kNoGCCallbackFlags);
+    }
+  }
+}
+
+
+HistogramTimer* Heap::GCTypeTimer(GarbageCollector collector) {
+  if (collector == SCAVENGER) {
+    return isolate_->counters()->gc_scavenger();
+  } else {
+    if (!incremental_marking()->IsStopped()) {
+      if (ShouldReduceMemory()) {
+        return isolate_->counters()->gc_finalize_reduce_memory();
+      } else {
+        return isolate_->counters()->gc_finalize();
+      }
+    } else {
+      return isolate_->counters()->gc_compactor();
+    }
+  }
 }
 
 
@@ -736,9 +866,9 @@
   // Since we are ignoring the return value, the exact choice of space does
   // not matter, so long as we do not specify NEW_SPACE, which would not
   // cause a full GC.
-  mark_compact_collector_.SetFlags(flags);
-  CollectGarbage(OLD_POINTER_SPACE, gc_reason, gc_callback_flags);
-  mark_compact_collector_.SetFlags(kNoGCFlags);
+  set_current_gc_flags(flags);
+  CollectGarbage(OLD_SPACE, gc_reason, gc_callback_flags);
+  set_current_gc_flags(kNoGCFlags);
 }
 
 
@@ -757,25 +887,52 @@
   if (isolate()->concurrent_recompilation_enabled()) {
     // The optimizing compiler may be unnecessarily holding on to memory.
     DisallowHeapAllocation no_recursive_gc;
-    isolate()->optimizing_compiler_thread()->Flush();
+    isolate()->optimizing_compile_dispatcher()->Flush();
   }
-  mark_compact_collector()->SetFlags(kMakeHeapIterableMask |
-                                     kReduceMemoryFootprintMask);
+  isolate()->ClearSerializerData();
+  set_current_gc_flags(kMakeHeapIterableMask | kReduceMemoryFootprintMask);
   isolate_->compilation_cache()->Clear();
   const int kMaxNumberOfAttempts = 7;
   const int kMinNumberOfAttempts = 2;
   for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
-    if (!CollectGarbage(MARK_COMPACTOR, gc_reason, NULL) &&
+    if (!CollectGarbage(MARK_COMPACTOR, gc_reason, NULL,
+                        v8::kGCCallbackFlagForced) &&
         attempt + 1 >= kMinNumberOfAttempts) {
       break;
     }
   }
-  mark_compact_collector()->SetFlags(kNoGCFlags);
+  set_current_gc_flags(kNoGCFlags);
   new_space_.Shrink();
   UncommitFromSpace();
 }
 
 
+void Heap::ReportExternalMemoryPressure(const char* gc_reason) {
+  if (incremental_marking()->IsStopped()) {
+    if (incremental_marking()->CanBeActivated()) {
+      StartIncrementalMarking(
+          i::Heap::kNoGCFlags,
+          kGCCallbackFlagSynchronousPhantomCallbackProcessing, gc_reason);
+    } else {
+      CollectAllGarbage(i::Heap::kNoGCFlags, gc_reason,
+                        kGCCallbackFlagSynchronousPhantomCallbackProcessing);
+    }
+  } else {
+    // Incremental marking is turned on and has already been started.
+
+    // TODO(mlippautz): Compute the time slice for incremental marking based on
+    // memory pressure.
+    double deadline = MonotonicallyIncreasingTimeInMs() +
+                      FLAG_external_allocation_limit_incremental_time;
+    incremental_marking()->AdvanceIncrementalMarking(
+        0, deadline,
+        IncrementalMarking::StepActions(IncrementalMarking::GC_VIA_STACK_GUARD,
+                                        IncrementalMarking::FORCE_MARKING,
+                                        IncrementalMarking::FORCE_COMPLETION));
+  }
+}
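
This path is driven by the embedder reporting external allocations through the public API; depending on the marking state the heap either starts incremental marking, advances it, or falls back to a full GC. A hedged sketch of the embedder side (the helper function is made up for illustration; AdjustAmountOfExternalAllocatedMemory is the public v8::Isolate entry point):

#include "v8.h"  // Include path depends on the embedding setup.

// Report memory held outside the V8 heap so the GC can account for it; a
// negative delta would be reported when the external buffer is released.
void RegisterExternalBuffer(v8::Isolate* isolate, int64_t size_in_bytes) {
  isolate->AdjustAmountOfExternalAllocatedMemory(size_in_bytes);
}
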
+
+
 void Heap::EnsureFillerObjectAtTop() {
   // There may be an allocation memento behind every object in new space.
   // If we evacuate a not full new space or if we are on the last page of
@@ -783,10 +940,14 @@
   // pointer of the new space page. We store a filler object there to
   // identify the unused space.
   Address from_top = new_space_.top();
-  Address from_limit = new_space_.limit();
-  if (from_top < from_limit) {
-    int remaining_in_page = static_cast<int>(from_limit - from_top);
-    CreateFillerObjectAt(from_top, remaining_in_page);
+  // Check that from_top is inside its page (i.e., not at the end).
+  Address space_end = new_space_.ToSpaceEnd();
+  if (from_top < space_end) {
+    Page* page = Page::FromAddress(from_top);
+    if (page->Contains(from_top)) {
+      int remaining_in_page = static_cast<int>(page->area_end() - from_top);
+      CreateFillerObjectAt(from_top, remaining_in_page);
+    }
   }
 }
 
@@ -814,16 +975,17 @@
     }
   }
 
-  if (collector == MARK_COMPACTOR &&
-      !mark_compact_collector()->abort_incremental_marking() &&
-      !incremental_marking()->IsStopped() &&
-      !incremental_marking()->should_hurry() &&
-      FLAG_incremental_marking_steps) {
+  if (collector == MARK_COMPACTOR && !ShouldFinalizeIncrementalMarking() &&
+      !ShouldAbortIncrementalMarking() && !incremental_marking()->IsStopped() &&
+      !incremental_marking()->should_hurry() && FLAG_incremental_marking &&
+      OldGenerationAllocationLimitReached()) {
     // Make progress in incremental marking.
     const intptr_t kStepSizeWhenDelayedByScavenge = 1 * MB;
     incremental_marking()->Step(kStepSizeWhenDelayedByScavenge,
                                 IncrementalMarking::NO_GC_VIA_STACK_GUARD);
-    if (!incremental_marking()->IsComplete() && !FLAG_gc_global) {
+    if (!incremental_marking()->IsComplete() &&
+        !mark_compact_collector()->marking_deque_.IsEmpty() &&
+        !FLAG_gc_global) {
       if (FLAG_trace_incremental_marking) {
         PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
       }
@@ -833,6 +995,11 @@
   }
 
   bool next_gc_likely_to_collect_more = false;
+  intptr_t committed_memory_before = 0;
+
+  if (collector == MARK_COMPACTOR) {
+    committed_memory_before = CommittedOldGenerationMemory();
+  }
 
   {
     tracer()->Start(collector, gc_reason, collector_reason);
@@ -841,22 +1008,49 @@
     GarbageCollectionPrologue();
 
     {
-      HistogramTimerScope histogram_timer_scope(
-          (collector == SCAVENGER) ? isolate_->counters()->gc_scavenger()
-                                   : isolate_->counters()->gc_compactor());
+      HistogramTimerScope histogram_timer_scope(GCTypeTimer(collector));
+
       next_gc_likely_to_collect_more =
           PerformGarbageCollection(collector, gc_callback_flags);
     }
 
     GarbageCollectionEpilogue();
+    if (collector == MARK_COMPACTOR && FLAG_track_detached_contexts) {
+      isolate()->CheckDetachedContextsAfterGC();
+    }
+
+    if (collector == MARK_COMPACTOR) {
+      intptr_t committed_memory_after = CommittedOldGenerationMemory();
+      intptr_t used_memory_after = PromotedSpaceSizeOfObjects();
+      MemoryReducer::Event event;
+      event.type = MemoryReducer::kMarkCompact;
+      event.time_ms = MonotonicallyIncreasingTimeInMs();
+      // Trigger one more GC if
+      // - this GC decreased committed memory,
+      // - there is high fragmentation,
+      // - there are live detached contexts.
+      event.next_gc_likely_to_collect_more =
+          (committed_memory_before - committed_memory_after) > MB ||
+          HasHighFragmentation(used_memory_after, committed_memory_after) ||
+          (detached_contexts()->length() > 0);
+      if (deserialization_complete_) {
+        memory_reducer_->NotifyMarkCompact(event);
+      }
+    }
+
     tracer()->Stop(collector);
   }
 
+  if (collector == MARK_COMPACTOR &&
+      (gc_callback_flags & kGCCallbackFlagForced) != 0) {
+    isolate()->CountUsage(v8::Isolate::kForcedGC);
+  }
+
   // Start incremental marking for the next cycle. The heap snapshot
   // generator needs incremental marking to stay off after it aborted.
-  if (!mark_compact_collector()->abort_incremental_marking() &&
-      WorthActivatingIncrementalMarking()) {
-    incremental_marking()->Start();
+  if (!ShouldAbortIncrementalMarking() && incremental_marking()->IsStopped() &&
+      incremental_marking()->ShouldActivateEvenWithoutIdleNotification()) {
+    StartIncrementalMarking(kNoGCFlags, kNoGCCallbackFlags, "GC epilogue");
   }
 
   return next_gc_likely_to_collect_more;
@@ -867,18 +1061,39 @@
   if (!dependant_context) {
     tracer()->ResetSurvivalEvents();
     old_generation_size_configured_ = false;
+    MemoryReducer::Event event;
+    event.type = MemoryReducer::kContextDisposed;
+    event.time_ms = MonotonicallyIncreasingTimeInMs();
+    memory_reducer_->NotifyContextDisposed(event);
   }
   if (isolate()->concurrent_recompilation_enabled()) {
     // Flush the queued recompilation tasks.
-    isolate()->optimizing_compiler_thread()->Flush();
+    isolate()->optimizing_compile_dispatcher()->Flush();
   }
-  flush_monomorphic_ics_ = true;
   AgeInlineCaches();
-  tracer()->AddContextDisposalTime(base::OS::TimeCurrentMillis());
+  number_of_disposed_maps_ = retained_maps()->Length();
+  tracer()->AddContextDisposalTime(MonotonicallyIncreasingTimeInMs());
   return ++contexts_disposed_;
 }
 
 
+void Heap::StartIncrementalMarking(int gc_flags,
+                                   const GCCallbackFlags gc_callback_flags,
+                                   const char* reason) {
+  DCHECK(incremental_marking()->IsStopped());
+  set_current_gc_flags(gc_flags);
+  current_gc_callback_flags_ = gc_callback_flags;
+  incremental_marking()->Start(reason);
+}
+
+
+void Heap::StartIdleIncrementalMarking() {
+  gc_idle_time_handler_->ResetNoProgressCounter();
+  StartIncrementalMarking(kReduceMemoryFootprintMask, kNoGCCallbackFlags,
+                          "idle");
+}
+
+
 void Heap::MoveElements(FixedArray* array, int dst_index, int src_index,
                         int len) {
   if (len == 0) return;
@@ -902,7 +1117,7 @@
 // Helper class for verifying the string table.
 class StringTableVerifier : public ObjectVisitor {
  public:
-  void VisitPointers(Object** start, Object** end) {
+  void VisitPointers(Object** start, Object** end) override {
     // Visit all HeapObject pointers in [start, end).
     for (Object** p = start; p < end; p++) {
       if ((*p)->IsHeapObject()) {
@@ -922,15 +1137,6 @@
 #endif  // VERIFY_HEAP
 
 
-static bool AbortIncrementalMarkingAndCollectGarbage(
-    Heap* heap, AllocationSpace space, const char* gc_reason = NULL) {
-  heap->mark_compact_collector()->SetFlags(Heap::kAbortIncrementalMarkingMask);
-  bool result = heap->CollectGarbage(space, gc_reason);
-  heap->mark_compact_collector()->SetFlags(Heap::kNoGCFlags);
-  return result;
-}
-
-
 bool Heap::ReserveSpace(Reservation* reservations) {
   bool gc_performed = true;
   int counter = 0;
@@ -944,7 +1150,7 @@
       bool perform_gc = false;
       if (space == LO_SPACE) {
         DCHECK_EQ(1, reservation->length());
-        perform_gc = !lo_space()->CanAllocateSize(reservation->at(0).size);
+        perform_gc = !CanExpandOldGeneration(reservation->at(0).size);
       } else {
         for (auto& chunk : *reservation) {
           AllocationResult allocation;
@@ -952,18 +1158,19 @@
           DCHECK_LE(size, MemoryAllocator::PageAreaSize(
                               static_cast<AllocationSpace>(space)));
           if (space == NEW_SPACE) {
-            allocation = new_space()->AllocateRaw(size);
+            allocation = new_space()->AllocateRawUnaligned(size);
           } else {
-            allocation = paged_space(space)->AllocateRaw(size);
+            allocation = paged_space(space)->AllocateRawUnaligned(size);
           }
-          FreeListNode* node;
-          if (allocation.To(&node)) {
+          HeapObject* free_space = nullptr;
+          if (allocation.To(&free_space)) {
             // Mark with a free list node, in case we have a GC before
             // deserializing.
-            node->set_size(this, size);
+            Address free_space_address = free_space->address();
+            CreateFillerObjectAt(free_space_address, size);
             DCHECK(space < Serializer::kNumberOfPreallocatedSpaces);
-            chunk.start = node->address();
-            chunk.end = node->address() + size;
+            chunk.start = free_space_address;
+            chunk.end = free_space_address + size;
           } else {
             perform_gc = true;
             break;
@@ -972,12 +1179,18 @@
       }
       if (perform_gc) {
         if (space == NEW_SPACE) {
-          Heap::CollectGarbage(NEW_SPACE,
-                               "failed to reserve space in the new space");
+          CollectGarbage(NEW_SPACE, "failed to reserve space in the new space");
         } else {
-          AbortIncrementalMarkingAndCollectGarbage(
-              this, static_cast<AllocationSpace>(space),
-              "failed to reserve space in paged or large object space");
+          if (counter > 1) {
+            CollectAllGarbage(
+                kReduceMemoryFootprintMask | kAbortIncrementalMarkingMask,
+                "failed to reserve space in paged or large "
+                "object space, trying to reduce memory footprint");
+          } else {
+            CollectAllGarbage(
+                kAbortIncrementalMarkingMask,
+                "failed to reserve space in paged or large object space");
+          }
         }
         gc_performed = true;
         break;  // Abort for-loop over spaces and retry.
@@ -998,29 +1211,6 @@
 }
 
 
-void Heap::ClearJSFunctionResultCaches() {
-  if (isolate_->bootstrapper()->IsActive()) return;
-
-  Object* context = native_contexts_list();
-  while (!context->IsUndefined()) {
-    // Get the caches for this context. GC can happen when the context
-    // is not fully initialized, so the caches can be undefined.
-    Object* caches_or_undefined =
-        Context::cast(context)->get(Context::JSFUNCTION_RESULT_CACHES_INDEX);
-    if (!caches_or_undefined->IsUndefined()) {
-      FixedArray* caches = FixedArray::cast(caches_or_undefined);
-      // Clear the caches:
-      int length = caches->length();
-      for (int i = 0; i < length; i++) {
-        JSFunctionResultCache::cast(caches->get(i))->Clear();
-      }
-    }
-    // Get the next context:
-    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
-  }
-}
-
-
 void Heap::ClearNormalizedMapCaches() {
   if (isolate_->bootstrapper()->IsActive() &&
       !incremental_marking()->IsMarking()) {
@@ -1060,8 +1250,7 @@
        static_cast<double>(start_new_space_size) * 100);
 
   double survival_rate = promotion_ratio_ + semi_space_copied_rate_;
-  tracer()->AddSurvivalRate(survival_rate);
-
+  tracer()->AddSurvivalRatio(survival_rate);
   if (survival_rate > kYoungSurvivalRateHighThreshold) {
     high_survival_rate_period_length_++;
   } else {
@@ -1108,19 +1297,25 @@
     incremental_marking()->NotifyOfHighPromotionRate();
   }
 
-  if (collector == MARK_COMPACTOR) {
-    // Perform mark-sweep with optional compaction.
-    MarkCompact();
-    sweep_generation_++;
-    // Temporarily set the limit for case when PostGarbageCollectionProcessing
-    // allocates and triggers GC. The real limit is set at after
-    // PostGarbageCollectionProcessing.
-    old_generation_allocation_limit_ =
-        OldGenerationAllocationLimit(PromotedSpaceSizeOfObjects(), 0);
-    old_gen_exhausted_ = false;
-    old_generation_size_configured_ = true;
-  } else {
-    Scavenge();
+  {
+    Heap::PretenuringScope pretenuring_scope(this);
+
+    if (collector == MARK_COMPACTOR) {
+      UpdateOldGenerationAllocationCounter();
+      // Perform mark-sweep with optional compaction.
+      MarkCompact();
+      old_gen_exhausted_ = false;
+      old_generation_size_configured_ = true;
+      // This should be updated before PostGarbageCollectionProcessing, which
+      // can cause another GC. Take into account the objects promoted during GC.
+      old_generation_allocation_counter_ +=
+          static_cast<size_t>(promoted_objects_size_);
+      old_generation_size_at_last_gc_ = PromotedSpaceSizeOfObjects();
+    } else {
+      Scavenge();
+    }
+
+    ProcessPretenuringFeedback();
   }
 
   UpdateSurvivalStatistics(start_new_space_size);
@@ -1128,16 +1323,25 @@
 
   isolate_->counters()->objs_since_last_young()->Set(0);
 
-  // Callbacks that fire after this point might trigger nested GCs and
-  // restart incremental marking, the assertion can't be moved down.
-  DCHECK(collector == SCAVENGER || incremental_marking()->IsStopped());
+  if (collector != SCAVENGER) {
+    // Callbacks that fire after this point might trigger nested GCs and
+    // restart incremental marking, so the assertion can't be moved down.
+    DCHECK(incremental_marking()->IsStopped());
+
+    // We finished a marking cycle. We can uncommit the marking deque until
+    // we start marking again.
+    mark_compact_collector()->marking_deque()->Uninitialize();
+    mark_compact_collector()->EnsureMarkingDequeIsCommitted(
+        MarkCompactCollector::kMinMarkingDequeSize);
+  }
 
   gc_post_processing_depth_++;
   {
     AllowHeapAllocation allow_allocation;
     GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
     freed_global_handles =
-        isolate_->global_handles()->PostGarbageCollectionProcessing(collector);
+        isolate_->global_handles()->PostGarbageCollectionProcessing(
+            collector, gc_callback_flags);
   }
   gc_post_processing_depth_--;
 
@@ -1146,15 +1350,19 @@
   // Update relocatables.
   Relocatable::PostGarbageCollectionProcessing(isolate_);
 
+  double gc_speed = tracer()->CombinedMarkCompactSpeedInBytesPerMillisecond();
+  double mutator_speed = static_cast<double>(
+      tracer()
+          ->CurrentOldGenerationAllocationThroughputInBytesPerMillisecond());
+  intptr_t old_gen_size = PromotedSpaceSizeOfObjects();
   if (collector == MARK_COMPACTOR) {
     // Register the amount of external allocated memory.
     amount_of_external_allocated_memory_at_last_global_gc_ =
         amount_of_external_allocated_memory_;
-    old_generation_allocation_limit_ = OldGenerationAllocationLimit(
-        PromotedSpaceSizeOfObjects(), freed_global_handles);
-    // We finished a marking cycle. We can uncommit the marking deque until
-    // we start marking again.
-    mark_compact_collector_.UncommitMarkingDeque();
+    SetOldGenerationAllocationLimit(old_gen_size, gc_speed, mutator_speed);
+  } else if (HasLowYoungGenerationAllocationRate() &&
+             old_generation_size_configured_) {
+    DampenOldGenerationAllocationLimit(old_gen_size, gc_speed, mutator_speed);
   }
 
   {
@@ -1181,10 +1389,9 @@
 void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
   for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
     if (gc_type & gc_prologue_callbacks_[i].gc_type) {
-      if (!gc_prologue_callbacks_[i].pass_isolate_) {
-        v8::GCPrologueCallback callback =
-            reinterpret_cast<v8::GCPrologueCallback>(
-                gc_prologue_callbacks_[i].callback);
+      if (!gc_prologue_callbacks_[i].pass_isolate) {
+        v8::GCCallback callback = reinterpret_cast<v8::GCCallback>(
+            gc_prologue_callbacks_[i].callback);
         callback(gc_type, flags);
       } else {
         v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
@@ -1199,10 +1406,9 @@
                                    GCCallbackFlags gc_callback_flags) {
   for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
     if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
-      if (!gc_epilogue_callbacks_[i].pass_isolate_) {
-        v8::GCPrologueCallback callback =
-            reinterpret_cast<v8::GCPrologueCallback>(
-                gc_epilogue_callbacks_[i].callback);
+      if (!gc_epilogue_callbacks_[i].pass_isolate) {
+        v8::GCCallback callback = reinterpret_cast<v8::GCCallback>(
+            gc_epilogue_callbacks_[i].callback);
         callback(gc_type, gc_callback_flags);
       } else {
         v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
@@ -1214,18 +1420,20 @@
 
 
 void Heap::MarkCompact() {
+  PauseInlineAllocationObserversScope pause_observers(new_space());
+
   gc_state_ = MARK_COMPACT;
   LOG(isolate_, ResourceEvent("markcompact", "begin"));
 
   uint64_t size_of_objects_before_gc = SizeOfObjects();
 
-  mark_compact_collector_.Prepare();
+  mark_compact_collector()->Prepare();
 
   ms_count_++;
 
   MarkCompactPrologue();
 
-  mark_compact_collector_.CollectGarbage();
+  mark_compact_collector()->CollectGarbage();
 
   LOG(isolate_, ResourceEvent("markcompact", "end"));
 
@@ -1242,9 +1450,9 @@
 
   isolate_->counters()->objs_since_last_full()->Set(0);
 
-  flush_monomorphic_ics_ = false;
-
   incremental_marking()->Epilogue();
+
+  PreprocessStackTraces();
 }
 
 
@@ -1270,37 +1478,14 @@
 }
 
 
-// Helper class for copying HeapObjects
-class ScavengeVisitor : public ObjectVisitor {
- public:
-  explicit ScavengeVisitor(Heap* heap) : heap_(heap) {}
-
-  void VisitPointer(Object** p) { ScavengePointer(p); }
-
-  void VisitPointers(Object** start, Object** end) {
-    // Copy all HeapObject pointers in [start, end)
-    for (Object** p = start; p < end; p++) ScavengePointer(p);
-  }
-
- private:
-  void ScavengePointer(Object** p) {
-    Object* object = *p;
-    if (!heap_->InNewSpace(object)) return;
-    Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
-                         reinterpret_cast<HeapObject*>(object));
-  }
-
-  Heap* heap_;
-};
-
-
 #ifdef VERIFY_HEAP
 // Visitor class to verify pointers in code or data space do not point into
 // new space.
 class VerifyNonPointerSpacePointersVisitor : public ObjectVisitor {
  public:
   explicit VerifyNonPointerSpacePointersVisitor(Heap* heap) : heap_(heap) {}
-  void VisitPointers(Object** start, Object** end) {
+
+  void VisitPointers(Object** start, Object** end) override {
     for (Object** current = start; current < end; current++) {
       if ((*current)->IsHeapObject()) {
         CHECK(!heap_->InNewSpace(HeapObject::cast(*current)));
@@ -1321,11 +1506,6 @@
   for (HeapObject* object = code_it.Next(); object != NULL;
        object = code_it.Next())
     object->Iterate(&v);
-
-    HeapObjectIterator data_it(heap->old_data_space());
-    for (HeapObject* object = data_it.Next(); object != NULL;
-         object = data_it.Next())
-      object->Iterate(&v);
 }
 #endif  // VERIFY_HEAP
 
@@ -1355,71 +1535,38 @@
 }
 
 
+static bool IsUnmodifiedHeapObject(Object** p) {
+  Object* object = *p;
+  if (object->IsSmi()) return false;
+  HeapObject* heap_object = HeapObject::cast(object);
+  if (!object->IsJSObject()) return false;
+  Object* obj_constructor = (JSObject::cast(object))->map()->GetConstructor();
+  if (!obj_constructor->IsJSFunction()) return false;
+  JSFunction* constructor = JSFunction::cast(obj_constructor);
+  if (!constructor->shared()->IsApiFunction()) return false;
+  if (constructor != nullptr &&
+      constructor->initial_map() == heap_object->map()) {
+    return true;
+  }
+  return false;
+}
+
+
 void Heap::ScavengeStoreBufferCallback(Heap* heap, MemoryChunk* page,
                                        StoreBufferEvent event) {
   heap->store_buffer_rebuilder_.Callback(page, event);
 }
 
 
-void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
-  if (event == kStoreBufferStartScanningPagesEvent) {
-    start_of_current_page_ = NULL;
-    current_page_ = NULL;
-  } else if (event == kStoreBufferScanningPageEvent) {
-    if (current_page_ != NULL) {
-      // If this page already overflowed the store buffer during this iteration.
-      if (current_page_->scan_on_scavenge()) {
-        // Then we should wipe out the entries that have been added for it.
-        store_buffer_->SetTop(start_of_current_page_);
-      } else if (store_buffer_->Top() - start_of_current_page_ >=
-                 (store_buffer_->Limit() - store_buffer_->Top()) >> 2) {
-        // Did we find too many pointers in the previous page?  The heuristic is
-        // that no page can take more then 1/5 the remaining slots in the store
-        // buffer.
-        current_page_->set_scan_on_scavenge(true);
-        store_buffer_->SetTop(start_of_current_page_);
-      } else {
-        // In this case the page we scanned took a reasonable number of slots in
-        // the store buffer.  It has now been rehabilitated and is no longer
-        // marked scan_on_scavenge.
-        DCHECK(!current_page_->scan_on_scavenge());
-      }
-    }
-    start_of_current_page_ = store_buffer_->Top();
-    current_page_ = page;
-  } else if (event == kStoreBufferFullEvent) {
-    // The current page overflowed the store buffer again.  Wipe out its entries
-    // in the store buffer and mark it scan-on-scavenge again.  This may happen
-    // several times while scanning.
-    if (current_page_ == NULL) {
-      // Store Buffer overflowed while scanning promoted objects.  These are not
-      // in any particular page, though they are likely to be clustered by the
-      // allocation routines.
-      store_buffer_->EnsureSpace(StoreBuffer::kStoreBufferSize / 2);
-    } else {
-      // Store Buffer overflowed while scanning a particular old space page for
-      // pointers to new space.
-      DCHECK(current_page_ == page);
-      DCHECK(page != NULL);
-      current_page_->set_scan_on_scavenge(true);
-      DCHECK(start_of_current_page_ != store_buffer_->Top());
-      store_buffer_->SetTop(start_of_current_page_);
-    }
-  } else {
-    UNREACHABLE();
-  }
-}
-
-
 void PromotionQueue::Initialize() {
-  // Assumes that a NewSpacePage exactly fits a number of promotion queue
-  // entries (where each is a pair of intptr_t). This allows us to simplify
-  // the test fpr when to switch pages.
+  // The last to-space page may be used for the promotion queue. On promotion
+  // conflict, we use the emergency stack.
   DCHECK((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize) ==
          0);
-  limit_ = reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceStart());
   front_ = rear_ =
       reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd());
+  limit_ = reinterpret_cast<intptr_t*>(
+      Page::FromAllocationTop(reinterpret_cast<Address>(rear_))->area_start());
   emergency_stack_ = NULL;
 }
 
@@ -1471,7 +1618,16 @@
 
 
 void Heap::Scavenge() {
+  GCTracer::Scope gc_scope(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE);
   RelocationLock relocation_lock(this);
+  // There are soft limits in the allocation code, designed to trigger a
+  // mark-sweep collection by failing allocations. There is no sense in trying
+  // to trigger one during scavenge: scavenge allocations should always
+  // succeed.
+  AlwaysAllocateScope scope(isolate());
+
+  // Bump-pointer allocations done during scavenge are not real allocations.
+  // Pause the inline allocation steps.
+  PauseInlineAllocationObserversScope pause_observers(new_space());
 
 #ifdef VERIFY_HEAP
   if (FLAG_verify_heap) VerifyNonPointerSpacePointers(this);
@@ -1488,9 +1644,9 @@
   // Used for updating survived_since_last_expansion_ at function end.
   intptr_t survived_watermark = PromotedSpaceSizeOfObjects();
 
-  SelectScavengingVisitorsTable();
+  scavenge_collector_->SelectScavengingVisitorsTable();
 
-  incremental_marking()->PrepareForScavenge();
+  array_buffer_tracker()->PrepareDiscoveryInNewSpace();
 
   // Flip the semispaces.  After flipping, to space is empty, from space has
   // live objects.
@@ -1517,74 +1673,75 @@
   Address new_space_front = new_space_.ToSpaceStart();
   promotion_queue_.Initialize();
 
-#ifdef DEBUG
-  store_buffer()->Clean();
-#endif
-
   ScavengeVisitor scavenge_visitor(this);
-  // Copy roots.
-  IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
 
-  // Copy objects reachable from the old generation.
+  if (FLAG_scavenge_reclaim_unmodified_objects) {
+    isolate()->global_handles()->IdentifyWeakUnmodifiedObjects(
+        &IsUnmodifiedHeapObject);
+  }
+
   {
+    // Copy roots.
+    GCTracer::Scope gc_scope(tracer(), GCTracer::Scope::SCAVENGER_ROOTS);
+    IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
+  }
+
+  {
+    // Copy objects reachable from the old generation.
+    GCTracer::Scope gc_scope(tracer(),
+                             GCTracer::Scope::SCAVENGER_OLD_TO_NEW_POINTERS);
     StoreBufferRebuildScope scope(this, store_buffer(),
                                   &ScavengeStoreBufferCallback);
-    store_buffer()->IteratePointersToNewSpace(&ScavengeObject);
+    store_buffer()->IteratePointersToNewSpace(&Scavenger::ScavengeObject);
   }
 
-  // Copy objects reachable from simple cells by scavenging cell values
-  // directly.
-  HeapObjectIterator cell_iterator(cell_space_);
-  for (HeapObject* heap_object = cell_iterator.Next(); heap_object != NULL;
-       heap_object = cell_iterator.Next()) {
-    if (heap_object->IsCell()) {
-      Cell* cell = Cell::cast(heap_object);
-      Address value_address = cell->ValueAddress();
-      scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
+  {
+    GCTracer::Scope gc_scope(tracer(), GCTracer::Scope::SCAVENGER_WEAK);
+    // Copy objects reachable from the encountered weak collections list.
+    scavenge_visitor.VisitPointer(&encountered_weak_collections_);
+    // Copy objects reachable from the encountered weak cells.
+    scavenge_visitor.VisitPointer(&encountered_weak_cells_);
+  }
+
+  {
+    // Copy objects reachable from the code flushing candidates list.
+    GCTracer::Scope gc_scope(tracer(),
+                             GCTracer::Scope::SCAVENGER_CODE_FLUSH_CANDIDATES);
+    MarkCompactCollector* collector = mark_compact_collector();
+    if (collector->is_code_flushing_enabled()) {
+      collector->code_flusher()->IteratePointersToFromSpace(&scavenge_visitor);
     }
   }
 
-  // Copy objects reachable from global property cells by scavenging global
-  // property cell values directly.
-  HeapObjectIterator js_global_property_cell_iterator(property_cell_space_);
-  for (HeapObject* heap_object = js_global_property_cell_iterator.Next();
-       heap_object != NULL;
-       heap_object = js_global_property_cell_iterator.Next()) {
-    if (heap_object->IsPropertyCell()) {
-      PropertyCell* cell = PropertyCell::cast(heap_object);
-      Address value_address = cell->ValueAddress();
-      scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
-      Address type_address = cell->TypeAddress();
-      scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(type_address));
-    }
-  }
-
-  // Copy objects reachable from the encountered weak collections list.
-  scavenge_visitor.VisitPointer(&encountered_weak_collections_);
-  // Copy objects reachable from the encountered weak cells.
-  scavenge_visitor.VisitPointer(&encountered_weak_cells_);
-
-  // Copy objects reachable from the code flushing candidates list.
-  MarkCompactCollector* collector = mark_compact_collector();
-  if (collector->is_code_flushing_enabled()) {
-    collector->code_flusher()->IteratePointersToFromSpace(&scavenge_visitor);
-  }
-
-  new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
-
-  while (isolate()->global_handles()->IterateObjectGroups(
-      &scavenge_visitor, &IsUnscavengedHeapObject)) {
+  {
+    GCTracer::Scope gc_scope(tracer(), GCTracer::Scope::SCAVENGER_SEMISPACE);
     new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
   }
-  isolate()->global_handles()->RemoveObjectGroups();
-  isolate()->global_handles()->RemoveImplicitRefGroups();
 
-  isolate()->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
-      &IsUnscavengedHeapObject);
+  if (FLAG_scavenge_reclaim_unmodified_objects) {
+    isolate()->global_handles()->MarkNewSpaceWeakUnmodifiedObjectsPending(
+        &IsUnscavengedHeapObject);
 
-  isolate()->global_handles()->IterateNewSpaceWeakIndependentRoots(
-      &scavenge_visitor);
-  new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
+    isolate()->global_handles()->IterateNewSpaceWeakUnmodifiedRoots(
+        &scavenge_visitor);
+    new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
+  } else {
+    GCTracer::Scope gc_scope(tracer(),
+                             GCTracer::Scope::SCAVENGER_OBJECT_GROUPS);
+    while (isolate()->global_handles()->IterateObjectGroups(
+        &scavenge_visitor, &IsUnscavengedHeapObject)) {
+      new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
+    }
+    isolate()->global_handles()->RemoveObjectGroups();
+    isolate()->global_handles()->RemoveImplicitRefGroups();
+
+    isolate()->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
+        &IsUnscavengedHeapObject);
+
+    isolate()->global_handles()->IterateNewSpaceWeakIndependentRoots(
+        &scavenge_visitor);
+    new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
+  }
 
   UpdateNewSpaceReferencesInExternalStringTable(
       &UpdateNewSpaceReferenceInExternalStringTableEntry);
@@ -1594,15 +1751,14 @@
   incremental_marking()->UpdateMarkingDequeAfterScavenge();
 
   ScavengeWeakObjectRetainer weak_object_retainer(this);
-  ProcessWeakReferences(&weak_object_retainer);
+  ProcessYoungWeakReferences(&weak_object_retainer);
 
   DCHECK(new_space_front == new_space_.top());
 
   // Set age mark.
   new_space_.set_age_mark(new_space_.top());
 
-  new_space_.LowerInlineAllocationLimit(
-      new_space_.inline_allocation_limit_step());
+  array_buffer_tracker()->FreeDead(true);
 
   // Update how much has survived scavenge.
   IncrementYoungSurvivorsCounter(static_cast<int>(
@@ -1611,8 +1767,6 @@
   LOG(isolate_, ResourceEvent("scavenge", "end"));
 
   gc_state_ = NOT_IN_GC;
-
-  gc_idle_time_handler_.NotifyScavenge();
 }
 
 
@@ -1681,16 +1835,14 @@
 }
 
 
-void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
-  ProcessArrayBuffers(retainer);
+void Heap::ProcessAllWeakReferences(WeakObjectRetainer* retainer) {
   ProcessNativeContexts(retainer);
-  // TODO(mvstanton): AllocationSites only need to be processed during
-  // MARK_COMPACT, as they live in old space. Verify and address.
   ProcessAllocationSites(retainer);
-  // Collects callback info for handles that are pending (about to be
-  // collected) and either phantom or internal-fields.  Releases the global
-  // handles.  See also PostGarbageCollectionProcessing.
-  isolate()->global_handles()->CollectPhantomCallbackData();
+}
+
+
+void Heap::ProcessYoungWeakReferences(WeakObjectRetainer* retainer) {
+  ProcessNativeContexts(retainer);
 }
 
 
@@ -1701,24 +1853,6 @@
 }
 
 
-void Heap::ProcessArrayBuffers(WeakObjectRetainer* retainer) {
-  Object* array_buffer_obj =
-      VisitWeakList<JSArrayBuffer>(this, array_buffers_list(), retainer);
-  set_array_buffers_list(array_buffer_obj);
-}
-
-
-void Heap::TearDownArrayBuffers() {
-  Object* undefined = undefined_value();
-  for (Object* o = array_buffers_list(); o != undefined;) {
-    JSArrayBuffer* buffer = JSArrayBuffer::cast(o);
-    Runtime::FreeArrayBuffer(isolate(), buffer);
-    o = buffer->weak_next();
-  }
-  set_array_buffers_list(undefined);
-}
-
-
 void Heap::ProcessAllocationSites(WeakObjectRetainer* retainer) {
   Object* allocation_site_obj =
       VisitWeakList<AllocationSite>(this, allocation_sites_list(), retainer);
@@ -1736,6 +1870,7 @@
       casted->ResetPretenureDecision();
       casted->set_deopt_dependent_code(true);
       marked = true;
+      RemoveAllocationSitePretenuringFeedback(casted);
     }
     cur = casted->weak_next();
   }
@@ -1791,17 +1926,6 @@
 }
 
 
-class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
- public:
-  static inline void VisitPointer(Heap* heap, Object** p) {
-    Object* object = *p;
-    if (!heap->InNewSpace(object)) return;
-    Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
-                         reinterpret_cast<HeapObject*>(object));
-  }
-};
-
-
 Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
                          Address new_space_front) {
   do {
@@ -1813,7 +1937,7 @@
       if (!NewSpacePage::IsAtEnd(new_space_front)) {
         HeapObject* object = HeapObject::FromAddress(new_space_front);
         new_space_front +=
-            NewSpaceScavenger::IterateBody(object->map(), object);
+            StaticScavengeVisitor::IterateBody(object->map(), object);
       } else {
         new_space_front =
             NewSpacePage::FromLimit(new_space_front)->next_page()->area_start();
@@ -1834,28 +1958,8 @@
         // for pointers to from semispace instead of looking for pointers
         // to new space.
         DCHECK(!target->IsMap());
-        Address obj_address = target->address();
-#if V8_DOUBLE_FIELDS_UNBOXING
-        LayoutDescriptorHelper helper(target->map());
-        bool has_only_tagged_fields = helper.all_fields_tagged();
 
-        if (!has_only_tagged_fields) {
-          for (int offset = 0; offset < size;) {
-            int end_of_region_offset;
-            if (helper.IsTagged(offset, size, &end_of_region_offset)) {
-              IterateAndMarkPointersToFromSpace(
-                  obj_address + offset, obj_address + end_of_region_offset,
-                  &ScavengeObject);
-            }
-            offset = end_of_region_offset;
-          }
-        } else {
-#endif
-          IterateAndMarkPointersToFromSpace(obj_address, obj_address + size,
-                                            &ScavengeObject);
-#if V8_DOUBLE_FIELDS_UNBOXING
-        }
-#endif
+        IteratePointersToFromSpace(target, size, &Scavenger::ScavengeObject);
       }
     }
 
@@ -1869,499 +1973,78 @@
 
 STATIC_ASSERT((FixedDoubleArray::kHeaderSize & kDoubleAlignmentMask) ==
               0);  // NOLINT
-STATIC_ASSERT((ConstantPoolArray::kFirstEntryOffset & kDoubleAlignmentMask) ==
+STATIC_ASSERT((FixedTypedArrayBase::kDataOffset & kDoubleAlignmentMask) ==
               0);  // NOLINT
-STATIC_ASSERT((ConstantPoolArray::kExtendedFirstOffset &
-               kDoubleAlignmentMask) == 0);  // NOLINT
+#ifdef V8_HOST_ARCH_32_BIT
+STATIC_ASSERT((HeapNumber::kValueOffset & kDoubleAlignmentMask) !=
+              0);  // NOLINT
+#endif
 
 
-INLINE(static HeapObject* EnsureDoubleAligned(Heap* heap, HeapObject* object,
-                                              int size));
-
-static HeapObject* EnsureDoubleAligned(Heap* heap, HeapObject* object,
-                                       int size) {
-  if ((OffsetFrom(object->address()) & kDoubleAlignmentMask) != 0) {
-    heap->CreateFillerObjectAt(object->address(), kPointerSize);
-    return HeapObject::FromAddress(object->address() + kPointerSize);
-  } else {
-    heap->CreateFillerObjectAt(object->address() + size - kPointerSize,
-                               kPointerSize);
-    return object;
+int Heap::GetMaximumFillToAlign(AllocationAlignment alignment) {
+  switch (alignment) {
+    case kWordAligned:
+      return 0;
+    case kDoubleAligned:
+    case kDoubleUnaligned:
+      return kDoubleSize - kPointerSize;
+    case kSimd128Unaligned:
+      return kSimd128Size - kPointerSize;
+    default:
+      UNREACHABLE();
   }
+  return 0;
+}
+
+
+int Heap::GetFillToAlign(Address address, AllocationAlignment alignment) {
+  intptr_t offset = OffsetFrom(address);
+  if (alignment == kDoubleAligned && (offset & kDoubleAlignmentMask) != 0)
+    return kPointerSize;
+  if (alignment == kDoubleUnaligned && (offset & kDoubleAlignmentMask) == 0)
+    return kDoubleSize - kPointerSize;  // No fill if double is always aligned.
+  if (alignment == kSimd128Unaligned) {
+    return (kSimd128Size - (static_cast<int>(offset) + kPointerSize)) &
+           kSimd128AlignmentMask;
+  }
+  return 0;
+}
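
A small self-contained check of the fill rule above, assuming the 32-bit layout where kPointerSize is 4 and kDoubleSize is 8 (the constants below are local stand-ins, not the real heap constants):

#include <cassert>
#include <cstdint>

// Standalone check of the double-alignment fill rule, mirroring GetFillToAlign
// for the kDoubleAligned / kDoubleUnaligned cases.
static int FillToDoubleAlign(uintptr_t address, bool want_aligned) {
  const int kPointerSize = 4;
  const int kDoubleSize = 8;
  const uintptr_t kDoubleAlignmentMask = kDoubleSize - 1;
  bool is_aligned = (address & kDoubleAlignmentMask) == 0;
  if (want_aligned) return is_aligned ? 0 : kPointerSize;
  // For kDoubleUnaligned the double payload must *not* be 8-byte aligned.
  return is_aligned ? kDoubleSize - kPointerSize : 0;
}

int main() {
  assert(FillToDoubleAlign(0x1000, true) == 0);   // already aligned
  assert(FillToDoubleAlign(0x1004, true) == 4);   // needs one word of pre-fill
  assert(FillToDoubleAlign(0x1000, false) == 4);  // push payload off alignment
  assert(FillToDoubleAlign(0x1004, false) == 0);
  return 0;
}
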
+
+
+HeapObject* Heap::PrecedeWithFiller(HeapObject* object, int filler_size) {
+  CreateFillerObjectAt(object->address(), filler_size);
+  return HeapObject::FromAddress(object->address() + filler_size);
+}
+
+
+HeapObject* Heap::AlignWithFiller(HeapObject* object, int object_size,
+                                  int allocation_size,
+                                  AllocationAlignment alignment) {
+  int filler_size = allocation_size - object_size;
+  DCHECK(filler_size > 0);
+  int pre_filler = GetFillToAlign(object->address(), alignment);
+  if (pre_filler) {
+    object = PrecedeWithFiller(object, pre_filler);
+    filler_size -= pre_filler;
+  }
+  if (filler_size)
+    CreateFillerObjectAt(object->address() + object_size, filler_size);
+  return object;
 }
 
 
 HeapObject* Heap::DoubleAlignForDeserialization(HeapObject* object, int size) {
-  return EnsureDoubleAligned(this, object, size);
+  return AlignWithFiller(object, size - kPointerSize, size, kDoubleAligned);
 }
 
 
-enum LoggingAndProfiling {
-  LOGGING_AND_PROFILING_ENABLED,
-  LOGGING_AND_PROFILING_DISABLED
-};
-
-
-enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS };
-
-
-template <MarksHandling marks_handling,
-          LoggingAndProfiling logging_and_profiling_mode>
-class ScavengingVisitor : public StaticVisitorBase {
- public:
-  static void Initialize() {
-    table_.Register(kVisitSeqOneByteString, &EvacuateSeqOneByteString);
-    table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
-    table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
-    table_.Register(kVisitByteArray, &EvacuateByteArray);
-    table_.Register(kVisitFixedArray, &EvacuateFixedArray);
-    table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray);
-    table_.Register(kVisitFixedTypedArray, &EvacuateFixedTypedArray);
-    table_.Register(kVisitFixedFloat64Array, &EvacuateFixedFloat64Array);
-
-    table_.Register(
-        kVisitNativeContext,
-        &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
-            Context::kSize>);
-
-    table_.Register(
-        kVisitConsString,
-        &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
-            ConsString::kSize>);
-
-    table_.Register(
-        kVisitSlicedString,
-        &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
-            SlicedString::kSize>);
-
-    table_.Register(
-        kVisitSymbol,
-        &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
-            Symbol::kSize>);
-
-    table_.Register(
-        kVisitSharedFunctionInfo,
-        &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
-            SharedFunctionInfo::kSize>);
-
-    table_.Register(kVisitJSWeakCollection,
-                    &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
-
-    table_.Register(kVisitJSArrayBuffer,
-                    &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
-
-    table_.Register(kVisitJSTypedArray,
-                    &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
-
-    table_.Register(kVisitJSDataView,
-                    &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
-
-    table_.Register(kVisitJSRegExp,
-                    &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
-
-    if (marks_handling == IGNORE_MARKS) {
-      table_.Register(
-          kVisitJSFunction,
-          &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
-              JSFunction::kSize>);
-    } else {
-      table_.Register(kVisitJSFunction, &EvacuateJSFunction);
-    }
-
-    table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
-                                   kVisitDataObject, kVisitDataObjectGeneric>();
-
-    table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
-                                   kVisitJSObject, kVisitJSObjectGeneric>();
-
-    table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
-                                   kVisitStruct, kVisitStructGeneric>();
-  }
-
-  static VisitorDispatchTable<ScavengingCallback>* GetTable() {
-    return &table_;
-  }
-
- private:
-  enum ObjectContents { DATA_OBJECT, POINTER_OBJECT };
-
-  static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
-    bool should_record = false;
-#ifdef DEBUG
-    should_record = FLAG_heap_stats;
-#endif
-    should_record = should_record || FLAG_log_gc;
-    if (should_record) {
-      if (heap->new_space()->Contains(obj)) {
-        heap->new_space()->RecordAllocation(obj);
-      } else {
-        heap->new_space()->RecordPromotion(obj);
-      }
-    }
-  }
-
-  // Helper function used by CopyObject to copy a source object to an
-  // allocated target object and update the forwarding pointer in the source
-  // object.  Returns the target object.
-  INLINE(static void MigrateObject(Heap* heap, HeapObject* source,
-                                   HeapObject* target, int size)) {
-    // If we migrate into to-space, then the to-space top pointer should be
-    // right after the target object. Incorporate double alignment
-    // over-allocation.
-    DCHECK(!heap->InToSpace(target) ||
-           target->address() + size == heap->new_space()->top() ||
-           target->address() + size + kPointerSize == heap->new_space()->top());
-
-    // Make sure that we do not overwrite the promotion queue which is at
-    // the end of to-space.
-    DCHECK(!heap->InToSpace(target) ||
-           heap->promotion_queue()->IsBelowPromotionQueue(
-               heap->new_space()->top()));
-
-    // Copy the content of source to target.
-    heap->CopyBlock(target->address(), source->address(), size);
-
-    // Set the forwarding address.
-    source->set_map_word(MapWord::FromForwardingAddress(target));
-
-    if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
-      // Update NewSpace stats if necessary.
-      RecordCopiedObject(heap, target);
-      heap->OnMoveEvent(target, source, size);
-    }
-
-    if (marks_handling == TRANSFER_MARKS) {
-      if (Marking::TransferColor(source, target)) {
-        MemoryChunk::IncrementLiveBytesFromGC(target->address(), size);
-      }
-    }
-  }
-
-  template <int alignment>
-  static inline bool SemiSpaceCopyObject(Map* map, HeapObject** slot,
-                                         HeapObject* object, int object_size) {
-    Heap* heap = map->GetHeap();
-
-    int allocation_size = object_size;
-    if (alignment != kObjectAlignment) {
-      DCHECK(alignment == kDoubleAlignment);
-      allocation_size += kPointerSize;
-    }
-
-    DCHECK(heap->AllowedToBeMigrated(object, NEW_SPACE));
-    AllocationResult allocation =
-        heap->new_space()->AllocateRaw(allocation_size);
-
-    HeapObject* target = NULL;  // Initialization to please compiler.
-    if (allocation.To(&target)) {
-      // Order is important here: Set the promotion limit before storing a
-      // filler for double alignment or migrating the object. Otherwise we
-      // may end up overwriting promotion queue entries when we migrate the
-      // object.
-      heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
-
-      if (alignment != kObjectAlignment) {
-        target = EnsureDoubleAligned(heap, target, allocation_size);
-      }
-
-      // Order is important: slot might be inside of the target if target
-      // was allocated over a dead object and slot comes from the store
-      // buffer.
-      *slot = target;
-      MigrateObject(heap, object, target, object_size);
-
-      heap->IncrementSemiSpaceCopiedObjectSize(object_size);
-      return true;
-    }
-    return false;
-  }
-
-
-  template <ObjectContents object_contents, int alignment>
-  static inline bool PromoteObject(Map* map, HeapObject** slot,
-                                   HeapObject* object, int object_size) {
-    Heap* heap = map->GetHeap();
-
-    int allocation_size = object_size;
-    if (alignment != kObjectAlignment) {
-      DCHECK(alignment == kDoubleAlignment);
-      allocation_size += kPointerSize;
-    }
-
-    AllocationResult allocation;
-    if (object_contents == DATA_OBJECT) {
-      DCHECK(heap->AllowedToBeMigrated(object, OLD_DATA_SPACE));
-      allocation = heap->old_data_space()->AllocateRaw(allocation_size);
-    } else {
-      DCHECK(heap->AllowedToBeMigrated(object, OLD_POINTER_SPACE));
-      allocation = heap->old_pointer_space()->AllocateRaw(allocation_size);
-    }
-
-    HeapObject* target = NULL;  // Initialization to please compiler.
-    if (allocation.To(&target)) {
-      if (alignment != kObjectAlignment) {
-        target = EnsureDoubleAligned(heap, target, allocation_size);
-      }
-
-      // Order is important: slot might be inside of the target if target
-      // was allocated over a dead object and slot comes from the store
-      // buffer.
-
-      // Unfortunately, the allocation can also write over the slot if the slot
-      // was in free space and the allocation wrote free list data (such as the
-      // free list map or entry size) over the slot.  We guard against this by
-      // checking that the slot still points to the object being moved.  This
-      // should be sufficient because neither the free list map nor the free
-      // list entry size should look like a new space pointer (the former is an
-      // old space pointer, the latter is word-aligned).
-      if (*slot == object) {
-        *slot = target;
-      }
-      MigrateObject(heap, object, target, object_size);
-
-      if (object_contents == POINTER_OBJECT) {
-        if (map->instance_type() == JS_FUNCTION_TYPE) {
-          heap->promotion_queue()->insert(target,
-                                          JSFunction::kNonWeakFieldsEndOffset);
-        } else {
-          heap->promotion_queue()->insert(target, object_size);
-        }
-      }
-      heap->IncrementPromotedObjectsSize(object_size);
-      return true;
-    }
-    return false;
-  }
-
-
-  template <ObjectContents object_contents, int alignment>
-  static inline void EvacuateObject(Map* map, HeapObject** slot,
-                                    HeapObject* object, int object_size) {
-    SLOW_DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
-    SLOW_DCHECK(object->Size() == object_size);
-    Heap* heap = map->GetHeap();
-
-    if (!heap->ShouldBePromoted(object->address(), object_size)) {
-      // A semi-space copy may fail due to fragmentation. In that case, we
-      // try to promote the object.
-      if (SemiSpaceCopyObject<alignment>(map, slot, object, object_size)) {
-        return;
-      }
-    }
-
-    if (PromoteObject<object_contents, alignment>(map, slot, object,
-                                                  object_size)) {
-      return;
-    }
-
-    // If promotion failed, we try to copy the object to the other semi-space
-    if (SemiSpaceCopyObject<alignment>(map, slot, object, object_size)) return;
-
-    UNREACHABLE();
-  }
-
-
-  static inline void EvacuateJSFunction(Map* map, HeapObject** slot,
-                                        HeapObject* object) {
-    ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
-        JSFunction::kSize>(map, slot, object);
-
-    MapWord map_word = object->map_word();
-    DCHECK(map_word.IsForwardingAddress());
-    HeapObject* target = map_word.ToForwardingAddress();
-
-    MarkBit mark_bit = Marking::MarkBitFrom(target);
-    if (Marking::IsBlack(mark_bit)) {
-      // This object is black and it might not be rescanned by marker.
-      // We should explicitly record code entry slot for compaction because
-      // promotion queue processing (IterateAndMarkPointersToFromSpace) will
-      // miss it as it is not HeapObject-tagged.
-      Address code_entry_slot =
-          target->address() + JSFunction::kCodeEntryOffset;
-      Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
-      map->GetHeap()->mark_compact_collector()->RecordCodeEntrySlot(
-          code_entry_slot, code);
-    }
-  }
-
-
-  static inline void EvacuateFixedArray(Map* map, HeapObject** slot,
-                                        HeapObject* object) {
-    int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
-    EvacuateObject<POINTER_OBJECT, kObjectAlignment>(map, slot, object,
-                                                     object_size);
-  }
-
-
-  static inline void EvacuateFixedDoubleArray(Map* map, HeapObject** slot,
-                                              HeapObject* object) {
-    int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
-    int object_size = FixedDoubleArray::SizeFor(length);
-    EvacuateObject<DATA_OBJECT, kDoubleAlignment>(map, slot, object,
-                                                  object_size);
-  }
-
-
-  static inline void EvacuateFixedTypedArray(Map* map, HeapObject** slot,
-                                             HeapObject* object) {
-    int object_size = reinterpret_cast<FixedTypedArrayBase*>(object)->size();
-    EvacuateObject<DATA_OBJECT, kObjectAlignment>(map, slot, object,
-                                                  object_size);
-  }
-
-
-  static inline void EvacuateFixedFloat64Array(Map* map, HeapObject** slot,
-                                               HeapObject* object) {
-    int object_size = reinterpret_cast<FixedFloat64Array*>(object)->size();
-    EvacuateObject<DATA_OBJECT, kDoubleAlignment>(map, slot, object,
-                                                  object_size);
-  }
-
-
-  static inline void EvacuateByteArray(Map* map, HeapObject** slot,
-                                       HeapObject* object) {
-    int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
-    EvacuateObject<DATA_OBJECT, kObjectAlignment>(map, slot, object,
-                                                  object_size);
-  }
-
-
-  static inline void EvacuateSeqOneByteString(Map* map, HeapObject** slot,
-                                              HeapObject* object) {
-    int object_size = SeqOneByteString::cast(object)
-                          ->SeqOneByteStringSize(map->instance_type());
-    EvacuateObject<DATA_OBJECT, kObjectAlignment>(map, slot, object,
-                                                  object_size);
-  }
-
-
-  static inline void EvacuateSeqTwoByteString(Map* map, HeapObject** slot,
-                                              HeapObject* object) {
-    int object_size = SeqTwoByteString::cast(object)
-                          ->SeqTwoByteStringSize(map->instance_type());
-    EvacuateObject<DATA_OBJECT, kObjectAlignment>(map, slot, object,
-                                                  object_size);
-  }
-
-
-  static inline void EvacuateShortcutCandidate(Map* map, HeapObject** slot,
-                                               HeapObject* object) {
-    DCHECK(IsShortcutCandidate(map->instance_type()));
-
-    Heap* heap = map->GetHeap();
-
-    if (marks_handling == IGNORE_MARKS &&
-        ConsString::cast(object)->unchecked_second() == heap->empty_string()) {
-      HeapObject* first =
-          HeapObject::cast(ConsString::cast(object)->unchecked_first());
-
-      *slot = first;
-
-      if (!heap->InNewSpace(first)) {
-        object->set_map_word(MapWord::FromForwardingAddress(first));
-        return;
-      }
-
-      MapWord first_word = first->map_word();
-      if (first_word.IsForwardingAddress()) {
-        HeapObject* target = first_word.ToForwardingAddress();
-
-        *slot = target;
-        object->set_map_word(MapWord::FromForwardingAddress(target));
-        return;
-      }
-
-      heap->DoScavengeObject(first->map(), slot, first);
-      object->set_map_word(MapWord::FromForwardingAddress(*slot));
-      return;
-    }
-
-    int object_size = ConsString::kSize;
-    EvacuateObject<POINTER_OBJECT, kObjectAlignment>(map, slot, object,
-                                                     object_size);
-  }
-
-  template <ObjectContents object_contents>
-  class ObjectEvacuationStrategy {
-   public:
-    template <int object_size>
-    static inline void VisitSpecialized(Map* map, HeapObject** slot,
-                                        HeapObject* object) {
-      EvacuateObject<object_contents, kObjectAlignment>(map, slot, object,
-                                                        object_size);
-    }
-
-    static inline void Visit(Map* map, HeapObject** slot, HeapObject* object) {
-      int object_size = map->instance_size();
-      EvacuateObject<object_contents, kObjectAlignment>(map, slot, object,
-                                                        object_size);
-    }
-  };
-
-  static VisitorDispatchTable<ScavengingCallback> table_;
-};
-
-
-template <MarksHandling marks_handling,
-          LoggingAndProfiling logging_and_profiling_mode>
-VisitorDispatchTable<ScavengingCallback>
-    ScavengingVisitor<marks_handling, logging_and_profiling_mode>::table_;
-
-
-static void InitializeScavengingVisitorsTables() {
-  ScavengingVisitor<TRANSFER_MARKS,
-                    LOGGING_AND_PROFILING_DISABLED>::Initialize();
-  ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::Initialize();
-  ScavengingVisitor<TRANSFER_MARKS,
-                    LOGGING_AND_PROFILING_ENABLED>::Initialize();
-  ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::Initialize();
+void Heap::RegisterNewArrayBuffer(JSArrayBuffer* buffer) {
+  return array_buffer_tracker()->RegisterNew(buffer);
 }
 
 
-void Heap::SelectScavengingVisitorsTable() {
-  bool logging_and_profiling =
-      FLAG_verify_predictable || isolate()->logger()->is_logging() ||
-      isolate()->cpu_profiler()->is_profiling() ||
-      (isolate()->heap_profiler() != NULL &&
-       isolate()->heap_profiler()->is_tracking_object_moves());
-
-  if (!incremental_marking()->IsMarking()) {
-    if (!logging_and_profiling) {
-      scavenging_visitors_table_.CopyFrom(ScavengingVisitor<
-          IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::GetTable());
-    } else {
-      scavenging_visitors_table_.CopyFrom(ScavengingVisitor<
-          IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::GetTable());
-    }
-  } else {
-    if (!logging_and_profiling) {
-      scavenging_visitors_table_.CopyFrom(ScavengingVisitor<
-          TRANSFER_MARKS, LOGGING_AND_PROFILING_DISABLED>::GetTable());
-    } else {
-      scavenging_visitors_table_.CopyFrom(ScavengingVisitor<
-          TRANSFER_MARKS, LOGGING_AND_PROFILING_ENABLED>::GetTable());
-    }
-
-    if (incremental_marking()->IsCompacting()) {
-      // When compacting forbid short-circuiting of cons-strings.
-      // Scavenging code relies on the fact that new space object
-      // can't be evacuated into evacuation candidate but
-      // short-circuiting violates this assumption.
-      scavenging_visitors_table_.Register(
-          StaticVisitorBase::kVisitShortcutCandidate,
-          scavenging_visitors_table_.GetVisitorById(
-              StaticVisitorBase::kVisitConsString));
-    }
-  }
-}
-
-
-void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
-  SLOW_DCHECK(object->GetIsolate()->heap()->InFromSpace(object));
-  MapWord first_word = object->map_word();
-  SLOW_DCHECK(!first_word.IsForwardingAddress());
-  Map* map = first_word.ToMap();
-  map->GetHeap()->DoScavengeObject(map, p, object);
+void Heap::UnregisterArrayBuffer(JSArrayBuffer* buffer) {
+  return array_buffer_tracker()->Unregister(buffer);
 }
 
 
@@ -2370,20 +2053,21 @@
     old_generation_allocation_limit_ =
         Max(kMinimumOldGenerationAllocationLimit,
             static_cast<intptr_t>(
-                static_cast<double>(initial_old_generation_size_) *
-                (tracer()->AverageSurvivalRate() / 100)));
+                static_cast<double>(old_generation_allocation_limit_) *
+                (tracer()->AverageSurvivalRatio() / 100)));
   }
 }
 
 
 AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
                                           int instance_size) {
-  Object* result;
-  AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE);
+  Object* result = nullptr;
+  AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE);
   if (!allocation.To(&result)) return allocation;
 
   // Map::cast cannot be used due to uninitialized map field.
-  reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
+  reinterpret_cast<Map*>(result)->set_map(
+      reinterpret_cast<Map*>(root(kMetaMapRootIndex)));
   reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
   reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
   // Initialize to only containing tagged fields.
@@ -2393,15 +2077,17 @@
     reinterpret_cast<Map*>(result)
         ->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
   }
-  reinterpret_cast<Map*>(result)->set_inobject_properties(0);
-  reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
+  reinterpret_cast<Map*>(result)->clear_unused();
+  reinterpret_cast<Map*>(result)
+      ->set_inobject_properties_or_constructor_function_index(0);
   reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
   reinterpret_cast<Map*>(result)->set_bit_field(0);
   reinterpret_cast<Map*>(result)->set_bit_field2(0);
   int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
                    Map::OwnsDescriptors::encode(true) |
-                   Map::Counter::encode(Map::kRetainingCounterStart);
+                   Map::ConstructionCounter::encode(Map::kNoSlackTracking);
   reinterpret_cast<Map*>(result)->set_bit_field3(bit_field3);
+  reinterpret_cast<Map*>(result)->set_weak_cell_cache(Smi::FromInt(0));
   return result;
 }
 
@@ -2409,22 +2095,23 @@
 AllocationResult Heap::AllocateMap(InstanceType instance_type,
                                    int instance_size,
                                    ElementsKind elements_kind) {
-  HeapObject* result;
-  AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE);
+  HeapObject* result = nullptr;
+  AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE);
   if (!allocation.To(&result)) return allocation;
 
   result->set_map_no_write_barrier(meta_map());
   Map* map = Map::cast(result);
   map->set_instance_type(instance_type);
   map->set_prototype(null_value(), SKIP_WRITE_BARRIER);
-  map->set_constructor(null_value(), SKIP_WRITE_BARRIER);
+  map->set_constructor_or_backpointer(null_value(), SKIP_WRITE_BARRIER);
   map->set_instance_size(instance_size);
-  map->set_inobject_properties(0);
-  map->set_pre_allocated_property_fields(0);
+  map->clear_unused();
+  map->set_inobject_properties_or_constructor_function_index(0);
   map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
   map->set_dependent_code(DependentCode::cast(empty_fixed_array()),
                           SKIP_WRITE_BARRIER);
-  map->init_back_pointer(undefined_value());
+  map->set_weak_cell_cache(Smi::FromInt(0));
+  map->set_raw_transitions(Smi::FromInt(0));
   map->set_unused_property_fields(0);
   map->set_instance_descriptors(empty_descriptor_array());
   if (FLAG_unbox_double_fields) {
@@ -2432,14 +2119,15 @@
   }
   // Must be called only after |instance_type|, |instance_size| and
   // |layout_descriptor| are set.
-  map->set_visitor_id(StaticVisitorBase::GetVisitorId(map));
+  map->set_visitor_id(Heap::GetStaticVisitorIdForMap(map));
   map->set_bit_field(0);
   map->set_bit_field2(1 << Map::kIsExtensible);
   int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
                    Map::OwnsDescriptors::encode(true) |
-                   Map::Counter::encode(Map::kRetainingCounterStart);
+                   Map::ConstructionCounter::encode(Map::kNoSlackTracking);
   map->set_bit_field3(bit_field3);
   map->set_elements_kind(elements_kind);
+  map->set_new_target_is_base(true);
 
   return map;
 }
@@ -2447,9 +2135,10 @@
 
 AllocationResult Heap::AllocateFillerObject(int size, bool double_align,
                                             AllocationSpace space) {
-  HeapObject* obj;
+  HeapObject* obj = nullptr;
   {
-    AllocationResult allocation = AllocateRaw(size, space, space);
+    AllocationAlignment align = double_align ? kDoubleAligned : kWordAligned;
+    AllocationResult allocation = AllocateRaw(size, space, align);
     if (!allocation.To(&obj)) return allocation;
   }
 #ifdef DEBUG
@@ -2471,6 +2160,7 @@
 
 
 const Heap::ConstantStringTable Heap::constant_string_table[] = {
+    {"", kempty_stringRootIndex},
 #define CONSTANT_STRING_ELEMENT(name, contents) \
   { contents, k##name##RootIndex }              \
   ,
@@ -2489,7 +2179,7 @@
 
 
 bool Heap::CreateInitialMaps() {
-  HeapObject* obj;
+  HeapObject* obj = nullptr;
   {
     AllocationResult allocation = AllocatePartialMap(MAP_TYPE, Map::kSize);
     if (!allocation.To(&obj)) return false;
@@ -2510,8 +2200,6 @@
     ALLOCATE_PARTIAL_MAP(FIXED_ARRAY_TYPE, kVariableSizeSentinel, fixed_array);
     ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, undefined);
     ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, null);
-    ALLOCATE_PARTIAL_MAP(CONSTANT_POOL_ARRAY_TYPE, kVariableSizeSentinel,
-                         constant_pool_array);
 
 #undef ALLOCATE_PARTIAL_MAP
   }
@@ -2524,14 +2212,14 @@
   set_empty_fixed_array(FixedArray::cast(obj));
 
   {
-    AllocationResult allocation = Allocate(null_map(), OLD_POINTER_SPACE);
+    AllocationResult allocation = Allocate(null_map(), OLD_SPACE);
     if (!allocation.To(&obj)) return false;
   }
   set_null_value(Oddball::cast(obj));
   Oddball::cast(obj)->set_kind(Oddball::kNull);
 
   {
-    AllocationResult allocation = Allocate(undefined_map(), OLD_POINTER_SPACE);
+    AllocationResult allocation = Allocate(undefined_map(), OLD_SPACE);
     if (!allocation.To(&obj)) return false;
   }
   set_undefined_value(Oddball::cast(obj));
@@ -2548,17 +2236,10 @@
   }
   set_empty_descriptor_array(DescriptorArray::cast(obj));
 
-  // Allocate the constant pool array.
-  {
-    AllocationResult allocation = AllocateEmptyConstantPoolArray();
-    if (!allocation.To(&obj)) return false;
-  }
-  set_empty_constant_pool_array(ConstantPoolArray::cast(obj));
-
   // Fix the instance_descriptors for the existing maps.
   meta_map()->set_code_cache(empty_fixed_array());
   meta_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
-  meta_map()->init_back_pointer(undefined_value());
+  meta_map()->set_raw_transitions(Smi::FromInt(0));
   meta_map()->set_instance_descriptors(empty_descriptor_array());
   if (FLAG_unbox_double_fields) {
     meta_map()->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
@@ -2567,7 +2248,7 @@
   fixed_array_map()->set_code_cache(empty_fixed_array());
   fixed_array_map()->set_dependent_code(
       DependentCode::cast(empty_fixed_array()));
-  fixed_array_map()->init_back_pointer(undefined_value());
+  fixed_array_map()->set_raw_transitions(Smi::FromInt(0));
   fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
   if (FLAG_unbox_double_fields) {
     fixed_array_map()->set_layout_descriptor(
@@ -2576,7 +2257,7 @@
 
   undefined_map()->set_code_cache(empty_fixed_array());
   undefined_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
-  undefined_map()->init_back_pointer(undefined_value());
+  undefined_map()->set_raw_transitions(Smi::FromInt(0));
   undefined_map()->set_instance_descriptors(empty_descriptor_array());
   if (FLAG_unbox_double_fields) {
     undefined_map()->set_layout_descriptor(
@@ -2585,37 +2266,24 @@
 
   null_map()->set_code_cache(empty_fixed_array());
   null_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
-  null_map()->init_back_pointer(undefined_value());
+  null_map()->set_raw_transitions(Smi::FromInt(0));
   null_map()->set_instance_descriptors(empty_descriptor_array());
   if (FLAG_unbox_double_fields) {
     null_map()->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
   }
 
-  constant_pool_array_map()->set_code_cache(empty_fixed_array());
-  constant_pool_array_map()->set_dependent_code(
-      DependentCode::cast(empty_fixed_array()));
-  constant_pool_array_map()->init_back_pointer(undefined_value());
-  constant_pool_array_map()->set_instance_descriptors(empty_descriptor_array());
-  if (FLAG_unbox_double_fields) {
-    constant_pool_array_map()->set_layout_descriptor(
-        LayoutDescriptor::FastPointerLayout());
-  }
-
   // Fix prototype object for existing maps.
   meta_map()->set_prototype(null_value());
-  meta_map()->set_constructor(null_value());
+  meta_map()->set_constructor_or_backpointer(null_value());
 
   fixed_array_map()->set_prototype(null_value());
-  fixed_array_map()->set_constructor(null_value());
+  fixed_array_map()->set_constructor_or_backpointer(null_value());
 
   undefined_map()->set_prototype(null_value());
-  undefined_map()->set_constructor(null_value());
+  undefined_map()->set_constructor_or_backpointer(null_value());
 
   null_map()->set_prototype(null_value());
-  null_map()->set_constructor(null_value());
-
-  constant_pool_array_map()->set_prototype(null_value());
-  constant_pool_array_map()->set_constructor(null_value());
+  null_map()->set_constructor_or_backpointer(null_value());
 
   {  // Map allocation
 #define ALLOCATE_MAP(instance_type, size, field_name)               \
@@ -2628,18 +2296,34 @@
 #define ALLOCATE_VARSIZE_MAP(instance_type, field_name) \
   ALLOCATE_MAP(instance_type, kVariableSizeSentinel, field_name)
 
+#define ALLOCATE_PRIMITIVE_MAP(instance_type, size, field_name, \
+                               constructor_function_index)      \
+  {                                                             \
+    ALLOCATE_MAP((instance_type), (size), field_name);          \
+    field_name##_map()->SetConstructorFunctionIndex(            \
+        (constructor_function_index));                          \
+  }
+
     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, fixed_cow_array)
     DCHECK(fixed_array_map() != fixed_cow_array_map());
 
     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, scope_info)
-    ALLOCATE_MAP(HEAP_NUMBER_TYPE, HeapNumber::kSize, heap_number)
+    ALLOCATE_PRIMITIVE_MAP(HEAP_NUMBER_TYPE, HeapNumber::kSize, heap_number,
+                           Context::NUMBER_FUNCTION_INDEX)
     ALLOCATE_MAP(MUTABLE_HEAP_NUMBER_TYPE, HeapNumber::kSize,
                  mutable_heap_number)
-    ALLOCATE_MAP(SYMBOL_TYPE, Symbol::kSize, symbol)
+    ALLOCATE_PRIMITIVE_MAP(SYMBOL_TYPE, Symbol::kSize, symbol,
+                           Context::SYMBOL_FUNCTION_INDEX)
+#define ALLOCATE_SIMD128_MAP(TYPE, Type, type, lane_count, lane_type) \
+  ALLOCATE_PRIMITIVE_MAP(SIMD128_VALUE_TYPE, Type::kSize, type,       \
+                         Context::TYPE##_FUNCTION_INDEX)
+    SIMD128_TYPES(ALLOCATE_SIMD128_MAP)
+#undef ALLOCATE_SIMD128_MAP
     ALLOCATE_MAP(FOREIGN_TYPE, Foreign::kSize, foreign)
 
     ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, the_hole);
-    ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, boolean);
+    ALLOCATE_PRIMITIVE_MAP(ODDBALL_TYPE, Oddball::kSize, boolean,
+                           Context::BOOLEAN_FUNCTION_INDEX);
     ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, uninitialized);
     ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, arguments_marker);
     ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, no_interceptor_result_sentinel);
@@ -2652,9 +2336,10 @@
         AllocationResult allocation = AllocateMap(entry.type, entry.size);
         if (!allocation.To(&obj)) return false;
       }
+      Map* map = Map::cast(obj);
+      map->SetConstructorFunctionIndex(Context::STRING_FUNCTION_INDEX);
       // Mark cons string maps as unstable, because their objects can change
       // maps during GC.
-      Map* map = Map::cast(obj);
       if (StringShape(entry.type).IsCons()) map->mark_unstable();
       roots_[entry.index] = map;
     }
@@ -2663,26 +2348,16 @@
       AllocationResult allocation = AllocateMap(EXTERNAL_ONE_BYTE_STRING_TYPE,
                                                 ExternalOneByteString::kSize);
       if (!allocation.To(&obj)) return false;
-      set_native_source_string_map(Map::cast(obj));
+      Map* map = Map::cast(obj);
+      map->SetConstructorFunctionIndex(Context::STRING_FUNCTION_INDEX);
+      set_native_source_string_map(map);
     }
 
-    ALLOCATE_VARSIZE_MAP(STRING_TYPE, undetectable_string)
-    undetectable_string_map()->set_is_undetectable();
-
-    ALLOCATE_VARSIZE_MAP(ONE_BYTE_STRING_TYPE, undetectable_one_byte_string);
-    undetectable_one_byte_string_map()->set_is_undetectable();
-
     ALLOCATE_VARSIZE_MAP(FIXED_DOUBLE_ARRAY_TYPE, fixed_double_array)
     ALLOCATE_VARSIZE_MAP(BYTE_ARRAY_TYPE, byte_array)
+    ALLOCATE_VARSIZE_MAP(BYTECODE_ARRAY_TYPE, bytecode_array)
     ALLOCATE_VARSIZE_MAP(FREE_SPACE_TYPE, free_space)
 
-#define ALLOCATE_EXTERNAL_ARRAY_MAP(Type, type, TYPE, ctype, size)        \
-  ALLOCATE_MAP(EXTERNAL_##TYPE##_ARRAY_TYPE, ExternalArray::kAlignedSize, \
-               external_##type##_array)
-
-    TYPED_ARRAYS(ALLOCATE_EXTERNAL_ARRAY_MAP)
-#undef ALLOCATE_EXTERNAL_ARRAY_MAP
-
 #define ALLOCATE_FIXED_TYPED_ARRAY_MAP(Type, type, TYPE, ctype, size) \
   ALLOCATE_VARSIZE_MAP(FIXED_##TYPE##_ARRAY_TYPE, fixed_##type##_array)
 
@@ -2699,6 +2374,7 @@
     ALLOCATE_MAP(FILLER_TYPE, kPointerSize, one_pointer_filler)
     ALLOCATE_MAP(FILLER_TYPE, 2 * kPointerSize, two_pointer_filler)
 
+    ALLOCATE_VARSIZE_MAP(TRANSITION_ARRAY_TYPE, transition_array)
 
     for (unsigned i = 0; i < arraysize(struct_table); i++) {
       const StructTable& entry = struct_table[i];
@@ -2729,6 +2405,7 @@
     ALLOCATE_MAP(JS_MESSAGE_OBJECT_TYPE, JSMessageObject::kSize, message_object)
     ALLOCATE_MAP(JS_OBJECT_TYPE, JSObject::kHeaderSize + kPointerSize, external)
     external_map()->set_is_extensible(false);
+#undef ALLOCATE_PRIMITIVE_MAP
 #undef ALLOCATE_VARSIZE_MAP
 #undef ALLOCATE_MAP
   }
@@ -2738,19 +2415,16 @@
       ByteArray* byte_array;
       if (!AllocateByteArray(0, TENURED).To(&byte_array)) return false;
       set_empty_byte_array(byte_array);
+
+      BytecodeArray* bytecode_array = nullptr;
+      AllocationResult allocation =
+          AllocateBytecodeArray(0, nullptr, 0, 0, empty_fixed_array());
+      if (!allocation.To(&bytecode_array)) {
+        return false;
+      }
+      set_empty_bytecode_array(bytecode_array);
     }
 
-#define ALLOCATE_EMPTY_EXTERNAL_ARRAY(Type, type, TYPE, ctype, size)  \
-  {                                                                   \
-    ExternalArray* obj;                                               \
-    if (!AllocateEmptyExternalArray(kExternal##Type##Array).To(&obj)) \
-      return false;                                                   \
-    set_empty_external_##type##_array(obj);                           \
-  }
-
-    TYPED_ARRAYS(ALLOCATE_EMPTY_EXTERNAL_ARRAY)
-#undef ALLOCATE_EMPTY_EXTERNAL_ARRAY
-
 #define ALLOCATE_EMPTY_FIXED_TYPED_ARRAY(Type, type, TYPE, ctype, size) \
   {                                                                     \
     FixedTypedArrayBase* obj;                                           \
@@ -2774,11 +2448,11 @@
   int size = HeapNumber::kSize;
   STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxRegularHeapObjectSize);
 
-  AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
+  AllocationSpace space = SelectSpace(pretenure);
 
-  HeapObject* result;
+  HeapObject* result = nullptr;
   {
-    AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
+    AllocationResult allocation = AllocateRaw(size, space, kDoubleUnaligned);
     if (!allocation.To(&result)) return allocation;
   }
 
@@ -2788,14 +2462,39 @@
   return result;
 }
 
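+// Expands to an Allocate##Type() definition for each SIMD128 value type
+// (e.g. Float32x4): allocate the boxed value and copy the given lanes into
+// its payload.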
+#define SIMD_ALLOCATE_DEFINITION(TYPE, Type, type, lane_count, lane_type) \
+  AllocationResult Heap::Allocate##Type(lane_type lanes[lane_count],      \
+                                        PretenureFlag pretenure) {        \
+    int size = Type::kSize;                                               \
+    STATIC_ASSERT(Type::kSize <= Page::kMaxRegularHeapObjectSize);        \
+                                                                          \
+    AllocationSpace space = SelectSpace(pretenure);                       \
+                                                                          \
+    HeapObject* result = nullptr;                                         \
+    {                                                                     \
+      AllocationResult allocation =                                       \
+          AllocateRaw(size, space, kSimd128Unaligned);                    \
+      if (!allocation.To(&result)) return allocation;                     \
+    }                                                                     \
+                                                                          \
+    result->set_map_no_write_barrier(type##_map());                       \
+    Type* instance = Type::cast(result);                                  \
+    for (int i = 0; i < lane_count; i++) {                                \
+      instance->set_lane(i, lanes[i]);                                    \
+    }                                                                     \
+    return result;                                                        \
+  }
+SIMD128_TYPES(SIMD_ALLOCATE_DEFINITION)
+#undef SIMD_ALLOCATE_DEFINITION
+
 
 AllocationResult Heap::AllocateCell(Object* value) {
   int size = Cell::kSize;
   STATIC_ASSERT(Cell::kSize <= Page::kMaxRegularHeapObjectSize);
 
-  HeapObject* result;
+  HeapObject* result = nullptr;
   {
-    AllocationResult allocation = AllocateRaw(size, CELL_SPACE, CELL_SPACE);
+    AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
     if (!allocation.To(&result)) return allocation;
   }
   result->set_map_no_write_barrier(cell_map());
@@ -2808,17 +2507,16 @@
   int size = PropertyCell::kSize;
   STATIC_ASSERT(PropertyCell::kSize <= Page::kMaxRegularHeapObjectSize);
 
-  HeapObject* result;
-  AllocationResult allocation =
-      AllocateRaw(size, PROPERTY_CELL_SPACE, PROPERTY_CELL_SPACE);
+  HeapObject* result = nullptr;
+  AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
   if (!allocation.To(&result)) return allocation;
 
   result->set_map_no_write_barrier(global_property_cell_map());
   PropertyCell* cell = PropertyCell::cast(result);
   cell->set_dependent_code(DependentCode::cast(empty_fixed_array()),
                            SKIP_WRITE_BARRIER);
+  cell->set_property_details(PropertyDetails(Smi::FromInt(0)));
   cell->set_value(the_hole_value());
-  cell->set_type(HeapType::None());
   return result;
 }
 
@@ -2826,19 +2524,33 @@
 AllocationResult Heap::AllocateWeakCell(HeapObject* value) {
   int size = WeakCell::kSize;
   STATIC_ASSERT(WeakCell::kSize <= Page::kMaxRegularHeapObjectSize);
-  HeapObject* result;
+  HeapObject* result = nullptr;
   {
-    AllocationResult allocation =
-        AllocateRaw(size, OLD_POINTER_SPACE, OLD_POINTER_SPACE);
+    AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
     if (!allocation.To(&result)) return allocation;
   }
   result->set_map_no_write_barrier(weak_cell_map());
   WeakCell::cast(result)->initialize(value);
-  WeakCell::cast(result)->set_next(undefined_value(), SKIP_WRITE_BARRIER);
+  WeakCell::cast(result)->clear_next(the_hole_value());
   return result;
 }
 
 
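+// Allocates a tenured TransitionArray with |capacity| slots, initially
+// filled with undefined values.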
+AllocationResult Heap::AllocateTransitionArray(int capacity) {
+  DCHECK(capacity > 0);
+  HeapObject* raw_array = nullptr;
+  {
+    AllocationResult allocation = AllocateRawFixedArray(capacity, TENURED);
+    if (!allocation.To(&raw_array)) return allocation;
+  }
+  raw_array->set_map_no_write_barrier(transition_array_map());
+  TransitionArray* array = TransitionArray::cast(raw_array);
+  array->set_length(capacity);
+  MemsetPointer(array->data_start(), undefined_value(), capacity);
+  return array;
+}
+
+
 void Heap::CreateApiObjects() {
   HandleScope scope(isolate());
   Factory* factory = isolate()->factory();
@@ -2910,9 +2622,11 @@
   set_minus_zero_value(*factory->NewHeapNumber(-0.0, IMMUTABLE, TENURED));
   DCHECK(std::signbit(minus_zero_value()->Number()) != 0);
 
-  set_nan_value(
-      *factory->NewHeapNumber(base::OS::nan_value(), IMMUTABLE, TENURED));
+  set_nan_value(*factory->NewHeapNumber(
+      std::numeric_limits<double>::quiet_NaN(), IMMUTABLE, TENURED));
   set_infinity_value(*factory->NewHeapNumber(V8_INFINITY, IMMUTABLE, TENURED));
+  set_minus_infinity_value(
+      *factory->NewHeapNumber(-V8_INFINITY, IMMUTABLE, TENURED));
 
   // The hole has not been created yet, but we want to put something
   // predictable in the gaps in the string table, so lets make that Smi zero.
@@ -2923,44 +2637,47 @@
 
   // Finish initializing oddballs after creating the string table.
   Oddball::Initialize(isolate(), factory->undefined_value(), "undefined",
-                      factory->nan_value(), Oddball::kUndefined);
+                      factory->nan_value(), "undefined", Oddball::kUndefined);
 
   // Initialize the null_value.
   Oddball::Initialize(isolate(), factory->null_value(), "null",
-                      handle(Smi::FromInt(0), isolate()), Oddball::kNull);
+                      handle(Smi::FromInt(0), isolate()), "object",
+                      Oddball::kNull);
 
   set_true_value(*factory->NewOddball(factory->boolean_map(), "true",
                                       handle(Smi::FromInt(1), isolate()),
-                                      Oddball::kTrue));
+                                      "boolean", Oddball::kTrue));
 
   set_false_value(*factory->NewOddball(factory->boolean_map(), "false",
                                        handle(Smi::FromInt(0), isolate()),
-                                       Oddball::kFalse));
+                                       "boolean", Oddball::kFalse));
 
   set_the_hole_value(*factory->NewOddball(factory->the_hole_map(), "hole",
                                           handle(Smi::FromInt(-1), isolate()),
-                                          Oddball::kTheHole));
+                                          "undefined", Oddball::kTheHole));
 
-  set_uninitialized_value(*factory->NewOddball(
-      factory->uninitialized_map(), "uninitialized",
-      handle(Smi::FromInt(-1), isolate()), Oddball::kUninitialized));
+  set_uninitialized_value(
+      *factory->NewOddball(factory->uninitialized_map(), "uninitialized",
+                           handle(Smi::FromInt(-1), isolate()), "undefined",
+                           Oddball::kUninitialized));
 
-  set_arguments_marker(*factory->NewOddball(
-      factory->arguments_marker_map(), "arguments_marker",
-      handle(Smi::FromInt(-4), isolate()), Oddball::kArgumentMarker));
+  set_arguments_marker(
+      *factory->NewOddball(factory->arguments_marker_map(), "arguments_marker",
+                           handle(Smi::FromInt(-4), isolate()), "undefined",
+                           Oddball::kArgumentMarker));
 
   set_no_interceptor_result_sentinel(*factory->NewOddball(
       factory->no_interceptor_result_sentinel_map(),
       "no_interceptor_result_sentinel", handle(Smi::FromInt(-2), isolate()),
-      Oddball::kOther));
+      "undefined", Oddball::kOther));
 
   set_termination_exception(*factory->NewOddball(
       factory->termination_exception_map(), "termination_exception",
-      handle(Smi::FromInt(-3), isolate()), Oddball::kOther));
+      handle(Smi::FromInt(-3), isolate()), "undefined", Oddball::kOther));
 
   set_exception(*factory->NewOddball(factory->exception_map(), "exception",
                                      handle(Smi::FromInt(-5), isolate()),
-                                     Oddball::kException));
+                                     "undefined", Oddball::kException));
 
   for (unsigned i = 0; i < arraysize(constant_string_table); i++) {
     Handle<String> str =
@@ -2968,14 +2685,16 @@
     roots_[constant_string_table[i].index] = *str;
   }
 
+  // The {hidden_string} is special because it is an empty string, but does not
+  // match any string (even the {empty_string}) when looked up in properties.
   // Allocate the hidden string which is used to identify the hidden properties
   // in JSObjects. The hash code has a special value so that it will not match
   // the empty string when searching for the property. It cannot be part of the
   // loop above because it needs to be allocated manually with the special
   // hash code in place. The hash code for the hidden_string is zero to ensure
   // that it will always be at the first entry in property descriptors.
-  hidden_string_ = *factory->NewOneByteInternalizedString(
-      OneByteVector("", 0), String::kEmptyStringHash);
+  set_hidden_string(*factory->NewOneByteInternalizedString(
+      OneByteVector("", 0), String::kEmptyStringHash));
 
   // Create the code_stubs dictionary. The initial size is set to avoid
   // expanding the dictionary during bootstrapping.
@@ -2994,22 +2713,35 @@
 
   {
     HandleScope scope(isolate());
-#define SYMBOL_INIT(name)                               \
-  Handle<Symbol> name = factory->NewPrivateOwnSymbol(); \
-  roots_[k##name##RootIndex] = *name;
+#define SYMBOL_INIT(name)                                              \
+  {                                                                    \
+    Handle<String> name##d = factory->NewStringFromStaticChars(#name); \
+    Handle<Symbol> symbol(isolate()->factory()->NewPrivateSymbol());   \
+    symbol->set_name(*name##d);                                        \
+    roots_[k##name##RootIndex] = *symbol;                              \
+  }
     PRIVATE_SYMBOL_LIST(SYMBOL_INIT)
 #undef SYMBOL_INIT
   }
 
   {
     HandleScope scope(isolate());
-#define SYMBOL_INIT(name, varname, description)                             \
+#define SYMBOL_INIT(name, description)                                      \
   Handle<Symbol> name = factory->NewSymbol();                               \
   Handle<String> name##d = factory->NewStringFromStaticChars(#description); \
   name->set_name(*name##d);                                                 \
   roots_[k##name##RootIndex] = *name;
     PUBLIC_SYMBOL_LIST(SYMBOL_INIT)
 #undef SYMBOL_INIT
+
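+// Like the public symbols above, but additionally flagged as well-known.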
+#define SYMBOL_INIT(name, description)                                      \
+  Handle<Symbol> name = factory->NewSymbol();                               \
+  Handle<String> name##d = factory->NewStringFromStaticChars(#description); \
+  name->set_is_well_known_symbol(true);                                     \
+  name->set_name(*name##d);                                                 \
+  roots_[k##name##RootIndex] = *name;
+    WELL_KNOWN_SYMBOL_LIST(SYMBOL_INIT)
+#undef SYMBOL_INIT
   }
 
   CreateFixedStubs();
@@ -3020,6 +2752,11 @@
   Runtime::InitializeIntrinsicFunctionNames(isolate(), intrinsic_names);
   set_intrinsic_function_names(*intrinsic_names);
 
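+  // The empty properties dictionary is shared, so it must be copied on any
+  // capacity change instead of being grown in place.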
+  Handle<NameDictionary> empty_properties_dictionary =
+      NameDictionary::New(isolate(), 0, TENURED);
+  empty_properties_dictionary->SetRequiresCopyOnCapacityChange();
+  set_empty_properties_dictionary(*empty_properties_dictionary);
+
   set_number_string_cache(
       *factory->NewFixedArray(kInitialNumberStringCacheSize * 2, TENURED));
 
@@ -3037,6 +2774,15 @@
   set_natives_source_cache(
       *factory->NewFixedArray(Natives::GetBuiltinsCount()));
 
+  set_experimental_natives_source_cache(
+      *factory->NewFixedArray(ExperimentalNatives::GetBuiltinsCount()));
+
+  set_extra_natives_source_cache(
+      *factory->NewFixedArray(ExtraNatives::GetBuiltinsCount()));
+
+  set_experimental_extra_natives_source_cache(
+      *factory->NewFixedArray(ExperimentalExtraNatives::GetBuiltinsCount()));
+
   set_undefined_cell(*factory->NewCell(factory->undefined_value()));
 
   // The symbol registry is initialized lazily.
@@ -3050,6 +2796,59 @@
   // Number of queued microtasks stored in Isolate::pending_microtask_count().
   set_microtask_queue(empty_fixed_array());
 
+  {
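+    // Create the dummy feedback vector: one slot per IC kind, each preset to
+    // the megamorphic sentinel. The DCHECKs below pin the slot indices that
+    // the TypeFeedbackVector::kDummy*Slot constants rely on.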
+    StaticFeedbackVectorSpec spec;
+    FeedbackVectorSlot load_ic_slot = spec.AddLoadICSlot();
+    FeedbackVectorSlot keyed_load_ic_slot = spec.AddKeyedLoadICSlot();
+    FeedbackVectorSlot store_ic_slot = spec.AddStoreICSlot();
+    FeedbackVectorSlot keyed_store_ic_slot = spec.AddKeyedStoreICSlot();
+
+    DCHECK_EQ(load_ic_slot,
+              FeedbackVectorSlot(TypeFeedbackVector::kDummyLoadICSlot));
+    DCHECK_EQ(keyed_load_ic_slot,
+              FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
+    DCHECK_EQ(store_ic_slot,
+              FeedbackVectorSlot(TypeFeedbackVector::kDummyStoreICSlot));
+    DCHECK_EQ(keyed_store_ic_slot,
+              FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
+
+    Handle<TypeFeedbackMetadata> dummy_metadata =
+        TypeFeedbackMetadata::New(isolate(), &spec);
+    Handle<TypeFeedbackVector> dummy_vector =
+        TypeFeedbackVector::New(isolate(), dummy_metadata);
+
+    Object* megamorphic = *TypeFeedbackVector::MegamorphicSentinel(isolate());
+    dummy_vector->Set(load_ic_slot, megamorphic, SKIP_WRITE_BARRIER);
+    dummy_vector->Set(keyed_load_ic_slot, megamorphic, SKIP_WRITE_BARRIER);
+    dummy_vector->Set(store_ic_slot, megamorphic, SKIP_WRITE_BARRIER);
+    dummy_vector->Set(keyed_store_ic_slot, megamorphic, SKIP_WRITE_BARRIER);
+
+    set_dummy_vector(*dummy_vector);
+  }
+
+  {
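+    // The empty weak cell is created already cleared and is shared as the
+    // placeholder entry in cleared optimized code maps.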
+    Handle<WeakCell> cell = factory->NewWeakCell(factory->undefined_value());
+    set_empty_weak_cell(*cell);
+    cell->clear();
+
+    Handle<FixedArray> cleared_optimized_code_map =
+        factory->NewFixedArray(SharedFunctionInfo::kEntriesStart, TENURED);
+    cleared_optimized_code_map->set(SharedFunctionInfo::kSharedCodeIndex,
+                                    *cell);
+    STATIC_ASSERT(SharedFunctionInfo::kEntriesStart == 1 &&
+                  SharedFunctionInfo::kSharedCodeIndex == 0);
+    set_cleared_optimized_code_map(*cleared_optimized_code_map);
+  }
+
+  set_detached_contexts(empty_fixed_array());
+  set_retained_maps(ArrayList::cast(empty_fixed_array()));
+
+  set_weak_object_to_code_table(
+      *WeakHashTable::New(isolate(), 16, USE_DEFAULT_MINIMUM_CAPACITY,
+                          TENURED));
+
+  set_script_list(Smi::FromInt(0));
+
   Handle<SeededNumberDictionary> slow_element_dictionary =
       SeededNumberDictionary::New(isolate(), 0, TENURED);
   slow_element_dictionary->set_requires_slow_elements();
@@ -3057,12 +2856,30 @@
 
   set_materialized_objects(*factory->NewFixedArray(0, TENURED));
 
-  // Handling of script id generation is in Factory::NewScript.
+  // Handling of script id generation is in Heap::NextScriptId().
   set_last_script_id(Smi::FromInt(v8::UnboundScript::kNoScriptId));
 
-  set_allocation_sites_scratchpad(
-      *factory->NewFixedArray(kAllocationSiteScratchpadSize, TENURED));
-  InitializeAllocationSitesScratchpad();
+  // Allocate the empty script.
+  Handle<Script> script = factory->NewScript(factory->empty_string());
+  script->set_type(Script::TYPE_NATIVE);
+  set_empty_script(*script);
+
+  Handle<PropertyCell> cell = factory->NewPropertyCell();
+  cell->set_value(Smi::FromInt(Isolate::kArrayProtectorValid));
+  set_array_protector(*cell);
+
+  cell = factory->NewPropertyCell();
+  cell->set_value(the_hole_value());
+  set_empty_property_cell(*cell);
+
+  set_weak_stack_trace_list(Smi::FromInt(0));
+
+  set_noscript_shared_function_infos(Smi::FromInt(0));
+
+  // Will be filled in by Interpreter::Initialize().
+  set_interpreter_table(
+      *interpreter::Interpreter::CreateUninitializedInterpreterTable(
+          isolate()));
 
   // Initialize keyed lookup cache.
   isolate_->keyed_lookup_cache()->Clear();
@@ -3079,128 +2896,42 @@
 
 
 bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
-  RootListIndex writable_roots[] = {
-      kStoreBufferTopRootIndex,
-      kStackLimitRootIndex,
-      kNumberStringCacheRootIndex,
-      kInstanceofCacheFunctionRootIndex,
-      kInstanceofCacheMapRootIndex,
-      kInstanceofCacheAnswerRootIndex,
-      kCodeStubsRootIndex,
-      kNonMonomorphicCacheRootIndex,
-      kPolymorphicCodeCacheRootIndex,
-      kLastScriptIdRootIndex,
-      kEmptyScriptRootIndex,
-      kRealStackLimitRootIndex,
-      kArgumentsAdaptorDeoptPCOffsetRootIndex,
-      kConstructStubDeoptPCOffsetRootIndex,
-      kGetterStubDeoptPCOffsetRootIndex,
-      kSetterStubDeoptPCOffsetRootIndex,
-      kStringTableRootIndex,
-  };
+  switch (root_index) {
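+    // Only caches and other slots that the runtime mutates may be written
+    // after initialization; everything else is treated as read-only.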
+    case kStoreBufferTopRootIndex:
+    case kNumberStringCacheRootIndex:
+    case kInstanceofCacheFunctionRootIndex:
+    case kInstanceofCacheMapRootIndex:
+    case kInstanceofCacheAnswerRootIndex:
+    case kCodeStubsRootIndex:
+    case kNonMonomorphicCacheRootIndex:
+    case kPolymorphicCodeCacheRootIndex:
+    case kEmptyScriptRootIndex:
+    case kSymbolRegistryRootIndex:
+    case kScriptListRootIndex:
+    case kMaterializedObjectsRootIndex:
+    case kMicrotaskQueueRootIndex:
+    case kDetachedContextsRootIndex:
+    case kWeakObjectToCodeTableRootIndex:
+    case kRetainedMapsRootIndex:
+    case kNoScriptSharedFunctionInfosRootIndex:
+    case kWeakStackTraceListRootIndex:
+// Smi values
+#define SMI_ENTRY(type, name, Name) case k##Name##RootIndex:
+      SMI_ROOT_LIST(SMI_ENTRY)
+#undef SMI_ENTRY
+    // String table
+    case kStringTableRootIndex:
+      return true;
 
-  for (unsigned int i = 0; i < arraysize(writable_roots); i++) {
-    if (root_index == writable_roots[i]) return true;
+    default:
+      return false;
   }
-  return false;
 }
 
 
 bool Heap::RootCanBeTreatedAsConstant(RootListIndex root_index) {
   return !RootCanBeWrittenAfterInitialization(root_index) &&
-         !InNewSpace(roots_array_start()[root_index]);
-}
-
-
-Object* RegExpResultsCache::Lookup(Heap* heap, String* key_string,
-                                   Object* key_pattern, ResultsCacheType type) {
-  FixedArray* cache;
-  if (!key_string->IsInternalizedString()) return Smi::FromInt(0);
-  if (type == STRING_SPLIT_SUBSTRINGS) {
-    DCHECK(key_pattern->IsString());
-    if (!key_pattern->IsInternalizedString()) return Smi::FromInt(0);
-    cache = heap->string_split_cache();
-  } else {
-    DCHECK(type == REGEXP_MULTIPLE_INDICES);
-    DCHECK(key_pattern->IsFixedArray());
-    cache = heap->regexp_multiple_cache();
-  }
-
-  uint32_t hash = key_string->Hash();
-  uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
-                    ~(kArrayEntriesPerCacheEntry - 1));
-  if (cache->get(index + kStringOffset) == key_string &&
-      cache->get(index + kPatternOffset) == key_pattern) {
-    return cache->get(index + kArrayOffset);
-  }
-  index =
-      ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
-  if (cache->get(index + kStringOffset) == key_string &&
-      cache->get(index + kPatternOffset) == key_pattern) {
-    return cache->get(index + kArrayOffset);
-  }
-  return Smi::FromInt(0);
-}
-
-
-void RegExpResultsCache::Enter(Isolate* isolate, Handle<String> key_string,
-                               Handle<Object> key_pattern,
-                               Handle<FixedArray> value_array,
-                               ResultsCacheType type) {
-  Factory* factory = isolate->factory();
-  Handle<FixedArray> cache;
-  if (!key_string->IsInternalizedString()) return;
-  if (type == STRING_SPLIT_SUBSTRINGS) {
-    DCHECK(key_pattern->IsString());
-    if (!key_pattern->IsInternalizedString()) return;
-    cache = factory->string_split_cache();
-  } else {
-    DCHECK(type == REGEXP_MULTIPLE_INDICES);
-    DCHECK(key_pattern->IsFixedArray());
-    cache = factory->regexp_multiple_cache();
-  }
-
-  uint32_t hash = key_string->Hash();
-  uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
-                    ~(kArrayEntriesPerCacheEntry - 1));
-  if (cache->get(index + kStringOffset) == Smi::FromInt(0)) {
-    cache->set(index + kStringOffset, *key_string);
-    cache->set(index + kPatternOffset, *key_pattern);
-    cache->set(index + kArrayOffset, *value_array);
-  } else {
-    uint32_t index2 =
-        ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
-    if (cache->get(index2 + kStringOffset) == Smi::FromInt(0)) {
-      cache->set(index2 + kStringOffset, *key_string);
-      cache->set(index2 + kPatternOffset, *key_pattern);
-      cache->set(index2 + kArrayOffset, *value_array);
-    } else {
-      cache->set(index2 + kStringOffset, Smi::FromInt(0));
-      cache->set(index2 + kPatternOffset, Smi::FromInt(0));
-      cache->set(index2 + kArrayOffset, Smi::FromInt(0));
-      cache->set(index + kStringOffset, *key_string);
-      cache->set(index + kPatternOffset, *key_pattern);
-      cache->set(index + kArrayOffset, *value_array);
-    }
-  }
-  // If the array is a reasonably short list of substrings, convert it into a
-  // list of internalized strings.
-  if (type == STRING_SPLIT_SUBSTRINGS && value_array->length() < 100) {
-    for (int i = 0; i < value_array->length(); i++) {
-      Handle<String> str(String::cast(value_array->get(i)), isolate);
-      Handle<String> internalized_str = factory->InternalizeString(str);
-      value_array->set(i, *internalized_str);
-    }
-  }
-  // Convert backing store to a copy-on-write array.
-  value_array->set_map_no_write_barrier(*factory->fixed_cow_array_map());
-}
-
-
-void RegExpResultsCache::Clear(FixedArray* cache) {
-  for (int i = 0; i < kRegExpResultsCacheSize; i++) {
-    cache->set(i, Smi::FromInt(0));
-  }
+         !InNewSpace(root(root_index));
 }
 
 
@@ -3226,69 +2957,6 @@
 }
 
 
-void Heap::FlushAllocationSitesScratchpad() {
-  for (int i = 0; i < allocation_sites_scratchpad_length_; i++) {
-    allocation_sites_scratchpad()->set_undefined(i);
-  }
-  allocation_sites_scratchpad_length_ = 0;
-}
-
-
-void Heap::InitializeAllocationSitesScratchpad() {
-  DCHECK(allocation_sites_scratchpad()->length() ==
-         kAllocationSiteScratchpadSize);
-  for (int i = 0; i < kAllocationSiteScratchpadSize; i++) {
-    allocation_sites_scratchpad()->set_undefined(i);
-  }
-}
-
-
-void Heap::AddAllocationSiteToScratchpad(AllocationSite* site,
-                                         ScratchpadSlotMode mode) {
-  if (allocation_sites_scratchpad_length_ < kAllocationSiteScratchpadSize) {
-    // We cannot use the normal write-barrier because slots need to be
-    // recorded with non-incremental marking as well. We have to explicitly
-    // record the slot to take evacuation candidates into account.
-    allocation_sites_scratchpad()->set(allocation_sites_scratchpad_length_,
-                                       site, SKIP_WRITE_BARRIER);
-    Object** slot = allocation_sites_scratchpad()->RawFieldOfElementAt(
-        allocation_sites_scratchpad_length_);
-
-    if (mode == RECORD_SCRATCHPAD_SLOT) {
-      // We need to allow slots buffer overflow here since the evacuation
-      // candidates are not part of the global list of old space pages and
-      // releasing an evacuation candidate due to a slots buffer overflow
-      // results in lost pages.
-      mark_compact_collector()->RecordSlot(slot, slot, *slot,
-                                           SlotsBuffer::IGNORE_OVERFLOW);
-    }
-    allocation_sites_scratchpad_length_++;
-  }
-}
-
-
-Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
-  return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
-}
-
-
-Heap::RootListIndex Heap::RootIndexForExternalArrayType(
-    ExternalArrayType array_type) {
-  switch (array_type) {
-#define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
-  case kExternal##Type##Array:                                  \
-    return kExternal##Type##ArrayMapRootIndex;
-
-    TYPED_ARRAYS(ARRAY_TYPE_TO_ROOT_INDEX)
-#undef ARRAY_TYPE_TO_ROOT_INDEX
-
-    default:
-      UNREACHABLE();
-      return kUndefinedValueRootIndex;
-  }
-}
-
-
 Map* Heap::MapForFixedTypedArray(ExternalArrayType array_type) {
   return Map::cast(roots_[RootIndexForFixedTypedArray(array_type)]);
 }
@@ -3311,23 +2979,6 @@
 }
 
 
-Heap::RootListIndex Heap::RootIndexForEmptyExternalArray(
-    ElementsKind elementsKind) {
-  switch (elementsKind) {
-#define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
-  case EXTERNAL_##TYPE##_ELEMENTS:                                \
-    return kEmptyExternal##Type##ArrayRootIndex;
-
-    TYPED_ARRAYS(ELEMENT_KIND_TO_ROOT_INDEX)
-#undef ELEMENT_KIND_TO_ROOT_INDEX
-
-    default:
-      UNREACHABLE();
-      return kUndefinedValueRootIndex;
-  }
-}
-
-
 Heap::RootListIndex Heap::RootIndexForEmptyFixedTypedArray(
     ElementsKind elementsKind) {
   switch (elementsKind) {
@@ -3344,12 +2995,6 @@
 }
 
 
-ExternalArray* Heap::EmptyExternalArrayForMap(Map* map) {
-  return ExternalArray::cast(
-      roots_[RootIndexForEmptyExternalArray(map->elements_kind())]);
-}
-
-
 FixedTypedArrayBase* Heap::EmptyFixedTypedArrayForMap(Map* map) {
   return FixedTypedArrayBase::cast(
       roots_[RootIndexForEmptyFixedTypedArray(map->elements_kind())]);
@@ -3360,8 +3005,8 @@
                                        PretenureFlag pretenure) {
   // Statically ensure that it is safe to allocate foreigns in paged spaces.
   STATIC_ASSERT(Foreign::kSize <= Page::kMaxRegularHeapObjectSize);
-  AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
-  Foreign* result;
+  AllocationSpace space = (pretenure == TENURED) ? OLD_SPACE : NEW_SPACE;
+  Foreign* result = nullptr;
   AllocationResult allocation = Allocate(foreign_map(), space);
   if (!allocation.To(&result)) return allocation;
   result->set_foreign_address(address);
@@ -3374,10 +3019,10 @@
     v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
   }
   int size = ByteArray::SizeFor(length);
-  AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
-  HeapObject* result;
+  AllocationSpace space = SelectSpace(pretenure);
+  HeapObject* result = nullptr;
   {
-    AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
+    AllocationResult allocation = AllocateRaw(size, space);
     if (!allocation.To(&result)) return allocation;
   }
 
@@ -3387,46 +3032,88 @@
 }
 
 
+AllocationResult Heap::AllocateBytecodeArray(int length,
+                                             const byte* const raw_bytecodes,
+                                             int frame_size,
+                                             int parameter_count,
+                                             FixedArray* constant_pool) {
+  if (length < 0 || length > BytecodeArray::kMaxLength) {
+    v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
+  }
+  // Bytecode array is pretenured, so constant pool array should be too.
+  DCHECK(!InNewSpace(constant_pool));
+
+  int size = BytecodeArray::SizeFor(length);
+  HeapObject* result = nullptr;
+  {
+    AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
+    if (!allocation.To(&result)) return allocation;
+  }
+
+  result->set_map_no_write_barrier(bytecode_array_map());
+  BytecodeArray* instance = BytecodeArray::cast(result);
+  instance->set_length(length);
+  instance->set_frame_size(frame_size);
+  instance->set_parameter_count(parameter_count);
+  instance->set_constant_pool(constant_pool);
+  CopyBytes(instance->GetFirstBytecodeAddress(), raw_bytecodes, length);
+
+  return result;
+}
+
+
 void Heap::CreateFillerObjectAt(Address addr, int size) {
   if (size == 0) return;
   HeapObject* filler = HeapObject::FromAddress(addr);
   if (size == kPointerSize) {
-    filler->set_map_no_write_barrier(one_pointer_filler_map());
+    filler->set_map_no_write_barrier(
+        reinterpret_cast<Map*>(root(kOnePointerFillerMapRootIndex)));
   } else if (size == 2 * kPointerSize) {
-    filler->set_map_no_write_barrier(two_pointer_filler_map());
+    filler->set_map_no_write_barrier(
+        reinterpret_cast<Map*>(root(kTwoPointerFillerMapRootIndex)));
   } else {
-    filler->set_map_no_write_barrier(free_space_map());
-    FreeSpace::cast(filler)->set_size(size);
+    DCHECK_GT(size, 2 * kPointerSize);
+    filler->set_map_no_write_barrier(
+        reinterpret_cast<Map*>(root(kFreeSpaceMapRootIndex)));
+    FreeSpace::cast(filler)->nobarrier_set_size(size);
   }
+  // At this point, we may be deserializing the heap from a snapshot, and
+  // none of the maps have been created yet; they are still NULL.
+  DCHECK((filler->map() == NULL && !deserialization_complete_) ||
+         filler->map()->IsMap());
 }
 
 
 bool Heap::CanMoveObjectStart(HeapObject* object) {
+  if (!FLAG_move_object_start) return false;
+
   Address address = object->address();
-  bool is_in_old_pointer_space = InOldPointerSpace(address);
-  bool is_in_old_data_space = InOldDataSpace(address);
 
   if (lo_space()->Contains(object)) return false;
 
   Page* page = Page::FromAddress(address);
   // We can move the object start if:
-  // (1) the object is not in old pointer or old data space,
+  // (1) the object is not in old space,
   // (2) the page of the object was already swept,
   // (3) the page was already concurrently swept. This case is an optimization
   // for concurrent sweeping. The WasSwept predicate for concurrently swept
   // pages is set after sweeping all pages.
-  return (!is_in_old_pointer_space && !is_in_old_data_space) ||
-         page->WasSwept() || page->SweepingCompleted();
+  return !InOldSpace(address) || page->WasSwept() || page->SweepingCompleted();
 }
 
 
-void Heap::AdjustLiveBytes(Address address, int by, InvocationMode mode) {
-  if (incremental_marking()->IsMarking() &&
-      Marking::IsBlack(Marking::MarkBitFrom(address))) {
-    if (mode == FROM_GC) {
-      MemoryChunk::IncrementLiveBytesFromGC(address, by);
+void Heap::AdjustLiveBytes(HeapObject* object, int by, InvocationMode mode) {
+  // As long as the inspected object is black and we are currently not iterating
+  // the heap using HeapIterator, we can update the live byte count. We cannot
+  // update while using HeapIterator because the iterator is temporarily
+  // marking the whole object graph, without updating live bytes.
+  if (!in_heap_iterator() &&
+      !mark_compact_collector()->sweeping_in_progress() &&
+      Marking::IsBlack(Marking::MarkBitFrom(object->address()))) {
+    if (mode == SEQUENTIAL_TO_SWEEPER) {
+      MemoryChunk::IncrementLiveBytesFromGC(object, by);
     } else {
-      MemoryChunk::IncrementLiveBytesFromMutator(address, by);
+      MemoryChunk::IncrementLiveBytesFromMutator(object, by);
     }
   }
 }
@@ -3434,6 +3121,8 @@
 
 FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
                                          int elements_to_trim) {
+  DCHECK(!object->IsFixedTypedArrayBase());
+  DCHECK(!object->IsByteArray());
   const int element_size = object->IsFixedArray() ? kPointerSize : kDoubleSize;
   const int bytes_to_trim = elements_to_trim * element_size;
   Map* map = object->map();
@@ -3471,8 +3160,8 @@
       FixedArrayBase::cast(HeapObject::FromAddress(new_start));
 
   // Maintain consistency of live bytes during incremental marking
-  marking()->TransferMark(object->address(), new_start);
-  AdjustLiveBytes(new_start, -bytes_to_trim, Heap::FROM_MUTATOR);
+  Marking::TransferMark(this, object->address(), new_start);
+  AdjustLiveBytes(new_object, -bytes_to_trim, Heap::CONCURRENT_TO_SWEEPER);
 
   // Notify the heap profiler of change in object layout.
   OnMoveEvent(new_object, object, new_object->Size());
@@ -3481,22 +3170,44 @@
 
 
 // Force instantiation of templatized method.
-template
-void Heap::RightTrimFixedArray<Heap::FROM_GC>(FixedArrayBase*, int);
-template
-void Heap::RightTrimFixedArray<Heap::FROM_MUTATOR>(FixedArrayBase*, int);
+template void Heap::RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(
+    FixedArrayBase*, int);
+template void Heap::RightTrimFixedArray<Heap::CONCURRENT_TO_SWEEPER>(
+    FixedArrayBase*, int);
 
 
 template<Heap::InvocationMode mode>
 void Heap::RightTrimFixedArray(FixedArrayBase* object, int elements_to_trim) {
-  const int element_size = object->IsFixedArray() ? kPointerSize : kDoubleSize;
-  const int bytes_to_trim = elements_to_trim * element_size;
+  const int len = object->length();
+  DCHECK_LE(elements_to_trim, len);
+  DCHECK_GE(elements_to_trim, 0);
+
+  int bytes_to_trim;
+  if (object->IsFixedTypedArrayBase()) {
+    InstanceType type = object->map()->instance_type();
+    bytes_to_trim =
+        FixedTypedArrayBase::TypedArraySize(type, len) -
+        FixedTypedArrayBase::TypedArraySize(type, len - elements_to_trim);
+  } else if (object->IsByteArray()) {
+    int new_size = ByteArray::SizeFor(len - elements_to_trim);
+    bytes_to_trim = ByteArray::SizeFor(len) - new_size;
+    DCHECK_GE(bytes_to_trim, 0);
+  } else {
+    const int element_size =
+        object->IsFixedArray() ? kPointerSize : kDoubleSize;
+    bytes_to_trim = elements_to_trim * element_size;
+  }
 
   // For now this trick is only applied to objects in new and paged space.
   DCHECK(object->map() != fixed_cow_array_map());
 
-  const int len = object->length();
-  DCHECK(elements_to_trim < len);
+  if (bytes_to_trim == 0) {
+    // No need to create a filler or update live byte counters; just
+    // initialize the header of the trimmed array.
+    object->synchronized_set_length(len - elements_to_trim);
+    return;
+  }
 
   // Calculate location of new array end.
   Address new_end = object->address() + object->Size() - bytes_to_trim;
@@ -3517,7 +3228,7 @@
   object->synchronized_set_length(len - elements_to_trim);
 
   // Maintain consistency of live bytes during incremental marking
-  AdjustLiveBytes(object->address(), -bytes_to_trim, mode);
+  AdjustLiveBytes(object, -bytes_to_trim, mode);
 
   // Notify the heap profiler of change in object layout. The array may not be
   // moved during GC, and size has to be adjusted nevertheless.
@@ -3528,22 +3239,23 @@
 }
 
 
-AllocationResult Heap::AllocateExternalArray(int length,
-                                             ExternalArrayType array_type,
-                                             void* external_pointer,
-                                             PretenureFlag pretenure) {
-  int size = ExternalArray::kAlignedSize;
-  AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
-  HeapObject* result;
+AllocationResult Heap::AllocateFixedTypedArrayWithExternalPointer(
+    int length, ExternalArrayType array_type, void* external_pointer,
+    PretenureFlag pretenure) {
+  int size = FixedTypedArrayBase::kHeaderSize;
+  AllocationSpace space = SelectSpace(pretenure);
+  HeapObject* result = nullptr;
   {
-    AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
+    AllocationResult allocation = AllocateRaw(size, space);
     if (!allocation.To(&result)) return allocation;
   }
 
-  result->set_map_no_write_barrier(MapForExternalArrayType(array_type));
-  ExternalArray::cast(result)->set_length(length);
-  ExternalArray::cast(result)->set_external_pointer(external_pointer);
-  return result;
+  result->set_map_no_write_barrier(MapForFixedTypedArray(array_type));
+  FixedTypedArrayBase* elements = FixedTypedArrayBase::cast(result);
+  elements->set_base_pointer(Smi::FromInt(0), SKIP_WRITE_BARRIER);
+  elements->set_external_pointer(external_pointer, SKIP_WRITE_BARRIER);
+  elements->set_length(length);
+  return elements;
 }
 
 static void ForFixedTypedArray(ExternalArrayType array_type, int* element_size,
@@ -3568,41 +3280,38 @@
 
 AllocationResult Heap::AllocateFixedTypedArray(int length,
                                                ExternalArrayType array_type,
+                                               bool initialize,
                                                PretenureFlag pretenure) {
   int element_size;
   ElementsKind elements_kind;
   ForFixedTypedArray(array_type, &element_size, &elements_kind);
   int size = OBJECT_POINTER_ALIGN(length * element_size +
                                   FixedTypedArrayBase::kDataOffset);
-#ifndef V8_HOST_ARCH_64_BIT
-  if (array_type == kExternalFloat64Array) {
-    size += kPointerSize;
-  }
-#endif
-  AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
+  AllocationSpace space = SelectSpace(pretenure);
 
-  HeapObject* object;
-  AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
+  HeapObject* object = nullptr;
+  AllocationResult allocation = AllocateRaw(
+      size, space,
+      array_type == kExternalFloat64Array ? kDoubleAligned : kWordAligned);
   if (!allocation.To(&object)) return allocation;
 
-  if (array_type == kExternalFloat64Array) {
-    object = EnsureDoubleAligned(this, object, size);
-  }
-
-  object->set_map(MapForFixedTypedArray(array_type));
+  object->set_map_no_write_barrier(MapForFixedTypedArray(array_type));
   FixedTypedArrayBase* elements = FixedTypedArrayBase::cast(object);
+  elements->set_base_pointer(elements, SKIP_WRITE_BARRIER);
+  elements->set_external_pointer(
+      ExternalReference::fixed_typed_array_base_data_offset().address(),
+      SKIP_WRITE_BARRIER);
   elements->set_length(length);
-  memset(elements->DataPtr(), 0, elements->DataSize());
+  if (initialize) memset(elements->DataPtr(), 0, elements->DataSize());
   return elements;
 }
 
 
 AllocationResult Heap::AllocateCode(int object_size, bool immovable) {
   DCHECK(IsAligned(static_cast<intptr_t>(object_size), kCodeAlignment));
-  AllocationResult allocation =
-      AllocateRaw(object_size, CODE_SPACE, CODE_SPACE);
+  AllocationResult allocation = AllocateRaw(object_size, CODE_SPACE);
 
-  HeapObject* result;
+  HeapObject* result = nullptr;
   if (!allocation.To(&result)) return allocation;
 
   if (immovable) {
@@ -3623,8 +3332,10 @@
 
   result->set_map_no_write_barrier(code_map());
   Code* code = Code::cast(result);
+  DCHECK(IsAligned(bit_cast<intptr_t>(code->address()), kCodeAlignment));
   DCHECK(isolate_->code_range() == NULL || !isolate_->code_range()->valid() ||
-         isolate_->code_range()->contains(code->address()));
+         isolate_->code_range()->contains(code->address()) ||
+         object_size <= code_space()->AreaSize());
   code->set_gc_metadata(Smi::FromInt(0));
   code->set_ic_age(global_ic_age_);
   return code;
@@ -3633,21 +3344,11 @@
 
 AllocationResult Heap::CopyCode(Code* code) {
   AllocationResult allocation;
-  HeapObject* new_constant_pool;
-  if (FLAG_enable_ool_constant_pool &&
-      code->constant_pool() != empty_constant_pool_array()) {
-    // Copy the constant pool, since edits to the copied code may modify
-    // the constant pool.
-    allocation = CopyConstantPoolArray(code->constant_pool());
-    if (!allocation.To(&new_constant_pool)) return allocation;
-  } else {
-    new_constant_pool = empty_constant_pool_array();
-  }
 
-  HeapObject* result;
+  HeapObject* result = nullptr;
   // Allocate an object the same size as the code object.
   int obj_size = code->Size();
-  allocation = AllocateRaw(obj_size, CODE_SPACE, CODE_SPACE);
+  allocation = AllocateRaw(obj_size, CODE_SPACE);
   if (!allocation.To(&result)) return allocation;
 
   // Copy code object.
@@ -3656,36 +3357,25 @@
   CopyBlock(new_addr, old_addr, obj_size);
   Code* new_code = Code::cast(result);
 
-  // Update the constant pool.
-  new_code->set_constant_pool(new_constant_pool);
-
   // Relocate the copy.
+  DCHECK(IsAligned(bit_cast<intptr_t>(new_code->address()), kCodeAlignment));
   DCHECK(isolate_->code_range() == NULL || !isolate_->code_range()->valid() ||
-         isolate_->code_range()->contains(code->address()));
+         isolate_->code_range()->contains(code->address()) ||
+         obj_size <= code_space()->AreaSize());
   new_code->Relocate(new_addr - old_addr);
   return new_code;
 }
 
 
 AllocationResult Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
-  // Allocate ByteArray and ConstantPoolArray before the Code object, so that we
-  // do not risk leaving uninitialized Code object (and breaking the heap).
-  ByteArray* reloc_info_array;
+  // Allocate ByteArray before the Code object, so that we do not risk
+  // leaving uninitialized Code object (and breaking the heap).
+  ByteArray* reloc_info_array = nullptr;
   {
     AllocationResult allocation =
         AllocateByteArray(reloc_info.length(), TENURED);
     if (!allocation.To(&reloc_info_array)) return allocation;
   }
-  HeapObject* new_constant_pool;
-  if (FLAG_enable_ool_constant_pool &&
-      code->constant_pool() != empty_constant_pool_array()) {
-    // Copy the constant pool, since edits to the copied code may modify
-    // the constant pool.
-    AllocationResult allocation = CopyConstantPoolArray(code->constant_pool());
-    if (!allocation.To(&new_constant_pool)) return allocation;
-  } else {
-    new_constant_pool = empty_constant_pool_array();
-  }
 
   int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);
 
@@ -3696,9 +3386,8 @@
   size_t relocation_offset =
       static_cast<size_t>(code->instruction_end() - old_addr);
 
-  HeapObject* result;
-  AllocationResult allocation =
-      AllocateRaw(new_obj_size, CODE_SPACE, CODE_SPACE);
+  HeapObject* result = nullptr;
+  AllocationResult allocation = AllocateRaw(new_obj_size, CODE_SPACE);
   if (!allocation.To(&result)) return allocation;
 
   // Copy code object.
@@ -3710,16 +3399,16 @@
   Code* new_code = Code::cast(result);
   new_code->set_relocation_info(reloc_info_array);
 
-  // Update constant pool.
-  new_code->set_constant_pool(new_constant_pool);
-
   // Copy patched rinfo.
   CopyBytes(new_code->relocation_start(), reloc_info.start(),
             static_cast<size_t>(reloc_info.length()));
 
   // Relocate the copy.
+  DCHECK(IsAligned(bit_cast<intptr_t>(new_code->address()), kCodeAlignment));
   DCHECK(isolate_->code_range() == NULL || !isolate_->code_range()->valid() ||
-         isolate_->code_range()->contains(code->address()));
+         isolate_->code_range()->contains(code->address()) ||
+         new_obj_size <= code_space()->AreaSize());
+
   new_code->Relocate(new_addr - old_addr);
 
 #ifdef VERIFY_HEAP
@@ -3744,16 +3433,12 @@
                                 AllocationSite* allocation_site) {
   DCHECK(gc_state_ == NOT_IN_GC);
   DCHECK(map->instance_type() != MAP_TYPE);
-  // If allocation failures are disallowed, we may allocate in a different
-  // space when new space is full and the object is not a large object.
-  AllocationSpace retry_space =
-      (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
   int size = map->instance_size();
   if (allocation_site != NULL) {
     size += AllocationMemento::kSize;
   }
-  HeapObject* result;
-  AllocationResult allocation = AllocateRaw(size, space, retry_space);
+  HeapObject* result = nullptr;
+  AllocationResult allocation = AllocateRaw(size, space);
   if (!allocation.To(&result)) return allocation;
   // No need for write barrier since object is white and map is in old space.
   result->set_map_no_write_barrier(map);
@@ -3777,6 +3462,14 @@
   // fixed array (e.g. Heap::empty_fixed_array()).  Currently, the object
   // verification code has to cope with (temporarily) invalid objects.  See
   // for example, JSArray::JSArrayVerify.
+  InitializeJSObjectBody(obj, map, JSObject::kHeaderSize);
+}
+
+
+void Heap::InitializeJSObjectBody(JSObject* obj, Map* map, int start_offset) {
+  if (start_offset == map->instance_size()) return;
+  DCHECK_LT(start_offset, map->instance_size());
+
   Object* filler;
   // We cannot always fill with one_pointer_filler_map because objects
   // created from API functions expect their internal fields to be initialized
@@ -3784,22 +3477,23 @@
   // Pre-allocated fields need to be initialized with undefined_value as well
   // so that object accesses before the constructor completes (e.g. in the
   // debugger) will not cause a crash.
-  if (map->constructor()->IsJSFunction() &&
-      JSFunction::cast(map->constructor())
-          ->IsInobjectSlackTrackingInProgress()) {
+
+  // In case of Array subclassing the |map| could already be transitioned
+  // to different elements kind from the initial map on which we track slack.
+  Map* initial_map = map->FindRootMap();
+  if (initial_map->IsInobjectSlackTrackingInProgress()) {
     // We might want to shrink the object later.
-    DCHECK(obj->GetInternalFieldCount() == 0);
     filler = Heap::one_pointer_filler_map();
   } else {
     filler = Heap::undefined_value();
   }
-  obj->InitializeBody(map, Heap::undefined_value(), filler);
+  obj->InitializeBody(map, start_offset, Heap::undefined_value(), filler);
+  initial_map->InobjectSlackTrackingStep();
 }
 
 
 AllocationResult Heap::AllocateJSObjectFromMap(
-    Map* map, PretenureFlag pretenure, bool allocate_properties,
-    AllocationSite* allocation_site) {
+    Map* map, PretenureFlag pretenure, AllocationSite* allocation_site) {
   // JSFunctions should be allocated using AllocateFunction to be
   // properly initialized.
   DCHECK(map->instance_type() != JS_FUNCTION_TYPE);
@@ -3807,32 +3501,19 @@
   // Both types of global objects should be allocated using
   // AllocateGlobalObject to be properly initialized.
   DCHECK(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
-  DCHECK(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
 
   // Allocate the backing storage for the properties.
-  FixedArray* properties;
-  if (allocate_properties) {
-    int prop_size = map->InitialPropertiesLength();
-    DCHECK(prop_size >= 0);
-    {
-      AllocationResult allocation = AllocateFixedArray(prop_size, pretenure);
-      if (!allocation.To(&properties)) return allocation;
-    }
-  } else {
-    properties = empty_fixed_array();
-  }
+  FixedArray* properties = empty_fixed_array();
 
   // Allocate the JSObject.
-  int size = map->instance_size();
-  AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, pretenure);
-  JSObject* js_obj;
+  AllocationSpace space = SelectSpace(pretenure);
+  JSObject* js_obj = nullptr;
   AllocationResult allocation = Allocate(map, space, allocation_site);
   if (!allocation.To(&js_obj)) return allocation;
 
   // Initialize the JSObject.
   InitializeJSObjectFromMap(js_obj, properties, map);
-  DCHECK(js_obj->HasFastElements() || js_obj->HasExternalArrayElements() ||
-         js_obj->HasFixedTypedArrayElements());
+  DCHECK(js_obj->HasFastElements() || js_obj->HasFixedTypedArrayElements());
   return js_obj;
 }
 
@@ -3844,11 +3525,11 @@
 
   // Allocate the object based on the constructors initial map.
   AllocationResult allocation = AllocateJSObjectFromMap(
-      constructor->initial_map(), pretenure, true, allocation_site);
+      constructor->initial_map(), pretenure, allocation_site);
 #ifdef DEBUG
   // Make sure result is NOT a global object if valid.
-  HeapObject* obj;
-  DCHECK(!allocation.To(&obj) || !obj->IsGlobalObject());
+  HeapObject* obj = nullptr;
+  DCHECK(!allocation.To(&obj) || !obj->IsJSGlobalObject());
 #endif
   return allocation;
 }
@@ -3858,51 +3539,31 @@
   // Make the clone.
   Map* map = source->map();
 
-  // We can only clone normal objects or arrays. Copying anything else
+  // We can only clone regexps, normal objects or arrays. Copying anything else
   // will break invariants.
-  CHECK(map->instance_type() == JS_OBJECT_TYPE ||
+  CHECK(map->instance_type() == JS_REGEXP_TYPE ||
+        map->instance_type() == JS_OBJECT_TYPE ||
         map->instance_type() == JS_ARRAY_TYPE);
 
   int object_size = map->instance_size();
-  HeapObject* clone;
+  HeapObject* clone = nullptr;
 
   DCHECK(site == NULL || AllocationSite::CanTrack(map->instance_type()));
 
-  WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
+  int adjusted_object_size =
+      site != NULL ? object_size + AllocationMemento::kSize : object_size;
+  AllocationResult allocation = AllocateRaw(adjusted_object_size, NEW_SPACE);
+  if (!allocation.To(&clone)) return allocation;
 
-  // If we're forced to always allocate, we use the general allocation
-  // functions which may leave us with an object in old space.
-  if (always_allocate()) {
-    {
-      AllocationResult allocation =
-          AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
-      if (!allocation.To(&clone)) return allocation;
-    }
-    Address clone_address = clone->address();
-    CopyBlock(clone_address, source->address(), object_size);
-    // Update write barrier for all fields that lie beyond the header.
-    RecordWrites(clone_address, JSObject::kHeaderSize,
-                 (object_size - JSObject::kHeaderSize) / kPointerSize);
-  } else {
-    wb_mode = SKIP_WRITE_BARRIER;
+  SLOW_DCHECK(InNewSpace(clone));
+  // Since we know the clone is allocated in new space, we can copy
+  // the contents without worrying about updating the write barrier.
+  CopyBlock(clone->address(), source->address(), object_size);
 
-    {
-      int adjusted_object_size =
-          site != NULL ? object_size + AllocationMemento::kSize : object_size;
-      AllocationResult allocation =
-          AllocateRaw(adjusted_object_size, NEW_SPACE, NEW_SPACE);
-      if (!allocation.To(&clone)) return allocation;
-    }
-    SLOW_DCHECK(InNewSpace(clone));
-    // Since we know the clone is allocated in new space, we can copy
-    // the contents without worrying about updating the write barrier.
-    CopyBlock(clone->address(), source->address(), object_size);
-
-    if (site != NULL) {
-      AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
-          reinterpret_cast<Address>(clone) + object_size);
-      InitializeAllocationMemento(alloc_memento, site);
-    }
+  if (site != NULL) {
+    AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
+        reinterpret_cast<Address>(clone) + object_size);
+    InitializeAllocationMemento(alloc_memento, site);
   }
 
   SLOW_DCHECK(JSObject::cast(clone)->GetElementsKind() ==
@@ -3911,7 +3572,7 @@
   FixedArray* properties = FixedArray::cast(source->properties());
   // Update elements if necessary.
   if (elements->length() > 0) {
-    FixedArrayBase* elem;
+    FixedArrayBase* elem = nullptr;
     {
       AllocationResult allocation;
       if (elements->map() == fixed_cow_array_map()) {
@@ -3923,16 +3584,16 @@
       }
       if (!allocation.To(&elem)) return allocation;
     }
-    JSObject::cast(clone)->set_elements(elem, wb_mode);
+    JSObject::cast(clone)->set_elements(elem, SKIP_WRITE_BARRIER);
   }
   // Update properties if necessary.
   if (properties->length() > 0) {
-    FixedArray* prop;
+    FixedArray* prop = nullptr;
     {
       AllocationResult allocation = CopyFixedArray(properties);
       if (!allocation.To(&prop)) return allocation;
     }
-    JSObject::cast(clone)->set_properties(prop, wb_mode);
+    JSObject::cast(clone)->set_properties(prop, SKIP_WRITE_BARRIER);
   }
   // Return the new clone.
   return clone;
@@ -3949,9 +3610,9 @@
 static inline void WriteTwoByteData(Vector<const char> vector, uint16_t* chars,
                                     int len) {
   const uint8_t* stream = reinterpret_cast<const uint8_t*>(vector.start());
-  unsigned stream_length = vector.length();
+  size_t stream_length = vector.length();
   while (stream_length != 0) {
-    unsigned consumed = 0;
+    size_t consumed = 0;
     uint32_t c = unibrow::Utf8::ValueOf(stream, stream_length, &consumed);
     DCHECK(c != unibrow::Utf8::kBadChar);
     DCHECK(consumed <= stream_length);
@@ -4002,12 +3663,11 @@
     map = internalized_string_map();
     size = SeqTwoByteString::SizeFor(chars);
   }
-  AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, TENURED);
 
   // Allocate string.
-  HeapObject* result;
+  HeapObject* result = nullptr;
   {
-    AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
+    AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
     if (!allocation.To(&result)) return allocation;
   }
 
@@ -4045,11 +3705,11 @@
   DCHECK_GE(String::kMaxLength, length);
   int size = SeqOneByteString::SizeFor(length);
   DCHECK(size <= SeqOneByteString::kMaxSize);
-  AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
+  AllocationSpace space = SelectSpace(pretenure);
 
-  HeapObject* result;
+  HeapObject* result = nullptr;
   {
-    AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
+    AllocationResult allocation = AllocateRaw(size, space);
     if (!allocation.To(&result)) return allocation;
   }
 
@@ -4069,11 +3729,11 @@
   DCHECK_GE(String::kMaxLength, length);
   int size = SeqTwoByteString::SizeFor(length);
   DCHECK(size <= SeqTwoByteString::kMaxSize);
-  AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
+  AllocationSpace space = SelectSpace(pretenure);
 
-  HeapObject* result;
+  HeapObject* result = nullptr;
   {
-    AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
+    AllocationResult allocation = AllocateRaw(size, space);
     if (!allocation.To(&result)) return allocation;
   }
 
@@ -4088,10 +3748,9 @@
 
 AllocationResult Heap::AllocateEmptyFixedArray() {
   int size = FixedArray::SizeFor(0);
-  HeapObject* result;
+  HeapObject* result = nullptr;
   {
-    AllocationResult allocation =
-        AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
+    AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
     if (!allocation.To(&result)) return allocation;
   }
   // Initialize the object.
@@ -4101,19 +3760,13 @@
 }
 
 
-AllocationResult Heap::AllocateEmptyExternalArray(
-    ExternalArrayType array_type) {
-  return AllocateExternalArray(0, array_type, NULL, TENURED);
-}
-
-
 AllocationResult Heap::CopyAndTenureFixedCOWArray(FixedArray* src) {
   if (!InNewSpace(src)) {
     return src;
   }
 
   int len = src->length();
-  HeapObject* obj;
+  HeapObject* obj = nullptr;
   {
     AllocationResult allocation = AllocateRawFixedArray(len, TENURED);
     if (!allocation.To(&obj)) return allocation;
@@ -4122,7 +3775,7 @@
   FixedArray* result = FixedArray::cast(obj);
   result->set_length(len);
 
-  // Copy the content
+  // Copy the content.
   DisallowHeapAllocation no_gc;
   WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
   for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
@@ -4137,13 +3790,36 @@
 
 AllocationResult Heap::AllocateEmptyFixedTypedArray(
     ExternalArrayType array_type) {
-  return AllocateFixedTypedArray(0, array_type, TENURED);
+  return AllocateFixedTypedArray(0, array_type, false, TENURED);
+}
+
+
+AllocationResult Heap::CopyFixedArrayAndGrow(FixedArray* src, int grow_by,
+                                             PretenureFlag pretenure) {
+  int old_len = src->length();
+  int new_len = old_len + grow_by;
+  DCHECK(new_len >= old_len);
+  HeapObject* obj = nullptr;
+  {
+    AllocationResult allocation = AllocateRawFixedArray(new_len, pretenure);
+    if (!allocation.To(&obj)) return allocation;
+  }
+  obj->set_map_no_write_barrier(fixed_array_map());
+  FixedArray* result = FixedArray::cast(obj);
+  result->set_length(new_len);
+
+  // Copy the content.
+  DisallowHeapAllocation no_gc;
+  WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
+  for (int i = 0; i < old_len; i++) result->set(i, src->get(i), mode);
+  MemsetPointer(result->data_start() + old_len, undefined_value(), grow_by);
+  return result;
 }
 
 
 AllocationResult Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
   int len = src->length();
-  HeapObject* obj;
+  HeapObject* obj = nullptr;
   {
     AllocationResult allocation = AllocateRawFixedArray(len, NOT_TENURED);
     if (!allocation.To(&obj)) return allocation;
@@ -4158,7 +3834,7 @@
   FixedArray* result = FixedArray::cast(obj);
   result->set_length(len);
 
-  // Copy the content
+  // Copy the content.
   DisallowHeapAllocation no_gc;
   WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
   for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
@@ -4169,7 +3845,7 @@
 AllocationResult Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
                                                    Map* map) {
   int len = src->length();
-  HeapObject* obj;
+  HeapObject* obj = nullptr;
   {
     AllocationResult allocation = AllocateRawFixedDoubleArray(len, NOT_TENURED);
     if (!allocation.To(&obj)) return allocation;
@@ -4182,40 +3858,15 @@
 }
 
 
-AllocationResult Heap::CopyConstantPoolArrayWithMap(ConstantPoolArray* src,
-                                                    Map* map) {
-  HeapObject* obj;
-  if (src->is_extended_layout()) {
-    ConstantPoolArray::NumberOfEntries small(src,
-                                             ConstantPoolArray::SMALL_SECTION);
-    ConstantPoolArray::NumberOfEntries extended(
-        src, ConstantPoolArray::EXTENDED_SECTION);
-    AllocationResult allocation =
-        AllocateExtendedConstantPoolArray(small, extended);
-    if (!allocation.To(&obj)) return allocation;
-  } else {
-    ConstantPoolArray::NumberOfEntries small(src,
-                                             ConstantPoolArray::SMALL_SECTION);
-    AllocationResult allocation = AllocateConstantPoolArray(small);
-    if (!allocation.To(&obj)) return allocation;
-  }
-  obj->set_map_no_write_barrier(map);
-  CopyBlock(obj->address() + ConstantPoolArray::kFirstEntryOffset,
-            src->address() + ConstantPoolArray::kFirstEntryOffset,
-            src->size() - ConstantPoolArray::kFirstEntryOffset);
-  return obj;
-}
-
-
 AllocationResult Heap::AllocateRawFixedArray(int length,
                                              PretenureFlag pretenure) {
   if (length < 0 || length > FixedArray::kMaxLength) {
     v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
   }
   int size = FixedArray::SizeFor(length);
-  AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, pretenure);
+  AllocationSpace space = SelectSpace(pretenure);
 
-  return AllocateRaw(size, space, OLD_POINTER_SPACE);
+  return AllocateRaw(size, space);
 }
 
 
@@ -4227,7 +3878,7 @@
   if (length == 0) return empty_fixed_array();
 
   DCHECK(!InNewSpace(filler));
-  HeapObject* result;
+  HeapObject* result = nullptr;
   {
     AllocationResult allocation = AllocateRawFixedArray(length, pretenure);
     if (!allocation.To(&result)) return allocation;
@@ -4249,7 +3900,7 @@
 AllocationResult Heap::AllocateUninitializedFixedArray(int length) {
   if (length == 0) return empty_fixed_array();
 
-  HeapObject* obj;
+  HeapObject* obj = nullptr;
   {
     AllocationResult allocation = AllocateRawFixedArray(length, NOT_TENURED);
     if (!allocation.To(&obj)) return allocation;
@@ -4265,7 +3916,7 @@
     int length, PretenureFlag pretenure) {
   if (length == 0) return empty_fixed_array();
 
-  HeapObject* elements;
+  HeapObject* elements = nullptr;
   AllocationResult allocation = AllocateRawFixedDoubleArray(length, pretenure);
   if (!allocation.To(&elements)) return allocation;
 
@@ -4278,86 +3929,19 @@
 AllocationResult Heap::AllocateRawFixedDoubleArray(int length,
                                                    PretenureFlag pretenure) {
   if (length < 0 || length > FixedDoubleArray::kMaxLength) {
-    v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
+    v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
   }
   int size = FixedDoubleArray::SizeFor(length);
-#ifndef V8_HOST_ARCH_64_BIT
-  size += kPointerSize;
-#endif
-  AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
+  AllocationSpace space = SelectSpace(pretenure);
 
-  HeapObject* object;
+  HeapObject* object = nullptr;
   {
-    AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
+    AllocationResult allocation = AllocateRaw(size, space, kDoubleAligned);
     if (!allocation.To(&object)) return allocation;
   }
 
-  return EnsureDoubleAligned(this, object, size);
-}
-
-
-AllocationResult Heap::AllocateConstantPoolArray(
-    const ConstantPoolArray::NumberOfEntries& small) {
-  CHECK(small.are_in_range(0, ConstantPoolArray::kMaxSmallEntriesPerType));
-  int size = ConstantPoolArray::SizeFor(small);
-#ifndef V8_HOST_ARCH_64_BIT
-  size += kPointerSize;
-#endif
-  AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
-
-  HeapObject* object;
-  {
-    AllocationResult allocation = AllocateRaw(size, space, OLD_POINTER_SPACE);
-    if (!allocation.To(&object)) return allocation;
-  }
-  object = EnsureDoubleAligned(this, object, size);
-  object->set_map_no_write_barrier(constant_pool_array_map());
-
-  ConstantPoolArray* constant_pool = ConstantPoolArray::cast(object);
-  constant_pool->Init(small);
-  constant_pool->ClearPtrEntries(isolate());
-  return constant_pool;
-}
-
-
-AllocationResult Heap::AllocateExtendedConstantPoolArray(
-    const ConstantPoolArray::NumberOfEntries& small,
-    const ConstantPoolArray::NumberOfEntries& extended) {
-  CHECK(small.are_in_range(0, ConstantPoolArray::kMaxSmallEntriesPerType));
-  CHECK(extended.are_in_range(0, kMaxInt));
-  int size = ConstantPoolArray::SizeForExtended(small, extended);
-#ifndef V8_HOST_ARCH_64_BIT
-  size += kPointerSize;
-#endif
-  AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
-
-  HeapObject* object;
-  {
-    AllocationResult allocation = AllocateRaw(size, space, OLD_POINTER_SPACE);
-    if (!allocation.To(&object)) return allocation;
-  }
-  object = EnsureDoubleAligned(this, object, size);
-  object->set_map_no_write_barrier(constant_pool_array_map());
-
-  ConstantPoolArray* constant_pool = ConstantPoolArray::cast(object);
-  constant_pool->InitExtended(small, extended);
-  constant_pool->ClearPtrEntries(isolate());
-  return constant_pool;
-}
-
-
-AllocationResult Heap::AllocateEmptyConstantPoolArray() {
-  ConstantPoolArray::NumberOfEntries small(0, 0, 0, 0);
-  int size = ConstantPoolArray::SizeFor(small);
-  HeapObject* result;
-  {
-    AllocationResult allocation =
-        AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
-    if (!allocation.To(&result)) return allocation;
-  }
-  result->set_map_no_write_barrier(constant_pool_array_map());
-  ConstantPoolArray::cast(result)->Init(small);
-  return result;
+  return object;
 }
 
 
@@ -4365,9 +3949,8 @@
   // Statically ensure that it is safe to allocate symbols in paged spaces.
   STATIC_ASSERT(Symbol::kSize <= Page::kMaxRegularHeapObjectSize);
 
-  HeapObject* result;
-  AllocationResult allocation =
-      AllocateRaw(Symbol::kSize, OLD_POINTER_SPACE, OLD_POINTER_SPACE);
+  HeapObject* result = nullptr;
+  AllocationResult allocation = AllocateRaw(Symbol::kSize, OLD_SPACE);
   if (!allocation.To(&result)) return allocation;
 
   result->set_map_no_write_barrier(symbol_map());
@@ -4384,7 +3967,7 @@
   Symbol::cast(result)
       ->set_hash_field(Name::kIsNotArrayIndexMask | (hash << Name::kHashShift));
   Symbol::cast(result)->set_name(undefined_value());
-  Symbol::cast(result)->set_flags(Smi::FromInt(0));
+  Symbol::cast(result)->set_flags(0);
 
   DCHECK(!Symbol::cast(result)->is_private());
   return result;
@@ -4405,10 +3988,9 @@
       return exception();
   }
   int size = map->instance_size();
-  AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
-  Struct* result;
+  Struct* result = nullptr;
   {
-    AllocationResult allocation = Allocate(map, space);
+    AllocationResult allocation = Allocate(map, OLD_SPACE);
     if (!allocation.To(&result)) return allocation;
   }
   result->InitializeBody(size);
@@ -4435,150 +4017,212 @@
 }
 
 
-void Heap::IdleMarkCompact(const char* message) {
-  bool uncommit = false;
-  if (gc_count_at_last_idle_gc_ == gc_count_) {
-    // No GC since the last full GC, the mutator is probably not active.
-    isolate_->compilation_cache()->Clear();
-    uncommit = true;
+static double ComputeMutatorUtilization(double mutator_speed, double gc_speed) {
+  const double kMinMutatorUtilization = 0.0;
+  const double kConservativeGcSpeedInBytesPerMillisecond = 200000;
+  if (mutator_speed == 0) return kMinMutatorUtilization;
+  if (gc_speed == 0) gc_speed = kConservativeGcSpeedInBytesPerMillisecond;
+  // Derivation:
+  // mutator_utilization = mutator_time / (mutator_time + gc_time)
+  // mutator_time = 1 / mutator_speed
+  // gc_time = 1 / gc_speed
+  // mutator_utilization = (1 / mutator_speed) /
+  //                       (1 / mutator_speed + 1 / gc_speed)
+  // mutator_utilization = gc_speed / (mutator_speed + gc_speed)
+  return gc_speed / (mutator_speed + gc_speed);
+}
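
For intuition, here is a minimal standalone sketch of the same formula with made-up throughput numbers (the speeds below and their units are illustrative only; the 0.993 threshold is the one used by HasLowYoungGenerationAllocationRate() further down):

// Standalone sketch, not part of this patch. Mirrors the derivation in
// ComputeMutatorUtilization() above with hypothetical throughputs (bytes/ms).
#include <cstdio>

static double MutatorUtilizationSketch(double mutator_speed, double gc_speed) {
  if (mutator_speed == 0) return 0.0;
  if (gc_speed == 0) gc_speed = 200000;  // conservative fallback, as above
  return gc_speed / (mutator_speed + gc_speed);
}

int main() {
  // 1000 bytes/ms of allocation against a 200000 bytes/ms GC speed gives
  // ~0.995, i.e. above the 0.993 "high mutator utilization" threshold.
  std::printf("%.3f\n", MutatorUtilizationSketch(1000, 200000));
  return 0;
}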
+
+
+double Heap::YoungGenerationMutatorUtilization() {
+  double mutator_speed = static_cast<double>(
+      tracer()->NewSpaceAllocationThroughputInBytesPerMillisecond());
+  double gc_speed = static_cast<double>(
+      tracer()->ScavengeSpeedInBytesPerMillisecond(kForSurvivedObjects));
+  double result = ComputeMutatorUtilization(mutator_speed, gc_speed);
+  if (FLAG_trace_mutator_utilization) {
+    PrintIsolate(isolate(),
+                 "Young generation mutator utilization = %.3f ("
+                 "mutator_speed=%.f, gc_speed=%.f)\n",
+                 result, mutator_speed, gc_speed);
   }
-  CollectAllGarbage(kReduceMemoryFootprintMask, message);
-  gc_idle_time_handler_.NotifyIdleMarkCompact();
-  gc_count_at_last_idle_gc_ = gc_count_;
-  if (uncommit) {
+  return result;
+}
+
+
+double Heap::OldGenerationMutatorUtilization() {
+  double mutator_speed = static_cast<double>(
+      tracer()->OldGenerationAllocationThroughputInBytesPerMillisecond());
+  double gc_speed = static_cast<double>(
+      tracer()->CombinedMarkCompactSpeedInBytesPerMillisecond());
+  double result = ComputeMutatorUtilization(mutator_speed, gc_speed);
+  if (FLAG_trace_mutator_utilization) {
+    PrintIsolate(isolate(),
+                 "Old generation mutator utilization = %.3f ("
+                 "mutator_speed=%.f, gc_speed=%.f)\n",
+                 result, mutator_speed, gc_speed);
+  }
+  return result;
+}
+
+
+bool Heap::HasLowYoungGenerationAllocationRate() {
+  const double high_mutator_utilization = 0.993;
+  return YoungGenerationMutatorUtilization() > high_mutator_utilization;
+}
+
+
+bool Heap::HasLowOldGenerationAllocationRate() {
+  const double high_mutator_utilization = 0.993;
+  return OldGenerationMutatorUtilization() > high_mutator_utilization;
+}
+
+
+bool Heap::HasLowAllocationRate() {
+  return HasLowYoungGenerationAllocationRate() &&
+         HasLowOldGenerationAllocationRate();
+}
+
+
+bool Heap::HasHighFragmentation() {
+  intptr_t used = PromotedSpaceSizeOfObjects();
+  intptr_t committed = CommittedOldGenerationMemory();
+  return HasHighFragmentation(used, committed);
+}
+
+
+bool Heap::HasHighFragmentation(intptr_t used, intptr_t committed) {
+  const intptr_t kSlack = 16 * MB;
+  // Fragmentation is high if committed > 2 * used + kSlack.
+  // Rewrite the expression to avoid overflow.
+  return committed - used > used + kSlack;
+}
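
A quick standalone check of the rewritten predicate (the byte counts are hypothetical; on 32-bit builds the naive form's 2 * used can exceed intptr_t once used passes roughly 1 GiB, which is what the subtraction avoids):

// Standalone sketch, not part of this patch: the naive and rewritten
// fragmentation predicates agree whenever 2 * used does not overflow.
#include <cstdint>
#include <cstdio>

int main() {
  const int64_t MB = 1024 * 1024;
  const int64_t kSlack = 16 * MB;
  int64_t used = 300 * MB;
  int64_t committed = 700 * MB;
  bool naive = committed > 2 * used + kSlack;         // 700 MB > 616 MB
  bool rewritten = committed - used > used + kSlack;  // 400 MB > 316 MB
  std::printf("naive=%d rewritten=%d\n", naive, rewritten);
  return 0;
}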
+
+
+void Heap::ReduceNewSpaceSize() {
+  // TODO(ulan): Unify this constant with the similar constant in
+  // GCIdleTimeHandler once the change is merged to 4.5.
+  static const size_t kLowAllocationThroughput = 1000;
+  const size_t allocation_throughput =
+      tracer()->CurrentAllocationThroughputInBytesPerMillisecond();
+
+  if (FLAG_predictable) return;
+
+  if (ShouldReduceMemory() ||
+      ((allocation_throughput != 0) &&
+       (allocation_throughput < kLowAllocationThroughput))) {
     new_space_.Shrink();
     UncommitFromSpace();
   }
 }
 
 
-bool Heap::TryFinalizeIdleIncrementalMarking(
-    double idle_time_in_ms, size_t size_of_objects,
-    size_t final_incremental_mark_compact_speed_in_bytes_per_ms) {
-  if (incremental_marking()->IsComplete() ||
-      (mark_compact_collector_.marking_deque()->IsEmpty() &&
-       gc_idle_time_handler_.ShouldDoFinalIncrementalMarkCompact(
-           static_cast<size_t>(idle_time_in_ms), size_of_objects,
-           final_incremental_mark_compact_speed_in_bytes_per_ms))) {
-    CollectAllGarbage(kNoGCFlags, "idle notification: finalize incremental");
+void Heap::FinalizeIncrementalMarkingIfComplete(const char* comment) {
+  if (incremental_marking()->IsMarking() &&
+      (incremental_marking()->IsReadyToOverApproximateWeakClosure() ||
+       (!incremental_marking()->finalize_marking_completed() &&
+        mark_compact_collector()->marking_deque()->IsEmpty()))) {
+    FinalizeIncrementalMarking(comment);
+  } else if (incremental_marking()->IsComplete() ||
+             (mark_compact_collector()->marking_deque()->IsEmpty())) {
+    CollectAllGarbage(current_gc_flags_, comment);
+  }
+}
+
+
+bool Heap::TryFinalizeIdleIncrementalMarking(double idle_time_in_ms) {
+  size_t size_of_objects = static_cast<size_t>(SizeOfObjects());
+  size_t final_incremental_mark_compact_speed_in_bytes_per_ms =
+      static_cast<size_t>(
+          tracer()->FinalIncrementalMarkCompactSpeedInBytesPerMillisecond());
+  if (incremental_marking()->IsReadyToOverApproximateWeakClosure() ||
+      (!incremental_marking()->finalize_marking_completed() &&
+       mark_compact_collector()->marking_deque()->IsEmpty() &&
+       gc_idle_time_handler_->ShouldDoOverApproximateWeakClosure(
+           static_cast<size_t>(idle_time_in_ms)))) {
+    FinalizeIncrementalMarking(
+        "Idle notification: finalize incremental marking");
+    return true;
+  } else if (incremental_marking()->IsComplete() ||
+             (mark_compact_collector()->marking_deque()->IsEmpty() &&
+              gc_idle_time_handler_->ShouldDoFinalIncrementalMarkCompact(
+                  static_cast<size_t>(idle_time_in_ms), size_of_objects,
+                  final_incremental_mark_compact_speed_in_bytes_per_ms))) {
+    CollectAllGarbage(current_gc_flags_,
+                      "idle notification: finalize incremental marking");
     return true;
   }
   return false;
 }
 
 
-bool Heap::WorthActivatingIncrementalMarking() {
-  return incremental_marking()->IsStopped() &&
-         incremental_marking()->WorthActivating() && NextGCIsLikelyToBeFull();
-}
-
-
-static double MonotonicallyIncreasingTimeInMs() {
-  return V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() *
-         static_cast<double>(base::Time::kMillisecondsPerSecond);
-}
-
-
-bool Heap::IdleNotification(int idle_time_in_ms) {
-  return IdleNotification(
-      V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() +
-      (static_cast<double>(idle_time_in_ms) /
-       static_cast<double>(base::Time::kMillisecondsPerSecond)));
-}
-
-
-bool Heap::IdleNotification(double deadline_in_seconds) {
-  double deadline_in_ms =
-      deadline_in_seconds *
-      static_cast<double>(base::Time::kMillisecondsPerSecond);
-  HistogramTimerScope idle_notification_scope(
-      isolate_->counters()->gc_idle_notification());
-
-  GCIdleTimeHandler::HeapState heap_state;
+GCIdleTimeHeapState Heap::ComputeHeapState() {
+  GCIdleTimeHeapState heap_state;
   heap_state.contexts_disposed = contexts_disposed_;
   heap_state.contexts_disposal_rate =
       tracer()->ContextDisposalRateInMilliseconds();
   heap_state.size_of_objects = static_cast<size_t>(SizeOfObjects());
   heap_state.incremental_marking_stopped = incremental_marking()->IsStopped();
-  // TODO(ulan): Start incremental marking only for large heaps.
-  heap_state.can_start_incremental_marking =
-      incremental_marking()->ShouldActivate() && FLAG_incremental_marking;
-  heap_state.sweeping_in_progress =
-      mark_compact_collector()->sweeping_in_progress();
-  heap_state.mark_compact_speed_in_bytes_per_ms =
-      static_cast<size_t>(tracer()->MarkCompactSpeedInBytesPerMillisecond());
-  heap_state.incremental_marking_speed_in_bytes_per_ms = static_cast<size_t>(
-      tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
-  heap_state.final_incremental_mark_compact_speed_in_bytes_per_ms =
-      static_cast<size_t>(
-          tracer()->FinalIncrementalMarkCompactSpeedInBytesPerMillisecond());
-  heap_state.scavenge_speed_in_bytes_per_ms =
-      static_cast<size_t>(tracer()->ScavengeSpeedInBytesPerMillisecond());
-  heap_state.used_new_space_size = new_space_.Size();
-  heap_state.new_space_capacity = new_space_.Capacity();
-  heap_state.new_space_allocation_throughput_in_bytes_per_ms =
-      static_cast<size_t>(
-          tracer()->NewSpaceAllocationThroughputInBytesPerMillisecond());
+  return heap_state;
+}
 
-  double idle_time_in_ms = deadline_in_ms - MonotonicallyIncreasingTimeInMs();
-  GCIdleTimeAction action =
-      gc_idle_time_handler_.Compute(idle_time_in_ms, heap_state);
-  isolate()->counters()->gc_idle_time_allotted_in_ms()->AddSample(
-      static_cast<int>(idle_time_in_ms));
 
+bool Heap::PerformIdleTimeAction(GCIdleTimeAction action,
+                                 GCIdleTimeHeapState heap_state,
+                                 double deadline_in_ms) {
   bool result = false;
   switch (action.type) {
     case DONE:
       result = true;
       break;
-    case DO_INCREMENTAL_MARKING: {
-      if (incremental_marking()->IsStopped()) {
-        incremental_marking()->Start();
-      }
-      double remaining_idle_time_in_ms = 0.0;
-      do {
-        incremental_marking()->Step(
-            action.parameter, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
-            IncrementalMarking::FORCE_MARKING,
-            IncrementalMarking::DO_NOT_FORCE_COMPLETION);
-        remaining_idle_time_in_ms =
-            deadline_in_ms - MonotonicallyIncreasingTimeInMs();
-      } while (remaining_idle_time_in_ms >=
-                   2.0 * GCIdleTimeHandler::kIncrementalMarkingStepTimeInMs &&
-               !incremental_marking()->IsComplete() &&
-               !mark_compact_collector_.marking_deque()->IsEmpty());
-      if (remaining_idle_time_in_ms > 0.0) {
-        action.additional_work = TryFinalizeIdleIncrementalMarking(
-            remaining_idle_time_in_ms, heap_state.size_of_objects,
-            heap_state.final_incremental_mark_compact_speed_in_bytes_per_ms);
+    case DO_INCREMENTAL_STEP: {
+      if (incremental_marking()->incremental_marking_job()->IdleTaskPending()) {
+        result = true;
+      } else {
+        incremental_marking()
+            ->incremental_marking_job()
+            ->NotifyIdleTaskProgress();
+        result = IncrementalMarkingJob::IdleTask::Step(this, deadline_in_ms) ==
+                 IncrementalMarkingJob::IdleTask::kDone;
       }
       break;
     }
     case DO_FULL_GC: {
-      if (contexts_disposed_) {
-        HistogramTimerScope scope(isolate_->counters()->gc_context());
-        CollectAllGarbage(kNoGCFlags, "idle notification: contexts disposed");
-        gc_idle_time_handler_.NotifyIdleMarkCompact();
-        gc_count_at_last_idle_gc_ = gc_count_;
-      } else {
-        IdleMarkCompact("idle notification: finalize idle round");
-      }
+      DCHECK(contexts_disposed_ > 0);
+      HistogramTimerScope scope(isolate_->counters()->gc_context());
+      CollectAllGarbage(kNoGCFlags, "idle notification: contexts disposed");
       break;
     }
-    case DO_SCAVENGE:
-      CollectGarbage(NEW_SPACE, "idle notification: scavenge");
-      break;
-    case DO_FINALIZE_SWEEPING:
-      mark_compact_collector()->EnsureSweepingCompleted();
-      break;
     case DO_NOTHING:
       break;
   }
 
+  return result;
+}
+
+
+void Heap::IdleNotificationEpilogue(GCIdleTimeAction action,
+                                    GCIdleTimeHeapState heap_state,
+                                    double start_ms, double deadline_in_ms) {
+  double idle_time_in_ms = deadline_in_ms - start_ms;
   double current_time = MonotonicallyIncreasingTimeInMs();
   last_idle_notification_time_ = current_time;
   double deadline_difference = deadline_in_ms - current_time;
 
+  contexts_disposed_ = 0;
+
+  isolate()->counters()->gc_idle_time_allotted_in_ms()->AddSample(
+      static_cast<int>(idle_time_in_ms));
+
+  if (deadline_in_ms - start_ms >
+      GCIdleTimeHandler::kMaxFrameRenderingIdleTime) {
+    int committed_memory = static_cast<int>(CommittedMemory() / KB);
+    int used_memory = static_cast<int>(heap_state.size_of_objects / KB);
+    isolate()->counters()->aggregated_memory_heap_committed()->AddSample(
+        start_ms, committed_memory);
+    isolate()->counters()->aggregated_memory_heap_used()->AddSample(
+        start_ms, used_memory);
+  }
+
   if (deadline_difference >= 0) {
     if (action.type != DONE && action.type != DO_NOTHING) {
       isolate()->counters()->gc_idle_time_limit_undershot()->AddSample(
@@ -4591,6 +4235,7 @@
 
   if ((FLAG_trace_idle_notification && action.type > DO_NOTHING) ||
       FLAG_trace_idle_notification_verbose) {
+    PrintIsolate(isolate_, "%8.0f ms: ", isolate()->time_millis_since_init());
     PrintF(
         "Idle notification: requested idle time %.2f ms, used idle time %.2f "
         "ms, deadline usage %.2f ms [",
@@ -4605,15 +4250,51 @@
     }
     PrintF("\n");
   }
+}
 
-  contexts_disposed_ = 0;
+
+double Heap::MonotonicallyIncreasingTimeInMs() {
+  return V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() *
+         static_cast<double>(base::Time::kMillisecondsPerSecond);
+}
+
+
+bool Heap::IdleNotification(int idle_time_in_ms) {
+  return IdleNotification(
+      V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() +
+      (static_cast<double>(idle_time_in_ms) /
+       static_cast<double>(base::Time::kMillisecondsPerSecond)));
+}
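
The millisecond-based overload simply converts the idle budget into the absolute deadline (in seconds) that the double overload expects; a small sketch with hypothetical clock values:

// Standalone sketch, not part of this patch: converting a 16 ms idle budget
// into an absolute deadline in seconds.
#include <cstdio>

int main() {
  double now_in_seconds = 2.000;  // stand-in for MonotonicallyIncreasingTime()
  int idle_time_in_ms = 16;       // roughly one frame of idle time at 60 fps
  double deadline_in_seconds = now_in_seconds + idle_time_in_ms / 1000.0;
  std::printf("deadline = %.3f s\n", deadline_in_seconds);  // prints 2.016 s
  return 0;
}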
+
+
+bool Heap::IdleNotification(double deadline_in_seconds) {
+  CHECK(HasBeenSetUp());
+  double deadline_in_ms =
+      deadline_in_seconds *
+      static_cast<double>(base::Time::kMillisecondsPerSecond);
+  HistogramTimerScope idle_notification_scope(
+      isolate_->counters()->gc_idle_notification());
+  double start_ms = MonotonicallyIncreasingTimeInMs();
+  double idle_time_in_ms = deadline_in_ms - start_ms;
+
+  tracer()->SampleAllocation(start_ms, NewSpaceAllocationCounter(),
+                             OldGenerationAllocationCounter());
+
+  GCIdleTimeHeapState heap_state = ComputeHeapState();
+
+  GCIdleTimeAction action =
+      gc_idle_time_handler_->Compute(idle_time_in_ms, heap_state);
+
+  bool result = PerformIdleTimeAction(action, heap_state, deadline_in_ms);
+
+  IdleNotificationEpilogue(action, heap_state, start_ms, deadline_in_ms);
   return result;
 }
 
 
 bool Heap::RecentIdleNotificationHappened() {
   return (last_idle_notification_time_ +
-          GCIdleTimeHandler::kMaxFrameRenderingIdleTime) >
+          GCIdleTimeHandler::kMaxScheduledIdleTime) >
          MonotonicallyIncreasingTimeInMs();
 }
 
@@ -4660,18 +4341,12 @@
   isolate_->memory_allocator()->ReportStatistics();
   PrintF("To space : ");
   new_space_.ReportStatistics();
-  PrintF("Old pointer space : ");
-  old_pointer_space_->ReportStatistics();
-  PrintF("Old data space : ");
-  old_data_space_->ReportStatistics();
+  PrintF("Old space : ");
+  old_space_->ReportStatistics();
   PrintF("Code space : ");
   code_space_->ReportStatistics();
   PrintF("Map space : ");
   map_space_->ReportStatistics();
-  PrintF("Cell space : ");
-  cell_space_->ReportStatistics();
-  PrintF("PropertyCell space : ");
-  property_cell_space_->ReportStatistics();
   PrintF("Large object space : ");
   lo_space_->ReportStatistics();
   PrintF(">>>>>> ========================================= >>>>>>\n");
@@ -4685,11 +4360,8 @@
 bool Heap::Contains(Address addr) {
   if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) return false;
   return HasBeenSetUp() &&
-         (new_space_.ToSpaceContains(addr) ||
-          old_pointer_space_->Contains(addr) ||
-          old_data_space_->Contains(addr) || code_space_->Contains(addr) ||
-          map_space_->Contains(addr) || cell_space_->Contains(addr) ||
-          property_cell_space_->Contains(addr) ||
+         (new_space_.ToSpaceContains(addr) || old_space_->Contains(addr) ||
+          code_space_->Contains(addr) || map_space_->Contains(addr) ||
           lo_space_->SlowContains(addr));
 }
 
@@ -4706,18 +4378,12 @@
   switch (space) {
     case NEW_SPACE:
       return new_space_.ToSpaceContains(addr);
-    case OLD_POINTER_SPACE:
-      return old_pointer_space_->Contains(addr);
-    case OLD_DATA_SPACE:
-      return old_data_space_->Contains(addr);
+    case OLD_SPACE:
+      return old_space_->Contains(addr);
     case CODE_SPACE:
       return code_space_->Contains(addr);
     case MAP_SPACE:
       return map_space_->Contains(addr);
-    case CELL_SPACE:
-      return cell_space_->Contains(addr);
-    case PROPERTY_CELL_SPACE:
-      return property_cell_space_->Contains(addr);
     case LO_SPACE:
       return lo_space_->SlowContains(addr);
   }
@@ -4726,13 +4392,32 @@
 }
 
 
+bool Heap::IsValidAllocationSpace(AllocationSpace space) {
+  switch (space) {
+    case NEW_SPACE:
+    case OLD_SPACE:
+    case CODE_SPACE:
+    case MAP_SPACE:
+    case LO_SPACE:
+      return true;
+    default:
+      return false;
+  }
+}
+
+
 bool Heap::RootIsImmortalImmovable(int root_index) {
   switch (root_index) {
-#define CASE(name)               \
-  case Heap::k##name##RootIndex: \
+#define IMMORTAL_IMMOVABLE_ROOT(name) case Heap::k##name##RootIndex:
+    IMMORTAL_IMMOVABLE_ROOT_LIST(IMMORTAL_IMMOVABLE_ROOT)
+#undef IMMORTAL_IMMOVABLE_ROOT
+#define INTERNALIZED_STRING(name, value) case Heap::k##name##RootIndex:
+    INTERNALIZED_STRING_LIST(INTERNALIZED_STRING)
+#undef INTERNALIZED_STRING
+#define STRING_TYPE(NAME, size, name, Name) case Heap::k##Name##MapRootIndex:
+    STRING_TYPE_LIST(STRING_TYPE)
+#undef STRING_TYPE
     return true;
-    IMMORTAL_IMMOVABLE_ROOT_LIST(CASE);
-#undef CASE
     default:
       return false;
   }
@@ -4759,21 +4444,24 @@
 
   new_space_.Verify();
 
-  old_pointer_space_->Verify(&visitor);
+  old_space_->Verify(&visitor);
   map_space_->Verify(&visitor);
 
   VerifyPointersVisitor no_dirty_regions_visitor;
-  old_data_space_->Verify(&no_dirty_regions_visitor);
   code_space_->Verify(&no_dirty_regions_visitor);
-  cell_space_->Verify(&no_dirty_regions_visitor);
-  property_cell_space_->Verify(&no_dirty_regions_visitor);
 
   lo_space_->Verify();
+
+  mark_compact_collector()->VerifyWeakEmbeddedObjectsInCode();
+  if (FLAG_omit_map_checks_for_leaf_maps) {
+    mark_compact_collector()->VerifyOmittedMapChecks();
+  }
 }
 #endif
 
 
 void Heap::ZapFromSpace() {
+  if (!new_space_.IsFromSpaceCommitted()) return;
   NewSpacePageIterator it(new_space_.FromSpaceStart(),
                           new_space_.FromSpaceEnd());
   while (it.has_next()) {
@@ -4786,10 +4474,68 @@
 }
 
 
-void Heap::IterateAndMarkPointersToFromSpace(Address start, Address end,
+void Heap::IterateAndMarkPointersToFromSpace(HeapObject* object, Address start,
+                                             Address end, bool record_slots,
                                              ObjectSlotCallback callback) {
   Address slot_address = start;
 
+  while (slot_address < end) {
+    Object** slot = reinterpret_cast<Object**>(slot_address);
+    Object* target = *slot;
+    // If the store buffer becomes overfull we mark pages as being exempt from
+    // the store buffer.  These pages are scanned to find pointers that point
+    // to the new space.  In that case we may hit newly promoted objects and
+    // fix the pointers before the promotion queue gets to them.  Thus the 'if'.
+    if (target->IsHeapObject()) {
+      if (Heap::InFromSpace(target)) {
+        callback(reinterpret_cast<HeapObject**>(slot),
+                 HeapObject::cast(target));
+        Object* new_target = *slot;
+        if (InNewSpace(new_target)) {
+          SLOW_DCHECK(Heap::InToSpace(new_target));
+          SLOW_DCHECK(new_target->IsHeapObject());
+          store_buffer_.EnterDirectlyIntoStoreBuffer(
+              reinterpret_cast<Address>(slot));
+        }
+        SLOW_DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_target));
+      } else if (record_slots &&
+                 MarkCompactCollector::IsOnEvacuationCandidate(target)) {
+        mark_compact_collector()->RecordSlot(object, slot, target);
+      }
+    }
+    slot_address += kPointerSize;
+  }
+}
+
+
+class IteratePointersToFromSpaceVisitor final : public ObjectVisitor {
+ public:
+  IteratePointersToFromSpaceVisitor(Heap* heap, HeapObject* target,
+                                    bool record_slots,
+                                    ObjectSlotCallback callback)
+      : heap_(heap),
+        target_(target),
+        record_slots_(record_slots),
+        callback_(callback) {}
+
+  V8_INLINE void VisitPointers(Object** start, Object** end) override {
+    heap_->IterateAndMarkPointersToFromSpace(
+        target_, reinterpret_cast<Address>(start),
+        reinterpret_cast<Address>(end), record_slots_, callback_);
+  }
+
+  V8_INLINE void VisitCodeEntry(Address code_entry_slot) override {}
+
+ private:
+  Heap* heap_;
+  HeapObject* target_;
+  bool record_slots_;
+  ObjectSlotCallback callback_;
+};
+
+
+void Heap::IteratePointersToFromSpace(HeapObject* target, int size,
+                                      ObjectSlotCallback callback) {
   // We are not collecting slots on new space objects during mutation
   // thus we have to scan for pointers to evacuation candidates when we
   // promote objects. But we should not record any slots in non-black
@@ -4798,176 +4544,16 @@
   // it would be a violation of the invariant to record its slots.
   bool record_slots = false;
   if (incremental_marking()->IsCompacting()) {
-    MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::FromAddress(start));
+    MarkBit mark_bit = Marking::MarkBitFrom(target);
     record_slots = Marking::IsBlack(mark_bit);
   }
 
-  while (slot_address < end) {
-    Object** slot = reinterpret_cast<Object**>(slot_address);
-    Object* object = *slot;
-    // If the store buffer becomes overfull we mark pages as being exempt from
-    // the store buffer.  These pages are scanned to find pointers that point
-    // to the new space.  In that case we may hit newly promoted objects and
-    // fix the pointers before the promotion queue gets to them.  Thus the 'if'.
-    if (object->IsHeapObject()) {
-      if (Heap::InFromSpace(object)) {
-        callback(reinterpret_cast<HeapObject**>(slot),
-                 HeapObject::cast(object));
-        Object* new_object = *slot;
-        if (InNewSpace(new_object)) {
-          SLOW_DCHECK(Heap::InToSpace(new_object));
-          SLOW_DCHECK(new_object->IsHeapObject());
-          store_buffer_.EnterDirectlyIntoStoreBuffer(
-              reinterpret_cast<Address>(slot));
-        }
-        SLOW_DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_object));
-      } else if (record_slots &&
-                 MarkCompactCollector::IsOnEvacuationCandidate(object)) {
-        mark_compact_collector()->RecordSlot(slot, slot, object);
-      }
-    }
-    slot_address += kPointerSize;
-  }
+  IteratePointersToFromSpaceVisitor visitor(this, target, record_slots,
+                                            callback);
+  target->IterateBody(target->map()->instance_type(), size, &visitor);
 }
 
 
-#ifdef DEBUG
-typedef bool (*CheckStoreBufferFilter)(Object** addr);
-
-
-bool IsAMapPointerAddress(Object** addr) {
-  uintptr_t a = reinterpret_cast<uintptr_t>(addr);
-  int mod = a % Map::kSize;
-  return mod >= Map::kPointerFieldsBeginOffset &&
-         mod < Map::kPointerFieldsEndOffset;
-}
-
-
-bool EverythingsAPointer(Object** addr) { return true; }
-
-
-static void CheckStoreBuffer(Heap* heap, Object** current, Object** limit,
-                             Object**** store_buffer_position,
-                             Object*** store_buffer_top,
-                             CheckStoreBufferFilter filter,
-                             Address special_garbage_start,
-                             Address special_garbage_end) {
-  Map* free_space_map = heap->free_space_map();
-  for (; current < limit; current++) {
-    Object* o = *current;
-    Address current_address = reinterpret_cast<Address>(current);
-    // Skip free space.
-    if (o == free_space_map) {
-      Address current_address = reinterpret_cast<Address>(current);
-      FreeSpace* free_space =
-          FreeSpace::cast(HeapObject::FromAddress(current_address));
-      int skip = free_space->Size();
-      DCHECK(current_address + skip <= reinterpret_cast<Address>(limit));
-      DCHECK(skip > 0);
-      current_address += skip - kPointerSize;
-      current = reinterpret_cast<Object**>(current_address);
-      continue;
-    }
-    // Skip the current linear allocation space between top and limit which is
-    // unmarked with the free space map, but can contain junk.
-    if (current_address == special_garbage_start &&
-        special_garbage_end != special_garbage_start) {
-      current_address = special_garbage_end - kPointerSize;
-      current = reinterpret_cast<Object**>(current_address);
-      continue;
-    }
-    if (!(*filter)(current)) continue;
-    DCHECK(current_address < special_garbage_start ||
-           current_address >= special_garbage_end);
-    DCHECK(reinterpret_cast<uintptr_t>(o) != kFreeListZapValue);
-    // We have to check that the pointer does not point into new space
-    // without trying to cast it to a heap object since the hash field of
-    // a string can contain values like 1 and 3 which are tagged null
-    // pointers.
-    if (!heap->InNewSpace(o)) continue;
-    while (**store_buffer_position < current &&
-           *store_buffer_position < store_buffer_top) {
-      (*store_buffer_position)++;
-    }
-    if (**store_buffer_position != current ||
-        *store_buffer_position == store_buffer_top) {
-      Object** obj_start = current;
-      while (!(*obj_start)->IsMap()) obj_start--;
-      UNREACHABLE();
-    }
-  }
-}
-
-
-// Check that the store buffer contains all intergenerational pointers by
-// scanning a page and ensuring that all pointers to young space are in the
-// store buffer.
-void Heap::OldPointerSpaceCheckStoreBuffer() {
-  OldSpace* space = old_pointer_space();
-  PageIterator pages(space);
-
-  store_buffer()->SortUniq();
-
-  while (pages.has_next()) {
-    Page* page = pages.next();
-    Object** current = reinterpret_cast<Object**>(page->area_start());
-
-    Address end = page->area_end();
-
-    Object*** store_buffer_position = store_buffer()->Start();
-    Object*** store_buffer_top = store_buffer()->Top();
-
-    Object** limit = reinterpret_cast<Object**>(end);
-    CheckStoreBuffer(this, current, limit, &store_buffer_position,
-                     store_buffer_top, &EverythingsAPointer, space->top(),
-                     space->limit());
-  }
-}
-
-
-void Heap::MapSpaceCheckStoreBuffer() {
-  MapSpace* space = map_space();
-  PageIterator pages(space);
-
-  store_buffer()->SortUniq();
-
-  while (pages.has_next()) {
-    Page* page = pages.next();
-    Object** current = reinterpret_cast<Object**>(page->area_start());
-
-    Address end = page->area_end();
-
-    Object*** store_buffer_position = store_buffer()->Start();
-    Object*** store_buffer_top = store_buffer()->Top();
-
-    Object** limit = reinterpret_cast<Object**>(end);
-    CheckStoreBuffer(this, current, limit, &store_buffer_position,
-                     store_buffer_top, &IsAMapPointerAddress, space->top(),
-                     space->limit());
-  }
-}
-
-
-void Heap::LargeObjectSpaceCheckStoreBuffer() {
-  LargeObjectIterator it(lo_space());
-  for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
-    // We only have code, sequential strings, or fixed arrays in large
-    // object space, and only fixed arrays can possibly contain pointers to
-    // the young generation.
-    if (object->IsFixedArray()) {
-      Object*** store_buffer_position = store_buffer()->Start();
-      Object*** store_buffer_top = store_buffer()->Top();
-      Object** current = reinterpret_cast<Object**>(object->address());
-      Object** limit =
-          reinterpret_cast<Object**>(object->address() + object->Size());
-      CheckStoreBuffer(this, current, limit, &store_buffer_position,
-                       store_buffer_top, &EverythingsAPointer, NULL, NULL);
-    }
-  }
-}
-#endif
-
-
 void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
   IterateStrongRoots(v, mode);
   IterateWeakRoots(v, mode);
@@ -4997,9 +4583,6 @@
   v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
   v->Synchronize(VisitorSynchronization::kStrongRootList);
 
-  v->VisitPointer(bit_cast<Object**>(&hidden_string_));
-  v->Synchronize(VisitorSynchronization::kInternalizedString);
-
   isolate_->bootstrapper()->Iterate(v);
   v->Synchronize(VisitorSynchronization::kBootstrapper);
   isolate_->Iterate(v);
@@ -5054,6 +4637,12 @@
   isolate_->thread_manager()->Iterate(v);
   v->Synchronize(VisitorSynchronization::kThreadManager);
 
+  // Iterate over other strong roots (currently only identity maps).
+  for (StrongRootsList* list = strong_roots_list_; list; list = list->next) {
+    v->VisitPointers(list->start, list->end);
+  }
+  v->Synchronize(VisitorSynchronization::kStrongRoots);
+
   // Iterate over the pointers the Serialization/Deserialization code is
   // holding.
   // During garbage collection this keeps the partial snapshot cache alive.
@@ -5082,10 +4671,10 @@
     max_semi_space_size_ = max_semi_space_size * MB;
   }
   if (max_old_space_size > 0) {
-    max_old_generation_size_ = max_old_space_size * MB;
+    max_old_generation_size_ = static_cast<intptr_t>(max_old_space_size) * MB;
   }
   if (max_executable_size > 0) {
-    max_executable_size_ = max_executable_size * MB;
+    max_executable_size_ = static_cast<intptr_t>(max_executable_size) * MB;
   }
 
   // If max space size flags are specified overwrite the configuration.
@@ -5093,10 +4682,18 @@
     max_semi_space_size_ = FLAG_max_semi_space_size * MB;
   }
   if (FLAG_max_old_space_size > 0) {
-    max_old_generation_size_ = FLAG_max_old_space_size * MB;
+    max_old_generation_size_ =
+        static_cast<intptr_t>(FLAG_max_old_space_size) * MB;
   }
   if (FLAG_max_executable_size > 0) {
-    max_executable_size_ = FLAG_max_executable_size * MB;
+    max_executable_size_ = static_cast<intptr_t>(FLAG_max_executable_size) * MB;
+  }
+
+  if (Page::kPageSize > MB) {
+    max_semi_space_size_ = ROUND_UP(max_semi_space_size_, Page::kPageSize);
+    max_old_generation_size_ =
+        ROUND_UP(max_old_generation_size_, Page::kPageSize);
+    max_executable_size_ = ROUND_UP(max_executable_size_, Page::kPageSize);
   }
 
   if (FLAG_stress_compaction) {
@@ -5104,7 +4701,7 @@
     max_semi_space_size_ = Page::kPageSize;
   }
 
-  if (Snapshot::HaveASnapshotToStartFrom()) {
+  if (isolate()->snapshot_available()) {
     // If we are using a snapshot we always reserve the default amount
     // of memory for each semispace because code in the snapshot has
     // write-barrier code that relies on the size and alignment of new
@@ -5113,8 +4710,9 @@
     if (max_semi_space_size_ > reserved_semispace_size_) {
       max_semi_space_size_ = reserved_semispace_size_;
       if (FLAG_trace_gc) {
-        PrintPID("Max semi-space size cannot be more than %d kbytes\n",
-                 reserved_semispace_size_ >> 10);
+        PrintIsolate(isolate_,
+                     "Max semi-space size cannot be more than %d kbytes\n",
+                     reserved_semispace_size_ >> 10);
       }
     }
   } else {
@@ -5123,12 +4721,6 @@
     reserved_semispace_size_ = max_semi_space_size_;
   }
 
-  // The max executable size must be less than or equal to the max old
-  // generation size.
-  if (max_executable_size_ > max_old_generation_size_) {
-    max_executable_size_ = max_old_generation_size_;
-  }
-
   // The new space size must be a power of two to support single-bit testing
   // for containment.
   max_semi_space_size_ =
@@ -5141,13 +4733,14 @@
     if (initial_semispace_size > max_semi_space_size_) {
       initial_semispace_size_ = max_semi_space_size_;
       if (FLAG_trace_gc) {
-        PrintPID(
-            "Min semi-space size cannot be more than the maximum "
-            "semi-space size of %d MB\n",
-            max_semi_space_size_ / MB);
+        PrintIsolate(isolate_,
+                     "Min semi-space size cannot be more than the maximum "
+                     "semi-space size of %d MB\n",
+                     max_semi_space_size_ / MB);
       }
     } else {
-      initial_semispace_size_ = initial_semispace_size;
+      initial_semispace_size_ =
+          ROUND_UP(initial_semispace_size, Page::kPageSize);
     }
   }
 
@@ -5158,21 +4751,21 @@
     if (target_semispace_size < initial_semispace_size_) {
       target_semispace_size_ = initial_semispace_size_;
       if (FLAG_trace_gc) {
-        PrintPID(
-            "Target semi-space size cannot be less than the minimum "
-            "semi-space size of %d MB\n",
-            initial_semispace_size_ / MB);
+        PrintIsolate(isolate_,
+                     "Target semi-space size cannot be less than the minimum "
+                     "semi-space size of %d MB\n",
+                     initial_semispace_size_ / MB);
       }
     } else if (target_semispace_size > max_semi_space_size_) {
       target_semispace_size_ = max_semi_space_size_;
       if (FLAG_trace_gc) {
-        PrintPID(
-            "Target semi-space size cannot be less than the maximum "
-            "semi-space size of %d MB\n",
-            max_semi_space_size_ / MB);
+        PrintIsolate(isolate_,
+                     "Target semi-space size cannot be more than the maximum "
+                     "semi-space size of %d MB\n",
+                     max_semi_space_size_ / MB);
       }
     } else {
-      target_semispace_size_ = target_semispace_size;
+      target_semispace_size_ = ROUND_UP(target_semispace_size, Page::kPageSize);
     }
   }
 
@@ -5188,17 +4781,24 @@
       Max(static_cast<intptr_t>(paged_space_count * Page::kPageSize),
           max_old_generation_size_);
 
+  // The max executable size must be less than or equal to the max old
+  // generation size.
+  if (max_executable_size_ > max_old_generation_size_) {
+    max_executable_size_ = max_old_generation_size_;
+  }
+
   if (FLAG_initial_old_space_size > 0) {
     initial_old_generation_size_ = FLAG_initial_old_space_size * MB;
   } else {
-    initial_old_generation_size_ = max_old_generation_size_ / 2;
+    initial_old_generation_size_ =
+        max_old_generation_size_ / kInitalOldGenerationLimitFactor;
   }
   old_generation_allocation_limit_ = initial_old_generation_size_;
 
   // We rely on being able to allocate new arrays in paged spaces.
   DCHECK(Page::kMaxRegularHeapObjectSize >=
          (JSArray::kSize +
-          FixedArray::SizeFor(JSObject::kInitialMaxFastElementArray) +
+          FixedArray::SizeFor(JSArray::kInitialMaxFastElementArray) +
           AllocationMemento::kSize));
 
   code_range_size_ = code_range_size * MB;
@@ -5208,6 +4808,30 @@
 }
 
 
+void Heap::AddToRingBuffer(const char* string) {
+  size_t first_part =
+      Min(strlen(string), kTraceRingBufferSize - ring_buffer_end_);
+  memcpy(trace_ring_buffer_ + ring_buffer_end_, string, first_part);
+  ring_buffer_end_ += first_part;
+  if (first_part < strlen(string)) {
+    ring_buffer_full_ = true;
+    size_t second_part = strlen(string) - first_part;
+    memcpy(trace_ring_buffer_, string + first_part, second_part);
+    ring_buffer_end_ = second_part;
+  }
+}
+
+
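+// Copies the retained trace output (at most kTraceRingBufferSize bytes,
+// written via AddToRingBuffer above) into |buffer| in chronological order,
+// oldest bytes first once the ring buffer has wrapped around.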
+void Heap::GetFromRingBuffer(char* buffer) {
+  size_t copied = 0;
+  if (ring_buffer_full_) {
+    copied = kTraceRingBufferSize - ring_buffer_end_;
+    memcpy(buffer, trace_ring_buffer_ + ring_buffer_end_, copied);
+  }
+  memcpy(buffer + copied, trace_ring_buffer_, ring_buffer_end_);
+}
+
+
 bool Heap::ConfigureHeapDefault() { return ConfigureHeap(0, 0, 0, 0); }
 
 
@@ -5216,18 +4840,12 @@
   *stats->end_marker = HeapStats::kEndMarker;
   *stats->new_space_size = new_space_.SizeAsInt();
   *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
-  *stats->old_pointer_space_size = old_pointer_space_->SizeOfObjects();
-  *stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
-  *stats->old_data_space_size = old_data_space_->SizeOfObjects();
-  *stats->old_data_space_capacity = old_data_space_->Capacity();
+  *stats->old_space_size = old_space_->SizeOfObjects();
+  *stats->old_space_capacity = old_space_->Capacity();
   *stats->code_space_size = code_space_->SizeOfObjects();
   *stats->code_space_capacity = code_space_->Capacity();
   *stats->map_space_size = map_space_->SizeOfObjects();
   *stats->map_space_capacity = map_space_->Capacity();
-  *stats->cell_space_size = cell_space_->SizeOfObjects();
-  *stats->cell_space_capacity = cell_space_->Capacity();
-  *stats->property_cell_space_size = property_cell_space_->SizeOfObjects();
-  *stats->property_cell_space_capacity = property_cell_space_->Capacity();
   *stats->lo_space_size = lo_space_->Size();
   isolate_->global_handles()->RecordStats(stats);
   *stats->memory_allocator_size = isolate()->memory_allocator()->Size();
@@ -5246,14 +4864,23 @@
       stats->size_per_type[type] += obj->Size();
     }
   }
+  if (stats->last_few_messages != NULL)
+    GetFromRingBuffer(stats->last_few_messages);
+  if (stats->js_stacktrace != NULL) {
+    FixedStringAllocator fixed(stats->js_stacktrace, kStacktraceBufferSize - 1);
+    StringStream accumulator(&fixed, StringStream::kPrintObjectConcise);
+    if (gc_state() == Heap::NOT_IN_GC) {
+      isolate()->PrintStack(&accumulator, Isolate::kPrintStackVerbose);
+    } else {
+      accumulator.Add("Cannot get stack trace in GC.");
+    }
+  }
 }
 
 
 intptr_t Heap::PromotedSpaceSizeOfObjects() {
-  return old_pointer_space_->SizeOfObjects() +
-         old_data_space_->SizeOfObjects() + code_space_->SizeOfObjects() +
-         map_space_->SizeOfObjects() + cell_space_->SizeOfObjects() +
-         property_cell_space_->SizeOfObjects() + lo_space_->SizeOfObjects();
+  return old_space_->SizeOfObjects() + code_space_->SizeOfObjects() +
+         map_space_->SizeOfObjects() + lo_space_->SizeOfObjects();
 }
 
 
@@ -5266,47 +4893,147 @@
 }
 
 
-intptr_t Heap::OldGenerationAllocationLimit(intptr_t old_gen_size,
-                                            int freed_global_handles) {
-  const int kMaxHandles = 1000;
-  const int kMinHandles = 100;
-  double min_factor = 1.1;
-  double max_factor = 4;
-  // We set the old generation growing factor to 2 to grow the heap slower on
-  // memory-constrained devices.
-  if (max_old_generation_size_ <= kMaxOldSpaceSizeMediumMemoryDevice) {
-    max_factor = 2;
-  }
-  // If there are many freed global handles, then the next full GC will
-  // likely collect a lot of garbage. Choose the heap growing factor
-  // depending on freed global handles.
-  // TODO(ulan, hpayer): Take into account mutator utilization.
-  double factor;
-  if (freed_global_handles <= kMinHandles) {
-    factor = max_factor;
-  } else if (freed_global_handles >= kMaxHandles) {
-    factor = min_factor;
-  } else {
-    // Compute factor using linear interpolation between points
-    // (kMinHandles, max_factor) and (kMaxHandles, min_factor).
-    factor = max_factor -
-             (freed_global_handles - kMinHandles) * (max_factor - min_factor) /
-                 (kMaxHandles - kMinHandles);
-  }
+const double Heap::kMinHeapGrowingFactor = 1.1;
+const double Heap::kMaxHeapGrowingFactor = 4.0;
+const double Heap::kMaxHeapGrowingFactorMemoryConstrained = 2.0;
+const double Heap::kMaxHeapGrowingFactorIdle = 1.5;
+const double Heap::kTargetMutatorUtilization = 0.97;
 
-  if (FLAG_stress_compaction ||
-      mark_compact_collector()->reduce_memory_footprint_) {
-    factor = min_factor;
-  }
 
+// Given GC speed in bytes per ms and the allocation throughput in bytes per
+// ms (mutator speed), this function returns the heap growing factor that
+// will achieve kTargetMutatorUtilization if the GC speed and the mutator
+// speed remain the same until the next GC.
+//
+// For a fixed time-frame T = TM + TG, the mutator utilization is the ratio
+// TM / (TM + TG), where TM is the time spent in the mutator and TG is the
+// time spent in the garbage collector.
+//
+// Let MU be kTargetMutatorUtilization, the desired mutator utilization for the
+// time-frame from the end of the current GC to the end of the next GC. Based
+// on the MU we can compute the heap growing factor F as
+//
+// F = R * (1 - MU) / (R * (1 - MU) - MU), where R = gc_speed / mutator_speed.
+//
+// This formula can be derived as follows.
+//
+// F = Limit / Live by definition, where Limit is the allocation limit and
+// Live is the size of live objects.
+// Let’s assume that we already know the Limit. Then:
+//   TG = Limit / gc_speed
+//   TM = (TM + TG) * MU, by definition of MU.
+//   TM = TG * MU / (1 - MU)
+//   TM = Limit *  MU / (gc_speed * (1 - MU))
+// On the other hand, if the allocation throughput remains constant:
+//   Limit = Live + TM * allocation_throughput = Live + TM * mutator_speed
+// Solving it for TM, we get
+//   TM = (Limit - Live) / mutator_speed
+// Combining the two equations for TM:
+//   (Limit - Live) / mutator_speed = Limit * MU / (gc_speed * (1 - MU))
+//   (Limit - Live) = Limit * MU * mutator_speed / (gc_speed * (1 - MU))
+// substitute R = gc_speed / mutator_speed
+//   (Limit - Live) = Limit * MU  / (R * (1 - MU))
+// substitute F = Limit / Live
+//   F - 1 = F * MU  / (R * (1 - MU))
+//   F - F * MU / (R * (1 - MU)) = 1
+//   F * (1 - MU / (R * (1 - MU))) = 1
+//   F * (R * (1 - MU) - MU) / (R * (1 - MU)) = 1
+//   F = R * (1 - MU) / (R * (1 - MU) - MU)
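+//
+// For example (illustrative numbers only): with MU = 0.97 and R = 100,
+// F = 100 * 0.03 / (100 * 0.03 - 0.97) = 3.0 / 2.03, roughly 1.48; with
+// R = 50, F = 1.5 / 0.53, roughly 2.83. For R <= MU / (1 - MU), about 32.3,
+// the denominator is not positive and the code below clamps the factor to
+// kMaxHeapGrowingFactor.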
+double Heap::HeapGrowingFactor(double gc_speed, double mutator_speed) {
+  if (gc_speed == 0 || mutator_speed == 0) return kMaxHeapGrowingFactor;
+
+  const double speed_ratio = gc_speed / mutator_speed;
+  const double mu = kTargetMutatorUtilization;
+
+  const double a = speed_ratio * (1 - mu);
+  const double b = speed_ratio * (1 - mu) - mu;
+
+  // The factor is a / b, but we need to check for small b first.
+  double factor =
+      (a < b * kMaxHeapGrowingFactor) ? a / b : kMaxHeapGrowingFactor;
+  factor = Min(factor, kMaxHeapGrowingFactor);
+  factor = Max(factor, kMinHeapGrowingFactor);
+  return factor;
+}
+
+
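+// Turns a heap growing factor into a concrete old generation allocation
+// limit: old_gen_size * factor, bounded below by
+// old_gen_size + kMinimumOldGenerationAllocationLimit, plus the current new
+// space capacity, and capped at the halfway point between old_gen_size and
+// max_old_generation_size_. For example (illustrative numbers, assuming the
+// minimum-limit bound does not apply): old_gen_size = 128 MB, factor = 1.5,
+// new space capacity = 16 MB and max_old_generation_size_ = 700 MB give
+// min(128 * 1.5 + 16, (128 + 700) / 2) = min(208, 414) = 208 MB.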
+intptr_t Heap::CalculateOldGenerationAllocationLimit(double factor,
+                                                     intptr_t old_gen_size) {
+  CHECK(factor > 1.0);
+  CHECK(old_gen_size > 0);
   intptr_t limit = static_cast<intptr_t>(old_gen_size * factor);
-  limit = Max(limit, kMinimumOldGenerationAllocationLimit);
+  limit = Max(limit, old_gen_size + kMinimumOldGenerationAllocationLimit);
   limit += new_space_.Capacity();
   intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
   return Min(limit, halfway_to_the_max);
 }
 
 
+void Heap::SetOldGenerationAllocationLimit(intptr_t old_gen_size,
+                                           double gc_speed,
+                                           double mutator_speed) {
+  const double kConservativeHeapGrowingFactor = 1.3;
+
+  double factor = HeapGrowingFactor(gc_speed, mutator_speed);
+
+  if (FLAG_trace_gc_verbose) {
+    PrintIsolate(isolate_,
+                 "Heap growing factor %.1f based on mu=%.3f, speed_ratio=%.f "
+                 "(gc=%.f, mutator=%.f)\n",
+                 factor, kTargetMutatorUtilization, gc_speed / mutator_speed,
+                 gc_speed, mutator_speed);
+  }
+
+  // We set the old generation growing factor to 2 to grow the heap slower on
+  // memory-constrained devices.
+  if (max_old_generation_size_ <= kMaxOldSpaceSizeMediumMemoryDevice ||
+      FLAG_optimize_for_size) {
+    factor = Min(factor, kMaxHeapGrowingFactorMemoryConstrained);
+  }
+
+  if (memory_reducer_->ShouldGrowHeapSlowly() || optimize_for_memory_usage_) {
+    factor = Min(factor, kConservativeHeapGrowingFactor);
+  }
+
+  if (FLAG_stress_compaction || ShouldReduceMemory()) {
+    factor = kMinHeapGrowingFactor;
+  }
+
+  if (FLAG_heap_growing_percent > 0) {
+    factor = 1.0 + FLAG_heap_growing_percent / 100.0;
+  }
+
+  old_generation_allocation_limit_ =
+      CalculateOldGenerationAllocationLimit(factor, old_gen_size);
+
+  if (FLAG_trace_gc_verbose) {
+    PrintIsolate(isolate_, "Grow: old size: %" V8_PTR_PREFIX
+                           "d KB, new limit: %" V8_PTR_PREFIX "d KB (%.1f)\n",
+                 old_gen_size / KB, old_generation_allocation_limit_ / KB,
+                 factor);
+  }
+}
+
+
+void Heap::DampenOldGenerationAllocationLimit(intptr_t old_gen_size,
+                                              double gc_speed,
+                                              double mutator_speed) {
+  double factor = HeapGrowingFactor(gc_speed, mutator_speed);
+  intptr_t limit = CalculateOldGenerationAllocationLimit(factor, old_gen_size);
+  if (limit < old_generation_allocation_limit_) {
+    if (FLAG_trace_gc_verbose) {
+      PrintIsolate(isolate_, "Dampen: old size: %" V8_PTR_PREFIX
+                             "d KB, old limit: %" V8_PTR_PREFIX
+                             "d KB, "
+                             "new limit: %" V8_PTR_PREFIX "d KB (%.1f)\n",
+                   old_gen_size / KB, old_generation_allocation_limit_ / KB,
+                   limit / KB, factor);
+    }
+    old_generation_allocation_limit_ = limit;
+  }
+}
+
+
 void Heap::EnableInlineAllocation() {
   if (!inline_allocation_disabled_) return;
   inline_allocation_disabled_ = false;
@@ -5335,8 +5062,8 @@
 V8_DECLARE_ONCE(initialize_gc_once);
 
 static void InitializeGCOnce() {
-  InitializeScavengingVisitorsTables();
-  NewSpaceScavenger::Initialize();
+  Scavenger::Initialize();
+  StaticScavengeVisitor::Initialize();
   MarkCompactCollector::Initialize();
 }
 
@@ -5360,59 +5087,41 @@
 
   base::CallOnce(&initialize_gc_once, &InitializeGCOnce);
 
-  MarkMapPointersAsEncoded(false);
-
   // Set up memory allocator.
   if (!isolate_->memory_allocator()->SetUp(MaxReserved(), MaxExecutableSize()))
     return false;
 
+  // Initialize incremental marking.
+  incremental_marking_ = new IncrementalMarking(this);
+
   // Set up new space.
   if (!new_space_.SetUp(reserved_semispace_size_, max_semi_space_size_)) {
     return false;
   }
   new_space_top_after_last_gc_ = new_space()->top();
 
-  // Initialize old pointer space.
-  old_pointer_space_ = new OldSpace(this, max_old_generation_size_,
-                                    OLD_POINTER_SPACE, NOT_EXECUTABLE);
-  if (old_pointer_space_ == NULL) return false;
-  if (!old_pointer_space_->SetUp()) return false;
-
-  // Initialize old data space.
-  old_data_space_ = new OldSpace(this, max_old_generation_size_, OLD_DATA_SPACE,
-                                 NOT_EXECUTABLE);
-  if (old_data_space_ == NULL) return false;
-  if (!old_data_space_->SetUp()) return false;
+  // Initialize old space.
+  old_space_ = new OldSpace(this, OLD_SPACE, NOT_EXECUTABLE);
+  if (old_space_ == NULL) return false;
+  if (!old_space_->SetUp()) return false;
 
   if (!isolate_->code_range()->SetUp(code_range_size_)) return false;
 
   // Initialize the code space, set its maximum capacity to the old
   // generation size. It needs executable memory.
-  code_space_ =
-      new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
+  code_space_ = new OldSpace(this, CODE_SPACE, EXECUTABLE);
   if (code_space_ == NULL) return false;
   if (!code_space_->SetUp()) return false;
 
   // Initialize map space.
-  map_space_ = new MapSpace(this, max_old_generation_size_, MAP_SPACE);
+  map_space_ = new MapSpace(this, MAP_SPACE);
   if (map_space_ == NULL) return false;
   if (!map_space_->SetUp()) return false;
 
-  // Initialize simple cell space.
-  cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
-  if (cell_space_ == NULL) return false;
-  if (!cell_space_->SetUp()) return false;
-
-  // Initialize global property cell space.
-  property_cell_space_ = new PropertyCellSpace(this, max_old_generation_size_,
-                                               PROPERTY_CELL_SPACE);
-  if (property_cell_space_ == NULL) return false;
-  if (!property_cell_space_->SetUp()) return false;
-
   // The large object space may contain code or data.  We set the memory
   // to be non-executable here for safety, but this means we need to enable it
   // explicitly when allocating large code objects.
-  lo_space_ = new LargeObjectSpace(this, max_old_generation_size_, LO_SPACE);
+  lo_space_ = new LargeObjectSpace(this, LO_SPACE);
   if (lo_space_ == NULL) return false;
   if (!lo_space_->SetUp()) return false;
 
@@ -5427,6 +5136,28 @@
     }
   }
 
+  for (int i = 0; i < static_cast<int>(v8::Isolate::kUseCounterFeatureCount);
+       i++) {
+    deferred_counters_[i] = 0;
+  }
+
+  tracer_ = new GCTracer(this);
+
+  scavenge_collector_ = new Scavenger(this);
+
+  mark_compact_collector_ = new MarkCompactCollector(this);
+
+  gc_idle_time_handler_ = new GCIdleTimeHandler();
+
+  memory_reducer_ = new MemoryReducer(this);
+
+  object_stats_ = new ObjectStats(this);
+  object_stats_->ClearObjectStats(true);
+
+  scavenge_job_ = new ScavengeJob();
+
+  array_buffer_tracker_ = new ArrayBufferTracker(this);
+
   LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
   LOG(isolate_, IntPtrTEvent("heap-available", Available()));
 
@@ -5434,6 +5165,10 @@
 
   mark_compact_collector()->SetUp();
 
+  idle_scavenge_observer_ = new IdleScavengeObserver(
+      *this, ScavengeJob::kBytesAllocatedBeforeNextIdleTask);
+  new_space()->AddInlineAllocationObserver(idle_scavenge_observer_);
+
   return true;
 }
 
@@ -5445,12 +5180,11 @@
 
   // Create initial objects
   CreateInitialObjects();
-  CHECK_EQ(0, gc_count_);
+  CHECK_EQ(0u, gc_count_);
 
   set_native_contexts_list(undefined_value());
-  set_array_buffers_list(undefined_value());
   set_allocation_sites_list(undefined_value());
-  weak_object_to_code_table_ = undefined_value();
+
   return true;
 }
 
@@ -5470,7 +5204,23 @@
 }
 
 
-void Heap::NotifyDeserializationComplete() { deserialization_complete_ = true; }
+void Heap::PrintAlloctionsHash() {
+  uint32_t hash = StringHasher::GetHashCore(raw_allocations_hash_);
+  PrintF("\n### Allocations = %u, hash = 0x%08x\n", allocations_count(), hash);
+}
+
+
+void Heap::NotifyDeserializationComplete() {
+  deserialization_complete_ = true;
+#ifdef DEBUG
+  // All pages right after bootstrapping must be marked as never-evacuate.
+  PagedSpaces spaces(this);
+  for (PagedSpace* s = spaces.next(); s != NULL; s = spaces.next()) {
+    PageIterator it(s);
+    while (it.has_next()) CHECK(it.next()->NeverEvacuate());
+  }
+#endif  // DEBUG
+}
 
 
 void Heap::TearDown() {
@@ -5490,8 +5240,9 @@
     PrintF("total_gc_time=%.1f ", total_gc_time_ms_);
     PrintF("min_in_mutator=%.1f ", get_min_in_mutator());
     PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ", get_max_alive_after_gc());
-    PrintF("total_marking_time=%.1f ", tracer_.cumulative_marking_duration());
-    PrintF("total_sweeping_time=%.1f ", tracer_.cumulative_sweeping_duration());
+    PrintF("total_marking_time=%.1f ", tracer()->cumulative_marking_duration());
+    PrintF("total_sweeping_time=%.1f ",
+           tracer()->cumulative_sweeping_duration());
     PrintF("\n\n");
   }
 
@@ -5501,20 +5252,12 @@
            MaximumCommittedMemory());
     PrintF("maximum_committed_by_new_space=%" V8_PTR_PREFIX "d ",
            new_space_.MaximumCommittedMemory());
-    PrintF("maximum_committed_by_old_pointer_space=%" V8_PTR_PREFIX "d ",
-           old_data_space_->MaximumCommittedMemory());
-    PrintF("maximum_committed_by_old_data_space=%" V8_PTR_PREFIX "d ",
-           old_pointer_space_->MaximumCommittedMemory());
-    PrintF("maximum_committed_by_old_data_space=%" V8_PTR_PREFIX "d ",
-           old_pointer_space_->MaximumCommittedMemory());
+    PrintF("maximum_committed_by_old_space=%" V8_PTR_PREFIX "d ",
+           old_space_->MaximumCommittedMemory());
     PrintF("maximum_committed_by_code_space=%" V8_PTR_PREFIX "d ",
            code_space_->MaximumCommittedMemory());
     PrintF("maximum_committed_by_map_space=%" V8_PTR_PREFIX "d ",
            map_space_->MaximumCommittedMemory());
-    PrintF("maximum_committed_by_cell_space=%" V8_PTR_PREFIX "d ",
-           cell_space_->MaximumCommittedMemory());
-    PrintF("maximum_committed_by_property_space=%" V8_PTR_PREFIX "d ",
-           property_cell_space_->MaximumCommittedMemory());
     PrintF("maximum_committed_by_lo_space=%" V8_PTR_PREFIX "d ",
            lo_space_->MaximumCommittedMemory());
     PrintF("\n\n");
@@ -5524,52 +5267,66 @@
     PrintAlloctionsHash();
   }
 
-  TearDownArrayBuffers();
+  new_space()->RemoveInlineAllocationObserver(idle_scavenge_observer_);
+  delete idle_scavenge_observer_;
+  idle_scavenge_observer_ = nullptr;
+
+  delete scavenge_collector_;
+  scavenge_collector_ = nullptr;
+
+  if (mark_compact_collector_ != nullptr) {
+    mark_compact_collector_->TearDown();
+    delete mark_compact_collector_;
+    mark_compact_collector_ = nullptr;
+  }
+
+  delete incremental_marking_;
+  incremental_marking_ = nullptr;
+
+  delete gc_idle_time_handler_;
+  gc_idle_time_handler_ = nullptr;
+
+  if (memory_reducer_ != nullptr) {
+    memory_reducer_->TearDown();
+    delete memory_reducer_;
+    memory_reducer_ = nullptr;
+  }
+
+  delete object_stats_;
+  object_stats_ = nullptr;
+
+  delete scavenge_job_;
+  scavenge_job_ = nullptr;
+
+  WaitUntilUnmappingOfFreeChunksCompleted();
+
+  delete array_buffer_tracker_;
+  array_buffer_tracker_ = nullptr;
 
   isolate_->global_handles()->TearDown();
 
   external_string_table_.TearDown();
 
-  mark_compact_collector()->TearDown();
+  delete tracer_;
+  tracer_ = nullptr;
 
   new_space_.TearDown();
 
-  if (old_pointer_space_ != NULL) {
-    old_pointer_space_->TearDown();
-    delete old_pointer_space_;
-    old_pointer_space_ = NULL;
-  }
-
-  if (old_data_space_ != NULL) {
-    old_data_space_->TearDown();
-    delete old_data_space_;
-    old_data_space_ = NULL;
+  if (old_space_ != NULL) {
+    delete old_space_;
+    old_space_ = NULL;
   }
 
   if (code_space_ != NULL) {
-    code_space_->TearDown();
     delete code_space_;
     code_space_ = NULL;
   }
 
   if (map_space_ != NULL) {
-    map_space_->TearDown();
     delete map_space_;
     map_space_ = NULL;
   }
 
-  if (cell_space_ != NULL) {
-    cell_space_->TearDown();
-    delete cell_space_;
-    cell_space_ = NULL;
-  }
-
-  if (property_cell_space_ != NULL) {
-    property_cell_space_->TearDown();
-    delete property_cell_space_;
-    property_cell_space_ = NULL;
-  }
-
   if (lo_space_ != NULL) {
     lo_space_->TearDown();
     delete lo_space_;
@@ -5579,19 +5336,26 @@
   store_buffer()->TearDown();
 
   isolate_->memory_allocator()->TearDown();
+
+  StrongRootsList* next = NULL;
+  for (StrongRootsList* list = strong_roots_list_; list; list = next) {
+    next = list->next;
+    delete list;
+  }
+  strong_roots_list_ = NULL;
 }
 
 
-void Heap::AddGCPrologueCallback(v8::Isolate::GCPrologueCallback callback,
+void Heap::AddGCPrologueCallback(v8::Isolate::GCCallback callback,
                                  GCType gc_type, bool pass_isolate) {
   DCHECK(callback != NULL);
-  GCPrologueCallbackPair pair(callback, gc_type, pass_isolate);
+  GCCallbackPair pair(callback, gc_type, pass_isolate);
   DCHECK(!gc_prologue_callbacks_.Contains(pair));
   return gc_prologue_callbacks_.Add(pair);
 }
 
 
-void Heap::RemoveGCPrologueCallback(v8::Isolate::GCPrologueCallback callback) {
+void Heap::RemoveGCPrologueCallback(v8::Isolate::GCCallback callback) {
   DCHECK(callback != NULL);
   for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
     if (gc_prologue_callbacks_[i].callback == callback) {
@@ -5603,16 +5367,16 @@
 }
 
 
-void Heap::AddGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback,
+void Heap::AddGCEpilogueCallback(v8::Isolate::GCCallback callback,
                                  GCType gc_type, bool pass_isolate) {
   DCHECK(callback != NULL);
-  GCEpilogueCallbackPair pair(callback, gc_type, pass_isolate);
+  GCCallbackPair pair(callback, gc_type, pass_isolate);
   DCHECK(!gc_epilogue_callbacks_.Contains(pair));
   return gc_epilogue_callbacks_.Add(pair);
 }
 
 
-void Heap::RemoveGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback) {
+void Heap::RemoveGCEpilogueCallback(v8::Isolate::GCCallback callback) {
   DCHECK(callback != NULL);
   for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
     if (gc_epilogue_callbacks_[i].callback == callback) {
@@ -5625,38 +5389,66 @@
 
 
 // TODO(ishell): Find a better place for this.
-void Heap::AddWeakObjectToCodeDependency(Handle<Object> obj,
+void Heap::AddWeakObjectToCodeDependency(Handle<HeapObject> obj,
                                          Handle<DependentCode> dep) {
   DCHECK(!InNewSpace(*obj));
   DCHECK(!InNewSpace(*dep));
-  // This handle scope keeps the table handle local to this function, which
-  // allows us to safely skip write barriers in table update operations.
-  HandleScope scope(isolate());
-  Handle<WeakHashTable> table(WeakHashTable::cast(weak_object_to_code_table_),
-                              isolate());
+  Handle<WeakHashTable> table(weak_object_to_code_table(), isolate());
   table = WeakHashTable::Put(table, obj, dep);
-
-  if (ShouldZapGarbage() && weak_object_to_code_table_ != *table) {
-    WeakHashTable::cast(weak_object_to_code_table_)->Zap(the_hole_value());
-  }
-  set_weak_object_to_code_table(*table);
-  DCHECK_EQ(*dep, table->Lookup(obj));
+  if (*table != weak_object_to_code_table())
+    set_weak_object_to_code_table(*table);
+  DCHECK_EQ(*dep, LookupWeakObjectToCodeDependency(obj));
 }
 
 
-DependentCode* Heap::LookupWeakObjectToCodeDependency(Handle<Object> obj) {
-  Object* dep = WeakHashTable::cast(weak_object_to_code_table_)->Lookup(obj);
+DependentCode* Heap::LookupWeakObjectToCodeDependency(Handle<HeapObject> obj) {
+  Object* dep = weak_object_to_code_table()->Lookup(obj);
   if (dep->IsDependentCode()) return DependentCode::cast(dep);
   return DependentCode::cast(empty_fixed_array());
 }
 
 
-void Heap::EnsureWeakObjectToCodeTable() {
-  if (!weak_object_to_code_table()->IsHashTable()) {
-    set_weak_object_to_code_table(
-        *WeakHashTable::New(isolate(), 16, USE_DEFAULT_MINIMUM_CAPACITY,
-                            TENURED));
+void Heap::AddRetainedMap(Handle<Map> map) {
+  Handle<WeakCell> cell = Map::WeakCellForMap(map);
+  Handle<ArrayList> array(retained_maps(), isolate());
+  if (array->IsFull()) {
+    CompactRetainedMaps(*array);
   }
+  array = ArrayList::Add(
+      array, cell, handle(Smi::FromInt(FLAG_retain_maps_for_n_gc), isolate()),
+      ArrayList::kReloadLengthAfterAllocation);
+  if (*array != retained_maps()) {
+    set_retained_maps(*array);
+  }
+}
+
+
+void Heap::CompactRetainedMaps(ArrayList* retained_maps) {
+  DCHECK_EQ(retained_maps, this->retained_maps());
+  int length = retained_maps->Length();
+  int new_length = 0;
+  int new_number_of_disposed_maps = 0;
+  // This loop compacts the array by removing cleared weak cells.
+  for (int i = 0; i < length; i += 2) {
+    DCHECK(retained_maps->Get(i)->IsWeakCell());
+    WeakCell* cell = WeakCell::cast(retained_maps->Get(i));
+    Object* age = retained_maps->Get(i + 1);
+    if (cell->cleared()) continue;
+    if (i != new_length) {
+      retained_maps->Set(new_length, cell);
+      retained_maps->Set(new_length + 1, age);
+    }
+    if (i < number_of_disposed_maps_) {
+      new_number_of_disposed_maps += 2;
+    }
+    new_length += 2;
+  }
+  number_of_disposed_maps_ = new_number_of_disposed_maps;
+  Object* undefined = undefined_value();
+  for (int i = new_length; i < length; i++) {
+    retained_maps->Clear(i, undefined);
+  }
+  if (new_length != length) retained_maps->SetLength(new_length);
 }
 
 
@@ -5668,7 +5460,7 @@
 
 class PrintHandleVisitor : public ObjectVisitor {
  public:
-  void VisitPointers(Object** start, Object** end) {
+  void VisitPointers(Object** start, Object** end) override {
     for (Object** p = start; p < end; p++)
       PrintF("  handle %p to %p\n", reinterpret_cast<void*>(p),
              reinterpret_cast<void*>(*p));
@@ -5684,23 +5476,37 @@
 
 #endif
 
+class CheckHandleCountVisitor : public ObjectVisitor {
+ public:
+  CheckHandleCountVisitor() : handle_count_(0) {}
+  ~CheckHandleCountVisitor() override {
+    CHECK(handle_count_ < HandleScope::kCheckHandleThreshold);
+  }
+  void VisitPointers(Object** start, Object** end) override {
+    handle_count_ += end - start;
+  }
+
+ private:
+  ptrdiff_t handle_count_;
+};
+
+
+void Heap::CheckHandleCount() {
+  CheckHandleCountVisitor v;
+  isolate_->handle_scope_implementer()->Iterate(&v);
+}
+
 
 Space* AllSpaces::next() {
   switch (counter_++) {
     case NEW_SPACE:
       return heap_->new_space();
-    case OLD_POINTER_SPACE:
-      return heap_->old_pointer_space();
-    case OLD_DATA_SPACE:
-      return heap_->old_data_space();
+    case OLD_SPACE:
+      return heap_->old_space();
     case CODE_SPACE:
       return heap_->code_space();
     case MAP_SPACE:
       return heap_->map_space();
-    case CELL_SPACE:
-      return heap_->cell_space();
-    case PROPERTY_CELL_SPACE:
-      return heap_->property_cell_space();
     case LO_SPACE:
       return heap_->lo_space();
     default:
@@ -5711,18 +5517,12 @@
 
 PagedSpace* PagedSpaces::next() {
   switch (counter_++) {
-    case OLD_POINTER_SPACE:
-      return heap_->old_pointer_space();
-    case OLD_DATA_SPACE:
-      return heap_->old_data_space();
+    case OLD_SPACE:
+      return heap_->old_space();
     case CODE_SPACE:
       return heap_->code_space();
     case MAP_SPACE:
       return heap_->map_space();
-    case CELL_SPACE:
-      return heap_->cell_space();
-    case PROPERTY_CELL_SPACE:
-      return heap_->property_cell_space();
     default:
       return NULL;
   }
@@ -5731,10 +5531,8 @@
 
 OldSpace* OldSpaces::next() {
   switch (counter_++) {
-    case OLD_POINTER_SPACE:
-      return heap_->old_pointer_space();
-    case OLD_DATA_SPACE:
-      return heap_->old_data_space();
+    case OLD_SPACE:
+      return heap_->old_space();
     case CODE_SPACE:
       return heap_->code_space();
     default:
@@ -5744,17 +5542,7 @@
 
 
 SpaceIterator::SpaceIterator(Heap* heap)
-    : heap_(heap),
-      current_space_(FIRST_SPACE),
-      iterator_(NULL),
-      size_func_(NULL) {}
-
-
-SpaceIterator::SpaceIterator(Heap* heap, HeapObjectCallback size_func)
-    : heap_(heap),
-      current_space_(FIRST_SPACE),
-      iterator_(NULL),
-      size_func_(size_func) {}
+    : heap_(heap), current_space_(FIRST_SPACE), iterator_(NULL) {}
 
 
 SpaceIterator::~SpaceIterator() {
@@ -5791,30 +5579,19 @@
 
   switch (current_space_) {
     case NEW_SPACE:
-      iterator_ = new SemiSpaceIterator(heap_->new_space(), size_func_);
+      iterator_ = new SemiSpaceIterator(heap_->new_space());
       break;
-    case OLD_POINTER_SPACE:
-      iterator_ =
-          new HeapObjectIterator(heap_->old_pointer_space(), size_func_);
-      break;
-    case OLD_DATA_SPACE:
-      iterator_ = new HeapObjectIterator(heap_->old_data_space(), size_func_);
+    case OLD_SPACE:
+      iterator_ = new HeapObjectIterator(heap_->old_space());
       break;
     case CODE_SPACE:
-      iterator_ = new HeapObjectIterator(heap_->code_space(), size_func_);
+      iterator_ = new HeapObjectIterator(heap_->code_space());
       break;
     case MAP_SPACE:
-      iterator_ = new HeapObjectIterator(heap_->map_space(), size_func_);
-      break;
-    case CELL_SPACE:
-      iterator_ = new HeapObjectIterator(heap_->cell_space(), size_func_);
-      break;
-    case PROPERTY_CELL_SPACE:
-      iterator_ =
-          new HeapObjectIterator(heap_->property_cell_space(), size_func_);
+      iterator_ = new HeapObjectIterator(heap_->map_space());
       break;
     case LO_SPACE:
-      iterator_ = new LargeObjectIterator(heap_->lo_space(), size_func_);
+      iterator_ = new LargeObjectIterator(heap_->lo_space());
       break;
   }
 
@@ -5842,8 +5619,9 @@
   }
 
   bool SkipObject(HeapObject* object) {
+    if (object->IsFiller()) return true;
     MarkBit mark_bit = Marking::MarkBitFrom(object);
-    return !mark_bit.Get();
+    return Marking::IsWhite(mark_bit);
   }
 
  private:
@@ -5851,13 +5629,13 @@
    public:
     MarkingVisitor() : marking_stack_(10) {}
 
-    void VisitPointers(Object** start, Object** end) {
+    void VisitPointers(Object** start, Object** end) override {
       for (Object** p = start; p < end; p++) {
         if (!(*p)->IsHeapObject()) continue;
         HeapObject* obj = HeapObject::cast(*p);
         MarkBit mark_bit = Marking::MarkBitFrom(obj);
-        if (!mark_bit.Get()) {
-          mark_bit.Set();
+        if (Marking::IsWhite(mark_bit)) {
+          Marking::WhiteToBlack(mark_bit);
           marking_stack_.Add(obj);
         }
       }
@@ -5885,31 +5663,16 @@
 };
 
 
-HeapIterator::HeapIterator(Heap* heap)
-    : make_heap_iterable_helper_(heap),
-      no_heap_allocation_(),
-      heap_(heap),
-      filtering_(HeapIterator::kNoFiltering),
-      filter_(NULL) {
-  Init();
-}
-
-
 HeapIterator::HeapIterator(Heap* heap,
                            HeapIterator::HeapObjectsFiltering filtering)
     : make_heap_iterable_helper_(heap),
       no_heap_allocation_(),
       heap_(heap),
       filtering_(filtering),
-      filter_(NULL) {
-  Init();
-}
-
-
-HeapIterator::~HeapIterator() { Shutdown(); }
-
-
-void HeapIterator::Init() {
+      filter_(nullptr),
+      space_iterator_(nullptr),
+      object_iterator_(nullptr) {
+  heap_->heap_iterator_start();
   // Start the iteration.
   space_iterator_ = new SpaceIterator(heap_);
   switch (filtering_) {
@@ -5923,35 +5686,34 @@
 }
 
 
-void HeapIterator::Shutdown() {
+HeapIterator::~HeapIterator() {
+  heap_->heap_iterator_end();
 #ifdef DEBUG
   // Assert that in filtering mode we have iterated through all
   // objects. Otherwise, heap will be left in an inconsistent state.
   if (filtering_ != kNoFiltering) {
-    DCHECK(object_iterator_ == NULL);
+    DCHECK(object_iterator_ == nullptr);
   }
 #endif
   // Make sure the last iterator is deallocated.
+  delete object_iterator_;
   delete space_iterator_;
-  space_iterator_ = NULL;
-  object_iterator_ = NULL;
   delete filter_;
-  filter_ = NULL;
 }
 
 
 HeapObject* HeapIterator::next() {
-  if (filter_ == NULL) return NextObject();
+  if (filter_ == nullptr) return NextObject();
 
   HeapObject* obj = NextObject();
-  while (obj != NULL && filter_->SkipObject(obj)) obj = NextObject();
+  while ((obj != nullptr) && (filter_->SkipObject(obj))) obj = NextObject();
   return obj;
 }
 
 
 HeapObject* HeapIterator::NextObject() {
   // No iterator means we are done.
-  if (object_iterator_ == NULL) return NULL;
+  if (object_iterator_ == nullptr) return nullptr;
 
   if (HeapObject* obj = object_iterator_->next_object()) {
     // If the current iterator has more objects we are fine.
@@ -5966,15 +5728,8 @@
     }
   }
   // Done with the last space.
-  object_iterator_ = NULL;
-  return NULL;
-}
-
-
-void HeapIterator::reset() {
-  // Restart the iterator.
-  Shutdown();
-  Init();
+  object_iterator_ = nullptr;
+  return nullptr;
 }
 
 
@@ -5985,7 +5740,8 @@
 class PathTracer::MarkVisitor : public ObjectVisitor {
  public:
   explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
-  void VisitPointers(Object** start, Object** end) {
+
+  void VisitPointers(Object** start, Object** end) override {
     // Scan all HeapObject pointers in [start, end)
     for (Object** p = start; !tracer_->found() && (p < end); p++) {
       if ((*p)->IsHeapObject()) tracer_->MarkRecursively(p, this);
@@ -6000,7 +5756,8 @@
 class PathTracer::UnmarkVisitor : public ObjectVisitor {
  public:
   explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
-  void VisitPointers(Object** start, Object** end) {
+
+  void VisitPointers(Object** start, Object** end) override {
     // Scan all HeapObject pointers in [start, end)
     for (Object** p = start; p < end; p++) {
       if ((*p)->IsHeapObject()) tracer_->UnmarkRecursively(p, this);
@@ -6047,7 +5804,7 @@
 
 
 static bool SafeIsNativeContext(HeapObject* obj) {
-  return obj->map() == obj->GetHeap()->raw_unchecked_native_context_map();
+  return obj->map() == obj->GetHeap()->root(Heap::kNativeContextMapRootIndex);
 }
 
 
@@ -6256,7 +6013,7 @@
 }
 
 
-void ExternalStringTable::CleanUp() {
+void Heap::ExternalStringTable::CleanUp() {
   int last = 0;
   for (int i = 0; i < new_space_strings_.length(); ++i) {
     if (new_space_strings_[i] == heap_->the_hole_value()) {
@@ -6291,7 +6048,7 @@
 }
 
 
-void ExternalStringTable::TearDown() {
+void Heap::ExternalStringTable::TearDown() {
   for (int i = 0; i < new_space_strings_.length(); ++i) {
     heap_->FinalizeExternalString(ExternalString::cast(new_space_strings_[i]));
   }
@@ -6303,56 +6060,85 @@
 }
 
 
+class Heap::UnmapFreeMemoryTask : public v8::Task {
+ public:
+  UnmapFreeMemoryTask(Heap* heap, MemoryChunk* head)
+      : heap_(heap), head_(head) {}
+  virtual ~UnmapFreeMemoryTask() {}
+
+ private:
+  // v8::Task overrides.
+  void Run() override {
+    heap_->FreeQueuedChunks(head_);
+    heap_->pending_unmapping_tasks_semaphore_.Signal();
+  }
+
+  Heap* heap_;
+  MemoryChunk* head_;
+
+  DISALLOW_COPY_AND_ASSIGN(UnmapFreeMemoryTask);
+};
+
+
+void Heap::WaitUntilUnmappingOfFreeChunksCompleted() {
+  while (concurrent_unmapping_tasks_active_ > 0) {
+    pending_unmapping_tasks_semaphore_.Wait();
+    concurrent_unmapping_tasks_active_--;
+  }
+}
+
+
 void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
+  // PreFree logically frees the memory chunk. However, the actual freeing
+  // will happen on a separate thread sometime later.
+  isolate_->memory_allocator()->PreFreeMemory(chunk);
+
+  // The chunks added to this queue will be freed by a concurrent thread.
   chunk->set_next_chunk(chunks_queued_for_free_);
   chunks_queued_for_free_ = chunk;
 }
 
 
-void Heap::FreeQueuedChunks() {
+void Heap::FilterStoreBufferEntriesOnAboutToBeFreedPages() {
   if (chunks_queued_for_free_ == NULL) return;
   MemoryChunk* next;
   MemoryChunk* chunk;
   for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
     next = chunk->next_chunk();
     chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
+  }
+  store_buffer()->Compact();
+  store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
+}
 
-    if (chunk->owner()->identity() == LO_SPACE) {
-      // StoreBuffer::Filter relies on MemoryChunk::FromAnyPointerAddress.
-      // If FromAnyPointerAddress encounters a slot that belongs to a large
-      // chunk queued for deletion it will fail to find the chunk because
-      // it try to perform a search in the list of pages owned by of the large
-      // object space and queued chunks were detached from that list.
-      // To work around this we split large chunk into normal kPageSize aligned
-      // pieces and initialize size, owner and flags field of every piece.
-      // If FromAnyPointerAddress encounters a slot that belongs to one of
-      // these smaller pieces it will treat it as a slot on a normal Page.
-      Address chunk_end = chunk->address() + chunk->size();
-      MemoryChunk* inner =
-          MemoryChunk::FromAddress(chunk->address() + Page::kPageSize);
-      MemoryChunk* inner_last = MemoryChunk::FromAddress(chunk_end - 1);
-      while (inner <= inner_last) {
-        // Size of a large chunk is always a multiple of
-        // OS::AllocateAlignment() so there is always
-        // enough space for a fake MemoryChunk header.
-        Address area_end = Min(inner->address() + Page::kPageSize, chunk_end);
-        // Guard against overflow.
-        if (area_end < inner->address()) area_end = chunk_end;
-        inner->SetArea(inner->address(), area_end);
-        inner->set_size(Page::kPageSize);
-        inner->set_owner(lo_space());
-        inner->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
-        inner = MemoryChunk::FromAddress(inner->address() + Page::kPageSize);
-      }
+
+void Heap::FreeQueuedChunks() {
+  if (chunks_queued_for_free_ != NULL) {
+    if (FLAG_concurrent_sweeping) {
+      V8::GetCurrentPlatform()->CallOnBackgroundThread(
+          new UnmapFreeMemoryTask(this, chunks_queued_for_free_),
+          v8::Platform::kShortRunningTask);
+    } else {
+      FreeQueuedChunks(chunks_queued_for_free_);
+      pending_unmapping_tasks_semaphore_.Signal();
     }
+    chunks_queued_for_free_ = NULL;
+  } else {
+    // If we do not have anything to unmap, we just signal the semaphore
+    // that we are done.
+    pending_unmapping_tasks_semaphore_.Signal();
   }
-  isolate_->heap()->store_buffer()->Compact();
-  isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
-  for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
+  concurrent_unmapping_tasks_active_++;
+}
+
+
+void Heap::FreeQueuedChunks(MemoryChunk* list_head) {
+  MemoryChunk* next;
+  MemoryChunk* chunk;
+  for (chunk = list_head; chunk != NULL; chunk = next) {
     next = chunk->next_chunk();
-    isolate_->memory_allocator()->Free(chunk);
+    isolate_->memory_allocator()->PerformFreeMemory(chunk);
   }
-  chunks_queued_for_free_ = NULL;
 }
 
 
@@ -6371,76 +6157,95 @@
 }
 
 
-void Heap::ClearObjectStats(bool clear_last_time_stats) {
-  memset(object_counts_, 0, sizeof(object_counts_));
-  memset(object_sizes_, 0, sizeof(object_sizes_));
-  if (clear_last_time_stats) {
-    memset(object_counts_last_time_, 0, sizeof(object_counts_last_time_));
-    memset(object_sizes_last_time_, 0, sizeof(object_sizes_last_time_));
+void Heap::RegisterStrongRoots(Object** start, Object** end) {
+  StrongRootsList* list = new StrongRootsList();
+  list->next = strong_roots_list_;
+  list->start = start;
+  list->end = end;
+  strong_roots_list_ = list;
+}
+
+
+void Heap::UnregisterStrongRoots(Object** start) {
+  StrongRootsList* prev = NULL;
+  StrongRootsList* list = strong_roots_list_;
+  while (list != nullptr) {
+    StrongRootsList* next = list->next;
+    if (list->start == start) {
+      if (prev) {
+        prev->next = next;
+      } else {
+        strong_roots_list_ = next;
+      }
+      delete list;
+    } else {
+      prev = list;
+    }
+    list = next;
   }
 }
 
 
-static base::LazyMutex checkpoint_object_stats_mutex = LAZY_MUTEX_INITIALIZER;
-
-
-void Heap::CheckpointObjectStats() {
-  base::LockGuard<base::Mutex> lock_guard(
-      checkpoint_object_stats_mutex.Pointer());
-  Counters* counters = isolate()->counters();
-#define ADJUST_LAST_TIME_OBJECT_COUNT(name)              \
-  counters->count_of_##name()->Increment(                \
-      static_cast<int>(object_counts_[name]));           \
-  counters->count_of_##name()->Decrement(                \
-      static_cast<int>(object_counts_last_time_[name])); \
-  counters->size_of_##name()->Increment(                 \
-      static_cast<int>(object_sizes_[name]));            \
-  counters->size_of_##name()->Decrement(                 \
-      static_cast<int>(object_sizes_last_time_[name]));
-  INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
-#undef ADJUST_LAST_TIME_OBJECT_COUNT
-  int index;
-#define ADJUST_LAST_TIME_OBJECT_COUNT(name)               \
-  index = FIRST_CODE_KIND_SUB_TYPE + Code::name;          \
-  counters->count_of_CODE_TYPE_##name()->Increment(       \
-      static_cast<int>(object_counts_[index]));           \
-  counters->count_of_CODE_TYPE_##name()->Decrement(       \
-      static_cast<int>(object_counts_last_time_[index])); \
-  counters->size_of_CODE_TYPE_##name()->Increment(        \
-      static_cast<int>(object_sizes_[index]));            \
-  counters->size_of_CODE_TYPE_##name()->Decrement(        \
-      static_cast<int>(object_sizes_last_time_[index]));
-  CODE_KIND_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
-#undef ADJUST_LAST_TIME_OBJECT_COUNT
-#define ADJUST_LAST_TIME_OBJECT_COUNT(name)               \
-  index = FIRST_FIXED_ARRAY_SUB_TYPE + name;              \
-  counters->count_of_FIXED_ARRAY_##name()->Increment(     \
-      static_cast<int>(object_counts_[index]));           \
-  counters->count_of_FIXED_ARRAY_##name()->Decrement(     \
-      static_cast<int>(object_counts_last_time_[index])); \
-  counters->size_of_FIXED_ARRAY_##name()->Increment(      \
-      static_cast<int>(object_sizes_[index]));            \
-  counters->size_of_FIXED_ARRAY_##name()->Decrement(      \
-      static_cast<int>(object_sizes_last_time_[index]));
-  FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
-#undef ADJUST_LAST_TIME_OBJECT_COUNT
-#define ADJUST_LAST_TIME_OBJECT_COUNT(name)                                   \
-  index =                                                                     \
-      FIRST_CODE_AGE_SUB_TYPE + Code::k##name##CodeAge - Code::kFirstCodeAge; \
-  counters->count_of_CODE_AGE_##name()->Increment(                            \
-      static_cast<int>(object_counts_[index]));                               \
-  counters->count_of_CODE_AGE_##name()->Decrement(                            \
-      static_cast<int>(object_counts_last_time_[index]));                     \
-  counters->size_of_CODE_AGE_##name()->Increment(                             \
-      static_cast<int>(object_sizes_[index]));                                \
-  counters->size_of_CODE_AGE_##name()->Decrement(                             \
-      static_cast<int>(object_sizes_last_time_[index]));
-  CODE_AGE_LIST_COMPLETE(ADJUST_LAST_TIME_OBJECT_COUNT)
-#undef ADJUST_LAST_TIME_OBJECT_COUNT
-
-  MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
-  MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
-  ClearObjectStats();
+size_t Heap::NumberOfTrackedHeapObjectTypes() {
+  return ObjectStats::OBJECT_STATS_COUNT;
 }
+
+
+size_t Heap::ObjectCountAtLastGC(size_t index) {
+  if (index >= ObjectStats::OBJECT_STATS_COUNT) return 0;
+  return object_stats_->object_count_last_gc(index);
 }
-}  // namespace v8::internal
+
+
+size_t Heap::ObjectSizeAtLastGC(size_t index) {
+  if (index >= ObjectStats::OBJECT_STATS_COUNT) return 0;
+  return object_stats_->object_size_last_gc(index);
+}
+
+
+bool Heap::GetObjectTypeName(size_t index, const char** object_type,
+                             const char** object_sub_type) {
+  if (index >= ObjectStats::OBJECT_STATS_COUNT) return false;
+
+  switch (static_cast<int>(index)) {
+#define COMPARE_AND_RETURN_NAME(name) \
+  case name:                          \
+    *object_type = #name;             \
+    *object_sub_type = "";            \
+    return true;
+    INSTANCE_TYPE_LIST(COMPARE_AND_RETURN_NAME)
+#undef COMPARE_AND_RETURN_NAME
+#define COMPARE_AND_RETURN_NAME(name)                      \
+  case ObjectStats::FIRST_CODE_KIND_SUB_TYPE + Code::name: \
+    *object_type = "CODE_TYPE";                            \
+    *object_sub_type = "CODE_KIND/" #name;                 \
+    return true;
+    CODE_KIND_LIST(COMPARE_AND_RETURN_NAME)
+#undef COMPARE_AND_RETURN_NAME
+#define COMPARE_AND_RETURN_NAME(name)                  \
+  case ObjectStats::FIRST_FIXED_ARRAY_SUB_TYPE + name: \
+    *object_type = "FIXED_ARRAY_TYPE";                 \
+    *object_sub_type = #name;                          \
+    return true;
+    FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(COMPARE_AND_RETURN_NAME)
+#undef COMPARE_AND_RETURN_NAME
+#define COMPARE_AND_RETURN_NAME(name)                                  \
+  case ObjectStats::FIRST_CODE_AGE_SUB_TYPE + Code::k##name##CodeAge - \
+      Code::kFirstCodeAge:                                             \
+    *object_type = "CODE_TYPE";                                        \
+    *object_sub_type = "CODE_AGE/" #name;                              \
+    return true;
+    CODE_AGE_LIST_COMPLETE(COMPARE_AND_RETURN_NAME)
+#undef COMPARE_AND_RETURN_NAME
+  }
+  return false;
+}
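
GetObjectTypeName() above is a typical consumer of V8's X-macro lists: the same V(...) list that assigns each entry an index is re-expanded into case labels that map the index back to a printable name. A minimal, self-contained sketch (not part of this patch) of the idiom, with FRUIT_LIST and its entries invented for illustration:

#include <cstdio>

#define FRUIT_LIST(V) \
  V(APPLE)            \
  V(BANANA)           \
  V(CHERRY)

// First expansion: assign each list entry a dense index.
enum FruitIndex {
#define DECLARE_INDEX(name) k##name##Index,
  FRUIT_LIST(DECLARE_INDEX)
#undef DECLARE_INDEX
  kFruitCount
};

// Second expansion: map an index back to its name, one case label per entry.
const char* FruitName(int index) {
  switch (index) {
#define COMPARE_AND_RETURN_NAME(name) \
  case k##name##Index:                \
    return #name;
    FRUIT_LIST(COMPARE_AND_RETURN_NAME)
#undef COMPARE_AND_RETURN_NAME
  }
  return "unknown";
}

int main() {
  for (int i = 0; i < kFruitCount; i++) printf("%d -> %s\n", i, FruitName(i));
  return 0;
}
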
+
+
+// static
+int Heap::GetStaticVisitorIdForMap(Map* map) {
+  return StaticVisitorBase::GetVisitorId(map);
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/heap/heap.h b/src/heap/heap.h
index e6ccf2e..af9d0a6 100644
--- a/src/heap/heap.h
+++ b/src/heap/heap.h
@@ -6,20 +6,18 @@
 #define V8_HEAP_HEAP_H_
 
 #include <cmath>
+#include <map>
 
+// Clients of this interface shouldn't depend on lots of heap internals.
+// Do not include anything from src/heap here!
 #include "src/allocation.h"
 #include "src/assert-scope.h"
-#include "src/counters.h"
+#include "src/atomic-utils.h"
 #include "src/globals.h"
-#include "src/heap/gc-idle-time-handler.h"
-#include "src/heap/gc-tracer.h"
-#include "src/heap/incremental-marking.h"
-#include "src/heap/mark-compact.h"
-#include "src/heap/objects-visiting.h"
+// TODO(mstarzinger): Two more includes to kill!
 #include "src/heap/spaces.h"
 #include "src/heap/store-buffer.h"
 #include "src/list.h"
-#include "src/splay-tree-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -37,33 +35,46 @@
   V(Oddball, null_value, NullValue)                                            \
   V(Oddball, true_value, TrueValue)                                            \
   V(Oddball, false_value, FalseValue)                                          \
+  V(String, empty_string, empty_string)                                        \
+  V(String, hidden_string, hidden_string)                                      \
   V(Oddball, uninitialized_value, UninitializedValue)                          \
-  V(Oddball, exception, Exception)                                             \
   V(Map, cell_map, CellMap)                                                    \
   V(Map, global_property_cell_map, GlobalPropertyCellMap)                      \
   V(Map, shared_function_info_map, SharedFunctionInfoMap)                      \
   V(Map, meta_map, MetaMap)                                                    \
   V(Map, heap_number_map, HeapNumberMap)                                       \
   V(Map, mutable_heap_number_map, MutableHeapNumberMap)                        \
+  V(Map, float32x4_map, Float32x4Map)                                          \
+  V(Map, int32x4_map, Int32x4Map)                                              \
+  V(Map, uint32x4_map, Uint32x4Map)                                            \
+  V(Map, bool32x4_map, Bool32x4Map)                                            \
+  V(Map, int16x8_map, Int16x8Map)                                              \
+  V(Map, uint16x8_map, Uint16x8Map)                                            \
+  V(Map, bool16x8_map, Bool16x8Map)                                            \
+  V(Map, int8x16_map, Int8x16Map)                                              \
+  V(Map, uint8x16_map, Uint8x16Map)                                            \
+  V(Map, bool8x16_map, Bool8x16Map)                                            \
   V(Map, native_context_map, NativeContextMap)                                 \
   V(Map, fixed_array_map, FixedArrayMap)                                       \
   V(Map, code_map, CodeMap)                                                    \
   V(Map, scope_info_map, ScopeInfoMap)                                         \
   V(Map, fixed_cow_array_map, FixedCOWArrayMap)                                \
   V(Map, fixed_double_array_map, FixedDoubleArrayMap)                          \
-  V(Map, constant_pool_array_map, ConstantPoolArrayMap)                        \
   V(Map, weak_cell_map, WeakCellMap)                                           \
-  V(Oddball, no_interceptor_result_sentinel, NoInterceptorResultSentinel)      \
-  V(Map, hash_table_map, HashTableMap)                                         \
-  V(Map, ordered_hash_table_map, OrderedHashTableMap)                          \
+  V(Map, transition_array_map, TransitionArrayMap)                             \
+  V(Map, one_byte_string_map, OneByteStringMap)                                \
+  V(Map, one_byte_internalized_string_map, OneByteInternalizedStringMap)       \
+  V(Map, function_context_map, FunctionContextMap)                             \
   V(FixedArray, empty_fixed_array, EmptyFixedArray)                            \
   V(ByteArray, empty_byte_array, EmptyByteArray)                               \
   V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray)             \
-  V(ConstantPoolArray, empty_constant_pool_array, EmptyConstantPoolArray)      \
-  V(Oddball, arguments_marker, ArgumentsMarker)                                \
   /* The roots above this line should be boring from a GC point of view.    */ \
   /* This means they are never in new space and never on a page that is     */ \
   /* being compacted.                                                       */ \
+  V(Oddball, no_interceptor_result_sentinel, NoInterceptorResultSentinel)      \
+  V(Oddball, arguments_marker, ArgumentsMarker)                                \
+  V(Oddball, exception, Exception)                                             \
+  V(Oddball, termination_exception, TerminationException)                      \
   V(FixedArray, number_string_cache, NumberStringCache)                        \
   V(Object, instanceof_cache_function, InstanceofCacheFunction)                \
   V(Object, instanceof_cache_map, InstanceofCacheMap)                          \
@@ -71,13 +82,13 @@
   V(FixedArray, single_character_string_cache, SingleCharacterStringCache)     \
   V(FixedArray, string_split_cache, StringSplitCache)                          \
   V(FixedArray, regexp_multiple_cache, RegExpMultipleCache)                    \
-  V(Oddball, termination_exception, TerminationException)                      \
   V(Smi, hash_seed, HashSeed)                                                  \
+  V(Map, hash_table_map, HashTableMap)                                         \
+  V(Map, ordered_hash_table_map, OrderedHashTableMap)                          \
   V(Map, symbol_map, SymbolMap)                                                \
   V(Map, string_map, StringMap)                                                \
-  V(Map, one_byte_string_map, OneByteStringMap)                                \
-  V(Map, cons_string_map, ConsStringMap)                                       \
   V(Map, cons_one_byte_string_map, ConsOneByteStringMap)                       \
+  V(Map, cons_string_map, ConsStringMap)                                       \
   V(Map, sliced_string_map, SlicedStringMap)                                   \
   V(Map, sliced_one_byte_string_map, SlicedOneByteStringMap)                   \
   V(Map, external_string_map, ExternalStringMap)                               \
@@ -89,7 +100,6 @@
   V(Map, short_external_string_with_one_byte_data_map,                         \
     ShortExternalStringWithOneByteDataMap)                                     \
   V(Map, internalized_string_map, InternalizedStringMap)                       \
-  V(Map, one_byte_internalized_string_map, OneByteInternalizedStringMap)       \
   V(Map, external_internalized_string_map, ExternalInternalizedStringMap)      \
   V(Map, external_internalized_string_with_one_byte_data_map,                  \
     ExternalInternalizedStringWithOneByteDataMap)                              \
@@ -102,27 +112,6 @@
   V(Map, short_external_one_byte_internalized_string_map,                      \
     ShortExternalOneByteInternalizedStringMap)                                 \
   V(Map, short_external_one_byte_string_map, ShortExternalOneByteStringMap)    \
-  V(Map, undetectable_string_map, UndetectableStringMap)                       \
-  V(Map, undetectable_one_byte_string_map, UndetectableOneByteStringMap)       \
-  V(Map, external_int8_array_map, ExternalInt8ArrayMap)                        \
-  V(Map, external_uint8_array_map, ExternalUint8ArrayMap)                      \
-  V(Map, external_int16_array_map, ExternalInt16ArrayMap)                      \
-  V(Map, external_uint16_array_map, ExternalUint16ArrayMap)                    \
-  V(Map, external_int32_array_map, ExternalInt32ArrayMap)                      \
-  V(Map, external_uint32_array_map, ExternalUint32ArrayMap)                    \
-  V(Map, external_float32_array_map, ExternalFloat32ArrayMap)                  \
-  V(Map, external_float64_array_map, ExternalFloat64ArrayMap)                  \
-  V(Map, external_uint8_clamped_array_map, ExternalUint8ClampedArrayMap)       \
-  V(ExternalArray, empty_external_int8_array, EmptyExternalInt8Array)          \
-  V(ExternalArray, empty_external_uint8_array, EmptyExternalUint8Array)        \
-  V(ExternalArray, empty_external_int16_array, EmptyExternalInt16Array)        \
-  V(ExternalArray, empty_external_uint16_array, EmptyExternalUint16Array)      \
-  V(ExternalArray, empty_external_int32_array, EmptyExternalInt32Array)        \
-  V(ExternalArray, empty_external_uint32_array, EmptyExternalUint32Array)      \
-  V(ExternalArray, empty_external_float32_array, EmptyExternalFloat32Array)    \
-  V(ExternalArray, empty_external_float64_array, EmptyExternalFloat64Array)    \
-  V(ExternalArray, empty_external_uint8_clamped_array,                         \
-    EmptyExternalUint8ClampedArray)                                            \
   V(Map, fixed_uint8_array_map, FixedUint8ArrayMap)                            \
   V(Map, fixed_int8_array_map, FixedInt8ArrayMap)                              \
   V(Map, fixed_uint16_array_map, FixedUint16ArrayMap)                          \
@@ -143,7 +132,6 @@
   V(FixedTypedArrayBase, empty_fixed_uint8_clamped_array,                      \
     EmptyFixedUint8ClampedArray)                                               \
   V(Map, sloppy_arguments_elements_map, SloppyArgumentsElementsMap)            \
-  V(Map, function_context_map, FunctionContextMap)                             \
   V(Map, catch_context_map, CatchContextMap)                                   \
   V(Map, with_context_map, WithContextMap)                                     \
   V(Map, block_context_map, BlockContextMap)                                   \
@@ -161,10 +149,12 @@
   V(Map, termination_exception_map, TerminationExceptionMap)                   \
   V(Map, message_object_map, JSMessageObjectMap)                               \
   V(Map, foreign_map, ForeignMap)                                              \
+  V(Map, neander_map, NeanderMap)                                              \
+  V(Map, external_map, ExternalMap)                                            \
   V(HeapNumber, nan_value, NanValue)                                           \
   V(HeapNumber, infinity_value, InfinityValue)                                 \
   V(HeapNumber, minus_zero_value, MinusZeroValue)                              \
-  V(Map, neander_map, NeanderMap)                                              \
+  V(HeapNumber, minus_infinity_value, MinusInfinityValue)                      \
   V(JSObject, message_listeners, MessageListeners)                             \
   V(UnseededNumberDictionary, code_stubs, CodeStubs)                           \
   V(UnseededNumberDictionary, non_monomorphic_cache, NonMonomorphicCache)      \
@@ -172,17 +162,36 @@
   V(Code, js_entry_code, JsEntryCode)                                          \
   V(Code, js_construct_entry_code, JsConstructEntryCode)                       \
   V(FixedArray, natives_source_cache, NativesSourceCache)                      \
+  V(FixedArray, experimental_natives_source_cache,                             \
+    ExperimentalNativesSourceCache)                                            \
+  V(FixedArray, extra_natives_source_cache, ExtraNativesSourceCache)           \
+  V(FixedArray, experimental_extra_natives_source_cache,                       \
+    ExperimentalExtraNativesSourceCache)                                       \
   V(Script, empty_script, EmptyScript)                                         \
   V(NameDictionary, intrinsic_function_names, IntrinsicFunctionNames)          \
-  V(Cell, undefined_cell, UndefineCell)                                        \
+  V(NameDictionary, empty_properties_dictionary, EmptyPropertiesDictionary)    \
+  V(Cell, undefined_cell, UndefinedCell)                                       \
   V(JSObject, observation_state, ObservationState)                             \
-  V(Map, external_map, ExternalMap)                                            \
   V(Object, symbol_registry, SymbolRegistry)                                   \
+  V(Object, script_list, ScriptList)                                           \
   V(SeededNumberDictionary, empty_slow_element_dictionary,                     \
     EmptySlowElementDictionary)                                                \
   V(FixedArray, materialized_objects, MaterializedObjects)                     \
-  V(FixedArray, allocation_sites_scratchpad, AllocationSitesScratchpad)        \
-  V(FixedArray, microtask_queue, MicrotaskQueue)
+  V(FixedArray, microtask_queue, MicrotaskQueue)                               \
+  V(TypeFeedbackVector, dummy_vector, DummyVector)                             \
+  V(FixedArray, cleared_optimized_code_map, ClearedOptimizedCodeMap)           \
+  V(FixedArray, detached_contexts, DetachedContexts)                           \
+  V(ArrayList, retained_maps, RetainedMaps)                                    \
+  V(WeakHashTable, weak_object_to_code_table, WeakObjectToCodeTable)           \
+  V(PropertyCell, array_protector, ArrayProtector)                             \
+  V(PropertyCell, empty_property_cell, EmptyPropertyCell)                      \
+  V(Object, weak_stack_trace_list, WeakStackTraceList)                         \
+  V(Object, noscript_shared_function_infos, NoScriptSharedFunctionInfos)       \
+  V(FixedArray, interpreter_table, InterpreterTable)                           \
+  V(Map, bytecode_array_map, BytecodeArrayMap)                                 \
+  V(WeakCell, empty_weak_cell, EmptyWeakCell)                                  \
+  V(BytecodeArray, empty_bytecode_array, EmptyBytecodeArray)
+
 
 // Entries in this list are limited to Smis and are not visited during GC.
 #define SMI_ROOT_LIST(V)                                                   \
@@ -194,127 +203,208 @@
   V(Smi, getter_stub_deopt_pc_offset, GetterStubDeoptPCOffset)             \
   V(Smi, setter_stub_deopt_pc_offset, SetterStubDeoptPCOffset)
 
+
 #define ROOT_LIST(V)  \
   STRONG_ROOT_LIST(V) \
   SMI_ROOT_LIST(V)    \
   V(StringTable, string_table, StringTable)
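
Each (Type, name, CamelName) entry in the root lists above is expanded more than once in this header: into the RootListIndex enum and, elsewhere, into typed accessors over a single roots_ array (the ROOT_INDEX_DECLARATION and ROOT_ACCESSOR expansions appear later in this diff). A minimal, self-contained sketch (not part of this patch) of that double expansion; MiniHeap, MINI_ROOT_LIST and the stand-in Object/Smi/HeapNumber types are invented for illustration.

struct Object {};
struct Smi : Object { int value = 42; };
struct HeapNumber : Object { double value = 0.5; };

#define MINI_ROOT_LIST(V)            \
  V(Smi, hash_seed, HashSeed)        \
  V(HeapNumber, nan_value, NanValue)

class MiniHeap {
 public:
  // Expansion 1: one enum constant per root, in list order.
  enum RootListIndex {
#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
    MINI_ROOT_LIST(ROOT_INDEX_DECLARATION)
#undef ROOT_INDEX_DECLARATION
    kRootListLength
  };

  // Expansion 2: one typed accessor per root over the shared roots_ array.
#define ROOT_ACCESSOR(type, name, camel_name) \
  type* name() { return static_cast<type*>(roots_[k##camel_name##RootIndex]); }
  MINI_ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR

  MiniHeap() {
    roots_[kHashSeedRootIndex] = &hash_seed_storage_;
    roots_[kNanValueRootIndex] = &nan_value_storage_;
  }

 private:
  Smi hash_seed_storage_;
  HeapNumber nan_value_storage_;
  Object* roots_[kRootListLength];
};

int main() {
  MiniHeap heap;
  return heap.hash_seed()->value == 42 ? 0 : 1;
}
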
 
-#define INTERNALIZED_STRING_LIST(V)                        \
-  V(Object_string, "Object")                               \
-  V(proto_string, "__proto__")                             \
-  V(arguments_string, "arguments")                         \
-  V(Arguments_string, "Arguments")                         \
-  V(caller_string, "caller")                               \
-  V(boolean_string, "boolean")                             \
-  V(Boolean_string, "Boolean")                             \
-  V(callee_string, "callee")                               \
-  V(constructor_string, "constructor")                     \
-  V(dot_result_string, ".result")                          \
-  V(eval_string, "eval")                                   \
-  V(empty_string, "")                                      \
-  V(function_string, "function")                           \
-  V(Function_string, "Function")                           \
-  V(length_string, "length")                               \
-  V(name_string, "name")                                   \
-  V(null_string, "null")                                   \
-  V(number_string, "number")                               \
-  V(Number_string, "Number")                               \
-  V(nan_string, "NaN")                                     \
-  V(source_string, "source")                               \
-  V(source_url_string, "source_url")                       \
-  V(source_mapping_url_string, "source_mapping_url")       \
-  V(global_string, "global")                               \
-  V(ignore_case_string, "ignoreCase")                      \
-  V(multiline_string, "multiline")                         \
-  V(sticky_string, "sticky")                               \
-  V(harmony_regexps_string, "harmony_regexps")             \
-  V(input_string, "input")                                 \
-  V(index_string, "index")                                 \
-  V(last_index_string, "lastIndex")                        \
-  V(object_string, "object")                               \
-  V(prototype_string, "prototype")                         \
-  V(string_string, "string")                               \
-  V(String_string, "String")                               \
-  V(symbol_string, "symbol")                               \
-  V(Symbol_string, "Symbol")                               \
-  V(Map_string, "Map")                                     \
-  V(Set_string, "Set")                                     \
-  V(WeakMap_string, "WeakMap")                             \
-  V(WeakSet_string, "WeakSet")                             \
-  V(for_string, "for")                                     \
-  V(for_api_string, "for_api")                             \
-  V(for_intern_string, "for_intern")                       \
-  V(private_api_string, "private_api")                     \
-  V(private_intern_string, "private_intern")               \
-  V(Date_string, "Date")                                   \
-  V(char_at_string, "CharAt")                              \
-  V(undefined_string, "undefined")                         \
-  V(value_of_string, "valueOf")                            \
-  V(stack_string, "stack")                                 \
-  V(toJSON_string, "toJSON")                               \
-  V(KeyedLoadMonomorphic_string, "KeyedLoadMonomorphic")   \
-  V(KeyedStoreMonomorphic_string, "KeyedStoreMonomorphic") \
-  V(stack_overflow_string, "kStackOverflowBoilerplate")    \
-  V(illegal_access_string, "illegal access")               \
-  V(cell_value_string, "%cell_value")                      \
-  V(illegal_argument_string, "illegal argument")           \
-  V(identity_hash_string, "v8::IdentityHash")              \
-  V(closure_string, "(closure)")                           \
-  V(dot_string, ".")                                       \
-  V(compare_ic_string, "==")                               \
-  V(strict_compare_ic_string, "===")                       \
-  V(infinity_string, "Infinity")                           \
-  V(minus_infinity_string, "-Infinity")                    \
-  V(query_colon_string, "(?:)")                            \
-  V(Generator_string, "Generator")                         \
-  V(throw_string, "throw")                                 \
-  V(done_string, "done")                                   \
-  V(value_string, "value")                                 \
-  V(next_string, "next")                                   \
-  V(byte_length_string, "byteLength")                      \
-  V(byte_offset_string, "byteOffset")                      \
-  V(minus_zero_string, "-0")                               \
-  V(Array_string, "Array")                                 \
-  V(Error_string, "Error")                                 \
-  V(RegExp_string, "RegExp")
+#define INTERNALIZED_STRING_LIST(V)                              \
+  V(anonymous_string, "anonymous")                               \
+  V(apply_string, "apply")                                       \
+  V(assign_string, "assign")                                     \
+  V(arguments_string, "arguments")                               \
+  V(Arguments_string, "Arguments")                               \
+  V(Array_string, "Array")                                       \
+  V(bind_string, "bind")                                         \
+  V(bool16x8_string, "bool16x8")                                 \
+  V(Bool16x8_string, "Bool16x8")                                 \
+  V(bool32x4_string, "bool32x4")                                 \
+  V(Bool32x4_string, "Bool32x4")                                 \
+  V(bool8x16_string, "bool8x16")                                 \
+  V(Bool8x16_string, "Bool8x16")                                 \
+  V(boolean_string, "boolean")                                   \
+  V(Boolean_string, "Boolean")                                   \
+  V(bound__string, "bound ")                                     \
+  V(byte_length_string, "byteLength")                            \
+  V(byte_offset_string, "byteOffset")                            \
+  V(call_string, "call")                                         \
+  V(callee_string, "callee")                                     \
+  V(caller_string, "caller")                                     \
+  V(cell_value_string, "%cell_value")                            \
+  V(char_at_string, "CharAt")                                    \
+  V(closure_string, "(closure)")                                 \
+  V(compare_ic_string, "==")                                     \
+  V(configurable_string, "configurable")                         \
+  V(constructor_string, "constructor")                           \
+  V(construct_string, "construct")                               \
+  V(create_string, "create")                                     \
+  V(Date_string, "Date")                                         \
+  V(default_string, "default")                                   \
+  V(defineProperty_string, "defineProperty")                     \
+  V(deleteProperty_string, "deleteProperty")                     \
+  V(display_name_string, "displayName")                          \
+  V(done_string, "done")                                         \
+  V(dot_result_string, ".result")                                \
+  V(dot_string, ".")                                             \
+  V(enumerable_string, "enumerable")                             \
+  V(enumerate_string, "enumerate")                               \
+  V(Error_string, "Error")                                       \
+  V(eval_string, "eval")                                         \
+  V(false_string, "false")                                       \
+  V(float32x4_string, "float32x4")                               \
+  V(Float32x4_string, "Float32x4")                               \
+  V(for_api_string, "for_api")                                   \
+  V(for_string, "for")                                           \
+  V(function_string, "function")                                 \
+  V(Function_string, "Function")                                 \
+  V(Generator_string, "Generator")                               \
+  V(getOwnPropertyDescriptor_string, "getOwnPropertyDescriptor") \
+  V(getPrototypeOf_string, "getPrototypeOf")                     \
+  V(get_string, "get")                                           \
+  V(global_string, "global")                                     \
+  V(has_string, "has")                                           \
+  V(illegal_access_string, "illegal access")                     \
+  V(illegal_argument_string, "illegal argument")                 \
+  V(index_string, "index")                                       \
+  V(infinity_string, "Infinity")                                 \
+  V(input_string, "input")                                       \
+  V(int16x8_string, "int16x8")                                   \
+  V(Int16x8_string, "Int16x8")                                   \
+  V(int32x4_string, "int32x4")                                   \
+  V(Int32x4_string, "Int32x4")                                   \
+  V(int8x16_string, "int8x16")                                   \
+  V(Int8x16_string, "Int8x16")                                   \
+  V(isExtensible_string, "isExtensible")                         \
+  V(isView_string, "isView")                                     \
+  V(KeyedLoadMonomorphic_string, "KeyedLoadMonomorphic")         \
+  V(KeyedStoreMonomorphic_string, "KeyedStoreMonomorphic")       \
+  V(last_index_string, "lastIndex")                              \
+  V(length_string, "length")                                     \
+  V(Map_string, "Map")                                           \
+  V(minus_infinity_string, "-Infinity")                          \
+  V(minus_zero_string, "-0")                                     \
+  V(name_string, "name")                                         \
+  V(nan_string, "NaN")                                           \
+  V(next_string, "next")                                         \
+  V(null_string, "null")                                         \
+  V(null_to_string, "[object Null]")                             \
+  V(number_string, "number")                                     \
+  V(Number_string, "Number")                                     \
+  V(object_string, "object")                                     \
+  V(Object_string, "Object")                                     \
+  V(ownKeys_string, "ownKeys")                                   \
+  V(preventExtensions_string, "preventExtensions")               \
+  V(private_api_string, "private_api")                           \
+  V(Promise_string, "Promise")                                   \
+  V(proto_string, "__proto__")                                   \
+  V(prototype_string, "prototype")                               \
+  V(Proxy_string, "Proxy")                                       \
+  V(query_colon_string, "(?:)")                                  \
+  V(RegExp_string, "RegExp")                                     \
+  V(setPrototypeOf_string, "setPrototypeOf")                     \
+  V(set_string, "set")                                           \
+  V(Set_string, "Set")                                           \
+  V(source_mapping_url_string, "source_mapping_url")             \
+  V(source_string, "source")                                     \
+  V(source_url_string, "source_url")                             \
+  V(stack_string, "stack")                                       \
+  V(strict_compare_ic_string, "===")                             \
+  V(string_string, "string")                                     \
+  V(String_string, "String")                                     \
+  V(symbol_string, "symbol")                                     \
+  V(Symbol_string, "Symbol")                                     \
+  V(this_string, "this")                                         \
+  V(throw_string, "throw")                                       \
+  V(toJSON_string, "toJSON")                                     \
+  V(toString_string, "toString")                                 \
+  V(true_string, "true")                                         \
+  V(uint16x8_string, "uint16x8")                                 \
+  V(Uint16x8_string, "Uint16x8")                                 \
+  V(uint32x4_string, "uint32x4")                                 \
+  V(Uint32x4_string, "Uint32x4")                                 \
+  V(uint8x16_string, "uint8x16")                                 \
+  V(Uint8x16_string, "Uint8x16")                                 \
+  V(undefined_string, "undefined")                               \
+  V(undefined_to_string, "[object Undefined]")                   \
+  V(valueOf_string, "valueOf")                                   \
+  V(value_string, "value")                                       \
+  V(WeakMap_string, "WeakMap")                                   \
+  V(WeakSet_string, "WeakSet")                                   \
+  V(writable_string, "writable")
 
-#define PRIVATE_SYMBOL_LIST(V)      \
-  V(nonextensible_symbol)           \
-  V(sealed_symbol)                  \
-  V(frozen_symbol)                  \
-  V(nonexistent_symbol)             \
-  V(elements_transition_symbol)     \
-  V(prototype_users_symbol)         \
-  V(observed_symbol)                \
-  V(uninitialized_symbol)           \
-  V(megamorphic_symbol)             \
-  V(premonomorphic_symbol)          \
-  V(generic_symbol)                 \
-  V(stack_trace_symbol)             \
-  V(detailed_stack_trace_symbol)    \
-  V(normal_ic_symbol)               \
-  V(home_object_symbol)             \
-  V(intl_initialized_marker_symbol) \
-  V(intl_impl_object_symbol)        \
-  V(promise_debug_marker_symbol)    \
-  V(promise_has_handler_symbol)     \
-  V(class_script_symbol)            \
-  V(class_start_position_symbol)    \
-  V(class_end_position_symbol)
+#define PRIVATE_SYMBOL_LIST(V)              \
+  V(array_iteration_kind_symbol)            \
+  V(array_iterator_next_symbol)             \
+  V(array_iterator_object_symbol)           \
+  V(call_site_function_symbol)              \
+  V(call_site_position_symbol)              \
+  V(call_site_receiver_symbol)              \
+  V(call_site_strict_symbol)                \
+  V(class_end_position_symbol)              \
+  V(class_start_position_symbol)            \
+  V(detailed_stack_trace_symbol)            \
+  V(elements_transition_symbol)             \
+  V(error_end_pos_symbol)                   \
+  V(error_script_symbol)                    \
+  V(error_start_pos_symbol)                 \
+  V(formatted_stack_trace_symbol)           \
+  V(frozen_symbol)                          \
+  V(hash_code_symbol)                       \
+  V(home_object_symbol)                     \
+  V(internal_error_symbol)                  \
+  V(intl_impl_object_symbol)                \
+  V(intl_initialized_marker_symbol)         \
+  V(intl_pattern_symbol)                    \
+  V(intl_resolved_symbol)                   \
+  V(megamorphic_symbol)                     \
+  V(native_context_index_symbol)            \
+  V(nonexistent_symbol)                     \
+  V(nonextensible_symbol)                   \
+  V(normal_ic_symbol)                       \
+  V(not_mapped_symbol)                      \
+  V(observed_symbol)                        \
+  V(premonomorphic_symbol)                  \
+  V(promise_combined_deferred_symbol)       \
+  V(promise_debug_marker_symbol)            \
+  V(promise_has_handler_symbol)             \
+  V(promise_on_resolve_symbol)              \
+  V(promise_on_reject_symbol)               \
+  V(promise_raw_symbol)                     \
+  V(promise_status_symbol)                  \
+  V(promise_value_symbol)                   \
+  V(sealed_symbol)                          \
+  V(stack_trace_symbol)                     \
+  V(strict_function_transition_symbol)      \
+  V(string_iterator_iterated_string_symbol) \
+  V(string_iterator_next_index_symbol)      \
+  V(strong_function_transition_symbol)      \
+  V(uninitialized_symbol)
 
-#define PUBLIC_SYMBOL_LIST(V)                                    \
-  V(has_instance_symbol, symbolHasInstance, Symbol.hasInstance)  \
-  V(is_concat_spreadable_symbol, symbolIsConcatSpreadable,       \
-    Symbol.isConcatSpreadable)                                   \
-  V(is_regexp_symbol, symbolIsRegExp, Symbol.isRegExp)           \
-  V(iterator_symbol, symbolIterator, Symbol.iterator)            \
-  V(to_string_tag_symbol, symbolToStringTag, Symbol.toStringTag) \
-  V(unscopables_symbol, symbolUnscopables, Symbol.unscopables)
+#define PUBLIC_SYMBOL_LIST(V)                \
+  V(has_instance_symbol, Symbol.hasInstance) \
+  V(iterator_symbol, Symbol.iterator)        \
+  V(match_symbol, Symbol.match)              \
+  V(replace_symbol, Symbol.replace)          \
+  V(search_symbol, Symbol.search)            \
+  V(species_symbol, Symbol.species)          \
+  V(split_symbol, Symbol.split)              \
+  V(to_primitive_symbol, Symbol.toPrimitive) \
+  V(unscopables_symbol, Symbol.unscopables)
+
+// Well-Known Symbols are "Public" symbols with a bit set that causes them to
+// produce an undefined value when a load results in a failed access check.
+// Because this behaviour is not yet properly specified, it only applies to a
+// subset of spec-defined Well-Known Symbols.
+#define WELL_KNOWN_SYMBOL_LIST(V)                           \
+  V(is_concat_spreadable_symbol, Symbol.isConcatSpreadable) \
+  V(to_string_tag_symbol, Symbol.toStringTag)
 
 // Heap roots that are known to be immortal immovable, for which we can safely
 // skip write barriers. This list is not complete and has omissions.
 #define IMMORTAL_IMMOVABLE_ROOT_LIST(V) \
   V(ByteArrayMap)                       \
+  V(BytecodeArrayMap)                   \
   V(FreeSpaceMap)                       \
   V(OnePointerFillerMap)                \
   V(TwoPointerFillerMap)                \
@@ -330,21 +420,31 @@
   V(MetaMap)                            \
   V(HeapNumberMap)                      \
   V(MutableHeapNumberMap)               \
+  V(Float32x4Map)                       \
+  V(Int32x4Map)                         \
+  V(Uint32x4Map)                        \
+  V(Bool32x4Map)                        \
+  V(Int16x8Map)                         \
+  V(Uint16x8Map)                        \
+  V(Bool16x8Map)                        \
+  V(Int8x16Map)                         \
+  V(Uint8x16Map)                        \
+  V(Bool8x16Map)                        \
   V(NativeContextMap)                   \
   V(FixedArrayMap)                      \
   V(CodeMap)                            \
   V(ScopeInfoMap)                       \
   V(FixedCOWArrayMap)                   \
   V(FixedDoubleArrayMap)                \
-  V(ConstantPoolArrayMap)               \
   V(WeakCellMap)                        \
+  V(TransitionArrayMap)                 \
   V(NoInterceptorResultSentinel)        \
   V(HashTableMap)                       \
   V(OrderedHashTableMap)                \
   V(EmptyFixedArray)                    \
   V(EmptyByteArray)                     \
+  V(EmptyBytecodeArray)                 \
   V(EmptyDescriptorArray)               \
-  V(EmptyConstantPoolArray)             \
   V(ArgumentsMarker)                    \
   V(SymbolMap)                          \
   V(SloppyArgumentsElementsMap)         \
@@ -363,39 +463,32 @@
   V(JSMessageObjectMap)                 \
   V(ForeignMap)                         \
   V(NeanderMap)                         \
+  V(EmptyWeakCell)                      \
+  V(empty_string)                       \
   PRIVATE_SYMBOL_LIST(V)
 
 // Forward declarations.
+class ArrayBufferTracker;
+class GCIdleTimeAction;
+class GCIdleTimeHandler;
+class GCIdleTimeHeapState;
+class GCTracer;
+class HeapObjectsFilter;
 class HeapStats;
+class HistogramTimer;
 class Isolate;
+class MemoryReducer;
+class ObjectStats;
+class Scavenger;
+class ScavengeJob;
 class WeakObjectRetainer;
 
 
-typedef String* (*ExternalStringTableUpdaterCallback)(Heap* heap,
-                                                      Object** pointer);
-
-class StoreBufferRebuilder {
- public:
-  explicit StoreBufferRebuilder(StoreBuffer* store_buffer)
-      : store_buffer_(store_buffer) {}
-
-  void Callback(MemoryChunk* page, StoreBufferEvent event);
-
- private:
-  StoreBuffer* store_buffer_;
-
-  // We record in this variable how full the store buffer was when we started
-  // iterating over the current page, finding pointers to new space.  If the
-  // store buffer overflows again we can exempt the page from the store buffer
-  // by rewinding to this point instead of having to search the store buffer.
-  Object*** start_of_current_page_;
-  // The current page we are scanning in the store buffer iterator.
-  MemoryChunk* current_page_;
-};
-
-
 // A queue of objects promoted during scavenge. Each object is accompanied
 // by its size to avoid dereferencing a map pointer for scanning.
+// The last page in to-space is used for the promotion queue. On conflict
+// during scavenge, the promotion queue is allocated externally and all
+// entries are copied to the external queue.
 class PromotionQueue {
  public:
   explicit PromotionQueue(Heap* heap)
@@ -418,6 +511,12 @@
   }
 
   void SetNewLimit(Address limit) {
+    // If we are already using an emergency stack, we can ignore it.
+    if (emergency_stack_) return;
+
+    // If the limit is not on the same page, we can ignore it.
+    if (Page::FromAllocationTop(limit) != GetHeadPage()) return;
+
     limit_ = reinterpret_cast<intptr_t*>(limit);
 
     if (limit_ <= rear_) {
@@ -428,6 +527,10 @@
   }
 
   bool IsBelowPromotionQueue(Address to_space_top) {
+    // If an emergency stack is used, the to-space address cannot interfere
+    // with the promotion queue.
+    if (emergency_stack_) return true;
+
     // If the given to-space top pointer and the head of the promotion queue
     // are not on the same page, then the to-space objects are below the
     // promotion queue.
@@ -455,12 +558,6 @@
       return;
     }
 
-    if (NewSpacePage::IsAtStart(reinterpret_cast<Address>(front_))) {
-      NewSpacePage* front_page =
-          NewSpacePage::FromAddress(reinterpret_cast<Address>(front_));
-      DCHECK(!front_page->prev_page()->is_anchor());
-      front_ = reinterpret_cast<intptr_t*>(front_page->prev_page()->area_end());
-    }
     *target = reinterpret_cast<HeapObject*>(*(--front_));
     *size = static_cast<int>(*(--front_));
     // Assert no underflow.
@@ -492,50 +589,6 @@
 };
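
The PromotionQueue above packs (object, size) pairs into the reserved region at the top of to-space, with both cursors starting at the high end and moving down, and falls back to an externally allocated emergency stack when that region is contested. The following is a simplified, self-contained sketch (not the class itself, and not part of this patch): instead of relocating the whole queue it merely spills new entries into a std::vector once the reserved region is exhausted, and MiniPromotionQueue and its members are invented names.

#include <cstdint>
#include <vector>

class MiniPromotionQueue {
 public:
  // The in-place queue occupies [limit, top); both cursors start at |top| and
  // grow downwards, so the reserved region is consumed from the high end.
  MiniPromotionQueue(intptr_t* limit, intptr_t* top)
      : limit_(limit), front_(top), rear_(top) {}

  bool is_empty() const { return front_ == rear_ && emergency_.empty(); }

  void insert(void* object, intptr_t size) {
    if (!emergency_.empty() || (rear_ - limit_) < 2) {
      // No room left in the reserved region: spill to the external stack.
      emergency_.push_back(Entry{object, size});
      return;
    }
    *(--rear_) = reinterpret_cast<intptr_t>(object);
    *(--rear_) = size;
  }

  void remove(void** object, intptr_t* size) {
    if (!emergency_.empty()) {
      *object = emergency_.back().object;
      *size = emergency_.back().size;
      emergency_.pop_back();
      return;
    }
    // Mirrors the in-place insert above: read the object slot, then its size.
    *object = reinterpret_cast<void*>(*(--front_));
    *size = *(--front_);
  }

 private:
  struct Entry { void* object; intptr_t size; };
  intptr_t* const limit_;
  intptr_t* front_;
  intptr_t* rear_;
  std::vector<Entry> emergency_;
};

int main() {
  intptr_t backing[8];
  MiniPromotionQueue queue(backing, backing + 8);
  int dummy = 0;
  queue.insert(&dummy, sizeof(dummy));
  void* object;
  intptr_t size;
  while (!queue.is_empty()) queue.remove(&object, &size);
  return 0;
}
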
 
 
-typedef void (*ScavengingCallback)(Map* map, HeapObject** slot,
-                                   HeapObject* object);
-
-
-// External strings table is a place where all external strings are
-// registered.  We need to keep track of such strings to properly
-// finalize them.
-class ExternalStringTable {
- public:
-  // Registers an external string.
-  inline void AddString(String* string);
-
-  inline void Iterate(ObjectVisitor* v);
-
-  // Restores internal invariant and gets rid of collected strings.
-  // Must be called after each Iterate() that modified the strings.
-  void CleanUp();
-
-  // Destroys all allocated memory.
-  void TearDown();
-
- private:
-  explicit ExternalStringTable(Heap* heap) : heap_(heap) {}
-
-  friend class Heap;
-
-  inline void Verify();
-
-  inline void AddOldString(String* string);
-
-  // Notifies the table that only a prefix of the new list is valid.
-  inline void ShrinkNewStrings(int position);
-
-  // To speed up scavenge collections new space string are kept
-  // separate from old space strings.
-  List<Object*> new_space_strings_;
-  List<Object*> old_space_strings_;
-
-  Heap* heap_;
-
-  DISALLOW_COPY_AND_ASSIGN(ExternalStringTable);
-};
-
-
 enum ArrayStorageAllocationMode {
   DONT_INITIALIZE_ARRAY_ELEMENTS,
   INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE
@@ -544,494 +597,62 @@
 
 class Heap {
  public:
-  // Configure heap size in MB before setup. Return false if the heap has been
-  // set up already.
-  bool ConfigureHeap(int max_semi_space_size, int max_old_space_size,
-                     int max_executable_size, size_t code_range_size);
-  bool ConfigureHeapDefault();
+  // Declare all the root indices.  This defines the root list order.
+  enum RootListIndex {
+#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
+    STRONG_ROOT_LIST(ROOT_INDEX_DECLARATION)
+#undef ROOT_INDEX_DECLARATION
 
-  // Prepares the heap, setting up memory areas that are needed in the isolate
-  // without actually creating any objects.
-  bool SetUp();
+#define STRING_INDEX_DECLARATION(name, str) k##name##RootIndex,
+        INTERNALIZED_STRING_LIST(STRING_INDEX_DECLARATION)
+#undef STRING_INDEX_DECLARATION
 
-  // Bootstraps the object heap with the core set of objects required to run.
-  // Returns whether it succeeded.
-  bool CreateHeapObjects();
+#define SYMBOL_INDEX_DECLARATION(name) k##name##RootIndex,
+            PRIVATE_SYMBOL_LIST(SYMBOL_INDEX_DECLARATION)
+#undef SYMBOL_INDEX_DECLARATION
 
-  // Destroys all memory allocated by the heap.
-  void TearDown();
-
-  // Set the stack limit in the roots_ array.  Some architectures generate
-  // code that looks here, because it is faster than loading from the static
-  // jslimit_/real_jslimit_ variable in the StackGuard.
-  void SetStackLimits();
-
-  // Notifies the heap that is ok to start marking or other activities that
-  // should not happen during deserialization.
-  void NotifyDeserializationComplete();
-
-  // Returns whether SetUp has been called.
-  bool HasBeenSetUp();
-
-  // Returns the maximum amount of memory reserved for the heap.  For
-  // the young generation, we reserve 4 times the amount needed for a
-  // semi space.  The young generation consists of two semi spaces and
-  // we reserve twice the amount needed for those in order to ensure
-  // that new space can be aligned to its size.
-  intptr_t MaxReserved() {
-    return 4 * reserved_semispace_size_ + max_old_generation_size_;
-  }
-  int MaxSemiSpaceSize() { return max_semi_space_size_; }
-  int ReservedSemiSpaceSize() { return reserved_semispace_size_; }
-  int InitialSemiSpaceSize() { return initial_semispace_size_; }
-  int TargetSemiSpaceSize() { return target_semispace_size_; }
-  intptr_t MaxOldGenerationSize() { return max_old_generation_size_; }
-  intptr_t MaxExecutableSize() { return max_executable_size_; }
-
-  // Returns the capacity of the heap in bytes w/o growing. Heap grows when
-  // more spaces are needed until it reaches the limit.
-  intptr_t Capacity();
-
-  // Returns the amount of memory currently committed for the heap.
-  intptr_t CommittedMemory();
-
-  // Returns the amount of executable memory currently committed for the heap.
-  intptr_t CommittedMemoryExecutable();
-
-  // Returns the amount of phyical memory currently committed for the heap.
-  size_t CommittedPhysicalMemory();
-
-  // Returns the maximum amount of memory ever committed for the heap.
-  intptr_t MaximumCommittedMemory() { return maximum_committed_; }
-
-  // Updates the maximum committed memory for the heap. Should be called
-  // whenever a space grows.
-  void UpdateMaximumCommitted();
-
-  // Returns the available bytes in space w/o growing.
-  // Heap doesn't guarantee that it can allocate an object that requires
-  // all available bytes. Check MaxHeapObjectSize() instead.
-  intptr_t Available();
-
-  // Returns of size of all objects residing in the heap.
-  intptr_t SizeOfObjects();
-
-  // Return the starting address and a mask for the new space.  And-masking an
-  // address with the mask will result in the start address of the new space
-  // for all addresses in either semispace.
-  Address NewSpaceStart() { return new_space_.start(); }
-  uintptr_t NewSpaceMask() { return new_space_.mask(); }
-  Address NewSpaceTop() { return new_space_.top(); }
-
-  NewSpace* new_space() { return &new_space_; }
-  OldSpace* old_pointer_space() { return old_pointer_space_; }
-  OldSpace* old_data_space() { return old_data_space_; }
-  OldSpace* code_space() { return code_space_; }
-  MapSpace* map_space() { return map_space_; }
-  CellSpace* cell_space() { return cell_space_; }
-  PropertyCellSpace* property_cell_space() { return property_cell_space_; }
-  LargeObjectSpace* lo_space() { return lo_space_; }
-  PagedSpace* paged_space(int idx) {
-    switch (idx) {
-      case OLD_POINTER_SPACE:
-        return old_pointer_space();
-      case OLD_DATA_SPACE:
-        return old_data_space();
-      case MAP_SPACE:
-        return map_space();
-      case CELL_SPACE:
-        return cell_space();
-      case PROPERTY_CELL_SPACE:
-        return property_cell_space();
-      case CODE_SPACE:
-        return code_space();
-      case NEW_SPACE:
-      case LO_SPACE:
-        UNREACHABLE();
-    }
-    return NULL;
-  }
-
-  bool always_allocate() { return always_allocate_scope_depth_ != 0; }
-  Address always_allocate_scope_depth_address() {
-    return reinterpret_cast<Address>(&always_allocate_scope_depth_);
-  }
-
-  Address* NewSpaceAllocationTopAddress() {
-    return new_space_.allocation_top_address();
-  }
-  Address* NewSpaceAllocationLimitAddress() {
-    return new_space_.allocation_limit_address();
-  }
-
-  Address* OldPointerSpaceAllocationTopAddress() {
-    return old_pointer_space_->allocation_top_address();
-  }
-  Address* OldPointerSpaceAllocationLimitAddress() {
-    return old_pointer_space_->allocation_limit_address();
-  }
-
-  Address* OldDataSpaceAllocationTopAddress() {
-    return old_data_space_->allocation_top_address();
-  }
-  Address* OldDataSpaceAllocationLimitAddress() {
-    return old_data_space_->allocation_limit_address();
-  }
-
-  // Returns a deep copy of the JavaScript object.
-  // Properties and elements are copied too.
-  // Optionally takes an AllocationSite to be appended in an AllocationMemento.
-  MUST_USE_RESULT AllocationResult
-      CopyJSObject(JSObject* source, AllocationSite* site = NULL);
-
-  // Clear the Instanceof cache (used when a prototype changes).
-  inline void ClearInstanceofCache();
-
-  // Iterates the whole code space to clear all ICs of the given kind.
-  void ClearAllICsByKind(Code::Kind kind);
-
-  // For use during bootup.
-  void RepairFreeListsAfterBoot();
-
-  template <typename T>
-  static inline bool IsOneByte(T t, int chars);
-
-  // Move len elements within a given array from src_index index to dst_index
-  // index.
-  void MoveElements(FixedArray* array, int dst_index, int src_index, int len);
-
-  // Sloppy mode arguments object size.
-  static const int kSloppyArgumentsObjectSize =
-      JSObject::kHeaderSize + 2 * kPointerSize;
-  // Strict mode arguments has no callee so it is smaller.
-  static const int kStrictArgumentsObjectSize =
-      JSObject::kHeaderSize + 1 * kPointerSize;
-  // Indicies for direct access into argument objects.
-  static const int kArgumentsLengthIndex = 0;
-  // callee is only valid in sloppy mode.
-  static const int kArgumentsCalleeIndex = 1;
-
-  // Finalizes an external string by deleting the associated external
-  // data and clearing the resource pointer.
-  inline void FinalizeExternalString(String* string);
-
-  // Initialize a filler object to keep the ability to iterate over the heap
-  // when introducing gaps within pages.
-  void CreateFillerObjectAt(Address addr, int size);
-
-  bool CanMoveObjectStart(HeapObject* object);
-
-  // Indicates whether live bytes adjustment is triggered from within the GC
-  // code or from mutator code.
-  enum InvocationMode { FROM_GC, FROM_MUTATOR };
-
-  // Maintain consistency of live bytes during incremental marking.
-  void AdjustLiveBytes(Address address, int by, InvocationMode mode);
-
-  // Trim the given array from the left. Note that this relocates the object
-  // start and hence is only valid if there is only a single reference to it.
-  FixedArrayBase* LeftTrimFixedArray(FixedArrayBase* obj, int elements_to_trim);
-
-  // Trim the given array from the right.
-  template<Heap::InvocationMode mode>
-  void RightTrimFixedArray(FixedArrayBase* obj, int elements_to_trim);
-
-  // Converts the given boolean condition to JavaScript boolean value.
-  inline Object* ToBoolean(bool condition);
-
-  // Performs garbage collection operation.
-  // Returns whether there is a chance that another major GC could
-  // collect more garbage.
-  inline bool CollectGarbage(
-      AllocationSpace space, const char* gc_reason = NULL,
-      const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
-
-  static const int kNoGCFlags = 0;
-  static const int kReduceMemoryFootprintMask = 1;
-  static const int kAbortIncrementalMarkingMask = 2;
-
-  // Making the heap iterable requires us to abort incremental marking.
-  static const int kMakeHeapIterableMask = kAbortIncrementalMarkingMask;
-
-  // Performs a full garbage collection.  If (flags & kMakeHeapIterableMask) is
-  // non-zero, then the slower precise sweeper is used, which leaves the heap
-  // in a state where we can iterate over the heap visiting all objects.
-  void CollectAllGarbage(
-      int flags, const char* gc_reason = NULL,
-      const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
-
-  // Last hope GC, should try to squeeze as much as possible.
-  void CollectAllAvailableGarbage(const char* gc_reason = NULL);
-
-  // Check whether the heap is currently iterable.
-  bool IsHeapIterable();
-
-  // Notify the heap that a context has been disposed.
-  int NotifyContextDisposed(bool dependant_context);
-
-  inline void increment_scan_on_scavenge_pages() {
-    scan_on_scavenge_pages_++;
-    if (FLAG_gc_verbose) {
-      PrintF("Scan-on-scavenge pages: %d\n", scan_on_scavenge_pages_);
-    }
-  }
-
-  inline void decrement_scan_on_scavenge_pages() {
-    scan_on_scavenge_pages_--;
-    if (FLAG_gc_verbose) {
-      PrintF("Scan-on-scavenge pages: %d\n", scan_on_scavenge_pages_);
-    }
-  }
-
-  PromotionQueue* promotion_queue() { return &promotion_queue_; }
-
-  void AddGCPrologueCallback(v8::Isolate::GCPrologueCallback callback,
-                             GCType gc_type_filter, bool pass_isolate = true);
-  void RemoveGCPrologueCallback(v8::Isolate::GCPrologueCallback callback);
-
-  void AddGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback,
-                             GCType gc_type_filter, bool pass_isolate = true);
-  void RemoveGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback);
-
-// Heap root getters.  We have versions with and without type::cast() here.
-// You can't use type::cast during GC because the assert fails.
-// TODO(1490): Try removing the unchecked accessors, now that GC marking does
-// not corrupt the map.
-#define ROOT_ACCESSOR(type, name, camel_name)                           \
-  type* name() { return type::cast(roots_[k##camel_name##RootIndex]); } \
-  type* raw_unchecked_##name() {                                        \
-    return reinterpret_cast<type*>(roots_[k##camel_name##RootIndex]);   \
-  }
-  ROOT_LIST(ROOT_ACCESSOR)
-#undef ROOT_ACCESSOR
+#define SYMBOL_INDEX_DECLARATION(name, description) k##name##RootIndex,
+                PUBLIC_SYMBOL_LIST(SYMBOL_INDEX_DECLARATION)
+                    WELL_KNOWN_SYMBOL_LIST(SYMBOL_INDEX_DECLARATION)
+#undef SYMBOL_INDEX_DECLARATION
 
 // Utility type maps
-#define STRUCT_MAP_ACCESSOR(NAME, Name, name) \
-  Map* name##_map() { return Map::cast(roots_[k##Name##MapRootIndex]); }
-  STRUCT_LIST(STRUCT_MAP_ACCESSOR)
-#undef STRUCT_MAP_ACCESSOR
+#define DECLARE_STRUCT_MAP(NAME, Name, name) k##Name##MapRootIndex,
+                        STRUCT_LIST(DECLARE_STRUCT_MAP)
+#undef DECLARE_STRUCT_MAP
+                            kStringTableRootIndex,
 
-#define STRING_ACCESSOR(name, str) \
-  String* name() { return String::cast(roots_[k##name##RootIndex]); }
-  INTERNALIZED_STRING_LIST(STRING_ACCESSOR)
-#undef STRING_ACCESSOR
+#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
+    SMI_ROOT_LIST(ROOT_INDEX_DECLARATION)
+#undef ROOT_INDEX_DECLARATION
+        kRootListLength,
+    kStrongRootListLength = kStringTableRootIndex,
+    kSmiRootsStart = kStringTableRootIndex + 1
+  };
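
The RootListIndex enum above interleaves the expanded lists with hand-placed sentinels: every index before kStringTableRootIndex belongs to a strong root, and the smi roots begin immediately after the string table, which is what kStrongRootListLength and kSmiRootsStart record. A small, self-contained sketch (not part of this patch) of that layout, using invented MINI_STRONG_LIST and MINI_SMI_LIST stand-ins and compile-time checks:

// MINI_STRONG_LIST / MINI_SMI_LIST stand in for STRONG_ROOT_LIST / SMI_ROOT_LIST.
#define MINI_STRONG_LIST(V) V(TrueValue) V(FalseValue)
#define MINI_SMI_LIST(V) V(HashSeed)

enum MiniRootIndex {
#define ROOT_INDEX_DECLARATION(camel_name) k##camel_name##RootIndex,
  MINI_STRONG_LIST(ROOT_INDEX_DECLARATION)
  kStringTableRootIndex,
  MINI_SMI_LIST(ROOT_INDEX_DECLARATION)
#undef ROOT_INDEX_DECLARATION
  kRootListLength,
  // Everything before the string table is a strong root; smi roots follow it.
  kStrongRootListLength = kStringTableRootIndex,
  kSmiRootsStart = kStringTableRootIndex + 1
};

static_assert(kStrongRootListLength == 2, "strong roots precede the string table");
static_assert(kSmiRootsStart == 3, "smi roots start right after the string table");
static_assert(kRootListLength == 4, "one index per root, including the string table");

int main() { return 0; }
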
 
-#define SYMBOL_ACCESSOR(name) \
-  Symbol* name() { return Symbol::cast(roots_[k##name##RootIndex]); }
-  PRIVATE_SYMBOL_LIST(SYMBOL_ACCESSOR)
-#undef SYMBOL_ACCESSOR
+  // Indicates whether live bytes adjustment is triggered
+  // - from within the GC code before sweeping started (SEQUENTIAL_TO_SWEEPER),
+  // - or from within GC (CONCURRENT_TO_SWEEPER),
+  // - or from mutator code (CONCURRENT_TO_SWEEPER).
+  enum InvocationMode { SEQUENTIAL_TO_SWEEPER, CONCURRENT_TO_SWEEPER };
 
-#define SYMBOL_ACCESSOR(name, varname, description) \
-  Symbol* name() { return Symbol::cast(roots_[k##name##RootIndex]); }
-  PUBLIC_SYMBOL_LIST(SYMBOL_ACCESSOR)
-#undef SYMBOL_ACCESSOR
-
-  // The hidden_string is special because it is the empty string, but does
-  // not match the empty string.
-  String* hidden_string() { return hidden_string_; }
-
-  void set_native_contexts_list(Object* object) {
-    native_contexts_list_ = object;
-  }
-  Object* native_contexts_list() const { return native_contexts_list_; }
-
-  void set_array_buffers_list(Object* object) { array_buffers_list_ = object; }
-  Object* array_buffers_list() const { return array_buffers_list_; }
-
-  void set_allocation_sites_list(Object* object) {
-    allocation_sites_list_ = object;
-  }
-  Object* allocation_sites_list() { return allocation_sites_list_; }
-
-  // Used in CreateAllocationSiteStub and the (de)serializer.
-  Object** allocation_sites_list_address() { return &allocation_sites_list_; }
-
-  Object* weak_object_to_code_table() { return weak_object_to_code_table_; }
-
-  void set_encountered_weak_collections(Object* weak_collection) {
-    encountered_weak_collections_ = weak_collection;
-  }
-  Object* encountered_weak_collections() const {
-    return encountered_weak_collections_;
-  }
-
-  void set_encountered_weak_cells(Object* weak_cell) {
-    encountered_weak_cells_ = weak_cell;
-  }
-  Object* encountered_weak_cells() const { return encountered_weak_cells_; }
-
-  // Number of mark-sweeps.
-  unsigned int ms_count() { return ms_count_; }
-
-  // Iterates over all roots in the heap.
-  void IterateRoots(ObjectVisitor* v, VisitMode mode);
-  // Iterates over all strong roots in the heap.
-  void IterateStrongRoots(ObjectVisitor* v, VisitMode mode);
-  // Iterates over entries in the smi roots list.  Only interesting to the
-  // serializer/deserializer, since GC does not care about smis.
-  void IterateSmiRoots(ObjectVisitor* v);
-  // Iterates over all the other roots in the heap.
-  void IterateWeakRoots(ObjectVisitor* v, VisitMode mode);
-
-  // Iterate pointers to from semispace of new space found in memory interval
-  // from start to end.
-  void IterateAndMarkPointersToFromSpace(Address start, Address end,
-                                         ObjectSlotCallback callback);
-
-  // Returns whether the object resides in new space.
-  inline bool InNewSpace(Object* object);
-  inline bool InNewSpace(Address address);
-  inline bool InNewSpacePage(Address address);
-  inline bool InFromSpace(Object* object);
-  inline bool InToSpace(Object* object);
-
-  // Returns whether the object resides in old pointer space.
-  inline bool InOldPointerSpace(Address address);
-  inline bool InOldPointerSpace(Object* object);
-
-  // Returns whether the object resides in old data space.
-  inline bool InOldDataSpace(Address address);
-  inline bool InOldDataSpace(Object* object);
-
-  // Checks whether an address/object in the heap (including auxiliary
-  // area and unused area).
-  bool Contains(Address addr);
-  bool Contains(HeapObject* value);
-
-  // Checks whether an address/object in a space.
-  // Currently used by tests, serialization and heap verification only.
-  bool InSpace(Address addr, AllocationSpace space);
-  bool InSpace(HeapObject* value, AllocationSpace space);
-
-  // Finds out which space an object should get promoted to based on its type.
-  inline OldSpace* TargetSpace(HeapObject* object);
-  static inline AllocationSpace TargetSpaceId(InstanceType type);
-
-  // Checks whether the given object is allowed to be migrated from it's
-  // current space into the given destination space. Used for debugging.
-  inline bool AllowedToBeMigrated(HeapObject* object, AllocationSpace dest);
-
-  // Sets the stub_cache_ (only used when expanding the dictionary).
-  void public_set_code_stubs(UnseededNumberDictionary* value) {
-    roots_[kCodeStubsRootIndex] = value;
-  }
-
-  // Support for computing object sizes for old objects during GCs. Returns
-  // a function that is guaranteed to be safe for computing object sizes in
-  // the current GC phase.
-  HeapObjectCallback GcSafeSizeOfOldObjectFunction() {
-    return gc_safe_size_of_old_object_;
-  }
-
-  // Sets the non_monomorphic_cache_ (only used when expanding the dictionary).
-  void public_set_non_monomorphic_cache(UnseededNumberDictionary* value) {
-    roots_[kNonMonomorphicCacheRootIndex] = value;
-  }
-
-  void public_set_empty_script(Script* script) {
-    roots_[kEmptyScriptRootIndex] = script;
-  }
-
-  void public_set_store_buffer_top(Address* top) {
-    roots_[kStoreBufferTopRootIndex] = reinterpret_cast<Smi*>(top);
-  }
-
-  void public_set_materialized_objects(FixedArray* objects) {
-    roots_[kMaterializedObjectsRootIndex] = objects;
-  }
-
-  // Generated code can embed this address to get access to the roots.
-  Object** roots_array_start() { return roots_; }
-
-  Address* store_buffer_top_address() {
-    return reinterpret_cast<Address*>(&roots_[kStoreBufferTopRootIndex]);
-  }
-
-  static bool RootIsImmortalImmovable(int root_index);
-
-#ifdef VERIFY_HEAP
-  // Verify the heap is in its normal state before or after a GC.
-  void Verify();
-
-
-  bool weak_embedded_objects_verification_enabled() {
-    return no_weak_object_verification_scope_depth_ == 0;
-  }
-#endif
-
-#ifdef DEBUG
-  void Print();
-  void PrintHandles();
-
-  void OldPointerSpaceCheckStoreBuffer();
-  void MapSpaceCheckStoreBuffer();
-  void LargeObjectSpaceCheckStoreBuffer();
-
-  // Report heap statistics.
-  void ReportHeapStatistics(const char* title);
-  void ReportCodeStatistics(const char* title);
-#endif
-
-  // Zapping is needed for verify heap, and always done in debug builds.
-  static inline bool ShouldZapGarbage() {
-#ifdef DEBUG
-    return true;
-#else
-#ifdef VERIFY_HEAP
-    return FLAG_verify_heap;
-#else
-    return false;
-#endif
-#endif
-  }
-
-  // Number of "runtime allocations" done so far.
-  uint32_t allocations_count() { return allocations_count_; }
-
-  // Returns deterministic "time" value in ms. Works only with
-  // FLAG_verify_predictable.
-  double synthetic_time() { return allocations_count_ / 2.0; }
-
-  // Print short heap statistics.
-  void PrintShortHeapStatistics();
-
-  // Write barrier support for address[offset] = o.
-  INLINE(void RecordWrite(Address address, int offset));
-
-  // Write barrier support for address[start : start + len[ = o.
-  INLINE(void RecordWrites(Address address, int start, int len));
+  enum PretenuringFeedbackInsertionMode { kCached, kGlobal };
 
   enum HeapState { NOT_IN_GC, SCAVENGE, MARK_COMPACT };
-  inline HeapState gc_state() { return gc_state_; }
 
-  inline bool IsInGCPostProcessing() { return gc_post_processing_depth_ > 0; }
+  // Taking this lock prevents the GC from entering a phase that relocates
+  // object references.
+  class RelocationLock {
+   public:
+    explicit RelocationLock(Heap* heap) : heap_(heap) {
+      heap_->relocation_mutex_.Lock();
+    }
 
-#ifdef DEBUG
-  void set_allocation_timeout(int timeout) { allocation_timeout_ = timeout; }
+    ~RelocationLock() { heap_->relocation_mutex_.Unlock(); }
 
-  void TracePathToObjectFrom(Object* target, Object* root);
-  void TracePathToObject(Object* target);
-  void TracePathToGlobal();
-#endif
-
-  // Callback function passed to Heap::Iterate etc.  Copies an object if
-  // necessary, the object might be promoted to an old space.  The caller must
-  // ensure the precondition that the object is (a) a heap object and (b) in
-  // the heap's from space.
-  static inline void ScavengePointer(HeapObject** p);
-  static inline void ScavengeObject(HeapObject** p, HeapObject* object);
-
-  enum ScratchpadSlotMode { IGNORE_SCRATCHPAD_SLOT, RECORD_SCRATCHPAD_SLOT };
-
-  // If an object has an AllocationMemento trailing it, return it, otherwise
-  // return NULL;
-  inline AllocationMemento* FindAllocationMemento(HeapObject* object);
-
-  // An object may have an AllocationSite associated with it through a trailing
-  // AllocationMemento. Its feedback should be updated when objects are found
-  // in the heap.
-  static inline void UpdateAllocationSiteFeedback(HeapObject* object,
-                                                  ScratchpadSlotMode mode);
+   private:
+    Heap* heap_;
+  };
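
A minimal usage sketch of the RelocationLock declared above: it is a plain RAII
wrapper around relocation_mutex_, so keeping an instance on the stack holds the
GC out of any relocating phase for the duration of the scope. The function name
and body below are hypothetical.

  void InspectWithoutRelocation(Heap* heap, HeapObject* object) {
    Heap::RelocationLock lock(heap);  // blocks GC phases that move objects
    // Raw addresses derived from |object| stay valid until |lock| is
    // destroyed at the end of this scope.
  }
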
 
   // Support for partial snapshots.  After calling this we have a linear
   // space to write objects in each space.
@@ -1040,37 +661,20 @@
     Address start;
     Address end;
   };
-
   typedef List<Chunk> Reservation;
 
-  // Returns false if not able to reserve.
-  bool ReserveSpace(Reservation* reservations);
-
-  //
-  // Support for the API.
-  //
-
-  void CreateApiObjects();
-
-  inline intptr_t PromotedTotalSize() {
-    int64_t total = PromotedSpaceSizeOfObjects() + PromotedExternalMemorySize();
-    if (total > kMaxInt) return static_cast<intptr_t>(kMaxInt);
-    if (total < 0) return 0;
-    return static_cast<intptr_t>(total);
-  }
-
-  inline intptr_t OldGenerationSpaceAvailable() {
-    return old_generation_allocation_limit_ - PromotedTotalSize();
-  }
-
-  inline intptr_t OldGenerationCapacityAvailable() {
-    return max_old_generation_size_ - PromotedTotalSize();
-  }
-
   static const intptr_t kMinimumOldGenerationAllocationLimit =
       8 * (Page::kPageSize > MB ? Page::kPageSize : MB);
 
+  static const int kInitalOldGenerationLimitFactor = 2;
+
+#if V8_OS_ANDROID
+  // Don't apply the pointer multiplier on Android since it has no swap space
+  // and should instead adapt its heap size based on available physical memory.
+  static const int kPointerMultiplier = 1;
+#else
   static const int kPointerMultiplier = i::kPointerSize / 4;
+#endif
 
   // The new space size has to be a power of 2. Sizes are in MB.
   static const int kMaxSemiSpaceSizeLowMemoryDevice = 1 * kPointerMultiplier;
@@ -1096,53 +700,42 @@
   static const int kMaxExecutableSizeHugeMemoryDevice =
       256 * kPointerMultiplier;
 
-  intptr_t OldGenerationAllocationLimit(intptr_t old_gen_size,
-                                        int freed_global_handles);
+  static const int kTraceRingBufferSize = 512;
+  static const int kStacktraceBufferSize = 512;
 
-  // Indicates whether inline bump-pointer allocation has been disabled.
-  bool inline_allocation_disabled() { return inline_allocation_disabled_; }
+  static const double kMinHeapGrowingFactor;
+  static const double kMaxHeapGrowingFactor;
+  static const double kMaxHeapGrowingFactorMemoryConstrained;
+  static const double kMaxHeapGrowingFactorIdle;
+  static const double kTargetMutatorUtilization;
 
-  // Switch whether inline bump-pointer allocation should be used.
-  void EnableInlineAllocation();
-  void DisableInlineAllocation();
+  // Sloppy mode arguments object size.
+  static const int kSloppyArgumentsObjectSize =
+      JSObject::kHeaderSize + 2 * kPointerSize;
 
-  // Implements the corresponding V8 API function.
-  bool IdleNotification(double deadline_in_seconds);
-  bool IdleNotification(int idle_time_in_ms);
+  // The strict mode arguments object has no callee, so it is smaller.
+  static const int kStrictArgumentsObjectSize =
+      JSObject::kHeaderSize + 1 * kPointerSize;
 
-  // Declare all the root indices.  This defines the root list order.
-  enum RootListIndex {
-#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
-    STRONG_ROOT_LIST(ROOT_INDEX_DECLARATION)
-#undef ROOT_INDEX_DECLARATION
+  // Indices for direct access into argument objects.
+  static const int kArgumentsLengthIndex = 0;
 
-#define STRING_INDEX_DECLARATION(name, str) k##name##RootIndex,
-    INTERNALIZED_STRING_LIST(STRING_INDEX_DECLARATION)
-#undef STRING_DECLARATION
+  // The callee is only valid in sloppy mode.
+  static const int kArgumentsCalleeIndex = 1;
 
-#define SYMBOL_INDEX_DECLARATION(name) k##name##RootIndex,
-    PRIVATE_SYMBOL_LIST(SYMBOL_INDEX_DECLARATION)
-#undef SYMBOL_INDEX_DECLARATION
+  static const int kNoGCFlags = 0;
+  static const int kReduceMemoryFootprintMask = 1;
+  static const int kAbortIncrementalMarkingMask = 2;
+  static const int kFinalizeIncrementalMarkingMask = 4;
 
-#define SYMBOL_INDEX_DECLARATION(name, varname, description) k##name##RootIndex,
-    PUBLIC_SYMBOL_LIST(SYMBOL_INDEX_DECLARATION)
-#undef SYMBOL_INDEX_DECLARATION
+  // Making the heap iterable requires us to abort incremental marking.
+  static const int kMakeHeapIterableMask = kAbortIncrementalMarkingMask;
 
-// Utility type maps
-#define DECLARE_STRUCT_MAP(NAME, Name, name) k##Name##MapRootIndex,
-    STRUCT_LIST(DECLARE_STRUCT_MAP)
-#undef DECLARE_STRUCT_MAP
-    kStringTableRootIndex,
+  // The roots that have an index less than this are always in old space.
+  static const int kOldSpaceRoots = 0x20;
 
-#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
-    SMI_ROOT_LIST(ROOT_INDEX_DECLARATION)
-#undef ROOT_INDEX_DECLARATION
-    kRootListLength,
-    kStrongRootListLength = kStringTableRootIndex,
-    kSmiRootsStart = kStringTableRootIndex + 1
-  };
-
-  Object* root(RootListIndex index) { return roots_[index]; }
+  // The minimum size of a HeapObject on the heap.
+  static const int kMinObjectSizeInWords = 2;
 
   STATIC_ASSERT(kUndefinedValueRootIndex ==
                 Internals::kUndefinedValueRootIndex);
@@ -1151,24 +744,42 @@
   STATIC_ASSERT(kFalseValueRootIndex == Internals::kFalseValueRootIndex);
   STATIC_ASSERT(kempty_stringRootIndex == Internals::kEmptyStringRootIndex);
 
+  // Calculates the maximum amount of filler that could be required by the
+  // given alignment.
+  static int GetMaximumFillToAlign(AllocationAlignment alignment);
+  // Calculates the actual amount of filler required for a given address at the
+  // given alignment.
+  static int GetFillToAlign(Address address, AllocationAlignment alignment);
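
A small worked example for the two alignment helpers above, assuming a 32-bit
build where kPointerSize is 4 and kDoubleAligned requests 8-byte alignment; the
concrete numbers are illustrative, not normative.

  // If |addr| is only 4-byte aligned, a kDoubleAligned allocation needs one
  // pointer-sized filler word in front of it; if it is already 8-byte
  // aligned, no filler is needed.
  int filler = Heap::GetFillToAlign(addr, kDoubleAligned);   // 0 or 4 here
  int worst = Heap::GetMaximumFillToAlign(kDoubleAligned);   // upper bound
  DCHECK_LE(filler, worst);
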
+
+  template <typename T>
+  static inline bool IsOneByte(T t, int chars);
+
+  static void FatalProcessOutOfMemory(const char* location,
+                                      bool take_snapshot = false);
+
+  static bool RootIsImmortalImmovable(int root_index);
+
+  // Checks whether the space is valid.
+  static bool IsValidAllocationSpace(AllocationSpace space);
+
   // Generated code can embed direct references to non-writable roots if
   // they are in new space.
   static bool RootCanBeWrittenAfterInitialization(RootListIndex root_index);
-  // Generated code can treat direct references to this root as constant.
-  bool RootCanBeTreatedAsConstant(RootListIndex root_index);
 
-  Map* MapForFixedTypedArray(ExternalArrayType array_type);
-  RootListIndex RootIndexForFixedTypedArray(ExternalArrayType array_type);
+  // Zapping is needed for verify heap, and always done in debug builds.
+  static inline bool ShouldZapGarbage() {
+#ifdef DEBUG
+    return true;
+#else
+#ifdef VERIFY_HEAP
+    return FLAG_verify_heap;
+#else
+    return false;
+#endif
+#endif
+  }
 
-  Map* MapForExternalArrayType(ExternalArrayType array_type);
-  RootListIndex RootIndexForExternalArrayType(ExternalArrayType array_type);
-
-  RootListIndex RootIndexForEmptyExternalArray(ElementsKind kind);
-  RootListIndex RootIndexForEmptyFixedTypedArray(ElementsKind kind);
-  ExternalArray* EmptyExternalArrayForMap(Map* map);
-  FixedTypedArrayBase* EmptyFixedTypedArrayForMap(Map* map);
-
-  void RecordStats(HeapStats* stats, bool take_snapshot = false);
+  static double HeapGrowingFactor(double gc_speed, double mutator_speed);
 
   // Copy block of memory from src to dst. Size of block should be aligned
   // by pointer size.
@@ -1178,18 +789,642 @@
   // pointer size aligned addresses.
   static inline void MoveBlock(Address dst, Address src, int byte_size);
 
+  // Determines a static visitor id based on the given {map} that can then be
+  // stored on the map to facilitate fast dispatch for {StaticVisitorBase}.
+  static int GetStaticVisitorIdForMap(Map* map);
+
+  // Notifies the heap that it is OK to start marking or other activities that
+  // should not happen during deserialization.
+  void NotifyDeserializationComplete();
+
+  intptr_t old_generation_allocation_limit() const {
+    return old_generation_allocation_limit_;
+  }
+
+  bool always_allocate() { return always_allocate_scope_count_.Value() != 0; }
+
+  Address* NewSpaceAllocationTopAddress() {
+    return new_space_.allocation_top_address();
+  }
+  Address* NewSpaceAllocationLimitAddress() {
+    return new_space_.allocation_limit_address();
+  }
+
+  Address* OldSpaceAllocationTopAddress() {
+    return old_space_->allocation_top_address();
+  }
+  Address* OldSpaceAllocationLimitAddress() {
+    return old_space_->allocation_limit_address();
+  }
+
+  // TODO(hpayer): There is still a mismatch between capacity and actual
+  // committed memory size.
+  bool CanExpandOldGeneration(int size = 0) {
+    if (force_oom_) return false;
+    return (CommittedOldGenerationMemory() + size) < MaxOldGenerationSize();
+  }
+
+  // Clear the Instanceof cache (used when a prototype changes).
+  inline void ClearInstanceofCache();
+
+  // FreeSpace objects have a null map after deserialization. Update the map.
+  void RepairFreeListsAfterDeserialization();
+
+  // Move len elements within a given array from src_index to dst_index.
+  void MoveElements(FixedArray* array, int dst_index, int src_index, int len);
+
+  // Initialize a filler object to keep the ability to iterate over the heap
+  // when introducing gaps within pages.
+  void CreateFillerObjectAt(Address addr, int size);
+
+  bool CanMoveObjectStart(HeapObject* object);
+
+  // Maintain consistency of live bytes during incremental marking.
+  void AdjustLiveBytes(HeapObject* object, int by, InvocationMode mode);
+
+  // Trim the given array from the left. Note that this relocates the object
+  // start and hence is only valid if there is only a single reference to it.
+  FixedArrayBase* LeftTrimFixedArray(FixedArrayBase* obj, int elements_to_trim);
+
+  // Trim the given array from the right.
+  template<Heap::InvocationMode mode>
+  void RightTrimFixedArray(FixedArrayBase* obj, int elements_to_trim);
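
A hedged sketch of how the trimming helpers above are typically driven; the
array, the element counts, and the choice of invocation mode are assumptions
made for illustration.

  // Drop the first element; the object start moves, so this is only valid
  // while |array| has a single owner.
  array = FixedArray::cast(heap->LeftTrimFixedArray(array, 1));
  // Shrink the array by two elements from the right while the concurrent
  // sweeper may be running.
  heap->RightTrimFixedArray<Heap::CONCURRENT_TO_SWEEPER>(array, 2);
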
+
+  // Converts the given boolean condition to JavaScript boolean value.
+  inline Object* ToBoolean(bool condition);
+
+  // Check whether the heap is currently iterable.
+  bool IsHeapIterable();
+
+  // Notify the heap that a context has been disposed.
+  int NotifyContextDisposed(bool dependant_context);
+
+  inline void increment_scan_on_scavenge_pages() {
+    scan_on_scavenge_pages_++;
+    if (FLAG_gc_verbose) {
+      PrintF("Scan-on-scavenge pages: %d\n", scan_on_scavenge_pages_);
+    }
+  }
+
+  inline void decrement_scan_on_scavenge_pages() {
+    scan_on_scavenge_pages_--;
+    if (FLAG_gc_verbose) {
+      PrintF("Scan-on-scavenge pages: %d\n", scan_on_scavenge_pages_);
+    }
+  }
+
+  void set_native_contexts_list(Object* object) {
+    native_contexts_list_ = object;
+  }
+  Object* native_contexts_list() const { return native_contexts_list_; }
+
+  void set_allocation_sites_list(Object* object) {
+    allocation_sites_list_ = object;
+  }
+  Object* allocation_sites_list() { return allocation_sites_list_; }
+
+  // Used in CreateAllocationSiteStub and the (de)serializer.
+  Object** allocation_sites_list_address() { return &allocation_sites_list_; }
+
+  void set_encountered_weak_collections(Object* weak_collection) {
+    encountered_weak_collections_ = weak_collection;
+  }
+  Object* encountered_weak_collections() const {
+    return encountered_weak_collections_;
+  }
+
+  void set_encountered_weak_cells(Object* weak_cell) {
+    encountered_weak_cells_ = weak_cell;
+  }
+  Object* encountered_weak_cells() const { return encountered_weak_cells_; }
+
+  void set_encountered_transition_arrays(Object* transition_array) {
+    encountered_transition_arrays_ = transition_array;
+  }
+  Object* encountered_transition_arrays() const {
+    return encountered_transition_arrays_;
+  }
+
+  // Number of mark-sweeps.
+  int ms_count() const { return ms_count_; }
+
+  // Checks whether the given object is allowed to be migrated from its
+  // current space into the given destination space. Used for debugging.
+  inline bool AllowedToBeMigrated(HeapObject* object, AllocationSpace dest);
+
+  void CheckHandleCount();
+
+  // Number of "runtime allocations" done so far.
+  uint32_t allocations_count() { return allocations_count_; }
+
+  // Print short heap statistics.
+  void PrintShortHeapStatistics();
+
+  inline HeapState gc_state() { return gc_state_; }
+
+  inline bool IsInGCPostProcessing() { return gc_post_processing_depth_ > 0; }
+
+  // If an object has an AllocationMemento trailing it, return it, otherwise
+  // return NULL.
+  inline AllocationMemento* FindAllocationMemento(HeapObject* object);
+
+  // Returns false if not able to reserve.
+  bool ReserveSpace(Reservation* reservations);
+
+  //
+  // Support for the API.
+  //
+
+  void CreateApiObjects();
+
+  // Implements the corresponding V8 API function.
+  bool IdleNotification(double deadline_in_seconds);
+  bool IdleNotification(int idle_time_in_ms);
+
+  double MonotonicallyIncreasingTimeInMs();
+
+  void RecordStats(HeapStats* stats, bool take_snapshot = false);
+
   // Check new space expansion criteria and expand semispaces if it was hit.
   void CheckNewSpaceExpansionCriteria();
 
-  inline void IncrementPromotedObjectsSize(int object_size) {
-    DCHECK(object_size > 0);
-    promoted_objects_size_ += object_size;
+  inline bool HeapIsFullEnoughToStartIncrementalMarking(intptr_t limit) {
+    if (FLAG_stress_compaction && (gc_count_ & 1) != 0) return true;
+
+    intptr_t adjusted_allocation_limit = limit - new_space_.Capacity();
+
+    if (PromotedTotalSize() >= adjusted_allocation_limit) return true;
+
+    return false;
   }
 
+  void VisitExternalResources(v8::ExternalResourceVisitor* visitor);
+
+  // An object should be promoted if the object has survived a
+  // scavenge operation.
+  inline bool ShouldBePromoted(Address old_address, int object_size);
+
+  void ClearNormalizedMapCaches();
+
+  void IncrementDeferredCount(v8::Isolate::UseCounterFeature feature);
+
+  inline bool OldGenerationAllocationLimitReached();
+
+  void QueueMemoryChunkForFree(MemoryChunk* chunk);
+  void FilterStoreBufferEntriesOnAboutToBeFreedPages();
+  void FreeQueuedChunks(MemoryChunk* list_head);
+  void FreeQueuedChunks();
+  void WaitUntilUnmappingOfFreeChunksCompleted();
+
+  // Completely clear the Instanceof cache (to stop it keeping objects alive
+  // around a GC).
+  inline void CompletelyClearInstanceofCache();
+
+  inline uint32_t HashSeed();
+
+  inline int NextScriptId();
+
+  inline void SetArgumentsAdaptorDeoptPCOffset(int pc_offset);
+  inline void SetConstructStubDeoptPCOffset(int pc_offset);
+  inline void SetGetterStubDeoptPCOffset(int pc_offset);
+  inline void SetSetterStubDeoptPCOffset(int pc_offset);
+
+  // For post mortem debugging.
+  void RememberUnmappedPage(Address page, bool compacted);
+
+  // Global inline caching age: it is incremented on some GCs after context
+  // disposal. We use it to flush inline caches.
+  int global_ic_age() { return global_ic_age_; }
+
+  void AgeInlineCaches() {
+    global_ic_age_ = (global_ic_age_ + 1) & SharedFunctionInfo::ICAgeBits::kMax;
+  }
+
+  int64_t amount_of_external_allocated_memory() {
+    return amount_of_external_allocated_memory_;
+  }
+
+  void update_amount_of_external_allocated_memory(int64_t delta) {
+    amount_of_external_allocated_memory_ += delta;
+  }
+
+  void DeoptMarkedAllocationSites();
+
+  bool DeoptMaybeTenuredAllocationSites() {
+    return new_space_.IsAtMaximumCapacity() && maximum_size_scavenges_ == 0;
+  }
+
+  void AddWeakObjectToCodeDependency(Handle<HeapObject> obj,
+                                     Handle<DependentCode> dep);
+
+  DependentCode* LookupWeakObjectToCodeDependency(Handle<HeapObject> obj);
+
+  void AddRetainedMap(Handle<Map> map);
+
+  // This event is triggered after successful allocation of a new object made
+  // by the runtime. Allocations of target space for object evacuation do not
+  // trigger the event. In order to track ALL allocations one must turn off
+  // FLAG_inline_new and FLAG_use_allocation_folding.
+  inline void OnAllocationEvent(HeapObject* object, int size_in_bytes);
+
+  // This event is triggered after object is moved to a new place.
+  inline void OnMoveEvent(HeapObject* target, HeapObject* source,
+                          int size_in_bytes);
+
+  bool deserialization_complete() const { return deserialization_complete_; }
+
+  bool HasLowAllocationRate();
+  bool HasHighFragmentation();
+  bool HasHighFragmentation(intptr_t used, intptr_t committed);
+
+  void SetOptimizeForLatency() { optimize_for_memory_usage_ = false; }
+  void SetOptimizeForMemoryUsage() { optimize_for_memory_usage_ = true; }
+  bool ShouldOptimizeForMemoryUsage() { return optimize_for_memory_usage_; }
+
+  // ===========================================================================
+  // Initialization. ===========================================================
+  // ===========================================================================
+
+  // Configure heap size in MB before setup. Return false if the heap has been
+  // set up already.
+  bool ConfigureHeap(int max_semi_space_size, int max_old_space_size,
+                     int max_executable_size, size_t code_range_size);
+  bool ConfigureHeapDefault();
+
+  // Prepares the heap, setting up memory areas that are needed in the isolate
+  // without actually creating any objects.
+  bool SetUp();
+
+  // Bootstraps the object heap with the core set of objects required to run.
+  // Returns whether it succeeded.
+  bool CreateHeapObjects();
+
+  // Destroys all memory allocated by the heap.
+  void TearDown();
+
+  // Returns whether SetUp has been called.
+  bool HasBeenSetUp();
+
+  // ===========================================================================
+  // Getters for spaces. =======================================================
+  // ===========================================================================
+
+  // Return the starting address and a mask for the new space.  And-masking an
+  // address with the mask will result in the start address of the new space
+  // for all addresses in either semispace.
+  Address NewSpaceStart() { return new_space_.start(); }
+  uintptr_t NewSpaceMask() { return new_space_.mask(); }
+  Address NewSpaceTop() { return new_space_.top(); }
+
+  NewSpace* new_space() { return &new_space_; }
+  OldSpace* old_space() { return old_space_; }
+  OldSpace* code_space() { return code_space_; }
+  MapSpace* map_space() { return map_space_; }
+  LargeObjectSpace* lo_space() { return lo_space_; }
+
+  PagedSpace* paged_space(int idx) {
+    switch (idx) {
+      case OLD_SPACE:
+        return old_space();
+      case MAP_SPACE:
+        return map_space();
+      case CODE_SPACE:
+        return code_space();
+      case NEW_SPACE:
+      case LO_SPACE:
+        UNREACHABLE();
+    }
+    return NULL;
+  }
+
+  Space* space(int idx) {
+    switch (idx) {
+      case NEW_SPACE:
+        return new_space();
+      case LO_SPACE:
+        return lo_space();
+      default:
+        return paged_space(idx);
+    }
+  }
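
A minimal sketch of walking every space through the accessors above. The loop
bounds rely on the FIRST_SPACE/LAST_SPACE values of the AllocationSpace enum
(assumed here, as in other parts of V8), and the reporting is purely
illustrative.

  for (int idx = FIRST_SPACE; idx <= LAST_SPACE; idx++) {
    Space* s = heap->space(idx);
    PrintF("%s committed: %" V8_PTR_PREFIX "d bytes\n",
           heap->GetSpaceName(idx), s->CommittedMemory());
  }
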
+
+  // Returns name of the space.
+  const char* GetSpaceName(int idx);
+
+  // ===========================================================================
+  // Getters to other components. ==============================================
+  // ===========================================================================
+
+  GCTracer* tracer() { return tracer_; }
+
+  PromotionQueue* promotion_queue() { return &promotion_queue_; }
+
+  inline Isolate* isolate();
+
+  MarkCompactCollector* mark_compact_collector() {
+    return mark_compact_collector_;
+  }
+
+  // ===========================================================================
+  // Root set access. ==========================================================
+  // ===========================================================================
+
+  // Heap root getters.
+#define ROOT_ACCESSOR(type, name, camel_name) inline type* name();
+  ROOT_LIST(ROOT_ACCESSOR)
+#undef ROOT_ACCESSOR
+
+  // Utility type maps.
+#define STRUCT_MAP_ACCESSOR(NAME, Name, name) inline Map* name##_map();
+  STRUCT_LIST(STRUCT_MAP_ACCESSOR)
+#undef STRUCT_MAP_ACCESSOR
+
+#define STRING_ACCESSOR(name, str) inline String* name();
+  INTERNALIZED_STRING_LIST(STRING_ACCESSOR)
+#undef STRING_ACCESSOR
+
+#define SYMBOL_ACCESSOR(name) inline Symbol* name();
+  PRIVATE_SYMBOL_LIST(SYMBOL_ACCESSOR)
+#undef SYMBOL_ACCESSOR
+
+#define SYMBOL_ACCESSOR(name, description) inline Symbol* name();
+  PUBLIC_SYMBOL_LIST(SYMBOL_ACCESSOR)
+  WELL_KNOWN_SYMBOL_LIST(SYMBOL_ACCESSOR)
+#undef SYMBOL_ACCESSOR
+
+  Object* root(RootListIndex index) { return roots_[index]; }
+  Handle<Object> root_handle(RootListIndex index) {
+    return Handle<Object>(&roots_[index]);
+  }
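
A short sketch of the two ways to reach a root exposed above: through the typed
accessor generated by the ROOT_ACCESSOR/STRING_ACCESSOR macros and through the
generic index-based root() getter; the comparison itself is only illustrative.

  String* empty = heap->empty_string();                      // typed accessor
  Object* same = heap->root(Heap::kempty_stringRootIndex);   // by index
  DCHECK_EQ(empty, same);
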
+
+  // Generated code can embed this address to get access to the roots.
+  Object** roots_array_start() { return roots_; }
+
+  // Sets the stub_cache_ (only used when expanding the dictionary).
+  void SetRootCodeStubs(UnseededNumberDictionary* value) {
+    roots_[kCodeStubsRootIndex] = value;
+  }
+
+  // Sets the non_monomorphic_cache_ (only used when expanding the dictionary).
+  void SetRootNonMonomorphicCache(UnseededNumberDictionary* value) {
+    roots_[kNonMonomorphicCacheRootIndex] = value;
+  }
+
+  void SetRootMaterializedObjects(FixedArray* objects) {
+    roots_[kMaterializedObjectsRootIndex] = objects;
+  }
+
+  void SetRootScriptList(Object* value) {
+    roots_[kScriptListRootIndex] = value;
+  }
+
+  void SetRootStringTable(StringTable* value) {
+    roots_[kStringTableRootIndex] = value;
+  }
+
+  void SetRootNoScriptSharedFunctionInfos(Object* value) {
+    roots_[kNoScriptSharedFunctionInfosRootIndex] = value;
+  }
+
+  // Set the stack limit in the roots_ array.  Some architectures generate
+  // code that looks here, because it is faster than loading from the static
+  // jslimit_/real_jslimit_ variable in the StackGuard.
+  void SetStackLimits();
+
+  // Generated code can treat direct references to this root as constant.
+  bool RootCanBeTreatedAsConstant(RootListIndex root_index);
+
+  Map* MapForFixedTypedArray(ExternalArrayType array_type);
+  RootListIndex RootIndexForFixedTypedArray(ExternalArrayType array_type);
+
+  RootListIndex RootIndexForEmptyFixedTypedArray(ElementsKind kind);
+  FixedTypedArrayBase* EmptyFixedTypedArrayForMap(Map* map);
+
+  void RegisterStrongRoots(Object** start, Object** end);
+  void UnregisterStrongRoots(Object** start);
+
+  // ===========================================================================
+  // Inline allocation. ========================================================
+  // ===========================================================================
+
+  // Indicates whether inline bump-pointer allocation has been disabled.
+  bool inline_allocation_disabled() { return inline_allocation_disabled_; }
+
+  // Switch whether inline bump-pointer allocation should be used.
+  void EnableInlineAllocation();
+  void DisableInlineAllocation();
+
+  // ===========================================================================
+  // Methods triggering GCs. ===================================================
+  // ===========================================================================
+
+  // Performs garbage collection operation.
+  // Returns whether there is a chance that another major GC could
+  // collect more garbage.
+  inline bool CollectGarbage(
+      AllocationSpace space, const char* gc_reason = NULL,
+      const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
+
+  // Performs a full garbage collection.  If (flags & kMakeHeapIterableMask) is
+  // non-zero, then the slower precise sweeper is used, which leaves the heap
+  // in a state where we can iterate over the heap visiting all objects.
+  void CollectAllGarbage(
+      int flags = kFinalizeIncrementalMarkingMask, const char* gc_reason = NULL,
+      const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
+
+  // Last hope GC, should try to squeeze as much as possible.
+  void CollectAllAvailableGarbage(const char* gc_reason = NULL);
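
A hedged example of driving the collectors declared above; the GC reason
strings are placeholders.

  // Young generation scavenge.
  heap->CollectGarbage(NEW_SPACE, "example: new space is full");
  // Memory-reducing full GC that also finalizes any in-flight incremental
  // marking cycle.
  heap->CollectAllGarbage(
      Heap::kReduceMemoryFootprintMask | Heap::kFinalizeIncrementalMarkingMask,
      "example: external memory pressure");
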
+
+  // Reports an external memory pressure event; either performs a major GC or
+  // completes incremental marking in order to free external resources.
+  void ReportExternalMemoryPressure(const char* gc_reason = NULL);
+
+  // Invoked when GC was requested via the stack guard.
+  void HandleGCRequest();
+
+  // ===========================================================================
+  // Iterators. ================================================================
+  // ===========================================================================
+
+  // Iterates over all roots in the heap.
+  void IterateRoots(ObjectVisitor* v, VisitMode mode);
+  // Iterates over all strong roots in the heap.
+  void IterateStrongRoots(ObjectVisitor* v, VisitMode mode);
+  // Iterates over entries in the smi roots list.  Only interesting to the
+  // serializer/deserializer, since GC does not care about smis.
+  void IterateSmiRoots(ObjectVisitor* v);
+  // Iterates over all the other roots in the heap.
+  void IterateWeakRoots(ObjectVisitor* v, VisitMode mode);
+
+  // Iterate pointers into the from-semispace of new space found in the memory
+  // interval from start to end within |object|.
+  void IteratePointersToFromSpace(HeapObject* target, int size,
+                                  ObjectSlotCallback callback);
+
+  void IterateAndMarkPointersToFromSpace(HeapObject* object, Address start,
+                                         Address end, bool record_slots,
+                                         ObjectSlotCallback callback);
+
+  // ===========================================================================
+  // Store buffer API. =========================================================
+  // ===========================================================================
+
+  // Write barrier support for address[offset] = o.
+  INLINE(void RecordWrite(Address address, int offset));
+
+  // Write barrier support for address[start : start + len[ = o.
+  INLINE(void RecordWrites(Address address, int start, int len));
+
+  Address* store_buffer_top_address() {
+    return reinterpret_cast<Address*>(&roots_[kStoreBufferTopRootIndex]);
+  }
+
+  // ===========================================================================
+  // Incremental marking API. ==================================================
+  // ===========================================================================
+
+  // Start incremental marking and ensure that the idle time handler can
+  // perform incremental steps.
+  void StartIdleIncrementalMarking();
+
+  // Starts incremental marking assuming incremental marking is currently
+  // stopped.
+  void StartIncrementalMarking(int gc_flags = kNoGCFlags,
+                               const GCCallbackFlags gc_callback_flags =
+                                   GCCallbackFlags::kNoGCCallbackFlags,
+                               const char* reason = nullptr);
+
+  void FinalizeIncrementalMarkingIfComplete(const char* comment);
+
+  bool TryFinalizeIdleIncrementalMarking(double idle_time_in_ms);
+
+  IncrementalMarking* incremental_marking() { return incremental_marking_; }
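
A sketch of kicking off incremental marking by hand. It assumes the
IncrementalMarking object exposes an IsStopped() predicate, as it does
elsewhere in V8; the reason string is a placeholder.

  if (heap->incremental_marking()->IsStopped()) {
    heap->StartIncrementalMarking(Heap::kReduceMemoryFootprintMask,
                                  kNoGCCallbackFlags,
                                  "example: idle memory reduction");
  }
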
+
+  // ===========================================================================
+  // External string table API. ================================================
+  // ===========================================================================
+
+  // Registers an external string.
+  inline void RegisterExternalString(String* string);
+
+  // Finalizes an external string by deleting the associated external
+  // data and clearing the resource pointer.
+  inline void FinalizeExternalString(String* string);
+
+  // ===========================================================================
+  // Methods checking/returning the space of a given object/address. ===========
+  // ===========================================================================
+
+  // Returns whether the object resides in new space.
+  inline bool InNewSpace(Object* object);
+  inline bool InNewSpace(Address address);
+  inline bool InNewSpacePage(Address address);
+  inline bool InFromSpace(Object* object);
+  inline bool InToSpace(Object* object);
+
+  // Returns whether the object resides in old space.
+  inline bool InOldSpace(Address address);
+  inline bool InOldSpace(Object* object);
+
+  // Checks whether an address/object is in the heap (including auxiliary
+  // area and unused area).
+  bool Contains(Address addr);
+  bool Contains(HeapObject* value);
+
+  // Checks whether an address/object is in a given space.
+  // Currently used by tests, serialization and heap verification only.
+  bool InSpace(Address addr, AllocationSpace space);
+  bool InSpace(HeapObject* value, AllocationSpace space);
+
+  // ===========================================================================
+  // Object statistics tracking. ===============================================
+  // ===========================================================================
+
+  // Returns the number of buckets used by object statistics tracking during a
+  // major GC. Note that the following methods fail gracefully when the bounds
+  // are exceeded.
+  size_t NumberOfTrackedHeapObjectTypes();
+
+  // Returns object statistics about count and size at the last major GC.
+  // Objects are grouped into buckets that roughly resemble existing
+  // instance types.
+  size_t ObjectCountAtLastGC(size_t index);
+  size_t ObjectSizeAtLastGC(size_t index);
+
+  // Retrieves names of buckets used by object statistics tracking.
+  bool GetObjectTypeName(size_t index, const char** object_type,
+                         const char** object_sub_type);
+
+  // ===========================================================================
+  // GC statistics. ============================================================
+  // ===========================================================================
+
+  // Returns the maximum amount of memory reserved for the heap.  For
+  // the young generation, we reserve 4 times the amount needed for a
+  // semi space.  The young generation consists of two semi spaces and
+  // we reserve twice the amount needed for those in order to ensure
+  // that new space can be aligned to its size.
+  intptr_t MaxReserved() {
+    return 4 * reserved_semispace_size_ + max_old_generation_size_;
+  }
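
A worked instance of the MaxReserved() formula above, with hypothetical sizes:
if reserved_semispace_size_ is 8 MB and max_old_generation_size_ is 700 MB, the
reservation is 4 * 8 MB + 700 MB = 732 MB; the factor of 4 covers the two
semispaces plus the slack needed to align new space to its size.
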
+  int MaxSemiSpaceSize() { return max_semi_space_size_; }
+  int ReservedSemiSpaceSize() { return reserved_semispace_size_; }
+  int InitialSemiSpaceSize() { return initial_semispace_size_; }
+  int TargetSemiSpaceSize() { return target_semispace_size_; }
+  intptr_t MaxOldGenerationSize() { return max_old_generation_size_; }
+  intptr_t MaxExecutableSize() { return max_executable_size_; }
+
+  // Returns the capacity of the heap in bytes w/o growing. Heap grows when
+  // more spaces are needed until it reaches the limit.
+  intptr_t Capacity();
+
+  // Returns the amount of memory currently committed for the heap.
+  intptr_t CommittedMemory();
+
+  // Returns the amount of memory currently committed for the old space.
+  intptr_t CommittedOldGenerationMemory();
+
+  // Returns the amount of executable memory currently committed for the heap.
+  intptr_t CommittedMemoryExecutable();
+
+  // Returns the amount of physical memory currently committed for the heap.
+  size_t CommittedPhysicalMemory();
+
+  // Returns the maximum amount of memory ever committed for the heap.
+  intptr_t MaximumCommittedMemory() { return maximum_committed_; }
+
+  // Updates the maximum committed memory for the heap. Should be called
+  // whenever a space grows.
+  void UpdateMaximumCommitted();
+
+  // Returns the available bytes in space w/o growing.
+  // Heap doesn't guarantee that it can allocate an object that requires
+  // all available bytes. Check MaxHeapObjectSize() instead.
+  intptr_t Available();
+
+  // Returns the size of all objects residing in the heap.
+  intptr_t SizeOfObjects();
+
+  void UpdateSurvivalStatistics(int start_new_space_size);
+
+  inline void IncrementPromotedObjectsSize(int object_size) {
+    DCHECK_GE(object_size, 0);
+    promoted_objects_size_ += object_size;
+  }
+  inline intptr_t promoted_objects_size() { return promoted_objects_size_; }
+
   inline void IncrementSemiSpaceCopiedObjectSize(int object_size) {
-    DCHECK(object_size > 0);
+    DCHECK_GE(object_size, 0);
     semi_space_copied_object_size_ += object_size;
   }
+  inline intptr_t semi_space_copied_object_size() {
+    return semi_space_copied_object_size_;
+  }
+
+  inline intptr_t SurvivedNewSpaceObjectSize() {
+    return promoted_objects_size_ + semi_space_copied_object_size_;
+  }
 
   inline void IncrementNodesDiedInNewSpace() { nodes_died_in_new_space_++; }
 
@@ -1203,38 +1438,47 @@
     survived_since_last_expansion_ += survived;
   }
 
-  inline bool NextGCIsLikelyToBeFull() {
-    if (FLAG_gc_global) return true;
-
-    if (FLAG_stress_compaction && (gc_count_ & 1) != 0) return true;
-
-    intptr_t adjusted_allocation_limit =
-        old_generation_allocation_limit_ - new_space_.Capacity();
-
-    if (PromotedTotalSize() >= adjusted_allocation_limit) return true;
-
-    return false;
+  inline intptr_t PromotedTotalSize() {
+    int64_t total = PromotedSpaceSizeOfObjects() + PromotedExternalMemorySize();
+    if (total > std::numeric_limits<intptr_t>::max()) {
+      // TODO(erikcorry): Use uintptr_t everywhere we do heap size calculations.
+      return std::numeric_limits<intptr_t>::max();
+    }
+    if (total < 0) return 0;
+    return static_cast<intptr_t>(total);
   }
 
-  void UpdateNewSpaceReferencesInExternalStringTable(
-      ExternalStringTableUpdaterCallback updater_func);
+  void UpdateNewSpaceAllocationCounter() {
+    new_space_allocation_counter_ = NewSpaceAllocationCounter();
+  }
 
-  void UpdateReferencesInExternalStringTable(
-      ExternalStringTableUpdaterCallback updater_func);
+  size_t NewSpaceAllocationCounter() {
+    return new_space_allocation_counter_ + new_space()->AllocatedSinceLastGC();
+  }
 
-  void ProcessWeakReferences(WeakObjectRetainer* retainer);
+  // This should be used only for testing.
+  void set_new_space_allocation_counter(size_t new_value) {
+    new_space_allocation_counter_ = new_value;
+  }
 
-  void VisitExternalResources(v8::ExternalResourceVisitor* visitor);
+  void UpdateOldGenerationAllocationCounter() {
+    old_generation_allocation_counter_ = OldGenerationAllocationCounter();
+  }
 
-  // An object should be promoted if the object has survived a
-  // scavenge operation.
-  inline bool ShouldBePromoted(Address old_address, int object_size);
+  size_t OldGenerationAllocationCounter() {
+    return old_generation_allocation_counter_ + PromotedSinceLastGC();
+  }
 
-  void ClearJSFunctionResultCaches();
+  // This should be used only for testing.
+  void set_old_generation_allocation_counter(size_t new_value) {
+    old_generation_allocation_counter_ = new_value;
+  }
 
-  void ClearNormalizedMapCaches();
+  size_t PromotedSinceLastGC() {
+    return PromotedSpaceSizeOfObjects() - old_generation_size_at_last_gc_;
+  }
 
-  GCTracer* tracer() { return &tracer_; }
+  int gc_count() const { return gc_count_; }
 
   // Returns the size of objects residing in non new spaces.
   intptr_t PromotedSpaceSizeOfObjects();
@@ -1252,9 +1496,406 @@
     }
   }
 
-  // Update GC statistics that are tracked on the Heap.
-  void UpdateCumulativeGCStatistics(double duration, double spent_in_mutator,
-                                    double marking_time);
+  // ===========================================================================
+  // Prologue/epilogue callback methods.========================================
+  // ===========================================================================
+
+  void AddGCPrologueCallback(v8::Isolate::GCCallback callback,
+                             GCType gc_type_filter, bool pass_isolate = true);
+  void RemoveGCPrologueCallback(v8::Isolate::GCCallback callback);
+
+  void AddGCEpilogueCallback(v8::Isolate::GCCallback callback,
+                             GCType gc_type_filter, bool pass_isolate = true);
+  void RemoveGCEpilogueCallback(v8::Isolate::GCCallback callback);
+
+  void CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags);
+  void CallGCEpilogueCallbacks(GCType gc_type, GCCallbackFlags flags);
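
A hedged sketch of registering one of the callbacks handled above. The callback
name and body are hypothetical; the signature follows the public
v8::Isolate::GCCallback typedef, and kGCTypeMarkSweepCompact restricts the
callback to full GCs.

  void OnFullGCStart(v8::Isolate* isolate, v8::GCType type,
                     v8::GCCallbackFlags flags) {
    // e.g. record a timestamp before every mark-compact cycle.
  }

  heap->AddGCPrologueCallback(OnFullGCStart, kGCTypeMarkSweepCompact);
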
+
+  // ===========================================================================
+  // Allocation methods. =======================================================
+  // ===========================================================================
+
+  // Creates a filler object and returns a heap object immediately after it.
+  MUST_USE_RESULT HeapObject* PrecedeWithFiller(HeapObject* object,
+                                                int filler_size);
+
+  // Creates a filler object if needed for alignment and returns a heap object
+  // immediately after it. If any space is left after the returned object,
+  // another filler object is created so the over-allocated memory is iterable.
+  MUST_USE_RESULT HeapObject* AlignWithFiller(HeapObject* object,
+                                              int object_size,
+                                              int allocation_size,
+                                              AllocationAlignment alignment);
+
+  // ===========================================================================
+  // ArrayBuffer tracking. =====================================================
+  // ===========================================================================
+
+  void RegisterNewArrayBuffer(JSArrayBuffer* buffer);
+  void UnregisterArrayBuffer(JSArrayBuffer* buffer);
+
+  inline ArrayBufferTracker* array_buffer_tracker() {
+    return array_buffer_tracker_;
+  }
+
+  // ===========================================================================
+  // Allocation site tracking. =================================================
+  // ===========================================================================
+
+  // Updates the AllocationSite of a given {object}. If the global pretenuring
+  // storage is passed as {pretenuring_feedback}, the memento found count on
+  // the corresponding allocation site is immediately updated and an entry
+  // in the hash map is created. Otherwise the entry (including the count
+  // value) is cached on the local pretenuring feedback.
+  inline void UpdateAllocationSite(HeapObject* object,
+                                   HashMap* pretenuring_feedback);
+
+  // Removes an entry from the global pretenuring storage.
+  inline void RemoveAllocationSitePretenuringFeedback(AllocationSite* site);
+
+  // Merges local pretenuring feedback into the global one. Note that this
+  // method needs to be called after evacuation, as allocation sites may be
+  // evacuated and this method resolves forward pointers accordingly.
+  void MergeAllocationSitePretenuringFeedback(
+      const HashMap& local_pretenuring_feedback);
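
A minimal sketch of the cached/global feedback flow described above. The
HashMap construction mirrors what V8's evacuators do, but the exact constructor
arguments, the |object| variable, and the call sites are assumptions.

  HashMap local_feedback(HashMap::PointersMatch,
                         Heap::kInitialFeedbackCapacity);
  // During evacuation: record mementos locally instead of touching the
  // global table.
  heap->UpdateAllocationSite(object, &local_feedback);
  // After evacuation has finished: fold the local counts into the global
  // pretenuring storage.
  heap->MergeAllocationSitePretenuringFeedback(local_feedback);
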
+
+// =============================================================================
+
+#ifdef VERIFY_HEAP
+  // Verify the heap is in its normal state before or after a GC.
+  void Verify();
+#endif
+
+#ifdef DEBUG
+  void set_allocation_timeout(int timeout) { allocation_timeout_ = timeout; }
+
+  void TracePathToObjectFrom(Object* target, Object* root);
+  void TracePathToObject(Object* target);
+  void TracePathToGlobal();
+
+  void Print();
+  void PrintHandles();
+
+  // Report heap statistics.
+  void ReportHeapStatistics(const char* title);
+  void ReportCodeStatistics(const char* title);
+#endif
+
+ private:
+  class PretenuringScope;
+  class UnmapFreeMemoryTask;
+
+  // The external string table is the place where all external strings are
+  // registered.  We need to keep track of such strings to properly
+  // finalize them.
+  class ExternalStringTable {
+   public:
+    // Registers an external string.
+    inline void AddString(String* string);
+
+    inline void Iterate(ObjectVisitor* v);
+
+    // Restores internal invariant and gets rid of collected strings.
+    // Must be called after each Iterate() that modified the strings.
+    void CleanUp();
+
+    // Destroys all allocated memory.
+    void TearDown();
+
+   private:
+    explicit ExternalStringTable(Heap* heap) : heap_(heap) {}
+
+    inline void Verify();
+
+    inline void AddOldString(String* string);
+
+    // Notifies the table that only a prefix of the new list is valid.
+    inline void ShrinkNewStrings(int position);
+
+    // To speed up scavenge collections, new space strings are kept
+    // separate from old space strings.
+    List<Object*> new_space_strings_;
+    List<Object*> old_space_strings_;
+
+    Heap* heap_;
+
+    friend class Heap;
+
+    DISALLOW_COPY_AND_ASSIGN(ExternalStringTable);
+  };
+
+  struct StrongRootsList;
+
+  struct StringTypeTable {
+    InstanceType type;
+    int size;
+    RootListIndex index;
+  };
+
+  struct ConstantStringTable {
+    const char* contents;
+    RootListIndex index;
+  };
+
+  struct StructTable {
+    InstanceType type;
+    int size;
+    RootListIndex index;
+  };
+
+  struct GCCallbackPair {
+    GCCallbackPair(v8::Isolate::GCCallback callback, GCType gc_type,
+                   bool pass_isolate)
+        : callback(callback), gc_type(gc_type), pass_isolate(pass_isolate) {}
+
+    bool operator==(const GCCallbackPair& other) const {
+      return other.callback == callback;
+    }
+
+    v8::Isolate::GCCallback callback;
+    GCType gc_type;
+    bool pass_isolate;
+  };
+
+  typedef String* (*ExternalStringTableUpdaterCallback)(Heap* heap,
+                                                        Object** pointer);
+
+  static const int kInitialStringTableSize = 2048;
+  static const int kInitialEvalCacheSize = 64;
+  static const int kInitialNumberStringCacheSize = 256;
+
+  static const int kRememberedUnmappedPages = 128;
+
+  static const StringTypeTable string_type_table[];
+  static const ConstantStringTable constant_string_table[];
+  static const StructTable struct_table[];
+
+  static const int kYoungSurvivalRateHighThreshold = 90;
+  static const int kYoungSurvivalRateAllowedDeviation = 15;
+  static const int kOldSurvivalRateLowThreshold = 10;
+
+  static const int kMaxMarkCompactsInIdleRound = 7;
+  static const int kIdleScavengeThreshold = 5;
+
+  static const int kInitialFeedbackCapacity = 256;
+
+  Heap();
+
+  static String* UpdateNewSpaceReferenceInExternalStringTableEntry(
+      Heap* heap, Object** pointer);
+
+  static void ScavengeStoreBufferCallback(Heap* heap, MemoryChunk* page,
+                                          StoreBufferEvent event);
+
+  // Selects the proper allocation space based on the pretenuring decision.
+  static AllocationSpace SelectSpace(PretenureFlag pretenure) {
+    return (pretenure == TENURED) ? OLD_SPACE : NEW_SPACE;
+  }
+
+#define ROOT_ACCESSOR(type, name, camel_name) \
+  inline void set_##name(type* value);
+  ROOT_LIST(ROOT_ACCESSOR)
+#undef ROOT_ACCESSOR
+
+  StoreBuffer* store_buffer() { return &store_buffer_; }
+
+  void set_current_gc_flags(int flags) {
+    current_gc_flags_ = flags;
+    DCHECK(!ShouldFinalizeIncrementalMarking() ||
+           !ShouldAbortIncrementalMarking());
+  }
+
+  inline bool ShouldReduceMemory() const {
+    return current_gc_flags_ & kReduceMemoryFootprintMask;
+  }
+
+  inline bool ShouldAbortIncrementalMarking() const {
+    return current_gc_flags_ & kAbortIncrementalMarkingMask;
+  }
+
+  inline bool ShouldFinalizeIncrementalMarking() const {
+    return current_gc_flags_ & kFinalizeIncrementalMarkingMask;
+  }
+
+  void PreprocessStackTraces();
+
+  // Checks whether a global GC is necessary
+  GarbageCollector SelectGarbageCollector(AllocationSpace space,
+                                          const char** reason);
+
+  // Make sure there is a filler value behind the top of the new space
+  // so that the GC does not confuse some uninitialized/stale memory
+  // with the allocation memento of the object at the top.
+  void EnsureFillerObjectAtTop();
+
+  // Ensure that we have swept all spaces in such a way that we can iterate
+  // over all objects.  May cause a GC.
+  void MakeHeapIterable();
+
+  // Performs garbage collection operation.
+  // Returns whether there is a chance that another major GC could
+  // collect more garbage.
+  bool CollectGarbage(
+      GarbageCollector collector, const char* gc_reason,
+      const char* collector_reason,
+      const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
+
+  // Performs garbage collection.
+  // Returns whether there is a chance another major GC could
+  // collect more garbage.
+  bool PerformGarbageCollection(
+      GarbageCollector collector,
+      const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
+
+  inline void UpdateOldSpaceLimits();
+
+  // Initializes a JSObject based on its map.
+  void InitializeJSObjectFromMap(JSObject* obj, FixedArray* properties,
+                                 Map* map);
+
+  // Initializes JSObject body starting at given offset.
+  void InitializeJSObjectBody(JSObject* obj, Map* map, int start_offset);
+
+  void InitializeAllocationMemento(AllocationMemento* memento,
+                                   AllocationSite* allocation_site);
+
+  bool CreateInitialMaps();
+  void CreateInitialObjects();
+
+  // These two Create*EntryStub functions are here and forced to not be inlined
+  // because of a gcc-4.4 bug that assigns wrong vtable entries.
+  NO_INLINE(void CreateJSEntryStub());
+  NO_INLINE(void CreateJSConstructEntryStub());
+
+  void CreateFixedStubs();
+
+  HeapObject* DoubleAlignForDeserialization(HeapObject* object, int size);
+
+  // Commits from space if it is uncommitted.
+  void EnsureFromSpaceIsCommitted();
+
+  // Uncommit unused semi space.
+  bool UncommitFromSpace() { return new_space_.UncommitFromSpace(); }
+
+  // Fill in bogus values in from space
+  void ZapFromSpace();
+
+  // Deopts all code that contains allocation instructions which are tenured or
+  // not tenured. Moreover it clears the pretenuring allocation site statistics.
+  void ResetAllAllocationSitesDependentCode(PretenureFlag flag);
+
+  // Evaluates local pretenuring for the old space and calls
+  // ResetAllTenuredAllocationSitesDependentCode if too many objects died in
+  // the old space.
+  void EvaluateOldSpaceLocalPretenuring(uint64_t size_of_objects_before_gc);
+
+  // Record statistics before and after garbage collection.
+  void ReportStatisticsBeforeGC();
+  void ReportStatisticsAfterGC();
+
+  // Creates and installs the full-sized number string cache.
+  int FullSizeNumberStringCacheLength();
+  // Flush the number to string cache.
+  void FlushNumberStringCache();
+
+  // TODO(hpayer): Allocation site pretenuring may make this method obsolete.
+  // Re-visit incremental marking heuristics.
+  bool IsHighSurvivalRate() { return high_survival_rate_period_length_ > 0; }
+
+  void ConfigureInitialOldGenerationSize();
+
+  bool HasLowYoungGenerationAllocationRate();
+  bool HasLowOldGenerationAllocationRate();
+  double YoungGenerationMutatorUtilization();
+  double OldGenerationMutatorUtilization();
+
+  void ReduceNewSpaceSize();
+
+  bool TryFinalizeIdleIncrementalMarking(
+      double idle_time_in_ms, size_t size_of_objects,
+      size_t mark_compact_speed_in_bytes_per_ms);
+
+  GCIdleTimeHeapState ComputeHeapState();
+
+  bool PerformIdleTimeAction(GCIdleTimeAction action,
+                             GCIdleTimeHeapState heap_state,
+                             double deadline_in_ms);
+
+  void IdleNotificationEpilogue(GCIdleTimeAction action,
+                                GCIdleTimeHeapState heap_state, double start_ms,
+                                double deadline_in_ms);
+
+  inline void UpdateAllocationsHash(HeapObject* object);
+  inline void UpdateAllocationsHash(uint32_t value);
+  void PrintAlloctionsHash();
+
+  void AddToRingBuffer(const char* string);
+  void GetFromRingBuffer(char* buffer);
+
+  void CompactRetainedMaps(ArrayList* retained_maps);
+
+  // Attempt to over-approximate the weak closure by marking object groups and
+  // implicit references from global handles, but don't atomically complete
+  // marking. If we continue to mark incrementally, we might have marked
+  // objects that die later.
+  void FinalizeIncrementalMarking(const char* gc_reason);
+
+  // Returns the timer used for a given GC type.
+  // - GCScavenger: young generation GC
+  // - GCCompactor: full GC
+  // - GCFinalizeMC: finalization of incremental full GC
+  // - GCFinalizeMCReduceMemory: finalization of incremental full GC with
+  //   memory reduction
+  HistogramTimer* GCTypeTimer(GarbageCollector collector);
+
+  // ===========================================================================
+  // Pretenuring. ==============================================================
+  // ===========================================================================
+
+  // Pretenuring decisions are made based on feedback collected during new space
+  // evacuation. Note that between feedback collection and calling this method
+  // objects in old space must not move.
+  void ProcessPretenuringFeedback();
+
+  // ===========================================================================
+  // Actual GC. ================================================================
+  // ===========================================================================
+
+  // Code that should be run before and after each GC.  Includes some
+  // reporting/verification activities when compiled with DEBUG set.
+  void GarbageCollectionPrologue();
+  void GarbageCollectionEpilogue();
+
+  // Performs a major collection in the whole heap.
+  void MarkCompact();
+
+  // Code to be run before and after mark-compact.
+  void MarkCompactPrologue();
+  void MarkCompactEpilogue();
+
+  // Performs a minor collection in new generation.
+  void Scavenge();
+
+  Address DoScavenge(ObjectVisitor* scavenge_visitor, Address new_space_front);
+
+  void UpdateNewSpaceReferencesInExternalStringTable(
+      ExternalStringTableUpdaterCallback updater_func);
+
+  void UpdateReferencesInExternalStringTable(
+      ExternalStringTableUpdaterCallback updater_func);
+
+  void ProcessAllWeakReferences(WeakObjectRetainer* retainer);
+  void ProcessYoungWeakReferences(WeakObjectRetainer* retainer);
+  void ProcessNativeContexts(WeakObjectRetainer* retainer);
+  void ProcessAllocationSites(WeakObjectRetainer* retainer);
+
+  // ===========================================================================
+  // GC statistics. ============================================================
+  // ===========================================================================
+
+  inline intptr_t OldGenerationSpaceAvailable() {
+    return old_generation_allocation_limit_ - PromotedTotalSize();
+  }
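+  // For example, with an old generation allocation limit of 64 MB and 48 MB
+  // of promoted objects, roughly 16 MB of headroom remain before the next
+  // full GC is triggered; a negative value means the limit is already
+  // exceeded.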
 
   // Returns maximum GC pause.
   double get_max_gc_pause() { return max_gc_pause_; }
@@ -1265,227 +1906,255 @@
   // Returns minimal interval between two subsequent collections.
   double get_min_in_mutator() { return min_in_mutator_; }
 
-  MarkCompactCollector* mark_compact_collector() {
-    return &mark_compact_collector_;
-  }
-
-  StoreBuffer* store_buffer() { return &store_buffer_; }
-
-  Marking* marking() { return &marking_; }
-
-  IncrementalMarking* incremental_marking() { return &incremental_marking_; }
-
-  ExternalStringTable* external_string_table() {
-    return &external_string_table_;
-  }
-
-  // Returns the current sweep generation.
-  int sweep_generation() { return sweep_generation_; }
-
-  inline Isolate* isolate();
-
-  void CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags);
-  void CallGCEpilogueCallbacks(GCType gc_type, GCCallbackFlags flags);
-
-  inline bool OldGenerationAllocationLimitReached();
-
-  inline void DoScavengeObject(Map* map, HeapObject** slot, HeapObject* obj) {
-    scavenging_visitors_table_.GetVisitor(map)(map, slot, obj);
-  }
-
-  void QueueMemoryChunkForFree(MemoryChunk* chunk);
-  void FreeQueuedChunks();
-
-  int gc_count() const { return gc_count_; }
-
-  bool RecentIdleNotificationHappened();
-
-  // Completely clear the Instanceof cache (to stop it keeping objects alive
-  // around a GC).
-  inline void CompletelyClearInstanceofCache();
-
-  // The roots that have an index less than this are always in old space.
-  static const int kOldSpaceRoots = 0x20;
-
-  uint32_t HashSeed() {
-    uint32_t seed = static_cast<uint32_t>(hash_seed()->value());
-    DCHECK(FLAG_randomize_hashes || seed == 0);
-    return seed;
-  }
-
-  void SetArgumentsAdaptorDeoptPCOffset(int pc_offset) {
-    DCHECK(arguments_adaptor_deopt_pc_offset() == Smi::FromInt(0));
-    set_arguments_adaptor_deopt_pc_offset(Smi::FromInt(pc_offset));
-  }
-
-  void SetConstructStubDeoptPCOffset(int pc_offset) {
-    DCHECK(construct_stub_deopt_pc_offset() == Smi::FromInt(0));
-    set_construct_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
-  }
-
-  void SetGetterStubDeoptPCOffset(int pc_offset) {
-    DCHECK(getter_stub_deopt_pc_offset() == Smi::FromInt(0));
-    set_getter_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
-  }
-
-  void SetSetterStubDeoptPCOffset(int pc_offset) {
-    DCHECK(setter_stub_deopt_pc_offset() == Smi::FromInt(0));
-    set_setter_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
-  }
-
-  // For post mortem debugging.
-  void RememberUnmappedPage(Address page, bool compacted);
-
-  // Global inline caching age: it is incremented on some GCs after context
-  // disposal. We use it to flush inline caches.
-  int global_ic_age() { return global_ic_age_; }
-
-  void AgeInlineCaches() {
-    global_ic_age_ = (global_ic_age_ + 1) & SharedFunctionInfo::ICAgeBits::kMax;
-  }
-
-  bool flush_monomorphic_ics() { return flush_monomorphic_ics_; }
-
-  int64_t amount_of_external_allocated_memory() {
-    return amount_of_external_allocated_memory_;
-  }
-
-  void DeoptMarkedAllocationSites();
+  // Update GC statistics that are tracked on the Heap.
+  void UpdateCumulativeGCStatistics(double duration, double spent_in_mutator,
+                                    double marking_time);
 
   bool MaximumSizeScavenge() { return maximum_size_scavenges_ > 0; }
 
-  bool DeoptMaybeTenuredAllocationSites() {
-    return new_space_.IsAtMaximumCapacity() && maximum_size_scavenges_ == 0;
-  }
+  // ===========================================================================
+  // Growing strategy. =========================================================
+  // ===========================================================================
 
-  // ObjectStats are kept in two arrays, counts and sizes. Related stats are
-  // stored in a contiguous linear buffer. Stats groups are stored one after
-  // another.
-  enum {
-    FIRST_CODE_KIND_SUB_TYPE = LAST_TYPE + 1,
-    FIRST_FIXED_ARRAY_SUB_TYPE =
-        FIRST_CODE_KIND_SUB_TYPE + Code::NUMBER_OF_KINDS,
-    FIRST_CODE_AGE_SUB_TYPE =
-        FIRST_FIXED_ARRAY_SUB_TYPE + LAST_FIXED_ARRAY_SUB_TYPE + 1,
-    OBJECT_STATS_COUNT = FIRST_CODE_AGE_SUB_TYPE + Code::kCodeAgeCount + 1
-  };
-
-  void RecordObjectStats(InstanceType type, size_t size) {
-    DCHECK(type <= LAST_TYPE);
-    object_counts_[type]++;
-    object_sizes_[type] += size;
-  }
-
-  void RecordCodeSubTypeStats(int code_sub_type, int code_age, size_t size) {
-    int code_sub_type_index = FIRST_CODE_KIND_SUB_TYPE + code_sub_type;
-    int code_age_index =
-        FIRST_CODE_AGE_SUB_TYPE + code_age - Code::kFirstCodeAge;
-    DCHECK(code_sub_type_index >= FIRST_CODE_KIND_SUB_TYPE &&
-           code_sub_type_index < FIRST_CODE_AGE_SUB_TYPE);
-    DCHECK(code_age_index >= FIRST_CODE_AGE_SUB_TYPE &&
-           code_age_index < OBJECT_STATS_COUNT);
-    object_counts_[code_sub_type_index]++;
-    object_sizes_[code_sub_type_index] += size;
-    object_counts_[code_age_index]++;
-    object_sizes_[code_age_index] += size;
-  }
-
-  void RecordFixedArraySubTypeStats(int array_sub_type, size_t size) {
-    DCHECK(array_sub_type <= LAST_FIXED_ARRAY_SUB_TYPE);
-    object_counts_[FIRST_FIXED_ARRAY_SUB_TYPE + array_sub_type]++;
-    object_sizes_[FIRST_FIXED_ARRAY_SUB_TYPE + array_sub_type] += size;
-  }
-
-  void CheckpointObjectStats();
-
-  // We don't use a LockGuard here since we want to lock the heap
-  // only when FLAG_concurrent_recompilation is true.
-  class RelocationLock {
-   public:
-    explicit RelocationLock(Heap* heap) : heap_(heap) {
-      heap_->relocation_mutex_.Lock();
-    }
+  // Decrease the allocation limit if the new limit based on the given
+  // parameters is lower than the current limit.
+  void DampenOldGenerationAllocationLimit(intptr_t old_gen_size,
+                                          double gc_speed,
+                                          double mutator_speed);
 
 
-    ~RelocationLock() { heap_->relocation_mutex_.Unlock(); }
+  // Calculates the allocation limit based on a given growing factor and a
+  // given old generation size.
+  intptr_t CalculateOldGenerationAllocationLimit(double factor,
+                                                 intptr_t old_gen_size);
 
-   private:
-    Heap* heap_;
-  };
+  // Sets the allocation limit to trigger the next full garbage collection.
+  void SetOldGenerationAllocationLimit(intptr_t old_gen_size, double gc_speed,
+                                       double mutator_speed);
 
-  void AddWeakObjectToCodeDependency(Handle<Object> obj,
-                                     Handle<DependentCode> dep);
+  // ===========================================================================
+  // Idle notification. ========================================================
+  // ===========================================================================
 
-  DependentCode* LookupWeakObjectToCodeDependency(Handle<Object> obj);
+  bool RecentIdleNotificationHappened();
+  void ScheduleIdleScavengeIfNeeded(int bytes_allocated);
 
-  void InitializeWeakObjectToCodeTable() {
-    set_weak_object_to_code_table(undefined_value());
-  }
+  // ===========================================================================
+  // HeapIterator helpers. =====================================================
+  // ===========================================================================
 
-  void EnsureWeakObjectToCodeTable();
+  void heap_iterator_start() { heap_iterator_depth_++; }
 
-  static void FatalProcessOutOfMemory(const char* location,
-                                      bool take_snapshot = false);
+  void heap_iterator_end() { heap_iterator_depth_--; }
 
-  // This event is triggered after successful allocation of a new object made
-  // by runtime. Allocations of target space for object evacuation do not
-  // trigger the event. In order to track ALL allocations one must turn off
-  // FLAG_inline_new and FLAG_use_allocation_folding.
-  inline void OnAllocationEvent(HeapObject* object, int size_in_bytes);
+  bool in_heap_iterator() { return heap_iterator_depth_ > 0; }
 
-  // This event is triggered after object is moved to a new place.
-  inline void OnMoveEvent(HeapObject* target, HeapObject* source,
-                          int size_in_bytes);
+  // ===========================================================================
+  // Allocation methods. =======================================================
+  // ===========================================================================
 
-  bool deserialization_complete() const { return deserialization_complete_; }
-
- protected:
-  // Methods made available to tests.
+  // Returns a deep copy of the JavaScript object.
+  // Properties and elements are copied too.
+  // Optionally takes an AllocationSite to be appended in an AllocationMemento.
+  MUST_USE_RESULT AllocationResult CopyJSObject(JSObject* source,
+                                                AllocationSite* site = NULL);
 
   // Allocates a JS Map in the heap.
   MUST_USE_RESULT AllocationResult
-      AllocateMap(InstanceType instance_type, int instance_size,
-                  ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND);
+  AllocateMap(InstanceType instance_type, int instance_size,
+              ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND);
 
   // Allocates and initializes a new JavaScript object based on a
   // constructor.
   // If allocation_site is non-null, then a memento is emitted after the object
   // that points to the site.
-  MUST_USE_RESULT AllocationResult
-      AllocateJSObject(JSFunction* constructor,
-                       PretenureFlag pretenure = NOT_TENURED,
-                       AllocationSite* allocation_site = NULL);
+  MUST_USE_RESULT AllocationResult AllocateJSObject(
+      JSFunction* constructor, PretenureFlag pretenure = NOT_TENURED,
+      AllocationSite* allocation_site = NULL);
 
   // Allocates and initializes a new JavaScript object based on a map.
   // Passing an allocation site means that a memento will be created that
   // points to the site.
   MUST_USE_RESULT AllocationResult
-      AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure = NOT_TENURED,
-                              bool alloc_props = true,
-                              AllocationSite* allocation_site = NULL);
+  AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure = NOT_TENURED,
+                          AllocationSite* allocation_site = NULL);
 
-  // Allocated a HeapNumber from value.
+  // Allocates a HeapNumber from value.
   MUST_USE_RESULT AllocationResult
-      AllocateHeapNumber(double value, MutableMode mode = IMMUTABLE,
-                         PretenureFlag pretenure = NOT_TENURED);
+  AllocateHeapNumber(double value, MutableMode mode = IMMUTABLE,
+                     PretenureFlag pretenure = NOT_TENURED);
 
-  // Allocate a byte array of the specified length
+// Allocates SIMD values from the given lane values.
+#define SIMD_ALLOCATE_DECLARATION(TYPE, Type, type, lane_count, lane_type) \
+  AllocationResult Allocate##Type(lane_type lanes[lane_count],             \
+                                  PretenureFlag pretenure = NOT_TENURED);
+  SIMD128_TYPES(SIMD_ALLOCATE_DECLARATION)
+#undef SIMD_ALLOCATE_DECLARATION
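+  // For example, assuming Float32x4 is one of SIMD128_TYPES (with 4 float
+  // lanes), the declaration generated above is roughly:
+  //   AllocationResult AllocateFloat32x4(float lanes[4],
+  //                                      PretenureFlag pretenure = NOT_TENURED);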
+
+  // Allocates a byte array of the specified length
   MUST_USE_RESULT AllocationResult
-      AllocateByteArray(int length, PretenureFlag pretenure = NOT_TENURED);
+  AllocateByteArray(int length, PretenureFlag pretenure = NOT_TENURED);
+
+  // Allocates a bytecode array with given contents.
+  MUST_USE_RESULT AllocationResult
+  AllocateBytecodeArray(int length, const byte* raw_bytecodes, int frame_size,
+                        int parameter_count, FixedArray* constant_pool);
 
   // Copy the code and scope info part of the code object, but insert
   // the provided data as the relocation information.
-  MUST_USE_RESULT AllocationResult
-      CopyCode(Code* code, Vector<byte> reloc_info);
+  MUST_USE_RESULT AllocationResult CopyCode(Code* code,
+                                            Vector<byte> reloc_info);
 
   MUST_USE_RESULT AllocationResult CopyCode(Code* code);
 
   // Allocates a fixed array initialized with undefined values
   MUST_USE_RESULT AllocationResult
-      AllocateFixedArray(int length, PretenureFlag pretenure = NOT_TENURED);
+  AllocateFixedArray(int length, PretenureFlag pretenure = NOT_TENURED);
 
- private:
-  Heap();
+  // Allocate an uninitialized object.  The memory is non-executable if the
+  // hardware and OS allow.  This is the single choke-point for allocations
+  // performed by the runtime and should not be bypassed (to extend this to
+  // inlined allocations, use the Heap::DisableInlineAllocation() support).
+  MUST_USE_RESULT inline AllocationResult AllocateRaw(
+      int size_in_bytes, AllocationSpace space,
+      AllocationAlignment aligment = kWordAligned);
+
+  // Allocates a heap object based on the map.
+  MUST_USE_RESULT AllocationResult
+      Allocate(Map* map, AllocationSpace space,
+               AllocationSite* allocation_site = NULL);
+
+  // Allocates a partial map for bootstrapping.
+  MUST_USE_RESULT AllocationResult
+      AllocatePartialMap(InstanceType instance_type, int instance_size);
+
+  // Allocate a block of memory in the given space (filled with a filler).
+  // Used as a fall-back for generated code when the space is full.
+  MUST_USE_RESULT AllocationResult
+      AllocateFillerObject(int size, bool double_align, AllocationSpace space);
+
+  // Allocate an uninitialized fixed array.
+  MUST_USE_RESULT AllocationResult
+      AllocateRawFixedArray(int length, PretenureFlag pretenure);
+
+  // Allocate an uninitialized fixed double array.
+  MUST_USE_RESULT AllocationResult
+      AllocateRawFixedDoubleArray(int length, PretenureFlag pretenure);
+
+  // Allocate an initialized fixed array with the given filler value.
+  MUST_USE_RESULT AllocationResult
+      AllocateFixedArrayWithFiller(int length, PretenureFlag pretenure,
+                                   Object* filler);
+
+  // Allocates and partially initializes a String.  There are two String
+  // encodings: one-byte and two-byte.  These functions allocate a string of
+  // the given length and set its map and length fields.  The characters of
+  // the string are uninitialized.
+  MUST_USE_RESULT AllocationResult
+      AllocateRawOneByteString(int length, PretenureFlag pretenure);
+  MUST_USE_RESULT AllocationResult
+      AllocateRawTwoByteString(int length, PretenureFlag pretenure);
+
+  // Allocates an internalized string in old space based on the character
+  // stream.
+  MUST_USE_RESULT inline AllocationResult AllocateInternalizedStringFromUtf8(
+      Vector<const char> str, int chars, uint32_t hash_field);
+
+  MUST_USE_RESULT inline AllocationResult AllocateOneByteInternalizedString(
+      Vector<const uint8_t> str, uint32_t hash_field);
+
+  MUST_USE_RESULT inline AllocationResult AllocateTwoByteInternalizedString(
+      Vector<const uc16> str, uint32_t hash_field);
+
+  template <bool is_one_byte, typename T>
+  MUST_USE_RESULT AllocationResult
+      AllocateInternalizedStringImpl(T t, int chars, uint32_t hash_field);
+
+  template <typename T>
+  MUST_USE_RESULT inline AllocationResult AllocateInternalizedStringImpl(
+      T t, int chars, uint32_t hash_field);
+
+  // Allocates an uninitialized fixed array. It must be filled by the caller.
+  MUST_USE_RESULT AllocationResult AllocateUninitializedFixedArray(int length);
+
+  // Make a copy of src and return it.
+  MUST_USE_RESULT inline AllocationResult CopyFixedArray(FixedArray* src);
+
+  // Make a copy of src, also grow the copy, and return the copy.
+  MUST_USE_RESULT AllocationResult
+  CopyFixedArrayAndGrow(FixedArray* src, int grow_by, PretenureFlag pretenure);
+
+  // Make a copy of src, set the map, and return the copy.
+  MUST_USE_RESULT AllocationResult
+      CopyFixedArrayWithMap(FixedArray* src, Map* map);
+
+  // Make a copy of src and return it.
+  MUST_USE_RESULT inline AllocationResult CopyFixedDoubleArray(
+      FixedDoubleArray* src);
+
+  // Computes a single character string where the character has the given code.
+  // A cache is used for one-byte (Latin1) codes.
+  MUST_USE_RESULT AllocationResult
+      LookupSingleCharacterStringFromCode(uint16_t code);
+
+  // Allocate a symbol in old space.
+  MUST_USE_RESULT AllocationResult AllocateSymbol();
+
+  // Allocates an external array of the specified length and type.
+  MUST_USE_RESULT AllocationResult AllocateFixedTypedArrayWithExternalPointer(
+      int length, ExternalArrayType array_type, void* external_pointer,
+      PretenureFlag pretenure);
+
+  // Allocates a fixed typed array of the specified length and type.
+  MUST_USE_RESULT AllocationResult
+  AllocateFixedTypedArray(int length, ExternalArrayType array_type,
+                          bool initialize, PretenureFlag pretenure);
+
+  // Make a copy of src and return it.
+  MUST_USE_RESULT AllocationResult CopyAndTenureFixedCOWArray(FixedArray* src);
+
+  // Make a copy of src, set the map, and return the copy.
+  MUST_USE_RESULT AllocationResult
+      CopyFixedDoubleArrayWithMap(FixedDoubleArray* src, Map* map);
+
+  // Allocates a fixed double array with uninitialized values.
+  MUST_USE_RESULT AllocationResult AllocateUninitializedFixedDoubleArray(
+      int length, PretenureFlag pretenure = NOT_TENURED);
+
+  // Allocate empty fixed array.
+  MUST_USE_RESULT AllocationResult AllocateEmptyFixedArray();
+
+  // Allocate empty fixed typed array of given type.
+  MUST_USE_RESULT AllocationResult
+      AllocateEmptyFixedTypedArray(ExternalArrayType array_type);
+
+  // Allocate a tenured simple cell.
+  MUST_USE_RESULT AllocationResult AllocateCell(Object* value);
+
+  // Allocate a tenured JS global property cell initialized with the hole.
+  MUST_USE_RESULT AllocationResult AllocatePropertyCell();
+
+  MUST_USE_RESULT AllocationResult AllocateWeakCell(HeapObject* value);
+
+  MUST_USE_RESULT AllocationResult AllocateTransitionArray(int capacity);
+
+  // Allocates a new utility object in the old generation.
+  MUST_USE_RESULT AllocationResult AllocateStruct(InstanceType type);
+
+  // Allocates a new foreign object.
+  MUST_USE_RESULT AllocationResult
+      AllocateForeign(Address address, PretenureFlag pretenure = NOT_TENURED);
+
+  MUST_USE_RESULT AllocationResult
+      AllocateCode(int object_size, bool immovable);
+
+  MUST_USE_RESULT AllocationResult InternalizeStringWithKey(HashTableKey* key);
+
+  MUST_USE_RESULT AllocationResult InternalizeString(String* str);
+
+  // ===========================================================================
+
+  void set_force_oom(bool value) { force_oom_ = value; }
 
   // The amount of external memory registered through the API kept alive
   // by global handles
@@ -1518,27 +2187,26 @@
   // ... and since the last scavenge.
   int survived_last_scavenge_;
 
-  // For keeping track on when to flush RegExp code.
-  int sweep_generation_;
-
-  int always_allocate_scope_depth_;
+  // This is not the depth of nested AlwaysAllocateScopes but rather a single
+  // count, as scopes can be acquired from multiple tasks (read: threads).
+  AtomicNumber<size_t> always_allocate_scope_count_;
 
   // For keeping track of context disposals.
   int contexts_disposed_;
 
-  int global_ic_age_;
+  // The length of the retained_maps array at the time of context disposal.
+  // This separates maps in the retained_maps array that were created before
+  // and after context disposal.
+  int number_of_disposed_maps_;
 
-  bool flush_monomorphic_ics_;
+  int global_ic_age_;
 
   int scan_on_scavenge_pages_;
 
   NewSpace new_space_;
-  OldSpace* old_pointer_space_;
-  OldSpace* old_data_space_;
+  OldSpace* old_space_;
   OldSpace* code_space_;
   MapSpace* map_space_;
-  CellSpace* cell_space_;
-  PropertyCellSpace* property_cell_space_;
   LargeObjectSpace* lo_space_;
   HeapState gc_state_;
   int gc_post_processing_depth_;
@@ -1553,9 +2221,6 @@
   // Running hash over allocations performed.
   uint32_t raw_allocations_hash_;
 
-  // Countdown counter, dumps allocation hash when 0.
-  uint32_t dump_allocations_hash_countdown_;
-
   // How many mark-sweep collections happened.
   unsigned int ms_count_;
 
@@ -1563,23 +2228,9 @@
   unsigned int gc_count_;
 
   // For post mortem debugging.
-  static const int kRememberedUnmappedPages = 128;
   int remembered_unmapped_pages_index_;
   Address remembered_unmapped_pages_[kRememberedUnmappedPages];
 
-  // Total length of the strings we failed to flatten since the last GC.
-  int unflattened_strings_length_;
-
-#define ROOT_ACCESSOR(type, name, camel_name)                                 \
-  inline void set_##name(type* value) {                                       \
-    /* The deserializer makes use of the fact that these common roots are */  \
-    /* never in new space and never on a page that is being compacted.    */  \
-    DCHECK(k##camel_name##RootIndex >= kOldSpaceRoots || !InNewSpace(value)); \
-    roots_[k##camel_name##RootIndex] = value;                                 \
-  }
-  ROOT_LIST(ROOT_ACCESSOR)
-#undef ROOT_ACCESSOR
-
 #ifdef DEBUG
   // If the --gc-interval flag is set to a positive value, this
   // variable holds the value indicating the number of allocations
@@ -1597,21 +2248,19 @@
   // last GC.
   bool old_gen_exhausted_;
 
+  // Indicates that memory usage is more important than latency.
+  // TODO(ulan): Merge it with memory reducer once chromium:490559 is fixed.
+  bool optimize_for_memory_usage_;
+
   // Indicates that inline bump-pointer allocation has been globally disabled
   // for all spaces. This is used to disable allocations in generated code.
   bool inline_allocation_disabled_;
 
   // Weak list heads, threaded through the objects.
-  // List heads are initilized lazily and contain the undefined_value at start.
+  // List heads are initialized lazily and contain the undefined_value at start.
   Object* native_contexts_list_;
-  Object* array_buffers_list_;
   Object* allocation_sites_list_;
 
-  // WeakHashTable that maps objects embedded in optimized code to dependent
-  // code list. It is initilized lazily and contains the undefined_value at
-  // start.
-  Object* weak_object_to_code_table_;
-
   // List of encountered weak collections (JSWeakMap and JSWeakSet) during
   // marking. It is initialized during marking, destroyed after marking and
   // contains Smi(0) while marking is not active.
@@ -1619,379 +2268,19 @@
 
   Object* encountered_weak_cells_;
 
+  Object* encountered_transition_arrays_;
+
   StoreBufferRebuilder store_buffer_rebuilder_;
 
-  struct StringTypeTable {
-    InstanceType type;
-    int size;
-    RootListIndex index;
-  };
-
-  struct ConstantStringTable {
-    const char* contents;
-    RootListIndex index;
-  };
-
-  struct StructTable {
-    InstanceType type;
-    int size;
-    RootListIndex index;
-  };
-
-  static const StringTypeTable string_type_table[];
-  static const ConstantStringTable constant_string_table[];
-  static const StructTable struct_table[];
-
-  // The special hidden string which is an empty string, but does not match
-  // any string when looked up in properties.
-  String* hidden_string_;
-
-  // GC callback function, called before and after mark-compact GC.
-  // Allocations in the callback function are disallowed.
-  struct GCPrologueCallbackPair {
-    GCPrologueCallbackPair(v8::Isolate::GCPrologueCallback callback,
-                           GCType gc_type, bool pass_isolate)
-        : callback(callback), gc_type(gc_type), pass_isolate_(pass_isolate) {}
-    bool operator==(const GCPrologueCallbackPair& pair) const {
-      return pair.callback == callback;
-    }
-    v8::Isolate::GCPrologueCallback callback;
-    GCType gc_type;
-    // TODO(dcarney): remove variable
-    bool pass_isolate_;
-  };
-  List<GCPrologueCallbackPair> gc_prologue_callbacks_;
-
-  struct GCEpilogueCallbackPair {
-    GCEpilogueCallbackPair(v8::Isolate::GCPrologueCallback callback,
-                           GCType gc_type, bool pass_isolate)
-        : callback(callback), gc_type(gc_type), pass_isolate_(pass_isolate) {}
-    bool operator==(const GCEpilogueCallbackPair& pair) const {
-      return pair.callback == callback;
-    }
-    v8::Isolate::GCPrologueCallback callback;
-    GCType gc_type;
-    // TODO(dcarney): remove variable
-    bool pass_isolate_;
-  };
-  List<GCEpilogueCallbackPair> gc_epilogue_callbacks_;
-
-  // Support for computing object sizes during GC.
-  HeapObjectCallback gc_safe_size_of_old_object_;
-  static int GcSafeSizeOfOldObject(HeapObject* object);
-
-  // Update the GC state. Called from the mark-compact collector.
-  void MarkMapPointersAsEncoded(bool encoded) {
-    DCHECK(!encoded);
-    gc_safe_size_of_old_object_ = &GcSafeSizeOfOldObject;
-  }
-
-  // Code that should be run before and after each GC.  Includes some
-  // reporting/verification activities when compiled with DEBUG set.
-  void GarbageCollectionPrologue();
-  void GarbageCollectionEpilogue();
-
-  // Pretenuring decisions are made based on feedback collected during new
-  // space evacuation. Note that between feedback collection and calling this
-  // method object in old space must not move.
-  // Right now we only process pretenuring feedback in high promotion mode.
-  void ProcessPretenuringFeedback();
-
-  // Checks whether a global GC is necessary
-  GarbageCollector SelectGarbageCollector(AllocationSpace space,
-                                          const char** reason);
-
-  // Make sure there is a filler value behind the top of the new space
-  // so that the GC does not confuse some unintialized/stale memory
-  // with the allocation memento of the object at the top
-  void EnsureFillerObjectAtTop();
-
-  // Ensure that we have swept all spaces in such a way that we can iterate
-  // over all objects.  May cause a GC.
-  void MakeHeapIterable();
-
-  // Performs garbage collection operation.
-  // Returns whether there is a chance that another major GC could
-  // collect more garbage.
-  bool CollectGarbage(
-      GarbageCollector collector, const char* gc_reason,
-      const char* collector_reason,
-      const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
-
-  // Performs garbage collection
-  // Returns whether there is a chance another major GC could
-  // collect more garbage.
-  bool PerformGarbageCollection(
-      GarbageCollector collector,
-      const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
-
-  inline void UpdateOldSpaceLimits();
-
-  // Selects the proper allocation space depending on the given object
-  // size, pretenuring decision, and preferred old-space.
-  static AllocationSpace SelectSpace(int object_size,
-                                     AllocationSpace preferred_old_space,
-                                     PretenureFlag pretenure) {
-    DCHECK(preferred_old_space == OLD_POINTER_SPACE ||
-           preferred_old_space == OLD_DATA_SPACE);
-    if (object_size > Page::kMaxRegularHeapObjectSize) return LO_SPACE;
-    return (pretenure == TENURED) ? preferred_old_space : NEW_SPACE;
-  }
-
-  HeapObject* DoubleAlignForDeserialization(HeapObject* object, int size);
-
-  // Allocate an uninitialized object.  The memory is non-executable if the
-  // hardware and OS allow.  This is the single choke-point for allocations
-  // performed by the runtime and should not be bypassed (to extend this to
-  // inlined allocations, use the Heap::DisableInlineAllocation() support).
-  MUST_USE_RESULT inline AllocationResult AllocateRaw(
-      int size_in_bytes, AllocationSpace space, AllocationSpace retry_space);
-
-  // Allocates a heap object based on the map.
-  MUST_USE_RESULT AllocationResult
-      Allocate(Map* map, AllocationSpace space,
-               AllocationSite* allocation_site = NULL);
-
-  // Allocates a partial map for bootstrapping.
-  MUST_USE_RESULT AllocationResult
-      AllocatePartialMap(InstanceType instance_type, int instance_size);
-
-  // Initializes a JSObject based on its map.
-  void InitializeJSObjectFromMap(JSObject* obj, FixedArray* properties,
-                                 Map* map);
-  void InitializeAllocationMemento(AllocationMemento* memento,
-                                   AllocationSite* allocation_site);
-
-  // Allocate a block of memory in the given space (filled with a filler).
-  // Used as a fall-back for generated code when the space is full.
-  MUST_USE_RESULT AllocationResult
-      AllocateFillerObject(int size, bool double_align, AllocationSpace space);
-
-  // Allocate an uninitialized fixed array.
-  MUST_USE_RESULT AllocationResult
-      AllocateRawFixedArray(int length, PretenureFlag pretenure);
-
-  // Allocate an uninitialized fixed double array.
-  MUST_USE_RESULT AllocationResult
-      AllocateRawFixedDoubleArray(int length, PretenureFlag pretenure);
-
-  // Allocate an initialized fixed array with the given filler value.
-  MUST_USE_RESULT AllocationResult
-      AllocateFixedArrayWithFiller(int length, PretenureFlag pretenure,
-                                   Object* filler);
-
-  // Allocate and partially initializes a String.  There are two String
-  // encodings: one-byte and two-byte.  These functions allocate a string of
-  // the given length and set its map and length fields.  The characters of
-  // the string are uninitialized.
-  MUST_USE_RESULT AllocationResult
-      AllocateRawOneByteString(int length, PretenureFlag pretenure);
-  MUST_USE_RESULT AllocationResult
-      AllocateRawTwoByteString(int length, PretenureFlag pretenure);
-
-  bool CreateInitialMaps();
-  void CreateInitialObjects();
-
-  // Allocates an internalized string in old space based on the character
-  // stream.
-  MUST_USE_RESULT inline AllocationResult AllocateInternalizedStringFromUtf8(
-      Vector<const char> str, int chars, uint32_t hash_field);
-
-  MUST_USE_RESULT inline AllocationResult AllocateOneByteInternalizedString(
-      Vector<const uint8_t> str, uint32_t hash_field);
-
-  MUST_USE_RESULT inline AllocationResult AllocateTwoByteInternalizedString(
-      Vector<const uc16> str, uint32_t hash_field);
-
-  template <bool is_one_byte, typename T>
-  MUST_USE_RESULT AllocationResult
-      AllocateInternalizedStringImpl(T t, int chars, uint32_t hash_field);
-
-  template <typename T>
-  MUST_USE_RESULT inline AllocationResult AllocateInternalizedStringImpl(
-      T t, int chars, uint32_t hash_field);
-
-  // Allocates an uninitialized fixed array. It must be filled by the caller.
-  MUST_USE_RESULT AllocationResult AllocateUninitializedFixedArray(int length);
-
-  // Make a copy of src and return it. Returns
-  // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
-  MUST_USE_RESULT inline AllocationResult CopyFixedArray(FixedArray* src);
-
-  // Make a copy of src, set the map, and return the copy. Returns
-  // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
-  MUST_USE_RESULT AllocationResult
-      CopyFixedArrayWithMap(FixedArray* src, Map* map);
-
-  // Make a copy of src and return it. Returns
-  // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
-  MUST_USE_RESULT inline AllocationResult CopyFixedDoubleArray(
-      FixedDoubleArray* src);
-
-  // Make a copy of src and return it. Returns
-  // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
-  MUST_USE_RESULT inline AllocationResult CopyConstantPoolArray(
-      ConstantPoolArray* src);
-
-
-  // Computes a single character string where the character has code.
-  // A cache is used for one-byte (Latin1) codes.
-  MUST_USE_RESULT AllocationResult
-      LookupSingleCharacterStringFromCode(uint16_t code);
-
-  // Allocate a symbol in old space.
-  MUST_USE_RESULT AllocationResult AllocateSymbol();
-
-  // Make a copy of src, set the map, and return the copy.
-  MUST_USE_RESULT AllocationResult
-      CopyConstantPoolArrayWithMap(ConstantPoolArray* src, Map* map);
-
-  MUST_USE_RESULT AllocationResult AllocateConstantPoolArray(
-      const ConstantPoolArray::NumberOfEntries& small);
-
-  MUST_USE_RESULT AllocationResult AllocateExtendedConstantPoolArray(
-      const ConstantPoolArray::NumberOfEntries& small,
-      const ConstantPoolArray::NumberOfEntries& extended);
-
-  // Allocates an external array of the specified length and type.
-  MUST_USE_RESULT AllocationResult
-      AllocateExternalArray(int length, ExternalArrayType array_type,
-                            void* external_pointer, PretenureFlag pretenure);
-
-  // Allocates a fixed typed array of the specified length and type.
-  MUST_USE_RESULT AllocationResult
-      AllocateFixedTypedArray(int length, ExternalArrayType array_type,
-                              PretenureFlag pretenure);
-
-  // Make a copy of src and return it.
-  MUST_USE_RESULT AllocationResult CopyAndTenureFixedCOWArray(FixedArray* src);
-
-  // Make a copy of src, set the map, and return the copy.
-  MUST_USE_RESULT AllocationResult
-      CopyFixedDoubleArrayWithMap(FixedDoubleArray* src, Map* map);
-
-  // Allocates a fixed double array with uninitialized values. Returns
-  MUST_USE_RESULT AllocationResult AllocateUninitializedFixedDoubleArray(
-      int length, PretenureFlag pretenure = NOT_TENURED);
-
-  // These five Create*EntryStub functions are here and forced to not be inlined
-  // because of a gcc-4.4 bug that assigns wrong vtable entries.
-  NO_INLINE(void CreateJSEntryStub());
-  NO_INLINE(void CreateJSConstructEntryStub());
-
-  void CreateFixedStubs();
-
-  // Allocate empty fixed array.
-  MUST_USE_RESULT AllocationResult AllocateEmptyFixedArray();
-
-  // Allocate empty external array of given type.
-  MUST_USE_RESULT AllocationResult
-      AllocateEmptyExternalArray(ExternalArrayType array_type);
-
-  // Allocate empty fixed typed array of given type.
-  MUST_USE_RESULT AllocationResult
-      AllocateEmptyFixedTypedArray(ExternalArrayType array_type);
-
-  // Allocate empty constant pool array.
-  MUST_USE_RESULT AllocationResult AllocateEmptyConstantPoolArray();
-
-  // Allocate a tenured simple cell.
-  MUST_USE_RESULT AllocationResult AllocateCell(Object* value);
-
-  // Allocate a tenured JS global property cell initialized with the hole.
-  MUST_USE_RESULT AllocationResult AllocatePropertyCell();
-
-  MUST_USE_RESULT AllocationResult AllocateWeakCell(HeapObject* value);
-
-  // Allocates a new utility object in the old generation.
-  MUST_USE_RESULT AllocationResult AllocateStruct(InstanceType type);
-
-  // Allocates a new foreign object.
-  MUST_USE_RESULT AllocationResult
-      AllocateForeign(Address address, PretenureFlag pretenure = NOT_TENURED);
-
-  MUST_USE_RESULT AllocationResult
-      AllocateCode(int object_size, bool immovable);
-
-  MUST_USE_RESULT AllocationResult InternalizeStringWithKey(HashTableKey* key);
-
-  MUST_USE_RESULT AllocationResult InternalizeString(String* str);
-
-  // Performs a minor collection in new generation.
-  void Scavenge();
-
-  // Commits from space if it is uncommitted.
-  void EnsureFromSpaceIsCommitted();
-
-  // Uncommit unused semi space.
-  bool UncommitFromSpace() { return new_space_.UncommitFromSpace(); }
-
-  // Fill in bogus values in from space
-  void ZapFromSpace();
-
-  static String* UpdateNewSpaceReferenceInExternalStringTableEntry(
-      Heap* heap, Object** pointer);
-
-  Address DoScavenge(ObjectVisitor* scavenge_visitor, Address new_space_front);
-  static void ScavengeStoreBufferCallback(Heap* heap, MemoryChunk* page,
-                                          StoreBufferEvent event);
-
-  // Performs a major collection in the whole heap.
-  void MarkCompact();
-
-  // Code to be run before and after mark-compact.
-  void MarkCompactPrologue();
-  void MarkCompactEpilogue();
-
-  void ProcessNativeContexts(WeakObjectRetainer* retainer);
-  void ProcessArrayBuffers(WeakObjectRetainer* retainer);
-  void ProcessAllocationSites(WeakObjectRetainer* retainer);
-
-  // Deopts all code that contains allocation instruction which are tenured or
-  // not tenured. Moreover it clears the pretenuring allocation site statistics.
-  void ResetAllAllocationSitesDependentCode(PretenureFlag flag);
-
-  // Evaluates local pretenuring for the old space and calls
-  // ResetAllTenuredAllocationSitesDependentCode if too many objects died in
-  // the old space.
-  void EvaluateOldSpaceLocalPretenuring(uint64_t size_of_objects_before_gc);
-
-  // Called on heap tear-down.
-  void TearDownArrayBuffers();
-
-  // Record statistics before and after garbage collection.
-  void ReportStatisticsBeforeGC();
-  void ReportStatisticsAfterGC();
-
-  // Slow part of scavenge object.
-  static void ScavengeObjectSlow(HeapObject** p, HeapObject* object);
+  List<GCCallbackPair> gc_epilogue_callbacks_;
+  List<GCCallbackPair> gc_prologue_callbacks_;
 
   // Total RegExp code ever generated
   double total_regexp_code_generated_;
 
-  GCTracer tracer_;
+  int deferred_counters_[v8::Isolate::kUseCounterFeatureCount];
 
-  // Creates and installs the full-sized number string cache.
-  int FullSizeNumberStringCacheLength();
-  // Flush the number to string cache.
-  void FlushNumberStringCache();
-
-  // Sets used allocation sites entries to undefined.
-  void FlushAllocationSitesScratchpad();
-
-  // Initializes the allocation sites scratchpad with undefined values.
-  void InitializeAllocationSitesScratchpad();
-
-  // Adds an allocation site to the scratchpad if there is space left.
-  void AddAllocationSiteToScratchpad(AllocationSite* site,
-                                     ScratchpadSlotMode mode);
-
-  void UpdateSurvivalStatistics(int start_new_space_size);
-
-  static const int kYoungSurvivalRateHighThreshold = 90;
-  static const int kYoungSurvivalRateAllowedDeviation = 15;
-
-  static const int kOldSurvivalRateLowThreshold = 10;
+  GCTracer* tracer_;
 
   int high_survival_rate_period_length_;
   intptr_t promoted_objects_size_;
@@ -2010,47 +2299,6 @@
   // of the allocation site.
   unsigned int maximum_size_scavenges_;
 
-  // TODO(hpayer): Allocation site pretenuring may make this method obsolete.
-  // Re-visit incremental marking heuristics.
-  bool IsHighSurvivalRate() { return high_survival_rate_period_length_ > 0; }
-
-  void ConfigureInitialOldGenerationSize();
-
-  void SelectScavengingVisitorsTable();
-
-  void IdleMarkCompact(const char* message);
-
-  bool TryFinalizeIdleIncrementalMarking(
-      double idle_time_in_ms, size_t size_of_objects,
-      size_t mark_compact_speed_in_bytes_per_ms);
-
-  bool WorthActivatingIncrementalMarking();
-
-  void ClearObjectStats(bool clear_last_time_stats = false);
-
-  void set_weak_object_to_code_table(Object* value) {
-    DCHECK(!InNewSpace(value));
-    weak_object_to_code_table_ = value;
-  }
-
-  Object** weak_object_to_code_table_address() {
-    return &weak_object_to_code_table_;
-  }
-
-  inline void UpdateAllocationsHash(HeapObject* object);
-  inline void UpdateAllocationsHash(uint32_t value);
-  inline void PrintAlloctionsHash();
-
-  static const int kInitialStringTableSize = 2048;
-  static const int kInitialEvalCacheSize = 64;
-  static const int kInitialNumberStringCacheSize = 256;
-
-  // Object counts and used memory by InstanceType
-  size_t object_counts_[OBJECT_STATS_COUNT];
-  size_t object_counts_last_time_[OBJECT_STATS_COUNT];
-  size_t object_sizes_[OBJECT_STATS_COUNT];
-  size_t object_sizes_last_time_[OBJECT_STATS_COUNT];
-
   // Maximum GC pause.
   double max_gc_pause_;
 
@@ -2063,44 +2311,71 @@
   // Minimal interval between two subsequent collections.
   double min_in_mutator_;
 
-  // Cumulative GC time spent in marking
+  // Cumulative GC time spent in marking.
   double marking_time_;
 
-  // Cumulative GC time spent in sweeping
+  // Cumulative GC time spent in sweeping.
   double sweeping_time_;
 
-  // Last time an idle notification happened
+  // Last time an idle notification happened.
   double last_idle_notification_time_;
 
-  MarkCompactCollector mark_compact_collector_;
+  // Last time a garbage collection happened.
+  double last_gc_time_;
+
+  Scavenger* scavenge_collector_;
+
+  MarkCompactCollector* mark_compact_collector_;
 
   StoreBuffer store_buffer_;
 
-  Marking marking_;
+  IncrementalMarking* incremental_marking_;
 
-  IncrementalMarking incremental_marking_;
+  GCIdleTimeHandler* gc_idle_time_handler_;
 
-  GCIdleTimeHandler gc_idle_time_handler_;
-  unsigned int gc_count_at_last_idle_gc_;
+  MemoryReducer* memory_reducer_;
+
+  ObjectStats* object_stats_;
+
+  ScavengeJob* scavenge_job_;
+
+  InlineAllocationObserver* idle_scavenge_observer_;
 
   // These two counters are monotonically increasing and never reset.
   size_t full_codegen_bytes_generated_;
   size_t crankshaft_codegen_bytes_generated_;
 
+  // This counter is increased before each GC and never reset.
+  // To account for the bytes allocated since the last GC, use the
+  // NewSpaceAllocationCounter() function.
+  size_t new_space_allocation_counter_;
+
+  // This counter is increased before each GC and never reset. To
+  // account for the bytes allocated since the last GC, use the
+  // OldGenerationAllocationCounter() function.
+  size_t old_generation_allocation_counter_;
+
+  // The size of objects in old generation after the last MarkCompact GC.
+  size_t old_generation_size_at_last_gc_;
+
   // If the --deopt_every_n_garbage_collections flag is set to a positive value,
   // this variable holds the number of garbage collections since the last
   // deoptimization triggered by garbage collection.
   int gcs_since_last_deopt_;
 
-#ifdef VERIFY_HEAP
-  int no_weak_object_verification_scope_depth_;
-#endif
+  // The feedback storage is used to store allocation sites (keys) and how often
+  // they have been visited (values) by finding a memento behind an object. The
+  // storage is only alive temporarily during a GC. The invariant is that all
+  // pointers in this map are already fixed, i.e., they do not point to
+  // forwarding pointers.
+  HashMap* global_pretenuring_feedback_;
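+  // For example, an allocation site for which three mementos were found
+  // during the current GC would appear here as a single entry whose value
+  // is 3.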
 
-  static const int kAllocationSiteScratchpadSize = 256;
-  int allocation_sites_scratchpad_length_;
-
-  static const int kMaxMarkCompactsInIdleRound = 7;
-  static const int kIdleScavengeThreshold = 5;
+  char trace_ring_buffer_[kTraceRingBufferSize];
+  // If it's not full then the data is from 0 to ring_buffer_end_.  If it's
+  // full then the data is from ring_buffer_end_ to the end of the buffer and
+  // from 0 to ring_buffer_end_.
+  bool ring_buffer_full_;
+  size_t ring_buffer_end_;
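+  // For example, a reader reconstructs the trace by copying
+  // [ring_buffer_end_, kTraceRingBufferSize) followed by [0, ring_buffer_end_)
+  // when the buffer is full, and just [0, ring_buffer_end_) otherwise.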
 
   // Shared state read by the scavenge collector and set by ScavengeObject.
   PromotionQueue promotion_queue_;
@@ -2109,32 +2384,61 @@
   // configured through the API until it is set up.
   bool configured_;
 
+  // Currently set GC flags that are respected by all GC components.
+  int current_gc_flags_;
+
+  // Currently set GC callback flags that are used to pass information between
+  // the embedder and V8's GC.
+  GCCallbackFlags current_gc_callback_flags_;
+
   ExternalStringTable external_string_table_;
 
-  VisitorDispatchTable<ScavengingCallback> scavenging_visitors_table_;
-
   MemoryChunk* chunks_queued_for_free_;
 
+  size_t concurrent_unmapping_tasks_active_;
+
+  base::Semaphore pending_unmapping_tasks_semaphore_;
+
   base::Mutex relocation_mutex_;
 
   int gc_callbacks_depth_;
 
   bool deserialization_complete_;
 
+  StrongRootsList* strong_roots_list_;
+
+  ArrayBufferTracker* array_buffer_tracker_;
+
+  // The depth of HeapIterator nestings.
+  int heap_iterator_depth_;
+
+  // Used for testing purposes.
+  bool force_oom_;
+
+  // Classes in "heap" can be friends.
   friend class AlwaysAllocateScope;
-  friend class Deserializer;
-  friend class Factory;
   friend class GCCallbacksScope;
   friend class GCTracer;
   friend class HeapIterator;
-  friend class Isolate;
+  friend class IdleScavengeObserver;
+  friend class IncrementalMarking;
+  friend class IteratePointersToFromSpaceVisitor;
   friend class MarkCompactCollector;
   friend class MarkCompactMarkingVisitor;
-  friend class MapCompact;
-#ifdef VERIFY_HEAP
-  friend class NoWeakObjectVerificationScope;
-#endif
+  friend class NewSpace;
+  friend class ObjectStatsVisitor;
   friend class Page;
+  friend class Scavenger;
+  friend class StoreBuffer;
+
+  // The allocator interface.
+  friend class Factory;
+
+  // The Isolate constructs us.
+  friend class Isolate;
+
+  // Used in cctest.
+  friend class HeapTester;
 
   DISALLOW_COPY_AND_ASSIGN(Heap);
 };
@@ -2148,30 +2452,26 @@
   int* start_marker;                       //  0
   int* new_space_size;                     //  1
   int* new_space_capacity;                 //  2
-  intptr_t* old_pointer_space_size;        //  3
-  intptr_t* old_pointer_space_capacity;    //  4
-  intptr_t* old_data_space_size;           //  5
-  intptr_t* old_data_space_capacity;       //  6
-  intptr_t* code_space_size;               //  7
-  intptr_t* code_space_capacity;           //  8
-  intptr_t* map_space_size;                //  9
-  intptr_t* map_space_capacity;            // 10
-  intptr_t* cell_space_size;               // 11
-  intptr_t* cell_space_capacity;           // 12
-  intptr_t* lo_space_size;                 // 13
-  int* global_handle_count;                // 14
-  int* weak_global_handle_count;           // 15
-  int* pending_global_handle_count;        // 16
-  int* near_death_global_handle_count;     // 17
-  int* free_global_handle_count;           // 18
-  intptr_t* memory_allocator_size;         // 19
-  intptr_t* memory_allocator_capacity;     // 20
-  int* objects_per_type;                   // 21
-  int* size_per_type;                      // 22
-  int* os_error;                           // 23
-  int* end_marker;                         // 24
-  intptr_t* property_cell_space_size;      // 25
-  intptr_t* property_cell_space_capacity;  // 26
+  intptr_t* old_space_size;                //  3
+  intptr_t* old_space_capacity;            //  4
+  intptr_t* code_space_size;               //  5
+  intptr_t* code_space_capacity;           //  6
+  intptr_t* map_space_size;                //  7
+  intptr_t* map_space_capacity;            //  8
+  intptr_t* lo_space_size;                 //  9
+  int* global_handle_count;                // 10
+  int* weak_global_handle_count;           // 11
+  int* pending_global_handle_count;        // 12
+  int* near_death_global_handle_count;     // 13
+  int* free_global_handle_count;           // 14
+  intptr_t* memory_allocator_size;         // 15
+  intptr_t* memory_allocator_capacity;     // 16
+  int* objects_per_type;                   // 17
+  int* size_per_type;                      // 18
+  int* os_error;                           // 19
+  char* last_few_messages;                 // 20
+  char* js_stacktrace;                     // 21
+  int* end_marker;                         // 22
 };
 
 
@@ -2181,29 +2481,6 @@
   inline ~AlwaysAllocateScope();
 
  private:
-  // Implicitly disable artificial allocation failures.
-  Heap* heap_;
-  DisallowAllocationFailure daf_;
-};
-
-
-#ifdef VERIFY_HEAP
-class NoWeakObjectVerificationScope {
- public:
-  inline NoWeakObjectVerificationScope();
-  inline ~NoWeakObjectVerificationScope();
-};
-#endif
-
-
-class GCCallbacksScope {
- public:
-  explicit inline GCCallbacksScope(Heap* heap);
-  inline ~GCCallbacksScope();
-
-  inline bool CheckReenter();
-
- private:
   Heap* heap_;
 };
 
@@ -2215,14 +2492,14 @@
 // objects in a heap space but above the allocation pointer.
 class VerifyPointersVisitor : public ObjectVisitor {
  public:
-  inline void VisitPointers(Object** start, Object** end);
+  inline void VisitPointers(Object** start, Object** end) override;
 };
 
 
 // Verify that all objects are Smis.
 class VerifySmisVisitor : public ObjectVisitor {
  public:
-  inline void VisitPointers(Object** start, Object** end);
+  inline void VisitPointers(Object** start, Object** end) override;
 };
 
 
@@ -2239,12 +2516,11 @@
 };
 
 
-// Space iterator for iterating over all old spaces of the heap: Old pointer
-// space, old data space and code space.  Returns each space in turn, and null
-// when it is done.
+// Space iterator for iterating over all old spaces of the heap: Old space
+// and code space.  Returns each space in turn, and null when it is done.
 class OldSpaces BASE_EMBEDDED {
  public:
-  explicit OldSpaces(Heap* heap) : heap_(heap), counter_(OLD_POINTER_SPACE) {}
+  explicit OldSpaces(Heap* heap) : heap_(heap), counter_(OLD_SPACE) {}
   OldSpace* next();
 
  private:
@@ -2254,11 +2530,11 @@
 
 
 // Space iterator for iterating over all the paged spaces of the heap: Map
-// space, old pointer space, old data space, code space and cell space.  Returns
+// space, old space and code space.  Returns
 // each space in turn, and null when it is done.
 class PagedSpaces BASE_EMBEDDED {
  public:
-  explicit PagedSpaces(Heap* heap) : heap_(heap), counter_(OLD_POINTER_SPACE) {}
+  explicit PagedSpaces(Heap* heap) : heap_(heap), counter_(OLD_SPACE) {}
   PagedSpace* next();
 
  private:
@@ -2273,7 +2549,6 @@
 class SpaceIterator : public Malloced {
  public:
   explicit SpaceIterator(Heap* heap);
-  SpaceIterator(Heap* heap, HeapObjectCallback size_func);
   virtual ~SpaceIterator();
 
   bool has_next();
@@ -2285,7 +2560,6 @@
   Heap* heap_;
   int current_space_;         // from enum AllocationSpace.
   ObjectIterator* iterator_;  // object iterator for the current space.
-  HeapObjectCallback size_func_;
 };
 
 
@@ -2301,32 +2575,30 @@
 // nodes filtering uses GC marks, it can't be used during MS/MC GC
 // phases. Also, it is forbidden to interrupt iteration in this mode,
 // as this will leave heap objects marked (and thus, unusable).
-class HeapObjectsFilter;
-
 class HeapIterator BASE_EMBEDDED {
  public:
   enum HeapObjectsFiltering { kNoFiltering, kFilterUnreachable };
 
-  explicit HeapIterator(Heap* heap);
-  HeapIterator(Heap* heap, HeapObjectsFiltering filtering);
+  explicit HeapIterator(Heap* heap,
+                        HeapObjectsFiltering filtering = kNoFiltering);
   ~HeapIterator();
 
   HeapObject* next();
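+  // Illustrative usage sketch (next() returns NULL once iteration is done):
+  //   HeapIterator it(heap);
+  //   while (HeapObject* obj = it.next()) {
+  //     // ... process obj ...
+  //   }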
-  void reset();
 
  private:
   struct MakeHeapIterableHelper {
     explicit MakeHeapIterableHelper(Heap* heap) { heap->MakeHeapIterable(); }
   };
 
-  // Perform the initialization.
-  void Init();
-  // Perform all necessary shutdown (destruction) work.
-  void Shutdown();
   HeapObject* NextObject();
 
+  // The following two fields need to be declared in this order. Initialization
+  // order guarantees that we first make the heap iterable (which may involve
+  // allocations) and only then lock it down by not allowing further
+  // allocations.
   MakeHeapIterableHelper make_heap_iterable_helper_;
   DisallowHeapAllocation no_heap_allocation_;
+
   Heap* heap_;
   HeapObjectsFiltering filtering_;
   HeapObjectsFilter* filter_;
@@ -2405,25 +2677,10 @@
  public:
   // Lookup descriptor index for (map, name).
   // If absent, kAbsent is returned.
-  int Lookup(Map* source, Name* name) {
-    if (!name->IsUniqueName()) return kAbsent;
-    int index = Hash(source, name);
-    Key& key = keys_[index];
-    if ((key.source == source) && (key.name == name)) return results_[index];
-    return kAbsent;
-  }
+  inline int Lookup(Map* source, Name* name);
 
   // Update an element in the cache.
-  void Update(Map* source, Name* name, int result) {
-    DCHECK(result != kAbsent);
-    if (name->IsUniqueName()) {
-      int index = Hash(source, name);
-      Key& key = keys_[index];
-      key.source = source;
-      key.name = name;
-      results_[index] = result;
-    }
-  }
+  inline void Update(Map* source, Name* name, int result);
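+  // Illustrative usage sketch: consult the cache first and fall back to a
+  // full descriptor lookup on a miss, e.g.
+  //   int index = cache->Lookup(map, name);
+  //   if (index == kAbsent) {
+  //     index = /* full search over the map's descriptors */;
+  //     cache->Update(map, name, index);
+  //   }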
 
   // Clear the cache.
   void Clear();
@@ -2464,30 +2721,6 @@
 };
 
 
-class RegExpResultsCache {
- public:
-  enum ResultsCacheType { REGEXP_MULTIPLE_INDICES, STRING_SPLIT_SUBSTRINGS };
-
-  // Attempt to retrieve a cached result.  On failure, 0 is returned as a Smi.
-  // On success, the returned result is guaranteed to be a COW-array.
-  static Object* Lookup(Heap* heap, String* key_string, Object* key_pattern,
-                        ResultsCacheType type);
-  // Attempt to add value_array to the cache specified by type.  On success,
-  // value_array is turned into a COW-array.
-  static void Enter(Isolate* isolate, Handle<String> key_string,
-                    Handle<Object> key_pattern, Handle<FixedArray> value_array,
-                    ResultsCacheType type);
-  static void Clear(FixedArray* cache);
-  static const int kRegExpResultsCacheSize = 0x100;
-
- private:
-  static const int kArrayEntriesPerCacheEntry = 4;
-  static const int kStringOffset = 0;
-  static const int kPatternOffset = 1;
-  static const int kArrayOffset = 2;
-};
-
-
 // Abstract base class for checking whether a weak object should be retained.
 class WeakObjectRetainer {
  public:
@@ -2500,46 +2733,6 @@
 };
 
 
-// Intrusive object marking uses least significant bit of
-// heap object's map word to mark objects.
-// Normally all map words have least significant bit set
-// because they contain tagged map pointer.
-// If the bit is not set object is marked.
-// All objects should be unmarked before resuming
-// JavaScript execution.
-class IntrusiveMarking {
- public:
-  static bool IsMarked(HeapObject* object) {
-    return (object->map_word().ToRawValue() & kNotMarkedBit) == 0;
-  }
-
-  static void ClearMark(HeapObject* object) {
-    uintptr_t map_word = object->map_word().ToRawValue();
-    object->set_map_word(MapWord::FromRawValue(map_word | kNotMarkedBit));
-    DCHECK(!IsMarked(object));
-  }
-
-  static void SetMark(HeapObject* object) {
-    uintptr_t map_word = object->map_word().ToRawValue();
-    object->set_map_word(MapWord::FromRawValue(map_word & ~kNotMarkedBit));
-    DCHECK(IsMarked(object));
-  }
-
-  static Map* MapOfMarkedObject(HeapObject* object) {
-    uintptr_t map_word = object->map_word().ToRawValue();
-    return MapWord::FromRawValue(map_word | kNotMarkedBit).ToMap();
-  }
-
-  static int SizeOfMarkedObject(HeapObject* object) {
-    return object->SizeFromMap(MapOfMarkedObject(object));
-  }
-
- private:
-  static const uintptr_t kNotMarkedBit = 0x1;
-  STATIC_ASSERT((kHeapObjectTag & kNotMarkedBit) != 0);  // NOLINT
-};
-
-
 #ifdef DEBUG
 // Helper class for tracing paths to a search target Object from all roots.
 // The TracePathFrom() method can be used to trace paths from a specific
@@ -2567,7 +2760,7 @@
         object_stack_(20),
         no_allocation() {}
 
-  virtual void VisitPointers(Object** start, Object** end);
+  void VisitPointers(Object** start, Object** end) override;
 
   void Reset();
   void TracePathFrom(Object** root);
@@ -2597,7 +2790,7 @@
   DISALLOW_IMPLICIT_CONSTRUCTORS(PathTracer);
 };
 #endif  // DEBUG
-}
-}  // namespace v8::internal
+}  // namespace internal
+}  // namespace v8
 
 #endif  // V8_HEAP_HEAP_H_
diff --git a/src/heap/incremental-marking-inl.h b/src/heap/incremental-marking-inl.h
index 496e02d..0d55b83 100644
--- a/src/heap/incremental-marking-inl.h
+++ b/src/heap/incremental-marking-inl.h
@@ -11,36 +11,6 @@
 namespace internal {
 
 
-bool IncrementalMarking::BaseRecordWrite(HeapObject* obj, Object** slot,
-                                         Object* value) {
-  HeapObject* value_heap_obj = HeapObject::cast(value);
-  MarkBit value_bit = Marking::MarkBitFrom(value_heap_obj);
-  if (Marking::IsWhite(value_bit)) {
-    MarkBit obj_bit = Marking::MarkBitFrom(obj);
-    if (Marking::IsBlack(obj_bit)) {
-      MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
-      if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
-        if (chunk->IsLeftOfProgressBar(slot)) {
-          WhiteToGreyAndPush(value_heap_obj, value_bit);
-          RestartIfNotMarking();
-        } else {
-          return false;
-        }
-      } else {
-        BlackToGreyAndUnshift(obj, obj_bit);
-        RestartIfNotMarking();
-        return false;
-      }
-    } else {
-      return false;
-    }
-  }
-  if (!is_compacting_) return false;
-  MarkBit obj_bit = Marking::MarkBitFrom(obj);
-  return Marking::IsBlack(obj_bit);
-}
-
-
 void IncrementalMarking::RecordWrite(HeapObject* obj, Object** slot,
                                      Object* value) {
   if (IsMarking() && value->IsHeapObject()) {
@@ -51,7 +21,9 @@
 
 void IncrementalMarking::RecordWriteOfCodeEntry(JSFunction* host, Object** slot,
                                                 Code* value) {
-  if (IsMarking()) RecordWriteOfCodeEntrySlow(host, slot, value);
+  if (IsMarking()) {
+    RecordWriteOfCodeEntrySlow(host, slot, value);
+  }
 }
 
 
@@ -63,55 +35,7 @@
 }
 
 
-void IncrementalMarking::RecordWrites(HeapObject* obj) {
-  if (IsMarking()) {
-    MarkBit obj_bit = Marking::MarkBitFrom(obj);
-    if (Marking::IsBlack(obj_bit)) {
-      MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
-      if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
-        chunk->set_progress_bar(0);
-      }
-      BlackToGreyAndUnshift(obj, obj_bit);
-      RestartIfNotMarking();
-    }
-  }
-}
-
-
-void IncrementalMarking::BlackToGreyAndUnshift(HeapObject* obj,
-                                               MarkBit mark_bit) {
-  DCHECK(Marking::MarkBitFrom(obj) == mark_bit);
-  DCHECK(obj->Size() >= 2 * kPointerSize);
-  DCHECK(IsMarking());
-  Marking::BlackToGrey(mark_bit);
-  int obj_size = obj->Size();
-  MemoryChunk::IncrementLiveBytesFromGC(obj->address(), -obj_size);
-  bytes_scanned_ -= obj_size;
-  int64_t old_bytes_rescanned = bytes_rescanned_;
-  bytes_rescanned_ = old_bytes_rescanned + obj_size;
-  if ((bytes_rescanned_ >> 20) != (old_bytes_rescanned >> 20)) {
-    if (bytes_rescanned_ > 2 * heap_->PromotedSpaceSizeOfObjects()) {
-      // If we have queued twice the heap size for rescanning then we are
-      // going around in circles, scanning the same objects again and again
-      // as the program mutates the heap faster than we can incrementally
-      // trace it.  In this case we switch to non-incremental marking in
-      // order to finish off this marking phase.
-      if (FLAG_trace_gc) {
-        PrintPID("Hurrying incremental marking because of lack of progress\n");
-      }
-      marking_speed_ = kMaxMarkingSpeed;
-    }
-  }
-
-  heap_->mark_compact_collector()->marking_deque()->UnshiftGrey(obj);
-}
-
-
-void IncrementalMarking::WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit) {
-  Marking::WhiteToGrey(mark_bit);
-  heap_->mark_compact_collector()->marking_deque()->PushGrey(obj);
-}
-}
-}  // namespace v8::internal
+}  // namespace internal
+}  // namespace v8
 
 #endif  // V8_HEAP_INCREMENTAL_MARKING_INL_H_
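After this change the inline header keeps only the cheap write-barrier entry points: RecordWrite and RecordWriteOfCodeEntry test IsMarking() (and the value's tag) and punt everything else to out-of-line slow paths, while BaseRecordWrite moves into incremental-marking.cc. A rough model of that fast/slow split, with made-up names rather than the real V8 types, could be:

// Fast/slow write-barrier split, modeled with plain integers. Illustrative
// only; the names and tagging scheme here are assumptions, not V8's API.
#include <cstdint>
#include <cstdio>

using Word = uintptr_t;
bool IsHeapObject(Word v) { return (v & 1) != 0; }  // Odd words = heap pointers.

struct Marker {
  bool marking = false;
  int slow_calls = 0;
  void RecordWriteSlow(Word* /*slot*/, Word /*value*/) { ++slow_calls; }
  // Fast path: only heap-object stores observed while marking go slow.
  void RecordWrite(Word* slot, Word value) {
    *slot = value;
    if (marking && IsHeapObject(value)) RecordWriteSlow(slot, value);
  }
};

int main() {
  Marker m;
  Word field = 0;
  m.marking = true;
  m.RecordWrite(&field, 42 << 1);  // Even word ("Smi"): stays on the fast path.
  m.RecordWrite(&field, 0x1001);   // Heap-object store while marking: slow path.
  std::printf("slow calls: %d\n", m.slow_calls);  // Prints 1.
}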
diff --git a/src/heap/incremental-marking-job.cc b/src/heap/incremental-marking-job.cc
new file mode 100644
index 0000000..a69dfac
--- /dev/null
+++ b/src/heap/incremental-marking-job.cc
@@ -0,0 +1,145 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/incremental-marking-job.h"
+
+#include "src/base/platform/time.h"
+#include "src/heap/heap-inl.h"
+#include "src/heap/heap.h"
+#include "src/heap/incremental-marking.h"
+#include "src/isolate.h"
+#include "src/v8.h"
+
+namespace v8 {
+namespace internal {
+
+
+void IncrementalMarkingJob::Start(Heap* heap) {
+  DCHECK(!heap->incremental_marking()->IsStopped());
+  // We don't need to reset the flags because tasks from the previous job
+  // can still be pending. We just want to ensure that tasks are posted
+  // if they are not pending.
+  // If the delayed task is pending and made_progress_since_last_delayed_task_
+  // is true, then the delayed task will clear that flag when it is
+  // rescheduled.
+  ScheduleIdleTask(heap);
+  ScheduleDelayedTask(heap);
+}
+
+
+void IncrementalMarkingJob::NotifyIdleTask() { idle_task_pending_ = false; }
+
+
+void IncrementalMarkingJob::NotifyDelayedTask() {
+  delayed_task_pending_ = false;
+}
+
+
+void IncrementalMarkingJob::NotifyIdleTaskProgress() {
+  made_progress_since_last_delayed_task_ = true;
+}
+
+
+void IncrementalMarkingJob::ScheduleIdleTask(Heap* heap) {
+  if (!idle_task_pending_) {
+    v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap->isolate());
+    if (V8::GetCurrentPlatform()->IdleTasksEnabled(isolate)) {
+      idle_task_pending_ = true;
+      auto task = new IdleTask(heap->isolate(), this);
+      V8::GetCurrentPlatform()->CallIdleOnForegroundThread(isolate, task);
+    }
+  }
+}
+
+
+void IncrementalMarkingJob::ScheduleDelayedTask(Heap* heap) {
+  if (!delayed_task_pending_ && FLAG_memory_reducer) {
+    v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap->isolate());
+    delayed_task_pending_ = true;
+    made_progress_since_last_delayed_task_ = false;
+    auto task = new DelayedTask(heap->isolate(), this);
+    V8::GetCurrentPlatform()->CallDelayedOnForegroundThread(isolate, task,
+                                                            kDelayInSeconds);
+  }
+}
+
+
+IncrementalMarkingJob::IdleTask::Progress IncrementalMarkingJob::IdleTask::Step(
+    Heap* heap, double deadline_in_ms) {
+  IncrementalMarking* incremental_marking = heap->incremental_marking();
+  MarkCompactCollector* mark_compact_collector = heap->mark_compact_collector();
+  if (incremental_marking->IsStopped()) {
+    return kDone;
+  }
+  if (mark_compact_collector->sweeping_in_progress()) {
+    if (mark_compact_collector->IsSweepingCompleted()) {
+      mark_compact_collector->EnsureSweepingCompleted();
+    }
+    return kMoreWork;
+  }
+  const double remaining_idle_time_in_ms =
+      incremental_marking->AdvanceIncrementalMarking(
+          0, deadline_in_ms, IncrementalMarking::IdleStepActions());
+  if (remaining_idle_time_in_ms > 0.0) {
+    heap->TryFinalizeIdleIncrementalMarking(remaining_idle_time_in_ms);
+  }
+  return incremental_marking->IsStopped() ? kDone : kMoreWork;
+}
+
+
+void IncrementalMarkingJob::IdleTask::RunInternal(double deadline_in_seconds) {
+  double deadline_in_ms =
+      deadline_in_seconds *
+      static_cast<double>(base::Time::kMillisecondsPerSecond);
+  Heap* heap = isolate()->heap();
+  double start_ms = heap->MonotonicallyIncreasingTimeInMs();
+  job_->NotifyIdleTask();
+  job_->NotifyIdleTaskProgress();
+  if (Step(heap, deadline_in_ms) == kMoreWork) {
+    job_->ScheduleIdleTask(heap);
+  }
+  if (FLAG_trace_idle_notification) {
+    double current_time_ms = heap->MonotonicallyIncreasingTimeInMs();
+    double idle_time_in_ms = deadline_in_ms - start_ms;
+    double deadline_difference = deadline_in_ms - current_time_ms;
+    PrintIsolate(isolate(), "%8.0f ms: ", isolate()->time_millis_since_init());
+    PrintF(
+        "Idle task: requested idle time %.2f ms, used idle time %.2f "
+        "ms, deadline usage %.2f ms\n",
+        idle_time_in_ms, idle_time_in_ms - deadline_difference,
+        deadline_difference);
+  }
+}
+
+
+void IncrementalMarkingJob::DelayedTask::Step(Heap* heap) {
+  const int kIncrementalMarkingDelayMs = 50;
+  double deadline =
+      heap->MonotonicallyIncreasingTimeInMs() + kIncrementalMarkingDelayMs;
+  heap->incremental_marking()->AdvanceIncrementalMarking(
+      0, deadline, i::IncrementalMarking::StepActions(
+                       i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
+                       i::IncrementalMarking::FORCE_MARKING,
+                       i::IncrementalMarking::FORCE_COMPLETION));
+  heap->FinalizeIncrementalMarkingIfComplete(
+      "Incremental marking task: finalize incremental marking");
+}
+
+
+void IncrementalMarkingJob::DelayedTask::RunInternal() {
+  Heap* heap = isolate()->heap();
+  job_->NotifyDelayedTask();
+  IncrementalMarking* incremental_marking = heap->incremental_marking();
+  if (!incremental_marking->IsStopped()) {
+    if (job_->ShouldForceMarkingStep()) {
+      Step(heap);
+    }
+    // The Step() above could have finished incremental marking.
+    if (!incremental_marking->IsStopped()) {
+      job_->ScheduleDelayedTask(heap);
+    }
+  }
+}
+
+}  // namespace internal
+}  // namespace v8
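The job above coordinates its two tasks through three booleans: the idle task clears idle_task_pending_ and sets the progress flag, while the delayed task forces a marking step only when no idle progress has happened since the previous delayed task. A compact standalone model of that flag protocol (names invented here; the actual platform scheduling is omitted) might read:

// Flag protocol of the incremental marking job, reduced to a toy model.
// Sketch with assumed names; the real class also posts platform tasks.
#include <cstdio>

struct JobModel {
  bool idle_task_pending = false;
  bool delayed_task_pending = false;
  bool made_progress_since_last_delayed_task = false;

  bool ShouldForceMarkingStep() const {
    return !made_progress_since_last_delayed_task;
  }
  void RunIdleTask() {     // Mirrors IdleTask::RunInternal's bookkeeping.
    idle_task_pending = false;
    made_progress_since_last_delayed_task = true;
  }
  void RunDelayedTask() {  // Mirrors DelayedTask::RunInternal's bookkeeping.
    delayed_task_pending = false;
    std::printf("delayed task: %s\n", ShouldForceMarkingStep()
                                          ? "force a marking step"
                                          : "skip, idle task made progress");
    made_progress_since_last_delayed_task = false;  // Cleared on reschedule.
  }
};

int main() {
  JobModel job;
  job.RunIdleTask();     // Idle work happened since the last delayed task.
  job.RunDelayedTask();  // Prints "skip, idle task made progress".
  job.RunDelayedTask();  // No idle progress in between: forces a step.
}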
diff --git a/src/heap/incremental-marking-job.h b/src/heap/incremental-marking-job.h
new file mode 100644
index 0000000..c998139
--- /dev/null
+++ b/src/heap/incremental-marking-job.h
@@ -0,0 +1,81 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_INCREMENTAL_MARKING_JOB_H_
+#define V8_HEAP_INCREMENTAL_MARKING_JOB_H_
+
+#include "src/cancelable-task.h"
+
+namespace v8 {
+namespace internal {
+
+class Heap;
+class Isolate;
+
+// The incremental marking job uses platform tasks to perform incremental
+// marking steps. The job posts an idle and a delayed task with a large delay.
+// The delayed task performs steps only if the idle task is not making progress.
+// We expect this to be a rare event since incremental marking should finish
+// quickly with the help of the mutator and the idle task.
+// The delayed task guarantees that we eventually finish incremental marking
+// even if the mutator becomes idle and the platform stops running idle tasks,
+// which can happen for background tabs in Chrome.
+class IncrementalMarkingJob {
+ public:
+  class IdleTask : public CancelableIdleTask {
+   public:
+    explicit IdleTask(Isolate* isolate, IncrementalMarkingJob* job)
+        : CancelableIdleTask(isolate), job_(job) {}
+    enum Progress { kDone, kMoreWork };
+    static Progress Step(Heap* heap, double deadline_in_ms);
+    // CancelableIdleTask overrides.
+    void RunInternal(double deadline_in_seconds) override;
+
+   private:
+    IncrementalMarkingJob* job_;
+  };
+
+  class DelayedTask : public CancelableTask {
+   public:
+    explicit DelayedTask(Isolate* isolate, IncrementalMarkingJob* job)
+        : CancelableTask(isolate), job_(job) {}
+    static void Step(Heap* heap);
+    // CancelableTask overrides.
+    void RunInternal() override;
+
+   private:
+    IncrementalMarkingJob* job_;
+  };
+
+  // Delay of the delayed task.
+  static const int kDelayInSeconds = 5;
+
+  IncrementalMarkingJob()
+      : idle_task_pending_(false),
+        delayed_task_pending_(false),
+        made_progress_since_last_delayed_task_(false) {}
+
+  bool ShouldForceMarkingStep() {
+    return !made_progress_since_last_delayed_task_;
+  }
+
+  bool IdleTaskPending() { return idle_task_pending_; }
+
+  void Start(Heap* heap);
+
+  void NotifyIdleTask();
+  void NotifyDelayedTask();
+  void NotifyIdleTaskProgress();
+  void ScheduleIdleTask(Heap* heap);
+  void ScheduleDelayedTask(Heap* heap);
+
+ private:
+  bool idle_task_pending_;
+  bool delayed_task_pending_;
+  bool made_progress_since_last_delayed_task_;
+};
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_HEAP_INCREMENTAL_MARKING_JOB_H_
diff --git a/src/heap/incremental-marking.cc b/src/heap/incremental-marking.cc
index aadd17c..52d0ca4 100644
--- a/src/heap/incremental-marking.cc
+++ b/src/heap/incremental-marking.cc
@@ -2,44 +2,73 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "src/v8.h"
-
 #include "src/heap/incremental-marking.h"
 
 #include "src/code-stubs.h"
 #include "src/compilation-cache.h"
 #include "src/conversions.h"
+#include "src/heap/gc-idle-time-handler.h"
+#include "src/heap/gc-tracer.h"
+#include "src/heap/mark-compact-inl.h"
 #include "src/heap/objects-visiting.h"
 #include "src/heap/objects-visiting-inl.h"
+#include "src/v8.h"
 
 namespace v8 {
 namespace internal {
 
+IncrementalMarking::StepActions IncrementalMarking::IdleStepActions() {
+  return StepActions(IncrementalMarking::NO_GC_VIA_STACK_GUARD,
+                     IncrementalMarking::FORCE_MARKING,
+                     IncrementalMarking::DO_NOT_FORCE_COMPLETION);
+}
+
 
 IncrementalMarking::IncrementalMarking(Heap* heap)
     : heap_(heap),
+      observer_(*this, kAllocatedThreshold),
       state_(STOPPED),
+      is_compacting_(false),
       steps_count_(0),
       old_generation_space_available_at_start_of_incremental_(0),
       old_generation_space_used_at_start_of_incremental_(0),
+      bytes_rescanned_(0),
       should_hurry_(false),
       marking_speed_(0),
+      bytes_scanned_(0),
       allocated_(0),
+      write_barriers_invoked_since_last_step_(0),
       idle_marking_delay_counter_(0),
       no_marking_scope_depth_(0),
       unscanned_bytes_of_large_object_(0),
-      was_activated_(false) {}
+      was_activated_(false),
+      finalize_marking_completed_(false),
+      incremental_marking_finalization_rounds_(0),
+      request_type_(COMPLETE_MARKING) {}
+
+
+bool IncrementalMarking::BaseRecordWrite(HeapObject* obj, Object* value) {
+  HeapObject* value_heap_obj = HeapObject::cast(value);
+  MarkBit value_bit = Marking::MarkBitFrom(value_heap_obj);
+  DCHECK(!Marking::IsImpossible(value_bit));
+
+  MarkBit obj_bit = Marking::MarkBitFrom(obj);
+  DCHECK(!Marking::IsImpossible(obj_bit));
+  bool is_black = Marking::IsBlack(obj_bit);
+
+  if (is_black && Marking::IsWhite(value_bit)) {
+    WhiteToGreyAndPush(value_heap_obj, value_bit);
+    RestartIfNotMarking();
+  }
+  return is_compacting_ && is_black;
+}
 
 
 void IncrementalMarking::RecordWriteSlow(HeapObject* obj, Object** slot,
                                          Object* value) {
-  if (BaseRecordWrite(obj, slot, value) && slot != NULL) {
-    MarkBit obj_bit = Marking::MarkBitFrom(obj);
-    if (Marking::IsBlack(obj_bit)) {
-      // Object is not going to be rescanned we need to record the slot.
-      heap_->mark_compact_collector()->RecordSlot(HeapObject::RawField(obj, 0),
-                                                  slot, value);
-    }
+  if (BaseRecordWrite(obj, value) && slot != NULL) {
+    // Object is not going to be rescanned, so we need to record the slot.
+    heap_->mark_compact_collector()->RecordSlot(obj, slot, value);
   }
 }
 
@@ -66,7 +95,7 @@
 void IncrementalMarking::RecordCodeTargetPatch(Code* host, Address pc,
                                                HeapObject* value) {
   if (IsMarking()) {
-    RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
+    RelocInfo rinfo(heap_->isolate(), pc, RelocInfo::CODE_TARGET, 0, host);
     RecordWriteIntoCode(host, &rinfo, value);
   }
 }
@@ -77,7 +106,7 @@
     Code* host = heap_->isolate()
                      ->inner_pointer_to_code_cache()
                      ->GcSafeFindCodeForInnerPointer(pc);
-    RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
+    RelocInfo rinfo(heap_->isolate(), pc, RelocInfo::CODE_TARGET, 0, host);
     RecordWriteIntoCode(host, &rinfo, value);
   }
 }
@@ -86,10 +115,10 @@
 void IncrementalMarking::RecordWriteOfCodeEntrySlow(JSFunction* host,
                                                     Object** slot,
                                                     Code* value) {
-  if (BaseRecordWrite(host, slot, value)) {
+  if (BaseRecordWrite(host, value)) {
     DCHECK(slot != NULL);
     heap_->mark_compact_collector()->RecordCodeEntrySlot(
-        reinterpret_cast<Address>(slot), value);
+        host, reinterpret_cast<Address>(slot), value);
   }
 }
 
@@ -97,58 +126,84 @@
 void IncrementalMarking::RecordWriteIntoCodeSlow(HeapObject* obj,
                                                  RelocInfo* rinfo,
                                                  Object* value) {
-  MarkBit value_bit = Marking::MarkBitFrom(HeapObject::cast(value));
-  if (Marking::IsWhite(value_bit)) {
-    MarkBit obj_bit = Marking::MarkBitFrom(obj);
-    if (Marking::IsBlack(obj_bit)) {
-      BlackToGreyAndUnshift(obj, obj_bit);
-      RestartIfNotMarking();
-    }
-    // Object is either grey or white.  It will be scanned if survives.
-    return;
-  }
-
-  if (is_compacting_) {
-    MarkBit obj_bit = Marking::MarkBitFrom(obj);
-    if (Marking::IsBlack(obj_bit)) {
+  if (BaseRecordWrite(obj, value)) {
       // Object is not going to be rescanned.  We need to record the slot.
       heap_->mark_compact_collector()->RecordRelocSlot(rinfo,
                                                        Code::cast(value));
+  }
+}
+
+
+void IncrementalMarking::RecordWrites(HeapObject* obj) {
+  if (IsMarking()) {
+    MarkBit obj_bit = Marking::MarkBitFrom(obj);
+    if (Marking::IsBlack(obj_bit)) {
+      MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
+      if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
+        chunk->set_progress_bar(0);
+      }
+      BlackToGreyAndUnshift(obj, obj_bit);
+      RestartIfNotMarking();
     }
   }
 }
 
 
+void IncrementalMarking::BlackToGreyAndUnshift(HeapObject* obj,
+                                               MarkBit mark_bit) {
+  DCHECK(Marking::MarkBitFrom(obj) == mark_bit);
+  DCHECK(obj->Size() >= 2 * kPointerSize);
+  DCHECK(IsMarking());
+  Marking::BlackToGrey(mark_bit);
+  int obj_size = obj->Size();
+  MemoryChunk::IncrementLiveBytesFromGC(obj, -obj_size);
+  bytes_scanned_ -= obj_size;
+  int64_t old_bytes_rescanned = bytes_rescanned_;
+  bytes_rescanned_ = old_bytes_rescanned + obj_size;
+  if ((bytes_rescanned_ >> 20) != (old_bytes_rescanned >> 20)) {
+    if (bytes_rescanned_ > 2 * heap_->PromotedSpaceSizeOfObjects()) {
+      // If we have queued twice the heap size for rescanning then we are
+      // going around in circles, scanning the same objects again and again
+      // as the program mutates the heap faster than we can incrementally
+      // trace it.  In this case we switch to non-incremental marking in
+      // order to finish off this marking phase.
+      if (FLAG_trace_incremental_marking) {
+        PrintIsolate(
+            heap()->isolate(),
+            "Hurrying incremental marking because of lack of progress\n");
+      }
+      marking_speed_ = kMaxMarkingSpeed;
+    }
+  }
+
+  heap_->mark_compact_collector()->marking_deque()->Unshift(obj);
+}
+
+
+void IncrementalMarking::WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit) {
+  Marking::WhiteToGrey(mark_bit);
+  heap_->mark_compact_collector()->marking_deque()->Push(obj);
+}
+
+
 static void MarkObjectGreyDoNotEnqueue(Object* obj) {
   if (obj->IsHeapObject()) {
     HeapObject* heap_obj = HeapObject::cast(obj);
     MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::cast(obj));
     if (Marking::IsBlack(mark_bit)) {
-      MemoryChunk::IncrementLiveBytesFromGC(heap_obj->address(),
-                                            -heap_obj->Size());
+      MemoryChunk::IncrementLiveBytesFromGC(heap_obj, -heap_obj->Size());
     }
     Marking::AnyToGrey(mark_bit);
   }
 }
 
 
-static inline void MarkBlackOrKeepGrey(HeapObject* heap_object,
-                                       MarkBit mark_bit, int size) {
-  DCHECK(!Marking::IsImpossible(mark_bit));
-  if (mark_bit.Get()) return;
-  mark_bit.Set();
-  MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(), size);
-  DCHECK(Marking::IsBlack(mark_bit));
-}
-
-
 static inline void MarkBlackOrKeepBlack(HeapObject* heap_object,
                                         MarkBit mark_bit, int size) {
   DCHECK(!Marking::IsImpossible(mark_bit));
   if (Marking::IsBlack(mark_bit)) return;
   Marking::MarkBlack(mark_bit);
-  MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(), size);
-  DCHECK(Marking::IsBlack(mark_bit));
+  MemoryChunk::IncrementLiveBytesFromGC(heap_object, size);
 }
 
 
@@ -186,9 +241,8 @@
       int already_scanned_offset = start_offset;
       bool scan_until_end = false;
       do {
-        VisitPointersWithAnchor(heap, HeapObject::RawField(object, 0),
-                                HeapObject::RawField(object, start_offset),
-                                HeapObject::RawField(object, end_offset));
+        VisitPointers(heap, object, HeapObject::RawField(object, start_offset),
+                      HeapObject::RawField(object, end_offset));
         start_offset = end_offset;
         end_offset = Min(object_size, end_offset + kProgressBarScanningChunk);
         scan_until_end =
@@ -196,7 +250,12 @@
       } while (scan_until_end && start_offset < object_size);
       chunk->set_progress_bar(start_offset);
       if (start_offset < object_size) {
-        heap->mark_compact_collector()->marking_deque()->UnshiftGrey(object);
+        if (Marking::IsGrey(Marking::MarkBitFrom(object))) {
+          heap->mark_compact_collector()->marking_deque()->Unshift(object);
+        } else {
+          DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
+          heap->mark_compact_collector()->UnshiftBlack(object);
+        }
         heap->incremental_marking()->NotifyIncompleteScanOfObject(
             object_size - (start_offset - already_scanned_offset));
       }
@@ -218,44 +277,28 @@
     VisitNativeContext(map, context);
   }
 
-  INLINE(static void VisitPointer(Heap* heap, Object** p)) {
-    Object* obj = *p;
-    if (obj->IsHeapObject()) {
-      heap->mark_compact_collector()->RecordSlot(p, p, obj);
-      MarkObject(heap, obj);
+  INLINE(static void VisitPointer(Heap* heap, HeapObject* object, Object** p)) {
+    Object* target = *p;
+    if (target->IsHeapObject()) {
+      heap->mark_compact_collector()->RecordSlot(object, p, target);
+      MarkObject(heap, target);
     }
   }
 
-  INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
+  INLINE(static void VisitPointers(Heap* heap, HeapObject* object,
+                                   Object** start, Object** end)) {
     for (Object** p = start; p < end; p++) {
-      Object* obj = *p;
-      if (obj->IsHeapObject()) {
-        heap->mark_compact_collector()->RecordSlot(start, p, obj);
-        MarkObject(heap, obj);
-      }
-    }
-  }
-
-  INLINE(static void VisitPointersWithAnchor(Heap* heap, Object** anchor,
-                                             Object** start, Object** end)) {
-    for (Object** p = start; p < end; p++) {
-      Object* obj = *p;
-      if (obj->IsHeapObject()) {
-        heap->mark_compact_collector()->RecordSlot(anchor, p, obj);
-        MarkObject(heap, obj);
+      Object* target = *p;
+      if (target->IsHeapObject()) {
+        heap->mark_compact_collector()->RecordSlot(object, p, target);
+        MarkObject(heap, target);
       }
     }
   }
 
   // Marks the object grey and pushes it on the marking stack.
   INLINE(static void MarkObject(Heap* heap, Object* obj)) {
-    HeapObject* heap_object = HeapObject::cast(obj);
-    MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
-    if (mark_bit.data_only()) {
-      MarkBlackOrKeepGrey(heap_object, mark_bit, heap_object->Size());
-    } else if (Marking::IsWhite(mark_bit)) {
-      heap->incremental_marking()->WhiteToGreyAndPush(heap_object, mark_bit);
-    }
+    IncrementalMarking::MarkObject(heap, HeapObject::cast(obj));
   }
 
   // Marks the object black without pushing it on the marking stack.
@@ -264,9 +307,8 @@
     HeapObject* heap_object = HeapObject::cast(obj);
     MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
     if (Marking::IsWhite(mark_bit)) {
-      mark_bit.Set();
-      MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(),
-                                            heap_object->Size());
+      Marking::MarkBlack(mark_bit);
+      MemoryChunk::IncrementLiveBytesFromGC(heap_object, heap_object->Size());
       return true;
     }
     return false;
@@ -278,11 +320,11 @@
  public:
   explicit IncrementalMarkingRootMarkingVisitor(
       IncrementalMarking* incremental_marking)
-      : incremental_marking_(incremental_marking) {}
+      : heap_(incremental_marking->heap()) {}
 
-  void VisitPointer(Object** p) { MarkObjectByPointer(p); }
+  void VisitPointer(Object** p) override { MarkObjectByPointer(p); }
 
-  void VisitPointers(Object** start, Object** end) {
+  void VisitPointers(Object** start, Object** end) override {
     for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
   }
 
@@ -291,18 +333,10 @@
     Object* obj = *p;
     if (!obj->IsHeapObject()) return;
 
-    HeapObject* heap_object = HeapObject::cast(obj);
-    MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
-    if (mark_bit.data_only()) {
-      MarkBlackOrKeepGrey(heap_object, mark_bit, heap_object->Size());
-    } else {
-      if (Marking::IsWhite(mark_bit)) {
-        incremental_marking_->WhiteToGreyAndPush(heap_object, mark_bit);
-      }
-    }
+    IncrementalMarking::MarkObject(heap_, HeapObject::cast(obj));
   }
 
-  IncrementalMarking* incremental_marking_;
+  Heap* heap_;
 };
 
 
@@ -317,17 +351,6 @@
   if (is_marking) {
     chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
     chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
-
-    // It's difficult to filter out slots recorded for large objects.
-    if (chunk->owner()->identity() == LO_SPACE &&
-        chunk->size() > static_cast<size_t>(Page::kPageSize) && is_compacting) {
-      chunk->SetFlag(MemoryChunk::RESCAN_ON_EVACUATION);
-    }
-  } else if (chunk->owner()->identity() == CELL_SPACE ||
-             chunk->owner()->identity() == PROPERTY_CELL_SPACE ||
-             chunk->scan_on_scavenge()) {
-    chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
-    chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
   } else {
     chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
     chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
@@ -335,7 +358,7 @@
 }
 
 
-void IncrementalMarking::SetNewSpacePageFlags(NewSpacePage* chunk,
+void IncrementalMarking::SetNewSpacePageFlags(MemoryChunk* chunk,
                                               bool is_marking) {
   chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
   if (is_marking) {
@@ -368,10 +391,7 @@
 
 
 void IncrementalMarking::DeactivateIncrementalWriteBarrier() {
-  DeactivateIncrementalWriteBarrierForSpace(heap_->old_pointer_space());
-  DeactivateIncrementalWriteBarrierForSpace(heap_->old_data_space());
-  DeactivateIncrementalWriteBarrierForSpace(heap_->cell_space());
-  DeactivateIncrementalWriteBarrierForSpace(heap_->property_cell_space());
+  DeactivateIncrementalWriteBarrierForSpace(heap_->old_space());
   DeactivateIncrementalWriteBarrierForSpace(heap_->map_space());
   DeactivateIncrementalWriteBarrierForSpace(heap_->code_space());
   DeactivateIncrementalWriteBarrierForSpace(heap_->new_space());
@@ -403,10 +423,7 @@
 
 
 void IncrementalMarking::ActivateIncrementalWriteBarrier() {
-  ActivateIncrementalWriteBarrier(heap_->old_pointer_space());
-  ActivateIncrementalWriteBarrier(heap_->old_data_space());
-  ActivateIncrementalWriteBarrier(heap_->cell_space());
-  ActivateIncrementalWriteBarrier(heap_->property_cell_space());
+  ActivateIncrementalWriteBarrier(heap_->old_space());
   ActivateIncrementalWriteBarrier(heap_->map_space());
   ActivateIncrementalWriteBarrier(heap_->code_space());
   ActivateIncrementalWriteBarrier(heap_->new_space());
@@ -419,15 +436,17 @@
 }
 
 
-bool IncrementalMarking::ShouldActivate() {
-  return WorthActivating() && heap_->NextGCIsLikelyToBeFull();
+bool IncrementalMarking::ShouldActivateEvenWithoutIdleNotification() {
+  return CanBeActivated() &&
+         heap_->HeapIsFullEnoughToStartIncrementalMarking(
+             heap_->old_generation_allocation_limit());
 }
 
 
 bool IncrementalMarking::WasActivated() { return was_activated_; }
 
 
-bool IncrementalMarking::WorthActivating() {
+bool IncrementalMarking::CanBeActivated() {
 #ifndef DEBUG
   static const intptr_t kActivationThreshold = 8 * MB;
 #else
@@ -438,8 +457,8 @@
   // Only start incremental marking in a safe state: 1) when incremental
   // marking is turned on, 2) when we are currently not in a GC, and
   // 3) when we are currently not serializing or deserializing the heap.
-  return FLAG_incremental_marking && FLAG_incremental_marking_steps &&
-         heap_->gc_state() == Heap::NOT_IN_GC &&
+  // Don't switch on for very small heaps.
+  return FLAG_incremental_marking && heap_->gc_state() == Heap::NOT_IN_GC &&
          heap_->deserialization_complete() &&
          !heap_->isolate()->serializer_enabled() &&
          heap_->PromotedSpaceSizeOfObjects() > kActivationThreshold;
@@ -461,6 +480,21 @@
 }
 
 
+void IncrementalMarking::NotifyOfHighPromotionRate() {
+  if (IsMarking()) {
+    if (marking_speed_ < kFastMarking) {
+      if (FLAG_trace_gc) {
+        PrintIsolate(heap()->isolate(),
+                     "Increasing marking speed to %d "
+                     "due to high promotion rate\n",
+                     static_cast<int>(kFastMarking));
+      }
+      marking_speed_ = kFastMarking;
+    }
+  }
+}
+
+
 static void PatchIncrementalMarkingRecordWriteStubs(
     Heap* heap, RecordWriteStub::Mode mode) {
   UnseededNumberDictionary* stubs = heap->code_stubs();
@@ -482,22 +516,24 @@
 }
 
 
-void IncrementalMarking::Start(CompactionFlag flag) {
+void IncrementalMarking::Start(const char* reason) {
   if (FLAG_trace_incremental_marking) {
-    PrintF("[IncrementalMarking] Start\n");
+    PrintF("[IncrementalMarking] Start (%s)\n",
+           (reason == nullptr) ? "unknown reason" : reason);
   }
   DCHECK(FLAG_incremental_marking);
-  DCHECK(FLAG_incremental_marking_steps);
   DCHECK(state_ == STOPPED);
   DCHECK(heap_->gc_state() == Heap::NOT_IN_GC);
   DCHECK(!heap_->isolate()->serializer_enabled());
 
+  HistogramTimerScope incremental_marking_scope(
+      heap_->isolate()->counters()->gc_incremental_marking_start());
   ResetStepCounters();
 
   was_activated_ = true;
 
   if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
-    StartMarking(flag);
+    StartMarking();
   } else {
     if (FLAG_trace_incremental_marking) {
       PrintF("[IncrementalMarking] Start sweeping.\n");
@@ -505,16 +541,18 @@
     state_ = SWEEPING;
   }
 
-  heap_->new_space()->LowerInlineAllocationLimit(kAllocatedThreshold);
+  heap_->new_space()->AddInlineAllocationObserver(&observer_);
+
+  incremental_marking_job()->Start(heap_);
 }
 
 
-void IncrementalMarking::StartMarking(CompactionFlag flag) {
+void IncrementalMarking::StartMarking() {
   if (FLAG_trace_incremental_marking) {
     PrintF("[IncrementalMarking] Start marking\n");
   }
 
-  is_compacting_ = !FLAG_never_compact && (flag == ALLOW_COMPACTION) &&
+  is_compacting_ = !FLAG_never_compact &&
                    heap_->mark_compact_collector()->StartCompaction(
                        MarkCompactCollector::INCREMENTAL_COMPACTION);
 
@@ -526,7 +564,8 @@
 
   PatchIncrementalMarkingRecordWriteStubs(heap_, mode);
 
-  heap_->mark_compact_collector()->EnsureMarkingDequeIsCommittedAndInitialize();
+  heap_->mark_compact_collector()->EnsureMarkingDequeIsCommittedAndInitialize(
+      MarkCompactCollector::kMaxMarkingDequeSize);
 
   ActivateIncrementalWriteBarrier();
 
@@ -550,8 +589,6 @@
   IncrementalMarkingRootMarkingVisitor visitor(this);
   heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
 
-  heap_->mark_compact_collector()->MarkWeakObjectToCodeTable();
-
   // Ready to start incremental marking.
   if (FLAG_trace_incremental_marking) {
     PrintF("[IncrementalMarking] Running\n");
@@ -559,12 +596,176 @@
 }
 
 
-void IncrementalMarking::PrepareForScavenge() {
-  if (!IsMarking()) return;
-  NewSpacePageIterator it(heap_->new_space()->FromSpaceStart(),
-                          heap_->new_space()->FromSpaceEnd());
-  while (it.has_next()) {
-    Bitmap::Clear(it.next());
+void IncrementalMarking::MarkRoots() {
+  DCHECK(!finalize_marking_completed_);
+  DCHECK(IsMarking());
+
+  IncrementalMarkingRootMarkingVisitor visitor(this);
+  heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
+}
+
+
+void IncrementalMarking::MarkObjectGroups() {
+  DCHECK(!finalize_marking_completed_);
+  DCHECK(IsMarking());
+
+  IncrementalMarkingRootMarkingVisitor visitor(this);
+  heap_->mark_compact_collector()->MarkImplicitRefGroups(&MarkObject);
+  heap_->isolate()->global_handles()->IterateObjectGroups(
+      &visitor, &MarkCompactCollector::IsUnmarkedHeapObjectWithHeap);
+  heap_->isolate()->global_handles()->RemoveImplicitRefGroups();
+  heap_->isolate()->global_handles()->RemoveObjectGroups();
+}
+
+
+void IncrementalMarking::ProcessWeakCells() {
+  DCHECK(!finalize_marking_completed_);
+  DCHECK(IsMarking());
+
+  Object* the_hole_value = heap()->the_hole_value();
+  Object* weak_cell_obj = heap()->encountered_weak_cells();
+  Object* weak_cell_head = Smi::FromInt(0);
+  WeakCell* prev_weak_cell_obj = NULL;
+  while (weak_cell_obj != Smi::FromInt(0)) {
+    WeakCell* weak_cell = reinterpret_cast<WeakCell*>(weak_cell_obj);
+    // We do not insert cleared weak cells into the list, so the value
+    // cannot be a Smi here.
+    HeapObject* value = HeapObject::cast(weak_cell->value());
+    // Remove weak cells with live objects from the list; they do not need
+    // clearing.
+    if (MarkCompactCollector::IsMarked(value)) {
+      // Record slot, if value is pointing to an evacuation candidate.
+      Object** slot = HeapObject::RawField(weak_cell, WeakCell::kValueOffset);
+      heap_->mark_compact_collector()->RecordSlot(weak_cell, slot, *slot);
+      // Remove entry somewhere after top.
+      if (prev_weak_cell_obj != NULL) {
+        prev_weak_cell_obj->set_next(weak_cell->next());
+      }
+      weak_cell_obj = weak_cell->next();
+      weak_cell->clear_next(the_hole_value);
+    } else {
+      if (weak_cell_head == Smi::FromInt(0)) {
+        weak_cell_head = weak_cell;
+      }
+      prev_weak_cell_obj = weak_cell;
+      weak_cell_obj = weak_cell->next();
+    }
+  }
+  // Top may have changed.
+  heap()->set_encountered_weak_cells(weak_cell_head);
+}
+
+
+bool ShouldRetainMap(Map* map, int age) {
+  if (age == 0) {
+    // The map has aged. Do not retain this map.
+    return false;
+  }
+  Object* constructor = map->GetConstructor();
+  if (!constructor->IsHeapObject() ||
+      Marking::IsWhite(Marking::MarkBitFrom(HeapObject::cast(constructor)))) {
+      // The constructor is dead, so no new objects with this map can
+    // be created. Do not retain this map.
+    return false;
+  }
+  return true;
+}
+
+
+void IncrementalMarking::RetainMaps() {
+  // Do not retain dead maps if the flag disables it, or if there is
+  // - memory pressure (reduce_memory_footprint_), or
+  // - a GC requested by tests or dev-tools (abort_incremental_marking_).
+  bool map_retaining_is_disabled = heap()->ShouldReduceMemory() ||
+                                   heap()->ShouldAbortIncrementalMarking() ||
+                                   FLAG_retain_maps_for_n_gc == 0;
+  ArrayList* retained_maps = heap()->retained_maps();
+  int length = retained_maps->Length();
+  // The number_of_disposed_maps separates maps in the retained_maps
+  // array that were created before and after context disposal.
+  // We neither age nor retain disposed maps, to avoid memory leaks.
+  int number_of_disposed_maps = heap()->number_of_disposed_maps_;
+  for (int i = 0; i < length; i += 2) {
+    DCHECK(retained_maps->Get(i)->IsWeakCell());
+    WeakCell* cell = WeakCell::cast(retained_maps->Get(i));
+    if (cell->cleared()) continue;
+    int age = Smi::cast(retained_maps->Get(i + 1))->value();
+    int new_age;
+    Map* map = Map::cast(cell->value());
+    MarkBit map_mark = Marking::MarkBitFrom(map);
+    if (i >= number_of_disposed_maps && !map_retaining_is_disabled &&
+        Marking::IsWhite(map_mark)) {
+      if (ShouldRetainMap(map, age)) {
+        MarkObject(heap(), map);
+      }
+      Object* prototype = map->prototype();
+      if (age > 0 && prototype->IsHeapObject() &&
+          Marking::IsWhite(Marking::MarkBitFrom(HeapObject::cast(prototype)))) {
+        // The prototype is not marked, age the map.
+        new_age = age - 1;
+      } else {
+        // The prototype and the constructor are marked, so this map keeps
+        // only the transition tree alive, not JSObjects. Do not age the map.
+        new_age = age;
+      }
+    } else {
+      new_age = FLAG_retain_maps_for_n_gc;
+    }
+    // Compact the array and update the age.
+    if (new_age != age) {
+      retained_maps->Set(i + 1, Smi::FromInt(new_age));
+    }
+  }
+}
+
+
+void IncrementalMarking::FinalizeIncrementally() {
+  DCHECK(!finalize_marking_completed_);
+  DCHECK(IsMarking());
+
+  double start = heap_->MonotonicallyIncreasingTimeInMs();
+
+  int old_marking_deque_top =
+      heap_->mark_compact_collector()->marking_deque()->top();
+
+  // After finishing incremental marking, we try to discover all unmarked
+  // objects to reduce the marking load in the final pause.
+  // 1) We scan and mark the roots again to find all changes to the root set.
+  // 2) We mark the object groups.
+  // 3) Age and retain maps embedded in optimized code.
+  // 4) Remove weak cells with live values from the list of weak cells; they
+  // do not need processing during GC.
+  MarkRoots();
+  MarkObjectGroups();
+  if (incremental_marking_finalization_rounds_ == 0) {
+    // Map retaining is needed for performance, not correctness,
+    // so we can do it only once at the beginning of the finalization.
+    RetainMaps();
+  }
+  ProcessWeakCells();
+
+  int marking_progress =
+      abs(old_marking_deque_top -
+          heap_->mark_compact_collector()->marking_deque()->top());
+
+  double end = heap_->MonotonicallyIncreasingTimeInMs();
+  double delta = end - start;
+  heap_->tracer()->AddMarkingTime(delta);
+  heap_->tracer()->AddIncrementalMarkingFinalizationStep(delta);
+  if (FLAG_trace_incremental_marking) {
+    PrintF(
+        "[IncrementalMarking] Finalize incrementally round %d, "
+        "spent %d ms, marking progress %d.\n",
+        incremental_marking_finalization_rounds_, static_cast<int>(delta),
+        marking_progress);
+  }
+
+  ++incremental_marking_finalization_rounds_;
+  if ((incremental_marking_finalization_rounds_ >=
+       FLAG_max_incremental_marking_finalization_rounds) ||
+      (marking_progress <
+       FLAG_min_progress_during_incremental_marking_finalization)) {
+    finalize_marking_completed_ = true;
   }
 }
 
@@ -620,10 +821,7 @@
 
 
 void IncrementalMarking::VisitObject(Map* map, HeapObject* obj, int size) {
-  MarkBit map_mark_bit = Marking::MarkBitFrom(map);
-  if (Marking::IsWhite(map_mark_bit)) {
-    WhiteToGreyAndPush(map, map_mark_bit);
-  }
+  MarkObject(heap_, map);
 
   IncrementalMarkingMarkingVisitor::IterateBody(map, obj);
 
@@ -639,6 +837,14 @@
 }
 
 
+void IncrementalMarking::MarkObject(Heap* heap, HeapObject* obj) {
+  MarkBit mark_bit = Marking::MarkBitFrom(obj);
+  if (Marking::IsWhite(mark_bit)) {
+    heap->incremental_marking()->WhiteToGreyAndPush(obj, mark_bit);
+  }
+}
+
+
 intptr_t IncrementalMarking::ProcessMarkingDeque(intptr_t bytes_to_process) {
   intptr_t bytes_processed = 0;
   Map* filler_map = heap_->one_pointer_filler_map();
@@ -655,10 +861,7 @@
     int size = obj->SizeFromMap(map);
     unscanned_bytes_of_large_object_ = 0;
     VisitObject(map, obj, size);
-    int delta = (size - unscanned_bytes_of_large_object_);
-    // TODO(jochen): remove after http://crbug.com/381820 is resolved.
-    CHECK_LT(0, delta);
-    bytes_processed += delta;
+    bytes_processed += size - unscanned_bytes_of_large_object_;
   }
   return bytes_processed;
 }
@@ -685,7 +888,7 @@
   if (state() == MARKING) {
     double start = 0.0;
     if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
-      start = base::OS::TimeCurrentMillis();
+      start = heap_->MonotonicallyIncreasingTimeInMs();
       if (FLAG_trace_incremental_marking) {
         PrintF("[IncrementalMarking] Hurry\n");
       }
@@ -695,7 +898,7 @@
     ProcessMarkingDeque();
     state_ = COMPLETE;
     if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
-      double end = base::OS::TimeCurrentMillis();
+      double end = heap_->MonotonicallyIncreasingTimeInMs();
       double delta = end - start;
       heap_->tracer()->AddMarkingTime(delta);
       if (FLAG_trace_incremental_marking) {
@@ -708,7 +911,7 @@
   if (FLAG_cleanup_code_caches_at_gc) {
     PolymorphicCodeCache* poly_cache = heap_->polymorphic_code_cache();
     Marking::GreyToBlack(Marking::MarkBitFrom(poly_cache));
-    MemoryChunk::IncrementLiveBytesFromGC(poly_cache->address(),
+    MemoryChunk::IncrementLiveBytesFromGC(poly_cache,
                                           PolymorphicCodeCache::kSize);
   }
 
@@ -722,7 +925,7 @@
       MarkBit mark_bit = Marking::MarkBitFrom(cache);
       if (Marking::IsGrey(mark_bit)) {
         Marking::GreyToBlack(mark_bit);
-        MemoryChunk::IncrementLiveBytesFromGC(cache->address(), cache->Size());
+        MemoryChunk::IncrementLiveBytesFromGC(cache, cache->Size());
       }
     }
     context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
@@ -730,12 +933,13 @@
 }
 
 
-void IncrementalMarking::Abort() {
+void IncrementalMarking::Stop() {
   if (IsStopped()) return;
   if (FLAG_trace_incremental_marking) {
-    PrintF("[IncrementalMarking] Aborting.\n");
+    PrintF("[IncrementalMarking] Stopping.\n");
   }
-  heap_->new_space()->LowerInlineAllocationLimit(0);
+
+  heap_->new_space()->RemoveInlineAllocationObserver(&observer_);
   IncrementalMarking::set_should_hurry(false);
   ResetStepCounters();
   if (IsMarking()) {
@@ -763,7 +967,8 @@
   Hurry();
   state_ = STOPPED;
   is_compacting_ = false;
-  heap_->new_space()->LowerInlineAllocationLimit(0);
+
+  heap_->new_space()->RemoveInlineAllocationObserver(&observer_);
   IncrementalMarking::set_should_hurry(false);
   ResetStepCounters();
   PatchIncrementalMarkingRecordWriteStubs(heap_,
@@ -774,6 +979,20 @@
 }
 
 
+void IncrementalMarking::FinalizeMarking(CompletionAction action) {
+  DCHECK(!finalize_marking_completed_);
+  if (FLAG_trace_incremental_marking) {
+    PrintF(
+        "[IncrementalMarking] requesting finalization of incremental "
+        "marking.\n");
+  }
+  request_type_ = FINALIZATION;
+  if (action == GC_VIA_STACK_GUARD) {
+    heap_->isolate()->stack_guard()->RequestGC();
+  }
+}
+
+
 void IncrementalMarking::MarkingComplete(CompletionAction action) {
   state_ = COMPLETE;
   // We will set the stack guard to request a GC now.  This will mean the rest
@@ -785,20 +1004,52 @@
   if (FLAG_trace_incremental_marking) {
     PrintF("[IncrementalMarking] Complete (normal).\n");
   }
+  request_type_ = COMPLETE_MARKING;
   if (action == GC_VIA_STACK_GUARD) {
     heap_->isolate()->stack_guard()->RequestGC();
   }
 }
 
 
-void IncrementalMarking::Epilogue() { was_activated_ = false; }
+void IncrementalMarking::Epilogue() {
+  was_activated_ = false;
+  finalize_marking_completed_ = false;
+  incremental_marking_finalization_rounds_ = 0;
+}
+
+
+double IncrementalMarking::AdvanceIncrementalMarking(
+    intptr_t step_size_in_bytes, double deadline_in_ms,
+    IncrementalMarking::StepActions step_actions) {
+  DCHECK(!IsStopped());
+
+  if (step_size_in_bytes == 0) {
+    step_size_in_bytes = GCIdleTimeHandler::EstimateMarkingStepSize(
+        static_cast<size_t>(GCIdleTimeHandler::kIncrementalMarkingStepTimeInMs),
+        static_cast<size_t>(
+            heap()
+                ->tracer()
+                ->FinalIncrementalMarkCompactSpeedInBytesPerMillisecond()));
+  }
+
+  double remaining_time_in_ms = 0.0;
+  do {
+    Step(step_size_in_bytes, step_actions.completion_action,
+         step_actions.force_marking, step_actions.force_completion);
+    remaining_time_in_ms =
+        deadline_in_ms - heap()->MonotonicallyIncreasingTimeInMs();
+  } while (remaining_time_in_ms >=
+               2.0 * GCIdleTimeHandler::kIncrementalMarkingStepTimeInMs &&
+           !IsComplete() &&
+           !heap()->mark_compact_collector()->marking_deque()->IsEmpty());
+  return remaining_time_in_ms;
+}
 
 
 void IncrementalMarking::OldSpaceStep(intptr_t allocated) {
-  if (IsStopped() && ShouldActivate()) {
-    // TODO(hpayer): Let's play safe for now, but compaction should be
-    // in principle possible.
-    Start(PREVENT_COMPACTION);
+  if (IsStopped() && ShouldActivateEvenWithoutIdleNotification()) {
+    heap()->StartIncrementalMarking(Heap::kNoGCFlags, kNoGCCallbackFlags,
+                                    "old space step");
   } else {
     Step(allocated * kFastMarking / kInitialMarkingSpeed, GC_VIA_STACK_GUARD);
   }
@@ -809,9 +1060,9 @@
   bool speed_up = false;
 
   if ((steps_count_ % kMarkingSpeedAccellerationInterval) == 0) {
-    if (FLAG_trace_gc) {
-      PrintPID("Speed up marking after %d steps\n",
-               static_cast<int>(kMarkingSpeedAccellerationInterval));
+    if (FLAG_trace_incremental_marking) {
+      PrintIsolate(heap()->isolate(), "Speed up marking after %d steps\n",
+                   static_cast<int>(kMarkingSpeedAccellerationInterval));
     }
     speed_up = true;
   }
@@ -825,7 +1076,9 @@
 
   if (space_left_is_very_small ||
       only_1_nth_of_space_that_was_available_still_left) {
-    if (FLAG_trace_gc) PrintPID("Speed up marking because of low space left\n");
+    if (FLAG_trace_incremental_marking)
+      PrintIsolate(heap()->isolate(),
+                   "Speed up marking because of low space left\n");
     speed_up = true;
   }
 
@@ -835,8 +1088,9 @@
            old_generation_space_used_at_start_of_incremental_);
   if (size_of_old_space_multiplied_by_n_during_marking) {
     speed_up = true;
-    if (FLAG_trace_gc) {
-      PrintPID("Speed up marking because of heap size increase\n");
+    if (FLAG_trace_incremental_marking) {
+      PrintIsolate(heap()->isolate(),
+                   "Speed up marking because of heap size increase\n");
     }
   }
 
@@ -848,23 +1102,26 @@
 
   // We try to scan at least twice as fast as we are allocating.
   if (promoted_during_marking > bytes_scanned_ / 2 + scavenge_slack + delay) {
-    if (FLAG_trace_gc) {
-      PrintPID("Speed up marking because marker was not keeping up\n");
+    if (FLAG_trace_incremental_marking) {
+      PrintIsolate(heap()->isolate(),
+                   "Speed up marking because marker was not keeping up\n");
     }
     speed_up = true;
   }
 
   if (speed_up) {
     if (state_ != MARKING) {
-      if (FLAG_trace_gc) {
-        PrintPID("Postponing speeding up marking until marking starts\n");
+      if (FLAG_trace_incremental_marking) {
+        PrintIsolate(heap()->isolate(),
+                     "Postponing speeding up marking until marking starts\n");
       }
     } else {
       marking_speed_ += kMarkingSpeedAccelleration;
       marking_speed_ = static_cast<int>(
           Min(kMaxMarkingSpeed, static_cast<intptr_t>(marking_speed_ * 1.3)));
-      if (FLAG_trace_gc) {
-        PrintPID("Marking speed increased to %d\n", marking_speed_);
+      if (FLAG_trace_incremental_marking) {
+        PrintIsolate(heap()->isolate(), "Marking speed increased to %d\n",
+                     marking_speed_);
       }
     }
   }
@@ -875,8 +1132,9 @@
                                   CompletionAction action,
                                   ForceMarkingAction marking,
                                   ForceCompletionAction completion) {
+  DCHECK(allocated_bytes >= 0);
+
   if (heap_->gc_state() != Heap::NOT_IN_GC || !FLAG_incremental_marking ||
-      !FLAG_incremental_marking_steps ||
       (state_ != SWEEPING && state_ != MARKING)) {
     return 0;
   }
@@ -901,7 +1159,7 @@
   {
     HistogramTimerScope incremental_marking_scope(
         heap_->isolate()->counters()->gc_incremental_marking());
-    double start = base::OS::TimeCurrentMillis();
+    double start = heap_->MonotonicallyIncreasingTimeInMs();
 
     // The marking speed is driven either by the allocation rate or by the rate
     // at which we are having to check the color of objects in the write
@@ -927,14 +1185,18 @@
       }
       if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
         bytes_scanned_ = 0;
-        StartMarking(PREVENT_COMPACTION);
+        StartMarking();
       }
     } else if (state_ == MARKING) {
       bytes_processed = ProcessMarkingDeque(bytes_to_process);
       if (heap_->mark_compact_collector()->marking_deque()->IsEmpty()) {
         if (completion == FORCE_COMPLETION ||
             IsIdleMarkingDelayCounterLimitReached()) {
-          MarkingComplete(action);
+          if (!finalize_marking_completed_) {
+            FinalizeMarking(action);
+          } else {
+            MarkingComplete(action);
+          }
         } else {
           IncrementIdleMarkingDelayCounter();
         }
@@ -947,7 +1209,7 @@
     // with marking.
     SpeedUp();
 
-    double end = base::OS::TimeCurrentMillis();
+    double end = heap_->MonotonicallyIncreasingTimeInMs();
     double duration = (end - start);
     // Note that we report zero bytes here when sweeping was in progress or
     // when we just started incremental marking. In these cases we did not
@@ -989,5 +1251,5 @@
 void IncrementalMarking::ClearIdleMarkingDelayCounter() {
   idle_marking_delay_counter_ = 0;
 }
-}
-}  // namespace v8::internal
+}  // namespace internal
+}  // namespace v8
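The rewritten BaseRecordWrite near the top of this file reduces the old progress-bar special cases to one rule: when a black object receives a pointer to a white value, the value is greyed and pushed, and the caller records the slot only if compaction is enabled and the host is black. A minimal tri-color sketch of that decision, using plain structs instead of V8's MarkBit machinery, could be:

// Tri-color write-barrier decision, modeled without any V8 types.
// Illustrative sketch; names and representation are assumptions.
#include <cstdio>
#include <vector>

enum class Color { kWhite, kGrey, kBlack };

struct Obj {
  Color color = Color::kWhite;
};

struct Marking {
  bool compacting = false;
  std::vector<Obj*> grey_worklist;

  // Returns true when the caller should additionally record the slot.
  bool BaseRecordWrite(Obj* host, Obj* value) {
    bool host_is_black = (host->color == Color::kBlack);
    if (host_is_black && value->color == Color::kWhite) {
      value->color = Color::kGrey;       // WhiteToGreyAndPush equivalent.
      grey_worklist.push_back(value);
    }
    return compacting && host_is_black;  // Slot-recording condition.
  }
};

int main() {
  Marking m;
  Obj host;
  host.color = Color::kBlack;            // Black host object...
  Obj value;                             // ...pointing at a white value.
  bool record = m.BaseRecordWrite(&host, &value);
  std::printf("value greyed: %d, record slot: %d\n",
              value.color == Color::kGrey, record);  // Prints 1, 0.
}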
diff --git a/src/heap/incremental-marking.h b/src/heap/incremental-marking.h
index 56c5a24..be63021 100644
--- a/src/heap/incremental-marking.h
+++ b/src/heap/incremental-marking.h
@@ -5,14 +5,18 @@
 #ifndef V8_HEAP_INCREMENTAL_MARKING_H_
 #define V8_HEAP_INCREMENTAL_MARKING_H_
 
-
+#include "src/cancelable-task.h"
 #include "src/execution.h"
-#include "src/heap/mark-compact.h"
+#include "src/heap/incremental-marking-job.h"
+#include "src/heap/spaces.h"
 #include "src/objects.h"
 
 namespace v8 {
 namespace internal {
 
+// Forward declarations.
+class MarkBit;
+class PagedSpace;
 
 class IncrementalMarking {
  public:
@@ -24,6 +28,23 @@
 
   enum ForceCompletionAction { FORCE_COMPLETION, DO_NOT_FORCE_COMPLETION };
 
+  enum GCRequestType { COMPLETE_MARKING, FINALIZATION };
+
+  struct StepActions {
+    StepActions(CompletionAction complete_action_,
+                ForceMarkingAction force_marking_,
+                ForceCompletionAction force_completion_)
+        : completion_action(complete_action_),
+          force_marking(force_marking_),
+          force_completion(force_completion_) {}
+
+    CompletionAction completion_action;
+    ForceMarkingAction force_marking;
+    ForceCompletionAction force_completion;
+  };
+
+  static StepActions IdleStepActions();
+
   explicit IncrementalMarking(Heap* heap);
 
   static void Initialize();
@@ -36,6 +57,14 @@
   bool should_hurry() { return should_hurry_; }
   void set_should_hurry(bool val) { should_hurry_ = val; }
 
+  bool finalize_marking_completed() const {
+    return finalize_marking_completed_;
+  }
+
+  void SetWeakClosureWasOverApproximatedForTesting(bool val) {
+    finalize_marking_completed_ = val;
+  }
+
   inline bool IsStopped() { return state() == STOPPED; }
 
   INLINE(bool IsMarking()) { return state() >= MARKING; }
@@ -44,19 +73,21 @@
 
   inline bool IsComplete() { return state() == COMPLETE; }
 
-  bool WorthActivating();
+  inline bool IsReadyToOverApproximateWeakClosure() const {
+    return request_type_ == FINALIZATION && !finalize_marking_completed_;
+  }
 
-  bool ShouldActivate();
+  GCRequestType request_type() const { return request_type_; }
+
+  bool CanBeActivated();
+
+  bool ShouldActivateEvenWithoutIdleNotification();
 
   bool WasActivated();
 
-  enum CompactionFlag { ALLOW_COMPACTION, PREVENT_COMPACTION };
+  void Start(const char* reason = nullptr);
 
-  void Start(CompactionFlag flag = ALLOW_COMPACTION);
-
-  void Stop();
-
-  void PrepareForScavenge();
+  void FinalizeIncrementally();
 
   void UpdateMarkingDequeAfterScavenge();
 
@@ -64,12 +95,23 @@
 
   void Finalize();
 
-  void Abort();
+  void Stop();
+
+  void FinalizeMarking(CompletionAction action);
 
   void MarkingComplete(CompletionAction action);
 
   void Epilogue();
 
+  // Performs incremental marking steps of step_size_in_bytes as long as
+  // deadline_in_ms is not reached. step_size_in_bytes can be 0 to compute
+  // an estimated increment. Returns the remaining time that cannot be used
+  // for incremental marking anymore because a single step would exceed the
+  // deadline.
+  double AdvanceIncrementalMarking(intptr_t step_size_in_bytes,
+                                   double deadline_in_ms,
+                                   StepActions step_actions);
+
   // It's hard to know how much work the incremental marker should do to make
   // progress in the face of the mutator creating new work for it.  We start
   // of at a moderate rate of work and gradually increase the speed of the
@@ -117,7 +159,7 @@
   // No slots in white objects should be recorded, as some slots are typed and
   // cannot be interpreted correctly if the underlying object does not survive
   // the incremental cycle (stays white).
-  INLINE(bool BaseRecordWrite(HeapObject* obj, Object** slot, Object* value));
+  INLINE(bool BaseRecordWrite(HeapObject* obj, Object* value));
   INLINE(void RecordWrite(HeapObject* obj, Object** slot, Object* value));
   INLINE(void RecordWriteIntoCode(HeapObject* obj, RelocInfo* rinfo,
                                   Object* value));
@@ -132,17 +174,17 @@
   void RecordCodeTargetPatch(Code* host, Address pc, HeapObject* value);
   void RecordCodeTargetPatch(Address pc, HeapObject* value);
 
-  inline void RecordWrites(HeapObject* obj);
+  void RecordWrites(HeapObject* obj);
 
-  inline void BlackToGreyAndUnshift(HeapObject* obj, MarkBit mark_bit);
+  void BlackToGreyAndUnshift(HeapObject* obj, MarkBit mark_bit);
 
-  inline void WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit);
+  void WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit);
 
   inline void SetOldSpacePageFlags(MemoryChunk* chunk) {
     SetOldSpacePageFlags(chunk, IsMarking(), IsCompacting());
   }
 
-  inline void SetNewSpacePageFlags(NewSpacePage* chunk) {
+  inline void SetNewSpacePageFlags(MemoryChunk* chunk) {
     SetNewSpacePageFlags(chunk, IsMarking());
   }
 
@@ -150,19 +192,7 @@
 
   void ActivateGeneratedStub(Code* stub);
 
-  void NotifyOfHighPromotionRate() {
-    if (IsMarking()) {
-      if (marking_speed_ < kFastMarking) {
-        if (FLAG_trace_gc) {
-          PrintPID(
-              "Increasing marking speed to %d "
-              "due to high promotion rate\n",
-              static_cast<int>(kFastMarking));
-        }
-        marking_speed_ = kFastMarking;
-      }
-    }
-  }
+  void NotifyOfHighPromotionRate();
 
   void EnterNoMarkingScope() { no_marking_scope_depth_++; }
 
@@ -176,14 +206,44 @@
 
   bool IsIdleMarkingDelayCounterLimitReached();
 
+  INLINE(static void MarkObject(Heap* heap, HeapObject* object));
+
+  Heap* heap() const { return heap_; }
+
+  IncrementalMarkingJob* incremental_marking_job() {
+    return &incremental_marking_job_;
+  }
+
  private:
+  class Observer : public InlineAllocationObserver {
+   public:
+    Observer(IncrementalMarking& incremental_marking, intptr_t step_size)
+        : InlineAllocationObserver(step_size),
+          incremental_marking_(incremental_marking) {}
+
+    void Step(int bytes_allocated, Address, size_t) override {
+      incremental_marking_.Step(bytes_allocated,
+                                IncrementalMarking::GC_VIA_STACK_GUARD);
+    }
+
+   private:
+    IncrementalMarking& incremental_marking_;
+  };
+
   int64_t SpaceLeftInOldSpace();
 
   void SpeedUp();
 
   void ResetStepCounters();
 
-  void StartMarking(CompactionFlag flag);
+  void StartMarking();
+
+  void MarkRoots();
+  void MarkObjectGroups();
+  void ProcessWeakCells();
+  // Retain dying maps for <FLAG_retain_maps_for_n_gc> garbage collections to
+  // increase the chances of reusing the map transition tree in the future.
+  void RetainMaps();
 
   void ActivateIncrementalWriteBarrier(PagedSpace* space);
   static void ActivateIncrementalWriteBarrier(NewSpace* space);
@@ -196,7 +256,7 @@
   static void SetOldSpacePageFlags(MemoryChunk* chunk, bool is_marking,
                                    bool is_compacting);
 
-  static void SetNewSpacePageFlags(NewSpacePage* chunk, bool is_marking);
+  static void SetNewSpacePageFlags(MemoryChunk* chunk, bool is_marking);
 
   INLINE(void ProcessMarkingDeque());
 
@@ -208,6 +268,8 @@
 
   Heap* heap_;
 
+  Observer observer_;
+
   State state_;
   bool is_compacting_;
 
@@ -228,9 +290,17 @@
 
   bool was_activated_;
 
+  bool finalize_marking_completed_;
+
+  int incremental_marking_finalization_rounds_;
+
+  GCRequestType request_type_;
+
+  IncrementalMarkingJob incremental_marking_job_;
+
   DISALLOW_IMPLICIT_CONSTRUCTORS(IncrementalMarking);
 };
-}
-}  // namespace v8::internal
+}  // namespace internal
+}  // namespace v8
 
 #endif  // V8_HEAP_INCREMENTAL_MARKING_H_
diff --git a/src/heap/mark-compact-inl.h b/src/heap/mark-compact-inl.h
index 66b0a59..a59d36b 100644
--- a/src/heap/mark-compact-inl.h
+++ b/src/heap/mark-compact-inl.h
@@ -6,67 +6,190 @@
 #define V8_HEAP_MARK_COMPACT_INL_H_
 
 #include "src/heap/mark-compact.h"
+#include "src/heap/slots-buffer.h"
 #include "src/isolate.h"
 
-
 namespace v8 {
 namespace internal {
 
-
-MarkBit Marking::MarkBitFrom(Address addr) {
-  MemoryChunk* p = MemoryChunk::FromAddress(addr);
-  return p->markbits()->MarkBitFromIndex(p->AddressToMarkbitIndex(addr),
-                                         p->ContainsOnlyData());
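+// Pushes an already-black object onto the marking deque. On deque overflow
+// the object is reverted to grey (its live bytes are not yet accounted for),
+// so that it is picked up again once the deque is refilled.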
+void MarkCompactCollector::PushBlack(HeapObject* obj) {
+  DCHECK(Marking::IsBlack(Marking::MarkBitFrom(obj)));
+  if (marking_deque_.Push(obj)) {
+    MemoryChunk::IncrementLiveBytesFromGC(obj, obj->Size());
+  } else {
+    Marking::BlackToGrey(obj);
+  }
 }
 
 
-void MarkCompactCollector::SetFlags(int flags) {
-  reduce_memory_footprint_ = ((flags & Heap::kReduceMemoryFootprintMask) != 0);
-  abort_incremental_marking_ =
-      ((flags & Heap::kAbortIncrementalMarkingMask) != 0);
+void MarkCompactCollector::UnshiftBlack(HeapObject* obj) {
+  DCHECK(Marking::IsBlack(Marking::MarkBitFrom(obj)));
+  if (!marking_deque_.Unshift(obj)) {
+    MemoryChunk::IncrementLiveBytesFromGC(obj, -obj->Size());
+    Marking::BlackToGrey(obj);
+  }
 }
 
 
 void MarkCompactCollector::MarkObject(HeapObject* obj, MarkBit mark_bit) {
   DCHECK(Marking::MarkBitFrom(obj) == mark_bit);
-  if (!mark_bit.Get()) {
-    mark_bit.Set();
-    MemoryChunk::IncrementLiveBytesFromGC(obj->address(), obj->Size());
-    DCHECK(IsMarked(obj));
+  if (Marking::IsWhite(mark_bit)) {
+    Marking::WhiteToBlack(mark_bit);
     DCHECK(obj->GetIsolate()->heap()->Contains(obj));
-    marking_deque_.PushBlack(obj);
+    PushBlack(obj);
   }
 }
 
 
 void MarkCompactCollector::SetMark(HeapObject* obj, MarkBit mark_bit) {
-  DCHECK(!mark_bit.Get());
+  DCHECK(Marking::IsWhite(mark_bit));
   DCHECK(Marking::MarkBitFrom(obj) == mark_bit);
-  mark_bit.Set();
-  MemoryChunk::IncrementLiveBytesFromGC(obj->address(), obj->Size());
+  Marking::WhiteToBlack(mark_bit);
+  MemoryChunk::IncrementLiveBytesFromGC(obj, obj->Size());
 }
 
 
 bool MarkCompactCollector::IsMarked(Object* obj) {
   DCHECK(obj->IsHeapObject());
   HeapObject* heap_object = HeapObject::cast(obj);
-  return Marking::MarkBitFrom(heap_object).Get();
+  return Marking::IsBlackOrGrey(Marking::MarkBitFrom(heap_object));
 }
 
 
-void MarkCompactCollector::RecordSlot(Object** anchor_slot, Object** slot,
-                                      Object* object,
-                                      SlotsBuffer::AdditionMode mode) {
-  Page* object_page = Page::FromAddress(reinterpret_cast<Address>(object));
-  if (object_page->IsEvacuationCandidate() &&
-      !ShouldSkipEvacuationSlotRecording(anchor_slot)) {
-    if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
-                            object_page->slots_buffer_address(), slot, mode)) {
-      EvictEvacuationCandidate(object_page);
+void MarkCompactCollector::RecordSlot(HeapObject* object, Object** slot,
+                                      Object* target) {
+  Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
+  if (target_page->IsEvacuationCandidate() &&
+      !ShouldSkipEvacuationSlotRecording(object)) {
+    if (!SlotsBuffer::AddTo(slots_buffer_allocator_,
+                            target_page->slots_buffer_address(), slot,
+                            SlotsBuffer::FAIL_ON_OVERFLOW)) {
+      EvictPopularEvacuationCandidate(target_page);
     }
   }
 }
+
+
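+// Like RecordSlot above, but the slot must always be recorded: the addition
+// uses IGNORE_OVERFLOW and is CHECKed to succeed instead of evicting the
+// popular evacuation candidate when the slots buffer overflows.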
+void MarkCompactCollector::ForceRecordSlot(HeapObject* object, Object** slot,
+                                           Object* target) {
+  Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
+  if (target_page->IsEvacuationCandidate() &&
+      !ShouldSkipEvacuationSlotRecording(object)) {
+    CHECK(SlotsBuffer::AddTo(slots_buffer_allocator_,
+                             target_page->slots_buffer_address(), slot,
+                             SlotsBuffer::IGNORE_OVERFLOW));
+  }
 }
-}  // namespace v8::internal
+
+
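+// The code flusher keeps its candidates in intrusive singly-linked lists:
+// JSFunction candidates are chained through their next-function-link field,
+// SharedFunctionInfo candidates through the gc_metadata slot of their code.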
+void CodeFlusher::AddCandidate(SharedFunctionInfo* shared_info) {
+  if (GetNextCandidate(shared_info) == NULL) {
+    SetNextCandidate(shared_info, shared_function_info_candidates_head_);
+    shared_function_info_candidates_head_ = shared_info;
+  }
+}
+
+
+void CodeFlusher::AddCandidate(JSFunction* function) {
+  DCHECK(function->code() == function->shared()->code());
+  if (GetNextCandidate(function)->IsUndefined()) {
+    SetNextCandidate(function, jsfunction_candidates_head_);
+    jsfunction_candidates_head_ = function;
+  }
+}
+
+
+JSFunction** CodeFlusher::GetNextCandidateSlot(JSFunction* candidate) {
+  return reinterpret_cast<JSFunction**>(
+      HeapObject::RawField(candidate, JSFunction::kNextFunctionLinkOffset));
+}
+
+
+JSFunction* CodeFlusher::GetNextCandidate(JSFunction* candidate) {
+  Object* next_candidate = candidate->next_function_link();
+  return reinterpret_cast<JSFunction*>(next_candidate);
+}
+
+
+void CodeFlusher::SetNextCandidate(JSFunction* candidate,
+                                   JSFunction* next_candidate) {
+  candidate->set_next_function_link(next_candidate, UPDATE_WEAK_WRITE_BARRIER);
+}
+
+
+void CodeFlusher::ClearNextCandidate(JSFunction* candidate, Object* undefined) {
+  DCHECK(undefined->IsUndefined());
+  candidate->set_next_function_link(undefined, SKIP_WRITE_BARRIER);
+}
+
+
+SharedFunctionInfo* CodeFlusher::GetNextCandidate(
+    SharedFunctionInfo* candidate) {
+  Object* next_candidate = candidate->code()->gc_metadata();
+  return reinterpret_cast<SharedFunctionInfo*>(next_candidate);
+}
+
+
+void CodeFlusher::SetNextCandidate(SharedFunctionInfo* candidate,
+                                   SharedFunctionInfo* next_candidate) {
+  candidate->code()->set_gc_metadata(next_candidate);
+}
+
+
+void CodeFlusher::ClearNextCandidate(SharedFunctionInfo* candidate) {
+  candidate->code()->set_gc_metadata(NULL, SKIP_WRITE_BARRIER);
+}
+
+
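+// Returns the next live object of the requested color by scanning the mark
+// bitmap cell by cell. Every live object is described by two consecutive mark
+// bits ("11" = black, "10" = grey); the second bit may spill over into the
+// next bitmap cell, which is the "overlapping case" handled below.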
+template <LiveObjectIterationMode T>
+HeapObject* LiveObjectIterator<T>::Next() {
+  while (!it_.Done()) {
+    HeapObject* object = nullptr;
+    while (current_cell_ != 0) {
+      uint32_t trailing_zeros = base::bits::CountTrailingZeros32(current_cell_);
+      Address addr = cell_base_ + trailing_zeros * kPointerSize;
+
+      // Clear the first bit of the found object.
+      current_cell_ &= ~(1u << trailing_zeros);
+
+      uint32_t second_bit_index = 0;
+      if (trailing_zeros < Bitmap::kBitIndexMask) {
+        second_bit_index = 1u << (trailing_zeros + 1);
+      } else {
+        second_bit_index = 0x1;
+        // The overlapping case; there has to exist a cell after the current
+        // cell.
+        DCHECK(!it_.Done());
+        it_.Advance();
+        cell_base_ = it_.CurrentCellBase();
+        current_cell_ = *it_.CurrentCell();
+      }
+      if (T == kBlackObjects && (current_cell_ & second_bit_index)) {
+        object = HeapObject::FromAddress(addr);
+      } else if (T == kGreyObjects && !(current_cell_ & second_bit_index)) {
+        object = HeapObject::FromAddress(addr);
+      } else if (T == kAllLiveObjects) {
+        object = HeapObject::FromAddress(addr);
+      }
+      // Clear the second bit of the found object.
+      current_cell_ &= ~second_bit_index;
+
+      // We found a live object.
+      if (object != nullptr) break;
+    }
+    if (current_cell_ == 0) {
+      if (!it_.Done()) {
+        it_.Advance();
+        cell_base_ = it_.CurrentCellBase();
+        current_cell_ = *it_.CurrentCell();
+      }
+    }
+    if (object != nullptr) return object;
+  }
+  return nullptr;
+}
+
+}  // namespace internal
+}  // namespace v8
 
 #endif  // V8_HEAP_MARK_COMPACT_INL_H_
diff --git a/src/heap/mark-compact.cc b/src/heap/mark-compact.cc
index c9a310a..65bfdd9 100644
--- a/src/heap/mark-compact.cc
+++ b/src/heap/mark-compact.cc
@@ -2,36 +2,47 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "src/v8.h"
+#include "src/heap/mark-compact.h"
 
 #include "src/base/atomicops.h"
 #include "src/base/bits.h"
+#include "src/base/sys-info.h"
 #include "src/code-stubs.h"
 #include "src/compilation-cache.h"
-#include "src/cpu-profiler.h"
 #include "src/deoptimizer.h"
 #include "src/execution.h"
+#include "src/frames-inl.h"
 #include "src/gdb-jit.h"
 #include "src/global-handles.h"
+#include "src/heap/array-buffer-tracker.h"
+#include "src/heap/gc-tracer.h"
 #include "src/heap/incremental-marking.h"
-#include "src/heap/mark-compact.h"
+#include "src/heap/mark-compact-inl.h"
+#include "src/heap/object-stats.h"
 #include "src/heap/objects-visiting.h"
 #include "src/heap/objects-visiting-inl.h"
+#include "src/heap/slots-buffer.h"
 #include "src/heap/spaces-inl.h"
-#include "src/heap-profiler.h"
 #include "src/ic/ic.h"
 #include "src/ic/stub-cache.h"
+#include "src/profiler/cpu-profiler.h"
+#include "src/v8.h"
 
 namespace v8 {
 namespace internal {
 
 
 const char* Marking::kWhiteBitPattern = "00";
-const char* Marking::kBlackBitPattern = "10";
-const char* Marking::kGreyBitPattern = "11";
+const char* Marking::kBlackBitPattern = "11";
+const char* Marking::kGreyBitPattern = "10";
 const char* Marking::kImpossibleBitPattern = "01";
 
 
+// The following has to hold in order for {Marking::MarkBitFrom} to not produce
+// invalid {kImpossibleBitPattern} in the marking bitmap by overlapping.
+STATIC_ASSERT(Heap::kMinObjectSizeInWords >= 2);
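+// With at least two words per object, both mark bits of an object fall inside
+// the object itself, so the bit pairs of neighboring objects cannot overlap
+// and produce the "01" pattern.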
+
+
 // -------------------------------------------------------------------------
 // MarkCompactCollector
 
@@ -40,20 +51,21 @@
 #ifdef DEBUG
       state_(IDLE),
 #endif
-      reduce_memory_footprint_(false),
-      abort_incremental_marking_(false),
       marking_parity_(ODD_MARKING_PARITY),
-      compacting_(false),
       was_marked_incrementally_(false),
-      sweeping_in_progress_(false),
-      pending_sweeper_jobs_semaphore_(0),
       evacuation_(false),
-      migration_slots_buffer_(NULL),
+      slots_buffer_allocator_(nullptr),
+      migration_slots_buffer_(nullptr),
       heap_(heap),
       marking_deque_memory_(NULL),
-      marking_deque_memory_committed_(false),
-      code_flusher_(NULL),
-      have_code_to_deoptimize_(false) {
+      marking_deque_memory_committed_(0),
+      code_flusher_(nullptr),
+      have_code_to_deoptimize_(false),
+      compacting_(false),
+      sweeping_in_progress_(false),
+      compaction_in_progress_(false),
+      pending_sweeper_tasks_semaphore_(0),
+      pending_compaction_tasks_semaphore_(0) {
 }
 
 #ifdef VERIFY_HEAP
@@ -61,7 +73,7 @@
  public:
   explicit VerifyMarkingVisitor(Heap* heap) : heap_(heap) {}
 
-  void VisitPointers(Object** start, Object** end) {
+  void VisitPointers(Object** start, Object** end) override {
     for (Object** current = start; current < end; current++) {
       if ((*current)->IsHeapObject()) {
         HeapObject* object = HeapObject::cast(*current);
@@ -70,7 +82,7 @@
     }
   }
 
-  void VisitEmbeddedPointer(RelocInfo* rinfo) {
+  void VisitEmbeddedPointer(RelocInfo* rinfo) override {
     DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
     if (!rinfo->host()->IsWeakObject(rinfo->target_object())) {
       Object* p = rinfo->target_object();
@@ -78,7 +90,7 @@
     }
   }
 
-  void VisitCell(RelocInfo* rinfo) {
+  void VisitCell(RelocInfo* rinfo) override {
     Code* code = rinfo->host();
     DCHECK(rinfo->rmode() == RelocInfo::CELL);
     if (!code->IsWeakObject(rinfo->target_cell())) {
@@ -99,9 +111,12 @@
   for (Address current = bottom; current < top; current += kPointerSize) {
     object = HeapObject::FromAddress(current);
     if (MarkCompactCollector::IsMarked(object)) {
+      CHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
       CHECK(current >= next_object_must_be_here_or_later);
       object->Iterate(&visitor);
       next_object_must_be_here_or_later = current + object->Size();
+      // The next word definitely belongs to the current object; jump over it.
+      current += kPointerSize;
     }
   }
 }
@@ -134,11 +149,8 @@
 
 
 static void VerifyMarking(Heap* heap) {
-  VerifyMarking(heap->old_pointer_space());
-  VerifyMarking(heap->old_data_space());
+  VerifyMarking(heap->old_space());
   VerifyMarking(heap->code_space());
-  VerifyMarking(heap->cell_space());
-  VerifyMarking(heap->property_cell_space());
   VerifyMarking(heap->map_space());
   VerifyMarking(heap->new_space());
 
@@ -157,7 +169,7 @@
 
 class VerifyEvacuationVisitor : public ObjectVisitor {
  public:
-  void VisitPointers(Object** start, Object** end) {
+  void VisitPointers(Object** start, Object** end) override {
     for (Object** current = start; current < end; current++) {
       if ((*current)->IsHeapObject()) {
         HeapObject* object = HeapObject::cast(*current);
@@ -170,7 +182,7 @@
 
 static void VerifyEvacuation(Page* page) {
   VerifyEvacuationVisitor visitor;
-  HeapObjectIterator iterator(page, NULL);
+  HeapObjectIterator iterator(page);
   for (HeapObject* heap_object = iterator.Next(); heap_object != NULL;
        heap_object = iterator.Next()) {
     // We skip free space objects.
@@ -200,8 +212,7 @@
 
 
 static void VerifyEvacuation(Heap* heap, PagedSpace* space) {
-  if (FLAG_use_allocation_folding &&
-      (space == heap->old_pointer_space() || space == heap->old_data_space())) {
+  if (FLAG_use_allocation_folding && (space == heap->old_space())) {
     return;
   }
   PageIterator it(space);
@@ -215,11 +226,8 @@
 
 
 static void VerifyEvacuation(Heap* heap) {
-  VerifyEvacuation(heap, heap->old_pointer_space());
-  VerifyEvacuation(heap, heap->old_data_space());
+  VerifyEvacuation(heap, heap->old_space());
   VerifyEvacuation(heap, heap->code_space());
-  VerifyEvacuation(heap, heap->cell_space());
-  VerifyEvacuation(heap, heap->property_cell_space());
   VerifyEvacuation(heap, heap->map_space());
   VerifyEvacuation(heap->new_space());
 
@@ -230,18 +238,37 @@
 
 
 void MarkCompactCollector::SetUp() {
-  free_list_old_data_space_.Reset(new FreeList(heap_->old_data_space()));
-  free_list_old_pointer_space_.Reset(new FreeList(heap_->old_pointer_space()));
+  DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+  DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
+  DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
+  DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
+
+  free_list_old_space_.Reset(new FreeList(heap_->old_space()));
+  free_list_code_space_.Reset(new FreeList(heap_->code_space()));
+  free_list_map_space_.Reset(new FreeList(heap_->map_space()));
+  EnsureMarkingDequeIsReserved();
+  EnsureMarkingDequeIsCommitted(kMinMarkingDequeSize);
+  slots_buffer_allocator_ = new SlotsBufferAllocator();
+
+  if (FLAG_flush_code) {
+    code_flusher_ = new CodeFlusher(isolate());
+    if (FLAG_trace_code_flushing) {
+      PrintF("[code-flushing is now on]\n");
+    }
+  }
 }
 
 
 void MarkCompactCollector::TearDown() {
   AbortCompaction();
   delete marking_deque_memory_;
+  delete slots_buffer_allocator_;
+  delete code_flusher_;
 }
 
 
 void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
+  DCHECK(!p->NeverEvacuate());
   p->MarkEvacuationCandidate();
   evacuation_candidates_.Add(p);
 }
@@ -261,16 +288,9 @@
   if (!compacting_) {
     DCHECK(evacuation_candidates_.length() == 0);
 
-#ifdef ENABLE_GDB_JIT_INTERFACE
-    // If GDBJIT interface is active disable compaction.
-    if (FLAG_gdbjit) return false;
-#endif
+    CollectEvacuationCandidates(heap()->old_space());
 
-    CollectEvacuationCandidates(heap()->old_pointer_space());
-    CollectEvacuationCandidates(heap()->old_data_space());
-
-    if (FLAG_compact_code_space && (mode == NON_INCREMENTAL_COMPACTION ||
-                                    FLAG_incremental_code_compaction)) {
+    if (FLAG_compact_code_space) {
       CollectEvacuationCandidates(heap()->code_space());
     } else if (FLAG_trace_fragmentation) {
       TraceFragmentation(heap()->code_space());
@@ -278,13 +298,10 @@
 
     if (FLAG_trace_fragmentation) {
       TraceFragmentation(heap()->map_space());
-      TraceFragmentation(heap()->cell_space());
-      TraceFragmentation(heap()->property_cell_space());
     }
 
-    heap()->old_pointer_space()->EvictEvacuationCandidatesFromFreeLists();
-    heap()->old_data_space()->EvictEvacuationCandidatesFromFreeLists();
-    heap()->code_space()->EvictEvacuationCandidatesFromFreeLists();
+    heap()->old_space()->EvictEvacuationCandidatesFromLinearAllocationArea();
+    heap()->code_space()->EvictEvacuationCandidatesFromLinearAllocationArea();
 
     compacting_ = evacuation_candidates_.length() > 0;
   }
@@ -293,23 +310,66 @@
 }
 
 
+void MarkCompactCollector::ClearInvalidStoreAndSlotsBufferEntries() {
+  {
+    GCTracer::Scope gc_scope(heap()->tracer(),
+                             GCTracer::Scope::MC_CLEAR_STORE_BUFFER);
+    heap_->store_buffer()->ClearInvalidStoreBufferEntries();
+  }
+
+  {
+    GCTracer::Scope gc_scope(heap()->tracer(),
+                             GCTracer::Scope::MC_CLEAR_SLOTS_BUFFER);
+    int number_of_pages = evacuation_candidates_.length();
+    for (int i = 0; i < number_of_pages; i++) {
+      Page* p = evacuation_candidates_[i];
+      SlotsBuffer::RemoveInvalidSlots(heap_, p->slots_buffer());
+    }
+  }
+#ifdef VERIFY_HEAP
+  if (FLAG_verify_heap) {
+    VerifyValidStoreAndSlotsBufferEntries();
+  }
+#endif
+}
+
+
+#ifdef VERIFY_HEAP
+static void VerifyValidSlotsBufferEntries(Heap* heap, PagedSpace* space) {
+  PageIterator it(space);
+  while (it.has_next()) {
+    Page* p = it.next();
+    SlotsBuffer::VerifySlots(heap, p->slots_buffer());
+  }
+}
+
+
+void MarkCompactCollector::VerifyValidStoreAndSlotsBufferEntries() {
+  heap()->store_buffer()->VerifyValidStoreBufferEntries();
+
+  VerifyValidSlotsBufferEntries(heap(), heap()->old_space());
+  VerifyValidSlotsBufferEntries(heap(), heap()->code_space());
+  VerifyValidSlotsBufferEntries(heap(), heap()->map_space());
+
+  LargeObjectIterator it(heap()->lo_space());
+  for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
+    MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
+    SlotsBuffer::VerifySlots(heap(), chunk->slots_buffer());
+  }
+}
+#endif
+
+
 void MarkCompactCollector::CollectGarbage() {
   // Make sure that Prepare() has been called. The individual steps below will
   // update the state as they proceed.
   DCHECK(state_ == PREPARE_GC);
 
   MarkLiveObjects();
+
   DCHECK(heap_->incremental_marking()->IsStopped());
 
-  if (FLAG_collect_maps) ClearNonLiveReferences();
-
-  ProcessAndClearWeakCells();
-
-  ClearWeakCollections();
-
-  heap_->set_encountered_weak_cells(Smi::FromInt(0));
-
-  isolate()->global_handles()->CollectPhantomCallbackData();
+  ClearNonLiveReferences();
 
 #ifdef VERIFY_HEAP
   if (FLAG_verify_heap) {
@@ -319,23 +379,9 @@
 
   SweepSpaces();
 
-#ifdef VERIFY_HEAP
-  if (heap()->weak_embedded_objects_verification_enabled()) {
-    VerifyWeakEmbeddedObjectsInCode();
-  }
-  if (FLAG_collect_maps && FLAG_omit_map_checks_for_leaf_maps) {
-    VerifyOmittedMapChecks();
-  }
-#endif
+  EvacuateNewSpaceAndCandidates();
 
   Finish();
-
-  if (marking_parity_ == EVEN_MARKING_PARITY) {
-    marking_parity_ = ODD_MARKING_PARITY;
-  } else {
-    DCHECK(marking_parity_ == ODD_MARKING_PARITY);
-    marking_parity_ = EVEN_MARKING_PARITY;
-  }
 }
 
 
@@ -363,11 +409,8 @@
 
 
 void MarkCompactCollector::VerifyMarkbitsAreClean() {
-  VerifyMarkbitsAreClean(heap_->old_pointer_space());
-  VerifyMarkbitsAreClean(heap_->old_data_space());
+  VerifyMarkbitsAreClean(heap_->old_space());
   VerifyMarkbitsAreClean(heap_->code_space());
-  VerifyMarkbitsAreClean(heap_->cell_space());
-  VerifyMarkbitsAreClean(heap_->property_cell_space());
   VerifyMarkbitsAreClean(heap_->map_space());
   VerifyMarkbitsAreClean(heap_->new_space());
 
@@ -423,23 +466,42 @@
 void MarkCompactCollector::ClearMarkbits() {
   ClearMarkbitsInPagedSpace(heap_->code_space());
   ClearMarkbitsInPagedSpace(heap_->map_space());
-  ClearMarkbitsInPagedSpace(heap_->old_pointer_space());
-  ClearMarkbitsInPagedSpace(heap_->old_data_space());
-  ClearMarkbitsInPagedSpace(heap_->cell_space());
-  ClearMarkbitsInPagedSpace(heap_->property_cell_space());
+  ClearMarkbitsInPagedSpace(heap_->old_space());
   ClearMarkbitsInNewSpace(heap_->new_space());
 
   LargeObjectIterator it(heap_->lo_space());
   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
-    MarkBit mark_bit = Marking::MarkBitFrom(obj);
-    mark_bit.Clear();
-    mark_bit.Next().Clear();
+    Marking::MarkWhite(Marking::MarkBitFrom(obj));
     Page::FromAddress(obj->address())->ResetProgressBar();
     Page::FromAddress(obj->address())->ResetLiveBytes();
   }
 }
 
 
+class MarkCompactCollector::CompactionTask : public CancelableTask {
+ public:
+  explicit CompactionTask(Heap* heap, CompactionSpaceCollection* spaces)
+      : CancelableTask(heap->isolate()), spaces_(spaces) {}
+
+  virtual ~CompactionTask() {}
+
+ private:
+  // v8::internal::CancelableTask overrides.
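+  // Evacuates pages into |spaces_|, hands the evacuation slots buffer back to
+  // the collector, and signals the pending-compaction semaphore when done.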
+  void RunInternal() override {
+    MarkCompactCollector* mark_compact =
+        isolate()->heap()->mark_compact_collector();
+    SlotsBuffer* evacuation_slots_buffer = nullptr;
+    mark_compact->EvacuatePages(spaces_, &evacuation_slots_buffer);
+    mark_compact->AddEvacuationSlotsBufferSynchronized(evacuation_slots_buffer);
+    mark_compact->pending_compaction_tasks_semaphore_.Signal();
+  }
+
+  CompactionSpaceCollection* spaces_;
+
+  DISALLOW_COPY_AND_ASSIGN(CompactionTask);
+};
+
+
 class MarkCompactCollector::SweeperTask : public v8::Task {
  public:
   SweeperTask(Heap* heap, PagedSpace* space) : heap_(heap), space_(space) {}
@@ -448,9 +510,9 @@
 
  private:
   // v8::Task overrides.
-  void Run() OVERRIDE {
+  void Run() override {
     heap_->mark_compact_collector()->SweepInParallel(space_, 0);
-    heap_->mark_compact_collector()->pending_sweeper_jobs_semaphore_.Signal();
+    heap_->mark_compact_collector()->pending_sweeper_tasks_semaphore_.Signal();
   }
 
   Heap* heap_;
@@ -461,14 +523,40 @@
 
 
 void MarkCompactCollector::StartSweeperThreads() {
-  DCHECK(free_list_old_pointer_space_.get()->IsEmpty());
-  DCHECK(free_list_old_data_space_.get()->IsEmpty());
+  DCHECK(free_list_old_space_.get()->IsEmpty());
+  DCHECK(free_list_code_space_.get()->IsEmpty());
+  DCHECK(free_list_map_space_.get()->IsEmpty());
   V8::GetCurrentPlatform()->CallOnBackgroundThread(
-      new SweeperTask(heap(), heap()->old_data_space()),
+      new SweeperTask(heap(), heap()->old_space()),
       v8::Platform::kShortRunningTask);
   V8::GetCurrentPlatform()->CallOnBackgroundThread(
-      new SweeperTask(heap(), heap()->old_pointer_space()),
+      new SweeperTask(heap(), heap()->code_space()),
       v8::Platform::kShortRunningTask);
+  V8::GetCurrentPlatform()->CallOnBackgroundThread(
+      new SweeperTask(heap(), heap()->map_space()),
+      v8::Platform::kShortRunningTask);
+}
+
+
+void MarkCompactCollector::SweepOrWaitUntilSweepingCompleted(Page* page) {
+  PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner());
+  if (!page->SweepingCompleted()) {
+    SweepInParallel(page, owner);
+    if (!page->SweepingCompleted()) {
+      // We were not able to sweep that page, i.e., a concurrent
+      // sweeper thread currently owns this page. Wait for the sweeper
+      // thread to be done with this page.
+      page->WaitUntilSweepingCompleted();
+    }
+  }
+}
+
+
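+// Helps out with concurrent sweeping for the paged space backing |space| (if
+// sweeping has not finished yet) and then refills the compaction space's
+// free list.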
+void MarkCompactCollector::SweepAndRefill(CompactionSpace* space) {
+  if (FLAG_concurrent_sweeping && !IsSweepingCompleted()) {
+    SweepInParallel(heap()->paged_space(space->identity()), 0);
+    space->RefillFreeList();
+  }
 }
 
 
@@ -478,20 +566,22 @@
   // If sweeping is not completed or not running at all, we try to complete it
   // here.
   if (!FLAG_concurrent_sweeping || !IsSweepingCompleted()) {
-    SweepInParallel(heap()->paged_space(OLD_DATA_SPACE), 0);
-    SweepInParallel(heap()->paged_space(OLD_POINTER_SPACE), 0);
+    SweepInParallel(heap()->paged_space(OLD_SPACE), 0);
+    SweepInParallel(heap()->paged_space(CODE_SPACE), 0);
+    SweepInParallel(heap()->paged_space(MAP_SPACE), 0);
   }
-  // Wait twice for both jobs.
+
   if (FLAG_concurrent_sweeping) {
-    pending_sweeper_jobs_semaphore_.Wait();
-    pending_sweeper_jobs_semaphore_.Wait();
+    pending_sweeper_tasks_semaphore_.Wait();
+    pending_sweeper_tasks_semaphore_.Wait();
+    pending_sweeper_tasks_semaphore_.Wait();
   }
+
   ParallelSweepSpacesComplete();
   sweeping_in_progress_ = false;
-  RefillFreeList(heap()->paged_space(OLD_DATA_SPACE));
-  RefillFreeList(heap()->paged_space(OLD_POINTER_SPACE));
-  heap()->paged_space(OLD_DATA_SPACE)->ResetUnsweptFreeBytes();
-  heap()->paged_space(OLD_POINTER_SPACE)->ResetUnsweptFreeBytes();
+  heap()->old_space()->RefillFreeList();
+  heap()->code_space()->RefillFreeList();
+  heap()->map_space()->RefillFreeList();
 
 #ifdef VERIFY_HEAP
   if (FLAG_verify_heap && !evacuation()) {
@@ -502,40 +592,21 @@
 
 
 bool MarkCompactCollector::IsSweepingCompleted() {
-  if (!pending_sweeper_jobs_semaphore_.WaitFor(
+  if (!pending_sweeper_tasks_semaphore_.WaitFor(
           base::TimeDelta::FromSeconds(0))) {
     return false;
   }
-  pending_sweeper_jobs_semaphore_.Signal();
+  pending_sweeper_tasks_semaphore_.Signal();
   return true;
 }
 
 
-void MarkCompactCollector::RefillFreeList(PagedSpace* space) {
-  FreeList* free_list;
-
-  if (space == heap()->old_pointer_space()) {
-    free_list = free_list_old_pointer_space_.get();
-  } else if (space == heap()->old_data_space()) {
-    free_list = free_list_old_data_space_.get();
-  } else {
-    // Any PagedSpace might invoke RefillFreeLists, so we need to make sure
-    // to only refill them for old data and pointer spaces.
-    return;
-  }
-
-  intptr_t freed_bytes = space->free_list()->Concatenate(free_list);
-  space->AddToAccountingStats(freed_bytes);
-  space->DecrementUnsweptFreeBytes(freed_bytes);
-}
-
-
-void Marking::TransferMark(Address old_start, Address new_start) {
+void Marking::TransferMark(Heap* heap, Address old_start, Address new_start) {
   // This is only used when resizing an object.
   DCHECK(MemoryChunk::FromAddress(old_start) ==
          MemoryChunk::FromAddress(new_start));
 
-  if (!heap_->incremental_marking()->IsMarking()) return;
+  if (!heap->incremental_marking()->IsMarking()) return;
 
   // If the mark doesn't move, we don't check the color of the object.
   // It doesn't matter whether the object is black, since it hasn't changed
@@ -550,17 +621,14 @@
 #endif
 
   if (Marking::IsBlack(old_mark_bit)) {
-    old_mark_bit.Clear();
-    DCHECK(IsWhite(old_mark_bit));
+    Marking::BlackToWhite(old_mark_bit);
     Marking::MarkBlack(new_mark_bit);
     return;
   } else if (Marking::IsGrey(old_mark_bit)) {
-    old_mark_bit.Clear();
-    old_mark_bit.Next().Clear();
-    DCHECK(IsWhite(old_mark_bit));
-    heap_->incremental_marking()->WhiteToGreyAndPush(
+    Marking::GreyToWhite(old_mark_bit);
+    heap->incremental_marking()->WhiteToGreyAndPush(
         HeapObject::FromAddress(new_start), new_mark_bit);
-    heap_->incremental_marking()->RestartIfNotMarking();
+    heap->incremental_marking()->RestartIfNotMarking();
   }
 
 #ifdef DEBUG
@@ -574,18 +642,12 @@
   switch (space) {
     case NEW_SPACE:
       return "NEW_SPACE";
-    case OLD_POINTER_SPACE:
-      return "OLD_POINTER_SPACE";
-    case OLD_DATA_SPACE:
-      return "OLD_DATA_SPACE";
+    case OLD_SPACE:
+      return "OLD_SPACE";
     case CODE_SPACE:
       return "CODE_SPACE";
     case MAP_SPACE:
       return "MAP_SPACE";
-    case CELL_SPACE:
-      return "CELL_SPACE";
-    case PROPERTY_CELL_SPACE:
-      return "PROPERTY_CELL_SPACE";
     case LO_SPACE:
       return "LO_SPACE";
     default:
@@ -596,203 +658,173 @@
 }
 
 
-// Returns zero for pages that have so little fragmentation that it is not
-// worth defragmenting them.  Otherwise a positive integer that gives an
-// estimate of fragmentation on an arbitrary scale.
-static int FreeListFragmentation(PagedSpace* space, Page* p) {
-  // If page was not swept then there are no free list items on it.
-  if (!p->WasSwept()) {
-    if (FLAG_trace_fragmentation) {
-      PrintF("%p [%s]: %d bytes live (unswept)\n", reinterpret_cast<void*>(p),
-             AllocationSpaceName(space->identity()), p->LiveBytes());
-    }
-    return 0;
-  }
+void MarkCompactCollector::ComputeEvacuationHeuristics(
+    int area_size, int* target_fragmentation_percent,
+    int* max_evacuated_bytes) {
+  // For memory-reducing mode we define both constants directly.
+  const int kTargetFragmentationPercentForReduceMemory = 20;
+  const int kMaxEvacuatedBytesForReduceMemory = 12 * Page::kPageSize;
 
-  PagedSpace::SizeStats sizes;
-  space->ObtainFreeListStatistics(p, &sizes);
+  // For regular mode (which is latency critical) we define less aggressive
+  // defaults to start with and switch to a trace-based approach (using the
+  // compaction speed) as soon as we have enough samples.
+  const int kTargetFragmentationPercent = 70;
+  const int kMaxEvacuatedBytes = 4 * Page::kPageSize;
+  // Time budget for compacting a single area (= the payload of a page). Used
+  // as soon as enough compaction speed samples exist.
+  const int kTargetMsPerArea = 1;
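+  // Purely illustrative example (numbers are not taken from the tracer): with
+  // an area size of ~500 KB and a traced compaction speed of 100 KB/ms,
+  // estimated_ms_per_area below becomes 1 + 5 = 6, so the target
+  // fragmentation percent is 100 - 100 / 6 = 84, i.e. only pages that are at
+  // least ~84% free qualify for evacuation.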
 
-  intptr_t ratio;
-  intptr_t ratio_threshold;
-  intptr_t area_size = space->AreaSize();
-  if (space->identity() == CODE_SPACE) {
-    ratio = (sizes.medium_size_ * 10 + sizes.large_size_ * 2) * 100 / area_size;
-    ratio_threshold = 10;
+  if (heap()->ShouldReduceMemory()) {
+    *target_fragmentation_percent = kTargetFragmentationPercentForReduceMemory;
+    *max_evacuated_bytes = kMaxEvacuatedBytesForReduceMemory;
   } else {
-    ratio = (sizes.small_size_ * 5 + sizes.medium_size_) * 100 / area_size;
-    ratio_threshold = 15;
+    const intptr_t estimated_compaction_speed =
+        heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
+    if (estimated_compaction_speed != 0) {
+      // Estimate the target fragmentation based on traced compaction speed
+      // and a goal for a single page.
+      const intptr_t estimated_ms_per_area =
+          1 + static_cast<intptr_t>(area_size) / estimated_compaction_speed;
+      *target_fragmentation_percent =
+          100 - 100 * kTargetMsPerArea / estimated_ms_per_area;
+      if (*target_fragmentation_percent <
+          kTargetFragmentationPercentForReduceMemory) {
+        *target_fragmentation_percent =
+            kTargetFragmentationPercentForReduceMemory;
+      }
+    } else {
+      *target_fragmentation_percent = kTargetFragmentationPercent;
+    }
+    *max_evacuated_bytes = kMaxEvacuatedBytes;
   }
-
-  if (FLAG_trace_fragmentation) {
-    PrintF("%p [%s]: %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %s\n",
-           reinterpret_cast<void*>(p), AllocationSpaceName(space->identity()),
-           static_cast<int>(sizes.small_size_),
-           static_cast<double>(sizes.small_size_ * 100) / area_size,
-           static_cast<int>(sizes.medium_size_),
-           static_cast<double>(sizes.medium_size_ * 100) / area_size,
-           static_cast<int>(sizes.large_size_),
-           static_cast<double>(sizes.large_size_ * 100) / area_size,
-           static_cast<int>(sizes.huge_size_),
-           static_cast<double>(sizes.huge_size_ * 100) / area_size,
-           (ratio > ratio_threshold) ? "[fragmented]" : "");
-  }
-
-  if (FLAG_always_compact && sizes.Total() != area_size) {
-    return 1;
-  }
-
-  if (ratio <= ratio_threshold) return 0;  // Not fragmented.
-
-  return static_cast<int>(ratio - ratio_threshold);
 }
 
 
 void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
-  DCHECK(space->identity() == OLD_POINTER_SPACE ||
-         space->identity() == OLD_DATA_SPACE ||
-         space->identity() == CODE_SPACE);
+  DCHECK(space->identity() == OLD_SPACE || space->identity() == CODE_SPACE);
 
-  static const int kMaxMaxEvacuationCandidates = 1000;
   int number_of_pages = space->CountTotalPages();
-  int max_evacuation_candidates =
-      static_cast<int>(std::sqrt(number_of_pages / 2.0) + 1);
+  int area_size = space->AreaSize();
 
-  if (FLAG_stress_compaction || FLAG_always_compact) {
-    max_evacuation_candidates = kMaxMaxEvacuationCandidates;
-  }
-
-  class Candidate {
-   public:
-    Candidate() : fragmentation_(0), page_(NULL) {}
-    Candidate(int f, Page* p) : fragmentation_(f), page_(p) {}
-
-    int fragmentation() { return fragmentation_; }
-    Page* page() { return page_; }
-
-   private:
-    int fragmentation_;
-    Page* page_;
-  };
-
-  enum CompactionMode { COMPACT_FREE_LISTS, REDUCE_MEMORY_FOOTPRINT };
-
-  CompactionMode mode = COMPACT_FREE_LISTS;
-
-  intptr_t reserved = number_of_pages * space->AreaSize();
-  intptr_t over_reserved = reserved - space->SizeOfObjects();
-  static const intptr_t kFreenessThreshold = 50;
-
-  if (reduce_memory_footprint_ && over_reserved >= space->AreaSize()) {
-    // If reduction of memory footprint was requested, we are aggressive
-    // about choosing pages to free.  We expect that half-empty pages
-    // are easier to compact so slightly bump the limit.
-    mode = REDUCE_MEMORY_FOOTPRINT;
-    max_evacuation_candidates += 2;
-  }
-
-
-  if (over_reserved > reserved / 3 && over_reserved >= 2 * space->AreaSize()) {
-    // If over-usage is very high (more than a third of the space), we
-    // try to free all mostly empty pages.  We expect that almost empty
-    // pages are even easier to compact so bump the limit even more.
-    mode = REDUCE_MEMORY_FOOTPRINT;
-    max_evacuation_candidates *= 2;
-  }
-
-  if (FLAG_trace_fragmentation && mode == REDUCE_MEMORY_FOOTPRINT) {
-    PrintF(
-        "Estimated over reserved memory: %.1f / %.1f MB (threshold %d), "
-        "evacuation candidate limit: %d\n",
-        static_cast<double>(over_reserved) / MB,
-        static_cast<double>(reserved) / MB,
-        static_cast<int>(kFreenessThreshold), max_evacuation_candidates);
-  }
-
-  intptr_t estimated_release = 0;
-
-  Candidate candidates[kMaxMaxEvacuationCandidates];
-
-  max_evacuation_candidates =
-      Min(kMaxMaxEvacuationCandidates, max_evacuation_candidates);
-
-  int count = 0;
-  int fragmentation = 0;
-  Candidate* least = NULL;
+  // Pairs of (live_bytes_in_page, page).
+  typedef std::pair<int, Page*> LiveBytesPagePair;
+  std::vector<LiveBytesPagePair> pages;
+  pages.reserve(number_of_pages);
 
   PageIterator it(space);
-  if (it.has_next()) it.next();  // Never compact the first page.
-
   while (it.has_next()) {
     Page* p = it.next();
-    p->ClearEvacuationCandidate();
-
-    if (FLAG_stress_compaction) {
-      unsigned int counter = space->heap()->ms_count();
-      uintptr_t page_number = reinterpret_cast<uintptr_t>(p) >> kPageSizeBits;
-      if ((counter & 1) == (page_number & 1)) fragmentation = 1;
-    } else if (mode == REDUCE_MEMORY_FOOTPRINT) {
-      // Don't try to release too many pages.
-      if (estimated_release >= over_reserved) {
-        continue;
-      }
-
-      intptr_t free_bytes = 0;
-
-      if (!p->WasSwept()) {
-        free_bytes = (p->area_size() - p->LiveBytes());
-      } else {
-        PagedSpace::SizeStats sizes;
-        space->ObtainFreeListStatistics(p, &sizes);
-        free_bytes = sizes.Total();
-      }
-
-      int free_pct = static_cast<int>(free_bytes * 100) / p->area_size();
-
-      if (free_pct >= kFreenessThreshold) {
-        estimated_release += free_bytes;
-        fragmentation = free_pct;
-      } else {
-        fragmentation = 0;
-      }
-
-      if (FLAG_trace_fragmentation) {
-        PrintF("%p [%s]: %d (%.2f%%) free %s\n", reinterpret_cast<void*>(p),
-               AllocationSpaceName(space->identity()),
-               static_cast<int>(free_bytes),
-               static_cast<double>(free_bytes * 100) / p->area_size(),
-               (fragmentation > 0) ? "[fragmented]" : "");
-      }
-    } else {
-      fragmentation = FreeListFragmentation(space, p);
+    if (p->NeverEvacuate()) continue;
+    if (p->IsFlagSet(Page::POPULAR_PAGE)) {
+      // This page had a slots buffer overflow during the previous GC; skip it.
+      p->ClearFlag(Page::POPULAR_PAGE);
+      continue;
     }
+    // Invariant: Evacuation candidates are only created when marking is
+    // started. At the end of a GC all evacuation candidates are cleared and
+    // their slot buffers are released.
+    CHECK(!p->IsEvacuationCandidate());
+    CHECK(p->slots_buffer() == NULL);
+    DCHECK(p->area_size() == area_size);
+    int live_bytes =
+        p->WasSwept() ? p->LiveBytesFromFreeList() : p->LiveBytes();
+    pages.push_back(std::make_pair(live_bytes, p));
+  }
 
-    if (fragmentation != 0) {
-      if (count < max_evacuation_candidates) {
-        candidates[count++] = Candidate(fragmentation, p);
-      } else {
-        if (least == NULL) {
-          for (int i = 0; i < max_evacuation_candidates; i++) {
-            if (least == NULL ||
-                candidates[i].fragmentation() < least->fragmentation()) {
-              least = candidates + i;
-            }
-          }
-        }
-        if (least->fragmentation() < fragmentation) {
-          *least = Candidate(fragmentation, p);
-          least = NULL;
-        }
+  int candidate_count = 0;
+  int total_live_bytes = 0;
+
+  const bool reduce_memory = heap()->ShouldReduceMemory();
+  if (FLAG_manual_evacuation_candidates_selection) {
+    for (size_t i = 0; i < pages.size(); i++) {
+      Page* p = pages[i].second;
+      if (p->IsFlagSet(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING)) {
+        candidate_count++;
+        total_live_bytes += pages[i].first;
+        p->ClearFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
+        AddEvacuationCandidate(p);
       }
     }
+  } else if (FLAG_stress_compaction) {
+    for (size_t i = 0; i < pages.size(); i++) {
+      Page* p = pages[i].second;
+      if (i % 2 == 0) {
+        candidate_count++;
+        total_live_bytes += pages[i].first;
+        AddEvacuationCandidate(p);
+      }
+    }
+  } else {
+    // The following approach determines the pages that should be evacuated.
+    //
+    // We use two conditions to decide whether a page qualifies as an evacuation
+    // candidate, or not:
+    // * Target fragmentation: How fragmented is a page, i.e., the ratio
+    //   between live bytes and the capacity of this page (= area).
+    // * Evacuation quota: A global quota determining how many bytes should be
+    //   compacted.
+    //
+    // The algorithm sorts all pages by live bytes and then iterates through
+    // them starting with the page with the most free memory, adding them to the
+    // set of evacuation candidates as long as both conditions (fragmentation
+    // and quota) hold.
+    int max_evacuated_bytes;
+    int target_fragmentation_percent;
+    ComputeEvacuationHeuristics(area_size, &target_fragmentation_percent,
+                                &max_evacuated_bytes);
+
+    const intptr_t free_bytes_threshold =
+        target_fragmentation_percent * (area_size / 100);
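+    // E.g., with target_fragmentation_percent == 70 and an area size of
+    // ~500 KB, only pages with at least ~350 KB of free space can become
+    // evacuation candidates (unless FLAG_always_compact is set).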
+
+    // Sort pages from the most free to the least free, then select
+    // the first n pages for evacuation such that:
+    // - the total size of evacuated objects does not exceed the specified
+    // limit.
+    // - fragmentation of (n+1)-th page does not exceed the specified limit.
+    std::sort(pages.begin(), pages.end(),
+              [](const LiveBytesPagePair& a, const LiveBytesPagePair& b) {
+                return a.first < b.first;
+              });
+    for (size_t i = 0; i < pages.size(); i++) {
+      int live_bytes = pages[i].first;
+      int free_bytes = area_size - live_bytes;
+      if (FLAG_always_compact ||
+          ((free_bytes >= free_bytes_threshold) &&
+           ((total_live_bytes + live_bytes) <= max_evacuated_bytes))) {
+        candidate_count++;
+        total_live_bytes += live_bytes;
+      }
+      if (FLAG_trace_fragmentation_verbose) {
+        PrintIsolate(isolate(),
+                     "compaction-selection-page: space=%s free_bytes_page=%d "
+                     "fragmentation_limit_kb=%d fragmentation_limit_percent=%d "
+                     "sum_compaction_kb=%d "
+                     "compaction_limit_kb=%d\n",
+                     AllocationSpaceName(space->identity()), free_bytes / KB,
+                     free_bytes_threshold / KB, target_fragmentation_percent,
+                     total_live_bytes / KB, max_evacuated_bytes / KB);
+      }
+    }
+    // How many pages we will allocate for the evacuated objects
+    // in the worst case: ceil(total_live_bytes / area_size).
+    int estimated_new_pages = (total_live_bytes + area_size - 1) / area_size;
+    DCHECK_LE(estimated_new_pages, candidate_count);
+    int estimated_released_pages = candidate_count - estimated_new_pages;
+    // Avoid (compact -> expand) cycles.
+    if ((estimated_released_pages == 0) && !FLAG_always_compact) {
+      candidate_count = 0;
+    }
+    for (int i = 0; i < candidate_count; i++) {
+      AddEvacuationCandidate(pages[i].second);
+    }
   }
 
-  for (int i = 0; i < count; i++) {
-    AddEvacuationCandidate(candidates[i].page());
-  }
-
-  if (count > 0 && FLAG_trace_fragmentation) {
-    PrintF("Collected %d evacuation candidates for space %s\n", count,
-           AllocationSpaceName(space->identity()));
+  if (FLAG_trace_fragmentation) {
+    PrintIsolate(isolate(),
+                 "compaction-selection: space=%s reduce_memory=%d pages=%d "
+                 "total_live_bytes=%d\n",
+                 AllocationSpaceName(space->identity()), reduce_memory,
+                 candidate_count, total_live_bytes / KB);
   }
 }
 
@@ -802,13 +834,12 @@
     int npages = evacuation_candidates_.length();
     for (int i = 0; i < npages; i++) {
       Page* p = evacuation_candidates_[i];
-      slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
+      slots_buffer_allocator_->DeallocateChain(p->slots_buffer_address());
       p->ClearEvacuationCandidate();
       p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
     }
     compacting_ = false;
     evacuation_candidates_.Rewind(0);
-    invalidated_code_.Rewind(0);
   }
   DCHECK_EQ(0, evacuation_candidates_.length());
 }
@@ -829,12 +860,17 @@
     EnsureSweepingCompleted();
   }
 
+  // If concurrent unmapping tasks are still running, we should wait for
+  // them here.
+  heap()->WaitUntilUnmappingOfFreeChunksCompleted();
+
   // Clear marking bits if incremental marking is aborted.
-  if (was_marked_incrementally_ && abort_incremental_marking_) {
-    heap()->incremental_marking()->Abort();
+  if (was_marked_incrementally_ && heap_->ShouldAbortIncrementalMarking()) {
+    heap()->incremental_marking()->Stop();
     ClearMarkbits();
     AbortWeakCollections();
     AbortWeakCells();
+    AbortTransitionArrays();
     AbortCompaction();
     was_marked_incrementally_ = false;
   }
@@ -860,10 +896,21 @@
 
 
 void MarkCompactCollector::Finish() {
+  GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_FINISH);
+
+  // The hashing of weak_object_to_code_table is no longer valid.
+  heap()->weak_object_to_code_table()->Rehash(
+      heap()->isolate()->factory()->undefined_value());
+
+  // Clear the marking state of live large objects.
+  heap_->lo_space()->ClearMarkingStateOfLiveObjects();
+
 #ifdef DEBUG
   DCHECK(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
   state_ = IDLE;
 #endif
+  heap_->isolate()->inner_pointer_to_code_cache()->Flush();
+
   // The stub cache is not traversed during GC; clear the cache to
   // force lazy re-initialization of it. This must be done after the
   // GC, because it relies on the new address of certain old space
@@ -877,6 +924,13 @@
   }
 
   heap_->incremental_marking()->ClearIdleMarkingDelayCounter();
+
+  if (marking_parity_ == EVEN_MARKING_PARITY) {
+    marking_parity_ = ODD_MARKING_PARITY;
+  } else {
+    DCHECK(marking_parity_ == ODD_MARKING_PARITY);
+    marking_parity_ = EVEN_MARKING_PARITY;
+  }
 }
 
 
@@ -922,15 +976,20 @@
 
     Code* code = shared->code();
     MarkBit code_mark = Marking::MarkBitFrom(code);
-    if (!code_mark.Get()) {
+    if (Marking::IsWhite(code_mark)) {
       if (FLAG_trace_code_flushing && shared->is_compiled()) {
         PrintF("[code-flushing clears: ");
         shared->ShortPrint();
         PrintF(" - age: %d]\n", code->GetAge());
       }
+      // Always flush the optimized code map if there is one.
+      if (!shared->OptimizedCodeMapIsCleared()) {
+        shared->ClearOptimizedCodeMap();
+      }
       shared->set_code(lazy_compile);
       candidate->set_code(lazy_compile);
     } else {
+      DCHECK(Marking::IsBlack(code_mark));
       candidate->set_code(code);
     }
 
@@ -938,13 +997,13 @@
     // setter did not record the slot update and we have to do that manually.
     Address slot = candidate->address() + JSFunction::kCodeEntryOffset;
     Code* target = Code::cast(Code::GetObjectFromEntryAddress(slot));
-    isolate_->heap()->mark_compact_collector()->RecordCodeEntrySlot(slot,
-                                                                    target);
+    isolate_->heap()->mark_compact_collector()->RecordCodeEntrySlot(
+        candidate, slot, target);
 
     Object** shared_code_slot =
         HeapObject::RawField(shared, SharedFunctionInfo::kCodeOffset);
     isolate_->heap()->mark_compact_collector()->RecordSlot(
-        shared_code_slot, shared_code_slot, *shared_code_slot);
+        shared, shared_code_slot, *shared_code_slot);
 
     candidate = next_candidate;
   }
@@ -964,18 +1023,22 @@
 
     Code* code = candidate->code();
     MarkBit code_mark = Marking::MarkBitFrom(code);
-    if (!code_mark.Get()) {
+    if (Marking::IsWhite(code_mark)) {
       if (FLAG_trace_code_flushing && candidate->is_compiled()) {
         PrintF("[code-flushing clears: ");
         candidate->ShortPrint();
         PrintF(" - age: %d]\n", code->GetAge());
       }
+      // Always flush the optimized code map if there is one.
+      if (!candidate->OptimizedCodeMapIsCleared()) {
+        candidate->ClearOptimizedCodeMap();
+      }
       candidate->set_code(lazy_compile);
     }
 
     Object** code_slot =
         HeapObject::RawField(candidate, SharedFunctionInfo::kCodeOffset);
-    isolate_->heap()->mark_compact_collector()->RecordSlot(code_slot, code_slot,
+    isolate_->heap()->mark_compact_collector()->RecordSlot(candidate, code_slot,
                                                            *code_slot);
 
     candidate = next_candidate;
@@ -985,54 +1048,6 @@
 }
 
 
-void CodeFlusher::ProcessOptimizedCodeMaps() {
-  STATIC_ASSERT(SharedFunctionInfo::kEntryLength == 4);
-
-  SharedFunctionInfo* holder = optimized_code_map_holder_head_;
-  SharedFunctionInfo* next_holder;
-
-  while (holder != NULL) {
-    next_holder = GetNextCodeMap(holder);
-    ClearNextCodeMap(holder);
-
-    FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
-    int new_length = SharedFunctionInfo::kEntriesStart;
-    int old_length = code_map->length();
-    for (int i = SharedFunctionInfo::kEntriesStart; i < old_length;
-         i += SharedFunctionInfo::kEntryLength) {
-      Code* code =
-          Code::cast(code_map->get(i + SharedFunctionInfo::kCachedCodeOffset));
-      if (!Marking::MarkBitFrom(code).Get()) continue;
-
-      // Move every slot in the entry.
-      for (int j = 0; j < SharedFunctionInfo::kEntryLength; j++) {
-        int dst_index = new_length++;
-        Object** slot = code_map->RawFieldOfElementAt(dst_index);
-        Object* object = code_map->get(i + j);
-        code_map->set(dst_index, object);
-        if (j == SharedFunctionInfo::kOsrAstIdOffset) {
-          DCHECK(object->IsSmi());
-        } else {
-          DCHECK(
-              Marking::IsBlack(Marking::MarkBitFrom(HeapObject::cast(*slot))));
-          isolate_->heap()->mark_compact_collector()->RecordSlot(slot, slot,
-                                                                 *slot);
-        }
-      }
-    }
-
-    // Trim the optimized code map if entries have been removed.
-    if (new_length < old_length) {
-      holder->TrimOptimizedCodeMap(old_length - new_length);
-    }
-
-    holder = next_holder;
-  }
-
-  optimized_code_map_holder_head_ = NULL;
-}
-
-
 void CodeFlusher::EvictCandidate(SharedFunctionInfo* shared_info) {
   // Make sure previous flushing decisions are revisited.
   isolate_->heap()->incremental_marking()->RecordWrites(shared_info);
@@ -1103,79 +1118,6 @@
 }
 
 
-void CodeFlusher::EvictOptimizedCodeMap(SharedFunctionInfo* code_map_holder) {
-  DCHECK(!FixedArray::cast(code_map_holder->optimized_code_map())
-              ->get(SharedFunctionInfo::kNextMapIndex)
-              ->IsUndefined());
-
-  // Make sure previous flushing decisions are revisited.
-  isolate_->heap()->incremental_marking()->RecordWrites(code_map_holder);
-
-  if (FLAG_trace_code_flushing) {
-    PrintF("[code-flushing abandons code-map: ");
-    code_map_holder->ShortPrint();
-    PrintF("]\n");
-  }
-
-  SharedFunctionInfo* holder = optimized_code_map_holder_head_;
-  SharedFunctionInfo* next_holder;
-  if (holder == code_map_holder) {
-    next_holder = GetNextCodeMap(code_map_holder);
-    optimized_code_map_holder_head_ = next_holder;
-    ClearNextCodeMap(code_map_holder);
-  } else {
-    while (holder != NULL) {
-      next_holder = GetNextCodeMap(holder);
-
-      if (next_holder == code_map_holder) {
-        next_holder = GetNextCodeMap(code_map_holder);
-        SetNextCodeMap(holder, next_holder);
-        ClearNextCodeMap(code_map_holder);
-        break;
-      }
-
-      holder = next_holder;
-    }
-  }
-}
-
-
-void CodeFlusher::EvictJSFunctionCandidates() {
-  JSFunction* candidate = jsfunction_candidates_head_;
-  JSFunction* next_candidate;
-  while (candidate != NULL) {
-    next_candidate = GetNextCandidate(candidate);
-    EvictCandidate(candidate);
-    candidate = next_candidate;
-  }
-  DCHECK(jsfunction_candidates_head_ == NULL);
-}
-
-
-void CodeFlusher::EvictSharedFunctionInfoCandidates() {
-  SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
-  SharedFunctionInfo* next_candidate;
-  while (candidate != NULL) {
-    next_candidate = GetNextCandidate(candidate);
-    EvictCandidate(candidate);
-    candidate = next_candidate;
-  }
-  DCHECK(shared_function_info_candidates_head_ == NULL);
-}
-
-
-void CodeFlusher::EvictOptimizedCodeMaps() {
-  SharedFunctionInfo* holder = optimized_code_map_holder_head_;
-  SharedFunctionInfo* next_holder;
-  while (holder != NULL) {
-    next_holder = GetNextCodeMap(holder);
-    EvictOptimizedCodeMap(holder);
-    holder = next_holder;
-  }
-  DCHECK(optimized_code_map_holder_head_ == NULL);
-}
-
-
 void CodeFlusher::IteratePointersToFromSpace(ObjectVisitor* v) {
   Heap* heap = isolate_->heap();
 
@@ -1191,81 +1133,26 @@
 }
 
 
-MarkCompactCollector::~MarkCompactCollector() {
-  if (code_flusher_ != NULL) {
-    delete code_flusher_;
-    code_flusher_ = NULL;
-  }
-}
-
-
-static inline HeapObject* ShortCircuitConsString(Object** p) {
-  // Optimization: If the heap object pointed to by p is a non-internalized
-  // cons string whose right substring is HEAP->empty_string, update
-  // it in place to its left substring.  Return the updated value.
-  //
-  // Here we assume that if we change *p, we replace it with a heap object
-  // (i.e., the left substring of a cons string is always a heap object).
-  //
-  // The check performed is:
-  //   object->IsConsString() && !object->IsInternalizedString() &&
-  //   (ConsString::cast(object)->second() == HEAP->empty_string())
-  // except the maps for the object and its possible substrings might be
-  // marked.
-  HeapObject* object = HeapObject::cast(*p);
-  Map* map = object->map();
-  InstanceType type = map->instance_type();
-  if (!IsShortcutCandidate(type)) return object;
-
-  Object* second = reinterpret_cast<ConsString*>(object)->second();
-  Heap* heap = map->GetHeap();
-  if (second != heap->empty_string()) {
-    return object;
-  }
-
-  // Since we don't have the object's start, it is impossible to update the
-  // page dirty marks. Therefore, we only replace the string with its left
-  // substring when page dirty marks do not change.
-  Object* first = reinterpret_cast<ConsString*>(object)->first();
-  if (!heap->InNewSpace(object) && heap->InNewSpace(first)) return object;
-
-  *p = first;
-  return HeapObject::cast(first);
-}
-
-
 class MarkCompactMarkingVisitor
     : public StaticMarkingVisitor<MarkCompactMarkingVisitor> {
  public:
-  static void ObjectStatsVisitBase(StaticVisitorBase::VisitorId id, Map* map,
-                                   HeapObject* obj);
-
-  static void ObjectStatsCountFixedArray(
-      FixedArrayBase* fixed_array, FixedArraySubInstanceType fast_type,
-      FixedArraySubInstanceType dictionary_type);
-
-  template <MarkCompactMarkingVisitor::VisitorId id>
-  class ObjectStatsTracker {
-   public:
-    static inline void Visit(Map* map, HeapObject* obj);
-  };
-
   static void Initialize();
 
-  INLINE(static void VisitPointer(Heap* heap, Object** p)) {
-    MarkObjectByPointer(heap->mark_compact_collector(), p, p);
+  INLINE(static void VisitPointer(Heap* heap, HeapObject* object, Object** p)) {
+    MarkObjectByPointer(heap->mark_compact_collector(), object, p);
   }
 
-  INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
+  INLINE(static void VisitPointers(Heap* heap, HeapObject* object,
+                                   Object** start, Object** end)) {
     // Mark all objects pointed to in [start, end).
     const int kMinRangeForMarkingRecursion = 64;
     if (end - start >= kMinRangeForMarkingRecursion) {
-      if (VisitUnmarkedObjects(heap, start, end)) return;
+      if (VisitUnmarkedObjects(heap, object, start, end)) return;
       // We are close to a stack overflow, so just mark the objects.
     }
     MarkCompactCollector* collector = heap->mark_compact_collector();
     for (Object** p = start; p < end; p++) {
-      MarkObjectByPointer(collector, start, p);
+      MarkObjectByPointer(collector, object, p);
     }
   }
 
@@ -1279,7 +1166,7 @@
   // Returns true if object needed marking and false otherwise.
   INLINE(static bool MarkObjectWithoutPush(Heap* heap, HeapObject* object)) {
     MarkBit mark_bit = Marking::MarkBitFrom(object);
-    if (!mark_bit.Get()) {
+    if (Marking::IsWhite(mark_bit)) {
       heap->mark_compact_collector()->SetMark(object, mark_bit);
       return true;
     }
@@ -1288,12 +1175,12 @@
 
   // Mark object pointed to by p.
   INLINE(static void MarkObjectByPointer(MarkCompactCollector* collector,
-                                         Object** anchor_slot, Object** p)) {
+                                         HeapObject* object, Object** p)) {
     if (!(*p)->IsHeapObject()) return;
-    HeapObject* object = ShortCircuitConsString(p);
-    collector->RecordSlot(anchor_slot, p, object);
-    MarkBit mark = Marking::MarkBitFrom(object);
-    collector->MarkObject(object, mark);
+    HeapObject* target_object = HeapObject::cast(*p);
+    collector->RecordSlot(object, p, target_object);
+    MarkBit mark = Marking::MarkBitFrom(target_object);
+    collector->MarkObject(target_object, mark);
   }
 
 
@@ -1316,8 +1203,8 @@
 
   // Visit all unmarked objects pointed to by [start, end).
   // Returns false if the operation fails (lack of stack space).
-  INLINE(static bool VisitUnmarkedObjects(Heap* heap, Object** start,
-                                          Object** end)) {
+  INLINE(static bool VisitUnmarkedObjects(Heap* heap, HeapObject* object,
+                                          Object** start, Object** end)) {
     // Return false if we are close to the stack limit.
     StackLimitCheck check(heap->isolate());
     if (check.HasOverflowed()) return false;
@@ -1327,10 +1214,10 @@
     for (Object** p = start; p < end; p++) {
       Object* o = *p;
       if (!o->IsHeapObject()) continue;
-      collector->RecordSlot(start, p, o);
+      collector->RecordSlot(object, p, o);
       HeapObject* obj = HeapObject::cast(o);
       MarkBit mark = Marking::MarkBitFrom(obj);
-      if (mark.Get()) continue;
+      if (Marking::IsBlackOrGrey(mark)) continue;
       VisitUnmarkedObject(collector, obj);
     }
     return true;
@@ -1368,11 +1255,11 @@
       FixedArray* data = FixedArray::cast(re->data());
       Object** slot =
           data->data_start() + JSRegExp::saved_code_index(is_one_byte);
-      heap->mark_compact_collector()->RecordSlot(slot, slot, code);
+      heap->mark_compact_collector()->RecordSlot(data, slot, code);
 
       // Set a number in the 0-255 range to guarantee no smi overflow.
       re->SetDataAt(JSRegExp::code_index(is_one_byte),
-                    Smi::FromInt(heap->sweep_generation() & 0xff));
+                    Smi::FromInt(heap->ms_count() & 0xff));
     } else if (code->IsSmi()) {
       int value = Smi::cast(code)->value();
       // The regexp has not been compiled yet or there was a compilation error.
@@ -1382,7 +1269,7 @@
       }
 
       // Check if we should flush now.
-      if (value == ((heap->sweep_generation() - kRegExpCodeThreshold) & 0xff)) {
+      if (value == ((heap->ms_count() - kRegExpCodeThreshold) & 0xff)) {
         re->SetDataAt(JSRegExp::code_index(is_one_byte),
                       Smi::FromInt(JSRegExp::kUninitializedValue));
         re->SetDataAt(JSRegExp::saved_code_index(is_one_byte),
@@ -1411,131 +1298,6 @@
     // Visit the fields of the RegExp, including the updated FixedArray.
     VisitJSRegExp(map, object);
   }
-
-  static VisitorDispatchTable<Callback> non_count_table_;
-};
-
-
-void MarkCompactMarkingVisitor::ObjectStatsCountFixedArray(
-    FixedArrayBase* fixed_array, FixedArraySubInstanceType fast_type,
-    FixedArraySubInstanceType dictionary_type) {
-  Heap* heap = fixed_array->map()->GetHeap();
-  if (fixed_array->map() != heap->fixed_cow_array_map() &&
-      fixed_array->map() != heap->fixed_double_array_map() &&
-      fixed_array != heap->empty_fixed_array()) {
-    if (fixed_array->IsDictionary()) {
-      heap->RecordFixedArraySubTypeStats(dictionary_type, fixed_array->Size());
-    } else {
-      heap->RecordFixedArraySubTypeStats(fast_type, fixed_array->Size());
-    }
-  }
-}
-
-
-void MarkCompactMarkingVisitor::ObjectStatsVisitBase(
-    MarkCompactMarkingVisitor::VisitorId id, Map* map, HeapObject* obj) {
-  Heap* heap = map->GetHeap();
-  int object_size = obj->Size();
-  heap->RecordObjectStats(map->instance_type(), object_size);
-  non_count_table_.GetVisitorById(id)(map, obj);
-  if (obj->IsJSObject()) {
-    JSObject* object = JSObject::cast(obj);
-    ObjectStatsCountFixedArray(object->elements(), DICTIONARY_ELEMENTS_SUB_TYPE,
-                               FAST_ELEMENTS_SUB_TYPE);
-    ObjectStatsCountFixedArray(object->properties(),
-                               DICTIONARY_PROPERTIES_SUB_TYPE,
-                               FAST_PROPERTIES_SUB_TYPE);
-  }
-}
-
-
-template <MarkCompactMarkingVisitor::VisitorId id>
-void MarkCompactMarkingVisitor::ObjectStatsTracker<id>::Visit(Map* map,
-                                                              HeapObject* obj) {
-  ObjectStatsVisitBase(id, map, obj);
-}
-
-
-template <>
-class MarkCompactMarkingVisitor::ObjectStatsTracker<
-    MarkCompactMarkingVisitor::kVisitMap> {
- public:
-  static inline void Visit(Map* map, HeapObject* obj) {
-    Heap* heap = map->GetHeap();
-    Map* map_obj = Map::cast(obj);
-    DCHECK(map->instance_type() == MAP_TYPE);
-    DescriptorArray* array = map_obj->instance_descriptors();
-    if (map_obj->owns_descriptors() &&
-        array != heap->empty_descriptor_array()) {
-      int fixed_array_size = array->Size();
-      heap->RecordFixedArraySubTypeStats(DESCRIPTOR_ARRAY_SUB_TYPE,
-                                         fixed_array_size);
-    }
-    if (map_obj->HasTransitionArray()) {
-      int fixed_array_size = map_obj->transitions()->Size();
-      heap->RecordFixedArraySubTypeStats(TRANSITION_ARRAY_SUB_TYPE,
-                                         fixed_array_size);
-    }
-    if (map_obj->has_code_cache()) {
-      CodeCache* cache = CodeCache::cast(map_obj->code_cache());
-      heap->RecordFixedArraySubTypeStats(MAP_CODE_CACHE_SUB_TYPE,
-                                         cache->default_cache()->Size());
-      if (!cache->normal_type_cache()->IsUndefined()) {
-        heap->RecordFixedArraySubTypeStats(
-            MAP_CODE_CACHE_SUB_TYPE,
-            FixedArray::cast(cache->normal_type_cache())->Size());
-      }
-    }
-    ObjectStatsVisitBase(kVisitMap, map, obj);
-  }
-};
-
-
-template <>
-class MarkCompactMarkingVisitor::ObjectStatsTracker<
-    MarkCompactMarkingVisitor::kVisitCode> {
- public:
-  static inline void Visit(Map* map, HeapObject* obj) {
-    Heap* heap = map->GetHeap();
-    int object_size = obj->Size();
-    DCHECK(map->instance_type() == CODE_TYPE);
-    Code* code_obj = Code::cast(obj);
-    heap->RecordCodeSubTypeStats(code_obj->kind(), code_obj->GetRawAge(),
-                                 object_size);
-    ObjectStatsVisitBase(kVisitCode, map, obj);
-  }
-};
-
-
-template <>
-class MarkCompactMarkingVisitor::ObjectStatsTracker<
-    MarkCompactMarkingVisitor::kVisitSharedFunctionInfo> {
- public:
-  static inline void Visit(Map* map, HeapObject* obj) {
-    Heap* heap = map->GetHeap();
-    SharedFunctionInfo* sfi = SharedFunctionInfo::cast(obj);
-    if (sfi->scope_info() != heap->empty_fixed_array()) {
-      heap->RecordFixedArraySubTypeStats(
-          SCOPE_INFO_SUB_TYPE, FixedArray::cast(sfi->scope_info())->Size());
-    }
-    ObjectStatsVisitBase(kVisitSharedFunctionInfo, map, obj);
-  }
-};
-
-
-template <>
-class MarkCompactMarkingVisitor::ObjectStatsTracker<
-    MarkCompactMarkingVisitor::kVisitFixedArray> {
- public:
-  static inline void Visit(Map* map, HeapObject* obj) {
-    Heap* heap = map->GetHeap();
-    FixedArray* fixed_array = FixedArray::cast(obj);
-    if (fixed_array == heap->string_table()) {
-      heap->RecordFixedArraySubTypeStats(STRING_TABLE_SUB_TYPE,
-                                         fixed_array->Size());
-    }
-    ObjectStatsVisitBase(kVisitFixedArray, map, obj);
-  }
 };
 
 
@@ -1545,20 +1307,11 @@
   table_.Register(kVisitJSRegExp, &VisitRegExpAndFlushCode);
 
   if (FLAG_track_gc_object_stats) {
-    // Copy the visitor table to make call-through possible.
-    non_count_table_.CopyFrom(&table_);
-#define VISITOR_ID_COUNT_FUNCTION(id) \
-  table_.Register(kVisit##id, ObjectStatsTracker<kVisit##id>::Visit);
-    VISITOR_ID_LIST(VISITOR_ID_COUNT_FUNCTION)
-#undef VISITOR_ID_COUNT_FUNCTION
+    ObjectStatsVisitor::Initialize(&table_);
   }
 }
 
 
-VisitorDispatchTable<MarkCompactMarkingVisitor::Callback>
-    MarkCompactMarkingVisitor::non_count_table_;
-
-
 class CodeMarkingVisitor : public ThreadVisitor {
  public:
   explicit CodeMarkingVisitor(MarkCompactCollector* collector)
@@ -1578,11 +1331,11 @@
   explicit SharedFunctionInfoMarkingVisitor(MarkCompactCollector* collector)
       : collector_(collector) {}
 
-  void VisitPointers(Object** start, Object** end) {
+  void VisitPointers(Object** start, Object** end) override {
     for (Object** p = start; p < end; p++) VisitPointer(p);
   }
 
-  void VisitPointer(Object** slot) {
+  void VisitPointer(Object** slot) override {
     Object* obj = *slot;
     if (obj->IsSharedFunctionInfo()) {
       SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj);
@@ -1610,19 +1363,15 @@
     MarkBit code_mark = Marking::MarkBitFrom(code);
     MarkObject(code, code_mark);
     if (frame->is_optimized()) {
-      MarkCompactMarkingVisitor::MarkInlinedFunctionsCode(heap(),
-                                                          frame->LookupCode());
+      Code* optimized_code = frame->LookupCode();
+      MarkBit optimized_code_mark = Marking::MarkBitFrom(optimized_code);
+      MarkObject(optimized_code, optimized_code_mark);
     }
   }
 }
 
 
 void MarkCompactCollector::PrepareForCodeFlushing() {
-  // Enable code flushing for non-incremental cycles.
-  if (FLAG_flush_code && !FLAG_flush_code_incrementally) {
-    EnableCodeFlushing(!was_marked_incrementally_);
-  }
-
   // If code flushing is disabled, there is no need to prepare for it.
   if (!is_code_flushing_enabled()) return;
 
@@ -1657,24 +1406,24 @@
   explicit RootMarkingVisitor(Heap* heap)
       : collector_(heap->mark_compact_collector()) {}
 
-  void VisitPointer(Object** p) { MarkObjectByPointer(p); }
+  void VisitPointer(Object** p) override { MarkObjectByPointer(p); }
 
-  void VisitPointers(Object** start, Object** end) {
+  void VisitPointers(Object** start, Object** end) override {
     for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
   }
 
   // Skip the weak next code link in a code object, which is visited in
   // ProcessTopOptimizedFrame.
-  void VisitNextCodeLink(Object** p) {}
+  void VisitNextCodeLink(Object** p) override {}
 
  private:
   void MarkObjectByPointer(Object** p) {
     if (!(*p)->IsHeapObject()) return;
 
-    // Replace flat cons strings in place.
-    HeapObject* object = ShortCircuitConsString(p);
+    HeapObject* object = HeapObject::cast(*p);
     MarkBit mark_bit = Marking::MarkBitFrom(object);
-    if (mark_bit.Get()) return;
+    if (Marking::IsBlackOrGrey(mark_bit)) return;
 
     Map* map = object->map();
     // Mark the object.
@@ -1700,12 +1449,12 @@
  public:
   explicit StringTableCleaner(Heap* heap) : heap_(heap), pointers_removed_(0) {}
 
-  virtual void VisitPointers(Object** start, Object** end) {
+  void VisitPointers(Object** start, Object** end) override {
     // Visit all HeapObject pointers in [start, end).
     for (Object** p = start; p < end; p++) {
       Object* o = *p;
       if (o->IsHeapObject() &&
-          !Marking::MarkBitFrom(HeapObject::cast(o)).Get()) {
+          Marking::IsWhite(Marking::MarkBitFrom(HeapObject::cast(o)))) {
         if (finalize_external_strings) {
           DCHECK(o->IsExternalString());
           heap_->FinalizeExternalString(String::cast(*p));
@@ -1738,7 +1487,9 @@
 class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
  public:
   virtual Object* RetainAs(Object* object) {
-    if (Marking::MarkBitFrom(HeapObject::cast(object)).Get()) {
+    MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::cast(object));
+    DCHECK(!Marking::IsGrey(mark_bit));
+    if (Marking::IsBlack(mark_bit)) {
       return object;
     } else if (object->IsAllocationSite() &&
                !(AllocationSite::cast(object)->IsZombie())) {
@@ -1759,155 +1510,260 @@
 // iterator.  Stop when the marking stack is filled or the end of the space
 // is reached, whichever comes first.
 template <class T>
-static void DiscoverGreyObjectsWithIterator(Heap* heap,
-                                            MarkingDeque* marking_deque,
-                                            T* it) {
+void MarkCompactCollector::DiscoverGreyObjectsWithIterator(T* it) {
   // The caller should ensure that the marking stack is initially not full,
   // so that we don't waste effort pointlessly scanning for objects.
-  DCHECK(!marking_deque->IsFull());
+  DCHECK(!marking_deque()->IsFull());
 
-  Map* filler_map = heap->one_pointer_filler_map();
+  Map* filler_map = heap()->one_pointer_filler_map();
   for (HeapObject* object = it->Next(); object != NULL; object = it->Next()) {
     MarkBit markbit = Marking::MarkBitFrom(object);
     if ((object->map() != filler_map) && Marking::IsGrey(markbit)) {
       Marking::GreyToBlack(markbit);
-      MemoryChunk::IncrementLiveBytesFromGC(object->address(), object->Size());
-      marking_deque->PushBlack(object);
-      if (marking_deque->IsFull()) return;
+      PushBlack(object);
+      if (marking_deque()->IsFull()) return;
     }
   }
 }
 
 
-static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts);
-
-
-static void DiscoverGreyObjectsOnPage(MarkingDeque* marking_deque,
-                                      MemoryChunk* p) {
-  DCHECK(!marking_deque->IsFull());
-  DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
-  DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
-  DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
-  DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
-
-  for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
-    Address cell_base = it.CurrentCellBase();
-    MarkBit::CellType* cell = it.CurrentCell();
-
-    const MarkBit::CellType current_cell = *cell;
-    if (current_cell == 0) continue;
-
-    MarkBit::CellType grey_objects;
-    if (it.HasNext()) {
-      const MarkBit::CellType next_cell = *(cell + 1);
-      grey_objects = current_cell & ((current_cell >> 1) |
-                                     (next_cell << (Bitmap::kBitsPerCell - 1)));
-    } else {
-      grey_objects = current_cell & (current_cell >> 1);
-    }
-
-    int offset = 0;
-    while (grey_objects != 0) {
-      int trailing_zeros = base::bits::CountTrailingZeros32(grey_objects);
-      grey_objects >>= trailing_zeros;
-      offset += trailing_zeros;
-      MarkBit markbit(cell, 1 << offset, false);
-      DCHECK(Marking::IsGrey(markbit));
-      Marking::GreyToBlack(markbit);
-      Address addr = cell_base + offset * kPointerSize;
-      HeapObject* object = HeapObject::FromAddress(addr);
-      MemoryChunk::IncrementLiveBytesFromGC(object->address(), object->Size());
-      marking_deque->PushBlack(object);
-      if (marking_deque->IsFull()) return;
-      offset += 2;
-      grey_objects >>= 2;
-    }
-
-    grey_objects >>= (Bitmap::kBitsPerCell - 1);
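+// Walks the grey objects on the page with a LiveObjectIterator, turns them
+// black, and pushes them onto the marking deque until the deque runs full.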
+void MarkCompactCollector::DiscoverGreyObjectsOnPage(MemoryChunk* p) {
+  DCHECK(!marking_deque()->IsFull());
+  LiveObjectIterator<kGreyObjects> it(p);
+  HeapObject* object = NULL;
+  while ((object = it.Next()) != NULL) {
+    MarkBit markbit = Marking::MarkBitFrom(object);
+    DCHECK(Marking::IsGrey(markbit));
+    Marking::GreyToBlack(markbit);
+    PushBlack(object);
+    if (marking_deque()->IsFull()) return;
   }
 }
 
 
-int MarkCompactCollector::DiscoverAndEvacuateBlackObjectsOnPage(
-    NewSpace* new_space, NewSpacePage* p) {
-  DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
-  DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
-  DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
-  DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
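+// Abstract interface for visitors that are applied to individual heap
+// objects; implemented below by the evacuation visitors.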
+class MarkCompactCollector::HeapObjectVisitor {
+ public:
+  virtual ~HeapObjectVisitor() {}
+  virtual bool Visit(HeapObject* object) = 0;
+};
 
-  MarkBit::CellType* cells = p->markbits()->cells();
-  int survivors_size = 0;
 
-  for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
-    Address cell_base = it.CurrentCellBase();
-    MarkBit::CellType* cell = it.CurrentCell();
+class MarkCompactCollector::EvacuateVisitorBase
+    : public MarkCompactCollector::HeapObjectVisitor {
+ public:
+  EvacuateVisitorBase(Heap* heap, SlotsBuffer** evacuation_slots_buffer)
+      : heap_(heap), evacuation_slots_buffer_(evacuation_slots_buffer) {}
 
-    MarkBit::CellType current_cell = *cell;
-    if (current_cell == 0) continue;
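+  // Tries to allocate room for |object| in |target_space| and, on success,
+  // migrates the object there, updating the evacuation slots buffer.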
+  bool TryEvacuateObject(PagedSpace* target_space, HeapObject* object,
+                         HeapObject** target_object) {
+    int size = object->Size();
+    AllocationAlignment alignment = object->RequiredAlignment();
+    AllocationResult allocation = target_space->AllocateRaw(size, alignment);
+    if (allocation.To(target_object)) {
+      heap_->mark_compact_collector()->MigrateObject(
+          *target_object, object, size, target_space->identity(),
+          evacuation_slots_buffer_);
+      return true;
+    }
+    return false;
+  }
 
-    int offset = 0;
-    while (current_cell != 0) {
-      int trailing_zeros = base::bits::CountTrailingZeros32(current_cell);
-      current_cell >>= trailing_zeros;
-      offset += trailing_zeros;
-      Address address = cell_base + offset * kPointerSize;
-      HeapObject* object = HeapObject::FromAddress(address);
+ protected:
+  Heap* heap_;
+  SlotsBuffer** evacuation_slots_buffer_;
+};
 
-      int size = object->Size();
-      survivors_size += size;
 
-      Heap::UpdateAllocationSiteFeedback(object, Heap::RECORD_SCRATCHPAD_SLOT);
+class MarkCompactCollector::EvacuateNewSpaceVisitor final
+    : public MarkCompactCollector::EvacuateVisitorBase {
+ public:
+  static const intptr_t kLabSize = 4 * KB;
+  static const intptr_t kMaxLabObjectSize = 256;
 
-      offset++;
-      current_cell >>= 1;
+  explicit EvacuateNewSpaceVisitor(Heap* heap,
+                                   SlotsBuffer** evacuation_slots_buffer,
+                                   HashMap* local_pretenuring_feedback)
+      : EvacuateVisitorBase(heap, evacuation_slots_buffer),
+        buffer_(LocalAllocationBuffer::InvalidBuffer()),
+        space_to_allocate_(NEW_SPACE),
+        promoted_size_(0),
+        semispace_copied_size_(0),
+        local_pretenuring_feedback_(local_pretenuring_feedback) {}
 
-      // TODO(hpayer): Refactor EvacuateObject and call this function instead.
-      if (heap()->ShouldBePromoted(object->address(), size) &&
-          TryPromoteObject(object, size)) {
-        continue;
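+  // Promotes the object to old space if the promotion heuristic triggers;
+  // otherwise copies it via AllocateTargetObject, which prefers the new-space
+  // LAB and falls back to old space when new space is exhausted.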
+  bool Visit(HeapObject* object) override {
+    heap_->UpdateAllocationSite(object, local_pretenuring_feedback_);
+    int size = object->Size();
+    HeapObject* target_object = nullptr;
+    if (heap_->ShouldBePromoted(object->address(), size) &&
+        TryEvacuateObject(heap_->old_space(), object, &target_object)) {
+      // If we end up needing more special cases, we should factor this out.
+      if (V8_UNLIKELY(target_object->IsJSArrayBuffer())) {
+        heap_->array_buffer_tracker()->Promote(
+            JSArrayBuffer::cast(target_object));
       }
+      promoted_size_ += size;
+      return true;
+    }
+    HeapObject* target = nullptr;
+    AllocationSpace space = AllocateTargetObject(object, &target);
+    heap_->mark_compact_collector()->MigrateObject(
+        HeapObject::cast(target), object, size, space,
+        (space == NEW_SPACE) ? nullptr : evacuation_slots_buffer_);
+    if (V8_UNLIKELY(target->IsJSArrayBuffer())) {
+      heap_->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(target));
+    }
+    semispace_copied_size_ += size;
+    return true;
+  }
 
-      AllocationResult allocation = new_space->AllocateRaw(size);
-      if (allocation.IsRetry()) {
-        if (!new_space->AddFreshPage()) {
-          // Shouldn't happen. We are sweeping linearly, and to-space
-          // has the same number of pages as from-space, so there is
-          // always room.
-          UNREACHABLE();
+  intptr_t promoted_size() { return promoted_size_; }
+  intptr_t semispace_copied_size() { return semispace_copied_size_; }
+
+ private:
+  enum NewSpaceAllocationMode {
+    kNonstickyBailoutOldSpace,
+    kStickyBailoutOldSpace,
+  };
+
+  inline AllocationSpace AllocateTargetObject(HeapObject* old_object,
+                                              HeapObject** target_object) {
+    const int size = old_object->Size();
+    AllocationAlignment alignment = old_object->RequiredAlignment();
+    AllocationResult allocation;
+    if (space_to_allocate_ == NEW_SPACE) {
+      if (size > kMaxLabObjectSize) {
+        allocation =
+            AllocateInNewSpace(size, alignment, kNonstickyBailoutOldSpace);
+      } else {
+        allocation = AllocateInLab(size, alignment);
+      }
+    }
+    if (allocation.IsRetry() || (space_to_allocate_ == OLD_SPACE)) {
+      allocation = AllocateInOldSpace(size, alignment);
+    }
+    bool ok = allocation.To(target_object);
+    DCHECK(ok);
+    USE(ok);
+    return space_to_allocate_;
+  }
+
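+  // Allocates a fresh LAB of kLabSize in new space and tries to merge the
+  // remainder of the previous buffer into it.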
+  inline bool NewLocalAllocationBuffer() {
+    AllocationResult result =
+        AllocateInNewSpace(kLabSize, kWordAligned, kStickyBailoutOldSpace);
+    LocalAllocationBuffer saved_old_buffer = buffer_;
+    buffer_ = LocalAllocationBuffer::FromResult(heap_, result, kLabSize);
+    if (buffer_.IsValid()) {
+      buffer_.TryMerge(&saved_old_buffer);
+      return true;
+    }
+    return false;
+  }
+
+  inline AllocationResult AllocateInNewSpace(int size_in_bytes,
+                                             AllocationAlignment alignment,
+                                             NewSpaceAllocationMode mode) {
+    AllocationResult allocation =
+        heap_->new_space()->AllocateRawSynchronized(size_in_bytes, alignment);
+    if (allocation.IsRetry()) {
+      if (!heap_->new_space()->AddFreshPageSynchronized()) {
+        if (mode == kStickyBailoutOldSpace) space_to_allocate_ = OLD_SPACE;
+      } else {
+        allocation = heap_->new_space()->AllocateRawSynchronized(size_in_bytes,
+                                                                 alignment);
+        if (allocation.IsRetry()) {
+          if (mode == kStickyBailoutOldSpace) space_to_allocate_ = OLD_SPACE;
         }
-        allocation = new_space->AllocateRaw(size);
-        DCHECK(!allocation.IsRetry());
       }
-      Object* target = allocation.ToObjectChecked();
-
-      MigrateObject(HeapObject::cast(target), object, size, NEW_SPACE);
-      heap()->IncrementSemiSpaceCopiedObjectSize(size);
     }
-    *cells = 0;
+    return allocation;
   }
-  return survivors_size;
-}
+
+  inline AllocationResult AllocateInOldSpace(int size_in_bytes,
+                                             AllocationAlignment alignment) {
+    AllocationResult allocation =
+        heap_->old_space()->AllocateRaw(size_in_bytes, alignment);
+    if (allocation.IsRetry()) {
+      FatalProcessOutOfMemory(
+          "MarkCompactCollector: semi-space copy, fallback in old gen\n");
+    }
+    return allocation;
+  }
+
+  inline AllocationResult AllocateInLab(int size_in_bytes,
+                                        AllocationAlignment alignment) {
+    AllocationResult allocation;
+    if (!buffer_.IsValid()) {
+      if (!NewLocalAllocationBuffer()) {
+        space_to_allocate_ = OLD_SPACE;
+        return AllocationResult::Retry(OLD_SPACE);
+      }
+    }
+    allocation = buffer_.AllocateRawAligned(size_in_bytes, alignment);
+    if (allocation.IsRetry()) {
+      if (!NewLocalAllocationBuffer()) {
+        space_to_allocate_ = OLD_SPACE;
+        return AllocationResult::Retry(OLD_SPACE);
+      } else {
+        allocation = buffer_.AllocateRawAligned(size_in_bytes, alignment);
+        if (allocation.IsRetry()) {
+          space_to_allocate_ = OLD_SPACE;
+          return AllocationResult::Retry(OLD_SPACE);
+        }
+      }
+    }
+    return allocation;
+  }
+
+  LocalAllocationBuffer buffer_;
+  AllocationSpace space_to_allocate_;
+  intptr_t promoted_size_;
+  intptr_t semispace_copied_size_;
+  HashMap* local_pretenuring_feedback_;
+};
 
 
-static void DiscoverGreyObjectsInSpace(Heap* heap, MarkingDeque* marking_deque,
-                                       PagedSpace* space) {
+class MarkCompactCollector::EvacuateOldSpaceVisitor final
+    : public MarkCompactCollector::EvacuateVisitorBase {
+ public:
+  EvacuateOldSpaceVisitor(Heap* heap,
+                          CompactionSpaceCollection* compaction_spaces,
+                          SlotsBuffer** evacuation_slots_buffer)
+      : EvacuateVisitorBase(heap, evacuation_slots_buffer),
+        compaction_spaces_(compaction_spaces) {}
+
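+  // Evacuates the object into the compaction space matching the identity of
+  // the page the object currently lives on.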
+  bool Visit(HeapObject* object) override {
+    CompactionSpace* target_space = compaction_spaces_->Get(
+        Page::FromAddress(object->address())->owner()->identity());
+    HeapObject* target_object = nullptr;
+    if (TryEvacuateObject(target_space, object, &target_object)) {
+      DCHECK(object->map_word().IsForwardingAddress());
+      return true;
+    }
+    return false;
+  }
+
+ private:
+  CompactionSpaceCollection* compaction_spaces_;
+};
+
+
+void MarkCompactCollector::DiscoverGreyObjectsInSpace(PagedSpace* space) {
   PageIterator it(space);
   while (it.has_next()) {
     Page* p = it.next();
-    DiscoverGreyObjectsOnPage(marking_deque, p);
-    if (marking_deque->IsFull()) return;
+    DiscoverGreyObjectsOnPage(p);
+    if (marking_deque()->IsFull()) return;
   }
 }
 
 
-static void DiscoverGreyObjectsInNewSpace(Heap* heap,
-                                          MarkingDeque* marking_deque) {
-  NewSpace* space = heap->new_space();
+void MarkCompactCollector::DiscoverGreyObjectsInNewSpace() {
+  NewSpace* space = heap()->new_space();
   NewSpacePageIterator it(space->bottom(), space->top());
   while (it.has_next()) {
     NewSpacePage* page = it.next();
-    DiscoverGreyObjectsOnPage(marking_deque, page);
-    if (marking_deque->IsFull()) return;
+    DiscoverGreyObjectsOnPage(page);
+    if (marking_deque()->IsFull()) return;
   }
 }
 
@@ -1917,7 +1773,7 @@
   if (!o->IsHeapObject()) return false;
   HeapObject* heap_object = HeapObject::cast(o);
   MarkBit mark = Marking::MarkBitFrom(heap_object);
-  return !mark.Get();
+  return Marking::IsWhite(mark);
 }
 
 
@@ -1927,7 +1783,7 @@
   DCHECK(o->IsHeapObject());
   HeapObject* heap_object = HeapObject::cast(o);
   MarkBit mark = Marking::MarkBitFrom(heap_object);
-  return !mark.Get();
+  return Marking::IsWhite(mark);
 }
 
 
@@ -1935,7 +1791,7 @@
   StringTable* string_table = heap()->string_table();
   // Mark the string table itself.
   MarkBit string_table_mark = Marking::MarkBitFrom(string_table);
-  if (!string_table_mark.Get()) {
+  if (Marking::IsWhite(string_table_mark)) {
     // String table could have already been marked by visiting the handles list.
     SetMark(string_table, string_table_mark);
   }
@@ -1959,8 +1815,6 @@
   // Handle the string table specially.
   MarkStringTable(visitor);
 
-  MarkWeakObjectToCodeTable();
-
   // There may be overflowed objects in the heap.  Visit them now.
   while (marking_deque_.overflowed()) {
     RefillMarkingDeque();
@@ -1969,7 +1823,8 @@
 }
 
 
-void MarkCompactCollector::MarkImplicitRefGroups() {
+void MarkCompactCollector::MarkImplicitRefGroups(
+    MarkObjectFunction mark_object) {
   List<ImplicitRefGroup*>* ref_groups =
       isolate()->global_handles()->implicit_ref_groups();
 
@@ -1987,9 +1842,7 @@
     // A parent object is marked, so mark all child heap objects.
     for (size_t j = 0; j < entry->length; ++j) {
       if ((*children[j])->IsHeapObject()) {
-        HeapObject* child = HeapObject::cast(*children[j]);
-        MarkBit mark = Marking::MarkBitFrom(child);
-        MarkObject(child, mark);
+        mark_object(heap(), HeapObject::cast(*children[j]));
       }
     }
 
@@ -2001,16 +1854,6 @@
 }
 
 
-void MarkCompactCollector::MarkWeakObjectToCodeTable() {
-  HeapObject* weak_object_to_code_table =
-      HeapObject::cast(heap()->weak_object_to_code_table());
-  if (!IsMarked(weak_object_to_code_table)) {
-    MarkBit mark = Marking::MarkBitFrom(weak_object_to_code_table);
-    SetMark(weak_object_to_code_table, mark);
-  }
-}
-
-
 // Mark all objects reachable from the objects on the marking stack.
 // Before: the marking stack contains zero or more heap object pointers.
 // After: the marking stack is empty, and all objects reachable from the
@@ -2042,33 +1885,23 @@
 // overflowed objects in the heap so the overflow flag on the marking stack
 // is cleared.
 void MarkCompactCollector::RefillMarkingDeque() {
+  isolate()->CountUsage(v8::Isolate::UseCounterFeature::kMarkDequeOverflow);
   DCHECK(marking_deque_.overflowed());
 
-  DiscoverGreyObjectsInNewSpace(heap(), &marking_deque_);
+  DiscoverGreyObjectsInNewSpace();
   if (marking_deque_.IsFull()) return;
 
-  DiscoverGreyObjectsInSpace(heap(), &marking_deque_,
-                             heap()->old_pointer_space());
+  DiscoverGreyObjectsInSpace(heap()->old_space());
   if (marking_deque_.IsFull()) return;
 
-  DiscoverGreyObjectsInSpace(heap(), &marking_deque_, heap()->old_data_space());
+  DiscoverGreyObjectsInSpace(heap()->code_space());
   if (marking_deque_.IsFull()) return;
 
-  DiscoverGreyObjectsInSpace(heap(), &marking_deque_, heap()->code_space());
-  if (marking_deque_.IsFull()) return;
-
-  DiscoverGreyObjectsInSpace(heap(), &marking_deque_, heap()->map_space());
-  if (marking_deque_.IsFull()) return;
-
-  DiscoverGreyObjectsInSpace(heap(), &marking_deque_, heap()->cell_space());
-  if (marking_deque_.IsFull()) return;
-
-  DiscoverGreyObjectsInSpace(heap(), &marking_deque_,
-                             heap()->property_cell_space());
+  DiscoverGreyObjectsInSpace(heap()->map_space());
   if (marking_deque_.IsFull()) return;
 
   LargeObjectIterator lo_it(heap()->lo_space());
-  DiscoverGreyObjectsWithIterator(heap(), &marking_deque_, &lo_it);
+  DiscoverGreyObjectsWithIterator(&lo_it);
   if (marking_deque_.IsFull()) return;
 
   marking_deque_.ClearOverflowed();
@@ -2098,7 +1931,7 @@
     if (!only_process_harmony_weak_collections) {
       isolate()->global_handles()->IterateObjectGroups(
           visitor, &IsUnmarkedHeapObjectWithHeap);
-      MarkImplicitRefGroups();
+      MarkImplicitRefGroups(&MarkCompactMarkingVisitor::MarkObject);
     }
     ProcessWeakCollections();
     work_to_do = !marking_deque_.IsEmpty();
@@ -2116,7 +1949,7 @@
     if (it.frame()->type() == StackFrame::OPTIMIZED) {
       Code* code = it.frame()->LookupCode();
       if (!code->CanDeoptAt(it.frame()->pc())) {
-        code->CodeIterateBody(visitor);
+        Code::BodyDescriptor::IterateBody(code, visitor);
       }
       ProcessMarkingDeque();
       return;
@@ -2125,40 +1958,84 @@
 }
 
 
-void MarkCompactCollector::EnsureMarkingDequeIsCommittedAndInitialize() {
+void MarkCompactCollector::EnsureMarkingDequeIsReserved() {
+  DCHECK(!marking_deque_.in_use());
   if (marking_deque_memory_ == NULL) {
-    marking_deque_memory_ = new base::VirtualMemory(4 * MB);
+    marking_deque_memory_ = new base::VirtualMemory(kMaxMarkingDequeSize);
+    marking_deque_memory_committed_ = 0;
   }
-  if (!marking_deque_memory_committed_) {
-    bool success = marking_deque_memory_->Commit(
-        reinterpret_cast<Address>(marking_deque_memory_->address()),
-        marking_deque_memory_->size(),
+  if (marking_deque_memory_ == NULL) {
+    V8::FatalProcessOutOfMemory("EnsureMarkingDequeIsReserved");
+  }
+}
+
+
+void MarkCompactCollector::EnsureMarkingDequeIsCommitted(size_t max_size) {
+  // If the marking deque is too small, we try to allocate a bigger one.
+  // If that fails, make do with a smaller one.
+  CHECK(!marking_deque_.in_use());
+  for (size_t size = max_size; size >= kMinMarkingDequeSize; size >>= 1) {
+    base::VirtualMemory* memory = marking_deque_memory_;
+    size_t currently_committed = marking_deque_memory_committed_;
+
+    if (currently_committed == size) return;
+
+    if (currently_committed > size) {
+      bool success = marking_deque_memory_->Uncommit(
+          reinterpret_cast<Address>(marking_deque_memory_->address()) + size,
+          currently_committed - size);
+      if (success) {
+        marking_deque_memory_committed_ = size;
+        return;
+      }
+      UNREACHABLE();
+    }
+
+    bool success = memory->Commit(
+        reinterpret_cast<Address>(memory->address()) + currently_committed,
+        size - currently_committed,
         false);  // Not executable.
-    CHECK(success);
-    marking_deque_memory_committed_ = true;
-    InitializeMarkingDeque();
+    if (success) {
+      marking_deque_memory_committed_ = size;
+      return;
+    }
   }
+  V8::FatalProcessOutOfMemory("EnsureMarkingDequeIsCommitted");
 }
 
 
 void MarkCompactCollector::InitializeMarkingDeque() {
-  if (marking_deque_memory_committed_) {
-    Address addr = static_cast<Address>(marking_deque_memory_->address());
-    size_t size = marking_deque_memory_->size();
-    if (FLAG_force_marking_deque_overflows) size = 64 * kPointerSize;
-    marking_deque_.Initialize(addr, addr + size);
-  }
+  DCHECK(!marking_deque_.in_use());
+  DCHECK(marking_deque_memory_committed_ > 0);
+  Address addr = static_cast<Address>(marking_deque_memory_->address());
+  size_t size = marking_deque_memory_committed_;
+  if (FLAG_force_marking_deque_overflows) size = 64 * kPointerSize;
+  marking_deque_.Initialize(addr, addr + size);
 }
 
 
-void MarkCompactCollector::UncommitMarkingDeque() {
-  if (marking_deque_memory_committed_) {
-    bool success = marking_deque_memory_->Uncommit(
-        reinterpret_cast<Address>(marking_deque_memory_->address()),
-        marking_deque_memory_->size());
-    CHECK(success);
-    marking_deque_memory_committed_ = false;
+void MarkingDeque::Initialize(Address low, Address high) {
+  DCHECK(!in_use_);
+  HeapObject** obj_low = reinterpret_cast<HeapObject**>(low);
+  HeapObject** obj_high = reinterpret_cast<HeapObject**>(high);
+  array_ = obj_low;
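+  // Round the capacity down to a power of two so that indices can wrap
+  // around using a simple mask.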
+  mask_ = base::bits::RoundDownToPowerOfTwo32(
+              static_cast<uint32_t>(obj_high - obj_low)) -
+          1;
+  top_ = bottom_ = 0;
+  overflowed_ = false;
+  in_use_ = true;
+}
+
+
+void MarkingDeque::Uninitialize(bool aborting) {
+  if (!aborting) {
+    DCHECK(IsEmpty());
+    DCHECK(!overflowed_);
   }
+  DCHECK(in_use_);
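+  // Reset the indices to a recognizable sentinel value.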
+  top_ = bottom_ = 0xdecbad;
+  in_use_ = false;
 }
 
 
@@ -2166,20 +2043,26 @@
   GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_MARK);
   double start_time = 0.0;
   if (FLAG_print_cumulative_gc_stat) {
-    start_time = base::OS::TimeCurrentMillis();
+    start_time = heap_->MonotonicallyIncreasingTimeInMs();
   }
   // The recursive GC marker detects when it is nearing stack overflow,
   // and switches to a different marking system.  JS interrupts interfere
   // with the C stack limit check.
   PostponeInterruptsScope postpone(isolate());
 
-  IncrementalMarking* incremental_marking = heap_->incremental_marking();
-  if (was_marked_incrementally_) {
-    incremental_marking->Finalize();
-  } else {
-    // Abort any pending incremental activities e.g. incremental sweeping.
-    incremental_marking->Abort();
-    InitializeMarkingDeque();
+  {
+    GCTracer::Scope gc_scope(heap()->tracer(),
+                             GCTracer::Scope::MC_MARK_FINISH_INCREMENTAL);
+    IncrementalMarking* incremental_marking = heap_->incremental_marking();
+    if (was_marked_incrementally_) {
+      incremental_marking->Finalize();
+    } else {
+      // Abort any pending incremental activities e.g. incremental sweeping.
+      incremental_marking->Stop();
+      if (marking_deque_.in_use()) {
+        marking_deque_.Uninitialize(true);
+      }
+    }
   }
 
 #ifdef DEBUG
@@ -2187,45 +2070,26 @@
   state_ = MARK_LIVE_OBJECTS;
 #endif
 
-  EnsureMarkingDequeIsCommittedAndInitialize();
+  EnsureMarkingDequeIsCommittedAndInitialize(
+      MarkCompactCollector::kMaxMarkingDequeSize);
 
-  PrepareForCodeFlushing();
-
-  if (was_marked_incrementally_) {
-    // There is no write barrier on cells so we have to scan them now at the end
-    // of the incremental marking.
-    {
-      HeapObjectIterator cell_iterator(heap()->cell_space());
-      HeapObject* cell;
-      while ((cell = cell_iterator.Next()) != NULL) {
-        DCHECK(cell->IsCell());
-        if (IsMarked(cell)) {
-          int offset = Cell::kValueOffset;
-          MarkCompactMarkingVisitor::VisitPointer(
-              heap(), reinterpret_cast<Object**>(cell->address() + offset));
-        }
-      }
-    }
-    {
-      HeapObjectIterator js_global_property_cell_iterator(
-          heap()->property_cell_space());
-      HeapObject* cell;
-      while ((cell = js_global_property_cell_iterator.Next()) != NULL) {
-        DCHECK(cell->IsPropertyCell());
-        if (IsMarked(cell)) {
-          MarkCompactMarkingVisitor::VisitPropertyCell(cell->map(), cell);
-        }
-      }
-    }
+  {
+    GCTracer::Scope gc_scope(heap()->tracer(),
+                             GCTracer::Scope::MC_MARK_PREPARE_CODE_FLUSH);
+    PrepareForCodeFlushing();
   }
 
   RootMarkingVisitor root_visitor(heap());
-  MarkRoots(&root_visitor);
-
-  ProcessTopOptimizedFrame(&root_visitor);
 
   {
-    GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_WEAKCLOSURE);
+    GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_MARK_ROOTS);
+    MarkRoots(&root_visitor);
+    ProcessTopOptimizedFrame(&root_visitor);
+  }
+
+  {
+    GCTracer::Scope gc_scope(heap()->tracer(),
+                             GCTracer::Scope::MC_MARK_WEAK_CLOSURE);
 
     // The objects reachable from the roots are marked, yet unreachable
     // objects are unmarked.  Mark objects reachable due to host
@@ -2253,274 +2117,254 @@
     ProcessEphemeralMarking(&root_visitor, true);
   }
 
-  AfterMarking();
-
   if (FLAG_print_cumulative_gc_stat) {
-    heap_->tracer()->AddMarkingTime(base::OS::TimeCurrentMillis() - start_time);
+    heap_->tracer()->AddMarkingTime(heap_->MonotonicallyIncreasingTimeInMs() -
+                                    start_time);
   }
-}
-
-
-void MarkCompactCollector::AfterMarking() {
-  // Prune the string table removing all strings only pointed to by the
-  // string table.  Cannot use string_table() here because the string
-  // table is marked.
-  StringTable* string_table = heap()->string_table();
-  InternalizedStringTableCleaner internalized_visitor(heap());
-  string_table->IterateElements(&internalized_visitor);
-  string_table->ElementsRemoved(internalized_visitor.PointersRemoved());
-
-  ExternalStringTableCleaner external_visitor(heap());
-  heap()->external_string_table_.Iterate(&external_visitor);
-  heap()->external_string_table_.CleanUp();
-
-  // Process the weak references.
-  MarkCompactWeakObjectRetainer mark_compact_object_retainer;
-  heap()->ProcessWeakReferences(&mark_compact_object_retainer);
-
-  // Remove object groups after marking phase.
-  heap()->isolate()->global_handles()->RemoveObjectGroups();
-  heap()->isolate()->global_handles()->RemoveImplicitRefGroups();
-
-  // Flush code from collected candidates.
-  if (is_code_flushing_enabled()) {
-    code_flusher_->ProcessCandidates();
-    // If incremental marker does not support code flushing, we need to
-    // disable it before incremental marking steps for next cycle.
-    if (FLAG_flush_code && !FLAG_flush_code_incrementally) {
-      EnableCodeFlushing(false);
-    }
-  }
-
   if (FLAG_track_gc_object_stats) {
-    heap()->CheckpointObjectStats();
+    if (FLAG_trace_gc_object_stats) {
+      heap()->object_stats_->TraceObjectStats();
+    }
+    heap()->object_stats_->CheckpointObjectStats();
   }
 }
 
 
 void MarkCompactCollector::ClearNonLiveReferences() {
-  // Iterate over the map space, setting map transitions that go from
-  // a marked map to an unmarked map to null transitions.  This action
-  // is carried out only on maps of JSObjects and related subtypes.
-  HeapObjectIterator map_iterator(heap()->map_space());
-  for (HeapObject* obj = map_iterator.Next(); obj != NULL;
-       obj = map_iterator.Next()) {
-    Map* map = Map::cast(obj);
+  GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_CLEAR);
 
-    if (!map->CanTransition()) continue;
+  {
+    GCTracer::Scope gc_scope(heap()->tracer(),
+                             GCTracer::Scope::MC_CLEAR_STRING_TABLE);
 
-    MarkBit map_mark = Marking::MarkBitFrom(map);
-    ClearNonLivePrototypeTransitions(map);
-    ClearNonLiveMapTransitions(map, map_mark);
+    // Prune the string table removing all strings only pointed to by the
+    // string table.  Cannot use string_table() here because the string
+    // table is marked.
+    StringTable* string_table = heap()->string_table();
+    InternalizedStringTableCleaner internalized_visitor(heap());
+    string_table->IterateElements(&internalized_visitor);
+    string_table->ElementsRemoved(internalized_visitor.PointersRemoved());
 
-    if (map_mark.Get()) {
-      ClearNonLiveDependentCode(map->dependent_code());
-    } else {
-      ClearDependentCode(map->dependent_code());
-      map->set_dependent_code(DependentCode::cast(heap()->empty_fixed_array()));
-    }
+    ExternalStringTableCleaner external_visitor(heap());
+    heap()->external_string_table_.Iterate(&external_visitor);
+    heap()->external_string_table_.CleanUp();
   }
 
-  // Iterate over property cell space, removing dependent code that is not
-  // otherwise kept alive by strong references.
-  HeapObjectIterator cell_iterator(heap_->property_cell_space());
-  for (HeapObject* cell = cell_iterator.Next(); cell != NULL;
-       cell = cell_iterator.Next()) {
-    if (IsMarked(cell)) {
-      ClearNonLiveDependentCode(PropertyCell::cast(cell)->dependent_code());
-    }
+  {
+    GCTracer::Scope gc_scope(heap()->tracer(),
+                             GCTracer::Scope::MC_CLEAR_WEAK_LISTS);
+    // Process the weak references.
+    MarkCompactWeakObjectRetainer mark_compact_object_retainer;
+    heap()->ProcessAllWeakReferences(&mark_compact_object_retainer);
   }
 
-  // Iterate over allocation sites, removing dependent code that is not
-  // otherwise kept alive by strong references.
-  Object* undefined = heap()->undefined_value();
-  for (Object* site = heap()->allocation_sites_list(); site != undefined;
-       site = AllocationSite::cast(site)->weak_next()) {
-    if (IsMarked(site)) {
-      ClearNonLiveDependentCode(AllocationSite::cast(site)->dependent_code());
-    }
+  {
+    GCTracer::Scope gc_scope(heap()->tracer(),
+                             GCTracer::Scope::MC_CLEAR_GLOBAL_HANDLES);
+
+    // Remove object groups after marking phase.
+    heap()->isolate()->global_handles()->RemoveObjectGroups();
+    heap()->isolate()->global_handles()->RemoveImplicitRefGroups();
   }
 
-  if (heap_->weak_object_to_code_table()->IsHashTable()) {
-    WeakHashTable* table =
-        WeakHashTable::cast(heap_->weak_object_to_code_table());
-    uint32_t capacity = table->Capacity();
-    for (uint32_t i = 0; i < capacity; i++) {
-      uint32_t key_index = table->EntryToIndex(i);
-      Object* key = table->get(key_index);
-      if (!table->IsKey(key)) continue;
-      uint32_t value_index = table->EntryToValueIndex(i);
-      Object* value = table->get(value_index);
-      if (key->IsCell() && !IsMarked(key)) {
-        Cell* cell = Cell::cast(key);
-        Object* object = cell->value();
-        if (IsMarked(object)) {
-          MarkBit mark = Marking::MarkBitFrom(cell);
-          SetMark(cell, mark);
-          Object** value_slot = HeapObject::RawField(cell, Cell::kValueOffset);
-          RecordSlot(value_slot, value_slot, *value_slot);
-        }
-      }
-      if (IsMarked(key)) {
-        if (!IsMarked(value)) {
-          HeapObject* obj = HeapObject::cast(value);
-          MarkBit mark = Marking::MarkBitFrom(obj);
-          SetMark(obj, mark);
-        }
-        ClearNonLiveDependentCode(DependentCode::cast(value));
-      } else {
-        ClearDependentCode(DependentCode::cast(value));
-        table->set(key_index, heap_->the_hole_value());
-        table->set(value_index, heap_->the_hole_value());
-        table->ElementRemoved();
+  // Flush code from collected candidates.
+  if (is_code_flushing_enabled()) {
+    GCTracer::Scope gc_scope(heap()->tracer(),
+                             GCTracer::Scope::MC_CLEAR_CODE_FLUSH);
+    code_flusher_->ProcessCandidates();
+  }
+
+
+  DependentCode* dependent_code_list;
+  Object* non_live_map_list;
+  ClearWeakCells(&non_live_map_list, &dependent_code_list);
+
+  {
+    GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_CLEAR_MAPS);
+    ClearSimpleMapTransitions(non_live_map_list);
+    ClearFullMapTransitions();
+  }
+
+  MarkDependentCodeForDeoptimization(dependent_code_list);
+
+  ClearWeakCollections();
+
+  ClearInvalidStoreAndSlotsBufferEntries();
+}
+
+
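+// Marks code on the dependent-code list for deoptimization and drops
+// weak_object_to_code_table entries whose weak cell keys have been cleared.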
+void MarkCompactCollector::MarkDependentCodeForDeoptimization(
+    DependentCode* list_head) {
+  GCTracer::Scope gc_scope(heap()->tracer(),
+                           GCTracer::Scope::MC_CLEAR_DEPENDENT_CODE);
+  Isolate* isolate = this->isolate();
+  DependentCode* current = list_head;
+  while (current->length() > 0) {
+    have_code_to_deoptimize_ |= current->MarkCodeForDeoptimization(
+        isolate, DependentCode::kWeakCodeGroup);
+    current = current->next_link();
+  }
+
+  WeakHashTable* table = heap_->weak_object_to_code_table();
+  uint32_t capacity = table->Capacity();
+  for (uint32_t i = 0; i < capacity; i++) {
+    uint32_t key_index = table->EntryToIndex(i);
+    Object* key = table->get(key_index);
+    if (!table->IsKey(key)) continue;
+    uint32_t value_index = table->EntryToValueIndex(i);
+    Object* value = table->get(value_index);
+    DCHECK(key->IsWeakCell());
+    if (WeakCell::cast(key)->cleared()) {
+      have_code_to_deoptimize_ |=
+          DependentCode::cast(value)->MarkCodeForDeoptimization(
+              isolate, DependentCode::kWeakCodeGroup);
+      table->set(key_index, heap_->the_hole_value());
+      table->set(value_index, heap_->the_hole_value());
+      table->ElementRemoved();
+    }
+  }
+}
+
+
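+// Walks the list of weak cells that hold dead maps and clears the simple
+// transition of each live parent map that still points at one of them.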
+void MarkCompactCollector::ClearSimpleMapTransitions(
+    Object* non_live_map_list) {
+  Object* the_hole_value = heap()->the_hole_value();
+  Object* weak_cell_obj = non_live_map_list;
+  while (weak_cell_obj != Smi::FromInt(0)) {
+    WeakCell* weak_cell = WeakCell::cast(weak_cell_obj);
+    Map* map = Map::cast(weak_cell->value());
+    DCHECK(Marking::IsWhite(Marking::MarkBitFrom(map)));
+    Object* potential_parent = map->constructor_or_backpointer();
+    if (potential_parent->IsMap()) {
+      Map* parent = Map::cast(potential_parent);
+      if (Marking::IsBlackOrGrey(Marking::MarkBitFrom(parent)) &&
+          parent->raw_transitions() == weak_cell) {
+        ClearSimpleMapTransition(parent, map);
       }
     }
+    weak_cell->clear();
+    weak_cell_obj = weak_cell->next();
+    weak_cell->clear_next(the_hole_value);
   }
 }
 
 
-void MarkCompactCollector::ClearNonLivePrototypeTransitions(Map* map) {
-  int number_of_transitions = map->NumberOfProtoTransitions();
-  FixedArray* prototype_transitions = map->GetPrototypeTransitions();
-
-  int new_number_of_transitions = 0;
-  const int header = Map::kProtoTransitionHeaderSize;
-  const int proto_offset = header + Map::kProtoTransitionPrototypeOffset;
-  const int map_offset = header + Map::kProtoTransitionMapOffset;
-  const int step = Map::kProtoTransitionElementsPerEntry;
-  for (int i = 0; i < number_of_transitions; i++) {
-    Object* prototype = prototype_transitions->get(proto_offset + i * step);
-    Object* cached_map = prototype_transitions->get(map_offset + i * step);
-    if (IsMarked(prototype) && IsMarked(cached_map)) {
-      DCHECK(!prototype->IsUndefined());
-      int proto_index = proto_offset + new_number_of_transitions * step;
-      int map_index = map_offset + new_number_of_transitions * step;
-      if (new_number_of_transitions != i) {
-        prototype_transitions->set(proto_index, prototype,
-                                   UPDATE_WRITE_BARRIER);
-        prototype_transitions->set(map_index, cached_map, SKIP_WRITE_BARRIER);
-      }
-      Object** slot = prototype_transitions->RawFieldOfElementAt(proto_index);
-      RecordSlot(slot, slot, prototype);
-      new_number_of_transitions++;
-    }
-  }
-
-  if (new_number_of_transitions != number_of_transitions) {
-    map->SetNumberOfProtoTransitions(new_number_of_transitions);
-  }
-
-  // Fill slots that became free with undefined value.
-  for (int i = new_number_of_transitions * step;
-       i < number_of_transitions * step; i++) {
-    prototype_transitions->set_undefined(header + i);
-  }
-}
-
-
-void MarkCompactCollector::ClearNonLiveMapTransitions(Map* map,
-                                                      MarkBit map_mark) {
-  Object* potential_parent = map->GetBackPointer();
-  if (!potential_parent->IsMap()) return;
-  Map* parent = Map::cast(potential_parent);
-
-  // Follow back pointer, check whether we are dealing with a map transition
-  // from a live map to a dead path and in case clear transitions of parent.
-  bool current_is_alive = map_mark.Get();
-  bool parent_is_alive = Marking::MarkBitFrom(parent).Get();
-  if (!current_is_alive && parent_is_alive) {
-    ClearMapTransitions(parent);
-  }
-}
-
-
-// Clear a possible back pointer in case the transition leads to a dead map.
-// Return true in case a back pointer has been cleared and false otherwise.
-bool MarkCompactCollector::ClearMapBackPointer(Map* target) {
-  if (Marking::MarkBitFrom(target).Get()) return false;
-  target->SetBackPointer(heap_->undefined_value(), SKIP_WRITE_BARRIER);
-  return true;
-}
-
-
-void MarkCompactCollector::ClearMapTransitions(Map* map) {
-  // If there are no transitions to be cleared, return.
-  // TODO(verwaest) Should be an assert, otherwise back pointers are not
-  // properly cleared.
-  if (!map->HasTransitionArray()) return;
-
-  TransitionArray* t = map->transitions();
-
-  int transition_index = 0;
-
+void MarkCompactCollector::ClearSimpleMapTransition(Map* map,
+                                                    Map* dead_transition) {
+  // A previously existing simple transition (stored in a WeakCell) is going
+  // to be cleared. Clear the useless cell pointer, and take ownership
+  // of the descriptor array.
+  map->set_raw_transitions(Smi::FromInt(0));
+  int number_of_own_descriptors = map->NumberOfOwnDescriptors();
   DescriptorArray* descriptors = map->instance_descriptors();
-  bool descriptors_owner_died = false;
+  if (descriptors == dead_transition->instance_descriptors() &&
+      number_of_own_descriptors > 0) {
+    TrimDescriptorArray(map, descriptors);
+    DCHECK(descriptors->number_of_descriptors() == number_of_own_descriptors);
+    map->set_owns_descriptors(true);
+  }
+}
 
-  // Compact all live descriptors to the left.
-  for (int i = 0; i < t->number_of_transitions(); ++i) {
-    Map* target = t->GetTarget(i);
-    if (ClearMapBackPointer(target)) {
-      if (target->instance_descriptors() == descriptors) {
+
+void MarkCompactCollector::ClearFullMapTransitions() {
+  HeapObject* undefined = heap()->undefined_value();
+  Object* obj = heap()->encountered_transition_arrays();
+  while (obj != Smi::FromInt(0)) {
+    TransitionArray* array = TransitionArray::cast(obj);
+    int num_transitions = array->number_of_entries();
+    DCHECK_EQ(TransitionArray::NumberOfTransitions(array), num_transitions);
+    if (num_transitions > 0) {
+      Map* map = array->GetTarget(0);
+      Map* parent = Map::cast(map->constructor_or_backpointer());
+      bool parent_is_alive =
+          Marking::IsBlackOrGrey(Marking::MarkBitFrom(parent));
+      DescriptorArray* descriptors =
+          parent_is_alive ? parent->instance_descriptors() : nullptr;
+      bool descriptors_owner_died =
+          CompactTransitionArray(parent, array, descriptors);
+      if (descriptors_owner_died) {
+        TrimDescriptorArray(parent, descriptors);
+      }
+    }
+    obj = array->next_link();
+    array->set_next_link(undefined, SKIP_WRITE_BARRIER);
+  }
+  heap()->set_encountered_transition_arrays(Smi::FromInt(0));
+}
+
+
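+// Compacts the live transitions of |transitions| to the front and returns
+// whether |descriptors| lost its owner to a dead transition target.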
+bool MarkCompactCollector::CompactTransitionArray(
+    Map* map, TransitionArray* transitions, DescriptorArray* descriptors) {
+  int num_transitions = transitions->number_of_entries();
+  bool descriptors_owner_died = false;
+  int transition_index = 0;
+  // Compact all live transitions to the left.
+  for (int i = 0; i < num_transitions; ++i) {
+    Map* target = transitions->GetTarget(i);
+    DCHECK_EQ(target->constructor_or_backpointer(), map);
+    if (Marking::IsWhite(Marking::MarkBitFrom(target))) {
+      if (descriptors != nullptr &&
+          target->instance_descriptors() == descriptors) {
         descriptors_owner_died = true;
       }
     } else {
       if (i != transition_index) {
-        Name* key = t->GetKey(i);
-        t->SetKey(transition_index, key);
-        Object** key_slot = t->GetKeySlot(transition_index);
-        RecordSlot(key_slot, key_slot, key);
+        Name* key = transitions->GetKey(i);
+        transitions->SetKey(transition_index, key);
+        Object** key_slot = transitions->GetKeySlot(transition_index);
+        RecordSlot(transitions, key_slot, key);
         // Target slots do not need to be recorded since maps are not compacted.
-        t->SetTarget(transition_index, t->GetTarget(i));
+        transitions->SetTarget(transition_index, transitions->GetTarget(i));
       }
       transition_index++;
     }
   }
-
   // If there are no transitions to be cleared, return.
-  // TODO(verwaest) Should be an assert, otherwise back pointers are not
-  // properly cleared.
-  if (transition_index == t->number_of_transitions()) return;
-
-  int number_of_own_descriptors = map->NumberOfOwnDescriptors();
-
-  if (descriptors_owner_died) {
-    if (number_of_own_descriptors > 0) {
-      TrimDescriptorArray(map, descriptors, number_of_own_descriptors);
-      DCHECK(descriptors->number_of_descriptors() == number_of_own_descriptors);
-      map->set_owns_descriptors(true);
-    } else {
-      DCHECK(descriptors == heap_->empty_descriptor_array());
-    }
+  if (transition_index == num_transitions) {
+    DCHECK(!descriptors_owner_died);
+    return false;
   }
-
   // Note that we never eliminate a transition array, though we might right-trim
   // such that number_of_transitions() == 0. If this assumption changes,
   // TransitionArray::Insert() will need to deal with the case that a transition
   // array disappeared during GC.
-  int trim = t->number_of_transitions_storage() - transition_index;
+  int trim = TransitionArray::Capacity(transitions) - transition_index;
   if (trim > 0) {
-    heap_->RightTrimFixedArray<Heap::FROM_GC>(
-        t, t->IsSimpleTransition() ? trim
-                                   : trim * TransitionArray::kTransitionSize);
-    t->SetNumberOfTransitions(transition_index);
+    heap_->RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(
+        transitions, trim * TransitionArray::kTransitionSize);
+    transitions->SetNumberOfTransitions(transition_index);
   }
-  DCHECK(map->HasTransitionArray());
+  return descriptors_owner_died;
 }
 
 
 void MarkCompactCollector::TrimDescriptorArray(Map* map,
-                                               DescriptorArray* descriptors,
-                                               int number_of_own_descriptors) {
+                                               DescriptorArray* descriptors) {
+  int number_of_own_descriptors = map->NumberOfOwnDescriptors();
+  if (number_of_own_descriptors == 0) {
+    DCHECK(descriptors == heap_->empty_descriptor_array());
+    return;
+  }
+
   int number_of_descriptors = descriptors->number_of_descriptors_storage();
   int to_trim = number_of_descriptors - number_of_own_descriptors;
-  if (to_trim == 0) return;
+  if (to_trim > 0) {
+    heap_->RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(
+        descriptors, to_trim * DescriptorArray::kDescriptorSize);
+    descriptors->SetNumberOfDescriptors(number_of_own_descriptors);
 
-  heap_->RightTrimFixedArray<Heap::FROM_GC>(
-      descriptors, to_trim * DescriptorArray::kDescriptorSize);
-  descriptors->SetNumberOfDescriptors(number_of_own_descriptors);
+    if (descriptors->HasEnumCache()) TrimEnumCache(map, descriptors);
+    descriptors->Sort();
 
-  if (descriptors->HasEnumCache()) TrimEnumCache(map, descriptors);
-  descriptors->Sort();
+    if (FLAG_unbox_double_fields) {
+      LayoutDescriptor* layout_descriptor = map->layout_descriptor();
+      layout_descriptor = layout_descriptor->Trim(heap_, map, descriptors,
+                                                  number_of_own_descriptors);
+      SLOW_DCHECK(layout_descriptor->IsConsistentWithMap(map, true));
+    }
+  }
+  DCHECK(descriptors->number_of_descriptors() == number_of_own_descriptors);
+  map->set_owns_descriptors(true);
 }
 
 
@@ -2528,7 +2372,8 @@
                                          DescriptorArray* descriptors) {
   int live_enum = map->EnumLength();
   if (live_enum == kInvalidEnumCacheSentinel) {
-    live_enum = map->NumberOfDescribedProperties(OWN_DESCRIPTORS, DONT_ENUM);
+    live_enum =
+        map->NumberOfDescribedProperties(OWN_DESCRIPTORS, ENUMERABLE_STRINGS);
   }
   if (live_enum == 0) return descriptors->ClearEnumCache();
 
@@ -2536,82 +2381,17 @@
 
   int to_trim = enum_cache->length() - live_enum;
   if (to_trim <= 0) return;
-  heap_->RightTrimFixedArray<Heap::FROM_GC>(descriptors->GetEnumCache(),
-                                            to_trim);
+  heap_->RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(
+      descriptors->GetEnumCache(), to_trim);
 
   if (!descriptors->HasEnumIndicesCache()) return;
   FixedArray* enum_indices_cache = descriptors->GetEnumIndicesCache();
-  heap_->RightTrimFixedArray<Heap::FROM_GC>(enum_indices_cache, to_trim);
-}
-
-
-void MarkCompactCollector::ClearDependentCode(DependentCode* entries) {
-  DisallowHeapAllocation no_allocation;
-  DependentCode::GroupStartIndexes starts(entries);
-  int number_of_entries = starts.number_of_entries();
-  if (number_of_entries == 0) return;
-  int g = DependentCode::kWeakCodeGroup;
-  for (int i = starts.at(g); i < starts.at(g + 1); i++) {
-    // If the entry is compilation info then the map must be alive,
-    // and ClearDependentCode shouldn't be called.
-    DCHECK(entries->is_code_at(i));
-    Code* code = entries->code_at(i);
-    if (IsMarked(code) && !code->marked_for_deoptimization()) {
-      DependentCode::SetMarkedForDeoptimization(
-          code, static_cast<DependentCode::DependencyGroup>(g));
-      code->InvalidateEmbeddedObjects();
-      have_code_to_deoptimize_ = true;
-    }
-  }
-  for (int i = 0; i < number_of_entries; i++) {
-    entries->clear_at(i);
-  }
-}
-
-
-int MarkCompactCollector::ClearNonLiveDependentCodeInGroup(
-    DependentCode* entries, int group, int start, int end, int new_start) {
-  int survived = 0;
-  for (int i = start; i < end; i++) {
-    Object* obj = entries->object_at(i);
-    DCHECK(obj->IsCode() || IsMarked(obj));
-    if (IsMarked(obj) &&
-        (!obj->IsCode() || !WillBeDeoptimized(Code::cast(obj)))) {
-      if (new_start + survived != i) {
-        entries->set_object_at(new_start + survived, obj);
-      }
-      Object** slot = entries->slot_at(new_start + survived);
-      RecordSlot(slot, slot, obj);
-      survived++;
-    }
-  }
-  entries->set_number_of_entries(
-      static_cast<DependentCode::DependencyGroup>(group), survived);
-  return survived;
-}
-
-
-void MarkCompactCollector::ClearNonLiveDependentCode(DependentCode* entries) {
-  DisallowHeapAllocation no_allocation;
-  DependentCode::GroupStartIndexes starts(entries);
-  int number_of_entries = starts.number_of_entries();
-  if (number_of_entries == 0) return;
-  int new_number_of_entries = 0;
-  // Go through all groups, remove dead codes and compact.
-  for (int g = 0; g < DependentCode::kGroupCount; g++) {
-    int survived = ClearNonLiveDependentCodeInGroup(
-        entries, g, starts.at(g), starts.at(g + 1), new_number_of_entries);
-    new_number_of_entries += survived;
-  }
-  for (int i = new_number_of_entries; i < number_of_entries; i++) {
-    entries->clear_at(i);
-  }
+  heap_->RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(enum_indices_cache,
+                                                          to_trim);
 }
 
 
 void MarkCompactCollector::ProcessWeakCollections() {
-  GCTracer::Scope gc_scope(heap()->tracer(),
-                           GCTracer::Scope::MC_WEAKCOLLECTION_PROCESS);
   Object* weak_collection_obj = heap()->encountered_weak_collections();
   while (weak_collection_obj != Smi::FromInt(0)) {
     JSWeakCollection* weak_collection =
@@ -2619,15 +2399,14 @@
     DCHECK(MarkCompactCollector::IsMarked(weak_collection));
     if (weak_collection->table()->IsHashTable()) {
       ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table());
-      Object** anchor = reinterpret_cast<Object**>(table->address());
       for (int i = 0; i < table->Capacity(); i++) {
         if (MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
           Object** key_slot =
               table->RawFieldOfElementAt(ObjectHashTable::EntryToIndex(i));
-          RecordSlot(anchor, key_slot, *key_slot);
+          RecordSlot(table, key_slot, *key_slot);
           Object** value_slot =
               table->RawFieldOfElementAt(ObjectHashTable::EntryToValueIndex(i));
-          MarkCompactMarkingVisitor::MarkObjectByPointer(this, anchor,
+          MarkCompactMarkingVisitor::MarkObjectByPointer(this, table,
                                                          value_slot);
         }
       }
@@ -2639,7 +2418,7 @@
 
 void MarkCompactCollector::ClearWeakCollections() {
   GCTracer::Scope gc_scope(heap()->tracer(),
-                           GCTracer::Scope::MC_WEAKCOLLECTION_CLEAR);
+                           GCTracer::Scope::MC_CLEAR_WEAK_COLLECTIONS);
   Object* weak_collection_obj = heap()->encountered_weak_collections();
   while (weak_collection_obj != Smi::FromInt(0)) {
     JSWeakCollection* weak_collection =
@@ -2662,8 +2441,6 @@
 
 
 void MarkCompactCollector::AbortWeakCollections() {
-  GCTracer::Scope gc_scope(heap()->tracer(),
-                           GCTracer::Scope::MC_WEAKCOLLECTION_ABORT);
   Object* weak_collection_obj = heap()->encountered_weak_collections();
   while (weak_collection_obj != Smi::FromInt(0)) {
     JSWeakCollection* weak_collection =
@@ -2675,50 +2452,225 @@
 }
 
 
-void MarkCompactCollector::ProcessAndClearWeakCells() {
-  HeapObject* undefined = heap()->undefined_value();
-  Object* weak_cell_obj = heap()->encountered_weak_cells();
+void MarkCompactCollector::ClearWeakCells(Object** non_live_map_list,
+                                          DependentCode** dependent_code_list) {
+  Heap* heap = this->heap();
+  GCTracer::Scope gc_scope(heap->tracer(),
+                           GCTracer::Scope::MC_CLEAR_WEAK_CELLS);
+  Object* weak_cell_obj = heap->encountered_weak_cells();
+  Object* the_hole_value = heap->the_hole_value();
+  DependentCode* dependent_code_head =
+      DependentCode::cast(heap->empty_fixed_array());
+  Object* non_live_map_head = Smi::FromInt(0);
   while (weak_cell_obj != Smi::FromInt(0)) {
     WeakCell* weak_cell = reinterpret_cast<WeakCell*>(weak_cell_obj);
+    Object* next_weak_cell = weak_cell->next();
+    bool clear_value = true;
+    bool clear_next = true;
     // We do not insert cleared weak cells into the list, so the value
     // cannot be a Smi here.
     HeapObject* value = HeapObject::cast(weak_cell->value());
     if (!MarkCompactCollector::IsMarked(value)) {
-      weak_cell->clear();
+      // Cells for new-space objects embedded in optimized code are wrapped in
+      // WeakCell and put into Heap::weak_object_to_code_table.
+      // Such cells do not have any strong references but we want to keep them
+      // alive as long as the cell value is alive.
+      // TODO(ulan): remove this once we remove Heap::weak_object_to_code_table.
+      if (value->IsCell()) {
+        Object* cell_value = Cell::cast(value)->value();
+        if (cell_value->IsHeapObject() &&
+            MarkCompactCollector::IsMarked(HeapObject::cast(cell_value))) {
+          // Resurrect the cell.
+          MarkBit mark = Marking::MarkBitFrom(value);
+          SetMark(value, mark);
+          Object** slot = HeapObject::RawField(value, Cell::kValueOffset);
+          RecordSlot(value, slot, *slot);
+          slot = HeapObject::RawField(weak_cell, WeakCell::kValueOffset);
+          RecordSlot(weak_cell, slot, *slot);
+          clear_value = false;
+        }
+      }
+      if (value->IsMap()) {
+        // The map is non-live.
+        Map* map = Map::cast(value);
+        // Add dependent code to the dependent_code_list.
+        DependentCode* candidate = map->dependent_code();
+        // We rely on the fact that the weak code group comes first.
+        STATIC_ASSERT(DependentCode::kWeakCodeGroup == 0);
+        if (candidate->length() > 0 &&
+            candidate->group() == DependentCode::kWeakCodeGroup) {
+          candidate->set_next_link(dependent_code_head);
+          dependent_code_head = candidate;
+        }
+        // Add the weak cell to the non_live_map list.
+        weak_cell->set_next(non_live_map_head);
+        non_live_map_head = weak_cell;
+        clear_value = false;
+        clear_next = false;
+      }
     } else {
+      // The value of the weak cell is alive.
       Object** slot = HeapObject::RawField(weak_cell, WeakCell::kValueOffset);
-      heap()->mark_compact_collector()->RecordSlot(slot, slot, value);
+      RecordSlot(weak_cell, slot, *slot);
+      clear_value = false;
     }
-    weak_cell_obj = weak_cell->next();
-    weak_cell->set_next(undefined, SKIP_WRITE_BARRIER);
+    if (clear_value) {
+      weak_cell->clear();
+    }
+    if (clear_next) {
+      weak_cell->clear_next(the_hole_value);
+    }
+    weak_cell_obj = next_weak_cell;
   }
-  heap()->set_encountered_weak_cells(Smi::FromInt(0));
+  heap->set_encountered_weak_cells(Smi::FromInt(0));
+  *non_live_map_list = non_live_map_head;
+  *dependent_code_list = dependent_code_head;
 }
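
ClearWeakCells builds its two outputs by re-threading the objects themselves: weak cells whose value is a dead map are pushed onto the non_live_map_list through their own next field, and each dead map's weak-code dependent_code is chained into dependent_code_list through set_next_link, all in a single pass over heap->encountered_weak_cells(). The following standalone sketch shows just that one-pass re-threading pattern; the struct and the liveness flag are hypothetical stand-ins for WeakCell and the marking check, not V8 types.

#include <cstdio>

struct WeakCellLike {
  int value;            // stand-in for the cell's referent
  bool value_is_live;   // stand-in for MarkCompactCollector::IsMarked(value)
  WeakCellLike* next;   // intrusive list link, reused for the output list
};

// Walks the input list once and prepends every cell with a dead value onto a
// second list, reusing the same next field (mirroring weak_cell->set_next()).
static WeakCellLike* PartitionDeadCells(WeakCellLike* head) {
  WeakCellLike* dead_head = nullptr;
  while (head != nullptr) {
    WeakCellLike* next = head->next;  // read before re-threading the node
    if (!head->value_is_live) {
      head->next = dead_head;
      dead_head = head;
    }
    head = next;
  }
  return dead_head;
}

int main() {
  WeakCellLike c = {3, false, nullptr};
  WeakCellLike b = {2, true, &c};
  WeakCellLike a = {1, false, &b};
  for (WeakCellLike* p = PartitionDeadCells(&a); p != nullptr; p = p->next) {
    std::printf("dead value: %d\n", p->value);  // prints 3, then 1
  }
  return 0;
}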
 
 
 void MarkCompactCollector::AbortWeakCells() {
-  Object* undefined = heap()->undefined_value();
+  Object* the_hole_value = heap()->the_hole_value();
   Object* weak_cell_obj = heap()->encountered_weak_cells();
   while (weak_cell_obj != Smi::FromInt(0)) {
     WeakCell* weak_cell = reinterpret_cast<WeakCell*>(weak_cell_obj);
     weak_cell_obj = weak_cell->next();
-    weak_cell->set_next(undefined, SKIP_WRITE_BARRIER);
+    weak_cell->clear_next(the_hole_value);
   }
   heap()->set_encountered_weak_cells(Smi::FromInt(0));
 }
 
 
-void MarkCompactCollector::RecordMigratedSlot(Object* value, Address slot) {
+void MarkCompactCollector::AbortTransitionArrays() {
+  HeapObject* undefined = heap()->undefined_value();
+  Object* obj = heap()->encountered_transition_arrays();
+  while (obj != Smi::FromInt(0)) {
+    TransitionArray* array = TransitionArray::cast(obj);
+    obj = array->next_link();
+    array->set_next_link(undefined, SKIP_WRITE_BARRIER);
+  }
+  heap()->set_encountered_transition_arrays(Smi::FromInt(0));
+}
+
+
+void MarkCompactCollector::RecordMigratedSlot(
+    Object* value, Address slot, SlotsBuffer** evacuation_slots_buffer) {
+  // When parallel compaction is in progress, store buffer and slots buffer
+  // entries require synchronization.
   if (heap_->InNewSpace(value)) {
-    heap_->store_buffer()->Mark(slot);
+    if (compaction_in_progress_) {
+      heap_->store_buffer()->MarkSynchronized(slot);
+    } else {
+      heap_->store_buffer()->Mark(slot);
+    }
   } else if (value->IsHeapObject() && IsOnEvacuationCandidate(value)) {
-    SlotsBuffer::AddTo(&slots_buffer_allocator_, &migration_slots_buffer_,
+    SlotsBuffer::AddTo(slots_buffer_allocator_, evacuation_slots_buffer,
                        reinterpret_cast<Object**>(slot),
                        SlotsBuffer::IGNORE_OVERFLOW);
   }
 }
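
The synchronized/unsynchronized split above matters because evacuation can run on several compaction tasks at once while the store buffer is shared. A minimal sketch of that pattern, with a hypothetical recorder type standing in for the store buffer (not V8 API):

#include <mutex>
#include <vector>

class SlotRecorder {
 public:
  explicit SlotRecorder(bool parallel) : parallel_(parallel) {}

  void Record(void* slot) {
    if (parallel_) {
      // Mirrors the MarkSynchronized path: serialize concurrent writers.
      std::lock_guard<std::mutex> guard(mutex_);
      slots_.push_back(slot);
    } else {
      // Mirrors the plain Mark path: main thread only, no locking needed.
      slots_.push_back(slot);
    }
  }

  size_t size() const { return slots_.size(); }

 private:
  bool parallel_;
  std::mutex mutex_;
  std::vector<void*> slots_;
};

int main() {
  SlotRecorder recorder(/*parallel=*/true);
  int dummy = 0;
  recorder.Record(&dummy);
  return recorder.size() == 1 ? 0 : 1;
}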
 
 
+void MarkCompactCollector::RecordMigratedCodeEntrySlot(
+    Address code_entry, Address code_entry_slot,
+    SlotsBuffer** evacuation_slots_buffer) {
+  if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
+    SlotsBuffer::AddTo(slots_buffer_allocator_, evacuation_slots_buffer,
+                       SlotsBuffer::CODE_ENTRY_SLOT, code_entry_slot,
+                       SlotsBuffer::IGNORE_OVERFLOW);
+  }
+}
+
+
+void MarkCompactCollector::RecordMigratedCodeObjectSlot(
+    Address code_object, SlotsBuffer** evacuation_slots_buffer) {
+  SlotsBuffer::AddTo(slots_buffer_allocator_, evacuation_slots_buffer,
+                     SlotsBuffer::RELOCATED_CODE_OBJECT, code_object,
+                     SlotsBuffer::IGNORE_OVERFLOW);
+}
+
+
+static inline SlotsBuffer::SlotType SlotTypeForRMode(RelocInfo::Mode rmode) {
+  if (RelocInfo::IsCodeTarget(rmode)) {
+    return SlotsBuffer::CODE_TARGET_SLOT;
+  } else if (RelocInfo::IsCell(rmode)) {
+    return SlotsBuffer::CELL_TARGET_SLOT;
+  } else if (RelocInfo::IsEmbeddedObject(rmode)) {
+    return SlotsBuffer::EMBEDDED_OBJECT_SLOT;
+  } else if (RelocInfo::IsDebugBreakSlot(rmode)) {
+    return SlotsBuffer::DEBUG_TARGET_SLOT;
+  }
+  UNREACHABLE();
+  return SlotsBuffer::NUMBER_OF_SLOT_TYPES;
+}
+
+
+static inline SlotsBuffer::SlotType DecodeSlotType(
+    SlotsBuffer::ObjectSlot slot) {
+  return static_cast<SlotsBuffer::SlotType>(reinterpret_cast<intptr_t>(slot));
+}
+
+
+void MarkCompactCollector::RecordRelocSlot(RelocInfo* rinfo, Object* target) {
+  Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
+  RelocInfo::Mode rmode = rinfo->rmode();
+  if (target_page->IsEvacuationCandidate() &&
+      (rinfo->host() == NULL ||
+       !ShouldSkipEvacuationSlotRecording(rinfo->host()))) {
+    Address addr = rinfo->pc();
+    SlotsBuffer::SlotType slot_type = SlotTypeForRMode(rmode);
+    if (rinfo->IsInConstantPool()) {
+      addr = rinfo->constant_pool_entry_address();
+      if (RelocInfo::IsCodeTarget(rmode)) {
+        slot_type = SlotsBuffer::CODE_ENTRY_SLOT;
+      } else {
+        DCHECK(RelocInfo::IsEmbeddedObject(rmode));
+        slot_type = SlotsBuffer::OBJECT_SLOT;
+      }
+    }
+    bool success = SlotsBuffer::AddTo(
+        slots_buffer_allocator_, target_page->slots_buffer_address(), slot_type,
+        addr, SlotsBuffer::FAIL_ON_OVERFLOW);
+    if (!success) {
+      EvictPopularEvacuationCandidate(target_page);
+    }
+  }
+}
+
+
+class RecordMigratedSlotVisitor final : public ObjectVisitor {
+ public:
+  RecordMigratedSlotVisitor(MarkCompactCollector* collector,
+                            SlotsBuffer** evacuation_slots_buffer)
+      : collector_(collector),
+        evacuation_slots_buffer_(evacuation_slots_buffer) {}
+
+  V8_INLINE void VisitPointer(Object** p) override {
+    collector_->RecordMigratedSlot(*p, reinterpret_cast<Address>(p),
+                                   evacuation_slots_buffer_);
+  }
+
+  V8_INLINE void VisitPointers(Object** start, Object** end) override {
+    while (start < end) {
+      collector_->RecordMigratedSlot(*start, reinterpret_cast<Address>(start),
+                                     evacuation_slots_buffer_);
+      ++start;
+    }
+  }
+
+  V8_INLINE void VisitCodeEntry(Address code_entry_slot) override {
+    if (collector_->compacting_) {
+      Address code_entry = Memory::Address_at(code_entry_slot);
+      collector_->RecordMigratedCodeEntrySlot(code_entry, code_entry_slot,
+                                              evacuation_slots_buffer_);
+    }
+  }
+
+ private:
+  MarkCompactCollector* collector_;
+  SlotsBuffer** evacuation_slots_buffer_;
+};
+
+
 // We scavenge new space simultaneously with sweeping. This is done in two
 // passes.
 //
@@ -2733,85 +2685,32 @@
 // pointer iteration.  This is an issue if the store buffer overflows and we
 // have to scan the entire old space, including dead objects, looking for
 // pointers to new space.
-void MarkCompactCollector::MigrateObject(HeapObject* dst, HeapObject* src,
-                                         int size, AllocationSpace dest) {
+void MarkCompactCollector::MigrateObject(
+    HeapObject* dst, HeapObject* src, int size, AllocationSpace dest,
+    SlotsBuffer** evacuation_slots_buffer) {
   Address dst_addr = dst->address();
   Address src_addr = src->address();
   DCHECK(heap()->AllowedToBeMigrated(src, dest));
-  DCHECK(dest != LO_SPACE && size <= Page::kMaxRegularHeapObjectSize);
-  if (dest == OLD_POINTER_SPACE) {
-    Address src_slot = src_addr;
-    Address dst_slot = dst_addr;
+  DCHECK(dest != LO_SPACE);
+  if (dest == OLD_SPACE) {
+    DCHECK_OBJECT_SIZE(size);
+    DCHECK(evacuation_slots_buffer != nullptr);
     DCHECK(IsAligned(size, kPointerSize));
 
-    bool may_contain_raw_values = src->MayContainRawValues();
-#if V8_DOUBLE_FIELDS_UNBOXING
-    LayoutDescriptorHelper helper(src->map());
-    bool has_only_tagged_fields = helper.all_fields_tagged();
-#endif
-    for (int remaining = size / kPointerSize; remaining > 0; remaining--) {
-      Object* value = Memory::Object_at(src_slot);
-
-      Memory::Object_at(dst_slot) = value;
-
-#if V8_DOUBLE_FIELDS_UNBOXING
-      if (!may_contain_raw_values &&
-          (has_only_tagged_fields ||
-           helper.IsTagged(static_cast<int>(src_slot - src_addr))))
-#else
-      if (!may_contain_raw_values)
-#endif
-      {
-        RecordMigratedSlot(value, dst_slot);
-      }
-
-      src_slot += kPointerSize;
-      dst_slot += kPointerSize;
-    }
-
-    if (compacting_ && dst->IsJSFunction()) {
-      Address code_entry_slot = dst_addr + JSFunction::kCodeEntryOffset;
-      Address code_entry = Memory::Address_at(code_entry_slot);
-
-      if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
-        SlotsBuffer::AddTo(&slots_buffer_allocator_, &migration_slots_buffer_,
-                           SlotsBuffer::CODE_ENTRY_SLOT, code_entry_slot,
-                           SlotsBuffer::IGNORE_OVERFLOW);
-      }
-    } else if (dst->IsConstantPoolArray()) {
-      // We special case ConstantPoolArrays since they could contain integers
-      // value entries which look like tagged pointers.
-      // TODO(mstarzinger): restructure this code to avoid this special-casing.
-      ConstantPoolArray* array = ConstantPoolArray::cast(dst);
-      ConstantPoolArray::Iterator code_iter(array, ConstantPoolArray::CODE_PTR);
-      while (!code_iter.is_finished()) {
-        Address code_entry_slot =
-            dst_addr + array->OffsetOfElementAt(code_iter.next_index());
-        Address code_entry = Memory::Address_at(code_entry_slot);
-
-        if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
-          SlotsBuffer::AddTo(&slots_buffer_allocator_, &migration_slots_buffer_,
-                             SlotsBuffer::CODE_ENTRY_SLOT, code_entry_slot,
-                             SlotsBuffer::IGNORE_OVERFLOW);
-        }
-      }
-      ConstantPoolArray::Iterator heap_iter(array, ConstantPoolArray::HEAP_PTR);
-      while (!heap_iter.is_finished()) {
-        Address heap_slot =
-            dst_addr + array->OffsetOfElementAt(heap_iter.next_index());
-        Object* value = Memory::Object_at(heap_slot);
-        RecordMigratedSlot(value, heap_slot);
-      }
-    }
+    heap()->MoveBlock(dst->address(), src->address(), size);
+    RecordMigratedSlotVisitor visitor(this, evacuation_slots_buffer);
+    dst->IterateBody(&visitor);
   } else if (dest == CODE_SPACE) {
+    DCHECK_CODEOBJECT_SIZE(size, heap()->code_space());
+    DCHECK(evacuation_slots_buffer != nullptr);
     PROFILE(isolate(), CodeMoveEvent(src_addr, dst_addr));
     heap()->MoveBlock(dst_addr, src_addr, size);
-    SlotsBuffer::AddTo(&slots_buffer_allocator_, &migration_slots_buffer_,
-                       SlotsBuffer::RELOCATED_CODE_OBJECT, dst_addr,
-                       SlotsBuffer::IGNORE_OVERFLOW);
+    RecordMigratedCodeObjectSlot(dst_addr, evacuation_slots_buffer);
     Code::cast(dst)->Relocate(dst_addr - src_addr);
   } else {
-    DCHECK(dest == OLD_DATA_SPACE || dest == NEW_SPACE);
+    DCHECK_OBJECT_SIZE(size);
+    DCHECK(evacuation_slots_buffer == nullptr);
+    DCHECK(dest == NEW_SPACE);
     heap()->MoveBlock(dst_addr, src_addr, size);
   }
   heap()->OnMoveEvent(dst, src, size);
@@ -2819,19 +2718,73 @@
 }
 
 
+static inline void UpdateSlot(Isolate* isolate, ObjectVisitor* v,
+                              SlotsBuffer::SlotType slot_type, Address addr) {
+  switch (slot_type) {
+    case SlotsBuffer::CODE_TARGET_SLOT: {
+      RelocInfo rinfo(isolate, addr, RelocInfo::CODE_TARGET, 0, NULL);
+      rinfo.Visit(isolate, v);
+      break;
+    }
+    case SlotsBuffer::CELL_TARGET_SLOT: {
+      RelocInfo rinfo(isolate, addr, RelocInfo::CELL, 0, NULL);
+      rinfo.Visit(isolate, v);
+      break;
+    }
+    case SlotsBuffer::CODE_ENTRY_SLOT: {
+      v->VisitCodeEntry(addr);
+      break;
+    }
+    case SlotsBuffer::RELOCATED_CODE_OBJECT: {
+      HeapObject* obj = HeapObject::FromAddress(addr);
+      Code::BodyDescriptor::IterateBody(obj, v);
+      break;
+    }
+    case SlotsBuffer::DEBUG_TARGET_SLOT: {
+      RelocInfo rinfo(isolate, addr, RelocInfo::DEBUG_BREAK_SLOT_AT_POSITION, 0,
+                      NULL);
+      if (rinfo.IsPatchedDebugBreakSlotSequence()) rinfo.Visit(isolate, v);
+      break;
+    }
+    case SlotsBuffer::EMBEDDED_OBJECT_SLOT: {
+      RelocInfo rinfo(isolate, addr, RelocInfo::EMBEDDED_OBJECT, 0, NULL);
+      rinfo.Visit(isolate, v);
+      break;
+    }
+    case SlotsBuffer::OBJECT_SLOT: {
+      v->VisitPointer(reinterpret_cast<Object**>(addr));
+      break;
+    }
+    default:
+      UNREACHABLE();
+      break;
+  }
+}
+
+
 // Visitor for updating pointers from live objects in old spaces to new space.
 // It does not expect to encounter pointers to dead objects.
 class PointersUpdatingVisitor : public ObjectVisitor {
  public:
   explicit PointersUpdatingVisitor(Heap* heap) : heap_(heap) {}
 
-  void VisitPointer(Object** p) { UpdatePointer(p); }
+  void VisitPointer(Object** p) override { UpdatePointer(p); }
 
-  void VisitPointers(Object** start, Object** end) {
+  void VisitPointers(Object** start, Object** end) override {
     for (Object** p = start; p < end; p++) UpdatePointer(p);
   }
 
-  void VisitEmbeddedPointer(RelocInfo* rinfo) {
+  void VisitCell(RelocInfo* rinfo) override {
+    DCHECK(rinfo->rmode() == RelocInfo::CELL);
+    Object* cell = rinfo->target_cell();
+    Object* old_cell = cell;
+    VisitPointer(&cell);
+    if (cell != old_cell) {
+      rinfo->set_target_cell(reinterpret_cast<Cell*>(cell));
+    }
+  }
+
+  void VisitEmbeddedPointer(RelocInfo* rinfo) override {
     DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
     Object* target = rinfo->target_object();
     Object* old_target = target;
@@ -2843,7 +2796,7 @@
     }
   }
 
-  void VisitCodeTarget(RelocInfo* rinfo) {
+  void VisitCodeTarget(RelocInfo* rinfo) override {
     DCHECK(RelocInfo::IsCodeTarget(rinfo->rmode()));
     Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
     Object* old_target = target;
@@ -2853,7 +2806,7 @@
     }
   }
 
-  void VisitCodeAgeSequence(RelocInfo* rinfo) {
+  void VisitCodeAgeSequence(RelocInfo* rinfo) override {
     DCHECK(RelocInfo::IsCodeAgeSequence(rinfo->rmode()));
     Object* stub = rinfo->code_age_stub();
     DCHECK(stub != NULL);
@@ -2863,14 +2816,13 @@
     }
   }
 
-  void VisitDebugTarget(RelocInfo* rinfo) {
-    DCHECK((RelocInfo::IsJSReturn(rinfo->rmode()) &&
-            rinfo->IsPatchedReturnSequence()) ||
-           (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
-            rinfo->IsPatchedDebugBreakSlotSequence()));
-    Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
+  void VisitDebugTarget(RelocInfo* rinfo) override {
+    DCHECK(RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
+           rinfo->IsPatchedDebugBreakSlotSequence());
+    Object* target =
+        Code::GetCodeFromTargetAddress(rinfo->debug_call_address());
     VisitPointer(&target);
-    rinfo->set_call_address(Code::cast(target)->instruction_start());
+    rinfo->set_debug_call_address(Code::cast(target)->instruction_start());
   }
 
   static inline void UpdateSlot(Heap* heap, Object** slot) {
@@ -2884,7 +2836,9 @@
     MapWord map_word = heap_obj->map_word();
     if (map_word.IsForwardingAddress()) {
       DCHECK(heap->InFromSpace(heap_obj) ||
-             MarkCompactCollector::IsOnEvacuationCandidate(heap_obj));
+             MarkCompactCollector::IsOnEvacuationCandidate(heap_obj) ||
+             Page::FromAddress(heap_obj->address())
+                 ->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
       HeapObject* target = map_word.ToForwardingAddress();
       base::NoBarrier_CompareAndSwap(
           reinterpret_cast<base::AtomicWord*>(slot),
@@ -2902,20 +2856,42 @@
 };
 
 
-static void UpdatePointer(HeapObject** address, HeapObject* object) {
-  Address new_addr = Memory::Address_at(object->address());
+void MarkCompactCollector::UpdateSlots(SlotsBuffer* buffer) {
+  PointersUpdatingVisitor v(heap_);
+  size_t buffer_size = buffer->Size();
 
-  // The new space sweep will overwrite the map word of dead objects
-  // with NULL. In this case we do not need to transfer this entry to
-  // the store buffer which we are rebuilding.
-  // We perform the pointer update with a no barrier compare-and-swap. The
-  // compare and swap may fail in the case where the pointer update tries to
-  // update garbage memory which was concurrently accessed by the sweeper.
-  if (new_addr != NULL) {
-    base::NoBarrier_CompareAndSwap(
-        reinterpret_cast<base::AtomicWord*>(address),
-        reinterpret_cast<base::AtomicWord>(object),
-        reinterpret_cast<base::AtomicWord>(HeapObject::FromAddress(new_addr)));
+  for (size_t slot_idx = 0; slot_idx < buffer_size; ++slot_idx) {
+    SlotsBuffer::ObjectSlot slot = buffer->Get(slot_idx);
+    if (!SlotsBuffer::IsTypedSlot(slot)) {
+      PointersUpdatingVisitor::UpdateSlot(heap_, slot);
+    } else {
+      ++slot_idx;
+      DCHECK(slot_idx < buffer_size);
+      UpdateSlot(heap_->isolate(), &v, DecodeSlotType(slot),
+                 reinterpret_cast<Address>(buffer->Get(slot_idx)));
+    }
+  }
+}
+
+
+void MarkCompactCollector::UpdateSlotsRecordedIn(SlotsBuffer* buffer) {
+  while (buffer != NULL) {
+    UpdateSlots(buffer);
+    buffer = buffer->next();
+  }
+}
+
+
+static void UpdatePointer(HeapObject** address, HeapObject* object) {
+  MapWord map_word = object->map_word();
+  // The store buffer can still contain stale pointers in dead large objects.
+  // Ignore these pointers here.
+  DCHECK(map_word.IsForwardingAddress() ||
+         object->GetHeap()->lo_space()->FindPage(
+             reinterpret_cast<Address>(address)) != NULL);
+  if (map_word.IsForwardingAddress()) {
+    // Update the corresponding slot.
+    *address = map_word.ToForwardingAddress();
   }
 }
 
@@ -2932,27 +2908,158 @@
 }
 
 
-bool MarkCompactCollector::TryPromoteObject(HeapObject* object,
-                                            int object_size) {
-  DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
-
-  OldSpace* target_space = heap()->TargetSpace(object);
-
-  DCHECK(target_space == heap()->old_pointer_space() ||
-         target_space == heap()->old_data_space());
-  HeapObject* target;
-  AllocationResult allocation = target_space->AllocateRaw(object_size);
-  if (allocation.To(&target)) {
-    MigrateObject(target, object, object_size, target_space->identity());
-    heap()->IncrementPromotedObjectsSize(object_size);
-    return true;
+bool MarkCompactCollector::IsSlotInBlackObject(Page* p, Address slot,
+                                               HeapObject** out_object) {
+  Space* owner = p->owner();
+  if (owner == heap_->lo_space() || owner == NULL) {
+    Object* large_object = heap_->lo_space()->FindObject(slot);
+    // This object has to exist, otherwise we would not have recorded a slot
+    // for it.
+    CHECK(large_object->IsHeapObject());
+    HeapObject* large_heap_object = HeapObject::cast(large_object);
+    if (IsMarked(large_heap_object)) {
+      *out_object = large_heap_object;
+      return true;
+    }
+    return false;
   }
 
+  uint32_t mark_bit_index = p->AddressToMarkbitIndex(slot);
+  unsigned int cell_index = mark_bit_index >> Bitmap::kBitsPerCellLog2;
+  MarkBit::CellType index_mask = 1u << Bitmap::IndexInCell(mark_bit_index);
+  MarkBit::CellType* cells = p->markbits()->cells();
+  Address base_address = p->area_start();
+  unsigned int base_address_cell_index = Bitmap::IndexToCell(
+      Bitmap::CellAlignIndex(p->AddressToMarkbitIndex(base_address)));
+
+  // Check if the slot points to the start of an object. This can happen e.g.
+  // when we left trim a fixed array. Such slots are invalid and we can remove
+  // them.
+  if (index_mask > 1) {
+    if ((cells[cell_index] & index_mask) != 0 &&
+        (cells[cell_index] & (index_mask >> 1)) == 0) {
+      return false;
+    }
+  } else {
+    // Left trimming moves the mark bits so we cannot be in the very first cell.
+    DCHECK(cell_index != base_address_cell_index);
+    if ((cells[cell_index] & index_mask) != 0 &&
+        (cells[cell_index - 1] & (1u << Bitmap::kBitIndexMask)) == 0) {
+      return false;
+    }
+  }
+
+  // Check if the object is in the current cell.
+  MarkBit::CellType slot_mask;
+  if ((cells[cell_index] == 0) ||
+      (base::bits::CountTrailingZeros32(cells[cell_index]) >
+       base::bits::CountTrailingZeros32(cells[cell_index] | index_mask))) {
+    // If we are already in the first cell, there is no live object.
+    if (cell_index == base_address_cell_index) return false;
+
+    // If not, find a cell in a preceding cell slot that has a mark bit set.
+    do {
+      cell_index--;
+    } while (cell_index > base_address_cell_index && cells[cell_index] == 0);
+
+    // The slot must be in a dead object if there are no preceding cells that
+    // have mark bits set.
+    if (cells[cell_index] == 0) {
+      return false;
+    }
+
+    // The object is in a preceding cell. Set the mask to find any object.
+    slot_mask = ~0u;
+  } else {
+    // We are interested in object mark bits right before the slot.
+    slot_mask = index_mask + (index_mask - 1);
+  }
+
+  MarkBit::CellType current_cell = cells[cell_index];
+  CHECK(current_cell != 0);
+
+  // Find the last live object in the cell.
+  unsigned int leading_zeros =
+      base::bits::CountLeadingZeros32(current_cell & slot_mask);
+  CHECK(leading_zeros != Bitmap::kBitsPerCell);
+  int offset = static_cast<int>(Bitmap::kBitIndexMask - leading_zeros) - 1;
+
+  base_address += (cell_index - base_address_cell_index) *
+                  Bitmap::kBitsPerCell * kPointerSize;
+  Address address = base_address + offset * kPointerSize;
+  HeapObject* object = HeapObject::FromAddress(address);
+  CHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
+  CHECK(object->address() < reinterpret_cast<Address>(slot));
+  if ((object->address() + kPointerSize) <= slot &&
+      (object->address() + object->Size()) > slot) {
+    // If the slot is within the last found object in the cell, the slot is
+    // in a live object.
+    // Slots pointing to the first word of an object are invalid and removed.
+    // This can happen when we move the object header while left trimming.
+    *out_object = object;
+    return true;
+  }
   return false;
 }
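
IsSlotInBlackObject walks the page's mark bitmap backwards from the slot: every bit stands for one pointer-sized word, and the nearest preceding set bit is the start of the only object that could contain the slot. A toy version of the per-cell lookup follows; it is purely illustrative and skips the multi-cell walk and the exact offset arithmetic used above.

#include <cstdint>
#include <cstdio>

// In one 32-bit mark-bit cell (one bit per pointer-sized word), return the
// word index of the last set bit at or below slot_index, i.e. the start of
// the nearest preceding object, or -1 if it starts in an earlier cell.
static int LastObjectStartAtOrBefore(uint32_t cell, int slot_index) {
  for (int i = slot_index; i >= 0; --i) {
    if (cell & (1u << i)) return i;
  }
  return -1;
}

int main() {
  // Objects start at words 2 and 9; a slot in word 12 must belong to the
  // object starting at word 9 (if the slot lies within that object's size).
  uint32_t cell = (1u << 2) | (1u << 9);
  std::printf("%d\n", LastObjectStartAtOrBefore(cell, 12));  // prints 9
  return 0;
}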
 
 
-void MarkCompactCollector::EvacuateNewSpace() {
+bool MarkCompactCollector::IsSlotInBlackObjectSlow(Page* p, Address slot) {
+  // This function does not support large objects right now.
+  Space* owner = p->owner();
+  if (owner == heap_->lo_space() || owner == NULL) {
+    Object* large_object = heap_->lo_space()->FindObject(slot);
+    // This object has to exist, otherwise we would not have recorded a slot
+    // for it.
+    CHECK(large_object->IsHeapObject());
+    HeapObject* large_heap_object = HeapObject::cast(large_object);
+    if (IsMarked(large_heap_object)) {
+      return true;
+    }
+    return false;
+  }
+
+  LiveObjectIterator<kBlackObjects> it(p);
+  HeapObject* object = NULL;
+  while ((object = it.Next()) != NULL) {
+    int size = object->Size();
+
+    if (object->address() > slot) return false;
+    if (object->address() <= slot && slot < (object->address() + size)) {
+      return true;
+    }
+  }
+  return false;
+}
+
+
+bool MarkCompactCollector::IsSlotInLiveObject(Address slot) {
+  HeapObject* object = NULL;
+  // The target object is black but we don't know if the source slot is black.
+  // The source object could have died and the slot could be part of a free
+  // space. Find out based on mark bits if the slot is part of a live object.
+  if (!IsSlotInBlackObject(Page::FromAddress(slot), slot, &object)) {
+    return false;
+  }
+
+  DCHECK(object != NULL);
+  int offset = static_cast<int>(slot - object->address());
+  return object->IsValidSlot(offset);
+}
+
+
+void MarkCompactCollector::VerifyIsSlotInLiveObject(Address slot,
+                                                    HeapObject* object) {
+  // The target object has to be black.
+  CHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
+
+  // The target object is black but we don't know if the source slot is black.
+  // The source object could have died and the slot could be part of a free
+  // space. Use the mark bit iterator to find out about liveness of the slot.
+  CHECK(IsSlotInBlackObjectSlow(Page::FromAddress(slot), slot));
+}
+
+
+void MarkCompactCollector::EvacuateNewSpacePrologue() {
   // There are soft limits in the allocation code, designed to trigger a mark
   // sweep collection by failing allocations.  But since we are already in
   // a mark-sweep allocation, there is no sense in trying to trigger one.
@@ -2969,114 +3076,247 @@
   new_space->Flip();
   new_space->ResetAllocationInfo();
 
-  int survivors_size = 0;
+  newspace_evacuation_candidates_.Clear();
+  NewSpacePageIterator it(from_bottom, from_top);
+  while (it.has_next()) {
+    newspace_evacuation_candidates_.Add(it.next());
+  }
+}
 
+
+HashMap* MarkCompactCollector::EvacuateNewSpaceInParallel() {
+  HashMap* local_pretenuring_feedback = new HashMap(
+      HashMap::PointersMatch, kInitialLocalPretenuringFeedbackCapacity);
+  EvacuateNewSpaceVisitor new_space_visitor(heap(), &migration_slots_buffer_,
+                                            local_pretenuring_feedback);
   // First pass: traverse all objects in inactive semispace, remove marks,
   // migrate live objects and write forwarding addresses.  This stage puts
   // new entries in the store buffer and may cause some pages to be marked
   // scan-on-scavenge.
-  NewSpacePageIterator it(from_bottom, from_top);
-  while (it.has_next()) {
-    NewSpacePage* p = it.next();
-    survivors_size += DiscoverAndEvacuateBlackObjectsOnPage(new_space, p);
+  for (int i = 0; i < newspace_evacuation_candidates_.length(); i++) {
+    NewSpacePage* p =
+        reinterpret_cast<NewSpacePage*>(newspace_evacuation_candidates_[i]);
+    bool ok = VisitLiveObjects(p, &new_space_visitor, kClearMarkbits);
+    USE(ok);
+    DCHECK(ok);
   }
-
-  heap_->IncrementYoungSurvivorsCounter(survivors_size);
-  new_space->set_age_mark(new_space->top());
+  heap_->IncrementPromotedObjectsSize(
+      static_cast<int>(new_space_visitor.promoted_size()));
+  heap_->IncrementSemiSpaceCopiedObjectSize(
+      static_cast<int>(new_space_visitor.semispace_copied_size()));
+  heap_->IncrementYoungSurvivorsCounter(
+      static_cast<int>(new_space_visitor.promoted_size()) +
+      static_cast<int>(new_space_visitor.semispace_copied_size()));
+  return local_pretenuring_feedback;
 }
 
 
-void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) {
-  AlwaysAllocateScope always_allocate(isolate());
-  PagedSpace* space = static_cast<PagedSpace*>(p->owner());
-  DCHECK(p->IsEvacuationCandidate() && !p->WasSwept());
-  p->SetWasSwept();
+void MarkCompactCollector::AddEvacuationSlotsBufferSynchronized(
+    SlotsBuffer* evacuation_slots_buffer) {
+  base::LockGuard<base::Mutex> lock_guard(&evacuation_slots_buffers_mutex_);
+  evacuation_slots_buffers_.Add(evacuation_slots_buffer);
+}
 
-  int offsets[16];
 
-  for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
-    Address cell_base = it.CurrentCellBase();
-    MarkBit::CellType* cell = it.CurrentCell();
+int MarkCompactCollector::NumberOfParallelCompactionTasks() {
+  if (!FLAG_parallel_compaction) return 1;
+  // Compute the number of needed tasks based on a target compaction time, the
+  // profiled compaction speed and marked live memory.
+  //
+  // The number of parallel compaction tasks is limited by:
+  // - #evacuation pages
+  // - (#cores - 1)
+  // - a hard limit
+  const double kTargetCompactionTimeInMs = 1;
+  const int kMaxCompactionTasks = 8;
 
-    if (*cell == 0) continue;
+  intptr_t compaction_speed =
+      heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
+  if (compaction_speed == 0) return 1;
 
-    int live_objects = MarkWordToObjectStarts(*cell, offsets);
-    for (int i = 0; i < live_objects; i++) {
-      Address object_addr = cell_base + offsets[i] * kPointerSize;
-      HeapObject* object = HeapObject::FromAddress(object_addr);
-      DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
+  intptr_t live_bytes = 0;
+  for (Page* page : evacuation_candidates_) {
+    live_bytes += page->LiveBytes();
+  }
 
-      int size = object->Size();
+  const int cores = Max(1, base::SysInfo::NumberOfProcessors() - 1);
+  const int tasks =
+      1 + static_cast<int>(static_cast<double>(live_bytes) / compaction_speed /
+                           kTargetCompactionTimeInMs);
+  const int tasks_capped_pages = Min(evacuation_candidates_.length(), tasks);
+  const int tasks_capped_cores = Min(cores, tasks_capped_pages);
+  const int tasks_capped_hard = Min(kMaxCompactionTasks, tasks_capped_cores);
+  return tasks_capped_hard;
+}
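
The heuristic above aims at roughly kTargetCompactionTimeInMs of work per task and then clamps by the number of candidate pages, the available cores (minus one), and a hard cap. A standalone sketch with hypothetical inputs; the helper name and the numbers are illustrative only:

#include <algorithm>
#include <cstdint>
#include <cstdio>

static int EstimateCompactionTasks(int64_t live_bytes,
                                   int64_t bytes_per_ms,  // profiled speed
                                   int num_pages, int num_cores) {
  const double kTargetCompactionTimeInMs = 1;
  const int kMaxCompactionTasks = 8;
  if (bytes_per_ms == 0) return 1;  // no profiled speed yet -> single task
  int tasks = 1 + static_cast<int>(static_cast<double>(live_bytes) /
                                   bytes_per_ms / kTargetCompactionTimeInMs);
  tasks = std::min(num_pages, tasks);                    // one task per page
  tasks = std::min(std::max(1, num_cores - 1), tasks);   // leave a core free
  return std::min(kMaxCompactionTasks, tasks);           // hard cap
}

int main() {
  // E.g. 4 MB of live data on candidate pages, 1 MB/ms compaction speed,
  // 16 candidate pages, 8 cores: 1 + 4 = 5 tasks, within all three caps.
  std::printf("%d\n", EstimateCompactionTasks(4 << 20, 1 << 20, 16, 8));
  return 0;
}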
 
-      HeapObject* target_object;
-      AllocationResult allocation = space->AllocateRaw(size);
-      if (!allocation.To(&target_object)) {
-        // If allocation failed, use emergency memory and re-try allocation.
-        CHECK(space->HasEmergencyMemory());
-        space->UseEmergencyMemory();
-        allocation = space->AllocateRaw(size);
-      }
-      if (!allocation.To(&target_object)) {
-        // OS refused to give us memory.
-        V8::FatalProcessOutOfMemory("Evacuation");
-        return;
-      }
 
-      MigrateObject(target_object, object, size, space->identity());
-      DCHECK(object->map_word().IsForwardingAddress());
+void MarkCompactCollector::EvacuatePagesInParallel() {
+  const int num_pages = evacuation_candidates_.length();
+  if (num_pages == 0) return;
+
+  // Used for trace summary.
+  intptr_t live_bytes = 0;
+  intptr_t compaction_speed = 0;
+  if (FLAG_trace_fragmentation) {
+    for (Page* page : evacuation_candidates_) {
+      live_bytes += page->LiveBytes();
     }
-
-    // Clear marking bits for current cell.
-    *cell = 0;
+    compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
   }
-  p->ResetLiveBytes();
+  const int num_tasks = NumberOfParallelCompactionTasks();
+
+  // Set up compaction spaces.
+  CompactionSpaceCollection** compaction_spaces_for_tasks =
+      new CompactionSpaceCollection*[num_tasks];
+  for (int i = 0; i < num_tasks; i++) {
+    compaction_spaces_for_tasks[i] = new CompactionSpaceCollection(heap());
+  }
+
+  heap()->old_space()->DivideUponCompactionSpaces(compaction_spaces_for_tasks,
+                                                  num_tasks);
+  heap()->code_space()->DivideUponCompactionSpaces(compaction_spaces_for_tasks,
+                                                   num_tasks);
+
+  uint32_t* task_ids = new uint32_t[num_tasks - 1];
+  // Kick off parallel tasks.
+  StartParallelCompaction(compaction_spaces_for_tasks, task_ids, num_tasks);
+  // Wait for unfinished and not-yet-started tasks.
+  WaitUntilCompactionCompleted(task_ids, num_tasks - 1);
+  delete[] task_ids;
+
+  double compaction_duration = 0.0;
+  intptr_t compacted_memory = 0;
+  // Merge back memory (compacted and unused) from compaction spaces.
+  for (int i = 0; i < num_tasks; i++) {
+    heap()->old_space()->MergeCompactionSpace(
+        compaction_spaces_for_tasks[i]->Get(OLD_SPACE));
+    heap()->code_space()->MergeCompactionSpace(
+        compaction_spaces_for_tasks[i]->Get(CODE_SPACE));
+    compacted_memory += compaction_spaces_for_tasks[i]->bytes_compacted();
+    compaction_duration += compaction_spaces_for_tasks[i]->duration();
+    delete compaction_spaces_for_tasks[i];
+  }
+  delete[] compaction_spaces_for_tasks;
+  heap()->tracer()->AddCompactionEvent(compaction_duration, compacted_memory);
+
+  // Finalize sequentially.
+  int abandoned_pages = 0;
+  for (int i = 0; i < num_pages; i++) {
+    Page* p = evacuation_candidates_[i];
+    switch (p->parallel_compaction_state().Value()) {
+      case MemoryChunk::ParallelCompactingState::kCompactingAborted:
+        // We have partially compacted the page, i.e., some objects may have
+        // moved, others are still in place.
+        // We need to:
+        // - Leave the evacuation candidate flag for later processing of
+        //   slots buffer entries.
+        // - Leave the slots buffer there for processing of entries added by
+        //   the write barrier.
+        // - Rescan the page as slot recording in the migration buffer only
+        //   happens upon moving (which we potentially didn't do).
+        // - Leave the page in the list of pages of a space since we could not
+        //   fully evacuate it.
+        // - Mark them for rescanning for store buffer entries as we otherwise
+        //   might have stale store buffer entries that become "valid" again
+        //   after reusing the memory. Note that all existing store buffer
+        //   entries of such pages are filtered before rescanning.
+        DCHECK(p->IsEvacuationCandidate());
+        p->SetFlag(Page::COMPACTION_WAS_ABORTED);
+        p->set_scan_on_scavenge(true);
+        abandoned_pages++;
+        break;
+      case MemoryChunk::kCompactingFinalize:
+        DCHECK(p->IsEvacuationCandidate());
+        p->SetWasSwept();
+        p->Unlink();
+        break;
+      case MemoryChunk::kCompactingDone:
+        DCHECK(p->IsFlagSet(Page::POPULAR_PAGE));
+        DCHECK(p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
+        break;
+      default:
+        // We should not observe kCompactingInProgress here.
+        UNREACHABLE();
+    }
+    p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone);
+  }
+  if (FLAG_trace_fragmentation) {
+    PrintIsolate(isolate(),
+                 "%8.0f ms: compaction: parallel=%d pages=%d aborted=%d "
+                 "tasks=%d cores=%d live_bytes=%" V8_PTR_PREFIX
+                 "d compaction_speed=%" V8_PTR_PREFIX "d\n",
+                 isolate()->time_millis_since_init(), FLAG_parallel_compaction,
+                 num_pages, abandoned_pages, num_tasks,
+                 base::SysInfo::NumberOfProcessors(), live_bytes,
+                 compaction_speed);
+  }
 }
 
 
-void MarkCompactCollector::EvacuatePages() {
-  int npages = evacuation_candidates_.length();
-  for (int i = 0; i < npages; i++) {
+void MarkCompactCollector::StartParallelCompaction(
+    CompactionSpaceCollection** compaction_spaces, uint32_t* task_ids,
+    int len) {
+  compaction_in_progress_ = true;
+  for (int i = 1; i < len; i++) {
+    CompactionTask* task = new CompactionTask(heap(), compaction_spaces[i]);
+    task_ids[i - 1] = task->id();
+    V8::GetCurrentPlatform()->CallOnBackgroundThread(
+        task, v8::Platform::kShortRunningTask);
+  }
+
+  // Contribute in main thread.
+  EvacuatePages(compaction_spaces[0], &migration_slots_buffer_);
+}
+
+
+void MarkCompactCollector::WaitUntilCompactionCompleted(uint32_t* task_ids,
+                                                        int len) {
+  // Try to cancel compaction tasks that have not been run (as they might be
+  // stuck in a worker queue). Tasks that cannot be canceled, have either
+  // already completed or are still running, hence we need to wait for their
+  // semaphore signal.
+  for (int i = 0; i < len; i++) {
+    if (!heap()->isolate()->cancelable_task_manager()->TryAbort(task_ids[i])) {
+      pending_compaction_tasks_semaphore_.Wait();
+    }
+  }
+  compaction_in_progress_ = false;
+}
+
+
+void MarkCompactCollector::EvacuatePages(
+    CompactionSpaceCollection* compaction_spaces,
+    SlotsBuffer** evacuation_slots_buffer) {
+  EvacuateOldSpaceVisitor visitor(heap(), compaction_spaces,
+                                  evacuation_slots_buffer);
+  for (int i = 0; i < evacuation_candidates_.length(); i++) {
     Page* p = evacuation_candidates_[i];
     DCHECK(p->IsEvacuationCandidate() ||
            p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
-    DCHECK(static_cast<int>(p->parallel_sweeping()) ==
-           MemoryChunk::SWEEPING_DONE);
-    PagedSpace* space = static_cast<PagedSpace*>(p->owner());
-    // Allocate emergency memory for the case when compaction fails due to out
-    // of memory.
-    if (!space->HasEmergencyMemory()) {
-      space->CreateEmergencyMemory();
-    }
-    if (p->IsEvacuationCandidate()) {
-      // During compaction we might have to request a new page. Check that we
-      // have an emergency page and the space still has room for that.
-      if (space->HasEmergencyMemory() && space->CanExpand()) {
-        EvacuateLiveObjectsFromPage(p);
-        // Unlink the page from the list of pages here. We must not iterate
-        // over that page later (e.g. when scan on scavenge pages are
-        // processed). The page itself will be freed later and is still
-        // reachable from the evacuation candidates list.
-        p->Unlink();
-      } else {
-        // Without room for expansion evacuation is not guaranteed to succeed.
-        // Pessimistically abandon unevacuated pages.
-        for (int j = i; j < npages; j++) {
-          Page* page = evacuation_candidates_[j];
-          slots_buffer_allocator_.DeallocateChain(page->slots_buffer_address());
-          page->ClearEvacuationCandidate();
-          page->SetFlag(Page::RESCAN_ON_EVACUATION);
+    DCHECK(static_cast<int>(p->parallel_sweeping_state().Value()) ==
+           MemoryChunk::kSweepingDone);
+    if (p->parallel_compaction_state().TrySetValue(
+            MemoryChunk::kCompactingDone, MemoryChunk::kCompactingInProgress)) {
+      if (p->IsEvacuationCandidate()) {
+        DCHECK_EQ(p->parallel_compaction_state().Value(),
+                  MemoryChunk::kCompactingInProgress);
+        double start = heap()->MonotonicallyIncreasingTimeInMs();
+        intptr_t live_bytes = p->LiveBytes();
+        AlwaysAllocateScope always_allocate(isolate());
+        if (VisitLiveObjects(p, &visitor, kClearMarkbits)) {
+          p->ResetLiveBytes();
+          p->parallel_compaction_state().SetValue(
+              MemoryChunk::kCompactingFinalize);
+          compaction_spaces->ReportCompactionProgress(
+              heap()->MonotonicallyIncreasingTimeInMs() - start, live_bytes);
+        } else {
+          p->parallel_compaction_state().SetValue(
+              MemoryChunk::kCompactingAborted);
         }
-        break;
-      }
-    }
-  }
-  if (npages > 0) {
-    // Release emergency memory.
-    PagedSpaces spaces(heap());
-    for (PagedSpace* space = spaces.next(); space != NULL;
-         space = spaces.next()) {
-      if (space->HasEmergencyMemory()) {
-        space->FreeEmergencyMemory();
+      } else {
+        // There could be popular pages in the list of evacuation candidates
+        // which we do not compact.
+        p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone);
       }
     }
   }
@@ -3098,45 +3338,6 @@
 };
 
 
-static inline void UpdateSlot(Isolate* isolate, ObjectVisitor* v,
-                              SlotsBuffer::SlotType slot_type, Address addr) {
-  switch (slot_type) {
-    case SlotsBuffer::CODE_TARGET_SLOT: {
-      RelocInfo rinfo(addr, RelocInfo::CODE_TARGET, 0, NULL);
-      rinfo.Visit(isolate, v);
-      break;
-    }
-    case SlotsBuffer::CODE_ENTRY_SLOT: {
-      v->VisitCodeEntry(addr);
-      break;
-    }
-    case SlotsBuffer::RELOCATED_CODE_OBJECT: {
-      HeapObject* obj = HeapObject::FromAddress(addr);
-      Code::cast(obj)->CodeIterateBody(v);
-      break;
-    }
-    case SlotsBuffer::DEBUG_TARGET_SLOT: {
-      RelocInfo rinfo(addr, RelocInfo::DEBUG_BREAK_SLOT, 0, NULL);
-      if (rinfo.IsPatchedDebugBreakSlotSequence()) rinfo.Visit(isolate, v);
-      break;
-    }
-    case SlotsBuffer::JS_RETURN_SLOT: {
-      RelocInfo rinfo(addr, RelocInfo::JS_RETURN, 0, NULL);
-      if (rinfo.IsPatchedReturnSequence()) rinfo.Visit(isolate, v);
-      break;
-    }
-    case SlotsBuffer::EMBEDDED_OBJECT_SLOT: {
-      RelocInfo rinfo(addr, RelocInfo::EMBEDDED_OBJECT, 0, NULL);
-      rinfo.Visit(isolate, v);
-      break;
-    }
-    default:
-      UNREACHABLE();
-      break;
-  }
-}
-
-
 enum SweepingMode { SWEEP_ONLY, SWEEP_AND_VISIT_LIVE_OBJECTS };
 
 
@@ -3153,7 +3354,6 @@
     DCHECK(free_list == NULL);
     return space->Free(start, size);
   } else {
-    // TODO(hpayer): account for wasted bytes in concurrent sweeping too.
     return size - free_list->Free(start, size);
   }
 }
@@ -3178,58 +3378,52 @@
 
   Address free_start = p->area_start();
   DCHECK(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
-  int offsets[16];
 
+  // If we use the skip list for code space pages, we have to lock the skip
+  // list because it could be accessed concurrently by the runtime or the
+  // deoptimizer.
   SkipList* skip_list = p->skip_list();
-  int curr_region = -1;
   if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list) {
     skip_list->Clear();
   }
 
   intptr_t freed_bytes = 0;
   intptr_t max_freed_bytes = 0;
+  int curr_region = -1;
 
-  for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
-    Address cell_base = it.CurrentCellBase();
-    MarkBit::CellType* cell = it.CurrentCell();
-    int live_objects = MarkWordToObjectStarts(*cell, offsets);
-    int live_index = 0;
-    for (; live_objects != 0; live_objects--) {
-      Address free_end = cell_base + offsets[live_index++] * kPointerSize;
-      if (free_end != free_start) {
-        int size = static_cast<int>(free_end - free_start);
-        if (free_space_mode == ZAP_FREE_SPACE) {
-          memset(free_start, 0xcc, size);
-        }
-        freed_bytes = Free<parallelism>(space, free_list, free_start, size);
-        max_freed_bytes = Max(freed_bytes, max_freed_bytes);
-#ifdef ENABLE_GDB_JIT_INTERFACE
-        if (FLAG_gdbjit && space->identity() == CODE_SPACE) {
-          GDBJITInterface::RemoveCodeRange(free_start, free_end);
-        }
-#endif
+  LiveObjectIterator<kBlackObjects> it(p);
+  HeapObject* object = NULL;
+  while ((object = it.Next()) != NULL) {
+    DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
+    Address free_end = object->address();
+    if (free_end != free_start) {
+      int size = static_cast<int>(free_end - free_start);
+      if (free_space_mode == ZAP_FREE_SPACE) {
+        memset(free_start, 0xcc, size);
       }
-      HeapObject* live_object = HeapObject::FromAddress(free_end);
-      DCHECK(Marking::IsBlack(Marking::MarkBitFrom(live_object)));
-      Map* map = live_object->synchronized_map();
-      int size = live_object->SizeFromMap(map);
-      if (sweeping_mode == SWEEP_AND_VISIT_LIVE_OBJECTS) {
-        live_object->IterateBody(map->instance_type(), size, v);
-      }
-      if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list != NULL) {
-        int new_region_start = SkipList::RegionNumber(free_end);
-        int new_region_end =
-            SkipList::RegionNumber(free_end + size - kPointerSize);
-        if (new_region_start != curr_region || new_region_end != curr_region) {
-          skip_list->AddObject(free_end, size);
-          curr_region = new_region_end;
-        }
-      }
-      free_start = free_end + size;
+      freed_bytes = Free<parallelism>(space, free_list, free_start, size);
+      max_freed_bytes = Max(freed_bytes, max_freed_bytes);
     }
-    // Clear marking bits for current cell.
-    *cell = 0;
+    Map* map = object->synchronized_map();
+    int size = object->SizeFromMap(map);
+    if (sweeping_mode == SWEEP_AND_VISIT_LIVE_OBJECTS) {
+      object->IterateBody(map->instance_type(), size, v);
+    }
+    if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list != NULL) {
+      int new_region_start = SkipList::RegionNumber(free_end);
+      int new_region_end =
+          SkipList::RegionNumber(free_end + size - kPointerSize);
+      if (new_region_start != curr_region || new_region_end != curr_region) {
+        skip_list->AddObject(free_end, size);
+        curr_region = new_region_end;
+      }
+    }
+    free_start = free_end + size;
   }
+
+  // Clear the mark bits of that page and reset live bytes count.
+  Bitmap::Clear(p);
+
   if (free_start != p->area_end()) {
     int size = static_cast<int>(p->area_end() - free_start);
     if (free_space_mode == ZAP_FREE_SPACE) {
@@ -3237,18 +3431,12 @@
     }
     freed_bytes = Free<parallelism>(space, free_list, free_start, size);
     max_freed_bytes = Max(freed_bytes, max_freed_bytes);
-#ifdef ENABLE_GDB_JIT_INTERFACE
-    if (FLAG_gdbjit && space->identity() == CODE_SPACE) {
-      GDBJITInterface::RemoveCodeRange(free_start, p->area_end());
-    }
-#endif
   }
-  p->ResetLiveBytes();
 
   if (parallelism == MarkCompactCollector::SWEEP_IN_PARALLEL) {
     // When concurrent sweeping is active, the page will be marked after
     // sweeping by the main thread.
-    p->set_parallel_sweeping(MemoryChunk::SWEEPING_FINALIZE);
+    p->parallel_sweeping_state().SetValue(MemoryChunk::kSweepingFinalize);
   } else {
     p->SetWasSwept();
   }
@@ -3256,70 +3444,6 @@
 }
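
The rewritten Sweep loop above visits black objects in address order and treats everything between the end of one object and the start of the next as free space, with a final gap up to area_end. The sketch below reproduces just that gap computation over hypothetical offsets; it is not the V8 free-list code.

#include <cstdio>
#include <vector>

struct LiveObject {
  int start;  // offset of the object within the page area
  int size;   // object size in bytes
};

static int SweepFreeBytes(int area_start, int area_end,
                          const std::vector<LiveObject>& live) {
  int free_start = area_start;
  int freed = 0;
  for (const LiveObject& object : live) {
    if (object.start != free_start) {
      freed += object.start - free_start;      // gap before this live object
    }
    free_start = object.start + object.size;   // continue after the object
  }
  if (free_start != area_end) {
    freed += area_end - free_start;            // trailing free space
  }
  return freed;
}

int main() {
  // Page area [0, 100) with live objects at [10, 30) and [50, 60):
  // free space is 10 + 20 + 40 = 70 bytes.
  std::vector<LiveObject> live = {{10, 20}, {50, 10}};
  std::printf("%d\n", SweepFreeBytes(0, 100, live));
  return 0;
}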
 
 
-static bool SetMarkBitsUnderInvalidatedCode(Code* code, bool value) {
-  Page* p = Page::FromAddress(code->address());
-
-  if (p->IsEvacuationCandidate() || p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
-    return false;
-  }
-
-  Address code_start = code->address();
-  Address code_end = code_start + code->Size();
-
-  uint32_t start_index = MemoryChunk::FastAddressToMarkbitIndex(code_start);
-  uint32_t end_index =
-      MemoryChunk::FastAddressToMarkbitIndex(code_end - kPointerSize);
-
-  Bitmap* b = p->markbits();
-
-  MarkBit start_mark_bit = b->MarkBitFromIndex(start_index);
-  MarkBit end_mark_bit = b->MarkBitFromIndex(end_index);
-
-  MarkBit::CellType* start_cell = start_mark_bit.cell();
-  MarkBit::CellType* end_cell = end_mark_bit.cell();
-
-  if (value) {
-    MarkBit::CellType start_mask = ~(start_mark_bit.mask() - 1);
-    MarkBit::CellType end_mask = (end_mark_bit.mask() << 1) - 1;
-
-    if (start_cell == end_cell) {
-      *start_cell |= start_mask & end_mask;
-    } else {
-      *start_cell |= start_mask;
-      for (MarkBit::CellType* cell = start_cell + 1; cell < end_cell; cell++) {
-        *cell = ~0;
-      }
-      *end_cell |= end_mask;
-    }
-  } else {
-    for (MarkBit::CellType* cell = start_cell; cell <= end_cell; cell++) {
-      *cell = 0;
-    }
-  }
-
-  return true;
-}
-
-
-static bool IsOnInvalidatedCodeObject(Address addr) {
-  // We did not record any slots in large objects thus
-  // we can safely go to the page from the slot address.
-  Page* p = Page::FromAddress(addr);
-
-  // First check owner's identity because old pointer and old data spaces
-  // are swept lazily and might still have non-zero mark-bits on some
-  // pages.
-  if (p->owner()->identity() != CODE_SPACE) return false;
-
-  // In code space only bits on evacuation candidates (but we don't record
-  // any slots on them) and under invalidated code objects are non-zero.
-  MarkBit mark_bit =
-      p->markbits()->MarkBitFromIndex(Page::FastAddressToMarkbitIndex(addr));
-
-  return mark_bit.Get();
-}
-
-
 void MarkCompactCollector::InvalidateCode(Code* code) {
   if (heap_->incremental_marking()->IsCompacting() &&
       !ShouldSkipEvacuationSlotRecording(code)) {
@@ -3329,7 +3453,11 @@
     MarkBit mark_bit = Marking::MarkBitFrom(code);
     if (Marking::IsWhite(mark_bit)) return;
 
-    invalidated_code_.Add(code);
+    // Ignore all slots that might have been recorded in the body of the
+    // deoptimized code object. Assumption: no slots will be recorded for
+    // this object after invalidating it.
+    RemoveObjectSlots(code->instruction_start(),
+                      code->address() + code->Size());
   }
 }
 
@@ -3340,145 +3468,252 @@
 }
 
 
-bool MarkCompactCollector::MarkInvalidatedCode() {
-  bool code_marked = false;
-
-  int length = invalidated_code_.length();
-  for (int i = 0; i < length; i++) {
-    Code* code = invalidated_code_[i];
-
-    if (SetMarkBitsUnderInvalidatedCode(code, true)) {
-      code_marked = true;
+void MarkCompactCollector::RemoveObjectSlots(Address start_slot,
+                                             Address end_slot) {
+  // Remove entries by replacing them with an old-space slot containing a smi
+  // that is located in an unmovable page.
+  int npages = evacuation_candidates_.length();
+  for (int i = 0; i < npages; i++) {
+    Page* p = evacuation_candidates_[i];
+    DCHECK(p->IsEvacuationCandidate() ||
+           p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
+    if (p->IsEvacuationCandidate()) {
+      SlotsBuffer::RemoveObjectSlots(heap_, p->slots_buffer(), start_slot,
+                                     end_slot);
     }
   }
-
-  return code_marked;
-}
-
-
-void MarkCompactCollector::RemoveDeadInvalidatedCode() {
-  int length = invalidated_code_.length();
-  for (int i = 0; i < length; i++) {
-    if (!IsMarked(invalidated_code_[i])) invalidated_code_[i] = NULL;
-  }
 }
 
 
-void MarkCompactCollector::ProcessInvalidatedCode(ObjectVisitor* visitor) {
-  int length = invalidated_code_.length();
-  for (int i = 0; i < length; i++) {
-    Code* code = invalidated_code_[i];
-    if (code != NULL) {
-      code->Iterate(visitor);
-      SetMarkBitsUnderInvalidatedCode(code, false);
+#ifdef VERIFY_HEAP
+static void VerifyAllBlackObjects(MemoryChunk* page) {
+  LiveObjectIterator<kAllLiveObjects> it(page);
+  HeapObject* object = NULL;
+  while ((object = it.Next()) != NULL) {
+    CHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
+  }
+}
+#endif  // VERIFY_HEAP
+
+
+bool MarkCompactCollector::VisitLiveObjects(MemoryChunk* page,
+                                            HeapObjectVisitor* visitor,
+                                            IterationMode mode) {
+#ifdef VERIFY_HEAP
+  VerifyAllBlackObjects(page);
+#endif  // VERIFY_HEAP
+
+  LiveObjectIterator<kBlackObjects> it(page);
+  HeapObject* object = nullptr;
+  while ((object = it.Next()) != nullptr) {
+    DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
+    if (!visitor->Visit(object)) {
+      if (mode == kClearMarkbits) {
+        page->markbits()->ClearRange(
+            page->AddressToMarkbitIndex(page->area_start()),
+            page->AddressToMarkbitIndex(object->address()));
+        RecomputeLiveBytes(page);
+      }
+      return false;
     }
   }
-  invalidated_code_.Rewind(0);
+  if (mode == kClearMarkbits) {
+    Bitmap::Clear(page);
+  }
+  return true;
+}
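
Aside, not part of the patch: the kClearMarkbits bail-out above clears the mark bits of everything already visited when a visitor rejects an object (e.g. an aborted evacuation), leaving only the not-yet-processed objects marked. A toy model of that behaviour with a plain bit vector and invented names:

#include <functional>
#include <vector>

struct ToyObject { int index; };  // stands in for an object's mark-bit index

bool VisitLiveObjects(std::vector<bool>* markbits,
                      const std::vector<ToyObject>& black_objects,
                      const std::function<bool(const ToyObject&)>& visit) {
  for (const ToyObject& object : black_objects) {
    if (!visit(object)) {
      // Clear the range before the failing object: those objects were already
      // processed, so their stale mark bits must not survive.
      for (int i = 0; i < object.index; i++) (*markbits)[i] = false;
      return false;
    }
  }
  markbits->assign(markbits->size(), false);  // full success: clear the page
  return true;
}
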
+
+
+void MarkCompactCollector::RecomputeLiveBytes(MemoryChunk* page) {
+  LiveObjectIterator<kBlackObjects> it(page);
+  int new_live_size = 0;
+  HeapObject* object = nullptr;
+  while ((object = it.Next()) != nullptr) {
+    new_live_size += object->Size();
+  }
+  page->SetLiveBytes(new_live_size);
+}
+
+
+void MarkCompactCollector::VisitLiveObjectsBody(Page* page,
+                                                ObjectVisitor* visitor) {
+#ifdef VERIFY_HEAP
+  VerifyAllBlackObjects(page);
+#endif  // VERIFY_HEAP
+
+  LiveObjectIterator<kBlackObjects> it(page);
+  HeapObject* object = NULL;
+  while ((object = it.Next()) != NULL) {
+    DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
+    Map* map = object->synchronized_map();
+    int size = object->SizeFromMap(map);
+    object->IterateBody(map->instance_type(), size, visitor);
+  }
+}
+
+
+void MarkCompactCollector::SweepAbortedPages() {
+  // Second pass on aborted pages.
+  for (int i = 0; i < evacuation_candidates_.length(); i++) {
+    Page* p = evacuation_candidates_[i];
+    if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
+      p->ClearFlag(MemoryChunk::COMPACTION_WAS_ABORTED);
+      PagedSpace* space = static_cast<PagedSpace*>(p->owner());
+      switch (space->identity()) {
+        case OLD_SPACE:
+          Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
+                IGNORE_FREE_SPACE>(space, nullptr, p, nullptr);
+          break;
+        case CODE_SPACE:
+          if (FLAG_zap_code_space) {
+            Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
+                  ZAP_FREE_SPACE>(space, NULL, p, nullptr);
+          } else {
+            Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
+                  IGNORE_FREE_SPACE>(space, NULL, p, nullptr);
+          }
+          break;
+        default:
+          UNREACHABLE();
+          break;
+      }
+    }
+  }
 }
 
 
 void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
+  GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_EVACUATE);
   Heap::RelocationLock relocation_lock(heap());
 
-  bool code_slots_filtering_required;
+  HashMap* local_pretenuring_feedback = nullptr;
   {
     GCTracer::Scope gc_scope(heap()->tracer(),
-                             GCTracer::Scope::MC_SWEEP_NEWSPACE);
-    code_slots_filtering_required = MarkInvalidatedCode();
-    EvacuateNewSpace();
+                             GCTracer::Scope::MC_EVACUATE_NEW_SPACE);
+    EvacuationScope evacuation_scope(this);
+    EvacuateNewSpacePrologue();
+    local_pretenuring_feedback = EvacuateNewSpaceInParallel();
+    heap_->new_space()->set_age_mark(heap_->new_space()->top());
   }
 
   {
     GCTracer::Scope gc_scope(heap()->tracer(),
-                             GCTracer::Scope::MC_EVACUATE_PAGES);
+                             GCTracer::Scope::MC_EVACUATE_CANDIDATES);
     EvacuationScope evacuation_scope(this);
-    EvacuatePages();
+    EvacuatePagesInParallel();
+  }
+
+  {
+    heap_->MergeAllocationSitePretenuringFeedback(*local_pretenuring_feedback);
+    delete local_pretenuring_feedback;
+  }
+
+  UpdatePointersAfterEvacuation();
+
+  {
+    GCTracer::Scope gc_scope(heap()->tracer(),
+                             GCTracer::Scope::MC_EVACUATE_CLEAN_UP);
+    // After updating all pointers, we can finally sweep the aborted pages,
+    // effectively overwriting any forward pointers.
+    SweepAbortedPages();
+
+    // EvacuateNewSpaceAndCandidates iterates over new space objects and, for
+    // ArrayBuffers, either re-registers them as live or promotes them. This
+    // is needed to properly free them.
+    heap()->array_buffer_tracker()->FreeDead(false);
+
+    // Deallocate evacuated candidate pages.
+    ReleaseEvacuationCandidates();
+  }
+
+#ifdef VERIFY_HEAP
+  if (FLAG_verify_heap && !sweeping_in_progress_) {
+    VerifyEvacuation(heap());
+  }
+#endif
+}
+
+
+void MarkCompactCollector::UpdatePointersAfterEvacuation() {
+  GCTracer::Scope gc_scope(heap()->tracer(),
+                           GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS);
+  {
+    GCTracer::Scope gc_scope(
+        heap()->tracer(),
+        GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED);
+    UpdateSlotsRecordedIn(migration_slots_buffer_);
+    if (FLAG_trace_fragmentation_verbose) {
+      PrintF("  migration slots buffer: %d\n",
+             SlotsBuffer::SizeOfChain(migration_slots_buffer_));
+    }
+    slots_buffer_allocator_->DeallocateChain(&migration_slots_buffer_);
+    DCHECK(migration_slots_buffer_ == NULL);
+
+    // TODO(hpayer): Process the slots buffers in parallel. This has to be done
+    // after evacuation of all pages finishes.
+    int buffers = evacuation_slots_buffers_.length();
+    for (int i = 0; i < buffers; i++) {
+      SlotsBuffer* buffer = evacuation_slots_buffers_[i];
+      UpdateSlotsRecordedIn(buffer);
+      slots_buffer_allocator_->DeallocateChain(&buffer);
+    }
+    evacuation_slots_buffers_.Rewind(0);
   }
 
   // Second pass: find pointers to new space and update them.
   PointersUpdatingVisitor updating_visitor(heap());
 
   {
-    GCTracer::Scope gc_scope(heap()->tracer(),
-                             GCTracer::Scope::MC_UPDATE_NEW_TO_NEW_POINTERS);
+    GCTracer::Scope gc_scope(
+        heap()->tracer(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW);
     // Update pointers in to space.
-    SemiSpaceIterator to_it(heap()->new_space()->bottom(),
-                            heap()->new_space()->top());
+    SemiSpaceIterator to_it(heap()->new_space());
     for (HeapObject* object = to_it.Next(); object != NULL;
          object = to_it.Next()) {
       Map* map = object->map();
       object->IterateBody(map->instance_type(), object->SizeFromMap(map),
                           &updating_visitor);
     }
-  }
-
-  {
-    GCTracer::Scope gc_scope(heap()->tracer(),
-                             GCTracer::Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS);
     // Update roots.
     heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
-  }
 
-  {
-    GCTracer::Scope gc_scope(heap()->tracer(),
-                             GCTracer::Scope::MC_UPDATE_OLD_TO_NEW_POINTERS);
     StoreBufferRebuildScope scope(heap_, heap_->store_buffer(),
                                   &Heap::ScavengeStoreBufferCallback);
-    heap_->store_buffer()->IteratePointersToNewSpaceAndClearMaps(
-        &UpdatePointer);
-  }
-
-  {
-    GCTracer::Scope gc_scope(heap()->tracer(),
-                             GCTracer::Scope::MC_UPDATE_POINTERS_TO_EVACUATED);
-    SlotsBuffer::UpdateSlotsRecordedIn(heap_, migration_slots_buffer_,
-                                       code_slots_filtering_required);
-    if (FLAG_trace_fragmentation) {
-      PrintF("  migration slots buffer: %d\n",
-             SlotsBuffer::SizeOfChain(migration_slots_buffer_));
-    }
-
-    if (compacting_ && was_marked_incrementally_) {
-      // It's difficult to filter out slots recorded for large objects.
-      LargeObjectIterator it(heap_->lo_space());
-      for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
-        // LargeObjectSpace is not swept yet thus we have to skip
-        // dead objects explicitly.
-        if (!IsMarked(obj)) continue;
-
-        Page* p = Page::FromAddress(obj->address());
-        if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
-          obj->Iterate(&updating_visitor);
-          p->ClearFlag(Page::RESCAN_ON_EVACUATION);
-        }
-      }
-    }
+    heap_->store_buffer()->IteratePointersToNewSpace(&UpdatePointer);
   }
 
   int npages = evacuation_candidates_.length();
   {
     GCTracer::Scope gc_scope(
         heap()->tracer(),
-        GCTracer::Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED);
+        GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_BETWEEN_EVACUATED);
     for (int i = 0; i < npages; i++) {
       Page* p = evacuation_candidates_[i];
       DCHECK(p->IsEvacuationCandidate() ||
              p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
 
       if (p->IsEvacuationCandidate()) {
-        SlotsBuffer::UpdateSlotsRecordedIn(heap_, p->slots_buffer(),
-                                           code_slots_filtering_required);
-        if (FLAG_trace_fragmentation) {
+        UpdateSlotsRecordedIn(p->slots_buffer());
+        if (FLAG_trace_fragmentation_verbose) {
           PrintF("  page %p slots buffer: %d\n", reinterpret_cast<void*>(p),
                  SlotsBuffer::SizeOfChain(p->slots_buffer()));
         }
+        slots_buffer_allocator_->DeallocateChain(p->slots_buffer_address());
 
         // Important: skip list should be cleared only after roots were updated
         // because root iteration traverses the stack and might have to find
         // code objects from non-updated pc pointing into evacuation candidate.
         SkipList* list = p->skip_list();
         if (list != NULL) list->Clear();
-      } else {
+
+        // First pass on aborted pages, fixing up all live objects.
+        if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
+          p->ClearEvacuationCandidate();
+          VisitLiveObjectsBody(p, &updating_visitor);
+        }
+      }
+
+      if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
         if (FLAG_gc_verbose) {
           PrintF("Sweeping 0x%" V8PRIxPTR " during evacuation.\n",
                  reinterpret_cast<intptr_t>(p));
@@ -3487,12 +3722,7 @@
         p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
 
         switch (space->identity()) {
-          case OLD_DATA_SPACE:
-            Sweep<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD,
-                  IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>(space, NULL, p,
-                                                       &updating_visitor);
-            break;
-          case OLD_POINTER_SPACE:
+          case OLD_SPACE:
             Sweep<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD,
                   IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>(space, NULL, p,
                                                        &updating_visitor);
@@ -3516,51 +3746,18 @@
     }
   }
 
-  GCTracer::Scope gc_scope(heap()->tracer(),
-                           GCTracer::Scope::MC_UPDATE_MISC_POINTERS);
+  {
+    GCTracer::Scope gc_scope(heap()->tracer(),
+                             GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK);
+    heap_->string_table()->Iterate(&updating_visitor);
 
-  // Update pointers from cells.
-  HeapObjectIterator cell_iterator(heap_->cell_space());
-  for (HeapObject* cell = cell_iterator.Next(); cell != NULL;
-       cell = cell_iterator.Next()) {
-    if (cell->IsCell()) {
-      Cell::BodyDescriptor::IterateBody(cell, &updating_visitor);
-    }
+    // Update pointers from external string table.
+    heap_->UpdateReferencesInExternalStringTable(
+        &UpdateReferenceInExternalStringTableEntry);
+
+    EvacuationWeakObjectRetainer evacuation_object_retainer;
+    heap()->ProcessAllWeakReferences(&evacuation_object_retainer);
   }
-
-  HeapObjectIterator js_global_property_cell_iterator(
-      heap_->property_cell_space());
-  for (HeapObject* cell = js_global_property_cell_iterator.Next(); cell != NULL;
-       cell = js_global_property_cell_iterator.Next()) {
-    if (cell->IsPropertyCell()) {
-      PropertyCell::BodyDescriptor::IterateBody(cell, &updating_visitor);
-    }
-  }
-
-  heap_->string_table()->Iterate(&updating_visitor);
-  updating_visitor.VisitPointer(heap_->weak_object_to_code_table_address());
-  if (heap_->weak_object_to_code_table()->IsHashTable()) {
-    WeakHashTable* table =
-        WeakHashTable::cast(heap_->weak_object_to_code_table());
-    table->Iterate(&updating_visitor);
-    table->Rehash(heap_->isolate()->factory()->undefined_value());
-  }
-
-  // Update pointers from external string table.
-  heap_->UpdateReferencesInExternalStringTable(
-      &UpdateReferenceInExternalStringTableEntry);
-
-  EvacuationWeakObjectRetainer evacuation_object_retainer;
-  heap()->ProcessWeakReferences(&evacuation_object_retainer);
-
-  // Visit invalidated code (we ignored all slots on it) and clear mark-bits
-  // under it.
-  ProcessInvalidatedCode(&updating_visitor);
-
-  heap_->isolate()->inner_pointer_to_code_cache()->Flush();
-
-  slots_buffer_allocator_.DeallocateChain(&migration_slots_buffer_);
-  DCHECK(migration_slots_buffer_ == NULL);
 }
 
 
@@ -3584,410 +3781,17 @@
     PagedSpace* space = static_cast<PagedSpace*>(p->owner());
     space->Free(p->area_start(), p->area_size());
     p->set_scan_on_scavenge(false);
-    slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
     p->ResetLiveBytes();
+    CHECK(p->WasSwept());
     space->ReleasePage(p);
   }
   evacuation_candidates_.Rewind(0);
   compacting_ = false;
+  heap()->FilterStoreBufferEntriesOnAboutToBeFreedPages();
   heap()->FreeQueuedChunks();
 }
 
 
-static const int kStartTableEntriesPerLine = 5;
-static const int kStartTableLines = 171;
-static const int kStartTableInvalidLine = 127;
-static const int kStartTableUnusedEntry = 126;
-
-#define _ kStartTableUnusedEntry
-#define X kStartTableInvalidLine
-// Mark-bit to object start offset table.
-//
-// The line is indexed by the mark bits in a byte.  The first number on
-// the line describes the number of live object starts for the line and the
-// other numbers on the line describe the offsets (in words) of the object
-// starts.
-//
-// Since objects are at least 2 words large we don't have entries for two
-// consecutive 1 bits.  All entries after 170 have at least 2 consecutive bits.
-char kStartTable[kStartTableLines * kStartTableEntriesPerLine] = {
-    0, _, _,
-    _, _,  // 0
-    1, 0, _,
-    _, _,  // 1
-    1, 1, _,
-    _, _,  // 2
-    X, _, _,
-    _, _,  // 3
-    1, 2, _,
-    _, _,  // 4
-    2, 0, 2,
-    _, _,  // 5
-    X, _, _,
-    _, _,  // 6
-    X, _, _,
-    _, _,  // 7
-    1, 3, _,
-    _, _,  // 8
-    2, 0, 3,
-    _, _,  // 9
-    2, 1, 3,
-    _, _,  // 10
-    X, _, _,
-    _, _,  // 11
-    X, _, _,
-    _, _,  // 12
-    X, _, _,
-    _, _,  // 13
-    X, _, _,
-    _, _,  // 14
-    X, _, _,
-    _, _,  // 15
-    1, 4, _,
-    _, _,  // 16
-    2, 0, 4,
-    _, _,  // 17
-    2, 1, 4,
-    _, _,  // 18
-    X, _, _,
-    _, _,  // 19
-    2, 2, 4,
-    _, _,  // 20
-    3, 0, 2,
-    4, _,  // 21
-    X, _, _,
-    _, _,  // 22
-    X, _, _,
-    _, _,  // 23
-    X, _, _,
-    _, _,  // 24
-    X, _, _,
-    _, _,  // 25
-    X, _, _,
-    _, _,  // 26
-    X, _, _,
-    _, _,  // 27
-    X, _, _,
-    _, _,  // 28
-    X, _, _,
-    _, _,  // 29
-    X, _, _,
-    _, _,  // 30
-    X, _, _,
-    _, _,  // 31
-    1, 5, _,
-    _, _,  // 32
-    2, 0, 5,
-    _, _,  // 33
-    2, 1, 5,
-    _, _,  // 34
-    X, _, _,
-    _, _,  // 35
-    2, 2, 5,
-    _, _,  // 36
-    3, 0, 2,
-    5, _,  // 37
-    X, _, _,
-    _, _,  // 38
-    X, _, _,
-    _, _,  // 39
-    2, 3, 5,
-    _, _,  // 40
-    3, 0, 3,
-    5, _,  // 41
-    3, 1, 3,
-    5, _,  // 42
-    X, _, _,
-    _, _,  // 43
-    X, _, _,
-    _, _,  // 44
-    X, _, _,
-    _, _,  // 45
-    X, _, _,
-    _, _,  // 46
-    X, _, _,
-    _, _,  // 47
-    X, _, _,
-    _, _,  // 48
-    X, _, _,
-    _, _,  // 49
-    X, _, _,
-    _, _,  // 50
-    X, _, _,
-    _, _,  // 51
-    X, _, _,
-    _, _,  // 52
-    X, _, _,
-    _, _,  // 53
-    X, _, _,
-    _, _,  // 54
-    X, _, _,
-    _, _,  // 55
-    X, _, _,
-    _, _,  // 56
-    X, _, _,
-    _, _,  // 57
-    X, _, _,
-    _, _,  // 58
-    X, _, _,
-    _, _,  // 59
-    X, _, _,
-    _, _,  // 60
-    X, _, _,
-    _, _,  // 61
-    X, _, _,
-    _, _,  // 62
-    X, _, _,
-    _, _,  // 63
-    1, 6, _,
-    _, _,  // 64
-    2, 0, 6,
-    _, _,  // 65
-    2, 1, 6,
-    _, _,  // 66
-    X, _, _,
-    _, _,  // 67
-    2, 2, 6,
-    _, _,  // 68
-    3, 0, 2,
-    6, _,  // 69
-    X, _, _,
-    _, _,  // 70
-    X, _, _,
-    _, _,  // 71
-    2, 3, 6,
-    _, _,  // 72
-    3, 0, 3,
-    6, _,  // 73
-    3, 1, 3,
-    6, _,  // 74
-    X, _, _,
-    _, _,  // 75
-    X, _, _,
-    _, _,  // 76
-    X, _, _,
-    _, _,  // 77
-    X, _, _,
-    _, _,  // 78
-    X, _, _,
-    _, _,  // 79
-    2, 4, 6,
-    _, _,  // 80
-    3, 0, 4,
-    6, _,  // 81
-    3, 1, 4,
-    6, _,  // 82
-    X, _, _,
-    _, _,  // 83
-    3, 2, 4,
-    6, _,  // 84
-    4, 0, 2,
-    4, 6,  // 85
-    X, _, _,
-    _, _,  // 86
-    X, _, _,
-    _, _,  // 87
-    X, _, _,
-    _, _,  // 88
-    X, _, _,
-    _, _,  // 89
-    X, _, _,
-    _, _,  // 90
-    X, _, _,
-    _, _,  // 91
-    X, _, _,
-    _, _,  // 92
-    X, _, _,
-    _, _,  // 93
-    X, _, _,
-    _, _,  // 94
-    X, _, _,
-    _, _,  // 95
-    X, _, _,
-    _, _,  // 96
-    X, _, _,
-    _, _,  // 97
-    X, _, _,
-    _, _,  // 98
-    X, _, _,
-    _, _,  // 99
-    X, _, _,
-    _, _,  // 100
-    X, _, _,
-    _, _,  // 101
-    X, _, _,
-    _, _,  // 102
-    X, _, _,
-    _, _,  // 103
-    X, _, _,
-    _, _,  // 104
-    X, _, _,
-    _, _,  // 105
-    X, _, _,
-    _, _,  // 106
-    X, _, _,
-    _, _,  // 107
-    X, _, _,
-    _, _,  // 108
-    X, _, _,
-    _, _,  // 109
-    X, _, _,
-    _, _,  // 110
-    X, _, _,
-    _, _,  // 111
-    X, _, _,
-    _, _,  // 112
-    X, _, _,
-    _, _,  // 113
-    X, _, _,
-    _, _,  // 114
-    X, _, _,
-    _, _,  // 115
-    X, _, _,
-    _, _,  // 116
-    X, _, _,
-    _, _,  // 117
-    X, _, _,
-    _, _,  // 118
-    X, _, _,
-    _, _,  // 119
-    X, _, _,
-    _, _,  // 120
-    X, _, _,
-    _, _,  // 121
-    X, _, _,
-    _, _,  // 122
-    X, _, _,
-    _, _,  // 123
-    X, _, _,
-    _, _,  // 124
-    X, _, _,
-    _, _,  // 125
-    X, _, _,
-    _, _,  // 126
-    X, _, _,
-    _, _,  // 127
-    1, 7, _,
-    _, _,  // 128
-    2, 0, 7,
-    _, _,  // 129
-    2, 1, 7,
-    _, _,  // 130
-    X, _, _,
-    _, _,  // 131
-    2, 2, 7,
-    _, _,  // 132
-    3, 0, 2,
-    7, _,  // 133
-    X, _, _,
-    _, _,  // 134
-    X, _, _,
-    _, _,  // 135
-    2, 3, 7,
-    _, _,  // 136
-    3, 0, 3,
-    7, _,  // 137
-    3, 1, 3,
-    7, _,  // 138
-    X, _, _,
-    _, _,  // 139
-    X, _, _,
-    _, _,  // 140
-    X, _, _,
-    _, _,  // 141
-    X, _, _,
-    _, _,  // 142
-    X, _, _,
-    _, _,  // 143
-    2, 4, 7,
-    _, _,  // 144
-    3, 0, 4,
-    7, _,  // 145
-    3, 1, 4,
-    7, _,  // 146
-    X, _, _,
-    _, _,  // 147
-    3, 2, 4,
-    7, _,  // 148
-    4, 0, 2,
-    4, 7,  // 149
-    X, _, _,
-    _, _,  // 150
-    X, _, _,
-    _, _,  // 151
-    X, _, _,
-    _, _,  // 152
-    X, _, _,
-    _, _,  // 153
-    X, _, _,
-    _, _,  // 154
-    X, _, _,
-    _, _,  // 155
-    X, _, _,
-    _, _,  // 156
-    X, _, _,
-    _, _,  // 157
-    X, _, _,
-    _, _,  // 158
-    X, _, _,
-    _, _,  // 159
-    2, 5, 7,
-    _, _,  // 160
-    3, 0, 5,
-    7, _,  // 161
-    3, 1, 5,
-    7, _,  // 162
-    X, _, _,
-    _, _,  // 163
-    3, 2, 5,
-    7, _,  // 164
-    4, 0, 2,
-    5, 7,  // 165
-    X, _, _,
-    _, _,  // 166
-    X, _, _,
-    _, _,  // 167
-    3, 3, 5,
-    7, _,  // 168
-    4, 0, 3,
-    5, 7,  // 169
-    4, 1, 3,
-    5, 7  // 170
-};
-#undef _
-#undef X
-
-
-// Takes a word of mark bits.  Returns the number of objects that start in the
-// range.  Puts the offsets of the words in the supplied array.
-static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts) {
-  int objects = 0;
-  int offset = 0;
-
-  // No consecutive 1 bits.
-  DCHECK((mark_bits & 0x180) != 0x180);
-  DCHECK((mark_bits & 0x18000) != 0x18000);
-  DCHECK((mark_bits & 0x1800000) != 0x1800000);
-
-  while (mark_bits != 0) {
-    int byte = (mark_bits & 0xff);
-    mark_bits >>= 8;
-    if (byte != 0) {
-      DCHECK(byte < kStartTableLines);  // No consecutive 1 bits.
-      char* table = kStartTable + byte * kStartTableEntriesPerLine;
-      int objects_in_these_8_words = table[0];
-      DCHECK(objects_in_these_8_words != kStartTableInvalidLine);
-      DCHECK(objects_in_these_8_words < kStartTableEntriesPerLine);
-      for (int i = 0; i < objects_in_these_8_words; i++) {
-        starts[objects++] = offset + table[1 + i];
-      }
-    }
-    offset += 8;
-  }
-  return objects;
-}
-
-
 int MarkCompactCollector::SweepInParallel(PagedSpace* space,
                                           int required_freed_bytes) {
   int max_freed = 0;
@@ -4009,20 +3813,40 @@
 
 int MarkCompactCollector::SweepInParallel(Page* page, PagedSpace* space) {
   int max_freed = 0;
-  if (page->TryParallelSweeping()) {
-    FreeList* free_list = space == heap()->old_pointer_space()
-                              ? free_list_old_pointer_space_.get()
-                              : free_list_old_data_space_.get();
+  if (page->TryLock()) {
+    // If this page was already swept in the meantime, we can return here.
+    if (page->parallel_sweeping_state().Value() !=
+        MemoryChunk::kSweepingPending) {
+      page->mutex()->Unlock();
+      return 0;
+    }
+    page->parallel_sweeping_state().SetValue(MemoryChunk::kSweepingInProgress);
+    FreeList* free_list;
     FreeList private_free_list(space);
-    max_freed = Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
-                      IGNORE_FREE_SPACE>(space, &private_free_list, page, NULL);
+    if (space->identity() == OLD_SPACE) {
+      free_list = free_list_old_space_.get();
+      max_freed =
+          Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
+                IGNORE_FREE_SPACE>(space, &private_free_list, page, NULL);
+    } else if (space->identity() == CODE_SPACE) {
+      free_list = free_list_code_space_.get();
+      max_freed =
+          Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, REBUILD_SKIP_LIST,
+                IGNORE_FREE_SPACE>(space, &private_free_list, page, NULL);
+    } else {
+      free_list = free_list_map_space_.get();
+      max_freed =
+          Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
+                IGNORE_FREE_SPACE>(space, &private_free_list, page, NULL);
+    }
     free_list->Concatenate(&private_free_list);
+    page->mutex()->Unlock();
   }
   return max_freed;
 }
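
Aside, not part of the patch: the reworked SweepInParallel claims a page under its mutex, re-checks the pending state so a page already swept by another thread is skipped, and sweeps into a thread-private free list that is merged afterwards. A rough stand-alone model of that protocol (all names invented; the real FreeList::Concatenate handles its own locking):

#include <mutex>
#include <vector>

enum class SweepState { kPending, kInProgress, kDone };

struct ToyPage {
  std::mutex mutex;
  SweepState state = SweepState::kPending;
  std::vector<int> dead_block_sizes{32, 64};  // pretend free chunks on the page
};

// Returns the largest freed chunk, or 0 if another thread got the page first.
int SweepPageInParallel(ToyPage* page, std::vector<int>* shared_free_list,
                        std::mutex* shared_free_list_mutex) {
  int max_freed = 0;
  if (page->mutex.try_lock()) {
    if (page->state != SweepState::kPending) {
      page->mutex.unlock();  // already swept in the meantime
      return 0;
    }
    page->state = SweepState::kInProgress;
    std::vector<int> private_free_list;  // no contention while sweeping
    for (int size : page->dead_block_sizes) {
      private_free_list.push_back(size);
      if (size > max_freed) max_freed = size;
    }
    {
      std::lock_guard<std::mutex> guard(*shared_free_list_mutex);
      shared_free_list->insert(shared_free_list->end(),
                               private_free_list.begin(),
                               private_free_list.end());
    }
    page->state = SweepState::kDone;
    page->mutex.unlock();
  }
  return max_freed;
}
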
 
 
-void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
+void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
   space->ClearStats();
 
   // We defensively initialize end_of_unswept_pages_ here with the first page
@@ -4037,7 +3861,7 @@
 
   while (it.has_next()) {
     Page* p = it.next();
-    DCHECK(p->parallel_sweeping() == MemoryChunk::SWEEPING_DONE);
+    DCHECK(p->parallel_sweeping_state().Value() == MemoryChunk::kSweepingDone);
 
     // Clear sweeping flags indicating that marking bits are still intact.
     p->ClearWasSwept();
@@ -4049,71 +3873,61 @@
       continue;
     }
 
+    if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) {
+      // We need to sweep the page to get it into an iterable state again.
+      // Note that this adds unusable memory to the free list; it is dropped
+      // from the free list again later. Since we only use this flag for
+      // testing, this is fine.
+      Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
+            IGNORE_FREE_SPACE>(space, nullptr, p, nullptr);
+      continue;
+    }
+
     // One unused page is kept, all further are released before sweeping them.
     if (p->LiveBytes() == 0) {
       if (unused_page_present) {
         if (FLAG_gc_verbose) {
-          PrintF("Sweeping 0x%" V8PRIxPTR " released page.\n",
-                 reinterpret_cast<intptr_t>(p));
+          PrintIsolate(isolate(), "sweeping: released page: %p", p);
         }
-        // Adjust unswept free bytes because releasing a page expects said
-        // counter to be accurate for unswept pages.
-        space->IncreaseUnsweptFreeBytes(p);
         space->ReleasePage(p);
         continue;
       }
       unused_page_present = true;
     }
 
-    switch (sweeper) {
-      case CONCURRENT_SWEEPING:
-        if (!parallel_sweeping_active) {
-          if (FLAG_gc_verbose) {
-            PrintF("Sweeping 0x%" V8PRIxPTR ".\n",
-                   reinterpret_cast<intptr_t>(p));
-          }
-          Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
-                IGNORE_FREE_SPACE>(space, NULL, p, NULL);
-          pages_swept++;
-          parallel_sweeping_active = true;
-        } else {
-          if (FLAG_gc_verbose) {
-            PrintF("Sweeping 0x%" V8PRIxPTR " in parallel.\n",
-                   reinterpret_cast<intptr_t>(p));
-          }
-          p->set_parallel_sweeping(MemoryChunk::SWEEPING_PENDING);
-          space->IncreaseUnsweptFreeBytes(p);
-        }
-        space->set_end_of_unswept_pages(p);
-        break;
-      case SEQUENTIAL_SWEEPING: {
-        if (FLAG_gc_verbose) {
-          PrintF("Sweeping 0x%" V8PRIxPTR ".\n", reinterpret_cast<intptr_t>(p));
-        }
-        if (space->identity() == CODE_SPACE && FLAG_zap_code_space) {
+    if (!parallel_sweeping_active) {
+      if (FLAG_gc_verbose) {
+        PrintIsolate(isolate(), "sweeping: %p", p);
+      }
+      if (space->identity() == CODE_SPACE) {
+        if (FLAG_zap_code_space) {
           Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
                 ZAP_FREE_SPACE>(space, NULL, p, NULL);
-        } else if (space->identity() == CODE_SPACE) {
+        } else {
           Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
                 IGNORE_FREE_SPACE>(space, NULL, p, NULL);
-        } else {
-          Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
-                IGNORE_FREE_SPACE>(space, NULL, p, NULL);
         }
-        pages_swept++;
-        break;
+      } else {
+        Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
+              IGNORE_FREE_SPACE>(space, NULL, p, NULL);
       }
-      default: { UNREACHABLE(); }
+      pages_swept++;
+      parallel_sweeping_active = true;
+    } else {
+      if (FLAG_gc_verbose) {
+        PrintIsolate(isolate(), "sweeping: initialized for parallel: %p", p);
+      }
+      p->parallel_sweeping_state().SetValue(MemoryChunk::kSweepingPending);
+      int to_sweep = p->area_size() - p->LiveBytes();
+      space->accounting_stats_.ShrinkSpace(to_sweep);
     }
+    space->set_end_of_unswept_pages(p);
   }
 
   if (FLAG_gc_verbose) {
-    PrintF("SweepSpace: %s (%d pages swept)\n",
-           AllocationSpaceName(space->identity()), pages_swept);
+    PrintIsolate(isolate(), "sweeping: space=%s pages_swept=%d",
+                 AllocationSpaceName(space->identity()), pages_swept);
   }
-
-  // Give pages that are queued to be freed back to the OS.
-  heap()->FreeQueuedChunks();
 }
 
 
@@ -4121,69 +3935,46 @@
   GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_SWEEP);
   double start_time = 0.0;
   if (FLAG_print_cumulative_gc_stat) {
-    start_time = base::OS::TimeCurrentMillis();
+    start_time = heap_->MonotonicallyIncreasingTimeInMs();
   }
 
 #ifdef DEBUG
   state_ = SWEEP_SPACES;
 #endif
+
   MoveEvacuationCandidatesToEndOfPagesList();
 
-  // Noncompacting collections simply sweep the spaces to clear the mark
-  // bits and free the nonlive blocks (for old and map spaces).  We sweep
-  // the map space last because freeing non-live maps overwrites them and
-  // the other spaces rely on possibly non-live maps to get the sizes for
-  // non-live objects.
   {
-    GCTracer::Scope sweep_scope(heap()->tracer(),
-                                GCTracer::Scope::MC_SWEEP_OLDSPACE);
-    {
-      SweepSpace(heap()->old_pointer_space(), CONCURRENT_SWEEPING);
-      SweepSpace(heap()->old_data_space(), CONCURRENT_SWEEPING);
-    }
     sweeping_in_progress_ = true;
+    {
+      GCTracer::Scope sweep_scope(heap()->tracer(),
+                                  GCTracer::Scope::MC_SWEEP_OLD);
+      StartSweepSpace(heap()->old_space());
+    }
+    {
+      GCTracer::Scope sweep_scope(heap()->tracer(),
+                                  GCTracer::Scope::MC_SWEEP_CODE);
+      StartSweepSpace(heap()->code_space());
+    }
+    {
+      GCTracer::Scope sweep_scope(heap()->tracer(),
+                                  GCTracer::Scope::MC_SWEEP_MAP);
+      StartSweepSpace(heap()->map_space());
+    }
     if (FLAG_concurrent_sweeping) {
       StartSweeperThreads();
     }
   }
-  RemoveDeadInvalidatedCode();
 
-  {
-    GCTracer::Scope sweep_scope(heap()->tracer(),
-                                GCTracer::Scope::MC_SWEEP_CODE);
-    SweepSpace(heap()->code_space(), SEQUENTIAL_SWEEPING);
-  }
-
-  {
-    GCTracer::Scope sweep_scope(heap()->tracer(),
-                                GCTracer::Scope::MC_SWEEP_CELL);
-    SweepSpace(heap()->cell_space(), SEQUENTIAL_SWEEPING);
-    SweepSpace(heap()->property_cell_space(), SEQUENTIAL_SWEEPING);
-  }
-
-  EvacuateNewSpaceAndCandidates();
-
-  // ClearNonLiveTransitions depends on precise sweeping of map space to
-  // detect whether unmarked map became dead in this collection or in one
-  // of the previous ones.
-  {
-    GCTracer::Scope sweep_scope(heap()->tracer(),
-                                GCTracer::Scope::MC_SWEEP_MAP);
-    SweepSpace(heap()->map_space(), SEQUENTIAL_SWEEPING);
-  }
-
-  // Deallocate unmarked objects and clear marked bits for marked objects.
+  // Deallocate unmarked large objects.
   heap_->lo_space()->FreeUnmarkedObjects();
 
-  // Deallocate evacuated candidate pages.
-  ReleaseEvacuationCandidates();
-  CodeRange* code_range = heap()->isolate()->code_range();
-  if (code_range != NULL && code_range->valid()) {
-    code_range->ReserveEmergencyBlock();
-  }
+  // Give pages that are queued to be freed back to the OS. Invalid store
+  // buffer entries are already filtered out. We can just release the memory.
+  heap()->FreeQueuedChunks();
 
   if (FLAG_print_cumulative_gc_stat) {
-    heap_->tracer()->AddSweepingTime(base::OS::TimeCurrentMillis() -
+    heap_->tracer()->AddSweepingTime(heap_->MonotonicallyIncreasingTimeInMs() -
                                      start_time);
   }
 }
@@ -4193,40 +3984,20 @@
   PageIterator it(space);
   while (it.has_next()) {
     Page* p = it.next();
-    if (p->parallel_sweeping() == MemoryChunk::SWEEPING_FINALIZE) {
-      p->set_parallel_sweeping(MemoryChunk::SWEEPING_DONE);
+    if (p->parallel_sweeping_state().Value() ==
+        MemoryChunk::kSweepingFinalize) {
+      p->parallel_sweeping_state().SetValue(MemoryChunk::kSweepingDone);
       p->SetWasSwept();
     }
-    DCHECK(p->parallel_sweeping() == MemoryChunk::SWEEPING_DONE);
+    DCHECK(p->parallel_sweeping_state().Value() == MemoryChunk::kSweepingDone);
   }
 }
 
 
 void MarkCompactCollector::ParallelSweepSpacesComplete() {
-  ParallelSweepSpaceComplete(heap()->old_pointer_space());
-  ParallelSweepSpaceComplete(heap()->old_data_space());
-}
-
-
-void MarkCompactCollector::EnableCodeFlushing(bool enable) {
-  if (isolate()->debug()->is_loaded() ||
-      isolate()->debug()->has_break_points()) {
-    enable = false;
-  }
-
-  if (enable) {
-    if (code_flusher_ != NULL) return;
-    code_flusher_ = new CodeFlusher(isolate());
-  } else {
-    if (code_flusher_ == NULL) return;
-    code_flusher_->EvictAllCandidates();
-    delete code_flusher_;
-    code_flusher_ = NULL;
-  }
-
-  if (FLAG_trace_code_flushing) {
-    PrintF("[code-flushing is now %s]\n", enable ? "on" : "off");
-  }
+  ParallelSweepSpaceComplete(heap()->old_space());
+  ParallelSweepSpaceComplete(heap()->code_space());
+  ParallelSweepSpaceComplete(heap()->map_space());
 }
 
 
@@ -4250,85 +4021,39 @@
 }
 
 
-bool SlotsBuffer::IsTypedSlot(ObjectSlot slot) {
-  return reinterpret_cast<uintptr_t>(slot) < NUMBER_OF_SLOT_TYPES;
-}
-
-
-bool SlotsBuffer::AddTo(SlotsBufferAllocator* allocator,
-                        SlotsBuffer** buffer_address, SlotType type,
-                        Address addr, AdditionMode mode) {
-  SlotsBuffer* buffer = *buffer_address;
-  if (buffer == NULL || !buffer->HasSpaceForTypedSlot()) {
-    if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
-      allocator->DeallocateChain(buffer_address);
-      return false;
-    }
-    buffer = allocator->AllocateBuffer(buffer);
-    *buffer_address = buffer;
+void MarkCompactCollector::EvictPopularEvacuationCandidate(Page* page) {
+  if (FLAG_trace_fragmentation) {
+    PrintF("Page %p is too popular. Disabling evacuation.\n",
+           reinterpret_cast<void*>(page));
   }
-  DCHECK(buffer->HasSpaceForTypedSlot());
-  buffer->Add(reinterpret_cast<ObjectSlot>(type));
-  buffer->Add(reinterpret_cast<ObjectSlot>(addr));
-  return true;
+
+  isolate()->CountUsage(v8::Isolate::UseCounterFeature::kSlotsBufferOverflow);
+
+  // TODO(gc) If all evacuation candidates are too popular we
+  // should stop slots recording entirely.
+  page->ClearEvacuationCandidate();
+
+  DCHECK(!page->IsFlagSet(Page::POPULAR_PAGE));
+  page->SetFlag(Page::POPULAR_PAGE);
+
+  // We were not collecting slots on this page that point to other evacuation
+  // candidates, so we have to rescan the page after evacuation to discover
+  // and update all pointers to evacuated objects.
+  page->SetFlag(Page::RESCAN_ON_EVACUATION);
 }
 
 
-static inline SlotsBuffer::SlotType SlotTypeForRMode(RelocInfo::Mode rmode) {
-  if (RelocInfo::IsCodeTarget(rmode)) {
-    return SlotsBuffer::CODE_TARGET_SLOT;
-  } else if (RelocInfo::IsEmbeddedObject(rmode)) {
-    return SlotsBuffer::EMBEDDED_OBJECT_SLOT;
-  } else if (RelocInfo::IsDebugBreakSlot(rmode)) {
-    return SlotsBuffer::DEBUG_TARGET_SLOT;
-  } else if (RelocInfo::IsJSReturn(rmode)) {
-    return SlotsBuffer::JS_RETURN_SLOT;
-  }
-  UNREACHABLE();
-  return SlotsBuffer::NUMBER_OF_SLOT_TYPES;
-}
-
-
-void MarkCompactCollector::RecordRelocSlot(RelocInfo* rinfo, Object* target) {
-  Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
-  RelocInfo::Mode rmode = rinfo->rmode();
-  if (target_page->IsEvacuationCandidate() &&
-      (rinfo->host() == NULL ||
-       !ShouldSkipEvacuationSlotRecording(rinfo->host()))) {
-    bool success;
-    if (RelocInfo::IsEmbeddedObject(rmode) && rinfo->IsInConstantPool()) {
-      // This doesn't need to be typed since it is just a normal heap pointer.
-      Object** target_pointer =
-          reinterpret_cast<Object**>(rinfo->constant_pool_entry_address());
-      success = SlotsBuffer::AddTo(
-          &slots_buffer_allocator_, target_page->slots_buffer_address(),
-          target_pointer, SlotsBuffer::FAIL_ON_OVERFLOW);
-    } else if (RelocInfo::IsCodeTarget(rmode) && rinfo->IsInConstantPool()) {
-      success = SlotsBuffer::AddTo(
-          &slots_buffer_allocator_, target_page->slots_buffer_address(),
-          SlotsBuffer::CODE_ENTRY_SLOT, rinfo->constant_pool_entry_address(),
-          SlotsBuffer::FAIL_ON_OVERFLOW);
-    } else {
-      success = SlotsBuffer::AddTo(
-          &slots_buffer_allocator_, target_page->slots_buffer_address(),
-          SlotTypeForRMode(rmode), rinfo->pc(), SlotsBuffer::FAIL_ON_OVERFLOW);
-    }
-    if (!success) {
-      EvictEvacuationCandidate(target_page);
-    }
-  }
-}
-
-
-void MarkCompactCollector::RecordCodeEntrySlot(Address slot, Code* target) {
+void MarkCompactCollector::RecordCodeEntrySlot(HeapObject* object, Address slot,
+                                               Code* target) {
   Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
   if (target_page->IsEvacuationCandidate() &&
-      !ShouldSkipEvacuationSlotRecording(reinterpret_cast<Object**>(slot))) {
-    if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
+      !ShouldSkipEvacuationSlotRecording(object)) {
+    if (!SlotsBuffer::AddTo(slots_buffer_allocator_,
                             target_page->slots_buffer_address(),
                             SlotsBuffer::CODE_ENTRY_SLOT, slot,
                             SlotsBuffer::FAIL_ON_OVERFLOW)) {
-      EvictEvacuationCandidate(target_page);
+      EvictPopularEvacuationCandidate(target_page);
     }
   }
 }
@@ -4342,76 +4067,11 @@
             pc);
     MarkBit mark_bit = Marking::MarkBitFrom(host);
     if (Marking::IsBlack(mark_bit)) {
-      RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
+      RelocInfo rinfo(isolate(), pc, RelocInfo::CODE_TARGET, 0, host);
       RecordRelocSlot(&rinfo, target);
     }
   }
 }
 
-
-static inline SlotsBuffer::SlotType DecodeSlotType(
-    SlotsBuffer::ObjectSlot slot) {
-  return static_cast<SlotsBuffer::SlotType>(reinterpret_cast<intptr_t>(slot));
-}
-
-
-void SlotsBuffer::UpdateSlots(Heap* heap) {
-  PointersUpdatingVisitor v(heap);
-
-  for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
-    ObjectSlot slot = slots_[slot_idx];
-    if (!IsTypedSlot(slot)) {
-      PointersUpdatingVisitor::UpdateSlot(heap, slot);
-    } else {
-      ++slot_idx;
-      DCHECK(slot_idx < idx_);
-      UpdateSlot(heap->isolate(), &v, DecodeSlotType(slot),
-                 reinterpret_cast<Address>(slots_[slot_idx]));
-    }
-  }
-}
-
-
-void SlotsBuffer::UpdateSlotsWithFilter(Heap* heap) {
-  PointersUpdatingVisitor v(heap);
-
-  for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
-    ObjectSlot slot = slots_[slot_idx];
-    if (!IsTypedSlot(slot)) {
-      if (!IsOnInvalidatedCodeObject(reinterpret_cast<Address>(slot))) {
-        PointersUpdatingVisitor::UpdateSlot(heap, slot);
-      }
-    } else {
-      ++slot_idx;
-      DCHECK(slot_idx < idx_);
-      Address pc = reinterpret_cast<Address>(slots_[slot_idx]);
-      if (!IsOnInvalidatedCodeObject(pc)) {
-        UpdateSlot(heap->isolate(), &v, DecodeSlotType(slot),
-                   reinterpret_cast<Address>(slots_[slot_idx]));
-      }
-    }
-  }
-}
-
-
-SlotsBuffer* SlotsBufferAllocator::AllocateBuffer(SlotsBuffer* next_buffer) {
-  return new SlotsBuffer(next_buffer);
-}
-
-
-void SlotsBufferAllocator::DeallocateBuffer(SlotsBuffer* buffer) {
-  delete buffer;
-}
-
-
-void SlotsBufferAllocator::DeallocateChain(SlotsBuffer** buffer_address) {
-  SlotsBuffer* buffer = *buffer_address;
-  while (buffer != NULL) {
-    SlotsBuffer* next_buffer = buffer->next();
-    DeallocateBuffer(buffer);
-    buffer = next_buffer;
-  }
-  *buffer_address = NULL;
-}
-}
-}  // namespace v8::internal
+}  // namespace internal
+}  // namespace v8
diff --git a/src/heap/mark-compact.h b/src/heap/mark-compact.h
index e26e06c..cfb2d9d 100644
--- a/src/heap/mark-compact.h
+++ b/src/heap/mark-compact.h
@@ -16,18 +16,24 @@
 // to the first live object in the page (only used for old and map objects).
 typedef bool (*IsAliveFunction)(HeapObject* obj, int* size, int* offset);
 
+// Callback function to mark an object in a given heap.
+typedef void (*MarkObjectFunction)(Heap* heap, HeapObject* object);
+
 // Forward declarations.
 class CodeFlusher;
 class MarkCompactCollector;
 class MarkingVisitor;
 class RootMarkingVisitor;
+class SlotsBuffer;
+class SlotsBufferAllocator;
 
 
-class Marking {
+class Marking : public AllStatic {
  public:
-  explicit Marking(Heap* heap) : heap_(heap) {}
-
-  INLINE(static MarkBit MarkBitFrom(Address addr));
+  INLINE(static MarkBit MarkBitFrom(Address addr)) {
+    MemoryChunk* p = MemoryChunk::FromAddress(addr);
+    return p->markbits()->MarkBitFromIndex(p->AddressToMarkbitIndex(addr));
+  }
 
   INLINE(static MarkBit MarkBitFrom(HeapObject* obj)) {
     return MarkBitFrom(reinterpret_cast<Address>(obj));
@@ -39,35 +45,71 @@
     return !mark_bit.Get() && mark_bit.Next().Get();
   }
 
-  // Black markbits: 10 - this is required by the sweeper.
+  // Black markbits: 11
   static const char* kBlackBitPattern;
   INLINE(static bool IsBlack(MarkBit mark_bit)) {
-    return mark_bit.Get() && !mark_bit.Next().Get();
+    return mark_bit.Get() && mark_bit.Next().Get();
   }
 
   // White markbits: 00 - this is required by the mark bit clearer.
   static const char* kWhiteBitPattern;
-  INLINE(static bool IsWhite(MarkBit mark_bit)) { return !mark_bit.Get(); }
+  INLINE(static bool IsWhite(MarkBit mark_bit)) {
+    DCHECK(!IsImpossible(mark_bit));
+    return !mark_bit.Get();
+  }
 
-  // Grey markbits: 11
+  // Grey markbits: 10
   static const char* kGreyBitPattern;
   INLINE(static bool IsGrey(MarkBit mark_bit)) {
-    return mark_bit.Get() && mark_bit.Next().Get();
+    return mark_bit.Get() && !mark_bit.Next().Get();
   }
 
+  // IsBlackOrGrey assumes that the first bit is set for black or grey
+  // objects.
+  INLINE(static bool IsBlackOrGrey(MarkBit mark_bit)) { return mark_bit.Get(); }
+
   INLINE(static void MarkBlack(MarkBit mark_bit)) {
     mark_bit.Set();
+    mark_bit.Next().Set();
+  }
+
+  INLINE(static void MarkWhite(MarkBit mark_bit)) {
+    mark_bit.Clear();
     mark_bit.Next().Clear();
   }
 
-  INLINE(static void BlackToGrey(MarkBit markbit)) { markbit.Next().Set(); }
+  INLINE(static void BlackToWhite(MarkBit markbit)) {
+    DCHECK(IsBlack(markbit));
+    markbit.Clear();
+    markbit.Next().Clear();
+  }
+
+  INLINE(static void GreyToWhite(MarkBit markbit)) {
+    DCHECK(IsGrey(markbit));
+    markbit.Clear();
+    markbit.Next().Clear();
+  }
+
+  INLINE(static void BlackToGrey(MarkBit markbit)) {
+    DCHECK(IsBlack(markbit));
+    markbit.Next().Clear();
+  }
 
   INLINE(static void WhiteToGrey(MarkBit markbit)) {
+    DCHECK(IsWhite(markbit));
+    markbit.Set();
+  }
+
+  INLINE(static void WhiteToBlack(MarkBit markbit)) {
+    DCHECK(IsWhite(markbit));
     markbit.Set();
     markbit.Next().Set();
   }
 
-  INLINE(static void GreyToBlack(MarkBit markbit)) { markbit.Next().Clear(); }
+  INLINE(static void GreyToBlack(MarkBit markbit)) {
+    DCHECK(IsGrey(markbit));
+    markbit.Next().Set();
+  }
 
   INLINE(static void BlackToGrey(HeapObject* obj)) {
     BlackToGrey(MarkBitFrom(obj));
@@ -75,10 +117,10 @@
 
   INLINE(static void AnyToGrey(MarkBit markbit)) {
     markbit.Set();
-    markbit.Next().Set();
+    markbit.Next().Clear();
   }
 
-  void TransferMark(Address old_start, Address new_start);
+  static void TransferMark(Heap* heap, Address old_start, Address new_start);
 
 #ifdef DEBUG
   enum ObjectColor {
@@ -119,20 +161,19 @@
   INLINE(static bool TransferColor(HeapObject* from, HeapObject* to)) {
     MarkBit from_mark_bit = MarkBitFrom(from);
     MarkBit to_mark_bit = MarkBitFrom(to);
-    bool is_black = false;
+    DCHECK(Marking::IsWhite(to_mark_bit));
     if (from_mark_bit.Get()) {
       to_mark_bit.Set();
-      is_black = true;  // Looks black so far.
+      if (from_mark_bit.Next().Get()) {
+        to_mark_bit.Next().Set();
+        return true;
+      }
     }
-    if (from_mark_bit.Next().Get()) {
-      to_mark_bit.Next().Set();
-      is_black = false;  // Was actually gray.
-    }
-    return is_black;
+    return false;
   }
 
  private:
-  Heap* heap_;
+  DISALLOW_IMPLICIT_CONSTRUCTORS(Marking);
 };
 
 // ----------------------------------------------------------------------------
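
Aside, not part of the patch: the Marking changes above flip the bit patterns so that black is 11, grey is 10 and white stays 00, which is what makes IsBlackOrGrey a single-bit test. A self-contained model of that two-bit color scheme, with toy types in place of V8's MarkBit:

#include <cassert>

// Two mark bits per object: first == mark_bit.Get(), second == Next().Get().
struct ToyMarkBits {
  bool first = false;
  bool second = false;
};

inline bool IsWhite(ToyMarkBits m) { return !m.first; }  // 00 (01 is impossible)
inline bool IsGrey(ToyMarkBits m) { return m.first && !m.second; }  // 10
inline bool IsBlack(ToyMarkBits m) { return m.first && m.second; }  // 11
inline bool IsBlackOrGrey(ToyMarkBits m) { return m.first; }        // 1x

inline void WhiteToGrey(ToyMarkBits* m) { assert(IsWhite(*m)); m->first = true; }
inline void GreyToBlack(ToyMarkBits* m) { assert(IsGrey(*m)); m->second = true; }

int main() {
  ToyMarkBits m;    // freshly allocated: white
  WhiteToGrey(&m);  // discovered and pushed on the marking deque
  GreyToBlack(&m);  // popped and its body visited
  assert(IsBlack(m) && IsBlackOrGrey(m));
  return 0;
}
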
@@ -140,18 +181,15 @@
 class MarkingDeque {
  public:
   MarkingDeque()
-      : array_(NULL), top_(0), bottom_(0), mask_(0), overflowed_(false) {}
+      : array_(NULL),
+        top_(0),
+        bottom_(0),
+        mask_(0),
+        overflowed_(false),
+        in_use_(false) {}
 
-  void Initialize(Address low, Address high) {
-    HeapObject** obj_low = reinterpret_cast<HeapObject**>(low);
-    HeapObject** obj_high = reinterpret_cast<HeapObject**>(high);
-    array_ = obj_low;
-    mask_ = base::bits::RoundDownToPowerOfTwo32(
-                static_cast<uint32_t>(obj_high - obj_low)) -
-            1;
-    top_ = bottom_ = 0;
-    overflowed_ = false;
-  }
+  void Initialize(Address low, Address high);
+  void Uninitialize(bool aborting = false);
 
   inline bool IsFull() { return ((top_ + 1) & mask_) == bottom_; }
 
@@ -159,32 +197,23 @@
 
   bool overflowed() const { return overflowed_; }
 
+  bool in_use() const { return in_use_; }
+
   void ClearOverflowed() { overflowed_ = false; }
 
   void SetOverflowed() { overflowed_ = true; }
 
-  // Push the (marked) object on the marking stack if there is room,
-  // otherwise mark the object as overflowed and wait for a rescan of the
-  // heap.
-  INLINE(void PushBlack(HeapObject* object)) {
-    DCHECK(object->IsHeapObject());
-    if (IsFull()) {
-      Marking::BlackToGrey(object);
-      MemoryChunk::IncrementLiveBytesFromGC(object->address(), -object->Size());
-      SetOverflowed();
-    } else {
-      array_[top_] = object;
-      top_ = ((top_ + 1) & mask_);
-    }
-  }
-
-  INLINE(void PushGrey(HeapObject* object)) {
+  // Push the object on the marking stack if there is room, otherwise mark the
+  // deque as overflowed and wait for a rescan of the heap.
+  INLINE(bool Push(HeapObject* object)) {
     DCHECK(object->IsHeapObject());
     if (IsFull()) {
       SetOverflowed();
+      return false;
     } else {
       array_[top_] = object;
       top_ = ((top_ + 1) & mask_);
+      return true;
     }
   }
 
@@ -196,13 +225,17 @@
     return object;
   }
 
-  INLINE(void UnshiftGrey(HeapObject* object)) {
+  // Unshift the object into the marking stack if there is room, otherwise mark
+  // the deque as overflowed and wait for a rescan of the heap.
+  INLINE(bool Unshift(HeapObject* object)) {
     DCHECK(object->IsHeapObject());
     if (IsFull()) {
       SetOverflowed();
+      return false;
     } else {
       bottom_ = ((bottom_ - 1) & mask_);
       array_[bottom_] = object;
+      return true;
     }
   }
 
@@ -221,155 +254,18 @@
   int bottom_;
   int mask_;
   bool overflowed_;
+  bool in_use_;
 
   DISALLOW_COPY_AND_ASSIGN(MarkingDeque);
 };
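
Aside, not part of the patch: Push and Unshift now report overflow to their caller instead of silently demoting objects. A compact stand-alone model of that ring-buffer interface, assuming a power-of-two capacity and invented names:

#include <cassert>

template <typename T, int kCapacity /* power of two */>
class ToyMarkingDeque {
 public:
  bool IsFull() const { return ((top_ + 1) & mask_) == bottom_; }
  bool IsEmpty() const { return top_ == bottom_; }
  bool overflowed() const { return overflowed_; }

  bool Push(T value) {  // push at the top end
    if (IsFull()) {
      overflowed_ = true;  // caller must later rescan the heap
      return false;
    }
    array_[top_] = value;
    top_ = (top_ + 1) & mask_;
    return true;
  }

  T Pop() {  // pop from the top end
    assert(!IsEmpty());
    top_ = (top_ - 1) & mask_;
    return array_[top_];
  }

  bool Unshift(T value) {  // push at the bottom end
    if (IsFull()) {
      overflowed_ = true;
      return false;
    }
    bottom_ = (bottom_ - 1) & mask_;
    array_[bottom_] = value;
    return true;
  }

 private:
  static_assert((kCapacity & (kCapacity - 1)) == 0, "capacity: power of two");
  T array_[kCapacity] = {};
  int top_ = 0;
  int bottom_ = 0;
  int mask_ = kCapacity - 1;  // masking wraps indices without a modulo
  bool overflowed_ = false;
};
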
 
 
-class SlotsBufferAllocator {
- public:
-  SlotsBuffer* AllocateBuffer(SlotsBuffer* next_buffer);
-  void DeallocateBuffer(SlotsBuffer* buffer);
-
-  void DeallocateChain(SlotsBuffer** buffer_address);
-};
-
-
-// SlotsBuffer records a sequence of slots that has to be updated
-// after live objects were relocated from evacuation candidates.
-// All slots are either untyped or typed:
-//    - Untyped slots are expected to contain a tagged object pointer.
-//      They are recorded by an address.
-//    - Typed slots are expected to contain an encoded pointer to a heap
-//      object where the way of encoding depends on the type of the slot.
-//      They are recorded as a pair (SlotType, slot address).
-// We assume that zero-page is never mapped this allows us to distinguish
-// untyped slots from typed slots during iteration by a simple comparison:
-// if element of slots buffer is less than NUMBER_OF_SLOT_TYPES then it
-// is the first element of typed slot's pair.
-class SlotsBuffer {
- public:
-  typedef Object** ObjectSlot;
-
-  explicit SlotsBuffer(SlotsBuffer* next_buffer)
-      : idx_(0), chain_length_(1), next_(next_buffer) {
-    if (next_ != NULL) {
-      chain_length_ = next_->chain_length_ + 1;
-    }
-  }
-
-  ~SlotsBuffer() {}
-
-  void Add(ObjectSlot slot) {
-    DCHECK(0 <= idx_ && idx_ < kNumberOfElements);
-    slots_[idx_++] = slot;
-  }
-
-  enum SlotType {
-    EMBEDDED_OBJECT_SLOT,
-    RELOCATED_CODE_OBJECT,
-    CODE_TARGET_SLOT,
-    CODE_ENTRY_SLOT,
-    DEBUG_TARGET_SLOT,
-    JS_RETURN_SLOT,
-    NUMBER_OF_SLOT_TYPES
-  };
-
-  static const char* SlotTypeToString(SlotType type) {
-    switch (type) {
-      case EMBEDDED_OBJECT_SLOT:
-        return "EMBEDDED_OBJECT_SLOT";
-      case RELOCATED_CODE_OBJECT:
-        return "RELOCATED_CODE_OBJECT";
-      case CODE_TARGET_SLOT:
-        return "CODE_TARGET_SLOT";
-      case CODE_ENTRY_SLOT:
-        return "CODE_ENTRY_SLOT";
-      case DEBUG_TARGET_SLOT:
-        return "DEBUG_TARGET_SLOT";
-      case JS_RETURN_SLOT:
-        return "JS_RETURN_SLOT";
-      case NUMBER_OF_SLOT_TYPES:
-        return "NUMBER_OF_SLOT_TYPES";
-    }
-    return "UNKNOWN SlotType";
-  }
-
-  void UpdateSlots(Heap* heap);
-
-  void UpdateSlotsWithFilter(Heap* heap);
-
-  SlotsBuffer* next() { return next_; }
-
-  static int SizeOfChain(SlotsBuffer* buffer) {
-    if (buffer == NULL) return 0;
-    return static_cast<int>(buffer->idx_ +
-                            (buffer->chain_length_ - 1) * kNumberOfElements);
-  }
-
-  inline bool IsFull() { return idx_ == kNumberOfElements; }
-
-  inline bool HasSpaceForTypedSlot() { return idx_ < kNumberOfElements - 1; }
-
-  static void UpdateSlotsRecordedIn(Heap* heap, SlotsBuffer* buffer,
-                                    bool code_slots_filtering_required) {
-    while (buffer != NULL) {
-      if (code_slots_filtering_required) {
-        buffer->UpdateSlotsWithFilter(heap);
-      } else {
-        buffer->UpdateSlots(heap);
-      }
-      buffer = buffer->next();
-    }
-  }
-
-  enum AdditionMode { FAIL_ON_OVERFLOW, IGNORE_OVERFLOW };
-
-  static bool ChainLengthThresholdReached(SlotsBuffer* buffer) {
-    return buffer != NULL && buffer->chain_length_ >= kChainLengthThreshold;
-  }
-
-  INLINE(static bool AddTo(SlotsBufferAllocator* allocator,
-                           SlotsBuffer** buffer_address, ObjectSlot slot,
-                           AdditionMode mode)) {
-    SlotsBuffer* buffer = *buffer_address;
-    if (buffer == NULL || buffer->IsFull()) {
-      if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
-        allocator->DeallocateChain(buffer_address);
-        return false;
-      }
-      buffer = allocator->AllocateBuffer(buffer);
-      *buffer_address = buffer;
-    }
-    buffer->Add(slot);
-    return true;
-  }
-
-  static bool IsTypedSlot(ObjectSlot slot);
-
-  static bool AddTo(SlotsBufferAllocator* allocator,
-                    SlotsBuffer** buffer_address, SlotType type, Address addr,
-                    AdditionMode mode);
-
-  static const int kNumberOfElements = 1021;
-
- private:
-  static const int kChainLengthThreshold = 15;
-
-  intptr_t idx_;
-  intptr_t chain_length_;
-  SlotsBuffer* next_;
-  ObjectSlot slots_[kNumberOfElements];
-};
-
-
 // CodeFlusher collects candidates for code flushing during marking and
 // processes those candidates after marking has completed in order to
 // reset those functions referencing code objects that would otherwise
-// be unreachable. Code objects can be referenced in three ways:
+// be unreachable. Code objects can be referenced in two ways:
 //    - SharedFunctionInfo references unoptimized code.
 //    - JSFunction references either unoptimized or optimized code.
-//    - OptimizedCodeMap references optimized code.
 // We are not allowed to flush unoptimized code for functions that got
 // optimized or inlined into optimized code, because we might bailout
 // into the unoptimized code again during deoptimization.
@@ -377,113 +273,42 @@
  public:
   explicit CodeFlusher(Isolate* isolate)
       : isolate_(isolate),
-        jsfunction_candidates_head_(NULL),
-        shared_function_info_candidates_head_(NULL),
-        optimized_code_map_holder_head_(NULL) {}
+        jsfunction_candidates_head_(nullptr),
+        shared_function_info_candidates_head_(nullptr) {}
 
-  void AddCandidate(SharedFunctionInfo* shared_info) {
-    if (GetNextCandidate(shared_info) == NULL) {
-      SetNextCandidate(shared_info, shared_function_info_candidates_head_);
-      shared_function_info_candidates_head_ = shared_info;
-    }
-  }
+  inline void AddCandidate(SharedFunctionInfo* shared_info);
+  inline void AddCandidate(JSFunction* function);
 
-  void AddCandidate(JSFunction* function) {
-    DCHECK(function->code() == function->shared()->code());
-    if (GetNextCandidate(function)->IsUndefined()) {
-      SetNextCandidate(function, jsfunction_candidates_head_);
-      jsfunction_candidates_head_ = function;
-    }
-  }
-
-  void AddOptimizedCodeMap(SharedFunctionInfo* code_map_holder) {
-    if (GetNextCodeMap(code_map_holder)->IsUndefined()) {
-      SetNextCodeMap(code_map_holder, optimized_code_map_holder_head_);
-      optimized_code_map_holder_head_ = code_map_holder;
-    }
-  }
-
-  void EvictOptimizedCodeMap(SharedFunctionInfo* code_map_holder);
   void EvictCandidate(SharedFunctionInfo* shared_info);
   void EvictCandidate(JSFunction* function);
 
   void ProcessCandidates() {
-    ProcessOptimizedCodeMaps();
     ProcessSharedFunctionInfoCandidates();
     ProcessJSFunctionCandidates();
   }
 
-  void EvictAllCandidates() {
-    EvictOptimizedCodeMaps();
-    EvictJSFunctionCandidates();
-    EvictSharedFunctionInfoCandidates();
-  }
-
   void IteratePointersToFromSpace(ObjectVisitor* v);
 
  private:
-  void ProcessOptimizedCodeMaps();
   void ProcessJSFunctionCandidates();
   void ProcessSharedFunctionInfoCandidates();
-  void EvictOptimizedCodeMaps();
-  void EvictJSFunctionCandidates();
-  void EvictSharedFunctionInfoCandidates();
 
-  static JSFunction** GetNextCandidateSlot(JSFunction* candidate) {
-    return reinterpret_cast<JSFunction**>(
-        HeapObject::RawField(candidate, JSFunction::kNextFunctionLinkOffset));
-  }
+  static inline JSFunction** GetNextCandidateSlot(JSFunction* candidate);
+  static inline JSFunction* GetNextCandidate(JSFunction* candidate);
+  static inline void SetNextCandidate(JSFunction* candidate,
+                                      JSFunction* next_candidate);
+  static inline void ClearNextCandidate(JSFunction* candidate,
+                                        Object* undefined);
 
-  static JSFunction* GetNextCandidate(JSFunction* candidate) {
-    Object* next_candidate = candidate->next_function_link();
-    return reinterpret_cast<JSFunction*>(next_candidate);
-  }
-
-  static void SetNextCandidate(JSFunction* candidate,
-                               JSFunction* next_candidate) {
-    candidate->set_next_function_link(next_candidate);
-  }
-
-  static void ClearNextCandidate(JSFunction* candidate, Object* undefined) {
-    DCHECK(undefined->IsUndefined());
-    candidate->set_next_function_link(undefined, SKIP_WRITE_BARRIER);
-  }
-
-  static SharedFunctionInfo* GetNextCandidate(SharedFunctionInfo* candidate) {
-    Object* next_candidate = candidate->code()->gc_metadata();
-    return reinterpret_cast<SharedFunctionInfo*>(next_candidate);
-  }
-
-  static void SetNextCandidate(SharedFunctionInfo* candidate,
-                               SharedFunctionInfo* next_candidate) {
-    candidate->code()->set_gc_metadata(next_candidate);
-  }
-
-  static void ClearNextCandidate(SharedFunctionInfo* candidate) {
-    candidate->code()->set_gc_metadata(NULL, SKIP_WRITE_BARRIER);
-  }
-
-  static SharedFunctionInfo* GetNextCodeMap(SharedFunctionInfo* holder) {
-    FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
-    Object* next_map = code_map->get(SharedFunctionInfo::kNextMapIndex);
-    return reinterpret_cast<SharedFunctionInfo*>(next_map);
-  }
-
-  static void SetNextCodeMap(SharedFunctionInfo* holder,
-                             SharedFunctionInfo* next_holder) {
-    FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
-    code_map->set(SharedFunctionInfo::kNextMapIndex, next_holder);
-  }
-
-  static void ClearNextCodeMap(SharedFunctionInfo* holder) {
-    FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
-    code_map->set_undefined(SharedFunctionInfo::kNextMapIndex);
-  }
+  static inline SharedFunctionInfo* GetNextCandidate(
+      SharedFunctionInfo* candidate);
+  static inline void SetNextCandidate(SharedFunctionInfo* candidate,
+                                      SharedFunctionInfo* next_candidate);
+  static inline void ClearNextCandidate(SharedFunctionInfo* candidate);
 
   Isolate* isolate_;
   JSFunction* jsfunction_candidates_head_;
   SharedFunctionInfo* shared_function_info_candidates_head_;
-  SharedFunctionInfo* optimized_code_map_holder_head_;
 
   DISALLOW_COPY_AND_ASSIGN(CodeFlusher);
 };
@@ -497,8 +322,10 @@
 // Mark-Compact collector
 class MarkCompactCollector {
  public:
-  // Set the global flags, it must be called before Prepare to take effect.
-  inline void SetFlags(int flags);
+  enum IterationMode {
+    kKeepMarking,
+    kClearMarkbits,
+  };
 
   static void Initialize();
 
@@ -538,22 +365,18 @@
   static const uint32_t kMultiFreeEncoding = 1;
 
   static inline bool IsMarked(Object* obj);
+  static bool IsUnmarkedHeapObjectWithHeap(Heap* heap, Object** p);
 
   inline Heap* heap() const { return heap_; }
   inline Isolate* isolate() const;
 
   CodeFlusher* code_flusher() { return code_flusher_; }
   inline bool is_code_flushing_enabled() const { return code_flusher_ != NULL; }
-  void EnableCodeFlushing(bool enable);
-
-  enum SweeperType {
-    CONCURRENT_SWEEPING,
-    SEQUENTIAL_SWEEPING
-  };
 
   enum SweepingParallelism { SWEEP_ON_MAIN_THREAD, SWEEP_IN_PARALLEL };
 
 #ifdef VERIFY_HEAP
+  void VerifyValidStoreAndSlotsBufferEntries();
   void VerifyMarkbitsAreClean();
   static void VerifyMarkbitsAreClean(PagedSpace* space);
   static void VerifyMarkbitsAreClean(NewSpace* space);
@@ -561,11 +384,6 @@
   void VerifyOmittedMapChecks();
 #endif
 
-  INLINE(static bool ShouldSkipEvacuationSlotRecording(Object** anchor)) {
-    return Page::FromAddress(reinterpret_cast<Address>(anchor))
-        ->ShouldSkipEvacuationSlotRecording();
-  }
-
   INLINE(static bool ShouldSkipEvacuationSlotRecording(Object* host)) {
     return Page::FromAddress(reinterpret_cast<Address>(host))
         ->ShouldSkipEvacuationSlotRecording();
@@ -576,46 +394,24 @@
         ->IsEvacuationCandidate();
   }
 
-  INLINE(void EvictEvacuationCandidate(Page* page)) {
-    if (FLAG_trace_fragmentation) {
-      PrintF("Page %p is too popular. Disabling evacuation.\n",
-             reinterpret_cast<void*>(page));
-    }
-
-    // TODO(gc) If all evacuation candidates are too popular we
-    // should stop slots recording entirely.
-    page->ClearEvacuationCandidate();
-
-    // We were not collecting slots on this page that point
-    // to other evacuation candidates thus we have to
-    // rescan the page after evacuation to discover and update all
-    // pointers to evacuated objects.
-    if (page->owner()->identity() == OLD_DATA_SPACE) {
-      evacuation_candidates_.RemoveElement(page);
-    } else {
-      page->SetFlag(Page::RESCAN_ON_EVACUATION);
-    }
-  }
-
   void RecordRelocSlot(RelocInfo* rinfo, Object* target);
-  void RecordCodeEntrySlot(Address slot, Code* target);
+  void RecordCodeEntrySlot(HeapObject* object, Address slot, Code* target);
   void RecordCodeTargetPatch(Address pc, Code* target);
+  INLINE(void RecordSlot(HeapObject* object, Object** slot, Object* target));
+  INLINE(void ForceRecordSlot(HeapObject* object, Object** slot,
+                              Object* target));
 
-  INLINE(void RecordSlot(
-      Object** anchor_slot, Object** slot, Object* object,
-      SlotsBuffer::AdditionMode mode = SlotsBuffer::FAIL_ON_OVERFLOW));
+  void UpdateSlots(SlotsBuffer* buffer);
+  void UpdateSlotsRecordedIn(SlotsBuffer* buffer);
 
   void MigrateObject(HeapObject* dst, HeapObject* src, int size,
-                     AllocationSpace to_old_space);
-
-  bool TryPromoteObject(HeapObject* object, int object_size);
+                     AllocationSpace to_old_space,
+                     SlotsBuffer** evacuation_slots_buffer);
 
   void InvalidateCode(Code* code);
 
   void ClearMarkbits();
 
-  bool abort_incremental_marking() const { return abort_incremental_marking_; }
-
   bool is_compacting() const { return compacting_; }
 
   MarkingParity marking_parity() { return marking_parity_; }
@@ -631,15 +427,24 @@
   // size of the maximum continuous freed memory chunk.
   int SweepInParallel(Page* page, PagedSpace* space);
 
+  // Ensures that sweeping is finished.
+  //
+  // Note: Can only be called safely from the main thread.
   void EnsureSweepingCompleted();
 
+  void SweepOrWaitUntilSweepingCompleted(Page* page);
+
+  // Helps out in sweeping the corresponding space and refills memory that
+  // has been regained.
+  //
+  // Note: Thread-safe.
+  void SweepAndRefill(CompactionSpace* space);
+
   // If sweeper threads are not active this method will return true. If
   // this is a latency issue we should be smarter here. Otherwise, it will
   // return true if the sweeper threads are done processing the pages.
   bool IsSweepingCompleted();
 
-  void RefillFreeList(PagedSpace* space);
-
   // Checks if sweeping is in progress right now on any space.
   bool sweeping_in_progress() { return sweeping_in_progress_; }
 
@@ -647,35 +452,79 @@
 
   bool evacuation() const { return evacuation_; }
 
-  // Mark the global table which maps weak objects to dependent code without
-  // marking its contents.
-  void MarkWeakObjectToCodeTable();
-
   // Special case for processing weak references in a full collection. We need
   // to artificially keep AllocationSites alive for a time.
   void MarkAllocationSite(AllocationSite* site);
 
+  // Mark objects in implicit references groups if their parent object
+  // is marked.
+  void MarkImplicitRefGroups(MarkObjectFunction mark_object);
+
   MarkingDeque* marking_deque() { return &marking_deque_; }
 
-  void EnsureMarkingDequeIsCommittedAndInitialize();
+  static const size_t kMaxMarkingDequeSize = 4 * MB;
+  static const size_t kMinMarkingDequeSize = 256 * KB;
+
+  void EnsureMarkingDequeIsCommittedAndInitialize(size_t max_size) {
+    if (!marking_deque_.in_use()) {
+      EnsureMarkingDequeIsCommitted(max_size);
+      InitializeMarkingDeque();
+    }
+  }
+
+  void EnsureMarkingDequeIsCommitted(size_t max_size);
+  void EnsureMarkingDequeIsReserved();
 
   void InitializeMarkingDeque();
 
-  void UncommitMarkingDeque();
+  // The following four methods can only be called after marking, when the
+  // whole transitive closure is known. They must be called before sweeping,
+  // while mark bits are still intact.
+  bool IsSlotInBlackObject(Page* p, Address slot, HeapObject** out_object);
+  bool IsSlotInBlackObjectSlow(Page* p, Address slot);
+  bool IsSlotInLiveObject(Address slot);
+  void VerifyIsSlotInLiveObject(Address slot, HeapObject* object);
+
+  // Removes all the slots in the slot buffers that are within the given
+  // address range.
+  void RemoveObjectSlots(Address start_slot, Address end_slot);
+
+  //
+  // Free lists filled by sweeper and consumed by corresponding spaces
+  // (including compaction spaces).
+  //
+  base::SmartPointer<FreeList>& free_list_old_space() {
+    return free_list_old_space_;
+  }
+  base::SmartPointer<FreeList>& free_list_code_space() {
+    return free_list_code_space_;
+  }
+  base::SmartPointer<FreeList>& free_list_map_space() {
+    return free_list_map_space_;
+  }
 
  private:
+  class CompactionTask;
+  class EvacuateNewSpaceVisitor;
+  class EvacuateOldSpaceVisitor;
+  class EvacuateVisitorBase;
+  class HeapObjectVisitor;
   class SweeperTask;
 
-  explicit MarkCompactCollector(Heap* heap);
-  ~MarkCompactCollector();
+  static const int kInitialLocalPretenuringFeedbackCapacity = 256;
 
-  bool MarkInvalidatedCode();
+  explicit MarkCompactCollector(Heap* heap);
+
   bool WillBeDeoptimized(Code* code);
-  void RemoveDeadInvalidatedCode();
-  void ProcessInvalidatedCode(ObjectVisitor* visitor);
+  void EvictPopularEvacuationCandidate(Page* page);
+  void ClearInvalidStoreAndSlotsBufferEntries();
 
   void StartSweeperThreads();
 
+  void ComputeEvacuationHeuristics(int area_size,
+                                   int* target_fragmentation_percent,
+                                   int* max_evacuated_bytes);
+
 #ifdef DEBUG
   enum CollectorState {
     IDLE,
@@ -691,26 +540,13 @@
   CollectorState state_;
 #endif
 
-  bool reduce_memory_footprint_;
-
-  bool abort_incremental_marking_;
-
   MarkingParity marking_parity_;
 
-  // True if we are collecting slots to perform evacuation from evacuation
-  // candidates.
-  bool compacting_;
-
   bool was_marked_incrementally_;
 
-  // True if concurrent or parallel sweeping is currently in progress.
-  bool sweeping_in_progress_;
-
-  base::Semaphore pending_sweeper_jobs_semaphore_;
-
   bool evacuation_;
 
-  SlotsBufferAllocator slots_buffer_allocator_;
+  SlotsBufferAllocator* slots_buffer_allocator_;
 
   SlotsBuffer* migration_slots_buffer_;
 
@@ -726,10 +562,12 @@
   //
   //   After: Live objects are marked and non-live objects are unmarked.
 
-  friend class RootMarkingVisitor;
-  friend class MarkingVisitor;
-  friend class MarkCompactMarkingVisitor;
   friend class CodeMarkingVisitor;
+  friend class IncrementalMarkingMarkingVisitor;
+  friend class MarkCompactMarkingVisitor;
+  friend class MarkingVisitor;
+  friend class RecordMigratedSlotVisitor;
+  friend class RootMarkingVisitor;
   friend class SharedFunctionInfoMarkingVisitor;
 
   // Mark code objects that are active on the stack to prevent them
@@ -741,7 +579,13 @@
   // Marking operations for objects reachable from roots.
   void MarkLiveObjects();
 
-  void AfterMarking();
+  // Pushes a black object onto the marking stack and accounts for live bytes.
+  // Note that this assumes live bytes have not yet been counted.
+  INLINE(void PushBlack(HeapObject* obj));
+
+  // Unshifts a black object into the marking stack and accounts for live bytes.
+  // Note that this assumes live bytes have already been counted.
+  INLINE(void UnshiftBlack(HeapObject* obj));
 
   // Marks the object black and pushes it on the marking stack.
   // This is for non-incremental marking only.
@@ -758,10 +602,6 @@
   // the string table are weak.
   void MarkStringTable(RootMarkingVisitor* visitor);
 
-  // Mark objects in implicit references groups if their parent object
-  // is marked.
-  void MarkImplicitRefGroups();
-
   // Mark objects reachable (transitively) from objects in the marking stack
   // or overflowed in the heap.
   void ProcessMarkingDeque();
@@ -780,6 +620,9 @@
   // otherwise a map can die and deoptimize the code.
   void ProcessTopOptimizedFrame(ObjectVisitor* visitor);
 
+  // Collects a list of dependent code from maps embedded in optimized code.
+  DependentCode* DependentCodeListFromNonLiveMaps();
+
   // Mark objects reachable (transitively) from objects in the marking
   // stack.  This function empties the marking stack, but may leave
   // overflowed objects in the heap, in which case the marking stack's
@@ -791,27 +634,34 @@
   // flag on the marking stack.
   void RefillMarkingDeque();
 
+  // Helper methods for refilling the marking stack by discovering grey objects
+  // on various pages of the heap. Used by {RefillMarkingDeque} only.
+  template <class T>
+  void DiscoverGreyObjectsWithIterator(T* it);
+  void DiscoverGreyObjectsOnPage(MemoryChunk* p);
+  void DiscoverGreyObjectsInSpace(PagedSpace* space);
+  void DiscoverGreyObjectsInNewSpace();
+
   // Callback function for telling whether the object *p is an unmarked
   // heap object.
   static bool IsUnmarkedHeapObject(Object** p);
-  static bool IsUnmarkedHeapObjectWithHeap(Heap* heap, Object** p);
 
-  // Map transitions from a live map to a dead map must be killed.
-  // We replace them with a null descriptor, with the same key.
+  // Clear non-live references in weak cells, transition and descriptor arrays,
+  // and deoptimize dependent code of non-live maps.
   void ClearNonLiveReferences();
-  void ClearNonLivePrototypeTransitions(Map* map);
-  void ClearNonLiveMapTransitions(Map* map, MarkBit map_mark);
-  void ClearMapTransitions(Map* map);
-  bool ClearMapBackPointer(Map* map);
-  void TrimDescriptorArray(Map* map, DescriptorArray* descriptors,
-                           int number_of_own_descriptors);
+  void MarkDependentCodeForDeoptimization(DependentCode* list);
+  // Find non-live targets of simple transitions in the given list. Clear
+  // transitions to non-live targets and, if needed, trim descriptor arrays.
+  void ClearSimpleMapTransitions(Object* non_live_map_list);
+  void ClearSimpleMapTransition(Map* map, Map* dead_transition);
+  // Compact every array in the global list of transition arrays and
+  // trim the corresponding descriptor array if a transition target is non-live.
+  void ClearFullMapTransitions();
+  bool CompactTransitionArray(Map* map, TransitionArray* transitions,
+                              DescriptorArray* descriptors);
+  void TrimDescriptorArray(Map* map, DescriptorArray* descriptors);
   void TrimEnumCache(Map* map, DescriptorArray* descriptors);
 
-  void ClearDependentCode(DependentCode* dependent_code);
-  void ClearNonLiveDependentCode(DependentCode* dependent_code);
-  int ClearNonLiveDependentCodeInGroup(DependentCode* dependent_code, int group,
-                                       int start, int end, int new_start);
-
   // Mark all values associated with reachable keys in weak collections
   // encountered so far.  This might push new object or even new weak maps onto
   // the marking stack.
@@ -826,10 +676,12 @@
   // collections when incremental marking is aborted.
   void AbortWeakCollections();
 
-
-  void ProcessAndClearWeakCells();
+  void ClearWeakCells(Object** non_live_map_list,
+                      DependentCode** dependent_code_list);
   void AbortWeakCells();
 
+  void AbortTransitionArrays();
+
   // -----------------------------------------------------------------------
   // Phase 2: Sweeping to clear mark bits and free non-live objects for
   // a non-compacting collection.
@@ -846,24 +698,51 @@
   // regions to each space's free list.
   void SweepSpaces();
 
-  int DiscoverAndEvacuateBlackObjectsOnPage(NewSpace* new_space,
-                                            NewSpacePage* p);
+  void EvacuateNewSpacePrologue();
 
-  void EvacuateNewSpace();
+  // Returns local pretenuring feedback.
+  HashMap* EvacuateNewSpaceInParallel();
 
-  void EvacuateLiveObjectsFromPage(Page* p);
+  void AddEvacuationSlotsBufferSynchronized(
+      SlotsBuffer* evacuation_slots_buffer);
 
-  void EvacuatePages();
+  void EvacuatePages(CompactionSpaceCollection* compaction_spaces,
+                     SlotsBuffer** evacuation_slots_buffer);
+
+  void EvacuatePagesInParallel();
+
+  // The number of parallel compaction tasks, including the main thread.
+  int NumberOfParallelCompactionTasks();
+
+
+  void StartParallelCompaction(CompactionSpaceCollection** compaction_spaces,
+                               uint32_t* task_ids, int len);
+  void WaitUntilCompactionCompleted(uint32_t* task_ids, int len);
 
   void EvacuateNewSpaceAndCandidates();
 
+  void UpdatePointersAfterEvacuation();
+
+  // Iterates through all live objects on a page using marking information.
+  // Returns whether all objects have successfully been visited.
+  bool VisitLiveObjects(MemoryChunk* page, HeapObjectVisitor* visitor,
+                        IterationMode mode);
+
+  void VisitLiveObjectsBody(Page* page, ObjectVisitor* visitor);
+
+  void RecomputeLiveBytes(MemoryChunk* page);
+
+  void SweepAbortedPages();
+
   void ReleaseEvacuationCandidates();
 
   // Moves the pages of the evacuation_candidates_ list to the end of their
   // corresponding space pages list.
   void MoveEvacuationCandidatesToEndOfPagesList();
 
-  void SweepSpace(PagedSpace* space, SweeperType sweeper);
+  // Starts sweeping of a space by contributing on the main thread and setting
+  // up other pages for sweeping.
+  void StartSweepSpace(PagedSpace* space);
 
   // Finalizes the parallel sweeping phase. Marks all the pages that were
   // swept in parallel.
@@ -872,7 +751,16 @@
   void ParallelSweepSpaceComplete(PagedSpace* space);
 
   // Updates store buffer and slot buffer for a pointer in a migrating object.
-  void RecordMigratedSlot(Object* value, Address slot);
+  void RecordMigratedSlot(Object* value, Address slot,
+                          SlotsBuffer** evacuation_slots_buffer);
+
+  // Adds the code entry slot to the slots buffer.
+  void RecordMigratedCodeEntrySlot(Address code_entry, Address code_entry_slot,
+                                   SlotsBuffer** evacuation_slots_buffer);
+
+  // Adds the slot of a moved code object.
+  void RecordMigratedCodeObjectSlot(Address code_object,
+                                    SlotsBuffer** evacuation_slots_buffer);
 
 #ifdef DEBUG
   friend class MarkObjectVisitor;
@@ -884,18 +772,45 @@
 
   Heap* heap_;
   base::VirtualMemory* marking_deque_memory_;
-  bool marking_deque_memory_committed_;
+  size_t marking_deque_memory_committed_;
   MarkingDeque marking_deque_;
   CodeFlusher* code_flusher_;
   bool have_code_to_deoptimize_;
 
   List<Page*> evacuation_candidates_;
-  List<Code*> invalidated_code_;
 
-  SmartPointer<FreeList> free_list_old_data_space_;
-  SmartPointer<FreeList> free_list_old_pointer_space_;
+  List<MemoryChunk*> newspace_evacuation_candidates_;
+
+  // The evacuation_slots_buffers_ are used by the compaction threads.
+  // When a compaction task finishes, it uses
+  // AddEvacuationSlotsBufferSynchronized to add its slots buffer to the
+  // evacuation_slots_buffers_ list using the evacuation_slots_buffers_mutex_
+  // lock.
+  base::Mutex evacuation_slots_buffers_mutex_;
+  List<SlotsBuffer*> evacuation_slots_buffers_;
+
+  base::SmartPointer<FreeList> free_list_old_space_;
+  base::SmartPointer<FreeList> free_list_code_space_;
+  base::SmartPointer<FreeList> free_list_map_space_;
+
+  // True if we are collecting slots to perform evacuation from evacuation
+  // candidates.
+  bool compacting_;
+
+  // True if concurrent or parallel sweeping is currently in progress.
+  bool sweeping_in_progress_;
+
+  // True if parallel compaction is currently in progress.
+  bool compaction_in_progress_;
+
+  // Semaphore used to synchronize sweeper tasks.
+  base::Semaphore pending_sweeper_tasks_semaphore_;
+
+  // Semaphore used to synchronize compaction tasks.
+  base::Semaphore pending_compaction_tasks_semaphore_;
 
   friend class Heap;
+  friend class StoreBuffer;
 };
 
 
@@ -931,6 +846,14 @@
     cell_base_ += 32 * kPointerSize;
   }
 
+  // Returns the next mark bit cell, or 0 if there is no next cell.
+  inline MarkBit::CellType PeekNext() {
+    if (HasNext()) {
+      return cells_[cell_index_ + 1];
+    }
+    return 0;
+  }
+
  private:
   MemoryChunk* chunk_;
   MarkBit::CellType* cells_;
@@ -939,6 +862,26 @@
   Address cell_base_;
 };
 
+enum LiveObjectIterationMode { kBlackObjects, kGreyObjects, kAllLiveObjects };
+
+template <LiveObjectIterationMode T>
+class LiveObjectIterator BASE_EMBEDDED {
+ public:
+  explicit LiveObjectIterator(MemoryChunk* chunk)
+      : chunk_(chunk),
+        it_(chunk_),
+        cell_base_(it_.CurrentCellBase()),
+        current_cell_(*it_.CurrentCell()) {}
+
+  HeapObject* Next();
+
+ private:
+  MemoryChunk* chunk_;
+  MarkBitCellIterator it_;
+  Address cell_base_;
+  MarkBit::CellType current_cell_;
+};
+
 
 class EvacuationScope BASE_EMBEDDED {
  public:
@@ -955,7 +898,7 @@
 
 
 const char* AllocationSpaceName(AllocationSpace space);
-}
-}  // namespace v8::internal
+}  // namespace internal
+}  // namespace v8
 
 #endif  // V8_HEAP_MARK_COMPACT_H_
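
The LiveObjectIterator added above walks a page's mark bitmap cell by cell (32 mark bits per cell, as the `cell_base_ += 32 * kPointerSize` step shows) and yields the objects whose mark bits are set. The sketch below is a rough standalone model of that bitmap walk, not V8's implementation: SimpleMarkBitmap and its indices are invented for the illustration, and __builtin_ctz assumes a GCC/Clang toolchain.

// Standalone sketch of walking a mark bitmap in the spirit of
// LiveObjectIterator. All types here are invented for the example; V8's
// real iterator operates on a MemoryChunk's mark bits and yields HeapObject*.
#include <cstdint>
#include <cstdio>
#include <vector>

struct SimpleMarkBitmap {
  std::vector<uint32_t> cells;  // 32 mark bits per cell.

  void Mark(size_t index) { cells[index / 32] |= 1u << (index % 32); }

  // Calls visit(index) for every marked index, lowest first.
  template <typename Visitor>
  void IterateMarked(Visitor visit) const {
    for (size_t c = 0; c < cells.size(); c++) {
      uint32_t cell = cells[c];
      while (cell != 0) {
        int offset = __builtin_ctz(cell);  // position of the lowest set bit
        visit(c * 32 + offset);
        cell &= cell - 1;                  // clear that bit and continue
      }
    }
  }
};

int main() {
  SimpleMarkBitmap bitmap{std::vector<uint32_t>(4, 0)};
  bitmap.Mark(3);
  bitmap.Mark(40);
  bitmap.Mark(97);
  bitmap.IterateMarked([](size_t i) { std::printf("live index %zu\n", i); });
  return 0;
}
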
diff --git a/src/heap/memory-reducer.cc b/src/heap/memory-reducer.cc
new file mode 100644
index 0000000..ee10091
--- /dev/null
+++ b/src/heap/memory-reducer.cc
@@ -0,0 +1,213 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/memory-reducer.h"
+
+#include "src/flags.h"
+#include "src/heap/gc-tracer.h"
+#include "src/heap/heap-inl.h"
+#include "src/utils.h"
+#include "src/v8.h"
+
+namespace v8 {
+namespace internal {
+
+const int MemoryReducer::kLongDelayMs = 8000;
+const int MemoryReducer::kShortDelayMs = 500;
+const int MemoryReducer::kWatchdogDelayMs = 100000;
+const int MemoryReducer::kMaxNumberOfGCs = 3;
+
+MemoryReducer::TimerTask::TimerTask(MemoryReducer* memory_reducer)
+    : CancelableTask(memory_reducer->heap()->isolate()),
+      memory_reducer_(memory_reducer) {}
+
+
+void MemoryReducer::TimerTask::RunInternal() {
+  const double kJsCallsPerMsThreshold = 0.5;
+  Heap* heap = memory_reducer_->heap();
+  Event event;
+  double time_ms = heap->MonotonicallyIncreasingTimeInMs();
+  heap->tracer()->SampleAllocation(time_ms, heap->NewSpaceAllocationCounter(),
+                                   heap->OldGenerationAllocationCounter());
+  double js_call_rate = memory_reducer_->SampleAndGetJsCallsPerMs(time_ms);
+  bool low_allocation_rate = heap->HasLowAllocationRate();
+  bool is_idle = js_call_rate < kJsCallsPerMsThreshold && low_allocation_rate;
+  bool optimize_for_memory = heap->ShouldOptimizeForMemoryUsage();
+  if (FLAG_trace_gc_verbose) {
+    PrintIsolate(heap->isolate(), "Memory reducer: call rate %.3lf, %s, %s\n",
+                 js_call_rate, low_allocation_rate ? "low alloc" : "high alloc",
+                 optimize_for_memory ? "background" : "foreground");
+  }
+  event.type = kTimer;
+  event.time_ms = time_ms;
+  // The memory reducer will start incremental marking if
+  // 1) mutator is likely idle: js call rate is low and allocation rate is low.
+  // 2) mutator is in background: optimize for memory flag is set.
+  event.should_start_incremental_gc = is_idle || optimize_for_memory;
+  event.can_start_incremental_gc =
+      heap->incremental_marking()->IsStopped() &&
+      heap->incremental_marking()->CanBeActivated();
+  memory_reducer_->NotifyTimer(event);
+}
+
+
+double MemoryReducer::SampleAndGetJsCallsPerMs(double time_ms) {
+  unsigned int counter = heap()->isolate()->js_calls_from_api_counter();
+  unsigned int call_delta = counter - js_calls_counter_;
+  double time_delta_ms = time_ms - js_calls_sample_time_ms_;
+  js_calls_counter_ = counter;
+  js_calls_sample_time_ms_ = time_ms;
+  return time_delta_ms > 0 ? call_delta / time_delta_ms : 0;
+}
+
+
+void MemoryReducer::NotifyTimer(const Event& event) {
+  DCHECK_EQ(kTimer, event.type);
+  DCHECK_EQ(kWait, state_.action);
+  state_ = Step(state_, event);
+  if (state_.action == kRun) {
+    DCHECK(heap()->incremental_marking()->IsStopped());
+    DCHECK(FLAG_incremental_marking);
+    if (FLAG_trace_gc_verbose) {
+      PrintIsolate(heap()->isolate(), "Memory reducer: started GC #%d\n",
+                   state_.started_gcs);
+    }
+    heap()->StartIdleIncrementalMarking();
+  } else if (state_.action == kWait) {
+    if (!heap()->incremental_marking()->IsStopped() &&
+        heap()->ShouldOptimizeForMemoryUsage()) {
+      // Make progress with pending incremental marking if memory usage has
+      // higher priority than latency. This is important for background tabs
+      // that do not send idle notifications.
+      const int kIncrementalMarkingDelayMs = 500;
+      double deadline = heap()->MonotonicallyIncreasingTimeInMs() +
+                        kIncrementalMarkingDelayMs;
+      heap()->incremental_marking()->AdvanceIncrementalMarking(
+          0, deadline, i::IncrementalMarking::StepActions(
+                           i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
+                           i::IncrementalMarking::FORCE_MARKING,
+                           i::IncrementalMarking::FORCE_COMPLETION));
+      heap()->FinalizeIncrementalMarkingIfComplete(
+          "Memory reducer: finalize incremental marking");
+    }
+    // Re-schedule the timer.
+    ScheduleTimer(event.time_ms, state_.next_gc_start_ms - event.time_ms);
+    if (FLAG_trace_gc_verbose) {
+      PrintIsolate(heap()->isolate(), "Memory reducer: waiting for %.f ms\n",
+                   state_.next_gc_start_ms - event.time_ms);
+    }
+  }
+}
+
+
+void MemoryReducer::NotifyMarkCompact(const Event& event) {
+  DCHECK_EQ(kMarkCompact, event.type);
+  Action old_action = state_.action;
+  state_ = Step(state_, event);
+  if (old_action != kWait && state_.action == kWait) {
+    // If we are transitioning to the WAIT state, start the timer.
+    ScheduleTimer(event.time_ms, state_.next_gc_start_ms - event.time_ms);
+  }
+  if (old_action == kRun) {
+    if (FLAG_trace_gc_verbose) {
+      PrintIsolate(heap()->isolate(), "Memory reducer: finished GC #%d (%s)\n",
+                   state_.started_gcs,
+                   state_.action == kWait ? "will do more" : "done");
+    }
+  }
+}
+
+
+void MemoryReducer::NotifyContextDisposed(const Event& event) {
+  DCHECK_EQ(kContextDisposed, event.type);
+  Action old_action = state_.action;
+  state_ = Step(state_, event);
+  if (old_action != kWait && state_.action == kWait) {
+    // If we are transitioning to the WAIT state, start the timer.
+    ScheduleTimer(event.time_ms, state_.next_gc_start_ms - event.time_ms);
+  }
+}
+
+
+bool MemoryReducer::WatchdogGC(const State& state, const Event& event) {
+  return state.last_gc_time_ms != 0 &&
+         event.time_ms > state.last_gc_time_ms + kWatchdogDelayMs;
+}
+
+
+// For specification of this function see the comment for MemoryReducer class.
+MemoryReducer::State MemoryReducer::Step(const State& state,
+                                         const Event& event) {
+  if (!FLAG_incremental_marking || !FLAG_memory_reducer) {
+    return State(kDone, 0, 0, state.last_gc_time_ms);
+  }
+  switch (state.action) {
+    case kDone:
+      if (event.type == kTimer) {
+        return state;
+      } else {
+        DCHECK(event.type == kContextDisposed || event.type == kMarkCompact);
+        return State(
+            kWait, 0, event.time_ms + kLongDelayMs,
+            event.type == kMarkCompact ? event.time_ms : state.last_gc_time_ms);
+      }
+    case kWait:
+      switch (event.type) {
+        case kContextDisposed:
+          return state;
+        case kTimer:
+          if (state.started_gcs >= kMaxNumberOfGCs) {
+            return State(kDone, kMaxNumberOfGCs, 0.0, state.last_gc_time_ms);
+          } else if (event.can_start_incremental_gc &&
+                     (event.should_start_incremental_gc ||
+                      WatchdogGC(state, event))) {
+            if (state.next_gc_start_ms <= event.time_ms) {
+              return State(kRun, state.started_gcs + 1, 0.0,
+                           state.last_gc_time_ms);
+            } else {
+              return state;
+            }
+          } else {
+            return State(kWait, state.started_gcs, event.time_ms + kLongDelayMs,
+                         state.last_gc_time_ms);
+          }
+        case kMarkCompact:
+          return State(kWait, state.started_gcs, event.time_ms + kLongDelayMs,
+                       event.time_ms);
+      }
+    case kRun:
+      if (event.type != kMarkCompact) {
+        return state;
+      } else {
+        if (state.started_gcs < kMaxNumberOfGCs &&
+            (event.next_gc_likely_to_collect_more || state.started_gcs == 1)) {
+          return State(kWait, state.started_gcs, event.time_ms + kShortDelayMs,
+                       event.time_ms);
+        } else {
+          return State(kDone, kMaxNumberOfGCs, 0.0, event.time_ms);
+        }
+      }
+  }
+  UNREACHABLE();
+  return State(kDone, 0, 0, 0.0);  // Make the compiler happy.
+}
+
+
+void MemoryReducer::ScheduleTimer(double time_ms, double delay_ms) {
+  DCHECK(delay_ms > 0);
+  // Record the time and the js call counter.
+  SampleAndGetJsCallsPerMs(time_ms);
+  // Leave some room for precision error in task scheduler.
+  const double kSlackMs = 100;
+  v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap()->isolate());
+  auto timer_task = new MemoryReducer::TimerTask(this);
+  V8::GetCurrentPlatform()->CallDelayedOnForegroundThread(
+      isolate, timer_task, (delay_ms + kSlackMs) / 1000.0);
+}
+
+
+void MemoryReducer::TearDown() { state_ = State(kDone, 0, 0, 0.0); }
+
+}  // namespace internal
+}  // namespace v8
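
MemoryReducer::Step above is a pure function from (state, event) to the next state, which makes the policy easy to exercise without a heap. The following self-contained sketch models the same kDone/kWait/kRun automaton with simplified types; the names are invented, the flag checks (FLAG_incremental_marking, FLAG_memory_reducer) are omitted, and the trace in main() is illustrative only.

// Self-contained model of the kDone/kWait/kRun automaton implemented by
// MemoryReducer::Step above. Constants mirror the ones defined in the file;
// the real class additionally samples JS call rates and schedules timer tasks.
#include <cassert>

enum Action { kDone, kWait, kRun };
enum EventType { kTimer, kMarkCompact, kContextDisposed };

struct State {
  Action action;
  int started_gcs;
  double next_gc_start_ms;
  double last_gc_time_ms;
};

struct Event {
  EventType type;
  double time_ms;
  bool next_gc_likely_to_collect_more;
  bool should_start_incremental_gc;
  bool can_start_incremental_gc;
};

const int kMaxNumberOfGCs = 3;
const double kLongDelayMs = 8000;
const double kShortDelayMs = 500;
const double kWatchdogDelayMs = 100000;

// Forces a GC if the last full GC happened more than kWatchdogDelayMs ago.
bool WatchdogGC(const State& s, const Event& e) {
  return s.last_gc_time_ms != 0 &&
         e.time_ms > s.last_gc_time_ms + kWatchdogDelayMs;
}

State Step(const State& s, const Event& e) {
  switch (s.action) {
    case kDone:
      if (e.type == kTimer) return s;
      // A mark-compact or context disposal signals potential garbage.
      return {kWait, 0, e.time_ms + kLongDelayMs,
              e.type == kMarkCompact ? e.time_ms : s.last_gc_time_ms};
    case kWait:
      if (e.type == kContextDisposed) return s;
      if (e.type == kMarkCompact) {
        return {kWait, s.started_gcs, e.time_ms + kLongDelayMs, e.time_ms};
      }
      // Timer event.
      if (s.started_gcs >= kMaxNumberOfGCs) {
        return {kDone, kMaxNumberOfGCs, 0, s.last_gc_time_ms};
      }
      if (e.can_start_incremental_gc &&
          (e.should_start_incremental_gc || WatchdogGC(s, e))) {
        if (s.next_gc_start_ms <= e.time_ms) {
          return {kRun, s.started_gcs + 1, 0, s.last_gc_time_ms};
        }
        return s;
      }
      return {kWait, s.started_gcs, e.time_ms + kLongDelayMs,
              s.last_gc_time_ms};
    case kRun:
      if (e.type != kMarkCompact) return s;
      if (s.started_gcs < kMaxNumberOfGCs &&
          (e.next_gc_likely_to_collect_more || s.started_gcs == 1)) {
        return {kWait, s.started_gcs, e.time_ms + kShortDelayMs, e.time_ms};
      }
      return {kDone, kMaxNumberOfGCs, 0, e.time_ms};
  }
  return s;
}

int main() {
  // DONE -> WAIT after a mutator-initiated mark-compact at t = 1000 ms.
  State s{kDone, 0, 0, 0};
  s = Step(s, {kMarkCompact, 1000, false, false, false});
  assert(s.action == kWait && s.next_gc_start_ms == 9000);
  // WAIT -> RUN once the timer fires past next_gc_start_ms with an idle
  // mutator and no incremental GC in progress.
  s = Step(s, {kTimer, 9500, false, true, true});
  assert(s.action == kRun && s.started_gcs == 1);
  return 0;
}
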
diff --git a/src/heap/memory-reducer.h b/src/heap/memory-reducer.h
new file mode 100644
index 0000000..9213613
--- /dev/null
+++ b/src/heap/memory-reducer.h
@@ -0,0 +1,167 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_memory_reducer_H
+#define V8_HEAP_memory_reducer_H
+
+#include "include/v8-platform.h"
+#include "src/base/macros.h"
+#include "src/cancelable-task.h"
+
+namespace v8 {
+namespace internal {
+
+class Heap;
+
+
+// The goal of the MemoryReducer class is to detect transition of the mutator
+// from high allocation phase to low allocation phase and to collect potential
+// garbage created in the high allocation phase.
+//
+// The class implements an automaton with the following states and transitions.
+//
+// States:
+// - DONE <last_gc_time_ms>
+// - WAIT <started_gcs> <next_gc_start_ms> <last_gc_time_ms>
+// - RUN <started_gcs> <last_gc_time_ms>
+// The <started_gcs> is an integer in range from 0..kMaxNumberOfGCs that stores
+// the number of GCs initiated by the MemoryReducer since it left the DONE
+// state.
+// The <next_gc_start_ms> is a double that stores the earliest time the next GC
+// can be initiated by the MemoryReducer.
+// The <last_gc_time_ms> is a double that stores the time of the last full GC.
+// The DONE state means that the MemoryReducer is not active.
+// The WAIT state means that the MemoryReducer is waiting for mutator allocation
+// rate to drop. The check for the allocation rate happens in the timer task
+// callback. If the allocation rate does not drop within watchdog_delay_ms of
+// the last GC, the transition to the RUN state is forced.
+// The RUN state means that the MemoryReducer started incremental marking and is
+// waiting for it to finish. Incremental marking steps are performed as usual
+// in the idle notification and in the mutator.
+//
+// Transitions:
+// DONE t -> WAIT 0 (now_ms + long_delay_ms) t' happens:
+//     - on context disposal.
+//     - at the end of mark-compact GC initiated by the mutator.
+// This signals that there is potential garbage to be collected.
+//
+// WAIT n x t -> WAIT n (now_ms + long_delay_ms) t' happens:
+//     - on mark-compact GC initiated by the mutator,
+//     - in the timer callback if the mutator allocation rate is high or
+//       incremental GC is in progress or (now_ms - t < watchdog_delay_ms)
+//
+// WAIT n x t -> WAIT (n+1) t happens:
+//     - on background idle notification, which signals that we can start
+//       incremental marking even if the allocation rate is high.
+// The MemoryReducer starts incremental marking on this transition but still
+// has a pending timer task.
+//
+// WAIT n x t -> DONE t happens:
+//     - in the timer callback if n >= kMaxNumberOfGCs.
+//
+// WAIT n x t -> RUN (n+1) t happens:
+//     - in the timer callback if the mutator allocation rate is low
+//       and now_ms >= x and there is no incremental GC in progress.
+//     - in the timer callback if (now_ms - t > watchdog_delay_ms) and
+//       now_ms >= x and there is no incremental GC in progress.
+// The MemoryReducer starts incremental marking on this transition.
+//
+// RUN n t -> DONE now_ms happens:
+//     - at end of the incremental GC initiated by the MemoryReducer if
+//       (n > 1 and there is no more garbage to be collected) or
+//       n == kMaxNumberOfGCs.
+// RUN n t -> WAIT n (now_ms + short_delay_ms) now_ms happens:
+//     - at end of the incremental GC initiated by the MemoryReducer if
+//       (n == 1 or there is more garbage to be collected) and
+//       n < kMaxNumberOfGCs.
+//
+// now_ms is the current time,
+// t' is t if the current event is not a GC event and is now_ms otherwise,
+// long_delay_ms, short_delay_ms, and watchdog_delay_ms are constants.
+class MemoryReducer {
+ public:
+  enum Action { kDone, kWait, kRun };
+
+  struct State {
+    State(Action action, int started_gcs, double next_gc_start_ms,
+          double last_gc_time_ms)
+        : action(action),
+          started_gcs(started_gcs),
+          next_gc_start_ms(next_gc_start_ms),
+          last_gc_time_ms(last_gc_time_ms) {}
+    Action action;
+    int started_gcs;
+    double next_gc_start_ms;
+    double last_gc_time_ms;
+  };
+
+  enum EventType { kTimer, kMarkCompact, kContextDisposed };
+
+  struct Event {
+    EventType type;
+    double time_ms;
+    bool next_gc_likely_to_collect_more;
+    bool should_start_incremental_gc;
+    bool can_start_incremental_gc;
+  };
+
+  explicit MemoryReducer(Heap* heap)
+      : heap_(heap),
+        state_(kDone, 0, 0.0, 0.0),
+        js_calls_counter_(0),
+        js_calls_sample_time_ms_(0.0) {}
+  // Callbacks.
+  void NotifyMarkCompact(const Event& event);
+  void NotifyContextDisposed(const Event& event);
+  void NotifyBackgroundIdleNotification(const Event& event);
+  // The step function that computes the next state from the current state and
+  // the incoming event.
+  static State Step(const State& state, const Event& event);
+  // Posts a timer task that will call NotifyTimer after the given delay.
+  void ScheduleTimer(double time_ms, double delay_ms);
+  void TearDown();
+  static const int kLongDelayMs;
+  static const int kShortDelayMs;
+  static const int kWatchdogDelayMs;
+  static const int kMaxNumberOfGCs;
+
+  Heap* heap() { return heap_; }
+
+  bool ShouldGrowHeapSlowly() {
+    return state_.action == kDone && state_.started_gcs > 0;
+  }
+
+ private:
+  class TimerTask : public v8::internal::CancelableTask {
+   public:
+    explicit TimerTask(MemoryReducer* memory_reducer);
+
+   private:
+    // v8::internal::CancelableTask overrides.
+    void RunInternal() override;
+    MemoryReducer* memory_reducer_;
+    DISALLOW_COPY_AND_ASSIGN(TimerTask);
+  };
+
+  void NotifyTimer(const Event& event);
+
+  static bool WatchdogGC(const State& state, const Event& event);
+
+  // Returns the rate of JS calls initiated from the API.
+  double SampleAndGetJsCallsPerMs(double time_ms);
+
+  Heap* heap_;
+  State state_;
+  unsigned int js_calls_counter_;
+  double js_calls_sample_time_ms_;
+
+  // Used in cctest.
+  friend class HeapTester;
+  DISALLOW_COPY_AND_ASSIGN(MemoryReducer);
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_HEAP_memory_reducer_H
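
SampleAndGetJsCallsPerMs, declared above and defined in memory-reducer.cc, estimates mutator activity by differencing a monotonically increasing API-call counter between samples. A minimal standalone version of that rate computation is sketched below; the CallRateSampler type and the values in main() are invented for the illustration.

// Standalone sketch of the call-rate sampling behind SampleAndGetJsCallsPerMs:
// remember the previous counter value and timestamp, and return calls per
// millisecond since the last sample.
#include <cassert>

struct CallRateSampler {
  unsigned int last_counter = 0;
  double last_time_ms = 0.0;

  double Sample(unsigned int counter, double time_ms) {
    unsigned int call_delta = counter - last_counter;
    double time_delta_ms = time_ms - last_time_ms;
    last_counter = counter;
    last_time_ms = time_ms;
    return time_delta_ms > 0 ? call_delta / time_delta_ms : 0;
  }
};

int main() {
  CallRateSampler sampler;
  sampler.Sample(0, 0.0);
  // 500 API calls over 1000 ms -> 0.5 calls/ms, exactly at the idleness
  // threshold used by MemoryReducer::TimerTask::RunInternal.
  double rate = sampler.Sample(500, 1000.0);
  assert(rate == 0.5);
  return 0;
}
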
diff --git a/src/heap/object-stats.cc b/src/heap/object-stats.cc
new file mode 100644
index 0000000..c1566ab
--- /dev/null
+++ b/src/heap/object-stats.cc
@@ -0,0 +1,252 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/object-stats.h"
+
+#include "src/counters.h"
+#include "src/heap/heap-inl.h"
+#include "src/isolate.h"
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+
+static base::LazyMutex object_stats_mutex = LAZY_MUTEX_INITIALIZER;
+
+
+void ObjectStats::ClearObjectStats(bool clear_last_time_stats) {
+  memset(object_counts_, 0, sizeof(object_counts_));
+  memset(object_sizes_, 0, sizeof(object_sizes_));
+  if (clear_last_time_stats) {
+    memset(object_counts_last_time_, 0, sizeof(object_counts_last_time_));
+    memset(object_sizes_last_time_, 0, sizeof(object_sizes_last_time_));
+  }
+}
+
+
+void ObjectStats::TraceObjectStat(const char* name, int count, int size,
+                                  double time) {
+  int ms_count = heap()->ms_count();
+  PrintIsolate(isolate(),
+               "heap:%p, time:%f, gc:%d, type:%s, count:%d, size:%d\n",
+               static_cast<void*>(heap()), time, ms_count, name, count, size);
+}
+
+
+void ObjectStats::TraceObjectStats() {
+  base::LockGuard<base::Mutex> lock_guard(object_stats_mutex.Pointer());
+  int index;
+  int count;
+  int size;
+  int total_size = 0;
+  double time = isolate()->time_millis_since_init();
+#define TRACE_OBJECT_COUNT(name)                     \
+  count = static_cast<int>(object_counts_[name]);    \
+  size = static_cast<int>(object_sizes_[name]) / KB; \
+  total_size += size;                                \
+  TraceObjectStat(#name, count, size, time);
+  INSTANCE_TYPE_LIST(TRACE_OBJECT_COUNT)
+#undef TRACE_OBJECT_COUNT
+#define TRACE_OBJECT_COUNT(name)                      \
+  index = FIRST_CODE_KIND_SUB_TYPE + Code::name;      \
+  count = static_cast<int>(object_counts_[index]);    \
+  size = static_cast<int>(object_sizes_[index]) / KB; \
+  TraceObjectStat("*CODE_" #name, count, size, time);
+  CODE_KIND_LIST(TRACE_OBJECT_COUNT)
+#undef TRACE_OBJECT_COUNT
+#define TRACE_OBJECT_COUNT(name)                      \
+  index = FIRST_FIXED_ARRAY_SUB_TYPE + name;          \
+  count = static_cast<int>(object_counts_[index]);    \
+  size = static_cast<int>(object_sizes_[index]) / KB; \
+  TraceObjectStat("*FIXED_ARRAY_" #name, count, size, time);
+  FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(TRACE_OBJECT_COUNT)
+#undef TRACE_OBJECT_COUNT
+#define TRACE_OBJECT_COUNT(name)                                              \
+  index =                                                                     \
+      FIRST_CODE_AGE_SUB_TYPE + Code::k##name##CodeAge - Code::kFirstCodeAge; \
+  count = static_cast<int>(object_counts_[index]);                            \
+  size = static_cast<int>(object_sizes_[index]) / KB;                         \
+  TraceObjectStat("*CODE_AGE_" #name, count, size, time);
+  CODE_AGE_LIST_COMPLETE(TRACE_OBJECT_COUNT)
+#undef TRACE_OBJECT_COUNT
+}
+
+
+void ObjectStats::CheckpointObjectStats() {
+  base::LockGuard<base::Mutex> lock_guard(object_stats_mutex.Pointer());
+  Counters* counters = isolate()->counters();
+#define ADJUST_LAST_TIME_OBJECT_COUNT(name)              \
+  counters->count_of_##name()->Increment(                \
+      static_cast<int>(object_counts_[name]));           \
+  counters->count_of_##name()->Decrement(                \
+      static_cast<int>(object_counts_last_time_[name])); \
+  counters->size_of_##name()->Increment(                 \
+      static_cast<int>(object_sizes_[name]));            \
+  counters->size_of_##name()->Decrement(                 \
+      static_cast<int>(object_sizes_last_time_[name]));
+  INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
+#undef ADJUST_LAST_TIME_OBJECT_COUNT
+  int index;
+#define ADJUST_LAST_TIME_OBJECT_COUNT(name)               \
+  index = FIRST_CODE_KIND_SUB_TYPE + Code::name;          \
+  counters->count_of_CODE_TYPE_##name()->Increment(       \
+      static_cast<int>(object_counts_[index]));           \
+  counters->count_of_CODE_TYPE_##name()->Decrement(       \
+      static_cast<int>(object_counts_last_time_[index])); \
+  counters->size_of_CODE_TYPE_##name()->Increment(        \
+      static_cast<int>(object_sizes_[index]));            \
+  counters->size_of_CODE_TYPE_##name()->Decrement(        \
+      static_cast<int>(object_sizes_last_time_[index]));
+  CODE_KIND_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
+#undef ADJUST_LAST_TIME_OBJECT_COUNT
+#define ADJUST_LAST_TIME_OBJECT_COUNT(name)               \
+  index = FIRST_FIXED_ARRAY_SUB_TYPE + name;              \
+  counters->count_of_FIXED_ARRAY_##name()->Increment(     \
+      static_cast<int>(object_counts_[index]));           \
+  counters->count_of_FIXED_ARRAY_##name()->Decrement(     \
+      static_cast<int>(object_counts_last_time_[index])); \
+  counters->size_of_FIXED_ARRAY_##name()->Increment(      \
+      static_cast<int>(object_sizes_[index]));            \
+  counters->size_of_FIXED_ARRAY_##name()->Decrement(      \
+      static_cast<int>(object_sizes_last_time_[index]));
+  FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
+#undef ADJUST_LAST_TIME_OBJECT_COUNT
+#define ADJUST_LAST_TIME_OBJECT_COUNT(name)                                   \
+  index =                                                                     \
+      FIRST_CODE_AGE_SUB_TYPE + Code::k##name##CodeAge - Code::kFirstCodeAge; \
+  counters->count_of_CODE_AGE_##name()->Increment(                            \
+      static_cast<int>(object_counts_[index]));                               \
+  counters->count_of_CODE_AGE_##name()->Decrement(                            \
+      static_cast<int>(object_counts_last_time_[index]));                     \
+  counters->size_of_CODE_AGE_##name()->Increment(                             \
+      static_cast<int>(object_sizes_[index]));                                \
+  counters->size_of_CODE_AGE_##name()->Decrement(                             \
+      static_cast<int>(object_sizes_last_time_[index]));
+  CODE_AGE_LIST_COMPLETE(ADJUST_LAST_TIME_OBJECT_COUNT)
+#undef ADJUST_LAST_TIME_OBJECT_COUNT
+
+  MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
+  MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
+  ClearObjectStats();
+}
+
+
+Isolate* ObjectStats::isolate() { return heap()->isolate(); }
+
+
+void ObjectStatsVisitor::CountFixedArray(
+    FixedArrayBase* fixed_array, FixedArraySubInstanceType fast_type,
+    FixedArraySubInstanceType dictionary_type) {
+  Heap* heap = fixed_array->map()->GetHeap();
+  if (fixed_array->map() != heap->fixed_cow_array_map() &&
+      fixed_array->map() != heap->fixed_double_array_map() &&
+      fixed_array != heap->empty_fixed_array()) {
+    if (fixed_array->IsDictionary()) {
+      heap->object_stats_->RecordFixedArraySubTypeStats(dictionary_type,
+                                                        fixed_array->Size());
+    } else {
+      heap->object_stats_->RecordFixedArraySubTypeStats(fast_type,
+                                                        fixed_array->Size());
+    }
+  }
+}
+
+
+void ObjectStatsVisitor::VisitBase(VisitorId id, Map* map, HeapObject* obj) {
+  Heap* heap = map->GetHeap();
+  int object_size = obj->Size();
+  heap->object_stats_->RecordObjectStats(map->instance_type(), object_size);
+  table_.GetVisitorById(id)(map, obj);
+  if (obj->IsJSObject()) {
+    JSObject* object = JSObject::cast(obj);
+    CountFixedArray(object->elements(), DICTIONARY_ELEMENTS_SUB_TYPE,
+                    FAST_ELEMENTS_SUB_TYPE);
+    CountFixedArray(object->properties(), DICTIONARY_PROPERTIES_SUB_TYPE,
+                    FAST_PROPERTIES_SUB_TYPE);
+  }
+}
+
+
+template <ObjectStatsVisitor::VisitorId id>
+void ObjectStatsVisitor::Visit(Map* map, HeapObject* obj) {
+  VisitBase(id, map, obj);
+}
+
+
+template <>
+void ObjectStatsVisitor::Visit<ObjectStatsVisitor::kVisitMap>(Map* map,
+                                                              HeapObject* obj) {
+  Heap* heap = map->GetHeap();
+  Map* map_obj = Map::cast(obj);
+  DCHECK(map->instance_type() == MAP_TYPE);
+  DescriptorArray* array = map_obj->instance_descriptors();
+  if (map_obj->owns_descriptors() && array != heap->empty_descriptor_array()) {
+    int fixed_array_size = array->Size();
+    heap->object_stats_->RecordFixedArraySubTypeStats(DESCRIPTOR_ARRAY_SUB_TYPE,
+                                                      fixed_array_size);
+  }
+  if (map_obj->has_code_cache()) {
+    CodeCache* cache = CodeCache::cast(map_obj->code_cache());
+    heap->object_stats_->RecordFixedArraySubTypeStats(
+        MAP_CODE_CACHE_SUB_TYPE, cache->default_cache()->Size());
+    if (!cache->normal_type_cache()->IsUndefined()) {
+      heap->object_stats_->RecordFixedArraySubTypeStats(
+          MAP_CODE_CACHE_SUB_TYPE,
+          FixedArray::cast(cache->normal_type_cache())->Size());
+    }
+  }
+  VisitBase(kVisitMap, map, obj);
+}
+
+
+template <>
+void ObjectStatsVisitor::Visit<ObjectStatsVisitor::kVisitCode>(
+    Map* map, HeapObject* obj) {
+  Heap* heap = map->GetHeap();
+  int object_size = obj->Size();
+  DCHECK(map->instance_type() == CODE_TYPE);
+  Code* code_obj = Code::cast(obj);
+  heap->object_stats_->RecordCodeSubTypeStats(code_obj->kind(),
+                                              code_obj->GetAge(), object_size);
+  VisitBase(kVisitCode, map, obj);
+}
+
+
+template <>
+void ObjectStatsVisitor::Visit<ObjectStatsVisitor::kVisitSharedFunctionInfo>(
+    Map* map, HeapObject* obj) {
+  Heap* heap = map->GetHeap();
+  SharedFunctionInfo* sfi = SharedFunctionInfo::cast(obj);
+  if (sfi->scope_info() != heap->empty_fixed_array()) {
+    heap->object_stats_->RecordFixedArraySubTypeStats(
+        SCOPE_INFO_SUB_TYPE, FixedArray::cast(sfi->scope_info())->Size());
+  }
+  VisitBase(kVisitSharedFunctionInfo, map, obj);
+}
+
+
+template <>
+void ObjectStatsVisitor::Visit<ObjectStatsVisitor::kVisitFixedArray>(
+    Map* map, HeapObject* obj) {
+  Heap* heap = map->GetHeap();
+  FixedArray* fixed_array = FixedArray::cast(obj);
+  if (fixed_array == heap->string_table()) {
+    heap->object_stats_->RecordFixedArraySubTypeStats(STRING_TABLE_SUB_TYPE,
+                                                      fixed_array->Size());
+  }
+  VisitBase(kVisitFixedArray, map, obj);
+}
+
+
+void ObjectStatsVisitor::Initialize(VisitorDispatchTable<Callback>* original) {
+  // Copy the original visitor table to make call-through possible. Once we
+  // have preserved a copy locally, we patch the original table to call us.
+  table_.CopyFrom(original);
+#define COUNT_FUNCTION(id) original->Register(kVisit##id, Visit<kVisit##id>);
+  VISITOR_ID_LIST(COUNT_FUNCTION)
+#undef COUNT_FUNCTION
+}
+
+}  // namespace internal
+}  // namespace v8
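
ObjectStatsVisitor::Initialize above uses a copy-then-patch pattern: it first copies the original visitor dispatch table so call-through stays possible, then overwrites the original entries with counting wrappers. A minimal standalone sketch of that pattern follows; the table layout, Callback signature, and visitor names are invented for the illustration and are much simpler than V8's VisitorDispatchTable.

// Standalone sketch of the "copy then patch" dispatch-table trick: keep a
// private copy of the original callbacks so the counting wrapper can still
// call through to them after the shared table has been patched.
#include <cstdio>

typedef void (*Callback)(int object);

const int kNumVisitors = 2;
Callback original_table[kNumVisitors];  // shared table, patched below
Callback saved_table[kNumVisitors];     // private copy for call-through
int visit_counts[kNumVisitors] = {0, 0};

void VisitA(int object) { std::printf("visiting A %d\n", object); }
void VisitB(int object) { std::printf("visiting B %d\n", object); }

template <int id>
void CountingVisit(int object) {
  visit_counts[id]++;       // record the statistic...
  saved_table[id](object);  // ...then call through to the original visitor
}

void Initialize() {
  original_table[0] = VisitA;
  original_table[1] = VisitB;
  for (int i = 0; i < kNumVisitors; i++) saved_table[i] = original_table[i];
  original_table[0] = CountingVisit<0>;  // patch: callers now hit the wrapper
  original_table[1] = CountingVisit<1>;
}

int main() {
  Initialize();
  original_table[0](42);
  original_table[1](7);
  std::printf("counts: %d %d\n", visit_counts[0], visit_counts[1]);
  return 0;
}
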
diff --git a/src/heap/object-stats.h b/src/heap/object-stats.h
new file mode 100644
index 0000000..e2dcfaa
--- /dev/null
+++ b/src/heap/object-stats.h
@@ -0,0 +1,102 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_OBJECT_STATS_H_
+#define V8_HEAP_OBJECT_STATS_H_
+
+#include "src/heap/heap.h"
+#include "src/heap/objects-visiting.h"
+#include "src/objects.h"
+
+namespace v8 {
+namespace internal {
+
+class ObjectStats {
+ public:
+  explicit ObjectStats(Heap* heap) : heap_(heap) {}
+
+  // ObjectStats are kept in two arrays, counts and sizes. Related stats are
+  // stored in a contiguous linear buffer. Stats groups are stored one after
+  // another.
+  enum {
+    FIRST_CODE_KIND_SUB_TYPE = LAST_TYPE + 1,
+    FIRST_FIXED_ARRAY_SUB_TYPE =
+        FIRST_CODE_KIND_SUB_TYPE + Code::NUMBER_OF_KINDS,
+    FIRST_CODE_AGE_SUB_TYPE =
+        FIRST_FIXED_ARRAY_SUB_TYPE + LAST_FIXED_ARRAY_SUB_TYPE + 1,
+    OBJECT_STATS_COUNT = FIRST_CODE_AGE_SUB_TYPE + Code::kCodeAgeCount + 1
+  };
+
+  void ClearObjectStats(bool clear_last_time_stats = false);
+
+  void TraceObjectStats();
+  void TraceObjectStat(const char* name, int count, int size, double time);
+  void CheckpointObjectStats();
+
+  void RecordObjectStats(InstanceType type, size_t size) {
+    DCHECK(type <= LAST_TYPE);
+    object_counts_[type]++;
+    object_sizes_[type] += size;
+  }
+
+  void RecordCodeSubTypeStats(int code_sub_type, int code_age, size_t size) {
+    int code_sub_type_index = FIRST_CODE_KIND_SUB_TYPE + code_sub_type;
+    int code_age_index =
+        FIRST_CODE_AGE_SUB_TYPE + code_age - Code::kFirstCodeAge;
+    DCHECK(code_sub_type_index >= FIRST_CODE_KIND_SUB_TYPE &&
+           code_sub_type_index < FIRST_CODE_AGE_SUB_TYPE);
+    DCHECK(code_age_index >= FIRST_CODE_AGE_SUB_TYPE &&
+           code_age_index < OBJECT_STATS_COUNT);
+    object_counts_[code_sub_type_index]++;
+    object_sizes_[code_sub_type_index] += size;
+    object_counts_[code_age_index]++;
+    object_sizes_[code_age_index] += size;
+  }
+
+  void RecordFixedArraySubTypeStats(int array_sub_type, size_t size) {
+    DCHECK(array_sub_type <= LAST_FIXED_ARRAY_SUB_TYPE);
+    object_counts_[FIRST_FIXED_ARRAY_SUB_TYPE + array_sub_type]++;
+    object_sizes_[FIRST_FIXED_ARRAY_SUB_TYPE + array_sub_type] += size;
+  }
+
+  size_t object_count_last_gc(size_t index) {
+    return object_counts_last_time_[index];
+  }
+
+  size_t object_size_last_gc(size_t index) {
+    return object_sizes_last_time_[index];
+  }
+
+  Isolate* isolate();
+  Heap* heap() { return heap_; }
+
+ private:
+  Heap* heap_;
+
+  // Object counts and used memory by InstanceType
+  size_t object_counts_[OBJECT_STATS_COUNT];
+  size_t object_counts_last_time_[OBJECT_STATS_COUNT];
+  size_t object_sizes_[OBJECT_STATS_COUNT];
+  size_t object_sizes_last_time_[OBJECT_STATS_COUNT];
+};
+
+
+class ObjectStatsVisitor : public StaticMarkingVisitor<ObjectStatsVisitor> {
+ public:
+  static void Initialize(VisitorDispatchTable<Callback>* original);
+
+  static void VisitBase(VisitorId id, Map* map, HeapObject* obj);
+
+  static void CountFixedArray(FixedArrayBase* fixed_array,
+                              FixedArraySubInstanceType fast_type,
+                              FixedArraySubInstanceType dictionary_type);
+
+  template <VisitorId id>
+  static inline void Visit(Map* map, HeapObject* obj);
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_HEAP_OBJECT_STATS_H_
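
The enum above packs several stats groups into one pair of linear arrays: per-InstanceType slots come first, followed by code-kind, fixed-array, and code-age sub-type slots at fixed offsets. The sketch below models only two of those groups, with made-up group sizes (the real offsets come from LAST_TYPE, Code::NUMBER_OF_KINDS, and LAST_FIXED_ARRAY_SUB_TYPE), to show how an index such as FIRST_FIXED_ARRAY_SUB_TYPE + array_sub_type is formed.

// Illustration of the linear stats-array layout described by the enum above.
// Group sizes are placeholders invented for the example.
#include <cassert>
#include <cstddef>

const int kNumInstanceTypes = 5;       // stand-in for LAST_TYPE + 1
const int kNumCodeKinds = 3;           // stand-in for Code::NUMBER_OF_KINDS
const int kNumFixedArraySubTypes = 4;  // stand-in for LAST_FIXED_ARRAY_SUB_TYPE + 1

enum {
  FIRST_CODE_KIND_SUB_TYPE = kNumInstanceTypes,
  FIRST_FIXED_ARRAY_SUB_TYPE = FIRST_CODE_KIND_SUB_TYPE + kNumCodeKinds,
  OBJECT_STATS_COUNT = FIRST_FIXED_ARRAY_SUB_TYPE + kNumFixedArraySubTypes
};

size_t object_counts[OBJECT_STATS_COUNT] = {};
size_t object_sizes[OBJECT_STATS_COUNT] = {};

// Mirrors RecordFixedArraySubTypeStats: sub-type stats live at a fixed offset
// behind the per-InstanceType slots.
void RecordFixedArraySubTypeStats(int array_sub_type, size_t size) {
  object_counts[FIRST_FIXED_ARRAY_SUB_TYPE + array_sub_type]++;
  object_sizes[FIRST_FIXED_ARRAY_SUB_TYPE + array_sub_type] += size;
}

int main() {
  RecordFixedArraySubTypeStats(2, 128);
  assert(object_counts[FIRST_FIXED_ARRAY_SUB_TYPE + 2] == 1);
  assert(object_sizes[FIRST_FIXED_ARRAY_SUB_TYPE + 2] == 128);
  return 0;
}
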
diff --git a/src/heap/objects-visiting-inl.h b/src/heap/objects-visiting-inl.h
index e6334f3..a29ba4b 100644
--- a/src/heap/objects-visiting-inl.h
+++ b/src/heap/objects-visiting-inl.h
@@ -5,10 +5,22 @@
 #ifndef V8_OBJECTS_VISITING_INL_H_
 #define V8_OBJECTS_VISITING_INL_H_
 
+#include "src/heap/array-buffer-tracker.h"
+#include "src/heap/objects-visiting.h"
+#include "src/ic/ic-state.h"
+#include "src/macro-assembler.h"
+#include "src/objects-body-descriptors-inl.h"
 
 namespace v8 {
 namespace internal {
 
+
+template <typename Callback>
+Callback VisitorDispatchTable<Callback>::GetVisitor(Map* map) {
+  return reinterpret_cast<Callback>(callbacks_[map->visitor_id()]);
+}
+
+
 template <typename StaticVisitor>
 void StaticNewSpaceVisitor<StaticVisitor>::Initialize() {
   table_.Register(
@@ -32,8 +44,15 @@
                                        FixedArray::BodyDescriptor, int>::Visit);
 
   table_.Register(kVisitFixedDoubleArray, &VisitFixedDoubleArray);
-  table_.Register(kVisitFixedTypedArray, &VisitFixedTypedArray);
-  table_.Register(kVisitFixedFloat64Array, &VisitFixedTypedArray);
+  table_.Register(
+      kVisitFixedTypedArray,
+      &FlexibleBodyVisitor<StaticVisitor, FixedTypedArrayBase::BodyDescriptor,
+                           int>::Visit);
+
+  table_.Register(
+      kVisitFixedFloat64Array,
+      &FlexibleBodyVisitor<StaticVisitor, FixedTypedArrayBase::BodyDescriptor,
+                           int>::Visit);
 
   table_.Register(
       kVisitNativeContext,
@@ -41,6 +60,7 @@
                         int>::Visit);
 
   table_.Register(kVisitByteArray, &VisitByteArray);
+  table_.Register(kVisitBytecodeArray, &VisitBytecodeArray);
 
   table_.Register(
       kVisitSharedFunctionInfo,
@@ -51,14 +71,14 @@
 
   table_.Register(kVisitSeqTwoByteString, &VisitSeqTwoByteString);
 
-  table_.Register(kVisitJSFunction, &VisitJSFunction);
+  // Don't visit code entry. We are using this visitor only during scavenges.
+  table_.Register(
+      kVisitJSFunction,
+      &FlexibleBodyVisitor<StaticVisitor, JSFunction::BodyDescriptorWeakCode,
+                           int>::Visit);
 
   table_.Register(kVisitJSArrayBuffer, &VisitJSArrayBuffer);
 
-  table_.Register(kVisitJSTypedArray, &VisitJSTypedArray);
-
-  table_.Register(kVisitJSDataView, &VisitJSDataView);
-
   table_.Register(kVisitFreeSpace, &VisitFreeSpace);
 
   table_.Register(kVisitJSWeakCollection, &JSObjectVisitor::Visit);
@@ -78,48 +98,25 @@
 template <typename StaticVisitor>
 int StaticNewSpaceVisitor<StaticVisitor>::VisitJSArrayBuffer(
     Map* map, HeapObject* object) {
-  Heap* heap = map->GetHeap();
+  typedef FlexibleBodyVisitor<StaticVisitor, JSArrayBuffer::BodyDescriptor, int>
+      JSArrayBufferBodyVisitor;
 
-  STATIC_ASSERT(JSArrayBuffer::kWeakFirstViewOffset ==
-                JSArrayBuffer::kWeakNextOffset + kPointerSize);
-  VisitPointers(heap, HeapObject::RawField(
-                          object, JSArrayBuffer::BodyDescriptor::kStartOffset),
-                HeapObject::RawField(object, JSArrayBuffer::kWeakNextOffset));
-  VisitPointers(
-      heap, HeapObject::RawField(
-                object, JSArrayBuffer::kWeakNextOffset + 2 * kPointerSize),
-      HeapObject::RawField(object, JSArrayBuffer::kSizeWithInternalFields));
-  return JSArrayBuffer::kSizeWithInternalFields;
+  if (!JSArrayBuffer::cast(object)->is_external()) {
+    Heap* heap = map->GetHeap();
+    heap->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(object));
+  }
+  return JSArrayBufferBodyVisitor::Visit(map, object);
 }
 
 
 template <typename StaticVisitor>
-int StaticNewSpaceVisitor<StaticVisitor>::VisitJSTypedArray(
+int StaticNewSpaceVisitor<StaticVisitor>::VisitBytecodeArray(
     Map* map, HeapObject* object) {
   VisitPointers(
-      map->GetHeap(),
-      HeapObject::RawField(object, JSTypedArray::BodyDescriptor::kStartOffset),
-      HeapObject::RawField(object, JSTypedArray::kWeakNextOffset));
-  VisitPointers(
-      map->GetHeap(), HeapObject::RawField(
-                          object, JSTypedArray::kWeakNextOffset + kPointerSize),
-      HeapObject::RawField(object, JSTypedArray::kSizeWithInternalFields));
-  return JSTypedArray::kSizeWithInternalFields;
-}
-
-
-template <typename StaticVisitor>
-int StaticNewSpaceVisitor<StaticVisitor>::VisitJSDataView(Map* map,
-                                                          HeapObject* object) {
-  VisitPointers(
-      map->GetHeap(),
-      HeapObject::RawField(object, JSDataView::BodyDescriptor::kStartOffset),
-      HeapObject::RawField(object, JSDataView::kWeakNextOffset));
-  VisitPointers(
-      map->GetHeap(),
-      HeapObject::RawField(object, JSDataView::kWeakNextOffset + kPointerSize),
-      HeapObject::RawField(object, JSDataView::kSizeWithInternalFields));
-  return JSDataView::kSizeWithInternalFields;
+      map->GetHeap(), object,
+      HeapObject::RawField(object, BytecodeArray::kConstantPoolOffset),
+      HeapObject::RawField(object, BytecodeArray::kHeaderSize));
+  return reinterpret_cast<BytecodeArray*>(object)->BytecodeArraySize();
 }
 
 
@@ -145,11 +142,15 @@
 
   table_.Register(kVisitFixedDoubleArray, &DataObjectVisitor::Visit);
 
-  table_.Register(kVisitFixedTypedArray, &DataObjectVisitor::Visit);
+  table_.Register(
+      kVisitFixedTypedArray,
+      &FlexibleBodyVisitor<StaticVisitor, FixedTypedArrayBase::BodyDescriptor,
+                           void>::Visit);
 
-  table_.Register(kVisitFixedFloat64Array, &DataObjectVisitor::Visit);
-
-  table_.Register(kVisitConstantPoolArray, &VisitConstantPoolArray);
+  table_.Register(
+      kVisitFixedFloat64Array,
+      &FlexibleBodyVisitor<StaticVisitor, FixedTypedArrayBase::BodyDescriptor,
+                           void>::Visit);
 
   table_.Register(kVisitNativeContext, &VisitNativeContext);
 
@@ -157,6 +158,8 @@
 
   table_.Register(kVisitByteArray, &DataObjectVisitor::Visit);
 
+  table_.Register(kVisitBytecodeArray, &VisitBytecodeArray);
+
   table_.Register(kVisitFreeSpace, &DataObjectVisitor::Visit);
 
   table_.Register(kVisitSeqOneByteString, &DataObjectVisitor::Visit);
@@ -179,10 +182,6 @@
 
   table_.Register(kVisitJSArrayBuffer, &VisitJSArrayBuffer);
 
-  table_.Register(kVisitJSTypedArray, &VisitJSTypedArray);
-
-  table_.Register(kVisitJSDataView, &VisitJSDataView);
-
   // Registration for kVisitJSRegExp is done by StaticVisitor.
 
   table_.Register(
@@ -193,6 +192,8 @@
 
   table_.Register(kVisitWeakCell, &VisitWeakCell);
 
+  table_.Register(kVisitTransitionArray, &VisitTransitionArray);
+
   table_.template RegisterSpecializations<DataObjectVisitor, kVisitDataObject,
                                           kVisitDataObjectGeneric>();
 
@@ -206,9 +207,10 @@
 
 template <typename StaticVisitor>
 void StaticMarkingVisitor<StaticVisitor>::VisitCodeEntry(
-    Heap* heap, Address entry_address) {
+    Heap* heap, HeapObject* object, Address entry_address) {
   Code* code = Code::cast(Code::GetObjectFromEntryAddress(entry_address));
-  heap->mark_compact_collector()->RecordCodeEntrySlot(entry_address, code);
+  heap->mark_compact_collector()->RecordCodeEntrySlot(object, entry_address,
+                                                      code);
   StaticVisitor::MarkObject(heap, code);
 }
 
@@ -233,7 +235,7 @@
                                                     RelocInfo* rinfo) {
   DCHECK(rinfo->rmode() == RelocInfo::CELL);
   Cell* cell = rinfo->target_cell();
-  // No need to record slots because the cell space is not compacted during GC.
+  heap->mark_compact_collector()->RecordRelocSlot(rinfo, cell);
   if (!rinfo->host()->IsWeakObject(cell)) {
     StaticVisitor::MarkObject(heap, cell);
   }
@@ -243,11 +245,9 @@
 template <typename StaticVisitor>
 void StaticMarkingVisitor<StaticVisitor>::VisitDebugTarget(Heap* heap,
                                                            RelocInfo* rinfo) {
-  DCHECK((RelocInfo::IsJSReturn(rinfo->rmode()) &&
-          rinfo->IsPatchedReturnSequence()) ||
-         (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
-          rinfo->IsPatchedDebugBreakSlotSequence()));
-  Code* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
+  DCHECK(RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
+         rinfo->IsPatchedDebugBreakSlotSequence());
+  Code* target = Code::GetCodeFromTargetAddress(rinfo->debug_call_address());
   heap->mark_compact_collector()->RecordRelocSlot(rinfo, target);
   StaticVisitor::MarkObject(heap, target);
 }
@@ -262,10 +262,8 @@
   // when they might be keeping a Context alive, or when the heap is about
   // to be serialized.
   if (FLAG_cleanup_code_caches_at_gc && target->is_inline_cache_stub() &&
-      !target->is_call_stub() &&
-      ((heap->flush_monomorphic_ics() && !target->embeds_maps_weakly()) ||
-       heap->isolate()->serializer_enabled() ||
-       target->ic_age() != heap->global_ic_age())) {
+      !target->is_call_stub() && (heap->isolate()->serializer_enabled() ||
+                                  target->ic_age() != heap->global_ic_age())) {
     ICUtility::Clear(heap->isolate(), rinfo->pc(),
                      rinfo->host()->constant_pool());
     target = Code::GetCodeFromTargetAddress(rinfo->target_address());
@@ -291,13 +289,6 @@
     Map* map, HeapObject* object) {
   FixedBodyVisitor<StaticVisitor, Context::MarkCompactBodyDescriptor,
                    void>::Visit(map, object);
-
-  MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector();
-  for (int idx = Context::FIRST_WEAK_SLOT; idx < Context::NATIVE_CONTEXT_SLOTS;
-       ++idx) {
-    Object** slot = Context::cast(object)->RawFieldOfElementAt(idx);
-    collector->RecordSlot(slot, slot, *slot);
-  }
 }
 
 
@@ -314,11 +305,12 @@
 
   // When map collection is enabled we have to mark through map's transitions
   // and back pointers in a special way to make these links weak.
-  if (FLAG_collect_maps && map_object->CanTransition()) {
+  if (map_object->CanTransition()) {
     MarkMapContents(heap, map_object);
   } else {
     StaticVisitor::VisitPointers(
-        heap, HeapObject::RawField(object, Map::kPointerFieldsBeginOffset),
+        heap, object,
+        HeapObject::RawField(object, Map::kPointerFieldsBeginOffset),
         HeapObject::RawField(object, Map::kPointerFieldsEndOffset));
   }
 }
@@ -329,21 +321,8 @@
     Map* map, HeapObject* object) {
   Heap* heap = map->GetHeap();
 
-  Object** slot =
-      HeapObject::RawField(object, PropertyCell::kDependentCodeOffset);
-  if (FLAG_collect_maps) {
-    // Mark property cell dependent codes array but do not push it onto marking
-    // stack, this will make references from it weak. We will clean dead
-    // codes when we iterate over property cells in ClearNonLiveReferences.
-    HeapObject* obj = HeapObject::cast(*slot);
-    heap->mark_compact_collector()->RecordSlot(slot, slot, obj);
-    StaticVisitor::MarkObjectWithoutPush(heap, obj);
-  } else {
-    StaticVisitor::VisitPointer(heap, slot);
-  }
-
   StaticVisitor::VisitPointers(
-      heap,
+      heap, object,
       HeapObject::RawField(object, PropertyCell::kPointerFieldsBeginOffset),
       HeapObject::RawField(object, PropertyCell::kPointerFieldsEndOffset));
 }
@@ -354,13 +333,50 @@
                                                         HeapObject* object) {
   Heap* heap = map->GetHeap();
   WeakCell* weak_cell = reinterpret_cast<WeakCell*>(object);
-  Object* undefined = heap->undefined_value();
   // Enqueue weak cell in linked list of encountered weak collections.
   // We can ignore weak cells with cleared values because they will always
   // contain smi zero.
-  if (weak_cell->next() == undefined && !weak_cell->cleared()) {
-    weak_cell->set_next(heap->encountered_weak_cells());
-    heap->set_encountered_weak_cells(weak_cell);
+  if (weak_cell->next_cleared() && !weak_cell->cleared()) {
+    HeapObject* value = HeapObject::cast(weak_cell->value());
+    if (MarkCompactCollector::IsMarked(value)) {
+      // Weak cells with live values are directly processed here to reduce
+      // the processing time of weak cells during the main GC pause.
+      Object** slot = HeapObject::RawField(weak_cell, WeakCell::kValueOffset);
+      map->GetHeap()->mark_compact_collector()->RecordSlot(weak_cell, slot,
+                                                           *slot);
+    } else {
+      // If the liveness of the weak cell's value is not yet known, we have to
+      // postpone processing until the liveness of the whole transitive
+      // closure is known.
+      weak_cell->set_next(heap->encountered_weak_cells(),
+                          UPDATE_WEAK_WRITE_BARRIER);
+      heap->set_encountered_weak_cells(weak_cell);
+    }
+  }
+}
+
+
+template <typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitTransitionArray(
+    Map* map, HeapObject* object) {
+  TransitionArray* array = TransitionArray::cast(object);
+  Heap* heap = array->GetHeap();
+  // Visit strong references.
+  if (array->HasPrototypeTransitions()) {
+    StaticVisitor::VisitPointer(heap, array,
+                                array->GetPrototypeTransitionsSlot());
+  }
+  int num_transitions = TransitionArray::NumberOfTransitions(array);
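+  // Transition targets are not visited here; they are treated weakly and
+  // processed later via the encountered transition arrays list.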
+  for (int i = 0; i < num_transitions; ++i) {
+    StaticVisitor::VisitPointer(heap, array, array->GetKeySlot(i));
+  }
+  // Enqueue the array in the linked list of encountered transition arrays if
+  // it is not already in the list.
+  if (array->next_link()->IsUndefined()) {
+    Heap* heap = map->GetHeap();
+    array->set_next_link(heap->encountered_transition_arrays(),
+                         UPDATE_WEAK_WRITE_BARRIER);
+    heap->set_encountered_transition_arrays(array);
   }
 }
 
@@ -370,22 +386,8 @@
     Map* map, HeapObject* object) {
   Heap* heap = map->GetHeap();
 
-  Object** slot =
-      HeapObject::RawField(object, AllocationSite::kDependentCodeOffset);
-  if (FLAG_collect_maps) {
-    // Mark allocation site dependent codes array but do not push it onto
-    // marking stack, this will make references from it weak. We will clean
-    // dead codes when we iterate over allocation sites in
-    // ClearNonLiveReferences.
-    HeapObject* obj = HeapObject::cast(*slot);
-    heap->mark_compact_collector()->RecordSlot(slot, slot, obj);
-    StaticVisitor::MarkObjectWithoutPush(heap, obj);
-  } else {
-    StaticVisitor::VisitPointer(heap, slot);
-  }
-
   StaticVisitor::VisitPointers(
-      heap,
+      heap, object,
       HeapObject::RawField(object, AllocationSite::kPointerFieldsBeginOffset),
       HeapObject::RawField(object, AllocationSite::kPointerFieldsEndOffset));
 }
@@ -394,6 +396,9 @@
 template <typename StaticVisitor>
 void StaticMarkingVisitor<StaticVisitor>::VisitWeakCollection(
     Map* map, HeapObject* object) {
+  typedef FlexibleBodyVisitor<StaticVisitor,
+                              JSWeakCollection::BodyDescriptorWeak,
+                              void> JSWeakCollectionBodyVisitor;
   Heap* heap = map->GetHeap();
   JSWeakCollection* weak_collection =
       reinterpret_cast<JSWeakCollection*>(object);
@@ -406,13 +411,7 @@
 
   // Skip visiting the backing hash table containing the mappings and the
   // pointer to the other enqueued weak collections, both are post-processed.
-  StaticVisitor::VisitPointers(
-      heap, HeapObject::RawField(object, JSWeakCollection::kPropertiesOffset),
-      HeapObject::RawField(object, JSWeakCollection::kTableOffset));
-  STATIC_ASSERT(JSWeakCollection::kTableOffset + kPointerSize ==
-                JSWeakCollection::kNextOffset);
-  STATIC_ASSERT(JSWeakCollection::kNextOffset + kPointerSize ==
-                JSWeakCollection::kSize);
+  JSWeakCollectionBodyVisitor::Visit(map, object);
 
   // Partially initialized weak collection is enqueued, but table is ignored.
   if (!weak_collection->table()->IsHashTable()) return;
@@ -420,7 +419,7 @@
   // Mark the backing hash table without pushing it on the marking stack.
   Object** slot = HeapObject::RawField(object, JSWeakCollection::kTableOffset);
   HeapObject* obj = HeapObject::cast(*slot);
-  heap->mark_compact_collector()->RecordSlot(slot, slot, obj);
+  heap->mark_compact_collector()->RecordSlot(object, slot, obj);
   StaticVisitor::MarkObjectWithoutPush(heap, obj);
 }
 
@@ -428,12 +427,14 @@
 template <typename StaticVisitor>
 void StaticMarkingVisitor<StaticVisitor>::VisitCode(Map* map,
                                                     HeapObject* object) {
+  typedef FlexibleBodyVisitor<StaticVisitor, Code::BodyDescriptor, void>
+      CodeBodyVisitor;
   Heap* heap = map->GetHeap();
   Code* code = Code::cast(object);
   if (FLAG_age_code && !heap->isolate()->serializer_enabled()) {
     code->MakeOlder(heap->mark_compact_collector()->marking_parity());
   }
-  code->CodeIterateBody<StaticVisitor>(heap);
+  CodeBodyVisitor::Visit(map, object);
 }
 
 
@@ -446,24 +447,16 @@
     shared->ResetForNewContext(heap->global_ic_age());
   }
   if (FLAG_cleanup_code_caches_at_gc) {
-    shared->ClearTypeFeedbackInfo();
+    shared->ClearTypeFeedbackInfoAtGCTime();
   }
-  if (FLAG_cache_optimized_code && FLAG_flush_optimized_code_cache &&
-      !shared->optimized_code_map()->IsSmi()) {
-    // Always flush the optimized code map if requested by flag.
-    shared->ClearOptimizedCodeMap();
+  if (FLAG_flush_optimized_code_cache) {
+    if (!shared->OptimizedCodeMapIsCleared()) {
+      // Always flush the optimized code map if requested by flag.
+      shared->ClearOptimizedCodeMap();
+    }
   }
   MarkCompactCollector* collector = heap->mark_compact_collector();
   if (collector->is_code_flushing_enabled()) {
-    if (FLAG_cache_optimized_code && !shared->optimized_code_map()->IsSmi()) {
-      // Add the shared function info holding an optimized code map to
-      // the code flusher for processing of code maps after marking.
-      collector->code_flusher()->AddOptimizedCodeMap(shared);
-      // Treat all references within the code map weakly by marking the
-      // code map itself but not pushing it onto the marking deque.
-      FixedArray* code_map = FixedArray::cast(shared->optimized_code_map());
-      StaticVisitor::MarkObjectWithoutPush(heap, code_map);
-    }
     if (IsFlushable(heap, shared)) {
       // This function's code looks flushable. But we have to postpone
       // the decision until we see all functions that point to the same
@@ -476,46 +469,12 @@
       VisitSharedFunctionInfoWeakCode(heap, object);
       return;
     }
-  } else {
-    if (FLAG_cache_optimized_code && !shared->optimized_code_map()->IsSmi()) {
-      // Flush optimized code map on major GCs without code flushing,
-      // needed because cached code doesn't contain breakpoints.
-      shared->ClearOptimizedCodeMap();
-    }
   }
   VisitSharedFunctionInfoStrongCode(heap, object);
 }
 
 
 template <typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitConstantPoolArray(
-    Map* map, HeapObject* object) {
-  Heap* heap = map->GetHeap();
-  ConstantPoolArray* array = ConstantPoolArray::cast(object);
-  ConstantPoolArray::Iterator code_iter(array, ConstantPoolArray::CODE_PTR);
-  while (!code_iter.is_finished()) {
-    Address code_entry = reinterpret_cast<Address>(
-        array->RawFieldOfElementAt(code_iter.next_index()));
-    StaticVisitor::VisitCodeEntry(heap, code_entry);
-  }
-
-  ConstantPoolArray::Iterator heap_iter(array, ConstantPoolArray::HEAP_PTR);
-  while (!heap_iter.is_finished()) {
-    Object** slot = array->RawFieldOfElementAt(heap_iter.next_index());
-    HeapObject* object = HeapObject::cast(*slot);
-    heap->mark_compact_collector()->RecordSlot(slot, slot, object);
-    bool is_weak_object =
-        (array->get_weak_object_state() ==
-             ConstantPoolArray::WEAK_OBJECTS_IN_OPTIMIZED_CODE &&
-         Code::IsWeakObjectInOptimizedCode(object));
-    if (!is_weak_object) {
-      StaticVisitor::MarkObject(heap, object);
-    }
-  }
-}
-
-
-template <typename StaticVisitor>
 void StaticMarkingVisitor<StaticVisitor>::VisitJSFunction(Map* map,
                                                           HeapObject* object) {
   Heap* heap = map->GetHeap();
@@ -530,37 +489,22 @@
       // non-flushable, because it is required for bailing out from
       // optimized code.
       collector->code_flusher()->AddCandidate(function);
-      // Visit shared function info immediately to avoid double checking
-      // of its flushability later. This is just an optimization because
-      // the shared function info would eventually be visited.
-      SharedFunctionInfo* shared = function->shared();
-      if (StaticVisitor::MarkObjectWithoutPush(heap, shared)) {
-        StaticVisitor::MarkObject(heap, shared->map());
-        VisitSharedFunctionInfoWeakCode(heap, shared);
-      }
       // Treat the reference to the code object weakly.
-      VisitJSFunctionWeakCode(heap, object);
+      VisitJSFunctionWeakCode(map, object);
       return;
     } else {
       // Visit all unoptimized code objects to prevent flushing them.
       StaticVisitor::MarkObject(heap, function->shared()->code());
-      if (function->code()->kind() == Code::OPTIMIZED_FUNCTION) {
-        MarkInlinedFunctionsCode(heap, function->code());
-      }
     }
   }
-  VisitJSFunctionStrongCode(heap, object);
+  VisitJSFunctionStrongCode(map, object);
 }
 
 
 template <typename StaticVisitor>
 void StaticMarkingVisitor<StaticVisitor>::VisitJSRegExp(Map* map,
                                                         HeapObject* object) {
-  int last_property_offset =
-      JSRegExp::kSize + kPointerSize * map->inobject_properties();
-  StaticVisitor::VisitPointers(
-      map->GetHeap(), HeapObject::RawField(object, JSRegExp::kPropertiesOffset),
-      HeapObject::RawField(object, last_property_offset));
+  JSObjectVisitor::Visit(map, object);
 }
 
 
@@ -569,150 +513,63 @@
     Map* map, HeapObject* object) {
   Heap* heap = map->GetHeap();
 
-  STATIC_ASSERT(JSArrayBuffer::kWeakFirstViewOffset ==
-                JSArrayBuffer::kWeakNextOffset + kPointerSize);
-  StaticVisitor::VisitPointers(
-      heap,
-      HeapObject::RawField(object, JSArrayBuffer::BodyDescriptor::kStartOffset),
-      HeapObject::RawField(object, JSArrayBuffer::kWeakNextOffset));
-  StaticVisitor::VisitPointers(
-      heap, HeapObject::RawField(
-                object, JSArrayBuffer::kWeakNextOffset + 2 * kPointerSize),
-      HeapObject::RawField(object, JSArrayBuffer::kSizeWithInternalFields));
+  typedef FlexibleBodyVisitor<StaticVisitor, JSArrayBuffer::BodyDescriptor,
+                              void> JSArrayBufferBodyVisitor;
+
+  JSArrayBufferBodyVisitor::Visit(map, object);
+
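+  // Only non-external buffers that live outside new space are reported to the
+  // array buffer tracker here; external backing stores are not GC-managed.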
+  if (!JSArrayBuffer::cast(object)->is_external() &&
+      !heap->InNewSpace(object)) {
+    heap->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(object));
+  }
 }
 
 
 template <typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitJSTypedArray(
+void StaticMarkingVisitor<StaticVisitor>::VisitBytecodeArray(
     Map* map, HeapObject* object) {
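+  // The bytecode stream itself contains no heap pointers; only the tagged
+  // fields between the constant pool offset and the end of the header are
+  // visited.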
   StaticVisitor::VisitPointers(
-      map->GetHeap(),
-      HeapObject::RawField(object, JSTypedArray::BodyDescriptor::kStartOffset),
-      HeapObject::RawField(object, JSTypedArray::kWeakNextOffset));
-  StaticVisitor::VisitPointers(
-      map->GetHeap(), HeapObject::RawField(
-                          object, JSTypedArray::kWeakNextOffset + kPointerSize),
-      HeapObject::RawField(object, JSTypedArray::kSizeWithInternalFields));
-}
-
-
-template <typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitJSDataView(Map* map,
-                                                          HeapObject* object) {
-  StaticVisitor::VisitPointers(
-      map->GetHeap(),
-      HeapObject::RawField(object, JSDataView::BodyDescriptor::kStartOffset),
-      HeapObject::RawField(object, JSDataView::kWeakNextOffset));
-  StaticVisitor::VisitPointers(
-      map->GetHeap(),
-      HeapObject::RawField(object, JSDataView::kWeakNextOffset + kPointerSize),
-      HeapObject::RawField(object, JSDataView::kSizeWithInternalFields));
+      map->GetHeap(), object,
+      HeapObject::RawField(object, BytecodeArray::kConstantPoolOffset),
+      HeapObject::RawField(object, BytecodeArray::kHeaderSize));
 }
 
 
 template <typename StaticVisitor>
 void StaticMarkingVisitor<StaticVisitor>::MarkMapContents(Heap* heap,
                                                           Map* map) {
-  // Make sure that the back pointer stored either in the map itself or
-  // inside its transitions array is marked. Skip recording the back
-  // pointer slot since map space is not compacted.
-  StaticVisitor::MarkObject(heap, HeapObject::cast(map->GetBackPointer()));
-
-  // Treat pointers in the transitions array as weak and also mark that
-  // array to prevent visiting it later. Skip recording the transition
-  // array slot, since it will be implicitly recorded when the pointer
-  // fields of this map are visited.
-  if (map->HasTransitionArray()) {
-    TransitionArray* transitions = map->transitions();
-    MarkTransitionArray(heap, transitions);
-  }
-
   // Since descriptor arrays are potentially shared, ensure that only the
-  // descriptors that belong to this map are marked. The first time a
-  // non-empty descriptor array is marked, its header is also visited. The slot
-  // holding the descriptor array will be implicitly recorded when the pointer
-  // fields of this map are visited.
-  DescriptorArray* descriptors = map->instance_descriptors();
-  if (StaticVisitor::MarkObjectWithoutPush(heap, descriptors) &&
-      descriptors->length() > 0) {
-    StaticVisitor::VisitPointers(heap, descriptors->GetFirstElementAddress(),
-                                 descriptors->GetDescriptorEndSlot(0));
+  // descriptors that belong to this map are marked. The first time a non-empty
+  // descriptor array is marked, its header is also visited. The slot holding
+  // the descriptor array will be implicitly recorded when the pointer fields of
+  // this map are visited.  Prototype maps don't keep track of transitions, so
+  // just mark the entire descriptor array.
+  if (!map->is_prototype_map()) {
+    DescriptorArray* descriptors = map->instance_descriptors();
+    if (StaticVisitor::MarkObjectWithoutPush(heap, descriptors) &&
+        descriptors->length() > 0) {
+      StaticVisitor::VisitPointers(heap, descriptors,
+                                   descriptors->GetFirstElementAddress(),
+                                   descriptors->GetDescriptorEndSlot(0));
+    }
+    int start = 0;
+    int end = map->NumberOfOwnDescriptors();
+    if (start < end) {
+      StaticVisitor::VisitPointers(heap, descriptors,
+                                   descriptors->GetDescriptorStartSlot(start),
+                                   descriptors->GetDescriptorEndSlot(end));
+    }
   }
-  int start = 0;
-  int end = map->NumberOfOwnDescriptors();
-  if (start < end) {
-    StaticVisitor::VisitPointers(heap,
-                                 descriptors->GetDescriptorStartSlot(start),
-                                 descriptors->GetDescriptorEndSlot(end));
-  }
-
-  // Mark prototype dependent codes array but do not push it onto marking
-  // stack, this will make references from it weak. We will clean dead
-  // codes when we iterate over maps in ClearNonLiveTransitions.
-  Object** slot = HeapObject::RawField(map, Map::kDependentCodeOffset);
-  HeapObject* obj = HeapObject::cast(*slot);
-  heap->mark_compact_collector()->RecordSlot(slot, slot, obj);
-  StaticVisitor::MarkObjectWithoutPush(heap, obj);
 
   // Mark the pointer fields of the Map. Since the transitions array has
   // been marked already, it is fine that one of these fields contains a
   // pointer to it.
   StaticVisitor::VisitPointers(
-      heap, HeapObject::RawField(map, Map::kPointerFieldsBeginOffset),
+      heap, map, HeapObject::RawField(map, Map::kPointerFieldsBeginOffset),
       HeapObject::RawField(map, Map::kPointerFieldsEndOffset));
 }
 
 
-template <typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::MarkTransitionArray(
-    Heap* heap, TransitionArray* transitions) {
-  if (!StaticVisitor::MarkObjectWithoutPush(heap, transitions)) return;
-
-  // Simple transitions do not have keys nor prototype transitions.
-  if (transitions->IsSimpleTransition()) return;
-
-  if (transitions->HasPrototypeTransitions()) {
-    // Mark prototype transitions array but do not push it onto marking
-    // stack, this will make references from it weak. We will clean dead
-    // prototype transitions in ClearNonLiveTransitions.
-    Object** slot = transitions->GetPrototypeTransitionsSlot();
-    HeapObject* obj = HeapObject::cast(*slot);
-    heap->mark_compact_collector()->RecordSlot(slot, slot, obj);
-    StaticVisitor::MarkObjectWithoutPush(heap, obj);
-  }
-
-  for (int i = 0; i < transitions->number_of_transitions(); ++i) {
-    StaticVisitor::VisitPointer(heap, transitions->GetKeySlot(i));
-  }
-}
-
-
-template <typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::MarkInlinedFunctionsCode(Heap* heap,
-                                                                   Code* code) {
-  // Skip in absence of inlining.
-  // TODO(turbofan): Revisit once we support inlining.
-  if (code->is_turbofanned()) return;
-  // For optimized functions we should retain both non-optimized version
-  // of its code and non-optimized version of all inlined functions.
-  // This is required to support bailing out from inlined code.
-  DeoptimizationInputData* data =
-      DeoptimizationInputData::cast(code->deoptimization_data());
-  FixedArray* literals = data->LiteralArray();
-  for (int i = 0, count = data->InlinedFunctionCount()->value(); i < count;
-       i++) {
-    JSFunction* inlined = JSFunction::cast(literals->get(i));
-    StaticVisitor::MarkObject(heap, inlined->shared()->code());
-  }
-}
-
-
-inline static bool IsValidNonBuiltinContext(Object* context) {
-  return context->IsContext() &&
-         !Context::cast(context)->global_object()->IsJSBuiltinsObject();
-}
-
-
 inline static bool HasSourceCode(Heap* heap, SharedFunctionInfo* info) {
   Object* undefined = heap->undefined_value();
   return (info->script() != undefined) &&
@@ -728,12 +585,7 @@
   // Code is either on stack, in compilation cache or referenced
   // by optimized version of function.
   MarkBit code_mark = Marking::MarkBitFrom(function->code());
-  if (code_mark.Get()) {
-    return false;
-  }
-
-  // The function must have a valid context and not be a builtin.
-  if (!IsValidNonBuiltinContext(function->context())) {
+  if (Marking::IsBlackOrGrey(code_mark)) {
     return false;
   }
 
@@ -757,7 +609,7 @@
   // Code is either on stack, in compilation cache or referenced
   // by optimized version of function.
   MarkBit code_mark = Marking::MarkBitFrom(shared_info->code());
-  if (code_mark.Get()) {
+  if (Marking::IsBlackOrGrey(code_mark)) {
     return false;
   }
 
@@ -794,6 +646,16 @@
     return false;
   }
 
+  // The function must not be a builtin.
+  if (shared_info->IsBuiltin()) {
+    return false;
+  }
+
+  // Maintain debug break slots in the code.
+  if (shared_info->HasDebugCode()) {
+    return false;
+  }
+
   // If this is a function initialized with %SetCode then the one-to-one
   // relation between SharedFunctionInfo and Code is broken.
   if (shared_info->dont_flush()) {
@@ -816,7 +678,7 @@
       object, SharedFunctionInfo::BodyDescriptor::kStartOffset);
   Object** end_slot = HeapObject::RawField(
       object, SharedFunctionInfo::BodyDescriptor::kEndOffset);
-  StaticVisitor::VisitPointers(heap, start_slot, end_slot);
+  StaticVisitor::VisitPointers(heap, object, start_slot, end_slot);
 }
 
 
@@ -825,7 +687,7 @@
     Heap* heap, HeapObject* object) {
   Object** name_slot =
       HeapObject::RawField(object, SharedFunctionInfo::kNameOffset);
-  StaticVisitor::VisitPointer(heap, name_slot);
+  StaticVisitor::VisitPointer(heap, object, name_slot);
 
   // Skip visiting kCodeOffset as it is treated weakly here.
   STATIC_ASSERT(SharedFunctionInfo::kNameOffset + kPointerSize ==
@@ -837,111 +699,30 @@
       HeapObject::RawField(object, SharedFunctionInfo::kOptimizedCodeMapOffset);
   Object** end_slot = HeapObject::RawField(
       object, SharedFunctionInfo::BodyDescriptor::kEndOffset);
-  StaticVisitor::VisitPointers(heap, start_slot, end_slot);
+  StaticVisitor::VisitPointers(heap, object, start_slot, end_slot);
 }
 
 
 template <typename StaticVisitor>
 void StaticMarkingVisitor<StaticVisitor>::VisitJSFunctionStrongCode(
-    Heap* heap, HeapObject* object) {
-  Object** start_slot =
-      HeapObject::RawField(object, JSFunction::kPropertiesOffset);
-  Object** end_slot =
-      HeapObject::RawField(object, JSFunction::kCodeEntryOffset);
-  StaticVisitor::VisitPointers(heap, start_slot, end_slot);
-
-  VisitCodeEntry(heap, object->address() + JSFunction::kCodeEntryOffset);
-  STATIC_ASSERT(JSFunction::kCodeEntryOffset + kPointerSize ==
-                JSFunction::kPrototypeOrInitialMapOffset);
-
-  start_slot =
-      HeapObject::RawField(object, JSFunction::kPrototypeOrInitialMapOffset);
-  end_slot = HeapObject::RawField(object, JSFunction::kNonWeakFieldsEndOffset);
-  StaticVisitor::VisitPointers(heap, start_slot, end_slot);
+    Map* map, HeapObject* object) {
+  typedef FlexibleBodyVisitor<StaticVisitor,
+                              JSFunction::BodyDescriptorStrongCode,
+                              void> JSFunctionStrongCodeBodyVisitor;
+  JSFunctionStrongCodeBodyVisitor::Visit(map, object);
 }
 
 
 template <typename StaticVisitor>
 void StaticMarkingVisitor<StaticVisitor>::VisitJSFunctionWeakCode(
-    Heap* heap, HeapObject* object) {
-  Object** start_slot =
-      HeapObject::RawField(object, JSFunction::kPropertiesOffset);
-  Object** end_slot =
-      HeapObject::RawField(object, JSFunction::kCodeEntryOffset);
-  StaticVisitor::VisitPointers(heap, start_slot, end_slot);
-
-  // Skip visiting kCodeEntryOffset as it is treated weakly here.
-  STATIC_ASSERT(JSFunction::kCodeEntryOffset + kPointerSize ==
-                JSFunction::kPrototypeOrInitialMapOffset);
-
-  start_slot =
-      HeapObject::RawField(object, JSFunction::kPrototypeOrInitialMapOffset);
-  end_slot = HeapObject::RawField(object, JSFunction::kNonWeakFieldsEndOffset);
-  StaticVisitor::VisitPointers(heap, start_slot, end_slot);
+    Map* map, HeapObject* object) {
+  typedef FlexibleBodyVisitor<StaticVisitor, JSFunction::BodyDescriptorWeakCode,
+                              void> JSFunctionWeakCodeBodyVisitor;
+  JSFunctionWeakCodeBodyVisitor::Visit(map, object);
 }
 
 
-void Code::CodeIterateBody(ObjectVisitor* v) {
-  int mode_mask = RelocInfo::kCodeTargetMask |
-                  RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
-                  RelocInfo::ModeMask(RelocInfo::CELL) |
-                  RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
-                  RelocInfo::ModeMask(RelocInfo::JS_RETURN) |
-                  RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT) |
-                  RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
-
-  // There are two places where we iterate code bodies: here and the
-  // templated CodeIterateBody (below). They should be kept in sync.
-  IteratePointer(v, kRelocationInfoOffset);
-  IteratePointer(v, kHandlerTableOffset);
-  IteratePointer(v, kDeoptimizationDataOffset);
-  IteratePointer(v, kTypeFeedbackInfoOffset);
-  IterateNextCodeLink(v, kNextCodeLinkOffset);
-  IteratePointer(v, kConstantPoolOffset);
-
-  RelocIterator it(this, mode_mask);
-  Isolate* isolate = this->GetIsolate();
-  for (; !it.done(); it.next()) {
-    it.rinfo()->Visit(isolate, v);
-  }
-}
-
-
-template <typename StaticVisitor>
-void Code::CodeIterateBody(Heap* heap) {
-  int mode_mask = RelocInfo::kCodeTargetMask |
-                  RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
-                  RelocInfo::ModeMask(RelocInfo::CELL) |
-                  RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
-                  RelocInfo::ModeMask(RelocInfo::JS_RETURN) |
-                  RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT) |
-                  RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
-
-  // There are two places where we iterate code bodies: here and the non-
-  // templated CodeIterateBody (above). They should be kept in sync.
-  StaticVisitor::VisitPointer(
-      heap,
-      reinterpret_cast<Object**>(this->address() + kRelocationInfoOffset));
-  StaticVisitor::VisitPointer(
-      heap, reinterpret_cast<Object**>(this->address() + kHandlerTableOffset));
-  StaticVisitor::VisitPointer(
-      heap,
-      reinterpret_cast<Object**>(this->address() + kDeoptimizationDataOffset));
-  StaticVisitor::VisitPointer(
-      heap,
-      reinterpret_cast<Object**>(this->address() + kTypeFeedbackInfoOffset));
-  StaticVisitor::VisitNextCodeLink(
-      heap, reinterpret_cast<Object**>(this->address() + kNextCodeLinkOffset));
-  StaticVisitor::VisitPointer(
-      heap, reinterpret_cast<Object**>(this->address() + kConstantPoolOffset));
-
-
-  RelocIterator it(this, mode_mask);
-  for (; !it.done(); it.next()) {
-    it.rinfo()->template Visit<StaticVisitor>(heap);
-  }
-}
-}
-}  // namespace v8::internal
+}  // namespace internal
+}  // namespace v8
 
 #endif  // V8_OBJECTS_VISITING_INL_H_
diff --git a/src/heap/objects-visiting.cc b/src/heap/objects-visiting.cc
index 20d92de..315c897 100644
--- a/src/heap/objects-visiting.cc
+++ b/src/heap/objects-visiting.cc
@@ -2,14 +2,21 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "src/v8.h"
-
 #include "src/heap/objects-visiting.h"
 
+#include "src/heap/mark-compact-inl.h"
+#include "src/heap/objects-visiting-inl.h"
+
 namespace v8 {
 namespace internal {
 
 
+StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(Map* map) {
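+  // Maps with unboxed double fields do not have a fast (all-tagged) pointer
+  // layout and therefore get the generic visitors.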
+  return GetVisitorId(map->instance_type(), map->instance_size(),
+                      FLAG_unbox_double_fields && !map->HasFastPointerLayout());
+}
+
+
 StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
     int instance_type, int instance_size, bool has_unboxed_fields) {
   if (instance_type < FIRST_NONSTRING_TYPE) {
@@ -42,6 +49,9 @@
     case BYTE_ARRAY_TYPE:
       return kVisitByteArray;
 
+    case BYTECODE_ARRAY_TYPE:
+      return kVisitBytecodeArray;
+
     case FREE_SPACE_TYPE:
       return kVisitFreeSpace;
 
@@ -51,9 +61,6 @@
     case FIXED_DOUBLE_ARRAY_TYPE:
       return kVisitFixedDoubleArray;
 
-    case CONSTANT_POOL_ARRAY_TYPE:
-      return kVisitConstantPoolArray;
-
     case ODDBALL_TYPE:
       return kVisitOddball;
 
@@ -72,13 +79,8 @@
     case WEAK_CELL_TYPE:
       return kVisitWeakCell;
 
-    case JS_SET_TYPE:
-      return GetVisitorIdForSize(kVisitStruct, kVisitStructGeneric,
-                                 JSSet::kSize, has_unboxed_fields);
-
-    case JS_MAP_TYPE:
-      return GetVisitorIdForSize(kVisitStruct, kVisitStructGeneric,
-                                 JSMap::kSize, has_unboxed_fields);
+    case TRANSITION_ARRAY_TYPE:
+      return kVisitTransitionArray;
 
     case JS_WEAK_MAP_TYPE:
     case JS_WEAK_SET_TYPE:
@@ -92,31 +94,14 @@
 
     case JS_PROXY_TYPE:
       return GetVisitorIdForSize(kVisitStruct, kVisitStructGeneric,
-                                 JSProxy::kSize, has_unboxed_fields);
-
-    case JS_FUNCTION_PROXY_TYPE:
-      return GetVisitorIdForSize(kVisitStruct, kVisitStructGeneric,
-                                 JSFunctionProxy::kSize, has_unboxed_fields);
-
-    case FOREIGN_TYPE:
-      return GetVisitorIdForSize(kVisitDataObject, kVisitDataObjectGeneric,
-                                 Foreign::kSize, has_unboxed_fields);
+                                 instance_size, has_unboxed_fields);
 
     case SYMBOL_TYPE:
       return kVisitSymbol;
 
-    case FILLER_TYPE:
-      return kVisitDataObjectGeneric;
-
     case JS_ARRAY_BUFFER_TYPE:
       return kVisitJSArrayBuffer;
 
-    case JS_TYPED_ARRAY_TYPE:
-      return kVisitJSTypedArray;
-
-    case JS_DATA_VIEW_TYPE:
-      return kVisitJSDataView;
-
     case JS_OBJECT_TYPE:
     case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
     case JS_GENERATOR_OBJECT_TYPE:
@@ -126,25 +111,31 @@
     case JS_ARRAY_TYPE:
     case JS_GLOBAL_PROXY_TYPE:
     case JS_GLOBAL_OBJECT_TYPE:
-    case JS_BUILTINS_OBJECT_TYPE:
     case JS_MESSAGE_OBJECT_TYPE:
+    case JS_TYPED_ARRAY_TYPE:
+    case JS_DATA_VIEW_TYPE:
+    case JS_SET_TYPE:
+    case JS_MAP_TYPE:
     case JS_SET_ITERATOR_TYPE:
     case JS_MAP_ITERATOR_TYPE:
+    case JS_ITERATOR_RESULT_TYPE:
+    case JS_PROMISE_TYPE:
+    case JS_BOUND_FUNCTION_TYPE:
       return GetVisitorIdForSize(kVisitJSObject, kVisitJSObjectGeneric,
                                  instance_size, has_unboxed_fields);
 
     case JS_FUNCTION_TYPE:
       return kVisitJSFunction;
 
+    case FILLER_TYPE:
+      if (instance_size == kPointerSize) return kVisitDataObjectGeneric;
+    // Fall through.
+    case FOREIGN_TYPE:
     case HEAP_NUMBER_TYPE:
     case MUTABLE_HEAP_NUMBER_TYPE:
-#define EXTERNAL_ARRAY_CASE(Type, type, TYPE, ctype, size) \
-  case EXTERNAL_##TYPE##_ARRAY_TYPE:
-
-      TYPED_ARRAYS(EXTERNAL_ARRAY_CASE)
+    case SIMD128_VALUE_TYPE:
       return GetVisitorIdForSize(kVisitDataObject, kVisitDataObjectGeneric,
                                  instance_size, has_unboxed_fields);
-#undef EXTERNAL_ARRAY_CASE
 
     case FIXED_UINT8_ARRAY_TYPE:
     case FIXED_INT8_ARRAY_TYPE:
@@ -197,9 +188,11 @@
   T* tail = NULL;
   MarkCompactCollector* collector = heap->mark_compact_collector();
   bool record_slots = MustRecordSlots(heap);
+
   while (list != undefined) {
     // Check whether to keep the candidate in the list.
     T* candidate = reinterpret_cast<T*>(list);
+
     Object* retained = retainer->RetainAs(list);
     if (retained != NULL) {
       if (head == undefined) {
@@ -212,7 +205,7 @@
         if (record_slots) {
           Object** next_slot =
               HeapObject::RawField(tail, WeakListVisitor<T>::WeakNextOffset());
-          collector->RecordSlot(next_slot, next_slot, retained);
+          collector->RecordSlot(tail, next_slot, retained);
         }
       }
       // Retained object is new tail.
@@ -220,9 +213,9 @@
       candidate = reinterpret_cast<T*>(retained);
       tail = candidate;
 
-
       // tail is a live object, visit it.
       WeakListVisitor<T>::VisitLiveObject(heap, tail, retainer);
+
     } else {
       WeakListVisitor<T>::VisitPhantomObject(heap, candidate);
     }
@@ -232,9 +225,7 @@
   }
 
   // Terminate the list if there is one or more elements.
-  if (tail != NULL) {
-    WeakListVisitor<T>::SetWeakNext(tail, undefined);
-  }
+  if (tail != NULL) WeakListVisitor<T>::SetWeakNext(tail, undefined);
   return head;
 }
 
@@ -253,7 +244,7 @@
 template <>
 struct WeakListVisitor<JSFunction> {
   static void SetWeakNext(JSFunction* function, Object* next) {
-    function->set_next_function_link(next);
+    function->set_next_function_link(next, UPDATE_WEAK_WRITE_BARRIER);
   }
 
   static Object* WeakNext(JSFunction* function) {
@@ -271,7 +262,7 @@
 template <>
 struct WeakListVisitor<Code> {
   static void SetWeakNext(Code* code, Object* next) {
-    code->set_next_code_link(next);
+    code->set_next_code_link(next, UPDATE_WEAK_WRITE_BARRIER);
   }
 
   static Object* WeakNext(Code* code) { return code->next_code_link(); }
@@ -287,7 +278,7 @@
 template <>
 struct WeakListVisitor<Context> {
   static void SetWeakNext(Context* context, Object* next) {
-    context->set(Context::NEXT_CONTEXT_LINK, next, UPDATE_WRITE_BARRIER);
+    context->set(Context::NEXT_CONTEXT_LINK, next, UPDATE_WEAK_WRITE_BARRIER);
   }
 
   static Object* WeakNext(Context* context) {
@@ -303,8 +294,21 @@
     // Process the three weak lists linked off the context.
     DoWeakList<JSFunction>(heap, context, retainer,
                            Context::OPTIMIZED_FUNCTIONS_LIST);
-    DoWeakList<Code>(heap, context, retainer, Context::OPTIMIZED_CODE_LIST);
-    DoWeakList<Code>(heap, context, retainer, Context::DEOPTIMIZED_CODE_LIST);
+
+    if (heap->gc_state() == Heap::MARK_COMPACT) {
+      // Record the slots of the weak entries in the native context.
+      MarkCompactCollector* collector = heap->mark_compact_collector();
+      for (int idx = Context::FIRST_WEAK_SLOT;
+           idx < Context::NATIVE_CONTEXT_SLOTS; ++idx) {
+        Object** slot = Context::cast(context)->RawFieldOfElementAt(idx);
+        collector->RecordSlot(context, slot, *slot);
+      }
+      // Code objects are always allocated in Code space; we do not
+      // have to visit them during scavenges, since scavenges only
+      // collect new space.
+      DoWeakList<Code>(heap, context, retainer, Context::OPTIMIZED_CODE_LIST);
+      DoWeakList<Code>(heap, context, retainer, Context::DEOPTIMIZED_CODE_LIST);
+    }
   }
 
   template <class T>
@@ -320,8 +324,7 @@
       // Record the updated slot if necessary.
       Object** head_slot =
           HeapObject::RawField(context, FixedArray::SizeFor(index));
-      heap->mark_compact_collector()->RecordSlot(head_slot, head_slot,
-                                                 list_head);
+      heap->mark_compact_collector()->RecordSlot(context, head_slot, list_head);
     }
   }
 
@@ -335,53 +338,9 @@
 
 
 template <>
-struct WeakListVisitor<JSArrayBufferView> {
-  static void SetWeakNext(JSArrayBufferView* obj, Object* next) {
-    obj->set_weak_next(next);
-  }
-
-  static Object* WeakNext(JSArrayBufferView* obj) { return obj->weak_next(); }
-
-  static int WeakNextOffset() { return JSArrayBufferView::kWeakNextOffset; }
-
-  static void VisitLiveObject(Heap*, JSArrayBufferView*, WeakObjectRetainer*) {}
-
-  static void VisitPhantomObject(Heap*, JSArrayBufferView*) {}
-};
-
-
-template <>
-struct WeakListVisitor<JSArrayBuffer> {
-  static void SetWeakNext(JSArrayBuffer* obj, Object* next) {
-    obj->set_weak_next(next);
-  }
-
-  static Object* WeakNext(JSArrayBuffer* obj) { return obj->weak_next(); }
-
-  static int WeakNextOffset() { return JSArrayBuffer::kWeakNextOffset; }
-
-  static void VisitLiveObject(Heap* heap, JSArrayBuffer* array_buffer,
-                              WeakObjectRetainer* retainer) {
-    Object* typed_array_obj = VisitWeakList<JSArrayBufferView>(
-        heap, array_buffer->weak_first_view(), retainer);
-    array_buffer->set_weak_first_view(typed_array_obj);
-    if (typed_array_obj != heap->undefined_value() && MustRecordSlots(heap)) {
-      Object** slot = HeapObject::RawField(array_buffer,
-                                           JSArrayBuffer::kWeakFirstViewOffset);
-      heap->mark_compact_collector()->RecordSlot(slot, slot, typed_array_obj);
-    }
-  }
-
-  static void VisitPhantomObject(Heap* heap, JSArrayBuffer* phantom) {
-    Runtime::FreeArrayBuffer(heap->isolate(), phantom);
-  }
-};
-
-
-template <>
 struct WeakListVisitor<AllocationSite> {
   static void SetWeakNext(AllocationSite* obj, Object* next) {
-    obj->set_weak_next(next);
+    obj->set_weak_next(next, UPDATE_WEAK_WRITE_BARRIER);
   }
 
   static Object* WeakNext(AllocationSite* obj) { return obj->weak_next(); }
@@ -394,23 +353,10 @@
 };
 
 
-template Object* VisitWeakList<Code>(Heap* heap, Object* list,
-                                     WeakObjectRetainer* retainer);
-
-
-template Object* VisitWeakList<JSFunction>(Heap* heap, Object* list,
-                                           WeakObjectRetainer* retainer);
-
-
 template Object* VisitWeakList<Context>(Heap* heap, Object* list,
                                         WeakObjectRetainer* retainer);
 
-
-template Object* VisitWeakList<JSArrayBuffer>(Heap* heap, Object* list,
-                                              WeakObjectRetainer* retainer);
-
-
 template Object* VisitWeakList<AllocationSite>(Heap* heap, Object* list,
                                                WeakObjectRetainer* retainer);
-}
-}  // namespace v8::internal
+}  // namespace internal
+}  // namespace v8
diff --git a/src/heap/objects-visiting.h b/src/heap/objects-visiting.h
index a442867..1fe8a17 100644
--- a/src/heap/objects-visiting.h
+++ b/src/heap/objects-visiting.h
@@ -6,7 +6,10 @@
 #define V8_OBJECTS_VISITING_H_
 
 #include "src/allocation.h"
+#include "src/heap/heap.h"
+#include "src/heap/spaces.h"
 #include "src/layout-descriptor.h"
+#include "src/objects-body-descriptors.h"
 
 // This file provides base classes and auxiliary methods for defining
 // static object visitors used during GC.
@@ -29,12 +32,12 @@
   V(SeqTwoByteString)      \
   V(ShortcutCandidate)     \
   V(ByteArray)             \
+  V(BytecodeArray)         \
   V(FreeSpace)             \
   V(FixedArray)            \
   V(FixedDoubleArray)      \
   V(FixedTypedArray)       \
   V(FixedFloat64Array)     \
-  V(ConstantPoolArray)     \
   V(NativeContext)         \
   V(AllocationSite)        \
   V(DataObject2)           \
@@ -73,12 +76,11 @@
   V(Cell)                  \
   V(PropertyCell)          \
   V(WeakCell)              \
+  V(TransitionArray)       \
   V(SharedFunctionInfo)    \
   V(JSFunction)            \
   V(JSWeakCollection)      \
   V(JSArrayBuffer)         \
-  V(JSTypedArray)          \
-  V(JSDataView)            \
   V(JSRegExp)
 
   // For data objects, JS objects and structs along with generic visitor which
@@ -98,7 +100,6 @@
     kVisitDataObject = kVisitDataObject2,
     kVisitJSObject = kVisitJSObject2,
     kVisitStruct = kVisitStruct2,
-    kMinObjectSizeInWords = 2
   };
 
   // Visitor ID should fit in one byte.
@@ -110,11 +111,7 @@
                                 bool has_unboxed_fields);
 
   // Determine which specialized visitor should be used for given map.
-  static VisitorId GetVisitorId(Map* map) {
-    return GetVisitorId(
-        map->instance_type(), map->instance_size(),
-        FLAG_unbox_double_fields && !map->HasFastPointerLayout());
-  }
+  static VisitorId GetVisitorId(Map* map);
 
   // For visitors that allow specialization by size calculate VisitorId based
   // on size, base visitor id and generic visitor id.
@@ -124,15 +121,15 @@
     DCHECK((base == kVisitDataObject) || (base == kVisitStruct) ||
            (base == kVisitJSObject));
     DCHECK(IsAligned(object_size, kPointerSize));
-    DCHECK(kMinObjectSizeInWords * kPointerSize <= object_size);
+    DCHECK(Heap::kMinObjectSizeInWords * kPointerSize <= object_size);
     DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
     DCHECK(!has_unboxed_fields || (base == kVisitJSObject));
 
     if (has_unboxed_fields) return generic;
 
-    int visitor_id =
-        Min(base + (object_size >> kPointerSizeLog2) - kMinObjectSizeInWords,
-            static_cast<int>(generic));
+    int visitor_id = Min(
+        base + (object_size >> kPointerSizeLog2) - Heap::kMinObjectSizeInWords,
+        static_cast<int>(generic));
 
     return static_cast<VisitorId>(visitor_id);
   }
@@ -151,14 +148,12 @@
     }
   }
 
+  inline Callback GetVisitor(Map* map);
+
   inline Callback GetVisitorById(StaticVisitorBase::VisitorId id) {
     return reinterpret_cast<Callback>(callbacks_[id]);
   }
 
-  inline Callback GetVisitor(Map* map) {
-    return reinterpret_cast<Callback>(callbacks_[map->visitor_id()]);
-  }
-
   void Register(StaticVisitorBase::VisitorId id, Callback callback) {
     DCHECK(id < StaticVisitorBase::kVisitorIdCount);  // id is unsigned.
     callbacks_[id] = reinterpret_cast<base::AtomicWord>(callback);
@@ -176,8 +171,7 @@
   template <typename Visitor, StaticVisitorBase::VisitorId base,
             StaticVisitorBase::VisitorId generic>
   void RegisterSpecializations() {
-    STATIC_ASSERT((generic - base + StaticVisitorBase::kMinObjectSizeInWords) ==
-                  10);
+    STATIC_ASSERT((generic - base + Heap::kMinObjectSizeInWords) == 10);
     RegisterSpecialization<Visitor, base, generic, 2>();
     RegisterSpecialization<Visitor, base, generic, 3>();
     RegisterSpecialization<Visitor, base, generic, 4>();
@@ -194,79 +188,34 @@
 };
 
 
-template <typename StaticVisitor>
-class BodyVisitorBase : public AllStatic {
- public:
-  INLINE(static void IteratePointers(Heap* heap, HeapObject* object,
-                                     int start_offset, int end_offset)) {
-    DCHECK(!FLAG_unbox_double_fields || object->map()->HasFastPointerLayout());
-    IterateRawPointers(heap, object, start_offset, end_offset);
-  }
-
-  INLINE(static void IterateBody(Heap* heap, HeapObject* object,
-                                 int start_offset, int end_offset)) {
-    if (!FLAG_unbox_double_fields || object->map()->HasFastPointerLayout()) {
-      IterateRawPointers(heap, object, start_offset, end_offset);
-    } else {
-      IterateBodyUsingLayoutDescriptor(heap, object, start_offset, end_offset);
-    }
-  }
-
- private:
-  INLINE(static void IterateRawPointers(Heap* heap, HeapObject* object,
-                                        int start_offset, int end_offset)) {
-    StaticVisitor::VisitPointers(heap,
-                                 HeapObject::RawField(object, start_offset),
-                                 HeapObject::RawField(object, end_offset));
-  }
-
-  static void IterateBodyUsingLayoutDescriptor(Heap* heap, HeapObject* object,
-                                               int start_offset,
-                                               int end_offset) {
-    DCHECK(FLAG_unbox_double_fields);
-    DCHECK(IsAligned(start_offset, kPointerSize) &&
-           IsAligned(end_offset, kPointerSize));
-
-    LayoutDescriptorHelper helper(object->map());
-    DCHECK(!helper.all_fields_tagged());
-    for (int offset = start_offset; offset < end_offset;) {
-      int end_of_region_offset;
-      if (helper.IsTagged(offset, end_offset, &end_of_region_offset)) {
-        IterateRawPointers(heap, object, offset, end_of_region_offset);
-      }
-      offset = end_of_region_offset;
-    }
-  }
-};
-
-
 template <typename StaticVisitor, typename BodyDescriptor, typename ReturnType>
-class FlexibleBodyVisitor : public BodyVisitorBase<StaticVisitor> {
+class FlexibleBodyVisitor : public AllStatic {
  public:
   INLINE(static ReturnType Visit(Map* map, HeapObject* object)) {
     int object_size = BodyDescriptor::SizeOf(map, object);
-    BodyVisitorBase<StaticVisitor>::IterateBody(
-        map->GetHeap(), object, BodyDescriptor::kStartOffset, object_size);
+    BodyDescriptor::template IterateBody<StaticVisitor>(object, object_size);
     return static_cast<ReturnType>(object_size);
   }
 
+  // This specialization is only suitable for objects whose fields are all
+  // tagged pointers.
   template <int object_size>
   static inline ReturnType VisitSpecialized(Map* map, HeapObject* object) {
     DCHECK(BodyDescriptor::SizeOf(map, object) == object_size);
-    BodyVisitorBase<StaticVisitor>::IteratePointers(
-        map->GetHeap(), object, BodyDescriptor::kStartOffset, object_size);
+    DCHECK(!FLAG_unbox_double_fields || map->HasFastPointerLayout());
+    StaticVisitor::VisitPointers(
+        object->GetHeap(), object,
+        HeapObject::RawField(object, BodyDescriptor::kStartOffset),
+        HeapObject::RawField(object, object_size));
     return static_cast<ReturnType>(object_size);
   }
 };
 
 
 template <typename StaticVisitor, typename BodyDescriptor, typename ReturnType>
-class FixedBodyVisitor : public BodyVisitorBase<StaticVisitor> {
+class FixedBodyVisitor : public AllStatic {
  public:
   INLINE(static ReturnType Visit(Map* map, HeapObject* object)) {
-    BodyVisitorBase<StaticVisitor>::IterateBody(map->GetHeap(), object,
-                                                BodyDescriptor::kStartOffset,
-                                                BodyDescriptor::kEndOffset);
+    BodyDescriptor::template IterateBody<StaticVisitor>(object);
     return static_cast<ReturnType>(BodyDescriptor::kSize);
   }
 };
@@ -297,26 +246,22 @@
     return table_.GetVisitor(map)(map, obj);
   }
 
-  INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
-    for (Object** p = start; p < end; p++) StaticVisitor::VisitPointer(heap, p);
+  INLINE(static void VisitPointers(Heap* heap, HeapObject* object,
+                                   Object** start, Object** end)) {
+    for (Object** p = start; p < end; p++) {
+      StaticVisitor::VisitPointer(heap, object, p);
+    }
+  }
+
+  // Although the JSFunction body descriptor does not visit the code entry,
+  // the compiler still requires this method to be accessible.
+  // See JSFunction::BodyDescriptorImpl.
+  INLINE(static void VisitCodeEntry(Heap* heap, HeapObject* object,
+                                    Address entry_address)) {
+    UNREACHABLE();
   }
 
  private:
-  INLINE(static int VisitJSFunction(Map* map, HeapObject* object)) {
-    Heap* heap = map->GetHeap();
-    VisitPointers(heap,
-                  HeapObject::RawField(object, JSFunction::kPropertiesOffset),
-                  HeapObject::RawField(object, JSFunction::kCodeEntryOffset));
-
-    // Don't visit code entry. We are using this visitor only during scavenges.
-
-    VisitPointers(
-        heap, HeapObject::RawField(object,
-                                   JSFunction::kCodeEntryOffset + kPointerSize),
-        HeapObject::RawField(object, JSFunction::kNonWeakFieldsEndOffset));
-    return JSFunction::kSize;
-  }
-
   INLINE(static int VisitByteArray(Map* map, HeapObject* object)) {
     return reinterpret_cast<ByteArray*>(object)->ByteArraySize();
   }
@@ -326,10 +271,6 @@
     return FixedDoubleArray::SizeFor(length);
   }
 
-  INLINE(static int VisitFixedTypedArray(Map* map, HeapObject* object)) {
-    return reinterpret_cast<FixedTypedArrayBase*>(object)->size();
-  }
-
   INLINE(static int VisitJSObject(Map* map, HeapObject* object)) {
     return JSObjectVisitor::Visit(map, object);
   }
@@ -345,12 +286,11 @@
   }
 
   INLINE(static int VisitFreeSpace(Map* map, HeapObject* object)) {
-    return FreeSpace::cast(object)->Size();
+    return FreeSpace::cast(object)->size();
   }
 
   INLINE(static int VisitJSArrayBuffer(Map* map, HeapObject* object));
-  INLINE(static int VisitJSTypedArray(Map* map, HeapObject* object));
-  INLINE(static int VisitJSDataView(Map* map, HeapObject* object));
+  INLINE(static int VisitBytecodeArray(Map* map, HeapObject* object));
 
   class DataObjectVisitor {
    public:
@@ -406,40 +346,34 @@
 
   INLINE(static void VisitPropertyCell(Map* map, HeapObject* object));
   INLINE(static void VisitWeakCell(Map* map, HeapObject* object));
-  INLINE(static void VisitCodeEntry(Heap* heap, Address entry_address));
+  INLINE(static void VisitTransitionArray(Map* map, HeapObject* object));
+  INLINE(static void VisitCodeEntry(Heap* heap, HeapObject* object,
+                                    Address entry_address));
   INLINE(static void VisitEmbeddedPointer(Heap* heap, RelocInfo* rinfo));
   INLINE(static void VisitCell(Heap* heap, RelocInfo* rinfo));
   INLINE(static void VisitDebugTarget(Heap* heap, RelocInfo* rinfo));
   INLINE(static void VisitCodeTarget(Heap* heap, RelocInfo* rinfo));
   INLINE(static void VisitCodeAgeSequence(Heap* heap, RelocInfo* rinfo));
   INLINE(static void VisitExternalReference(RelocInfo* rinfo)) {}
+  INLINE(static void VisitInternalReference(RelocInfo* rinfo)) {}
   INLINE(static void VisitRuntimeEntry(RelocInfo* rinfo)) {}
   // Skip the weak next code link in a code object.
   INLINE(static void VisitNextCodeLink(Heap* heap, Object** slot)) {}
 
-  // TODO(mstarzinger): This should be made protected once refactoring is done.
-  // Mark non-optimize code for functions inlined into the given optimized
-  // code. This will prevent it from being flushed.
-  static void MarkInlinedFunctionsCode(Heap* heap, Code* code);
-
  protected:
   INLINE(static void VisitMap(Map* map, HeapObject* object));
   INLINE(static void VisitCode(Map* map, HeapObject* object));
   INLINE(static void VisitSharedFunctionInfo(Map* map, HeapObject* object));
-  INLINE(static void VisitConstantPoolArray(Map* map, HeapObject* object));
   INLINE(static void VisitAllocationSite(Map* map, HeapObject* object));
   INLINE(static void VisitWeakCollection(Map* map, HeapObject* object));
   INLINE(static void VisitJSFunction(Map* map, HeapObject* object));
   INLINE(static void VisitJSRegExp(Map* map, HeapObject* object));
   INLINE(static void VisitJSArrayBuffer(Map* map, HeapObject* object));
-  INLINE(static void VisitJSTypedArray(Map* map, HeapObject* object));
-  INLINE(static void VisitJSDataView(Map* map, HeapObject* object));
   INLINE(static void VisitNativeContext(Map* map, HeapObject* object));
+  INLINE(static void VisitBytecodeArray(Map* map, HeapObject* object));
 
-  // Mark pointers in a Map and its TransitionArray together, possibly
-  // treating transitions or back pointers weak.
+  // Mark pointers in a Map, treating some descriptor array elements weakly.
   static void MarkMapContents(Heap* heap, Map* map);
-  static void MarkTransitionArray(Heap* heap, TransitionArray* transitions);
 
   // Code flushing support.
   INLINE(static bool IsFlushable(Heap* heap, JSFunction* function));
@@ -449,8 +383,8 @@
   // references to code objects either strongly or weakly.
   static void VisitSharedFunctionInfoStrongCode(Heap* heap, HeapObject* object);
   static void VisitSharedFunctionInfoWeakCode(Heap* heap, HeapObject* object);
-  static void VisitJSFunctionStrongCode(Heap* heap, HeapObject* object);
-  static void VisitJSFunctionWeakCode(Heap* heap, HeapObject* object);
+  static void VisitJSFunctionStrongCode(Map* map, HeapObject* object);
+  static void VisitJSFunctionWeakCode(Map* map, HeapObject* object);
 
   class DataObjectVisitor {
    public:
@@ -490,7 +424,7 @@
 // access the next-element pointers.
 template <class T>
 Object* VisitWeakList(Heap* heap, Object* list, WeakObjectRetainer* retainer);
-}
-}  // namespace v8::internal
+}  // namespace internal
+}  // namespace v8
 
 #endif  // V8_OBJECTS_VISITING_H_
diff --git a/src/heap/scavenge-job.cc b/src/heap/scavenge-job.cc
new file mode 100644
index 0000000..52ba97a
--- /dev/null
+++ b/src/heap/scavenge-job.cc
@@ -0,0 +1,116 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/scavenge-job.h"
+
+#include "src/base/platform/time.h"
+#include "src/heap/heap-inl.h"
+#include "src/heap/heap.h"
+#include "src/isolate.h"
+#include "src/v8.h"
+
+namespace v8 {
+namespace internal {
+
+
+const double ScavengeJob::kMaxAllocationLimitAsFractionOfNewSpace = 0.8;
+
+void ScavengeJob::IdleTask::RunInternal(double deadline_in_seconds) {
+  Heap* heap = isolate()->heap();
+  double deadline_in_ms =
+      deadline_in_seconds *
+      static_cast<double>(base::Time::kMillisecondsPerSecond);
+  double start_ms = heap->MonotonicallyIncreasingTimeInMs();
+  double idle_time_in_ms = deadline_in_ms - start_ms;
+  size_t scavenge_speed_in_bytes_per_ms =
+      static_cast<size_t>(heap->tracer()->ScavengeSpeedInBytesPerMillisecond());
+  size_t new_space_size = heap->new_space()->Size();
+  size_t new_space_capacity = heap->new_space()->Capacity();
+
+  job_->NotifyIdleTask();
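+  // The idle task is running now; clear the pending flag so that a new task
+  // can be scheduled once enough allocation has happened.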
+
+  if (ReachedIdleAllocationLimit(scavenge_speed_in_bytes_per_ms, new_space_size,
+                                 new_space_capacity)) {
+    if (EnoughIdleTimeForScavenge(
+            idle_time_in_ms, scavenge_speed_in_bytes_per_ms, new_space_size)) {
+      heap->CollectGarbage(NEW_SPACE, "idle task: scavenge");
+    } else {
+      // Immediately request another idle task that may get a longer idle period.
+      job_->RescheduleIdleTask(heap);
+    }
+  }
+}
+
+
+bool ScavengeJob::ReachedIdleAllocationLimit(
+    size_t scavenge_speed_in_bytes_per_ms, size_t new_space_size,
+    size_t new_space_capacity) {
+  if (scavenge_speed_in_bytes_per_ms == 0) {
+    scavenge_speed_in_bytes_per_ms = kInitialScavengeSpeedInBytesPerMs;
+  }
+
+  // Set the allocation limit to the number of bytes we can scavenge in an
+  // average idle task.
+  size_t allocation_limit = kAverageIdleTimeMs * scavenge_speed_in_bytes_per_ms;
+
+  // Keep the limit smaller than the new space capacity.
+  allocation_limit =
+      Min(allocation_limit,
+          static_cast<size_t>(new_space_capacity *
+                              kMaxAllocationLimitAsFractionOfNewSpace));
+  // Adjust the limit to take into account bytes that will be allocated until
+  // the next check.
+  allocation_limit = allocation_limit < kBytesAllocatedBeforeNextIdleTask
+                         ? 0
+                         : allocation_limit - kBytesAllocatedBeforeNextIdleTask;
+  // Keep the limit large enough to avoid scavenges in tiny new space.
+  allocation_limit = Max(allocation_limit, kMinAllocationLimit);
+
+  return allocation_limit <= new_space_size;
+}
+
+
+bool ScavengeJob::EnoughIdleTimeForScavenge(
+    double idle_time_in_ms, size_t scavenge_speed_in_bytes_per_ms,
+    size_t new_space_size) {
+  if (scavenge_speed_in_bytes_per_ms == 0) {
+    scavenge_speed_in_bytes_per_ms = kInitialScavengeSpeedInBytesPerMs;
+  }
+  return new_space_size <= idle_time_in_ms * scavenge_speed_in_bytes_per_ms;
+}
+
+
+void ScavengeJob::RescheduleIdleTask(Heap* heap) {
+  // Make sure that we don't reschedule more than one time.
+  // Otherwise, we might spam the scheduler with idle tasks.
+  if (!idle_task_rescheduled_) {
+    ScheduleIdleTask(heap);
+    idle_task_rescheduled_ = true;
+  }
+}
+
+
+void ScavengeJob::ScheduleIdleTaskIfNeeded(Heap* heap, int bytes_allocated) {
+  bytes_allocated_since_the_last_task_ += bytes_allocated;
+  if (bytes_allocated_since_the_last_task_ >=
+      static_cast<int>(kBytesAllocatedBeforeNextIdleTask)) {
+    ScheduleIdleTask(heap);
+    bytes_allocated_since_the_last_task_ = 0;
+    idle_task_rescheduled_ = false;
+  }
+}
+
+
+void ScavengeJob::ScheduleIdleTask(Heap* heap) {
+  if (!idle_task_pending_) {
+    v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap->isolate());
+    if (V8::GetCurrentPlatform()->IdleTasksEnabled(isolate)) {
+      idle_task_pending_ = true;
+      auto task = new IdleTask(heap->isolate(), this);
+      V8::GetCurrentPlatform()->CallIdleOnForegroundThread(isolate, task);
+    }
+  }
+}
+}  // namespace internal
+}  // namespace v8
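
For reference, the allocation-limit heuristic in ReachedIdleAllocationLimit reduces to a few lines of arithmetic. The standalone sketch below restates it outside of V8 so the thresholds are easy to experiment with; it is not part of the patch, the constants are copied from scavenge-job.h, std::min/std::max stand in for V8's Min/Max, and the numbers in main() are invented.

#include <algorithm>
#include <cstddef>
#include <cstdio>

namespace {

const size_t kKB = 1024;
const size_t kInitialScavengeSpeedInBytesPerMs = 256 * kKB;
const size_t kAverageIdleTimeMs = 5;
const size_t kBytesAllocatedBeforeNextIdleTask = 512 * kKB;
const size_t kMinAllocationLimit = 512 * kKB;
const double kMaxAllocationLimitAsFractionOfNewSpace = 0.8;

bool ReachedIdleAllocationLimit(size_t scavenge_speed_in_bytes_per_ms,
                                size_t new_space_size,
                                size_t new_space_capacity) {
  // Use a conservative speed estimate before the first scavenge was measured.
  if (scavenge_speed_in_bytes_per_ms == 0) {
    scavenge_speed_in_bytes_per_ms = kInitialScavengeSpeedInBytesPerMs;
  }
  // Bytes an average idle task is expected to be able to scavenge.
  size_t limit = kAverageIdleTimeMs * scavenge_speed_in_bytes_per_ms;
  // Never let the limit exceed 80% of the new space capacity.
  limit = std::min(limit, static_cast<size_t>(
                              new_space_capacity *
                              kMaxAllocationLimitAsFractionOfNewSpace));
  // Account for bytes that will be allocated before the next idle task runs.
  limit = limit < kBytesAllocatedBeforeNextIdleTask
              ? 0
              : limit - kBytesAllocatedBeforeNextIdleTask;
  // Keep the limit large enough to avoid scavenging a tiny new space.
  limit = std::max(limit, kMinAllocationLimit);
  return limit <= new_space_size;
}

}  // namespace

int main() {
  // 1 MB/ms scavenge speed, 2 MB used, 8 MB capacity: the limit works out to
  // 4.5 MB, so the idle allocation limit is not reached yet (prints 0).
  std::printf("%d\n",
              static_cast<int>(ReachedIdleAllocationLimit(
                  1024 * kKB, 2 * 1024 * kKB, 8 * 1024 * kKB)));
  return 0;
}
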
diff --git a/src/heap/scavenge-job.h b/src/heap/scavenge-job.h
new file mode 100644
index 0000000..56299a1
--- /dev/null
+++ b/src/heap/scavenge-job.h
@@ -0,0 +1,80 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_SCAVENGE_JOB_H_
+#define V8_HEAP_SCAVENGE_JOB_H_
+
+#include "src/cancelable-task.h"
+#include "src/heap/gc-tracer.h"
+
+namespace v8 {
+namespace internal {
+
+class Heap;
+class Isolate;
+
+
+// This class posts idle tasks and performs scavenges in the idle tasks.
+class ScavengeJob {
+ public:
+  class IdleTask : public CancelableIdleTask {
+   public:
+    explicit IdleTask(Isolate* isolate, ScavengeJob* job)
+        : CancelableIdleTask(isolate), job_(job) {}
+    // CancelableIdleTask overrides.
+    void RunInternal(double deadline_in_seconds) override;
+
+   private:
+    ScavengeJob* job_;
+  };
+
+  ScavengeJob()
+      : idle_task_pending_(false),
+        idle_task_rescheduled_(false),
+        bytes_allocated_since_the_last_task_(0) {}
+
+  // Posts an idle task if the cumulative bytes allocated since the last
+  // idle task exceed kBytesAllocatedBeforeNextIdleTask.
+  void ScheduleIdleTaskIfNeeded(Heap* heap, int bytes_allocated);
+
+  // Posts an idle task ignoring the bytes allocated, but makes sure
+  // that the new idle task cannot reschedule again.
+  // This prevents infinite rescheduling.
+  void RescheduleIdleTask(Heap* heap);
+
+  bool IdleTaskPending() { return idle_task_pending_; }
+  void NotifyIdleTask() { idle_task_pending_ = false; }
+  bool IdleTaskRescheduled() { return idle_task_rescheduled_; }
+
+  static bool ReachedIdleAllocationLimit(size_t scavenge_speed_in_bytes_per_ms,
+                                         size_t new_space_size,
+                                         size_t new_space_capacity);
+
+  static bool EnoughIdleTimeForScavenge(double idle_time_ms,
+                                        size_t scavenge_speed_in_bytes_per_ms,
+                                        size_t new_space_size);
+
+  // If we haven't recorded any scavenger events yet, we use a conservative
+  // lower bound for the scavenger speed.
+  static const int kInitialScavengeSpeedInBytesPerMs = 256 * KB;
+  // Estimate of the average idle time that an idle task gets.
+  static const int kAverageIdleTimeMs = 5;
+  // The number of bytes to be allocated in new space before the next idle
+  // task is posted.
+  static const size_t kBytesAllocatedBeforeNextIdleTask = 512 * KB;
+  // The minimum number of allocated new space bytes to trigger a scavenge.
+  static const size_t kMinAllocationLimit = 512 * KB;
+  // The allocation limit cannot exceed this fraction of the new space capacity.
+  static const double kMaxAllocationLimitAsFractionOfNewSpace;
+
+ private:
+  void ScheduleIdleTask(Heap* heap);
+  bool idle_task_pending_;
+  bool idle_task_rescheduled_;
+  int bytes_allocated_since_the_last_task_;
+};
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_HEAP_SCAVENGE_JOB_H_
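
Tying the header to the implementation: once the allocation limit is hit, the idle task only scavenges if the remaining idle time covers the whole new space, and otherwise reschedules itself at most once. Below is a minimal sketch of that decision, with the predicate mirrored from the code above and everything else stubbed out; it is not a V8 API, and the numbers in main() are invented.

#include <cstddef>
#include <cstdio>

namespace {

const size_t kInitialScavengeSpeedInBytesPerMs = 256 * 1024;

// Mirrors ScavengeJob::EnoughIdleTimeForScavenge: the whole new space must be
// scavengeable within the idle time that is left.
bool EnoughIdleTimeForScavenge(double idle_time_in_ms,
                               size_t scavenge_speed_in_bytes_per_ms,
                               size_t new_space_size) {
  if (scavenge_speed_in_bytes_per_ms == 0) {
    scavenge_speed_in_bytes_per_ms = kInitialScavengeSpeedInBytesPerMs;
  }
  return new_space_size <= idle_time_in_ms * scavenge_speed_in_bytes_per_ms;
}

// Sketch of the decision in ScavengeJob::IdleTask::RunInternal once the idle
// allocation limit has been reached.
void RunIdleTask(double idle_time_in_ms, size_t speed, size_t new_space_size,
                 bool* rescheduled) {
  if (EnoughIdleTimeForScavenge(idle_time_in_ms, speed, new_space_size)) {
    std::puts("scavenge new space");
  } else if (!*rescheduled) {
    // Reschedule at most once, hoping for a larger idle slice next time.
    std::puts("reschedule idle task");
    *rescheduled = true;
  }
}

}  // namespace

int main() {
  bool rescheduled = false;
  // 2 ms of idle time at 1 MB/ms cannot cover a 4 MB new space: reschedule.
  RunIdleTask(2.0, 1024 * 1024, 4 * 1024 * 1024, &rescheduled);
  // 8 ms of idle time can: scavenge.
  RunIdleTask(8.0, 1024 * 1024, 4 * 1024 * 1024, &rescheduled);
  return 0;
}
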
diff --git a/src/heap/scavenger-inl.h b/src/heap/scavenger-inl.h
new file mode 100644
index 0000000..cd35c7d
--- /dev/null
+++ b/src/heap/scavenger-inl.h
@@ -0,0 +1,53 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_SCAVENGER_INL_H_
+#define V8_HEAP_SCAVENGER_INL_H_
+
+#include "src/heap/scavenger.h"
+
+namespace v8 {
+namespace internal {
+
+void Scavenger::ScavengeObject(HeapObject** p, HeapObject* object) {
+  DCHECK(object->GetIsolate()->heap()->InFromSpace(object));
+
+  // We use the first word (where the map pointer usually is) of a heap
+  // object to record the forwarding pointer.  A forwarding pointer can
+  // point to an old space, the code space, or the to space of the new
+  // generation.
+  MapWord first_word = object->map_word();
+
+  // If the first word is a forwarding address, the object has already been
+  // copied.
+  if (first_word.IsForwardingAddress()) {
+    HeapObject* dest = first_word.ToForwardingAddress();
+    DCHECK(object->GetIsolate()->heap()->InFromSpace(*p));
+    *p = dest;
+    return;
+  }
+
+  object->GetHeap()->UpdateAllocationSite(
+      object, object->GetHeap()->global_pretenuring_feedback_);
+
+  // AllocationMementos are unrooted and shouldn't survive a scavenge.
+  DCHECK(object->map() != object->GetHeap()->allocation_memento_map());
+  // Call the slow part of scavenge object.
+  return ScavengeObjectSlow(p, object);
+}
+
+
+// static
+void StaticScavengeVisitor::VisitPointer(Heap* heap, HeapObject* obj,
+                                         Object** p) {
+  Object* object = *p;
+  if (!heap->InNewSpace(object)) return;
+  Scavenger::ScavengeObject(reinterpret_cast<HeapObject**>(p),
+                            reinterpret_cast<HeapObject*>(object));
+}
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_HEAP_SCAVENGER_INL_H_
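
What makes the fast path above cheap is that a from-space object's first word does double duty: normally it holds the map pointer, and after evacuation it holds the forwarding address of the copy. The toy model below illustrates that protocol only; the tag-bit encoding is an assumption made for this sketch and is not V8's actual MapWord layout.

#include <cassert>
#include <cstdint>
#include <cstdio>

namespace {

// Toy stand-ins; V8's HeapObject/MapWord are more involved.
struct Map { const char* name; };
struct Object { uintptr_t first_word; };

// Encode "this object has been copied to 'target'" in the first word. A low
// tag bit distinguishes a forwarding address from an ordinary map pointer in
// this toy model (the real encoding differs).
void SetForwardingAddress(Object* from, Object* target) {
  from->first_word = reinterpret_cast<uintptr_t>(target) | 1u;
}

bool IsForwardingAddress(const Object* o) { return (o->first_word & 1u) != 0; }

Object* ToForwardingAddress(const Object* o) {
  assert(IsForwardingAddress(o));
  return reinterpret_cast<Object*>(o->first_word & ~static_cast<uintptr_t>(1));
}

// Mirrors the fast path of Scavenger::ScavengeObject: if the object was
// already copied, just update the slot; otherwise the slow path would copy it.
void ScavengeSlot(Object** slot) {
  Object* object = *slot;
  if (IsForwardingAddress(object)) {
    *slot = ToForwardingAddress(object);
    return;
  }
  std::puts("slow path: copy the object, then install a forwarding address");
}

}  // namespace

int main() {
  Map map = {"JSObject"};
  Object to_space_copy = {reinterpret_cast<uintptr_t>(&map)};
  Object from_space_original = {reinterpret_cast<uintptr_t>(&map)};
  Object* slot = &from_space_original;

  ScavengeSlot(&slot);  // Not forwarded yet: slow path.
  SetForwardingAddress(&from_space_original, &to_space_copy);
  ScavengeSlot(&slot);  // Forwarded: the slot now points at the copy.
  std::printf("slot points at copy: %d\n", slot == &to_space_copy);
  return 0;
}
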
diff --git a/src/heap/scavenger.cc b/src/heap/scavenger.cc
new file mode 100644
index 0000000..40aeb74
--- /dev/null
+++ b/src/heap/scavenger.cc
@@ -0,0 +1,468 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/scavenger.h"
+
+#include "src/contexts.h"
+#include "src/heap/heap.h"
+#include "src/heap/objects-visiting-inl.h"
+#include "src/heap/scavenger-inl.h"
+#include "src/isolate.h"
+#include "src/log.h"
+#include "src/profiler/cpu-profiler.h"
+
+namespace v8 {
+namespace internal {
+
+enum LoggingAndProfiling {
+  LOGGING_AND_PROFILING_ENABLED,
+  LOGGING_AND_PROFILING_DISABLED
+};
+
+
+enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS };
+
+
+template <MarksHandling marks_handling,
+          LoggingAndProfiling logging_and_profiling_mode>
+class ScavengingVisitor : public StaticVisitorBase {
+ public:
+  static void Initialize() {
+    table_.Register(kVisitSeqOneByteString, &EvacuateSeqOneByteString);
+    table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
+    table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
+    table_.Register(kVisitByteArray, &EvacuateByteArray);
+    table_.Register(kVisitFixedArray, &EvacuateFixedArray);
+    table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray);
+    table_.Register(kVisitFixedTypedArray, &EvacuateFixedTypedArray);
+    table_.Register(kVisitFixedFloat64Array, &EvacuateFixedFloat64Array);
+    table_.Register(kVisitJSArrayBuffer, &EvacuateJSArrayBuffer);
+
+    table_.Register(
+        kVisitNativeContext,
+        &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
+            Context::kSize>);
+
+    table_.Register(
+        kVisitConsString,
+        &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
+            ConsString::kSize>);
+
+    table_.Register(
+        kVisitSlicedString,
+        &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
+            SlicedString::kSize>);
+
+    table_.Register(
+        kVisitSymbol,
+        &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
+            Symbol::kSize>);
+
+    table_.Register(
+        kVisitSharedFunctionInfo,
+        &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
+            SharedFunctionInfo::kSize>);
+
+    table_.Register(kVisitJSWeakCollection,
+                    &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
+
+    table_.Register(kVisitJSRegExp,
+                    &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
+
+    table_.Register(kVisitJSFunction, &EvacuateJSFunction);
+
+    table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
+                                   kVisitDataObject, kVisitDataObjectGeneric>();
+
+    table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
+                                   kVisitJSObject, kVisitJSObjectGeneric>();
+
+    table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
+                                   kVisitStruct, kVisitStructGeneric>();
+  }
+
+  static VisitorDispatchTable<ScavengingCallback>* GetTable() {
+    return &table_;
+  }
+
+ private:
+  enum ObjectContents { DATA_OBJECT, POINTER_OBJECT };
+
+  static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
+    bool should_record = false;
+#ifdef DEBUG
+    should_record = FLAG_heap_stats;
+#endif
+    should_record = should_record || FLAG_log_gc;
+    if (should_record) {
+      if (heap->new_space()->Contains(obj)) {
+        heap->new_space()->RecordAllocation(obj);
+      } else {
+        heap->new_space()->RecordPromotion(obj);
+      }
+    }
+  }
+
+  // Helper function used by the evacuation code to copy a source object to an
+  // allocated target object and update the forwarding pointer in the source
+  // object.
+  INLINE(static void MigrateObject(Heap* heap, HeapObject* source,
+                                   HeapObject* target, int size)) {
+    // If we migrate into to-space, then the to-space top pointer should be
+    // right after the target object. Incorporate double alignment
+    // over-allocation.
+    DCHECK(!heap->InToSpace(target) ||
+           target->address() + size == heap->new_space()->top() ||
+           target->address() + size + kPointerSize == heap->new_space()->top());
+
+    // Make sure that we do not overwrite the promotion queue which is at
+    // the end of to-space.
+    DCHECK(!heap->InToSpace(target) ||
+           heap->promotion_queue()->IsBelowPromotionQueue(
+               heap->new_space()->top()));
+
+    // Copy the content of source to target.
+    heap->CopyBlock(target->address(), source->address(), size);
+
+    // Set the forwarding address.
+    source->set_map_word(MapWord::FromForwardingAddress(target));
+
+    if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
+      // Update NewSpace stats if necessary.
+      RecordCopiedObject(heap, target);
+      heap->OnMoveEvent(target, source, size);
+    }
+
+    if (marks_handling == TRANSFER_MARKS) {
+      if (Marking::TransferColor(source, target)) {
+        MemoryChunk::IncrementLiveBytesFromGC(target, size);
+      }
+    }
+  }
+
+  template <AllocationAlignment alignment>
+  static inline bool SemiSpaceCopyObject(Map* map, HeapObject** slot,
+                                         HeapObject* object, int object_size) {
+    Heap* heap = map->GetHeap();
+
+    DCHECK(heap->AllowedToBeMigrated(object, NEW_SPACE));
+    AllocationResult allocation =
+        heap->new_space()->AllocateRaw(object_size, alignment);
+
+    HeapObject* target = NULL;  // Initialization to please compiler.
+    if (allocation.To(&target)) {
+      // Order is important here: Set the promotion limit before storing a
+      // filler for double alignment or migrating the object. Otherwise we
+      // may end up overwriting promotion queue entries when we migrate the
+      // object.
+      heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
+
+      MigrateObject(heap, object, target, object_size);
+
+      // Update slot to new target.
+      *slot = target;
+
+      heap->IncrementSemiSpaceCopiedObjectSize(object_size);
+      return true;
+    }
+    return false;
+  }
+
+
+  template <ObjectContents object_contents, AllocationAlignment alignment>
+  static inline bool PromoteObject(Map* map, HeapObject** slot,
+                                   HeapObject* object, int object_size) {
+    Heap* heap = map->GetHeap();
+
+    AllocationResult allocation =
+        heap->old_space()->AllocateRaw(object_size, alignment);
+
+    HeapObject* target = NULL;  // Initialization to please compiler.
+    if (allocation.To(&target)) {
+      MigrateObject(heap, object, target, object_size);
+
+      // Update slot to new target.
+      *slot = target;
+
+      if (object_contents == POINTER_OBJECT) {
+        heap->promotion_queue()->insert(target, object_size);
+      }
+      heap->IncrementPromotedObjectsSize(object_size);
+      return true;
+    }
+    return false;
+  }
+
+
+  template <ObjectContents object_contents, AllocationAlignment alignment>
+  static inline void EvacuateObject(Map* map, HeapObject** slot,
+                                    HeapObject* object, int object_size) {
+    SLOW_DCHECK(object_size <= Page::kAllocatableMemory);
+    SLOW_DCHECK(object->Size() == object_size);
+    Heap* heap = map->GetHeap();
+
+    if (!heap->ShouldBePromoted(object->address(), object_size)) {
+      // A semi-space copy may fail due to fragmentation. In that case, we
+      // try to promote the object.
+      if (SemiSpaceCopyObject<alignment>(map, slot, object, object_size)) {
+        return;
+      }
+    }
+
+    if (PromoteObject<object_contents, alignment>(map, slot, object,
+                                                  object_size)) {
+      return;
+    }
+
+    // If promotion failed, we try to copy the object to the other semi-space.
+    if (SemiSpaceCopyObject<alignment>(map, slot, object, object_size)) return;
+
+    FatalProcessOutOfMemory("Scavenger: semi-space copy\n");
+  }
+
+
+  static inline void EvacuateJSFunction(Map* map, HeapObject** slot,
+                                        HeapObject* object) {
+    ObjectEvacuationStrategy<POINTER_OBJECT>::Visit(map, slot, object);
+
+    if (marks_handling == IGNORE_MARKS) return;
+
+    MapWord map_word = object->map_word();
+    DCHECK(map_word.IsForwardingAddress());
+    HeapObject* target = map_word.ToForwardingAddress();
+
+    MarkBit mark_bit = Marking::MarkBitFrom(target);
+    if (Marking::IsBlack(mark_bit)) {
+      // This object is black and it might not be rescanned by the marker.
+      // We should explicitly record the code entry slot for compaction
+      // because promotion queue processing (IterateAndMarkPointersToFromSpace)
+      // will miss it as it is not HeapObject-tagged.
+      Address code_entry_slot =
+          target->address() + JSFunction::kCodeEntryOffset;
+      Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
+      map->GetHeap()->mark_compact_collector()->RecordCodeEntrySlot(
+          target, code_entry_slot, code);
+    }
+  }
+
+
+  static inline void EvacuateFixedArray(Map* map, HeapObject** slot,
+                                        HeapObject* object) {
+    int length = reinterpret_cast<FixedArray*>(object)->synchronized_length();
+    int object_size = FixedArray::SizeFor(length);
+    EvacuateObject<POINTER_OBJECT, kWordAligned>(map, slot, object,
+                                                 object_size);
+  }
+
+
+  static inline void EvacuateFixedDoubleArray(Map* map, HeapObject** slot,
+                                              HeapObject* object) {
+    int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
+    int object_size = FixedDoubleArray::SizeFor(length);
+    EvacuateObject<DATA_OBJECT, kDoubleAligned>(map, slot, object, object_size);
+  }
+
+
+  static inline void EvacuateFixedTypedArray(Map* map, HeapObject** slot,
+                                             HeapObject* object) {
+    int object_size = reinterpret_cast<FixedTypedArrayBase*>(object)->size();
+    EvacuateObject<POINTER_OBJECT, kWordAligned>(map, slot, object,
+                                                 object_size);
+  }
+
+
+  static inline void EvacuateFixedFloat64Array(Map* map, HeapObject** slot,
+                                               HeapObject* object) {
+    int object_size = reinterpret_cast<FixedFloat64Array*>(object)->size();
+    EvacuateObject<POINTER_OBJECT, kDoubleAligned>(map, slot, object,
+                                                   object_size);
+  }
+
+
+  static inline void EvacuateJSArrayBuffer(Map* map, HeapObject** slot,
+                                           HeapObject* object) {
+    ObjectEvacuationStrategy<POINTER_OBJECT>::Visit(map, slot, object);
+
+    Heap* heap = map->GetHeap();
+    MapWord map_word = object->map_word();
+    DCHECK(map_word.IsForwardingAddress());
+    HeapObject* target = map_word.ToForwardingAddress();
+    if (!heap->InNewSpace(target)) {
+      heap->array_buffer_tracker()->Promote(JSArrayBuffer::cast(target));
+    }
+  }
+
+
+  static inline void EvacuateByteArray(Map* map, HeapObject** slot,
+                                       HeapObject* object) {
+    int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
+    EvacuateObject<DATA_OBJECT, kWordAligned>(map, slot, object, object_size);
+  }
+
+
+  static inline void EvacuateSeqOneByteString(Map* map, HeapObject** slot,
+                                              HeapObject* object) {
+    int object_size = SeqOneByteString::cast(object)
+                          ->SeqOneByteStringSize(map->instance_type());
+    EvacuateObject<DATA_OBJECT, kWordAligned>(map, slot, object, object_size);
+  }
+
+
+  static inline void EvacuateSeqTwoByteString(Map* map, HeapObject** slot,
+                                              HeapObject* object) {
+    int object_size = SeqTwoByteString::cast(object)
+                          ->SeqTwoByteStringSize(map->instance_type());
+    EvacuateObject<DATA_OBJECT, kWordAligned>(map, slot, object, object_size);
+  }
+
+
+  static inline void EvacuateShortcutCandidate(Map* map, HeapObject** slot,
+                                               HeapObject* object) {
+    DCHECK(IsShortcutCandidate(map->instance_type()));
+
+    Heap* heap = map->GetHeap();
+
+    if (marks_handling == IGNORE_MARKS &&
+        ConsString::cast(object)->unchecked_second() == heap->empty_string()) {
+      HeapObject* first =
+          HeapObject::cast(ConsString::cast(object)->unchecked_first());
+
+      *slot = first;
+
+      if (!heap->InNewSpace(first)) {
+        object->set_map_word(MapWord::FromForwardingAddress(first));
+        return;
+      }
+
+      MapWord first_word = first->map_word();
+      if (first_word.IsForwardingAddress()) {
+        HeapObject* target = first_word.ToForwardingAddress();
+
+        *slot = target;
+        object->set_map_word(MapWord::FromForwardingAddress(target));
+        return;
+      }
+
+      Scavenger::ScavengeObjectSlow(slot, first);
+      object->set_map_word(MapWord::FromForwardingAddress(*slot));
+      return;
+    }
+
+    int object_size = ConsString::kSize;
+    EvacuateObject<POINTER_OBJECT, kWordAligned>(map, slot, object,
+                                                 object_size);
+  }
+
+  template <ObjectContents object_contents>
+  class ObjectEvacuationStrategy {
+   public:
+    template <int object_size>
+    static inline void VisitSpecialized(Map* map, HeapObject** slot,
+                                        HeapObject* object) {
+      EvacuateObject<object_contents, kWordAligned>(map, slot, object,
+                                                    object_size);
+    }
+
+    static inline void Visit(Map* map, HeapObject** slot, HeapObject* object) {
+      int object_size = map->instance_size();
+      EvacuateObject<object_contents, kWordAligned>(map, slot, object,
+                                                    object_size);
+    }
+  };
+
+  static VisitorDispatchTable<ScavengingCallback> table_;
+};
+
+
+template <MarksHandling marks_handling,
+          LoggingAndProfiling logging_and_profiling_mode>
+VisitorDispatchTable<ScavengingCallback>
+    ScavengingVisitor<marks_handling, logging_and_profiling_mode>::table_;
+
+
+// static
+void Scavenger::Initialize() {
+  ScavengingVisitor<TRANSFER_MARKS,
+                    LOGGING_AND_PROFILING_DISABLED>::Initialize();
+  ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::Initialize();
+  ScavengingVisitor<TRANSFER_MARKS,
+                    LOGGING_AND_PROFILING_ENABLED>::Initialize();
+  ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::Initialize();
+}
+
+
+// static
+void Scavenger::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
+  SLOW_DCHECK(object->GetIsolate()->heap()->InFromSpace(object));
+  MapWord first_word = object->map_word();
+  SLOW_DCHECK(!first_word.IsForwardingAddress());
+  Map* map = first_word.ToMap();
+  Scavenger* scavenger = map->GetHeap()->scavenge_collector_;
+  scavenger->scavenging_visitors_table_.GetVisitor(map)(map, p, object);
+}
+
+
+void Scavenger::SelectScavengingVisitorsTable() {
+  bool logging_and_profiling =
+      FLAG_verify_predictable || isolate()->logger()->is_logging() ||
+      isolate()->cpu_profiler()->is_profiling() ||
+      (isolate()->heap_profiler() != NULL &&
+       isolate()->heap_profiler()->is_tracking_object_moves());
+
+  if (!heap()->incremental_marking()->IsMarking()) {
+    if (!logging_and_profiling) {
+      scavenging_visitors_table_.CopyFrom(
+          ScavengingVisitor<IGNORE_MARKS,
+                            LOGGING_AND_PROFILING_DISABLED>::GetTable());
+    } else {
+      scavenging_visitors_table_.CopyFrom(
+          ScavengingVisitor<IGNORE_MARKS,
+                            LOGGING_AND_PROFILING_ENABLED>::GetTable());
+    }
+  } else {
+    if (!logging_and_profiling) {
+      scavenging_visitors_table_.CopyFrom(
+          ScavengingVisitor<TRANSFER_MARKS,
+                            LOGGING_AND_PROFILING_DISABLED>::GetTable());
+    } else {
+      scavenging_visitors_table_.CopyFrom(
+          ScavengingVisitor<TRANSFER_MARKS,
+                            LOGGING_AND_PROFILING_ENABLED>::GetTable());
+    }
+
+    if (heap()->incremental_marking()->IsCompacting()) {
+      // When compacting, forbid short-circuiting of cons strings.
+      // The scavenging code relies on the fact that a new space object
+      // cannot be evacuated into an evacuation candidate, but
+      // short-circuiting violates this assumption.
+      scavenging_visitors_table_.Register(
+          StaticVisitorBase::kVisitShortcutCandidate,
+          scavenging_visitors_table_.GetVisitorById(
+              StaticVisitorBase::kVisitConsString));
+    }
+  }
+}
+
+
+Isolate* Scavenger::isolate() { return heap()->isolate(); }
+
+
+void ScavengeVisitor::VisitPointer(Object** p) { ScavengePointer(p); }
+
+
+void ScavengeVisitor::VisitPointers(Object** start, Object** end) {
+  // Copy all HeapObject pointers in [start, end)
+  for (Object** p = start; p < end; p++) ScavengePointer(p);
+}
+
+
+void ScavengeVisitor::ScavengePointer(Object** p) {
+  Object* object = *p;
+  if (!heap_->InNewSpace(object)) return;
+  Scavenger::ScavengeObject(reinterpret_cast<HeapObject**>(p),
+                            reinterpret_cast<HeapObject*>(object));
+}
+
+}  // namespace internal
+}  // namespace v8
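
EvacuateObject above encodes the scavenger's placement policy: an object that is not yet old enough is copied within new space, otherwise (or if that copy fails) promotion to old space is attempted, a failed promotion falls back to a semi-space copy, and only when every allocation fails does the scavenge abort. The sketch below strips that policy down to its control flow; the stub flags replace ShouldBePromoted and the two allocation attempts and are purely illustrative.

#include <cstdio>

namespace {

// Stub inputs standing in for Heap::ShouldBePromoted and the allocation
// attempts made in SemiSpaceCopyObject / PromoteObject.
struct EvacuationStubs {
  bool should_be_promoted;
  bool semispace_allocation_succeeds;
  bool old_space_allocation_succeeds;
};

// Mirrors the order of attempts in ScavengingVisitor::EvacuateObject.
const char* Evacuate(const EvacuationStubs& s) {
  if (!s.should_be_promoted && s.semispace_allocation_succeeds) {
    return "copied within new space";
  }
  if (s.old_space_allocation_succeeds) {
    return "promoted to old space";
  }
  if (s.semispace_allocation_succeeds) {
    // Promotion failed; fall back to the other semi-space.
    return "copied within new space (promotion fallback)";
  }
  return "fatal: out of memory during scavenge";
}

}  // namespace

int main() {
  std::puts(Evacuate({false, true, true}));   // young object: stays in new space
  std::puts(Evacuate({true, true, true}));    // old enough: promoted
  std::puts(Evacuate({true, true, false}));   // promotion failed: semi-space copy
  std::puts(Evacuate({true, false, false}));  // nothing worked: OOM
  return 0;
}
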
diff --git a/src/heap/scavenger.h b/src/heap/scavenger.h
new file mode 100644
index 0000000..5d0abf4
--- /dev/null
+++ b/src/heap/scavenger.h
@@ -0,0 +1,72 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_SCAVENGER_H_
+#define V8_HEAP_SCAVENGER_H_
+
+#include "src/heap/objects-visiting.h"
+
+namespace v8 {
+namespace internal {
+
+typedef void (*ScavengingCallback)(Map* map, HeapObject** slot,
+                                   HeapObject* object);
+
+class Scavenger {
+ public:
+  explicit Scavenger(Heap* heap) : heap_(heap) {}
+
+  // Initializes static visitor dispatch tables.
+  static void Initialize();
+
+  // Callback function passed to Heap::Iterate etc.  Copies an object if
+  // necessary; the object might be promoted to an old space.  The caller must
+  // ensure the precondition that the object is (a) a heap object and (b) in
+  // the heap's from space.
+  static inline void ScavengeObject(HeapObject** p, HeapObject* object);
+
+  // Slow part of {ScavengeObject} above.
+  static void ScavengeObjectSlow(HeapObject** p, HeapObject* object);
+
+  // Chooses an appropriate static visitor table depending on the current state
+  // of the heap (i.e. incremental marking, logging and profiling).
+  void SelectScavengingVisitorsTable();
+
+  Isolate* isolate();
+  Heap* heap() { return heap_; }
+
+ private:
+  Heap* heap_;
+  VisitorDispatchTable<ScavengingCallback> scavenging_visitors_table_;
+};
+
+
+// Helper class for turning the scavenger into an object visitor that also
+// filters out non-HeapObjects and objects which do not reside in new space.
+class ScavengeVisitor : public ObjectVisitor {
+ public:
+  explicit ScavengeVisitor(Heap* heap) : heap_(heap) {}
+
+  void VisitPointer(Object** p) override;
+  void VisitPointers(Object** start, Object** end) override;
+
+ private:
+  inline void ScavengePointer(Object** p);
+
+  Heap* heap_;
+};
+
+
+// Helper class for turning the scavenger into an object visitor that also
+// filters out non-HeapObjects and objects which do not reside in new space.
+class StaticScavengeVisitor
+    : public StaticNewSpaceVisitor<StaticScavengeVisitor> {
+ public:
+  static inline void VisitPointer(Heap* heap, HeapObject* object, Object** p);
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_HEAP_SCAVENGER_H_
diff --git a/src/heap/slots-buffer.cc b/src/heap/slots-buffer.cc
new file mode 100644
index 0000000..3f145e6
--- /dev/null
+++ b/src/heap/slots-buffer.cc
@@ -0,0 +1,161 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/slots-buffer.h"
+
+#include "src/assembler.h"
+#include "src/heap/heap.h"
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+
+bool SlotsBuffer::IsTypedSlot(ObjectSlot slot) {
+  return reinterpret_cast<uintptr_t>(slot) < NUMBER_OF_SLOT_TYPES;
+}
+
+
+bool SlotsBuffer::AddTo(SlotsBufferAllocator* allocator,
+                        SlotsBuffer** buffer_address, SlotType type,
+                        Address addr, AdditionMode mode) {
+  SlotsBuffer* buffer = *buffer_address;
+  if (buffer == NULL || !buffer->HasSpaceForTypedSlot()) {
+    if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
+      allocator->DeallocateChain(buffer_address);
+      return false;
+    }
+    buffer = allocator->AllocateBuffer(buffer);
+    *buffer_address = buffer;
+  }
+  DCHECK(buffer->HasSpaceForTypedSlot());
+  buffer->Add(reinterpret_cast<ObjectSlot>(type));
+  buffer->Add(reinterpret_cast<ObjectSlot>(addr));
+  return true;
+}
+
+
+void SlotsBuffer::RemoveInvalidSlots(Heap* heap, SlotsBuffer* buffer) {
+  // Remove entries by replacing them with an old-space slot containing a smi
+  // that is located in an unmovable page.
+  const ObjectSlot kRemovedEntry = HeapObject::RawField(
+      heap->empty_fixed_array(), FixedArrayBase::kLengthOffset);
+  DCHECK(Page::FromAddress(reinterpret_cast<Address>(kRemovedEntry))
+             ->NeverEvacuate());
+
+  while (buffer != NULL) {
+    SlotsBuffer::ObjectSlot* slots = buffer->slots_;
+    intptr_t slots_count = buffer->idx_;
+
+    for (int slot_idx = 0; slot_idx < slots_count; ++slot_idx) {
+      ObjectSlot slot = slots[slot_idx];
+      if (!IsTypedSlot(slot)) {
+        Object* object = *slot;
+        // Slots are invalid when they currently:
+        // - do not point to a heap object (i.e. contain a SMI),
+        // - point to a heap object in new space,
+        // - are not within a live heap object at a valid slot position, or
+        // - point to a heap object that is not on an evacuation candidate.
+        if (!object->IsHeapObject() || heap->InNewSpace(object) ||
+            !heap->mark_compact_collector()->IsSlotInLiveObject(
+                reinterpret_cast<Address>(slot)) ||
+            !Page::FromAddress(reinterpret_cast<Address>(object))
+                 ->IsEvacuationCandidate()) {
+          // TODO(hpayer): Instead of replacing slots with kRemovedEntry we
+          // could shrink the slots buffer in-place.
+          slots[slot_idx] = kRemovedEntry;
+        }
+      } else {
+        ++slot_idx;
+        DCHECK(slot_idx < slots_count);
+      }
+    }
+    buffer = buffer->next();
+  }
+}
+
+
+void SlotsBuffer::RemoveObjectSlots(Heap* heap, SlotsBuffer* buffer,
+                                    Address start_slot, Address end_slot) {
+  // Remove entries by replacing them with an old-space slot containing a smi
+  // that is located in an unmovable page.
+  const ObjectSlot kRemovedEntry = HeapObject::RawField(
+      heap->empty_fixed_array(), FixedArrayBase::kLengthOffset);
+  DCHECK(Page::FromAddress(reinterpret_cast<Address>(kRemovedEntry))
+             ->NeverEvacuate());
+
+  while (buffer != NULL) {
+    SlotsBuffer::ObjectSlot* slots = buffer->slots_;
+    intptr_t slots_count = buffer->idx_;
+    bool is_typed_slot = false;
+
+    for (int slot_idx = 0; slot_idx < slots_count; ++slot_idx) {
+      ObjectSlot slot = slots[slot_idx];
+      if (!IsTypedSlot(slot)) {
+        Address slot_address = reinterpret_cast<Address>(slot);
+        if (slot_address >= start_slot && slot_address < end_slot) {
+          // TODO(hpayer): Instead of replacing slots with kRemovedEntry we
+          // could shrink the slots buffer in-place.
+          slots[slot_idx] = kRemovedEntry;
+          if (is_typed_slot) {
+            slots[slot_idx - 1] = kRemovedEntry;
+          }
+        }
+        is_typed_slot = false;
+      } else {
+        is_typed_slot = true;
+        DCHECK(slot_idx < slots_count);
+      }
+    }
+    buffer = buffer->next();
+  }
+}
+
+
+void SlotsBuffer::VerifySlots(Heap* heap, SlotsBuffer* buffer) {
+  while (buffer != NULL) {
+    SlotsBuffer::ObjectSlot* slots = buffer->slots_;
+    intptr_t slots_count = buffer->idx_;
+
+    for (int slot_idx = 0; slot_idx < slots_count; ++slot_idx) {
+      ObjectSlot slot = slots[slot_idx];
+      if (!IsTypedSlot(slot)) {
+        Object* object = *slot;
+        if (object->IsHeapObject()) {
+          HeapObject* heap_object = HeapObject::cast(object);
+          CHECK(!heap->InNewSpace(object));
+          heap->mark_compact_collector()->VerifyIsSlotInLiveObject(
+              reinterpret_cast<Address>(slot), heap_object);
+        }
+      } else {
+        ++slot_idx;
+        DCHECK(slot_idx < slots_count);
+      }
+    }
+    buffer = buffer->next();
+  }
+}
+
+
+SlotsBuffer* SlotsBufferAllocator::AllocateBuffer(SlotsBuffer* next_buffer) {
+  return new SlotsBuffer(next_buffer);
+}
+
+
+void SlotsBufferAllocator::DeallocateBuffer(SlotsBuffer* buffer) {
+  delete buffer;
+}
+
+
+void SlotsBufferAllocator::DeallocateChain(SlotsBuffer** buffer_address) {
+  SlotsBuffer* buffer = *buffer_address;
+  while (buffer != NULL) {
+    SlotsBuffer* next_buffer = buffer->next();
+    DeallocateBuffer(buffer);
+    buffer = next_buffer;
+  }
+  *buffer_address = NULL;
+}
+
+}  // namespace internal
+}  // namespace v8
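
All three walkers above (RemoveInvalidSlots, RemoveObjectSlots, VerifySlots) lean on the encoding documented in slots-buffer.h: a recorded value smaller than NUMBER_OF_SLOT_TYPES is a slot type, and the entry right after it is that typed slot's address; everything else is an untyped slot. Below is a standalone illustration of that walk, using plain integers instead of real ObjectSlot pointers; the printed output is just for demonstration.

#include <cstdint>
#include <cstdio>
#include <vector>

namespace {

// Matches the number of entries in SlotsBuffer::SlotType; only the count
// matters for distinguishing typed from untyped entries.
const uintptr_t kNumberOfSlotTypes = 7;

void WalkSlotsBuffer(const std::vector<uintptr_t>& entries) {
  for (size_t i = 0; i < entries.size(); ++i) {
    if (entries[i] < kNumberOfSlotTypes && i + 1 < entries.size()) {
      // A (type, address) pair; the real buffer guarantees the pair fits
      // because AddTo checks HasSpaceForTypedSlot() first.
      std::printf("typed slot: type=%lu addr=0x%lx\n",
                  static_cast<unsigned long>(entries[i]),
                  static_cast<unsigned long>(entries[i + 1]));
      ++i;  // Skip the address element of the pair.
    } else {
      std::printf("untyped slot: addr=0x%lx\n",
                  static_cast<unsigned long>(entries[i]));
    }
  }
}

}  // namespace

int main() {
  // One untyped slot address followed by a (CODE_TARGET_SLOT = 4, address)
  // pair, as SlotsBuffer::AddTo would record them.
  std::vector<uintptr_t> entries = {0x10000, 4, 0x20000};
  WalkSlotsBuffer(entries);
  return 0;
}
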
diff --git a/src/heap/slots-buffer.h b/src/heap/slots-buffer.h
new file mode 100644
index 0000000..dc6c922
--- /dev/null
+++ b/src/heap/slots-buffer.h
@@ -0,0 +1,175 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_SLOTS_BUFFER_H_
+#define V8_HEAP_SLOTS_BUFFER_H_
+
+#include "src/objects.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class SlotsBuffer;
+
+
+// SlotsBufferAllocator manages the allocation and deallocation of slots buffer
+// chunks and links them together. Slots buffer chunks are always created by the
+// SlotsBufferAllocator.
+class SlotsBufferAllocator {
+ public:
+  SlotsBuffer* AllocateBuffer(SlotsBuffer* next_buffer);
+  void DeallocateBuffer(SlotsBuffer* buffer);
+
+  void DeallocateChain(SlotsBuffer** buffer_address);
+};
+
+
+// SlotsBuffer records a sequence of slots that has to be updated
+// after live objects were relocated from evacuation candidates.
+// All slots are either untyped or typed:
+//    - Untyped slots are expected to contain a tagged object pointer.
+//      They are recorded by an address.
+//    - Typed slots are expected to contain an encoded pointer to a heap
+//      object where the way of encoding depends on the type of the slot.
+//      They are recorded as a pair (SlotType, slot address).
+// We assume that the zero page is never mapped; this allows us to distinguish
+// untyped slots from typed slots during iteration by a simple comparison:
+// if an element of the slots buffer is less than NUMBER_OF_SLOT_TYPES, then
+// it is the first element of a typed slot's pair.
+class SlotsBuffer {
+ public:
+  typedef Object** ObjectSlot;
+
+  explicit SlotsBuffer(SlotsBuffer* next_buffer)
+      : idx_(0), chain_length_(1), next_(next_buffer) {
+    if (next_ != NULL) {
+      chain_length_ = next_->chain_length_ + 1;
+    }
+  }
+
+  ~SlotsBuffer() {}
+
+  void Add(ObjectSlot slot) {
+    DCHECK(0 <= idx_ && idx_ < kNumberOfElements);
+#ifdef DEBUG
+    if (slot >= reinterpret_cast<ObjectSlot>(NUMBER_OF_SLOT_TYPES)) {
+      DCHECK_NOT_NULL(*slot);
+    }
+#endif
+    slots_[idx_++] = slot;
+  }
+
+  ObjectSlot Get(intptr_t i) {
+    DCHECK(i >= 0 && i < kNumberOfElements);
+    return slots_[i];
+  }
+
+  size_t Size() {
+    DCHECK(idx_ <= kNumberOfElements);
+    return idx_;
+  }
+
+  enum SlotType {
+    EMBEDDED_OBJECT_SLOT,
+    OBJECT_SLOT,
+    RELOCATED_CODE_OBJECT,
+    CELL_TARGET_SLOT,
+    CODE_TARGET_SLOT,
+    CODE_ENTRY_SLOT,
+    DEBUG_TARGET_SLOT,
+    NUMBER_OF_SLOT_TYPES
+  };
+
+  static const char* SlotTypeToString(SlotType type) {
+    switch (type) {
+      case EMBEDDED_OBJECT_SLOT:
+        return "EMBEDDED_OBJECT_SLOT";
+      case OBJECT_SLOT:
+        return "OBJECT_SLOT";
+      case RELOCATED_CODE_OBJECT:
+        return "RELOCATED_CODE_OBJECT";
+      case CELL_TARGET_SLOT:
+        return "CELL_TARGET_SLOT";
+      case CODE_TARGET_SLOT:
+        return "CODE_TARGET_SLOT";
+      case CODE_ENTRY_SLOT:
+        return "CODE_ENTRY_SLOT";
+      case DEBUG_TARGET_SLOT:
+        return "DEBUG_TARGET_SLOT";
+      case NUMBER_OF_SLOT_TYPES:
+        return "NUMBER_OF_SLOT_TYPES";
+    }
+    return "UNKNOWN SlotType";
+  }
+
+  SlotsBuffer* next() { return next_; }
+
+  static int SizeOfChain(SlotsBuffer* buffer) {
+    if (buffer == NULL) return 0;
+    return static_cast<int>(buffer->idx_ +
+                            (buffer->chain_length_ - 1) * kNumberOfElements);
+  }
+
+  inline bool IsFull() { return idx_ == kNumberOfElements; }
+
+  inline bool HasSpaceForTypedSlot() { return idx_ < kNumberOfElements - 1; }
+
+  enum AdditionMode { FAIL_ON_OVERFLOW, IGNORE_OVERFLOW };
+
+  static bool ChainLengthThresholdReached(SlotsBuffer* buffer) {
+    return buffer != NULL && buffer->chain_length_ >= kChainLengthThreshold;
+  }
+
+  INLINE(static bool AddTo(SlotsBufferAllocator* allocator,
+                           SlotsBuffer** buffer_address, ObjectSlot slot,
+                           AdditionMode mode)) {
+    SlotsBuffer* buffer = *buffer_address;
+    if (buffer == NULL || buffer->IsFull()) {
+      if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
+        allocator->DeallocateChain(buffer_address);
+        return false;
+      }
+      buffer = allocator->AllocateBuffer(buffer);
+      *buffer_address = buffer;
+    }
+    buffer->Add(slot);
+    return true;
+  }
+
+  static bool IsTypedSlot(ObjectSlot slot);
+
+  static bool AddTo(SlotsBufferAllocator* allocator,
+                    SlotsBuffer** buffer_address, SlotType type, Address addr,
+                    AdditionMode mode);
+
+  // Eliminates all stale entries from the slots buffer, i.e., slots that
+  // are not part of live objects anymore. This method must be called after
+  // marking, when the whole transitive closure is known, and must be called
+  // before sweeping, when mark bits are still intact.
+  static void RemoveInvalidSlots(Heap* heap, SlotsBuffer* buffer);
+
+  // Eliminate all slots that are within the given address range.
+  static void RemoveObjectSlots(Heap* heap, SlotsBuffer* buffer,
+                                Address start_slot, Address end_slot);
+
+  // Ensures that there are no invalid slots in the chain of slots buffers.
+  static void VerifySlots(Heap* heap, SlotsBuffer* buffer);
+
+  static const int kNumberOfElements = 1021;
+
+ private:
+  static const int kChainLengthThreshold = 15;
+
+  intptr_t idx_;
+  intptr_t chain_length_;
+  SlotsBuffer* next_;
+  ObjectSlot slots_[kNumberOfElements];
+};
+
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_HEAP_SLOTS_BUFFER_H_
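
A small aside on the odd-looking kNumberOfElements = 1021 above: together with the three pointer-sized header fields (idx_, chain_length_, next_), the slots array fills exactly 1024 words, so a SlotsBuffer presumably lands on a power-of-two-sized block (8 KB on 64-bit targets). The check below only verifies that arithmetic against a toy replica of the data members; it is not an assertion the patch itself makes.

#include <cstdint>

namespace {

// Toy replica of SlotsBuffer's data members, used only to check the size
// arithmetic; the real class also has member functions and constants.
struct SlotsBufferLayout {
  intptr_t idx_;
  intptr_t chain_length_;
  SlotsBufferLayout* next_;
  void* slots_[1021];  // kNumberOfElements
};

static_assert(sizeof(SlotsBufferLayout) == 1024 * sizeof(void*),
              "1021 slots plus 3 header words fill a 1024-word block");

}  // namespace

int main() { return 0; }
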
diff --git a/src/heap/spaces-inl.h b/src/heap/spaces-inl.h
index d81d253..3023fbf 100644
--- a/src/heap/spaces-inl.h
+++ b/src/heap/spaces-inl.h
@@ -5,10 +5,11 @@
 #ifndef V8_HEAP_SPACES_INL_H_
 #define V8_HEAP_SPACES_INL_H_
 
+#include "src/heap/incremental-marking.h"
 #include "src/heap/spaces.h"
-#include "src/heap-profiler.h"
 #include "src/isolate.h"
 #include "src/msan.h"
+#include "src/profiler/heap-profiler.h"
 #include "src/v8memory.h"
 
 namespace v8 {
@@ -28,7 +29,6 @@
 // -----------------------------------------------------------------------------
 // PageIterator
 
-
 PageIterator::PageIterator(PagedSpace* space)
     : space_(space),
       prev_page_(&space->anchor_),
@@ -47,8 +47,32 @@
 
 
 // -----------------------------------------------------------------------------
-// NewSpacePageIterator
+// SemiSpaceIterator
 
+HeapObject* SemiSpaceIterator::Next() {
+  while (current_ != limit_) {
+    if (NewSpacePage::IsAtEnd(current_)) {
+      NewSpacePage* page = NewSpacePage::FromLimit(current_);
+      page = page->next_page();
+      DCHECK(!page->is_anchor());
+      current_ = page->area_start();
+      if (current_ == limit_) return nullptr;
+    }
+    HeapObject* object = HeapObject::FromAddress(current_);
+    current_ += object->Size();
+    if (!object->IsFiller()) {
+      return object;
+    }
+  }
+  return nullptr;
+}
+
+
+HeapObject* SemiSpaceIterator::next_object() { return Next(); }
+
+
+// -----------------------------------------------------------------------------
+// NewSpacePageIterator
 
 NewSpacePageIterator::NewSpacePageIterator(NewSpace* space)
     : prev_page_(NewSpacePage::FromAddress(space->ToSpaceStart())->prev_page()),
@@ -81,6 +105,19 @@
 
 // -----------------------------------------------------------------------------
 // HeapObjectIterator
+
+HeapObject* HeapObjectIterator::Next() {
+  do {
+    HeapObject* next_obj = FromCurrentPage();
+    if (next_obj != NULL) return next_obj;
+  } while (AdvanceToNextPage());
+  return NULL;
+}
+
+
+HeapObject* HeapObjectIterator::next_object() { return Next(); }
+
+
 HeapObject* HeapObjectIterator::FromCurrentPage() {
   while (cur_addr_ != cur_end_) {
     if (cur_addr_ == space_->top() && cur_addr_ != space_->limit()) {
@@ -88,11 +125,22 @@
       continue;
     }
     HeapObject* obj = HeapObject::FromAddress(cur_addr_);
-    int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj);
+    int obj_size = obj->Size();
     cur_addr_ += obj_size;
     DCHECK(cur_addr_ <= cur_end_);
+    // TODO(hpayer): Remove the debugging code.
+    if (cur_addr_ > cur_end_) {
+      space_->heap()->isolate()->PushStackTraceAndDie(0xaaaaaaaa, obj, NULL,
+                                                      obj_size);
+    }
+
     if (!obj->IsFiller()) {
-      DCHECK_OBJECT_SIZE(obj_size);
+      if (obj->IsCode()) {
+        DCHECK_EQ(space_, space_->heap()->code_space());
+        DCHECK_CODEOBJECT_SIZE(obj_size, space_);
+      } else {
+        DCHECK_OBJECT_SIZE(obj_size);
+      }
       return obj;
     }
   }
@@ -132,11 +180,22 @@
 
 
 // --------------------------------------------------------------------------
+// AllocationResult
+
+AllocationSpace AllocationResult::RetrySpace() {
+  DCHECK(IsRetry());
+  return static_cast<AllocationSpace>(Smi::cast(object_)->value());
+}
+
+
+// --------------------------------------------------------------------------
 // PagedSpace
+
 Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
                        PagedSpace* owner) {
   Page* page = reinterpret_cast<Page*>(chunk);
-  DCHECK(page->area_size() <= kMaxRegularHeapObjectSize);
+  page->mutex_ = new base::Mutex();
+  DCHECK(page->area_size() <= kAllocatableMemory);
   DCHECK(chunk->owner() == owner);
   owner->IncreaseCapacity(page->area_size());
   owner->Free(page->area_start(), page->area_size());
@@ -154,6 +213,9 @@
 }
 
 
+bool PagedSpace::Contains(HeapObject* o) { return Contains(o->address()); }
+
+
 void MemoryChunk::set_scan_on_scavenge(bool scan) {
   if (scan) {
     if (!scan_on_scavenge()) heap_->increment_scan_on_scavenge_pages();
@@ -186,35 +248,50 @@
 }
 
 
-void MemoryChunk::UpdateHighWaterMark(Address mark) {
-  if (mark == NULL) return;
-  // Need to subtract one from the mark because when a chunk is full the
-  // top points to the next address after the chunk, which effectively belongs
-  // to another chunk. See the comment to Page::FromAllocationTop.
-  MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1);
-  int new_mark = static_cast<int>(mark - chunk->address());
-  if (new_mark > chunk->high_water_mark_) {
-    chunk->high_water_mark_ = new_mark;
-  }
-}
-
-
 PointerChunkIterator::PointerChunkIterator(Heap* heap)
-    : state_(kOldPointerState),
-      old_pointer_iterator_(heap->old_pointer_space()),
+    : state_(kOldSpaceState),
+      old_iterator_(heap->old_space()),
       map_iterator_(heap->map_space()),
       lo_iterator_(heap->lo_space()) {}
 
 
-Page* Page::next_page() {
-  DCHECK(next_chunk()->owner() == owner());
-  return static_cast<Page*>(next_chunk());
-}
-
-
-Page* Page::prev_page() {
-  DCHECK(prev_chunk()->owner() == owner());
-  return static_cast<Page*>(prev_chunk());
+MemoryChunk* PointerChunkIterator::next() {
+  switch (state_) {
+    case kOldSpaceState: {
+      if (old_iterator_.has_next()) {
+        return old_iterator_.next();
+      }
+      state_ = kMapState;
+      // Fall through.
+    }
+    case kMapState: {
+      if (map_iterator_.has_next()) {
+        return map_iterator_.next();
+      }
+      state_ = kLargeObjectState;
+      // Fall through.
+    }
+    case kLargeObjectState: {
+      HeapObject* heap_object;
+      do {
+        heap_object = lo_iterator_.Next();
+        if (heap_object == NULL) {
+          state_ = kFinishedState;
+          return NULL;
+        }
+        // Fixed arrays are the only pointer-containing objects in large
+        // object space.
+      } while (!heap_object->IsFixedArray());
+      MemoryChunk* answer = MemoryChunk::FromAddress(heap_object->address());
+      return answer;
+    }
+    case kFinishedState:
+      return NULL;
+    default:
+      break;
+  }
+  UNREACHABLE();
+  return NULL;
 }
 
 
@@ -244,8 +321,45 @@
 }
 
 
+AllocationResult LocalAllocationBuffer::AllocateRawAligned(
+    int size_in_bytes, AllocationAlignment alignment) {
+  Address current_top = allocation_info_.top();
+  int filler_size = Heap::GetFillToAlign(current_top, alignment);
+
+  Address new_top = current_top + filler_size + size_in_bytes;
+  if (new_top > allocation_info_.limit()) return AllocationResult::Retry();
+
+  allocation_info_.set_top(new_top);
+  if (filler_size > 0) {
+    return heap_->PrecedeWithFiller(HeapObject::FromAddress(current_top),
+                                    filler_size);
+  }
+
+  return AllocationResult(HeapObject::FromAddress(current_top));
+}
+
+
+HeapObject* PagedSpace::AllocateLinearlyAligned(int* size_in_bytes,
+                                                AllocationAlignment alignment) {
+  Address current_top = allocation_info_.top();
+  int filler_size = Heap::GetFillToAlign(current_top, alignment);
+
+  Address new_top = current_top + filler_size + *size_in_bytes;
+  if (new_top > allocation_info_.limit()) return NULL;
+
+  allocation_info_.set_top(new_top);
+  if (filler_size > 0) {
+    *size_in_bytes += filler_size;
+    return heap()->PrecedeWithFiller(HeapObject::FromAddress(current_top),
+                                     filler_size);
+  }
+
+  return HeapObject::FromAddress(current_top);
+}
+
+
 // Raw allocation.
-AllocationResult PagedSpace::AllocateRaw(int size_in_bytes) {
+AllocationResult PagedSpace::AllocateRawUnaligned(int size_in_bytes) {
   HeapObject* object = AllocateLinearly(size_in_bytes);
 
   if (object == NULL) {
@@ -267,28 +381,134 @@
 }
 
 
+AllocationResult PagedSpace::AllocateRawUnalignedSynchronized(
+    int size_in_bytes) {
+  base::LockGuard<base::Mutex> lock_guard(&space_mutex_);
+  return AllocateRawUnaligned(size_in_bytes);
+}
+
+
+// Raw allocation.
+AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
+                                                AllocationAlignment alignment) {
+  DCHECK(identity() == OLD_SPACE);
+  int allocation_size = size_in_bytes;
+  HeapObject* object = AllocateLinearlyAligned(&allocation_size, alignment);
+
+  if (object == NULL) {
+    // We don't know exactly how much filler we need to align until space is
+    // allocated, so assume the worst case.
+    int filler_size = Heap::GetMaximumFillToAlign(alignment);
+    allocation_size += filler_size;
+    object = free_list_.Allocate(allocation_size);
+    if (object == NULL) {
+      object = SlowAllocateRaw(allocation_size);
+    }
+    if (object != NULL && filler_size != 0) {
+      object = heap()->AlignWithFiller(object, size_in_bytes, allocation_size,
+                                       alignment);
+      // Filler objects are initialized, so mark only the aligned object memory
+      // as uninitialized.
+      allocation_size = size_in_bytes;
+    }
+  }
+
+  if (object != NULL) {
+    MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), allocation_size);
+    return object;
+  }
+
+  return AllocationResult::Retry(identity());
+}
+
+
+AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
+                                         AllocationAlignment alignment) {
+#ifdef V8_HOST_ARCH_32_BIT
+  return alignment == kDoubleAligned
+             ? AllocateRawAligned(size_in_bytes, kDoubleAligned)
+             : AllocateRawUnaligned(size_in_bytes);
+#else
+  return AllocateRawUnaligned(size_in_bytes);
+#endif
+}
+
+
 // -----------------------------------------------------------------------------
 // NewSpace
 
 
-AllocationResult NewSpace::AllocateRaw(int size_in_bytes) {
-  Address old_top = allocation_info_.top();
+AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
+                                              AllocationAlignment alignment) {
+  Address top = allocation_info_.top();
+  int filler_size = Heap::GetFillToAlign(top, alignment);
+  int aligned_size_in_bytes = size_in_bytes + filler_size;
 
-  if (allocation_info_.limit() - old_top < size_in_bytes) {
-    return SlowAllocateRaw(size_in_bytes);
+  if (allocation_info_.limit() - top < aligned_size_in_bytes) {
+    // See if we can create room.
+    if (!EnsureAllocation(size_in_bytes, alignment)) {
+      return AllocationResult::Retry();
+    }
+
+    top = allocation_info_.top();
+    filler_size = Heap::GetFillToAlign(top, alignment);
+    aligned_size_in_bytes = size_in_bytes + filler_size;
   }
 
-  HeapObject* obj = HeapObject::FromAddress(old_top);
-  allocation_info_.set_top(allocation_info_.top() + size_in_bytes);
+  HeapObject* obj = HeapObject::FromAddress(top);
+  allocation_info_.set_top(top + aligned_size_in_bytes);
   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
 
-  // The slow path above ultimately goes through AllocateRaw, so this suffices.
+  if (filler_size > 0) {
+    obj = heap()->PrecedeWithFiller(obj, filler_size);
+  }
+
   MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj->address(), size_in_bytes);
 
   return obj;
 }
 
 
+AllocationResult NewSpace::AllocateRawUnaligned(int size_in_bytes) {
+  Address top = allocation_info_.top();
+  if (allocation_info_.limit() < top + size_in_bytes) {
+    // See if we can create room.
+    if (!EnsureAllocation(size_in_bytes, kWordAligned)) {
+      return AllocationResult::Retry();
+    }
+
+    top = allocation_info_.top();
+  }
+
+  HeapObject* obj = HeapObject::FromAddress(top);
+  allocation_info_.set_top(top + size_in_bytes);
+  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
+
+  MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj->address(), size_in_bytes);
+
+  return obj;
+}
+
+
+AllocationResult NewSpace::AllocateRaw(int size_in_bytes,
+                                       AllocationAlignment alignment) {
+#ifdef V8_HOST_ARCH_32_BIT
+  return alignment == kDoubleAligned
+             ? AllocateRawAligned(size_in_bytes, kDoubleAligned)
+             : AllocateRawUnaligned(size_in_bytes);
+#else
+  return AllocateRawUnaligned(size_in_bytes);
+#endif
+}
+
+
+MUST_USE_RESULT inline AllocationResult NewSpace::AllocateRawSynchronized(
+    int size_in_bytes, AllocationAlignment alignment) {
+  base::LockGuard<base::Mutex> guard(&mutex_);
+  return AllocateRaw(size_in_bytes, alignment);
+}
+
+
 LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk) {
   heap->incremental_marking()->SetOldSpacePageFlags(chunk);
   return static_cast<LargePage*>(chunk);
@@ -300,14 +520,34 @@
 }
 
 
-bool FreeListNode::IsFreeListNode(HeapObject* object) {
-  Map* map = object->map();
-  Heap* heap = object->GetHeap();
-  return map == heap->raw_unchecked_free_space_map() ||
-         map == heap->raw_unchecked_one_pointer_filler_map() ||
-         map == heap->raw_unchecked_two_pointer_filler_map();
+LocalAllocationBuffer LocalAllocationBuffer::InvalidBuffer() {
+  return LocalAllocationBuffer(nullptr, AllocationInfo(nullptr, nullptr));
 }
+
+
+LocalAllocationBuffer LocalAllocationBuffer::FromResult(Heap* heap,
+                                                        AllocationResult result,
+                                                        intptr_t size) {
+  if (result.IsRetry()) return InvalidBuffer();
+  HeapObject* obj = nullptr;
+  bool ok = result.To(&obj);
+  USE(ok);
+  DCHECK(ok);
+  Address top = HeapObject::cast(obj)->address();
+  return LocalAllocationBuffer(heap, AllocationInfo(top, top + size));
 }
-}  // namespace v8::internal
+
+
+bool LocalAllocationBuffer::TryMerge(LocalAllocationBuffer* other) {
+  if (allocation_info_.top() == other->allocation_info_.limit()) {
+    allocation_info_.set_top(other->allocation_info_.top());
+    other->allocation_info_.Reset(nullptr, nullptr);
+    return true;
+  }
+  return false;
+}
+
+}  // namespace internal
+}  // namespace v8
 
 #endif  // V8_HEAP_SPACES_INL_H_
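
The aligned allocation paths added above (LocalAllocationBuffer::AllocateRawAligned, PagedSpace::AllocateLinearlyAligned, NewSpace::AllocateRawAligned) all follow the same shape: compute how much filler the current top needs for the requested alignment, bump the top by filler plus object size, and hand out the address just past the filler. Below is a generic standalone version of that shape; the alignment helper is a stand-in and not Heap::GetFillToAlign's actual implementation, and the real code additionally writes a filler object into the gap via PrecedeWithFiller.

#include <cstdint>
#include <cstdio>

namespace {

const uintptr_t kDoubleAlignment = 8;

// Stand-in for Heap::GetFillToAlign: bytes of filler needed so that
// top + filler is aligned to 'alignment' (a power of two).
uintptr_t GetFillToAlign(uintptr_t top, uintptr_t alignment) {
  uintptr_t misalignment = top & (alignment - 1);
  return misalignment == 0 ? 0 : alignment - misalignment;
}

struct BumpAllocator {
  uintptr_t top;
  uintptr_t limit;

  // Mirrors the AllocateRawAligned pattern: fail (retry) if the aligned
  // request does not fit, otherwise bump the top and skip over the filler.
  bool AllocateAligned(uintptr_t size, uintptr_t alignment, uintptr_t* result) {
    uintptr_t filler = GetFillToAlign(top, alignment);
    uintptr_t new_top = top + filler + size;
    if (new_top > limit) return false;  // AllocationResult::Retry() in V8.
    *result = top + filler;             // The object starts after the filler.
    top = new_top;
    return true;
  }
};

}  // namespace

int main() {
  BumpAllocator space = {0x1004, 0x2000};
  uintptr_t addr = 0;
  if (space.AllocateAligned(24, kDoubleAlignment, &addr)) {
    // top was 0x1004, so 4 bytes of filler are needed; the object lands at
    // 0x1008 and the new top is 0x1020.
    std::printf("allocated at 0x%lx, new top 0x%lx\n",
                static_cast<unsigned long>(addr),
                static_cast<unsigned long>(space.top));
  }
  return 0;
}
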
diff --git a/src/heap/spaces.cc b/src/heap/spaces.cc
index 060052e..90d252a 100644
--- a/src/heap/spaces.cc
+++ b/src/heap/spaces.cc
@@ -2,14 +2,15 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "src/v8.h"
+#include "src/heap/spaces.h"
 
 #include "src/base/bits.h"
 #include "src/base/platform/platform.h"
-#include "src/full-codegen.h"
-#include "src/heap/mark-compact.h"
+#include "src/full-codegen/full-codegen.h"
+#include "src/heap/slots-buffer.h"
 #include "src/macro-assembler.h"
 #include "src/msan.h"
+#include "src/snapshot/snapshot.h"
 
 namespace v8 {
 namespace internal {
@@ -23,43 +24,27 @@
   // just an anchor for the double linked page list.  Initialize as if we have
   // reached the end of the anchor page, then the first iteration will move on
   // to the first page.
-  Initialize(space, NULL, NULL, kAllPagesInSpace, NULL);
+  Initialize(space, NULL, NULL, kAllPagesInSpace);
 }
 
 
-HeapObjectIterator::HeapObjectIterator(PagedSpace* space,
-                                       HeapObjectCallback size_func) {
-  // You can't actually iterate over the anchor page.  It is not a real page,
-  // just an anchor for the double linked page list.  Initialize the current
-  // address and end as NULL, then the first iteration will move on
-  // to the first page.
-  Initialize(space, NULL, NULL, kAllPagesInSpace, size_func);
-}
-
-
-HeapObjectIterator::HeapObjectIterator(Page* page,
-                                       HeapObjectCallback size_func) {
+HeapObjectIterator::HeapObjectIterator(Page* page) {
   Space* owner = page->owner();
-  DCHECK(owner == page->heap()->old_pointer_space() ||
-         owner == page->heap()->old_data_space() ||
+  DCHECK(owner == page->heap()->old_space() ||
          owner == page->heap()->map_space() ||
-         owner == page->heap()->cell_space() ||
-         owner == page->heap()->property_cell_space() ||
          owner == page->heap()->code_space());
   Initialize(reinterpret_cast<PagedSpace*>(owner), page->area_start(),
-             page->area_end(), kOnePageOnly, size_func);
+             page->area_end(), kOnePageOnly);
   DCHECK(page->WasSwept() || page->SweepingCompleted());
 }
 
 
 void HeapObjectIterator::Initialize(PagedSpace* space, Address cur, Address end,
-                                    HeapObjectIterator::PageMode mode,
-                                    HeapObjectCallback size_f) {
+                                    HeapObjectIterator::PageMode mode) {
   space_ = space;
   cur_addr_ = cur;
   cur_end_ = end;
   page_mode_ = mode;
-  size_func_ = size_f;
 }
 
 
@@ -77,6 +62,8 @@
   }
   cur_page = cur_page->next_page();
   if (cur_page == space_->anchor()) return false;
+  cur_page->heap()->mark_compact_collector()->SweepOrWaitUntilSweepingCompleted(
+      cur_page);
   cur_addr_ = cur_page->area_start();
   cur_end_ = cur_page->area_end();
   DCHECK(cur_page->WasSwept() || cur_page->SweepingCompleted());
@@ -93,8 +80,7 @@
       code_range_(NULL),
       free_list_(0),
       allocation_list_(0),
-      current_allocation_block_index_(0),
-      emergency_block_() {}
+      current_allocation_block_index_(0) {}
 
 
 bool CodeRange::SetUp(size_t requested) {
@@ -116,7 +102,14 @@
   }
 
   DCHECK(!kRequiresCodeRange || requested <= kMaximalCodeRangeSize);
+#ifdef V8_TARGET_ARCH_MIPS64
+  // To use pseudo-relative jumps such as the j/jal instructions, which have a
+  // 28-bit encoded immediate, the addresses have to be within a 256MB-aligned
+  // region.
+  code_range_ = new base::VirtualMemory(requested, kMaximalCodeRangeSize);
+#else
   code_range_ = new base::VirtualMemory(requested);
+#endif
   CHECK(code_range_ != NULL);
   if (!code_range_->IsReserved()) {
     delete code_range_;
@@ -146,7 +139,6 @@
   current_allocation_block_index_ = 0;
 
   LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested));
-  ReserveEmergencyBlock();
   return true;
 }
 
@@ -204,7 +196,10 @@
 Address CodeRange::AllocateRawMemory(const size_t requested_size,
                                      const size_t commit_size,
                                      size_t* allocated) {
-  DCHECK(commit_size <= requested_size);
+  // requested_size includes the guard pages while commit_size does not. Make
+  // sure callers know about the invariant.
+  CHECK_LE(commit_size,
+           requested_size - 2 * MemoryAllocator::CodePageGuardSize());
   FreeBlock current;
   if (!ReserveBlock(requested_size, &current)) {
     *allocated = 0;
@@ -235,6 +230,7 @@
 
 void CodeRange::FreeRawMemory(Address address, size_t length) {
   DCHECK(IsAddressAligned(address, MemoryChunk::kAlignment));
+  base::LockGuard<base::Mutex> guard(&code_range_mutex_);
   free_list_.Add(FreeBlock(address, length));
   code_range_->Uncommit(address, length);
 }
@@ -243,12 +239,14 @@
 void CodeRange::TearDown() {
   delete code_range_;  // Frees all memory in the virtual memory range.
   code_range_ = NULL;
+  base::LockGuard<base::Mutex> guard(&code_range_mutex_);
   free_list_.Free();
   allocation_list_.Free();
 }
 
 
 bool CodeRange::ReserveBlock(const size_t requested_size, FreeBlock* block) {
+  base::LockGuard<base::Mutex> guard(&code_range_mutex_);
   DCHECK(allocation_list_.length() == 0 ||
          current_allocation_block_index_ < allocation_list_.length());
   if (allocation_list_.length() == 0 ||
@@ -270,24 +268,9 @@
 }
 
 
-void CodeRange::ReleaseBlock(const FreeBlock* block) { free_list_.Add(*block); }
-
-
-void CodeRange::ReserveEmergencyBlock() {
-  const size_t requested_size = MemoryAllocator::CodePageAreaSize();
-  if (emergency_block_.size == 0) {
-    ReserveBlock(requested_size, &emergency_block_);
-  } else {
-    DCHECK(emergency_block_.size >= requested_size);
-  }
-}
-
-
-void CodeRange::ReleaseEmergencyBlock() {
-  if (emergency_block_.size != 0) {
-    ReleaseBlock(&emergency_block_);
-    emergency_block_.size = 0;
-  }
+void CodeRange::ReleaseBlock(const FreeBlock* block) {
+  base::LockGuard<base::Mutex> guard(&code_range_mutex_);
+  free_list_.Add(*block);
 }
 
 
@@ -319,7 +302,7 @@
 
 void MemoryAllocator::TearDown() {
   // Check that spaces were torn down before MemoryAllocator.
-  DCHECK(size_ == 0);
+  DCHECK(size_.Value() == 0);
   // TODO(gc) this will be true again when we fix FreeMemory.
   // DCHECK(size_executable_ == 0);
   capacity_ = 0;
@@ -338,26 +321,31 @@
 }
 
 
+void MemoryAllocator::FreeNewSpaceMemory(Address addr,
+                                         base::VirtualMemory* reservation,
+                                         Executability executable) {
+  LOG(isolate_, DeleteEvent("NewSpace", addr));
+
+  DCHECK(reservation->IsReserved());
+  const intptr_t size = static_cast<intptr_t>(reservation->size());
+  DCHECK(size_.Value() >= size);
+  size_.Increment(-size);
+  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
+  FreeMemory(reservation, NOT_EXECUTABLE);
+}
+
+
 void MemoryAllocator::FreeMemory(base::VirtualMemory* reservation,
                                  Executability executable) {
   // TODO(gc) make code_range part of memory allocator?
-  DCHECK(reservation->IsReserved());
-  size_t size = reservation->size();
-  DCHECK(size_ >= size);
-  size_ -= size;
-
-  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
-
-  if (executable == EXECUTABLE) {
-    DCHECK(size_executable_ >= size);
-    size_executable_ -= size;
-  }
   // Code which is part of the code-range does not have its own VirtualMemory.
   DCHECK(isolate_->code_range() == NULL ||
          !isolate_->code_range()->contains(
              static_cast<Address>(reservation->address())));
   DCHECK(executable == NOT_EXECUTABLE || isolate_->code_range() == NULL ||
-         !isolate_->code_range()->valid());
+         !isolate_->code_range()->valid() ||
+         reservation->size() <= Page::kPageSize);
+
   reservation->Release();
 }
 
@@ -365,15 +353,6 @@
 void MemoryAllocator::FreeMemory(Address base, size_t size,
                                  Executability executable) {
   // TODO(gc) make code_range part of memory allocator?
-  DCHECK(size_ >= size);
-  size_ -= size;
-
-  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
-
-  if (executable == EXECUTABLE) {
-    DCHECK(size_executable_ >= size);
-    size_executable_ -= size;
-  }
   if (isolate_->code_range() != NULL &&
       isolate_->code_range()->contains(static_cast<Address>(base))) {
     DCHECK(executable == EXECUTABLE);
@@ -393,7 +372,7 @@
   base::VirtualMemory reservation(size, alignment);
 
   if (!reservation.IsReserved()) return NULL;
-  size_ += reservation.size();
+  size_.Increment(static_cast<intptr_t>(reservation.size()));
   Address base =
       RoundUp(static_cast<Address>(reservation.address()), alignment);
   controller->TakeControl(&reservation);
@@ -449,8 +428,6 @@
   MemoryChunk* chunk =
       MemoryChunk::Initialize(heap, start, Page::kPageSize, area_start,
                               area_end, NOT_EXECUTABLE, semi_space);
-  chunk->set_next_chunk(NULL);
-  chunk->set_prev_chunk(NULL);
   chunk->initialize_scan_on_scavenge(true);
   bool in_to_space = (semi_space->id() != kFromSpace);
   chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
@@ -491,8 +468,10 @@
   chunk->skip_list_ = NULL;
   chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
   chunk->progress_bar_ = 0;
-  chunk->high_water_mark_ = static_cast<int>(area_start - base);
-  chunk->set_parallel_sweeping(SWEEPING_DONE);
+  chunk->high_water_mark_.SetValue(static_cast<intptr_t>(area_start - base));
+  chunk->parallel_sweeping_state().SetValue(kSweepingDone);
+  chunk->parallel_compaction_state().SetValue(kCompactingDone);
+  chunk->mutex_ = NULL;
   chunk->available_in_small_free_list_ = 0;
   chunk->available_in_medium_free_list_ = 0;
   chunk->available_in_large_free_list_ = 0;
@@ -502,6 +481,8 @@
   Bitmap::Clear(chunk);
   chunk->initialize_scan_on_scavenge(false);
   chunk->SetFlag(WAS_SWEPT);
+  chunk->set_next_chunk(nullptr);
+  chunk->set_prev_chunk(nullptr);
 
   DCHECK(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset);
   DCHECK(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset);
@@ -510,10 +491,6 @@
     chunk->SetFlag(IS_EXECUTABLE);
   }
 
-  if (owner == heap->old_data_space()) {
-    chunk->SetFlag(CONTAINS_ONLY_DATA);
-  }
-
   return chunk;
 }
 
@@ -640,7 +617,8 @@
                  CodePageGuardSize();
 
     // Check executable memory limit.
-    if (size_executable_ + chunk_size > capacity_executable_) {
+    if ((size_executable_.Value() + static_cast<intptr_t>(chunk_size)) >
+        capacity_executable_) {
       LOG(isolate_, StringEvent("MemoryAllocator::AllocateRawMemory",
                                 "V8 Executable Allocation capacity exceeded"));
       return NULL;
@@ -651,22 +629,29 @@
                                  base::OS::CommitPageSize());
     // Allocate executable memory either from code range or from the
     // OS.
+#ifdef V8_TARGET_ARCH_MIPS64
+    // Use the code range only for the large object space on mips64 so that
+    // the address range stays within a 256-MB memory region.
+    if (isolate_->code_range() != NULL && isolate_->code_range()->valid() &&
+        reserve_area_size > CodePageAreaSize()) {
+#else
     if (isolate_->code_range() != NULL && isolate_->code_range()->valid()) {
+#endif
       base = isolate_->code_range()->AllocateRawMemory(chunk_size, commit_size,
                                                        &chunk_size);
       DCHECK(
           IsAligned(reinterpret_cast<intptr_t>(base), MemoryChunk::kAlignment));
       if (base == NULL) return NULL;
-      size_ += chunk_size;
+      size_.Increment(static_cast<intptr_t>(chunk_size));
       // Update executable memory size.
-      size_executable_ += chunk_size;
+      size_executable_.Increment(static_cast<intptr_t>(chunk_size));
     } else {
       base = AllocateAlignedMemory(chunk_size, commit_size,
                                    MemoryChunk::kAlignment, executable,
                                    &reservation);
       if (base == NULL) return NULL;
       // Update executable memory size.
-      size_executable_ += reservation.size();
+      size_executable_.Increment(static_cast<intptr_t>(reservation.size()));
     }
 
     if (Heap::ShouldZapGarbage()) {
@@ -726,9 +711,7 @@
 Page* MemoryAllocator::AllocatePage(intptr_t size, PagedSpace* owner,
                                     Executability executable) {
   MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);
-
   if (chunk == NULL) return NULL;
-
   return Page::Initialize(isolate_->heap(), chunk, executable, owner);
 }
 
@@ -743,7 +726,8 @@
 }
 
 
-void MemoryAllocator::Free(MemoryChunk* chunk) {
+void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
+  DCHECK(!chunk->IsFlagSet(MemoryChunk::PRE_FREED));
   LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
   if (chunk->owner() != NULL) {
     ObjectSpace space =
@@ -754,8 +738,29 @@
   isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk),
                                          chunk->IsEvacuationCandidate());
 
-  delete chunk->slots_buffer();
-  delete chunk->skip_list();
+  intptr_t size;
+  base::VirtualMemory* reservation = chunk->reserved_memory();
+  if (reservation->IsReserved()) {
+    size = static_cast<intptr_t>(reservation->size());
+  } else {
+    size = static_cast<intptr_t>(chunk->size());
+  }
+  DCHECK(size_.Value() >= size);
+  size_.Increment(-size);
+  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
+
+  if (chunk->executable() == EXECUTABLE) {
+    DCHECK(size_executable_.Value() >= size);
+    size_executable_.Increment(-size);
+  }
+
+  chunk->SetFlag(MemoryChunk::PRE_FREED);
+}
+
+
+void MemoryAllocator::PerformFreeMemory(MemoryChunk* chunk) {
+  DCHECK(chunk->IsFlagSet(MemoryChunk::PRE_FREED));
+  chunk->ReleaseAllocatedMemory();
 
   base::VirtualMemory* reservation = chunk->reserved_memory();
   if (reservation->IsReserved()) {
@@ -766,6 +771,12 @@
 }
 
 
+void MemoryAllocator::Free(MemoryChunk* chunk) {
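+  // Two-phase free: account for the chunk first, then release its memory.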
+  PreFreeMemory(chunk);
+  PerformFreeMemory(chunk);
+}
+
+
 bool MemoryAllocator::CommitBlock(Address start, size_t size,
                                   Executability executable) {
   if (!CommitMemory(start, size, executable)) return false;
@@ -840,13 +851,14 @@
 
 #ifdef DEBUG
 void MemoryAllocator::ReportStatistics() {
-  float pct = static_cast<float>(capacity_ - size_) / capacity_;
+  intptr_t size = Size();
+  float pct = static_cast<float>(capacity_ - size) / capacity_;
   PrintF("  capacity: %" V8_PTR_PREFIX
          "d"
          ", used: %" V8_PTR_PREFIX
          "d"
          ", available: %%%d\n\n",
-         capacity_, size_, static_cast<int>(pct * 100));
+         capacity_, size, static_cast<int>(pct * 100));
 }
 #endif
 
@@ -881,80 +893,72 @@
                                              Address start, size_t commit_size,
                                              size_t reserved_size) {
   // Commit page header (not executable).
-  if (!vm->Commit(start, CodePageGuardStartOffset(), false)) {
-    return false;
+  Address header = start;
+  size_t header_size = CodePageGuardStartOffset();
+  if (vm->Commit(header, header_size, false)) {
+    // Create guard page after the header.
+    if (vm->Guard(start + CodePageGuardStartOffset())) {
+      // Commit page body (executable).
+      Address body = start + CodePageAreaStartOffset();
+      size_t body_size = commit_size - CodePageGuardStartOffset();
+      if (vm->Commit(body, body_size, true)) {
+        // Create guard page before the end.
+        if (vm->Guard(start + reserved_size - CodePageGuardSize())) {
+          UpdateAllocatedSpaceLimits(start, start + CodePageAreaStartOffset() +
+                                                commit_size -
+                                                CodePageGuardStartOffset());
+          return true;
+        }
+        vm->Uncommit(body, body_size);
+      }
+    }
+    vm->Uncommit(header, header_size);
   }
-
-  // Create guard page after the header.
-  if (!vm->Guard(start + CodePageGuardStartOffset())) {
-    return false;
-  }
-
-  // Commit page body (executable).
-  if (!vm->Commit(start + CodePageAreaStartOffset(),
-                  commit_size - CodePageGuardStartOffset(), true)) {
-    return false;
-  }
-
-  // Create guard page before the end.
-  if (!vm->Guard(start + reserved_size - CodePageGuardSize())) {
-    return false;
-  }
-
-  UpdateAllocatedSpaceLimits(start, start + CodePageAreaStartOffset() +
-                                        commit_size -
-                                        CodePageGuardStartOffset());
-  return true;
+  return false;
 }
 
 
 // -----------------------------------------------------------------------------
 // MemoryChunk implementation
 
-void MemoryChunk::IncrementLiveBytesFromMutator(Address address, int by) {
-  MemoryChunk* chunk = MemoryChunk::FromAddress(address);
+void MemoryChunk::IncrementLiveBytesFromMutator(HeapObject* object, int by) {
+  MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
   if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->WasSwept()) {
-    static_cast<PagedSpace*>(chunk->owner())->IncrementUnsweptFreeBytes(-by);
+    static_cast<PagedSpace*>(chunk->owner())->Allocate(by);
   }
   chunk->IncrementLiveBytes(by);
 }
 
 
+void MemoryChunk::ReleaseAllocatedMemory() {
+  delete slots_buffer_;
+  delete skip_list_;
+  delete mutex_;
+}
+
+
 // -----------------------------------------------------------------------------
 // PagedSpace implementation
 
 STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::NEW_SPACE) ==
               ObjectSpace::kObjectSpaceNewSpace);
-STATIC_ASSERT(static_cast<ObjectSpace>(1
-                                       << AllocationSpace::OLD_POINTER_SPACE) ==
-              ObjectSpace::kObjectSpaceOldPointerSpace);
-STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::OLD_DATA_SPACE) ==
-              ObjectSpace::kObjectSpaceOldDataSpace);
+STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::OLD_SPACE) ==
+              ObjectSpace::kObjectSpaceOldSpace);
 STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::CODE_SPACE) ==
               ObjectSpace::kObjectSpaceCodeSpace);
-STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::CELL_SPACE) ==
-              ObjectSpace::kObjectSpaceCellSpace);
-STATIC_ASSERT(
-    static_cast<ObjectSpace>(1 << AllocationSpace::PROPERTY_CELL_SPACE) ==
-    ObjectSpace::kObjectSpacePropertyCellSpace);
 STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::MAP_SPACE) ==
               ObjectSpace::kObjectSpaceMapSpace);
 
 
-PagedSpace::PagedSpace(Heap* heap, intptr_t max_capacity, AllocationSpace space,
+PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
                        Executability executable)
     : Space(heap, space, executable),
       free_list_(this),
-      unswept_free_bytes_(0),
-      end_of_unswept_pages_(NULL),
-      emergency_memory_(NULL) {
+      end_of_unswept_pages_(NULL) {
   area_size_ = MemoryAllocator::PageAreaSize(space);
-  max_capacity_ =
-      (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize) * AreaSize();
   accounting_stats_.Clear();
 
-  allocation_info_.set_top(NULL);
-  allocation_info_.set_limit(NULL);
+  allocation_info_.Reset(nullptr, nullptr);
 
   anchor_.InitializeAsAnchor(this);
 }
@@ -977,6 +981,150 @@
 }
 
 
+void PagedSpace::AddMemory(Address start, intptr_t size) {
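+  // Grow this space's capacity by {size} and hand the range to the free list.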
+  accounting_stats_.ExpandSpace(static_cast<int>(size));
+  Free(start, static_cast<int>(size));
+}
+
+
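+// Tries to carve roughly {size_in_bytes} out of this space's free list. The
+// removed memory no longer counts towards this space's capacity.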
+FreeSpace* PagedSpace::TryRemoveMemory(intptr_t size_in_bytes) {
+  FreeSpace* free_space = free_list()->TryRemoveMemory(size_in_bytes);
+  if (free_space != nullptr) {
+    accounting_stats_.DecreaseCapacity(free_space->size());
+  }
+  return free_space;
+}
+
+
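+// Distributes this space's free memory round-robin over {num} compaction
+// space collections until every space has {limit} bytes available or this
+// space runs out of free memory.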
+void PagedSpace::DivideUponCompactionSpaces(CompactionSpaceCollection** other,
+                                            int num, intptr_t limit) {
+  DCHECK_GT(num, 0);
+  DCHECK(other != nullptr);
+
+  if (limit == 0) limit = std::numeric_limits<intptr_t>::max();
+
+  EmptyAllocationInfo();
+
+  bool memory_available = true;
+  bool spaces_need_memory = true;
+  FreeSpace* node = nullptr;
+  CompactionSpace* current_space = nullptr;
+  // Iterate over spaces and memory as long as we have memory and there are
+  // spaces in need of some.
+  while (memory_available && spaces_need_memory) {
+    spaces_need_memory = false;
+    // Round-robin over all spaces.
+    for (int i = 0; i < num; i++) {
+      current_space = other[i]->Get(identity());
+      if (current_space->free_list()->Available() < limit) {
+        // Space has not reached its limit. Try to get some memory.
+        spaces_need_memory = true;
+        node = TryRemoveMemory(limit - current_space->free_list()->Available());
+        if (node != nullptr) {
+          CHECK(current_space->identity() == identity());
+          current_space->AddMemory(node->address(), node->size());
+        } else {
+          memory_available = false;
+          break;
+        }
+      }
+    }
+  }
+}
+
+
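+// Pulls the free list that the collector maintains for this space back onto
+// the space's own free list.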
+void PagedSpace::RefillFreeList() {
+  MarkCompactCollector* collector = heap()->mark_compact_collector();
+  FreeList* free_list = nullptr;
+  if (this == heap()->old_space()) {
+    free_list = collector->free_list_old_space().get();
+  } else if (this == heap()->code_space()) {
+    free_list = collector->free_list_code_space().get();
+  } else if (this == heap()->map_space()) {
+    free_list = collector->free_list_map_space().get();
+  } else {
+    // Any PagedSpace might invoke RefillFreeList; filter out everything but
+    // our old-generation spaces.
+    return;
+  }
+  DCHECK(free_list != nullptr);
+  intptr_t added = free_list_.Concatenate(free_list);
+  accounting_stats_.IncreaseCapacity(added);
+}
+
+
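+// Compaction spaces refill from the collector's global free lists in portions
+// of up to kCompactionMemoryWanted bytes.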
+void CompactionSpace::RefillFreeList() {
+  MarkCompactCollector* collector = heap()->mark_compact_collector();
+  FreeList* free_list = nullptr;
+  if (identity() == OLD_SPACE) {
+    free_list = collector->free_list_old_space().get();
+  } else if (identity() == CODE_SPACE) {
+    free_list = collector->free_list_code_space().get();
+  } else {
+    // Compaction spaces only represent old or code space.
+    UNREACHABLE();
+  }
+  DCHECK(free_list != nullptr);
+  intptr_t refilled = 0;
+  while (refilled < kCompactionMemoryWanted) {
+    FreeSpace* node =
+        free_list->TryRemoveMemory(kCompactionMemoryWanted - refilled);
+    if (node == nullptr) return;
+    refilled += node->size();
+    AddMemory(node->address(), node->size());
+  }
+}
+
+
+void PagedSpace::MoveOverFreeMemory(PagedSpace* other) {
+  DCHECK(identity() == other->identity());
+  // Destroy the linear allocation space of {other}. This is needed to
+  //   (a) not waste the memory and
+  //   (b) keep the rest of the chunk in an iterable state (filler is needed).
+  other->EmptyAllocationInfo();
+
+  // Move over the free list. Concatenate makes sure that the source free list
+  // gets properly reset after moving over all nodes.
+  intptr_t added = free_list_.Concatenate(other->free_list());
+
+  // Moved memory is not recorded as allocated memory, but rather increases and
+  // decreases capacity of the corresponding spaces.
+  other->accounting_stats_.DecreaseCapacity(added);
+  accounting_stats_.IncreaseCapacity(added);
+}
+
+
+void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
+  // Unmerged fields:
+  //   area_size_
+  //   anchor_
+
+  MoveOverFreeMemory(other);
+
+  // Update and clear accounting statistics.
+  accounting_stats_.Merge(other->accounting_stats_);
+  other->accounting_stats_.Clear();
+
+  // The linear allocation area of {other} should be destroyed now.
+  DCHECK(other->top() == nullptr);
+  DCHECK(other->limit() == nullptr);
+
+  DCHECK(other->end_of_unswept_pages_ == nullptr);
+
+  AccountCommitted(other->CommittedMemory());
+
+  // Move over pages.
+  PageIterator it(other);
+  Page* p = nullptr;
+  while (it.has_next()) {
+    p = it.next();
+    p->Unlink();
+    p->set_owner(this);
+    p->InsertAfter(anchor_.prev_page());
+  }
+}
+
+
 size_t PagedSpace::CommittedPhysicalMemory() {
   if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
   MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
@@ -989,6 +1137,16 @@
 }
 
 
+bool PagedSpace::ContainsSafe(Address addr) {
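+  // Walk the page list to check whether {addr} lies on one of this space's
+  // pages.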
+  Page* p = Page::FromAddress(addr);
+  PageIterator iterator(this);
+  while (iterator.has_next()) {
+    if (iterator.next() == p) return true;
+  }
+  return false;
+}
+
+
 Object* PagedSpace::FindObject(Address addr) {
   // Note: this function can only be called on iterable spaces.
   DCHECK(!heap()->mark_compact_collector()->in_use());
@@ -996,7 +1154,7 @@
   if (!Contains(addr)) return Smi::FromInt(0);  // Signaling not found.
 
   Page* p = Page::FromAddress(addr);
-  HeapObjectIterator it(p, NULL);
+  HeapObjectIterator it(p);
   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
     Address cur = obj->address();
     Address next = cur + obj->Size();
@@ -1008,34 +1166,37 @@
 }
 
 
-bool PagedSpace::CanExpand() {
-  DCHECK(max_capacity_ % AreaSize() == 0);
+bool PagedSpace::CanExpand(size_t size) {
+  DCHECK(heap()->mark_compact_collector()->is_compacting() ||
+         Capacity() <= heap()->MaxOldGenerationSize());
 
-  if (Capacity() == max_capacity_) return false;
-
-  DCHECK(Capacity() < max_capacity_);
-
-  // Are we going to exceed capacity for this space?
-  if ((Capacity() + Page::kPageSize) > max_capacity_) return false;
+  // Are we going to exceed capacity for this space? At this point we can be
+  // way over the maximum size because of AlwaysAllocate scopes and large
+  // objects.
+  if (!heap()->CanExpandOldGeneration(static_cast<int>(size))) return false;
 
   return true;
 }
 
 
 bool PagedSpace::Expand() {
-  if (!CanExpand()) return false;
-
   intptr_t size = AreaSize();
-
-  if (anchor_.next_page() == &anchor_) {
-    size = SizeOfFirstPage();
+  if (snapshotable() && !HasPages()) {
+    size = Snapshot::SizeOfFirstPage(heap()->isolate(), identity());
   }
 
+  if (!CanExpand(size)) return false;
+
   Page* p = heap()->isolate()->memory_allocator()->AllocatePage(size, this,
                                                                 executable());
   if (p == NULL) return false;
 
-  DCHECK(Capacity() <= max_capacity_);
+  AccountCommitted(static_cast<intptr_t>(p->size()));
+
+  // Pages created during bootstrapping may contain immortal immovable objects.
+  if (!heap()->deserialization_complete()) p->MarkNeverEvacuate();
+
+  DCHECK(Capacity() <= heap()->MaxOldGenerationSize());
 
   p->InsertAfter(anchor_.prev_page());
 
@@ -1043,48 +1204,6 @@
 }
 
 
-intptr_t PagedSpace::SizeOfFirstPage() {
-  // If using an ool constant pool then transfer the constant pool allowance
-  // from the code space to the old pointer space.
-  static const int constant_pool_delta = FLAG_enable_ool_constant_pool ? 48 : 0;
-  int size = 0;
-  switch (identity()) {
-    case OLD_POINTER_SPACE:
-      size = (128 + constant_pool_delta) * kPointerSize * KB;
-      break;
-    case OLD_DATA_SPACE:
-      size = 192 * KB;
-      break;
-    case MAP_SPACE:
-      size = 16 * kPointerSize * KB;
-      break;
-    case CELL_SPACE:
-      size = 16 * kPointerSize * KB;
-      break;
-    case PROPERTY_CELL_SPACE:
-      size = 8 * kPointerSize * KB;
-      break;
-    case CODE_SPACE: {
-      CodeRange* code_range = heap()->isolate()->code_range();
-      if (code_range != NULL && code_range->valid()) {
-        // When code range exists, code pages are allocated in a special way
-        // (from the reserved code range). That part of the code is not yet
-        // upgraded to handle small pages.
-        size = AreaSize();
-      } else {
-        size = RoundUp((480 - constant_pool_delta) * KB *
-                           FullCodeGenerator::kBootCodeSizeMultiplier / 100,
-                       kPointerSize);
-      }
-      break;
-    }
-    default:
-      UNREACHABLE();
-  }
-  return Min(size, AreaSize());
-}
-
-
 int PagedSpace::CountTotalPages() {
   PageIterator it(this);
   int count = 0;
@@ -1096,14 +1215,6 @@
 }
 
 
-void PagedSpace::ObtainFreeListStatistics(Page* page, SizeStats* sizes) {
-  sizes->huge_size_ = page->available_in_huge_free_list();
-  sizes->small_size_ = page->available_in_small_free_list();
-  sizes->medium_size_ = page->available_in_medium_free_list();
-  sizes->large_size_ = page->available_in_large_free_list();
-}
-
-
 void PagedSpace::ResetFreeListStatistics() {
   PageIterator page_iterator(this);
   while (page_iterator.has_next()) {
@@ -1126,8 +1237,6 @@
     intptr_t size = free_list_.EvictFreeListItems(page);
     accounting_stats_.AllocateBytes(size);
     DCHECK_EQ(AreaSize(), static_cast<int>(size));
-  } else {
-    DecreaseUnsweptFreeBytes(page);
   }
 
   if (page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE)) {
@@ -1138,8 +1247,7 @@
   DCHECK(!free_list_.ContainsPageFreeListItems(page));
 
   if (Page::FromAllocationTop(allocation_info_.top()) == page) {
-    allocation_info_.set_top(NULL);
-    allocation_info_.set_limit(NULL);
+    allocation_info_.Reset(nullptr, nullptr);
   }
 
   // If page is still in a list, unlink it from that list.
@@ -1148,48 +1256,14 @@
     page->Unlink();
   }
 
-  if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) {
-    heap()->isolate()->memory_allocator()->Free(page);
-  } else {
-    heap()->QueueMemoryChunkForFree(page);
-  }
+  AccountUncommitted(static_cast<intptr_t>(page->size()));
+  heap()->QueueMemoryChunkForFree(page);
 
   DCHECK(Capacity() > 0);
   accounting_stats_.ShrinkSpace(AreaSize());
 }
 
 
-void PagedSpace::CreateEmergencyMemory() {
-  if (identity() == CODE_SPACE) {
-    // Make the emergency block available to the allocator.
-    CodeRange* code_range = heap()->isolate()->code_range();
-    if (code_range != NULL && code_range->valid()) {
-      code_range->ReleaseEmergencyBlock();
-    }
-    DCHECK(MemoryAllocator::CodePageAreaSize() == AreaSize());
-  }
-  emergency_memory_ = heap()->isolate()->memory_allocator()->AllocateChunk(
-      AreaSize(), AreaSize(), executable(), this);
-}
-
-
-void PagedSpace::FreeEmergencyMemory() {
-  Page* page = static_cast<Page*>(emergency_memory_);
-  DCHECK(page->LiveBytes() == 0);
-  DCHECK(AreaSize() == page->area_size());
-  DCHECK(!free_list_.ContainsPageFreeListItems(page));
-  heap()->isolate()->memory_allocator()->Free(page);
-  emergency_memory_ = NULL;
-}
-
-
-void PagedSpace::UseEmergencyMemory() {
-  Page* page = Page::Initialize(heap(), emergency_memory_, executable(), this);
-  page->InsertAfter(anchor_.prev_page());
-  emergency_memory_ = NULL;
-}
-
-
 #ifdef DEBUG
 void PagedSpace::Print() {}
 #endif
@@ -1206,7 +1280,7 @@
       allocation_pointer_found_in_space = true;
     }
     CHECK(page->WasSwept());
-    HeapObjectIterator it(page, NULL);
+    HeapObjectIterator it(page);
     Address end_of_previous_object = page->area_start();
     Address top = page->area_end();
     int black_size = 0;
@@ -1314,17 +1388,15 @@
   }
 
   start_ = NULL;
-  allocation_info_.set_top(NULL);
-  allocation_info_.set_limit(NULL);
+  allocation_info_.Reset(nullptr, nullptr);
+
 
   to_space_.TearDown();
   from_space_.TearDown();
 
-  LOG(heap()->isolate(), DeleteEvent("InitialChunk", chunk_base_));
+  heap()->isolate()->memory_allocator()->FreeNewSpaceMemory(
+      chunk_base_, &reservation_, NOT_EXECUTABLE);
 
-  DCHECK(reservation_.IsReserved());
-  heap()->isolate()->memory_allocator()->FreeMemory(&reservation_,
-                                                    NOT_EXECUTABLE);
   chunk_base_ = NULL;
   chunk_size_ = 0;
 }
@@ -1406,16 +1478,57 @@
 }
 
 
+void LocalAllocationBuffer::Close() {
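+  // Turn the unused [top, limit) tail into a filler so the page stays
+  // iterable.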
+  if (IsValid()) {
+    heap_->CreateFillerObjectAt(
+        allocation_info_.top(),
+        static_cast<int>(allocation_info_.limit() - allocation_info_.top()));
+  }
+}
+
+
+LocalAllocationBuffer::LocalAllocationBuffer(Heap* heap,
+                                             AllocationInfo allocation_info)
+    : heap_(heap), allocation_info_(allocation_info) {
+  if (IsValid()) {
+    heap_->CreateFillerObjectAt(
+        allocation_info_.top(),
+        static_cast<int>(allocation_info_.limit() - allocation_info_.top()));
+  }
+}
+
+
+LocalAllocationBuffer::LocalAllocationBuffer(
+    const LocalAllocationBuffer& other) {
+  *this = other;
+}
+
+
+LocalAllocationBuffer& LocalAllocationBuffer::operator=(
+    const LocalAllocationBuffer& other) {
+  Close();
+  heap_ = other.heap_;
+  allocation_info_ = other.allocation_info_;
+
+  // This is needed since we (a) cannot yet use move semantics, (b) want to
+  // make it easy to use the class by value, and (c) implicitly call {Close}
+  // upon copy.
+  const_cast<LocalAllocationBuffer&>(other)
+      .allocation_info_.Reset(nullptr, nullptr);
+  return *this;
+}
+
+
 void NewSpace::UpdateAllocationInfo() {
   MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
-  allocation_info_.set_top(to_space_.page_low());
-  allocation_info_.set_limit(to_space_.page_high());
+  allocation_info_.Reset(to_space_.page_low(), to_space_.page_high());
   UpdateInlineAllocationLimit(0);
   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
 }
 
 
 void NewSpace::ResetAllocationInfo() {
+  Address old_top = allocation_info_.top();
   to_space_.Reset();
   UpdateAllocationInfo();
   pages_used_ = 0;
@@ -1424,6 +1537,7 @@
   while (it.has_next()) {
     Bitmap::Clear(it.next());
   }
+  InlineAllocationStep(old_top, allocation_info_.top(), nullptr, 0);
 }
 
 
@@ -1433,14 +1547,15 @@
     Address high = to_space_.page_high();
     Address new_top = allocation_info_.top() + size_in_bytes;
     allocation_info_.set_limit(Min(new_top, high));
-  } else if (inline_allocation_limit_step() == 0) {
+  } else if (inline_allocation_observers_paused_ ||
+             top_on_previous_step_ == 0) {
     // Normal limit is the end of the current page.
     allocation_info_.set_limit(to_space_.page_high());
   } else {
     // Lower limit during incremental marking.
     Address high = to_space_.page_high();
     Address new_top = allocation_info_.top() + size_in_bytes;
-    Address new_limit = new_top + inline_allocation_limit_step_;
+    Address new_limit = new_top + GetNextInlineAllocationStepSize() - 1;
     allocation_info_.set_limit(Min(new_limit, high));
   }
   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
@@ -1489,33 +1604,114 @@
 }
 
 
-AllocationResult NewSpace::SlowAllocateRaw(int size_in_bytes) {
+bool NewSpace::AddFreshPageSynchronized() {
+  base::LockGuard<base::Mutex> guard(&mutex_);
+  return AddFreshPage();
+}
+
+
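+// Makes sure the linear allocation area has room for {size_in_bytes} bytes
+// plus any alignment fill, adding a fresh page if the current one is too
+// small.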
+bool NewSpace::EnsureAllocation(int size_in_bytes,
+                                AllocationAlignment alignment) {
   Address old_top = allocation_info_.top();
   Address high = to_space_.page_high();
+  int filler_size = Heap::GetFillToAlign(old_top, alignment);
+  int aligned_size_in_bytes = size_in_bytes + filler_size;
+
+  if (old_top + aligned_size_in_bytes >= high) {
+    // Not enough room in the page, try to allocate a new one.
+    if (!AddFreshPage()) {
+      return false;
+    }
+
+    InlineAllocationStep(old_top, allocation_info_.top(), nullptr, 0);
+
+    old_top = allocation_info_.top();
+    high = to_space_.page_high();
+    filler_size = Heap::GetFillToAlign(old_top, alignment);
+    aligned_size_in_bytes = size_in_bytes + filler_size;
+  }
+
+  DCHECK(old_top + aligned_size_in_bytes < high);
+
   if (allocation_info_.limit() < high) {
     // Either the limit has been lowered because linear allocation was disabled
-    // or because incremental marking wants to get a chance to do a step. Set
-    // the new limit accordingly.
-    Address new_top = old_top + size_in_bytes;
-    int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
-    heap()->incremental_marking()->Step(bytes_allocated,
-                                        IncrementalMarking::GC_VIA_STACK_GUARD);
-    UpdateInlineAllocationLimit(size_in_bytes);
-    top_on_previous_step_ = new_top;
-    return AllocateRaw(size_in_bytes);
-  } else if (AddFreshPage()) {
-    // Switched to new page. Try allocating again.
-    int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_);
-    heap()->incremental_marking()->Step(bytes_allocated,
-                                        IncrementalMarking::GC_VIA_STACK_GUARD);
-    top_on_previous_step_ = to_space_.page_low();
-    return AllocateRaw(size_in_bytes);
-  } else {
-    return AllocationResult::Retry();
+    // or because incremental marking wants to get a chance to do a step,
+    // or because the idle scavenge job wants to get a chance to post a task.
+    // Set the new limit accordingly.
+    Address new_top = old_top + aligned_size_in_bytes;
+    Address soon_object = old_top + filler_size;
+    InlineAllocationStep(new_top, new_top, soon_object, size_in_bytes);
+    UpdateInlineAllocationLimit(aligned_size_in_bytes);
+  }
+  return true;
+}
+
+
+void NewSpace::StartNextInlineAllocationStep() {
+  if (!inline_allocation_observers_paused_) {
+    top_on_previous_step_ =
+        inline_allocation_observers_.length() ? allocation_info_.top() : 0;
+    UpdateInlineAllocationLimit(0);
   }
 }
 
 
+intptr_t NewSpace::GetNextInlineAllocationStepSize() {
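+  // The next step is the smallest remaining step over all registered
+  // observers.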
+  intptr_t next_step = 0;
+  for (int i = 0; i < inline_allocation_observers_.length(); ++i) {
+    InlineAllocationObserver* o = inline_allocation_observers_[i];
+    next_step = next_step ? Min(next_step, o->bytes_to_next_step())
+                          : o->bytes_to_next_step();
+  }
+  DCHECK(inline_allocation_observers_.length() == 0 || next_step != 0);
+  return next_step;
+}
+
+
+void NewSpace::AddInlineAllocationObserver(InlineAllocationObserver* observer) {
+  inline_allocation_observers_.Add(observer);
+  StartNextInlineAllocationStep();
+}
+
+
+void NewSpace::RemoveInlineAllocationObserver(
+    InlineAllocationObserver* observer) {
+  bool removed = inline_allocation_observers_.RemoveElement(observer);
+  // Only used in assertion. Suppress unused variable warning.
+  static_cast<void>(removed);
+  DCHECK(removed);
+  StartNextInlineAllocationStep();
+}
+
+
+void NewSpace::PauseInlineAllocationObservers() {
+  // Do a step to account for memory allocated so far.
+  InlineAllocationStep(top(), top(), nullptr, 0);
+  inline_allocation_observers_paused_ = true;
+  top_on_previous_step_ = 0;
+  UpdateInlineAllocationLimit(0);
+}
+
+
+void NewSpace::ResumeInlineAllocationObservers() {
+  DCHECK(top_on_previous_step_ == 0);
+  inline_allocation_observers_paused_ = false;
+  StartNextInlineAllocationStep();
+}
+
+
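+// Notifies all observers about the bytes allocated since the previous step
+// and records {new_top} as the baseline for the next one.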
+void NewSpace::InlineAllocationStep(Address top, Address new_top,
+                                    Address soon_object, size_t size) {
+  if (top_on_previous_step_) {
+    int bytes_allocated = static_cast<int>(top - top_on_previous_step_);
+    for (int i = 0; i < inline_allocation_observers_.length(); ++i) {
+      inline_allocation_observers_[i]->InlineAllocationStep(bytes_allocated,
+                                                            soon_object, size);
+    }
+    top_on_previous_step_ = new_top;
+  }
+}
+
 #ifdef VERIFY_HEAP
 // We do not use the SemiSpaceIterator because verification doesn't assume
 // that it works (it depends on the invariants we are checking).
@@ -1590,13 +1786,12 @@
   total_capacity_ = initial_capacity;
   target_capacity_ = RoundDown(target_capacity, Page::kPageSize);
   maximum_total_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
-  maximum_committed_ = 0;
   committed_ = false;
   start_ = start;
   address_mask_ = ~(maximum_capacity - 1);
   object_mask_ = address_mask_ | kHeapObjectTagMask;
   object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
-  age_mark_ = start_;
+  age_mark_ = start_ + NewSpacePage::kObjectStartOffset;
 }
 
 
@@ -1613,6 +1808,7 @@
           start_, total_capacity_, executable())) {
     return false;
   }
+  AccountCommitted(total_capacity_);
 
   NewSpacePage* current = anchor();
   for (int i = 0; i < pages; i++) {
@@ -1636,6 +1832,8 @@
                                                             total_capacity_)) {
     return false;
   }
+  AccountUncommitted(total_capacity_);
+
   anchor()->set_next_page(anchor());
   anchor()->set_prev_page(anchor());
 
@@ -1672,6 +1870,7 @@
           start_ + total_capacity_, delta, executable())) {
     return false;
   }
+  AccountCommitted(static_cast<intptr_t>(delta));
   SetCapacity(new_capacity);
   NewSpacePage* last_page = anchor()->prev_page();
   DCHECK(last_page != anchor());
@@ -1702,6 +1901,7 @@
     if (!allocator->UncommitBlock(start_ + new_capacity, delta)) {
       return false;
     }
+    AccountUncommitted(static_cast<intptr_t>(delta));
 
     int pages_after = new_capacity / Page::kPageSize;
     NewSpacePage* new_last_page =
@@ -1787,9 +1987,6 @@
 
 void SemiSpace::SetCapacity(int new_capacity) {
   total_capacity_ = new_capacity;
-  if (total_capacity_ > maximum_committed_) {
-    maximum_committed_ = total_capacity_;
-  }
 }
 
 
@@ -1864,33 +2061,16 @@
 
 // -----------------------------------------------------------------------------
 // SemiSpaceIterator implementation.
+
 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
-  Initialize(space->bottom(), space->top(), NULL);
+  Initialize(space->bottom(), space->top());
 }
 
 
-SemiSpaceIterator::SemiSpaceIterator(NewSpace* space,
-                                     HeapObjectCallback size_func) {
-  Initialize(space->bottom(), space->top(), size_func);
-}
-
-
-SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, Address start) {
-  Initialize(start, space->top(), NULL);
-}
-
-
-SemiSpaceIterator::SemiSpaceIterator(Address from, Address to) {
-  Initialize(from, to, NULL);
-}
-
-
-void SemiSpaceIterator::Initialize(Address start, Address end,
-                                   HeapObjectCallback size_func) {
+void SemiSpaceIterator::Initialize(Address start, Address end) {
   SemiSpace::AssertValidRange(start, end);
   current_ = start;
   limit_ = end;
-  size_func_ = size_func;
 }
 
 
@@ -2084,87 +2264,9 @@
 // -----------------------------------------------------------------------------
 // Free lists for old object spaces implementation
 
-void FreeListNode::set_size(Heap* heap, int size_in_bytes) {
-  DCHECK(size_in_bytes > 0);
-  DCHECK(IsAligned(size_in_bytes, kPointerSize));
-
-  // We write a map and possibly size information to the block.  If the block
-  // is big enough to be a FreeSpace with at least one extra word (the next
-  // pointer), we set its map to be the free space map and its size to an
-  // appropriate array length for the desired size from HeapObject::Size().
-  // If the block is too small (eg, one or two words), to hold both a size
-  // field and a next pointer, we give it a filler map that gives it the
-  // correct size.
-  if (size_in_bytes > FreeSpace::kHeaderSize) {
-    // Can't use FreeSpace::cast because it fails during deserialization.
-    // We have to set the size first with a release store before we store
-    // the map because a concurrent store buffer scan on scavenge must not
-    // observe a map with an invalid size.
-    FreeSpace* this_as_free_space = reinterpret_cast<FreeSpace*>(this);
-    this_as_free_space->nobarrier_set_size(size_in_bytes);
-    synchronized_set_map_no_write_barrier(heap->raw_unchecked_free_space_map());
-  } else if (size_in_bytes == kPointerSize) {
-    set_map_no_write_barrier(heap->raw_unchecked_one_pointer_filler_map());
-  } else if (size_in_bytes == 2 * kPointerSize) {
-    set_map_no_write_barrier(heap->raw_unchecked_two_pointer_filler_map());
-  } else {
-    UNREACHABLE();
-  }
-  // We would like to DCHECK(Size() == size_in_bytes) but this would fail during
-  // deserialization because the free space map is not done yet.
-}
-
-
-FreeListNode* FreeListNode::next() {
-  DCHECK(IsFreeListNode(this));
-  if (map() == GetHeap()->raw_unchecked_free_space_map()) {
-    DCHECK(map() == NULL || Size() >= kNextOffset + kPointerSize);
-    return reinterpret_cast<FreeListNode*>(
-        Memory::Address_at(address() + kNextOffset));
-  } else {
-    return reinterpret_cast<FreeListNode*>(
-        Memory::Address_at(address() + kPointerSize));
-  }
-}
-
-
-FreeListNode** FreeListNode::next_address() {
-  DCHECK(IsFreeListNode(this));
-  if (map() == GetHeap()->raw_unchecked_free_space_map()) {
-    DCHECK(Size() >= kNextOffset + kPointerSize);
-    return reinterpret_cast<FreeListNode**>(address() + kNextOffset);
-  } else {
-    return reinterpret_cast<FreeListNode**>(address() + kPointerSize);
-  }
-}
-
-
-void FreeListNode::set_next(FreeListNode* next) {
-  DCHECK(IsFreeListNode(this));
-  // While we are booting the VM the free space map will actually be null.  So
-  // we have to make sure that we don't try to use it for anything at that
-  // stage.
-  if (map() == GetHeap()->raw_unchecked_free_space_map()) {
-    DCHECK(map() == NULL || Size() >= kNextOffset + kPointerSize);
-    base::NoBarrier_Store(
-        reinterpret_cast<base::AtomicWord*>(address() + kNextOffset),
-        reinterpret_cast<base::AtomicWord>(next));
-  } else {
-    base::NoBarrier_Store(
-        reinterpret_cast<base::AtomicWord*>(address() + kPointerSize),
-        reinterpret_cast<base::AtomicWord>(next));
-  }
-}
-
-
 intptr_t FreeListCategory::Concatenate(FreeListCategory* category) {
   intptr_t free_bytes = 0;
   if (category->top() != NULL) {
-    // This is safe (not going to deadlock) since Concatenate operations
-    // are never performed on the same free lists at the same time in
-    // reverse order.
-    base::LockGuard<base::Mutex> target_lock_guard(mutex());
-    base::LockGuard<base::Mutex> source_lock_guard(category->mutex());
     DCHECK(category->end_ != NULL);
     free_bytes = category->available();
     if (end_ == NULL) {
@@ -2173,7 +2275,6 @@
       category->end()->set_next(top());
     }
     set_top(category->top());
-    base::NoBarrier_Store(&top_, category->top_);
     available_ += category->available();
     category->Reset();
   }
@@ -2182,36 +2283,45 @@
 
 
 void FreeListCategory::Reset() {
-  set_top(NULL);
-  set_end(NULL);
-  set_available(0);
+  set_top(nullptr);
+  set_end(nullptr);
+  available_ = 0;
 }
 
 
 intptr_t FreeListCategory::EvictFreeListItemsInList(Page* p) {
-  int sum = 0;
-  FreeListNode* t = top();
-  FreeListNode** n = &t;
-  while (*n != NULL) {
-    if (Page::FromAddress((*n)->address()) == p) {
-      FreeSpace* free_space = reinterpret_cast<FreeSpace*>(*n);
-      sum += free_space->Size();
-      *n = (*n)->next();
-    } else {
-      n = (*n)->next_address();
+  intptr_t sum = 0;
+  FreeSpace* prev_node = nullptr;
+  for (FreeSpace* cur_node = top(); cur_node != nullptr;
+       cur_node = cur_node->next()) {
+    Page* page_for_node = Page::FromAddress(cur_node->address());
+    if (page_for_node == p) {
+      // FreeSpace node on eviction page found, unlink it.
+      int size = cur_node->size();
+      sum += size;
+      DCHECK((prev_node != nullptr) || (top() == cur_node));
+      if (cur_node == top()) {
+        set_top(cur_node->next());
+      }
+      if (cur_node == end()) {
+        set_end(prev_node);
+      }
+      if (prev_node != nullptr) {
+        prev_node->set_next(cur_node->next());
+      }
+      continue;
     }
+    prev_node = cur_node;
   }
-  set_top(t);
-  if (top() == NULL) {
-    set_end(NULL);
-  }
+  DCHECK_EQ(p->available_in_free_list(type_), sum);
+  p->add_available_in_free_list(type_, -sum);
   available_ -= sum;
   return sum;
 }
 
 
 bool FreeListCategory::ContainsPageFreeListItemsInList(Page* p) {
-  FreeListNode* node = top();
+  FreeSpace* node = top();
   while (node != NULL) {
     if (Page::FromAddress(node->address()) == p) return true;
     node = node->next();
@@ -2220,57 +2330,94 @@
 }
 
 
-FreeListNode* FreeListCategory::PickNodeFromList(int* node_size) {
-  FreeListNode* node = top();
+FreeSpace* FreeListCategory::PickNodeFromList(int* node_size) {
+  FreeSpace* node = top();
+  if (node == nullptr) return nullptr;
 
-  if (node == NULL) return NULL;
-
-  while (node != NULL &&
-         Page::FromAddress(node->address())->IsEvacuationCandidate()) {
-    available_ -= reinterpret_cast<FreeSpace*>(node)->Size();
+  Page* page = Page::FromAddress(node->address());
+  while ((node != nullptr) && !page->CanAllocate()) {
+    available_ -= node->size();
+    page->add_available_in_free_list(type_, -(node->Size()));
     node = node->next();
   }
 
-  if (node != NULL) {
+  if (node != nullptr) {
     set_top(node->next());
-    *node_size = reinterpret_cast<FreeSpace*>(node)->Size();
+    *node_size = node->Size();
     available_ -= *node_size;
   } else {
-    set_top(NULL);
+    set_top(nullptr);
   }
 
-  if (top() == NULL) {
-    set_end(NULL);
+  if (top() == nullptr) {
+    set_end(nullptr);
   }
 
   return node;
 }
 
 
-FreeListNode* FreeListCategory::PickNodeFromList(int size_in_bytes,
-                                                 int* node_size) {
-  FreeListNode* node = PickNodeFromList(node_size);
-  if (node != NULL && *node_size < size_in_bytes) {
+FreeSpace* FreeListCategory::PickNodeFromList(int size_in_bytes,
+                                              int* node_size) {
+  FreeSpace* node = PickNodeFromList(node_size);
+  if ((node != nullptr) && (*node_size < size_in_bytes)) {
     Free(node, *node_size);
     *node_size = 0;
-    return NULL;
+    return nullptr;
   }
   return node;
 }
 
 
-void FreeListCategory::Free(FreeListNode* node, int size_in_bytes) {
-  node->set_next(top());
-  set_top(node);
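+// Walks the whole list looking for a node of at least {size_in_bytes}. Nodes
+// on pages that can no longer be allocated on are unlinked along the way.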
+FreeSpace* FreeListCategory::SearchForNodeInList(int size_in_bytes,
+                                                 int* node_size) {
+  FreeSpace* prev_non_evac_node = nullptr;
+  for (FreeSpace* cur_node = top(); cur_node != nullptr;
+       cur_node = cur_node->next()) {
+    int size = cur_node->size();
+    Page* page_for_node = Page::FromAddress(cur_node->address());
+
+    if ((size >= size_in_bytes) || !page_for_node->CanAllocate()) {
+      // The node is either large enough or contained in an evacuation
+      // candidate. In both cases we need to unlink it from the list.
+      available_ -= size;
+      if (cur_node == top()) {
+        set_top(cur_node->next());
+      }
+      if (cur_node == end()) {
+        set_end(prev_non_evac_node);
+      }
+      if (prev_non_evac_node != nullptr) {
+        prev_non_evac_node->set_next(cur_node->next());
+      }
+      // For evacuation candidates we continue.
+      if (!page_for_node->CanAllocate()) {
+        page_for_node->add_available_in_free_list(type_, -size);
+        continue;
+      }
+      // Otherwise we have a large enough node and can return.
+      *node_size = size;
+      return cur_node;
+    }
+
+    prev_non_evac_node = cur_node;
+  }
+  return nullptr;
+}
+
+
+void FreeListCategory::Free(FreeSpace* free_space, int size_in_bytes) {
+  free_space->set_next(top());
+  set_top(free_space);
   if (end_ == NULL) {
-    end_ = node;
+    end_ = free_space;
   }
   available_ += size_in_bytes;
 }
 
 
 void FreeListCategory::RepairFreeList(Heap* heap) {
-  FreeListNode* n = top();
+  FreeSpace* n = top();
   while (n != NULL) {
     Map** map_location = reinterpret_cast<Map**>(n->address());
     if (*map_location == NULL) {
@@ -2283,18 +2430,40 @@
 }
 
 
-FreeList::FreeList(PagedSpace* owner) : owner_(owner), heap_(owner->heap()) {
+FreeList::FreeList(PagedSpace* owner)
+    : owner_(owner),
+      wasted_bytes_(0),
+      small_list_(this, kSmall),
+      medium_list_(this, kMedium),
+      large_list_(this, kLarge),
+      huge_list_(this, kHuge) {
   Reset();
 }
 
 
-intptr_t FreeList::Concatenate(FreeList* free_list) {
-  intptr_t free_bytes = 0;
-  free_bytes += small_list_.Concatenate(free_list->small_list());
-  free_bytes += medium_list_.Concatenate(free_list->medium_list());
-  free_bytes += large_list_.Concatenate(free_list->large_list());
-  free_bytes += huge_list_.Concatenate(free_list->huge_list());
-  return free_bytes;
+intptr_t FreeList::Concatenate(FreeList* other) {
+  intptr_t usable_bytes = 0;
+  intptr_t wasted_bytes = 0;
+
+  // This is safe (not going to deadlock) since Concatenate operations
+  // are never performed on the same free lists at the same time in
+  // reverse order. Furthermore, we only lock if the PagedSpace containing
+  // the free list is known to be globally available, i.e., not local.
+  if (!owner()->is_local()) mutex_.Lock();
+  if (!other->owner()->is_local()) other->mutex()->Lock();
+
+  wasted_bytes = other->wasted_bytes_;
+  wasted_bytes_ += wasted_bytes;
+  other->wasted_bytes_ = 0;
+
+  usable_bytes += small_list_.Concatenate(other->GetFreeListCategory(kSmall));
+  usable_bytes += medium_list_.Concatenate(other->GetFreeListCategory(kMedium));
+  usable_bytes += large_list_.Concatenate(other->GetFreeListCategory(kLarge));
+  usable_bytes += huge_list_.Concatenate(other->GetFreeListCategory(kHuge));
+
+  if (!other->owner()->is_local()) other->mutex()->Unlock();
+  if (!owner()->is_local()) mutex_.Unlock();
+  return usable_bytes + wasted_bytes;
 }
 
 
@@ -2303,123 +2472,81 @@
   medium_list_.Reset();
   large_list_.Reset();
   huge_list_.Reset();
+  ResetStats();
 }
 
 
 int FreeList::Free(Address start, int size_in_bytes) {
   if (size_in_bytes == 0) return 0;
 
-  FreeListNode* node = FreeListNode::FromAddress(start);
-  node->set_size(heap_, size_in_bytes);
+  owner()->heap()->CreateFillerObjectAt(start, size_in_bytes);
+
   Page* page = Page::FromAddress(start);
 
   // Early return to drop too-small blocks on the floor.
-  if (size_in_bytes < kSmallListMin) {
+  if (size_in_bytes <= kSmallListMin) {
     page->add_non_available_small_blocks(size_in_bytes);
+    wasted_bytes_ += size_in_bytes;
     return size_in_bytes;
   }
 
+  FreeSpace* free_space = FreeSpace::cast(HeapObject::FromAddress(start));
   // Insert other blocks at the head of a free list of the appropriate
   // magnitude.
   if (size_in_bytes <= kSmallListMax) {
-    small_list_.Free(node, size_in_bytes);
+    small_list_.Free(free_space, size_in_bytes);
     page->add_available_in_small_free_list(size_in_bytes);
   } else if (size_in_bytes <= kMediumListMax) {
-    medium_list_.Free(node, size_in_bytes);
+    medium_list_.Free(free_space, size_in_bytes);
     page->add_available_in_medium_free_list(size_in_bytes);
   } else if (size_in_bytes <= kLargeListMax) {
-    large_list_.Free(node, size_in_bytes);
+    large_list_.Free(free_space, size_in_bytes);
     page->add_available_in_large_free_list(size_in_bytes);
   } else {
-    huge_list_.Free(node, size_in_bytes);
+    huge_list_.Free(free_space, size_in_bytes);
     page->add_available_in_huge_free_list(size_in_bytes);
   }
 
-  DCHECK(IsVeryLong() || available() == SumFreeLists());
+  DCHECK(IsVeryLong() || Available() == SumFreeLists());
   return 0;
 }
 
 
-FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
-  FreeListNode* node = NULL;
-  Page* page = NULL;
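+// Picks the first usable node from the given category and removes its size
+// from the page's free-list bookkeeping.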
+FreeSpace* FreeList::FindNodeIn(FreeListCategoryType category, int* node_size) {
+  FreeSpace* node = GetFreeListCategory(category)->PickNodeFromList(node_size);
+  if (node != nullptr) {
+    Page::FromAddress(node->address())
+        ->add_available_in_free_list(category, -(*node_size));
+    DCHECK(IsVeryLong() || Available() == SumFreeLists());
+  }
+  return node;
+}
+
+
+FreeSpace* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
+  FreeSpace* node = nullptr;
+  Page* page = nullptr;
 
   if (size_in_bytes <= kSmallAllocationMax) {
-    node = small_list_.PickNodeFromList(node_size);
-    if (node != NULL) {
-      DCHECK(size_in_bytes <= *node_size);
-      page = Page::FromAddress(node->address());
-      page->add_available_in_small_free_list(-(*node_size));
-      DCHECK(IsVeryLong() || available() == SumFreeLists());
-      return node;
-    }
+    node = FindNodeIn(kSmall, node_size);
+    if (node != nullptr) return node;
   }
 
   if (size_in_bytes <= kMediumAllocationMax) {
-    node = medium_list_.PickNodeFromList(node_size);
-    if (node != NULL) {
-      DCHECK(size_in_bytes <= *node_size);
-      page = Page::FromAddress(node->address());
-      page->add_available_in_medium_free_list(-(*node_size));
-      DCHECK(IsVeryLong() || available() == SumFreeLists());
-      return node;
-    }
+    node = FindNodeIn(kMedium, node_size);
+    if (node != nullptr) return node;
   }
 
   if (size_in_bytes <= kLargeAllocationMax) {
-    node = large_list_.PickNodeFromList(node_size);
-    if (node != NULL) {
-      DCHECK(size_in_bytes <= *node_size);
-      page = Page::FromAddress(node->address());
-      page->add_available_in_large_free_list(-(*node_size));
-      DCHECK(IsVeryLong() || available() == SumFreeLists());
-      return node;
-    }
+    node = FindNodeIn(kLarge, node_size);
+    if (node != nullptr) return node;
   }
 
-  int huge_list_available = huge_list_.available();
-  FreeListNode* top_node = huge_list_.top();
-  for (FreeListNode** cur = &top_node; *cur != NULL;
-       cur = (*cur)->next_address()) {
-    FreeListNode* cur_node = *cur;
-    while (cur_node != NULL &&
-           Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) {
-      int size = reinterpret_cast<FreeSpace*>(cur_node)->Size();
-      huge_list_available -= size;
-      page = Page::FromAddress(cur_node->address());
-      page->add_available_in_huge_free_list(-size);
-      cur_node = cur_node->next();
-    }
-
-    *cur = cur_node;
-    if (cur_node == NULL) {
-      huge_list_.set_end(NULL);
-      break;
-    }
-
-    DCHECK((*cur)->map() == heap_->raw_unchecked_free_space_map());
-    FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(*cur);
-    int size = cur_as_free_space->Size();
-    if (size >= size_in_bytes) {
-      // Large enough node found.  Unlink it from the list.
-      node = *cur;
-      *cur = node->next();
-      *node_size = size;
-      huge_list_available -= size;
-      page = Page::FromAddress(node->address());
-      page->add_available_in_huge_free_list(-size);
-      break;
-    }
-  }
-
-  huge_list_.set_top(top_node);
-  if (huge_list_.top() == NULL) {
-    huge_list_.set_end(NULL);
-  }
-  huge_list_.set_available(huge_list_available);
-
-  if (node != NULL) {
-    DCHECK(IsVeryLong() || available() == SumFreeLists());
+  node = huge_list_.SearchForNodeInList(size_in_bytes, node_size);
+  if (node != nullptr) {
+    page = Page::FromAddress(node->address());
+    page->add_available_in_large_free_list(-(*node_size));
+    DCHECK(IsVeryLong() || Available() == SumFreeLists());
     return node;
   }
 
@@ -2446,7 +2573,38 @@
     }
   }
 
-  DCHECK(IsVeryLong() || available() == SumFreeLists());
+  DCHECK(IsVeryLong() || Available() == SumFreeLists());
+  return node;
+}
+
+
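+// Removes a node of roughly {hint_size_in_bytes} from the free list; sizable
+// excess beyond the hint is split off and given back to the free list.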
+FreeSpace* FreeList::TryRemoveMemory(intptr_t hint_size_in_bytes) {
+  hint_size_in_bytes = RoundDown(hint_size_in_bytes, kPointerSize);
+  base::LockGuard<base::Mutex> guard(&mutex_);
+  FreeSpace* node = nullptr;
+  int node_size = 0;
+  // Try to find a node that fits exactly.
+  node = FindNodeFor(static_cast<int>(hint_size_in_bytes), &node_size);
+  // If no node could be found get as much memory as possible.
+  if (node == nullptr) node = FindNodeIn(kHuge, &node_size);
+  if (node == nullptr) node = FindNodeIn(kLarge, &node_size);
+  if (node != nullptr) {
+    // We round the size up to (kSmallListMin + kPointerSize) to (a) have a
+    // size larger than the minimum size required for FreeSpace, and (b) get a
+    // block that can actually be freed into some FreeList later on.
+    if (hint_size_in_bytes <= kSmallListMin) {
+      hint_size_in_bytes = kSmallListMin + kPointerSize;
+    }
+    // Give back the leftovers that were not required by {hint_size_in_bytes}.
+    intptr_t left_over = node_size - hint_size_in_bytes;
+
+    // Do not bother to return anything below {kSmallListMin}, as it would be
+    // immediately discarded anyway.
+    if (left_over > kSmallListMin) {
+      Free(node->address() + hint_size_in_bytes, static_cast<int>(left_over));
+      node->set_size(static_cast<int>(hint_size_in_bytes));
+    }
+  }
   return node;
 }
 
@@ -2467,16 +2625,14 @@
   // skipped when scanning the heap.  This also puts it back in the free list
   // if it is big enough.
   owner_->Free(owner_->top(), old_linear_size);
+  owner_->SetTopAndLimit(nullptr, nullptr);
 
   owner_->heap()->incremental_marking()->OldSpaceStep(size_in_bytes -
                                                       old_linear_size);
 
   int new_node_size = 0;
-  FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size);
-  if (new_node == NULL) {
-    owner_->SetTopAndLimit(NULL, NULL);
-    return NULL;
-  }
+  FreeSpace* new_node = FindNodeFor(size_in_bytes, &new_node_size);
+  if (new_node == nullptr) return nullptr;
 
   int bytes_left = new_node_size - size_in_bytes;
   DCHECK(bytes_left >= 0);
@@ -2506,7 +2662,7 @@
     DCHECK(owner_->top() == NULL && owner_->limit() == NULL);
   } else if (bytes_left > kThreshold &&
              owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
-             FLAG_incremental_marking_steps) {
+             FLAG_incremental_marking) {
     int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
     // We don't want to give too large linear areas to the allocator while
     // incremental marking is going on, because we won't check again whether
@@ -2520,10 +2676,6 @@
     // linear allocation area.
     owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
                            new_node->address() + new_node_size);
-  } else {
-    // TODO(gc) Try not freeing linear allocation region when bytes_left
-    // are zero.
-    owner_->SetTopAndLimit(NULL, NULL);
   }
 
   return new_node;
@@ -2532,17 +2684,11 @@
 
 intptr_t FreeList::EvictFreeListItems(Page* p) {
   intptr_t sum = huge_list_.EvictFreeListItemsInList(p);
-  p->set_available_in_huge_free_list(0);
-
   if (sum < p->area_size()) {
     sum += small_list_.EvictFreeListItemsInList(p) +
            medium_list_.EvictFreeListItemsInList(p) +
            large_list_.EvictFreeListItemsInList(p);
-    p->set_available_in_small_free_list(0);
-    p->set_available_in_medium_free_list(0);
-    p->set_available_in_large_free_list(0);
   }
-
   return sum;
 }
 
@@ -2566,23 +2712,19 @@
 #ifdef DEBUG
 intptr_t FreeListCategory::SumFreeList() {
   intptr_t sum = 0;
-  FreeListNode* cur = top();
+  FreeSpace* cur = top();
   while (cur != NULL) {
-    DCHECK(cur->map() == cur->GetHeap()->raw_unchecked_free_space_map());
-    FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(cur);
-    sum += cur_as_free_space->nobarrier_size();
+    DCHECK(cur->map() == cur->GetHeap()->root(Heap::kFreeSpaceMapRootIndex));
+    sum += cur->nobarrier_size();
     cur = cur->next();
   }
   return sum;
 }
 
 
-static const int kVeryLongFreeList = 500;
-
-
 int FreeListCategory::FreeListLength() {
   int length = 0;
-  FreeListNode* cur = top();
+  FreeSpace* cur = top();
   while (cur != NULL) {
     length++;
     cur = cur->next();
@@ -2592,12 +2734,14 @@
 }
 
 
+bool FreeListCategory::IsVeryLong() {
+  return FreeListLength() == kVeryLongFreeList;
+}
+
+
 bool FreeList::IsVeryLong() {
-  if (small_list_.FreeListLength() == kVeryLongFreeList) return true;
-  if (medium_list_.FreeListLength() == kVeryLongFreeList) return true;
-  if (large_list_.FreeListLength() == kVeryLongFreeList) return true;
-  if (huge_list_.FreeListLength() == kVeryLongFreeList) return true;
-  return false;
+  return small_list_.IsVeryLong() || medium_list_.IsVeryLong() ||
+         large_list_.IsVeryLong() || huge_list_.IsVeryLong();
 }
 
 
@@ -2622,20 +2766,17 @@
   // on the first allocation after the sweep.
   EmptyAllocationInfo();
 
-  // This counter will be increased for pages which will be swept by the
-  // sweeper threads.
-  unswept_free_bytes_ = 0;
-
   // Clear the free list before a full GC---it will be rebuilt afterward.
   free_list_.Reset();
 }
 
 
 intptr_t PagedSpace::SizeOfObjects() {
-  DCHECK(!FLAG_concurrent_sweeping ||
-         heap()->mark_compact_collector()->sweeping_in_progress() ||
-         (unswept_free_bytes_ == 0));
-  return Size() - unswept_free_bytes_ - (limit() - top());
+  const intptr_t size = Size() - (limit() - top());
+  CHECK_GE(limit(), top());
+  CHECK_GE(size, 0);
+  USE(size);
+  return size;
 }
 
 
@@ -2643,27 +2784,35 @@
 // on the heap.  If there was already a free list then the elements on it
 // were created with the wrong FreeSpaceMap (normally NULL), so we need to
 // fix them.
-void PagedSpace::RepairFreeListsAfterBoot() { free_list_.RepairLists(heap()); }
-
-
-void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
-  if (allocation_info_.top() >= allocation_info_.limit()) return;
-
-  if (Page::FromAllocationTop(allocation_info_.top())
-          ->IsEvacuationCandidate()) {
-    // Create filler object to keep page iterable if it was iterable.
-    int remaining =
-        static_cast<int>(allocation_info_.limit() - allocation_info_.top());
-    heap()->CreateFillerObjectAt(allocation_info_.top(), remaining);
-
-    allocation_info_.set_top(NULL);
-    allocation_info_.set_limit(NULL);
+void PagedSpace::RepairFreeListsAfterDeserialization() {
+  free_list_.RepairLists(heap());
+  // Each page may have a small free space that is not tracked by a free list.
+  // Update the maps for those free space objects.
+  PageIterator iterator(this);
+  while (iterator.has_next()) {
+    Page* page = iterator.next();
+    int size = static_cast<int>(page->non_available_small_blocks());
+    if (size == 0) continue;
+    Address address = page->OffsetToAddress(Page::kPageSize - size);
+    heap()->CreateFillerObjectAt(address, size);
   }
 }
 
 
-HeapObject* PagedSpace::WaitForSweeperThreadsAndRetryAllocation(
-    int size_in_bytes) {
+void PagedSpace::EvictEvacuationCandidatesFromLinearAllocationArea() {
+  if (allocation_info_.top() >= allocation_info_.limit()) return;
+
+  if (!Page::FromAllocationTop(allocation_info_.top())->CanAllocate()) {
+    // Create filler object to keep page iterable if it was iterable.
+    int remaining =
+        static_cast<int>(allocation_info_.limit() - allocation_info_.top());
+    heap()->CreateFillerObjectAt(allocation_info_.top(), remaining);
+    allocation_info_.Reset(nullptr, nullptr);
+  }
+}
+
+
+HeapObject* PagedSpace::SweepAndRetryAllocation(int size_in_bytes) {
   MarkCompactCollector* collector = heap()->mark_compact_collector();
   if (collector->sweeping_in_progress()) {
     // Wait for the sweeper threads here and complete the sweeping phase.
@@ -2673,7 +2822,17 @@
     // entries.
     return free_list_.Allocate(size_in_bytes);
   }
-  return NULL;
+  return nullptr;
+}
+
+
+HeapObject* CompactionSpace::SweepAndRetryAllocation(int size_in_bytes) {
+  MarkCompactCollector* collector = heap()->mark_compact_collector();
+  if (collector->sweeping_in_progress()) {
+    collector->SweepAndRefill(this);
+    return free_list_.Allocate(size_in_bytes);
+  }
+  return nullptr;
 }
 
 
@@ -2685,22 +2844,17 @@
   if (collector->sweeping_in_progress()) {
     // First try to refill the free-list, concurrent sweeper threads
     // may have freed some objects in the meantime.
-    collector->RefillFreeList(this);
+    RefillFreeList();
 
     // Retry the free list allocation.
     HeapObject* object = free_list_.Allocate(size_in_bytes);
     if (object != NULL) return object;
 
     // If sweeping is still in progress try to sweep pages on the main thread.
-    int free_chunk = collector->SweepInParallel(this, size_in_bytes);
-    collector->RefillFreeList(this);
-    if (free_chunk >= size_in_bytes) {
-      HeapObject* object = free_list_.Allocate(size_in_bytes);
-      // We should be able to allocate an object here since we just freed that
-      // much memory.
-      DCHECK(object != NULL);
-      if (object != NULL) return object;
-    }
+    collector->SweepInParallel(heap()->paged_space(identity()), size_in_bytes);
+    RefillFreeList();
+    object = free_list_.Allocate(size_in_bytes);
+    if (object != nullptr) return object;
   }
 
   // Free list allocation failed and there is no next page.  Fail if we have
@@ -2710,20 +2864,21 @@
       heap()->OldGenerationAllocationLimitReached()) {
     // If sweeper threads are active, wait for them at that point and steal
   // elements from their free-lists.
-    HeapObject* object = WaitForSweeperThreadsAndRetryAllocation(size_in_bytes);
-    if (object != NULL) return object;
+    HeapObject* object = SweepAndRetryAllocation(size_in_bytes);
+    return object;
   }
 
   // Try to expand the space and allocate in the new next page.
   if (Expand()) {
-    DCHECK(CountTotalPages() > 1 || size_in_bytes <= free_list_.available());
+    DCHECK((CountTotalPages() > 1) ||
+           (size_in_bytes <= free_list_.Available()));
     return free_list_.Allocate(size_in_bytes);
   }
 
   // If sweeper threads are active, wait for them at that point and steal
   // elements from their free-lists. Allocation may still fail there, which
   // would indicate that there is not enough memory for the given allocation.
-  return WaitForSweeperThreadsAndRetryAllocation(size_in_bytes);
+  return SweepAndRetryAllocation(size_in_bytes);
 }
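
The retry cascade in the slow allocation path above reduces to: refill from
concurrent sweeping, retry, sweep some pages on the main thread, retry, expand
the space if the allocation limit allows, and finally complete sweeping. The
sketch below compresses that control flow; the callables are stand-ins for
free_list_.Allocate(), RefillFreeList(), SweepInParallel(), Expand() and
SweepAndRetryAllocation() as used above, and none of the names are real V8 API.

    #include <functional>

    struct SlowPathSketch {
      std::function<void*()> try_free_list_allocation;
      std::function<void()> refill_free_list;
      std::function<void()> sweep_in_parallel;
      std::function<bool()> allocation_limit_reached;
      std::function<bool()> expand;
      std::function<void*()> sweep_and_retry;
      bool sweeping_in_progress = false;
      bool always_allocate = false;

      void* Allocate() {
        if (sweeping_in_progress) {
          // 1. Concurrent sweepers may have freed memory already.
          refill_free_list();
          if (void* object = try_free_list_allocation()) return object;
          // 2. Help out on the main thread, then retry once more.
          sweep_in_parallel();
          refill_free_list();
          if (void* object = try_free_list_allocation()) return object;
        }
        // 3. At the old-generation limit, finish sweeping instead of growing.
        if (!always_allocate && allocation_limit_reached()) {
          return sweep_and_retry();
        }
        // 4. Otherwise grow the space and allocate from the fresh page.
        if (expand()) return try_free_list_allocation();
        // 5. Last resort: wait for sweeping to finish and try again.
        return sweep_and_retry();
      }
    };

    int main() {
      int free_memory = 0;
      SlowPathSketch sketch;
      sketch.sweeping_in_progress = true;
      sketch.refill_free_list = [&] { free_memory += 16; };
      sketch.sweep_in_parallel = [&] { free_memory += 64; };
      sketch.try_free_list_allocation = [&]() -> void* {
        // Pretend a 64-byte allocation succeeds once enough has been swept.
        return free_memory >= 64 ? &free_memory : nullptr;
      };
      sketch.allocation_limit_reached = [] { return false; };
      sketch.expand = [] { return true; };
      sketch.sweep_and_retry = []() -> void* { return nullptr; };
      // Succeeds on the second retry, after main-thread sweeping helped out.
      return sketch.Allocate() != nullptr ? 0 : 1;
    }
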
 
 
@@ -2876,25 +3031,10 @@
 
 // -----------------------------------------------------------------------------
 // MapSpace implementation
-// TODO(mvstanton): this is weird...the compiler can't make a vtable unless
-// there is at least one non-inlined virtual function. I would prefer to hide
-// the VerifyObject definition behind VERIFY_HEAP.
 
+#ifdef VERIFY_HEAP
 void MapSpace::VerifyObject(HeapObject* object) { CHECK(object->IsMap()); }
-
-
-// -----------------------------------------------------------------------------
-// CellSpace and PropertyCellSpace implementation
-// TODO(mvstanton): this is weird...the compiler can't make a vtable unless
-// there is at least one non-inlined virtual function. I would prefer to hide
-// the VerifyObject definition behind VERIFY_HEAP.
-
-void CellSpace::VerifyObject(HeapObject* object) { CHECK(object->IsCell()); }
-
-
-void PropertyCellSpace::VerifyObject(HeapObject* object) {
-  CHECK(object->IsPropertyCell());
-}
+#endif
 
 
 // -----------------------------------------------------------------------------
@@ -2902,14 +3042,6 @@
 
 LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
   current_ = space->first_page_;
-  size_func_ = NULL;
-}
-
-
-LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space,
-                                         HeapObjectCallback size_func) {
-  current_ = space->first_page_;
-  size_func_ = size_func;
 }
 
 
@@ -2924,24 +3056,23 @@
 
 // -----------------------------------------------------------------------------
 // LargeObjectSpace
-static bool ComparePointers(void* key1, void* key2) { return key1 == key2; }
 
 
-LargeObjectSpace::LargeObjectSpace(Heap* heap, intptr_t max_capacity,
-                                   AllocationSpace id)
+LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id)
     : Space(heap, id, NOT_EXECUTABLE),  // Managed on a per-allocation basis
-      max_capacity_(max_capacity),
       first_page_(NULL),
       size_(0),
       page_count_(0),
       objects_size_(0),
-      chunk_map_(ComparePointers, 1024) {}
+      chunk_map_(HashMap::PointersMatch, 1024) {}
+
+
+LargeObjectSpace::~LargeObjectSpace() {}
 
 
 bool LargeObjectSpace::SetUp() {
   first_page_ = NULL;
   size_ = 0;
-  maximum_committed_ = 0;
   page_count_ = 0;
   objects_size_ = 0;
   chunk_map_.Clear();
@@ -2968,35 +3099,29 @@
                                                Executability executable) {
   // Check if we want to force a GC before growing the old space further.
   // If so, fail the allocation.
-  if (!heap()->always_allocate() &&
-      heap()->OldGenerationAllocationLimitReached()) {
+  if (!heap()->CanExpandOldGeneration(object_size)) {
     return AllocationResult::Retry(identity());
   }
 
-  if (!CanAllocateSize(object_size)) return AllocationResult::Retry(identity());
-
   LargePage* page = heap()->isolate()->memory_allocator()->AllocateLargePage(
       object_size, this, executable);
   if (page == NULL) return AllocationResult::Retry(identity());
   DCHECK(page->area_size() >= object_size);
 
   size_ += static_cast<int>(page->size());
+  AccountCommitted(static_cast<intptr_t>(page->size()));
   objects_size_ += object_size;
   page_count_++;
   page->set_next_page(first_page_);
   first_page_ = page;
 
-  if (size_ > maximum_committed_) {
-    maximum_committed_ = size_;
-  }
-
   // Register all MemoryChunk::kAlignment-aligned chunks covered by
   // this large page in the chunk map.
   uintptr_t base = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment;
   uintptr_t limit = base + (page->size() - 1) / MemoryChunk::kAlignment;
   for (uintptr_t key = base; key <= limit; key++) {
-    HashMap::Entry* entry = chunk_map_.Lookup(reinterpret_cast<void*>(key),
-                                              static_cast<uint32_t>(key), true);
+    HashMap::Entry* entry = chunk_map_.LookupOrInsert(
+        reinterpret_cast<void*>(key), static_cast<uint32_t>(key));
     DCHECK(entry != NULL);
     entry->value = page;
   }
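
The loop above registers every MemoryChunk::kAlignment-aligned slot covered by
a large page under its own key, which is what lets FindPage() below map any
interior address back to the owning LargePage. A small sketch of that keying
scheme using std::unordered_map instead of V8's HashMap; the alignment value
and the page stub are assumptions made for illustration.

    #include <cassert>
    #include <cstdint>
    #include <unordered_map>

    namespace sketch {

    constexpr uintptr_t kAlignment = uintptr_t{1} << 19;  // assumed 512K

    struct LargePageStub {
      uintptr_t start;
      uintptr_t size;
    };

    using ChunkMap = std::unordered_map<uintptr_t, LargePageStub*>;

    // Register every kAlignment-aligned chunk covered by the page.
    void Register(ChunkMap* map, LargePageStub* page) {
      uintptr_t base = page->start / kAlignment;
      uintptr_t limit = base + (page->size - 1) / kAlignment;
      for (uintptr_t key = base; key <= limit; key++) (*map)[key] = page;
    }

    // Any interior address maps back to the page via its aligned key.
    LargePageStub* FindPage(const ChunkMap& map, uintptr_t addr) {
      ChunkMap::const_iterator it = map.find(addr / kAlignment);
      return it == map.end() ? nullptr : it->second;
    }

    }  // namespace sketch

    int main() {
      sketch::ChunkMap map;
      sketch::LargePageStub page = {10 * sketch::kAlignment,
                                    3 * sketch::kAlignment};
      sketch::Register(&map, &page);
      // An address in the middle of the large page finds the page.
      assert(sketch::FindPage(map, page.start + 2 * sketch::kAlignment + 123) ==
             &page);
      // An address past the page does not.
      assert(sketch::FindPage(map, page.start + 4 * sketch::kAlignment) ==
             nullptr);
      return 0;
    }
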
@@ -3043,7 +3168,7 @@
 LargePage* LargeObjectSpace::FindPage(Address a) {
   uintptr_t key = reinterpret_cast<uintptr_t>(a) / MemoryChunk::kAlignment;
   HashMap::Entry* e = chunk_map_.Lookup(reinterpret_cast<void*>(key),
-                                        static_cast<uint32_t>(key), false);
+                                        static_cast<uint32_t>(key));
   if (e != NULL) {
     DCHECK(e->value != NULL);
     LargePage* page = reinterpret_cast<LargePage*>(e->value);
@@ -3056,19 +3181,28 @@
 }
 
 
+void LargeObjectSpace::ClearMarkingStateOfLiveObjects() {
+  LargePage* current = first_page_;
+  while (current != NULL) {
+    HeapObject* object = current->GetObject();
+    MarkBit mark_bit = Marking::MarkBitFrom(object);
+    DCHECK(Marking::IsBlack(mark_bit));
+    Marking::BlackToWhite(mark_bit);
+    Page::FromAddress(object->address())->ResetProgressBar();
+    Page::FromAddress(object->address())->ResetLiveBytes();
+    current = current->next_page();
+  }
+}
+
+
 void LargeObjectSpace::FreeUnmarkedObjects() {
   LargePage* previous = NULL;
   LargePage* current = first_page_;
   while (current != NULL) {
     HeapObject* object = current->GetObject();
-    // Can this large page contain pointers to non-trivial objects.  No other
-    // pointer object is this big.
-    bool is_pointer_object = object->IsFixedArray();
     MarkBit mark_bit = Marking::MarkBitFrom(object);
-    if (mark_bit.Get()) {
-      mark_bit.Clear();
-      Page::FromAddress(object->address())->ResetProgressBar();
-      Page::FromAddress(object->address())->ResetLiveBytes();
+    DCHECK(!Marking::IsGrey(mark_bit));
+    if (Marking::IsBlack(mark_bit)) {
       previous = current;
       current = current->next_page();
     } else {
@@ -3085,6 +3219,7 @@
       heap()->mark_compact_collector()->ReportDeleteIfNeeded(object,
                                                              heap()->isolate());
       size_ -= static_cast<int>(page->size());
+      AccountUncommitted(static_cast<intptr_t>(page->size()));
       objects_size_ -= object->Size();
       page_count_--;
 
@@ -3099,14 +3234,9 @@
                           static_cast<uint32_t>(key));
       }
 
-      if (is_pointer_object) {
-        heap()->QueueMemoryChunkForFree(page);
-      } else {
-        heap()->isolate()->memory_allocator()->Free(page);
-      }
+      heap()->QueueMemoryChunkForFree(page);
     }
   }
-  heap()->FreeQueuedChunks();
 }
 
 
@@ -3122,6 +3252,11 @@
 }
 
 
+bool LargeObjectSpace::Contains(Address address) {
+  return FindPage(address) != NULL;
+}
+
+
 #ifdef VERIFY_HEAP
 // We do not assume that the large object iterator works, because it depends
 // on the invariants we are checking during verification.
@@ -3146,8 +3281,7 @@
     // large object space.
     CHECK(object->IsCode() || object->IsSeqString() ||
           object->IsExternalString() || object->IsFixedArray() ||
-          object->IsFixedDoubleArray() || object->IsByteArray() ||
-          object->IsConstantPoolArray());
+          object->IsFixedDoubleArray() || object->IsByteArray());
 
     // The object itself should look OK.
     object->ObjectVerify();
@@ -3217,14 +3351,14 @@
   PrintF("Page@%p in %s\n", this->address(),
          AllocationSpaceName(this->owner()->identity()));
   printf(" --------------------------------------\n");
-  HeapObjectIterator objects(this, heap()->GcSafeSizeOfOldObjectFunction());
+  HeapObjectIterator objects(this);
   unsigned mark_size = 0;
   for (HeapObject* object = objects.Next(); object != NULL;
        object = objects.Next()) {
-    bool is_marked = Marking::MarkBitFrom(object).Get();
+    bool is_marked = Marking::IsBlackOrGrey(Marking::MarkBitFrom(object));
     PrintF(" %c ", (is_marked ? '!' : ' '));  // Indent a little.
     if (is_marked) {
-      mark_size += heap()->GcSafeSizeOfOldObjectFunction()(object);
+      mark_size += object->Size();
     }
     object->ShortPrint();
     PrintF("\n");
@@ -3234,5 +3368,5 @@
 }
 
 #endif  // DEBUG
-}
-}  // namespace v8::internal
+}  // namespace internal
+}  // namespace v8
diff --git a/src/heap/spaces.h b/src/heap/spaces.h
index dcd3364..a8102ca 100644
--- a/src/heap/spaces.h
+++ b/src/heap/spaces.h
@@ -6,17 +6,20 @@
 #define V8_HEAP_SPACES_H_
 
 #include "src/allocation.h"
+#include "src/atomic-utils.h"
 #include "src/base/atomicops.h"
 #include "src/base/bits.h"
 #include "src/base/platform/mutex.h"
+#include "src/flags.h"
 #include "src/hashmap.h"
 #include "src/list.h"
-#include "src/log.h"
+#include "src/objects.h"
 #include "src/utils.h"
 
 namespace v8 {
 namespace internal {
 
+class CompactionSpaceCollection;
 class Isolate;
 
 // -----------------------------------------------------------------------------
@@ -34,7 +37,7 @@
 // area.
 //
 // There is a separate large object space for objects larger than
-// Page::kMaxHeapObjectSize, so that they do not have to move during
+// Page::kMaxRegularHeapObjectSize, so that they do not have to move during
 // collection. The large object space is paged. Pages in large object space
 // may be larger than the page size.
 //
@@ -43,11 +46,11 @@
 //
 // During scavenges and mark-sweep collections we sometimes (after a store
 // buffer overflow) iterate intergenerational pointers without decoding heap
-// object maps so if the page belongs to old pointer space or large object
-// space it is essential to guarantee that the page does not contain any
+// object maps so if the page belongs to old space or large object space
+// it is essential to guarantee that the page does not contain any
 // garbage pointers to new space: every pointer aligned word which satisfies
 // the Heap::InNewSpace() predicate must be a pointer to a live heap object in
-// new space. Thus objects in old pointer and large object spaces should have a
+// new space. Thus objects in old space and large object spaces should have a
 // special layout (e.g. no bare integer fields). This requirement does not
 // apply to map space which is iterated in a special fashion. However we still
 // require pointer fields of dead maps to be cleaned.
@@ -84,29 +87,28 @@
 #define DCHECK_OBJECT_SIZE(size) \
   DCHECK((0 < size) && (size <= Page::kMaxRegularHeapObjectSize))
 
+#define DCHECK_CODEOBJECT_SIZE(size, code_space) \
+  DCHECK((0 < size) && (size <= code_space->AreaSize()))
+
 #define DCHECK_PAGE_OFFSET(offset) \
   DCHECK((Page::kObjectStartOffset <= offset) && (offset <= Page::kPageSize))
 
 #define DCHECK_MAP_PAGE_INDEX(index) \
   DCHECK((0 <= index) && (index <= MapSpace::kMaxMapPageIndex))
 
-
-class PagedSpace;
-class MemoryAllocator;
 class AllocationInfo;
-class Space;
+class CompactionSpace;
 class FreeList;
+class MemoryAllocator;
 class MemoryChunk;
+class PagedSpace;
+class Space;
 
 class MarkBit {
  public:
   typedef uint32_t CellType;
 
-  inline MarkBit(CellType* cell, CellType mask, bool data_only)
-      : cell_(cell), mask_(mask), data_only_(data_only) {}
-
-  inline CellType* cell() { return cell_; }
-  inline CellType mask() { return mask_; }
+  inline MarkBit(CellType* cell, CellType mask) : cell_(cell), mask_(mask) {}
 
 #ifdef DEBUG
   bool operator==(const MarkBit& other) {
@@ -114,29 +116,27 @@
   }
 #endif
 
-  inline void Set() { *cell_ |= mask_; }
-  inline bool Get() { return (*cell_ & mask_) != 0; }
-  inline void Clear() { *cell_ &= ~mask_; }
-
-  inline bool data_only() { return data_only_; }
+ private:
+  inline CellType* cell() { return cell_; }
+  inline CellType mask() { return mask_; }
 
   inline MarkBit Next() {
     CellType new_mask = mask_ << 1;
     if (new_mask == 0) {
-      return MarkBit(cell_ + 1, 1, data_only_);
+      return MarkBit(cell_ + 1, 1);
     } else {
-      return MarkBit(cell_, new_mask, data_only_);
+      return MarkBit(cell_, new_mask);
     }
   }
 
- private:
+  inline void Set() { *cell_ |= mask_; }
+  inline bool Get() { return (*cell_ & mask_) != 0; }
+  inline void Clear() { *cell_ &= ~mask_; }
+
   CellType* cell_;
   CellType mask_;
-  // This boolean indicates that the object is in a data-only space with no
-  // pointers.  This enables some optimizations when marking.
-  // It is expected that this field is inlined and turned into control flow
-  // at the place where the MarkBit object is created.
-  bool data_only_;
+
+  friend class Marking;
 };
 
 
@@ -169,6 +169,10 @@
     return index >> kBitsPerCellLog2;
   }
 
+  V8_INLINE static uint32_t IndexInCell(uint32_t index) {
+    return index & kBitIndexMask;
+  }
+
   INLINE(static uint32_t CellToIndex(uint32_t index)) {
     return index << kBitsPerCellLog2;
   }
@@ -187,10 +191,10 @@
     return reinterpret_cast<Bitmap*>(addr);
   }
 
-  inline MarkBit MarkBitFromIndex(uint32_t index, bool data_only = false) {
-    MarkBit::CellType mask = 1 << (index & kBitIndexMask);
+  inline MarkBit MarkBitFromIndex(uint32_t index) {
+    MarkBit::CellType mask = 1u << IndexInCell(index);
     MarkBit::CellType* cell = this->cells() + (index >> kBitsPerCellLog2);
-    return MarkBit(cell, mask, data_only);
+    return MarkBit(cell, mask);
   }
 
   static inline void Clear(MemoryChunk* chunk);
@@ -260,6 +264,23 @@
     }
     return true;
   }
+
+  // Clears all bits starting from {cell_base_index} up to and excluding
+  // {index}. Note that {cell_base_index} is required to be cell aligned.
+  void ClearRange(uint32_t cell_base_index, uint32_t index) {
+    DCHECK_EQ(IndexInCell(cell_base_index), 0u);
+    DCHECK_GE(index, cell_base_index);
+    uint32_t start_cell_index = IndexToCell(cell_base_index);
+    uint32_t end_cell_index = IndexToCell(index);
+    DCHECK_GE(end_cell_index, start_cell_index);
+    // Clear all cells till the cell containing the last index.
+    for (uint32_t i = start_cell_index; i < end_cell_index; i++) {
+      cells()[i] = 0;
+    }
+    // Clear all bits in the last cell till the last bit before index.
+    uint32_t clear_mask = ~((1u << IndexInCell(index)) - 1);
+    cells()[end_cell_index] &= clear_mask;
+  }
 };
 
 
@@ -272,37 +293,197 @@
 // any heap object.
 class MemoryChunk {
  public:
+  enum MemoryChunkFlags {
+    IS_EXECUTABLE,
+    ABOUT_TO_BE_FREED,
+    POINTERS_TO_HERE_ARE_INTERESTING,
+    POINTERS_FROM_HERE_ARE_INTERESTING,
+    SCAN_ON_SCAVENGE,
+    IN_FROM_SPACE,  // Mutually exclusive with IN_TO_SPACE.
+    IN_TO_SPACE,    // All pages in new space have one of these two set.
+    NEW_SPACE_BELOW_AGE_MARK,
+    EVACUATION_CANDIDATE,
+    RESCAN_ON_EVACUATION,
+    NEVER_EVACUATE,  // May contain immortal immutables.
+    POPULAR_PAGE,    // Slots buffer of this page overflowed on the previous GC.
+
+    // WAS_SWEPT indicates that marking bits have been cleared by the sweeper,
+    // otherwise marking bits are still intact.
+    WAS_SWEPT,
+
+    // Large objects can have a progress bar in their page header. These
+    // objects are scanned in increments and will be kept black while being
+    // scanned. Even if the mutator writes to them they will be kept black
+    // and a white to grey transition is performed in the value.
+    HAS_PROGRESS_BAR,
+
+    // This flag is intended to be used for testing. Works only when both
+    // FLAG_stress_compaction and FLAG_manual_evacuation_candidates_selection
+    // are set. It forces the page to become an evacuation candidate at the
+    // next candidates selection cycle.
+    FORCE_EVACUATION_CANDIDATE_FOR_TESTING,
+
+    // This flag is intended to be used for testing.
+    NEVER_ALLOCATE_ON_PAGE,
+
+    // The memory chunk is already logically freed; however, the actual
+    // freeing still has to be performed.
+    PRE_FREED,
+
+    // |COMPACTION_WAS_ABORTED|: Indicates that the compaction in this page
+    //   has been aborted and needs special handling by the sweeper.
+    COMPACTION_WAS_ABORTED,
+
+    // Last flag, keep at bottom.
+    NUM_MEMORY_CHUNK_FLAGS
+  };
+
+  // |kCompactionDone|: Initial compaction state of a |MemoryChunk|.
+  // |kCompactingInProgress|:  Parallel compaction is currently in progress.
+  // |kCompactingFinalize|: Parallel compaction is done but the chunk needs to
+  //   be finalized.
+  // |kCompactingAborted|: Parallel compaction has been aborted, which should
+  //   for now only happen in OOM scenarios.
+  enum ParallelCompactingState {
+    kCompactingDone,
+    kCompactingInProgress,
+    kCompactingFinalize,
+    kCompactingAborted,
+  };
+
+  // |kSweepingDone|: The page state when sweeping is complete or sweeping must
+  //   not be performed on that page.
+  // |kSweepingFinalize|: A sweeper thread is done sweeping this page and will
+  //   not touch the page memory anymore.
+  // |kSweepingInProgress|: This page is currently swept by a sweeper thread.
+  // |kSweepingPending|: This page is ready for parallel sweeping.
+  enum ParallelSweepingState {
+    kSweepingDone,
+    kSweepingFinalize,
+    kSweepingInProgress,
+    kSweepingPending
+  };
+
+  // Every n write barrier invocations we go to runtime even though
+  // we could have handled it in generated code.  This lets us check
+  // whether we have hit the limit and should do some more marking.
+  static const int kWriteBarrierCounterGranularity = 500;
+
+  static const int kPointersToHereAreInterestingMask =
+      1 << POINTERS_TO_HERE_ARE_INTERESTING;
+
+  static const int kPointersFromHereAreInterestingMask =
+      1 << POINTERS_FROM_HERE_ARE_INTERESTING;
+
+  static const int kEvacuationCandidateMask = 1 << EVACUATION_CANDIDATE;
+
+  static const int kSkipEvacuationSlotsRecordingMask =
+      (1 << EVACUATION_CANDIDATE) | (1 << RESCAN_ON_EVACUATION) |
+      (1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE);
+
+  static const intptr_t kAlignment =
+      (static_cast<uintptr_t>(1) << kPageSizeBits);
+
+  static const intptr_t kAlignmentMask = kAlignment - 1;
+
+  static const intptr_t kSizeOffset = 0;
+
+  static const intptr_t kLiveBytesOffset =
+      kSizeOffset + kPointerSize  // size_t size
+      + kIntptrSize               // intptr_t flags_
+      + kPointerSize              // Address area_start_
+      + kPointerSize              // Address area_end_
+      + 2 * kPointerSize          // base::VirtualMemory reservation_
+      + kPointerSize              // Address owner_
+      + kPointerSize              // Heap* heap_
+      + kIntSize;                 // int store_buffer_counter_
+
+  static const size_t kSlotsBufferOffset =
+      kLiveBytesOffset + kIntSize;  // int live_byte_count_
+
+  static const size_t kWriteBarrierCounterOffset =
+      kSlotsBufferOffset + kPointerSize  // SlotsBuffer* slots_buffer_;
+      + kPointerSize;                    // SkipList* skip_list_;
+
+  static const size_t kMinHeaderSize =
+      kWriteBarrierCounterOffset +
+      kIntptrSize         // intptr_t write_barrier_counter_
+      + kIntSize          // int progress_bar_
+      + kPointerSize      // AtomicValue high_water_mark_
+      + kPointerSize      // base::Mutex* mutex_
+      + kPointerSize      // base::AtomicWord parallel_sweeping_
+      + kPointerSize      // AtomicValue parallel_compaction_
+      + 5 * kPointerSize  // AtomicNumber free-list statistics
+      + kPointerSize      // AtomicValue next_chunk_
+      + kPointerSize;     // AtomicValue prev_chunk_
+
+  // We add some more space to the computed header size to account for missing
+  // alignment requirements in our computation.
+  // Try to get kHeaderSize properly aligned on 32-bit and 64-bit machines.
+  static const size_t kHeaderSize = kMinHeaderSize + kIntSize;
+
+  static const int kBodyOffset =
+      CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize);
+
+  // The start offset of the object area in a page. Aligned to both maps and
+  // code alignment to be suitable for both.  Also aligned to 32 words because
+  // the marking bitmap is arranged in 32 bit chunks.
+  static const int kObjectStartAlignment = 32 * kPointerSize;
+  static const int kObjectStartOffset =
+      kBodyOffset - 1 +
+      (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment);
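
The kObjectStartOffset expression above is the usual "round x up to a multiple
of a" computation written out long-hand as x - 1 + (a - (x - 1) % a). A tiny
compile-time check that this agrees with the more familiar form, using made-up
sample values rather than V8's real header sizes.

    #include <cstddef>

    constexpr size_t RoundUpLongHand(size_t x, size_t a) {
      // Shape used for kObjectStartOffset above (requires x >= 1).
      return x - 1 + (a - (x - 1) % a);
    }

    constexpr size_t RoundUp(size_t x, size_t a) { return (x + a - 1) / a * a; }

    // Spot-check the two forms for a 32-word (256-byte) alignment.
    static_assert(RoundUpLongHand(1, 256) == RoundUp(1, 256), "");
    static_assert(RoundUpLongHand(255, 256) == RoundUp(255, 256), "");
    static_assert(RoundUpLongHand(256, 256) == RoundUp(256, 256), "");
    static_assert(RoundUpLongHand(257, 256) == RoundUp(257, 256), "");

    int main() { return 0; }
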
+
+  static const int kFlagsOffset = kPointerSize;
+
+  static void IncrementLiveBytesFromMutator(HeapObject* object, int by);
+
   // Only works if the pointer is in the first kPageSize of the MemoryChunk.
   static MemoryChunk* FromAddress(Address a) {
     return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask);
   }
+
   static const MemoryChunk* FromAddress(const byte* a) {
     return reinterpret_cast<const MemoryChunk*>(OffsetFrom(a) &
                                                 ~kAlignmentMask);
   }
 
+  static void IncrementLiveBytesFromGC(HeapObject* object, int by) {
+    MemoryChunk::FromAddress(object->address())->IncrementLiveBytes(by);
+  }
+
   // Only works for addresses in pointer spaces, not data or code spaces.
   static inline MemoryChunk* FromAnyPointerAddress(Heap* heap, Address addr);
 
+  static inline uint32_t FastAddressToMarkbitIndex(Address addr) {
+    const intptr_t offset = reinterpret_cast<intptr_t>(addr) & kAlignmentMask;
+    return static_cast<uint32_t>(offset) >> kPointerSizeLog2;
+  }
+
+  static inline void UpdateHighWaterMark(Address mark) {
+    if (mark == nullptr) return;
+    // Need to subtract one from the mark because when a chunk is full the
+    // top points to the next address after the chunk, which effectively belongs
+    // to another chunk. See the comment to Page::FromAllocationTop.
+    MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1);
+    intptr_t new_mark = static_cast<intptr_t>(mark - chunk->address());
+    intptr_t old_mark = 0;
+    do {
+      old_mark = chunk->high_water_mark_.Value();
+    } while ((new_mark > old_mark) &&
+             !chunk->high_water_mark_.TrySetValue(old_mark, new_mark));
+  }
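
UpdateHighWaterMark() above is a lock-free "monotonic max": the new mark only
replaces the stored value while it is still larger, and the loop retries when
another thread won the race; the same retry pattern reappears below in
MemoryAllocator::UpdateAllocatedSpaceLimits(). A minimal sketch of the idiom
with std::atomic rather than V8's AtomicValue.

    #include <atomic>
    #include <cassert>
    #include <cstdint>

    // Raise *mark to new_mark unless a concurrent update already stored
    // something larger; retry only while the stored value is still smaller.
    void UpdateMax(std::atomic<intptr_t>* mark, intptr_t new_mark) {
      intptr_t old_mark = mark->load(std::memory_order_relaxed);
      while (new_mark > old_mark &&
             !mark->compare_exchange_weak(old_mark, new_mark,
                                          std::memory_order_relaxed)) {
        // compare_exchange_weak reloads old_mark on failure; just loop again.
      }
    }

    int main() {
      std::atomic<intptr_t> high_water_mark{128};
      UpdateMax(&high_water_mark, 96);   // smaller: no effect
      UpdateMax(&high_water_mark, 256);  // larger: stored
      assert(high_water_mark.load() == 256);
      return 0;
    }
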
+
   Address address() { return reinterpret_cast<Address>(this); }
 
   bool is_valid() { return address() != NULL; }
 
-  MemoryChunk* next_chunk() const {
-    return reinterpret_cast<MemoryChunk*>(base::Acquire_Load(&next_chunk_));
-  }
+  MemoryChunk* next_chunk() { return next_chunk_.Value(); }
 
-  MemoryChunk* prev_chunk() const {
-    return reinterpret_cast<MemoryChunk*>(base::Acquire_Load(&prev_chunk_));
-  }
+  MemoryChunk* prev_chunk() { return prev_chunk_.Value(); }
 
-  void set_next_chunk(MemoryChunk* next) {
-    base::Release_Store(&next_chunk_, reinterpret_cast<base::AtomicWord>(next));
-  }
+  void set_next_chunk(MemoryChunk* next) { next_chunk_.SetValue(next); }
 
-  void set_prev_chunk(MemoryChunk* prev) {
-    base::Release_Store(&prev_chunk_, reinterpret_cast<base::AtomicWord>(prev));
-  }
+  void set_prev_chunk(MemoryChunk* prev) { prev_chunk_.SetValue(prev); }
 
   Space* owner() const {
     if ((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) ==
@@ -323,8 +504,6 @@
 
   base::VirtualMemory* reserved_memory() { return &reservation_; }
 
-  void InitializeReservedMemory() { reservation_.Reset(); }
-
   void set_reserved_memory(base::VirtualMemory* reservation) {
     DCHECK_NOT_NULL(reservation);
     reservation_.TakeControl(reservation);
@@ -356,52 +535,6 @@
     return addr >= area_start() && addr <= area_end();
   }
 
-  // Every n write barrier invocations we go to runtime even though
-  // we could have handled it in generated code.  This lets us check
-  // whether we have hit the limit and should do some more marking.
-  static const int kWriteBarrierCounterGranularity = 500;
-
-  enum MemoryChunkFlags {
-    IS_EXECUTABLE,
-    ABOUT_TO_BE_FREED,
-    POINTERS_TO_HERE_ARE_INTERESTING,
-    POINTERS_FROM_HERE_ARE_INTERESTING,
-    SCAN_ON_SCAVENGE,
-    IN_FROM_SPACE,  // Mutually exclusive with IN_TO_SPACE.
-    IN_TO_SPACE,    // All pages in new space has one of these two set.
-    NEW_SPACE_BELOW_AGE_MARK,
-    CONTAINS_ONLY_DATA,
-    EVACUATION_CANDIDATE,
-    RESCAN_ON_EVACUATION,
-
-    // WAS_SWEPT indicates that marking bits have been cleared by the sweeper,
-    // otherwise marking bits are still intact.
-    WAS_SWEPT,
-
-    // Large objects can have a progress bar in their page header. These object
-    // are scanned in increments and will be kept black while being scanned.
-    // Even if the mutator writes to them they will be kept black and a white
-    // to grey transition is performed in the value.
-    HAS_PROGRESS_BAR,
-
-    // Last flag, keep at bottom.
-    NUM_MEMORY_CHUNK_FLAGS
-  };
-
-
-  static const int kPointersToHereAreInterestingMask =
-      1 << POINTERS_TO_HERE_ARE_INTERESTING;
-
-  static const int kPointersFromHereAreInterestingMask =
-      1 << POINTERS_FROM_HERE_ARE_INTERESTING;
-
-  static const int kEvacuationCandidateMask = 1 << EVACUATION_CANDIDATE;
-
-  static const int kSkipEvacuationSlotsRecordingMask =
-      (1 << EVACUATION_CANDIDATE) | (1 << RESCAN_ON_EVACUATION) |
-      (1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE);
-
-
   void SetFlag(int flag) { flags_ |= static_cast<uintptr_t>(1) << flag; }
 
   void ClearFlag(int flag) { flags_ &= ~(static_cast<uintptr_t>(1) << flag); }
@@ -428,36 +561,30 @@
   // Return all current flags.
   intptr_t GetFlags() { return flags_; }
 
-
-  // SWEEPING_DONE - The page state when sweeping is complete or sweeping must
-  // not be performed on that page.
-  // SWEEPING_FINALIZE - A sweeper thread is done sweeping this page and will
-  // not touch the page memory anymore.
-  // SWEEPING_IN_PROGRESS - This page is currently swept by a sweeper thread.
-  // SWEEPING_PENDING - This page is ready for parallel sweeping.
-  enum ParallelSweepingState {
-    SWEEPING_DONE,
-    SWEEPING_FINALIZE,
-    SWEEPING_IN_PROGRESS,
-    SWEEPING_PENDING
-  };
-
-  ParallelSweepingState parallel_sweeping() {
-    return static_cast<ParallelSweepingState>(
-        base::Acquire_Load(&parallel_sweeping_));
+  AtomicValue<ParallelSweepingState>& parallel_sweeping_state() {
+    return parallel_sweeping_;
   }
 
-  void set_parallel_sweeping(ParallelSweepingState state) {
-    base::Release_Store(&parallel_sweeping_, state);
+  AtomicValue<ParallelCompactingState>& parallel_compaction_state() {
+    return parallel_compaction_;
   }
 
-  bool TryParallelSweeping() {
-    return base::Acquire_CompareAndSwap(&parallel_sweeping_, SWEEPING_PENDING,
-                                        SWEEPING_IN_PROGRESS) ==
-           SWEEPING_PENDING;
+  bool TryLock() { return mutex_->TryLock(); }
+
+  base::Mutex* mutex() { return mutex_; }
+
+  // WaitUntilSweepingCompleted only works when concurrent sweeping is in
+  // progress, in particular when we know that right before this call a
+  // sweeper thread was sweeping this page.
+  void WaitUntilSweepingCompleted() {
+    mutex_->Lock();
+    mutex_->Unlock();
+    DCHECK(SweepingCompleted());
   }
 
-  bool SweepingCompleted() { return parallel_sweeping() <= SWEEPING_FINALIZE; }
+  bool SweepingCompleted() {
+    return parallel_sweeping_state().Value() <= kSweepingFinalize;
+  }
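
WaitUntilSweepingCompleted() above relies on the convention that a sweeper
thread holds the page mutex for the whole time it sweeps the page, so locking
and immediately unlocking doubles as "wait until the sweeper is done". A small
sketch of that idiom with std::mutex and std::thread; the sweeping work and the
start-up handshake are faked for the example.

    #include <atomic>
    #include <cassert>
    #include <chrono>
    #include <mutex>
    #include <thread>

    int main() {
      std::mutex page_mutex;
      std::atomic<bool> sweeping_started{false};
      bool swept = false;

      // The "sweeper" thread holds the page mutex for the entire sweep.
      std::thread sweeper([&] {
        std::lock_guard<std::mutex> lock(page_mutex);
        sweeping_started.store(true);
        std::this_thread::sleep_for(std::chrono::milliseconds(10));
        swept = true;  // stands in for the actual sweeping work
      });

      // The wait is only valid once a sweeper owns the page, so wait for that
      // before imitating WaitUntilSweepingCompleted().
      while (!sweeping_started.load()) std::this_thread::yield();

      // Lock + unlock of the page mutex is the whole "wait": it cannot succeed
      // until the sweeper released the mutex, i.e. finished the page.
      page_mutex.lock();
      page_mutex.unlock();
      assert(swept);  // the DCHECK(SweepingCompleted()) analogue

      sweeper.join();
      return 0;
    }
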
 
   // Manage live byte count (count of bytes known to be live,
   // because they are marked black).
@@ -468,6 +595,7 @@
     }
     live_byte_count_ = 0;
   }
+
   void IncrementLiveBytes(int by) {
     if (FLAG_gc_verbose) {
       printf("UpdateLiveBytes:%p:%x%c=%x->%x\n", static_cast<void*>(this),
@@ -475,13 +603,21 @@
              live_byte_count_ + by);
     }
     live_byte_count_ += by;
+    DCHECK_GE(live_byte_count_, 0);
     DCHECK_LE(static_cast<unsigned>(live_byte_count_), size_);
   }
+
   int LiveBytes() {
-    DCHECK(static_cast<unsigned>(live_byte_count_) <= size_);
+    DCHECK_LE(static_cast<unsigned>(live_byte_count_), size_);
     return live_byte_count_;
   }
 
+  void SetLiveBytes(int live_bytes) {
+    DCHECK_GE(live_bytes, 0);
+    DCHECK_LE(static_cast<unsigned>(live_bytes), size_);
+    live_byte_count_ = live_bytes;
+  }
+
   int write_barrier_counter() {
     return static_cast<int>(write_barrier_counter_);
   }
@@ -507,50 +643,6 @@
     }
   }
 
-  bool IsLeftOfProgressBar(Object** slot) {
-    Address slot_address = reinterpret_cast<Address>(slot);
-    DCHECK(slot_address > this->address());
-    return (slot_address - (this->address() + kObjectStartOffset)) <
-           progress_bar();
-  }
-
-  static void IncrementLiveBytesFromGC(Address address, int by) {
-    MemoryChunk::FromAddress(address)->IncrementLiveBytes(by);
-  }
-
-  static void IncrementLiveBytesFromMutator(Address address, int by);
-
-  static const intptr_t kAlignment =
-      (static_cast<uintptr_t>(1) << kPageSizeBits);
-
-  static const intptr_t kAlignmentMask = kAlignment - 1;
-
-  static const intptr_t kSizeOffset = 0;
-
-  static const intptr_t kLiveBytesOffset =
-      kSizeOffset + kPointerSize + kPointerSize + kPointerSize + kPointerSize +
-      kPointerSize + kPointerSize + kPointerSize + kPointerSize + kIntSize;
-
-  static const size_t kSlotsBufferOffset = kLiveBytesOffset + kIntSize;
-
-  static const size_t kWriteBarrierCounterOffset =
-      kSlotsBufferOffset + kPointerSize + kPointerSize;
-
-  static const size_t kHeaderSize =
-      kWriteBarrierCounterOffset + kPointerSize + kIntSize + kIntSize +
-      kPointerSize + 5 * kPointerSize + kPointerSize + kPointerSize;
-
-  static const int kBodyOffset =
-      CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize);
-
-  // The start offset of the object area in a page. Aligned to both maps and
-  // code alignment to be suitable for both.  Also aligned to 32 words because
-  // the marking bitmap is arranged in 32 bit chunks.
-  static const int kObjectStartAlignment = 32 * kPointerSize;
-  static const int kObjectStartOffset =
-      kBodyOffset - 1 +
-      (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment);
-
   size_t size() const { return size_; }
 
   void set_size(size_t size) { size_ = size; }
@@ -564,8 +656,6 @@
     return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
   }
 
-  bool ContainsOnlyData() { return IsFlagSet(CONTAINS_ONLY_DATA); }
-
   bool InNewSpace() {
     return (flags_ & ((1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE))) != 0;
   }
@@ -574,7 +664,6 @@
 
   bool InFromSpace() { return IsFlagSet(IN_FROM_SPACE); }
 
-  // ---------------------------------------------------------------------
   // Markbits support
 
   inline Bitmap* markbits() {
@@ -587,12 +676,6 @@
     return static_cast<uint32_t>(addr - this->address()) >> kPointerSizeLog2;
   }
 
-  inline static uint32_t FastAddressToMarkbitIndex(Address addr) {
-    const intptr_t offset = reinterpret_cast<intptr_t>(addr) & kAlignmentMask;
-
-    return static_cast<uint32_t>(offset) >> kPointerSizeLog2;
-  }
-
   inline Address MarkbitIndexToAddress(uint32_t index) {
     return this->address() + (index << kPointerSizeLog2);
   }
@@ -602,9 +685,18 @@
 
   inline Heap* heap() const { return heap_; }
 
-  static const int kFlagsOffset = kPointerSize;
+  bool NeverEvacuate() { return IsFlagSet(NEVER_EVACUATE); }
 
-  bool IsEvacuationCandidate() { return IsFlagSet(EVACUATION_CANDIDATE); }
+  void MarkNeverEvacuate() { SetFlag(NEVER_EVACUATE); }
+
+  bool IsEvacuationCandidate() {
+    DCHECK(!(IsFlagSet(NEVER_EVACUATE) && IsFlagSet(EVACUATION_CANDIDATE)));
+    return IsFlagSet(EVACUATION_CANDIDATE);
+  }
+
+  bool CanAllocate() {
+    return !IsEvacuationCandidate() && !IsFlagSet(NEVER_ALLOCATE_ON_PAGE);
+  }
 
   bool ShouldSkipEvacuationSlotRecording() {
     return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0;
@@ -619,6 +711,7 @@
   inline SlotsBuffer** slots_buffer_address() { return &slots_buffer_; }
 
   void MarkEvacuationCandidate() {
+    DCHECK(!IsFlagSet(NEVER_EVACUATE));
     DCHECK(slots_buffer_ == NULL);
     SetFlag(EVACUATION_CANDIDATE);
   }
@@ -634,11 +727,16 @@
   bool CommitArea(size_t requested);
 
   // Approximate amount of physical memory committed for this chunk.
-  size_t CommittedPhysicalMemory() { return high_water_mark_; }
+  size_t CommittedPhysicalMemory() { return high_water_mark_.Value(); }
 
-  static inline void UpdateHighWaterMark(Address mark);
+  // Should be called when memory chunk is about to be freed.
+  void ReleaseAllocatedMemory();
 
  protected:
+  static MemoryChunk* Initialize(Heap* heap, Address base, size_t size,
+                                 Address area_start, Address area_end,
+                                 Executability executable, Space* owner);
+
   size_t size_;
   intptr_t flags_;
 
@@ -666,32 +764,33 @@
   int progress_bar_;
   // Assuming the initial allocation on a page is sequential,
   // count highest number of bytes ever allocated on the page.
-  int high_water_mark_;
+  AtomicValue<intptr_t> high_water_mark_;
 
-  base::AtomicWord parallel_sweeping_;
+  base::Mutex* mutex_;
+  AtomicValue<ParallelSweepingState> parallel_sweeping_;
+  AtomicValue<ParallelCompactingState> parallel_compaction_;
 
   // PagedSpace free-list statistics.
-  intptr_t available_in_small_free_list_;
-  intptr_t available_in_medium_free_list_;
-  intptr_t available_in_large_free_list_;
-  intptr_t available_in_huge_free_list_;
-  intptr_t non_available_small_blocks_;
+  AtomicNumber<intptr_t> available_in_small_free_list_;
+  AtomicNumber<intptr_t> available_in_medium_free_list_;
+  AtomicNumber<intptr_t> available_in_large_free_list_;
+  AtomicNumber<intptr_t> available_in_huge_free_list_;
+  AtomicNumber<intptr_t> non_available_small_blocks_;
 
-  static MemoryChunk* Initialize(Heap* heap, Address base, size_t size,
-                                 Address area_start, Address area_end,
-                                 Executability executable, Space* owner);
+  // next_chunk_ holds a pointer of type MemoryChunk
+  AtomicValue<MemoryChunk*> next_chunk_;
+  // prev_chunk_ holds a pointer of type MemoryChunk
+  AtomicValue<MemoryChunk*> prev_chunk_;
 
  private:
-  // next_chunk_ holds a pointer of type MemoryChunk
-  base::AtomicWord next_chunk_;
-  // prev_chunk_ holds a pointer of type MemoryChunk
-  base::AtomicWord prev_chunk_;
+  void InitializeReservedMemory() { reservation_.Reset(); }
 
   friend class MemoryAllocator;
+  friend class MemoryChunkValidator;
 };
 
 
-STATIC_ASSERT(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);
+enum FreeListCategoryType { kSmall, kMedium, kLarge, kHuge };
 
 
 // -----------------------------------------------------------------------------
@@ -720,8 +819,14 @@
   }
 
   // Returns the next page in the chain of pages owned by a space.
-  inline Page* next_page();
-  inline Page* prev_page();
+  inline Page* next_page() {
+    DCHECK(next_chunk()->owner() == owner());
+    return static_cast<Page*>(next_chunk());
+  }
+  inline Page* prev_page() {
+    DCHECK(prev_chunk()->owner() == owner());
+    return static_cast<Page*>(prev_chunk());
+  }
   inline void set_next_page(Page* page);
   inline void set_prev_page(Page* page);
 
@@ -747,11 +852,16 @@
   // Page size in bytes.  This must be a multiple of the OS page size.
   static const int kPageSize = 1 << kPageSizeBits;
 
-  // Maximum object size that fits in a page. Objects larger than that size
-  // are allocated in large object space and are never moved in memory. This
-  // also applies to new space allocation, since objects are never migrated
-  // from new space to large object space.  Takes double alignment into account.
-  static const int kMaxRegularHeapObjectSize = kPageSize - kObjectStartOffset;
+  // Maximum object size that gets allocated into regular pages. Objects larger
+  // than that size are allocated in large object space and are never moved in
+  // memory. This also applies to new space allocation, since objects are never
+  // migrated from new space to large object space. Takes double alignment into
+  // account.
+  // TODO(hpayer): This limit should be way smaller but we currently have
+  // short living objects >256K.
+  static const int kMaxRegularHeapObjectSize = 600 * KB;
+
+  static const int kAllocatableMemory = kPageSize - kObjectStartOffset;
 
   // Page size mask.
   static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
@@ -769,10 +879,17 @@
 
   void ResetFreeListStatistics();
 
-#define FRAGMENTATION_STATS_ACCESSORS(type, name) \
-  type name() { return name##_; }                 \
-  void set_##name(type name) { name##_ = name; }  \
-  void add_##name(type name) { name##_ += name; }
+  int LiveBytesFromFreeList() {
+    return static_cast<int>(
+        area_size() - non_available_small_blocks() -
+        available_in_small_free_list() - available_in_medium_free_list() -
+        available_in_large_free_list() - available_in_huge_free_list());
+  }
+
+#define FRAGMENTATION_STATS_ACCESSORS(type, name)        \
+  type name() { return name##_.Value(); }                \
+  void set_##name(type name) { name##_.SetValue(name); } \
+  void add_##name(type name) { name##_.Increment(name); }
 
   FRAGMENTATION_STATS_ACCESSORS(intptr_t, non_available_small_blocks)
   FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_small_free_list)
@@ -782,6 +899,42 @@
 
 #undef FRAGMENTATION_STATS_ACCESSORS
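
With the accessors above now routed through AtomicNumber (included via
src/atomic-utils.h), one invocation such as
FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_small_free_list) expands
to the three member functions sketched below. The AtomicNumberStub and
PageStatsSketch types are stand-ins invented for this illustration; only the
Value()/SetValue()/Increment() calls are taken from the macro itself.

    #include <cstdint>

    // Minimal stand-in for the real AtomicNumber from src/atomic-utils.h.
    template <typename T>
    class AtomicNumberStub {
     public:
      T Value() const { return value_; }
      void SetValue(T v) { value_ = v; }
      void Increment(T by) { value_ += by; }

     private:
      T value_ = 0;
    };

    struct PageStatsSketch {
      // Shape of the macro's expansion for one statistic (parameter renamed).
      intptr_t available_in_small_free_list() {
        return available_in_small_free_list_.Value();
      }
      void set_available_in_small_free_list(intptr_t bytes) {
        available_in_small_free_list_.SetValue(bytes);
      }
      void add_available_in_small_free_list(intptr_t bytes) {
        available_in_small_free_list_.Increment(bytes);
      }

      AtomicNumberStub<intptr_t> available_in_small_free_list_;
    };

    int main() {
      PageStatsSketch page;
      page.set_available_in_small_free_list(64);
      page.add_available_in_small_free_list(32);
      return page.available_in_small_free_list() == 96 ? 0 : 1;
    }
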
 
+  void add_available_in_free_list(FreeListCategoryType type, intptr_t bytes) {
+    switch (type) {
+      case kSmall:
+        add_available_in_small_free_list(bytes);
+        break;
+      case kMedium:
+        add_available_in_medium_free_list(bytes);
+        break;
+      case kLarge:
+        add_available_in_large_free_list(bytes);
+        break;
+      case kHuge:
+        add_available_in_huge_free_list(bytes);
+        break;
+      default:
+        UNREACHABLE();
+    }
+  }
+
+  intptr_t available_in_free_list(FreeListCategoryType type) {
+    switch (type) {
+      case kSmall:
+        return available_in_small_free_list();
+      case kMedium:
+        return available_in_medium_free_list();
+      case kLarge:
+        return available_in_large_free_list();
+      case kHuge:
+        return available_in_huge_free_list();
+      default:
+        UNREACHABLE();
+    }
+    UNREACHABLE();
+    return 0;
+  }
+
 #ifdef DEBUG
   void Print();
 #endif  // DEBUG
@@ -790,14 +943,11 @@
 };
 
 
-STATIC_ASSERT(sizeof(Page) <= MemoryChunk::kHeaderSize);
-
-
 class LargePage : public MemoryChunk {
  public:
   HeapObject* GetObject() { return HeapObject::FromAddress(area_start()); }
 
-  inline LargePage* next_page() const {
+  inline LargePage* next_page() {
     return static_cast<LargePage*>(next_chunk());
   }
 
@@ -809,14 +959,17 @@
   friend class MemoryAllocator;
 };
 
-STATIC_ASSERT(sizeof(LargePage) <= MemoryChunk::kHeaderSize);
 
 // ----------------------------------------------------------------------------
 // Space is the abstract superclass for all allocation spaces.
 class Space : public Malloced {
  public:
   Space(Heap* heap, AllocationSpace id, Executability executable)
-      : heap_(heap), id_(id), executable_(executable) {}
+      : heap_(heap),
+        id_(id),
+        executable_(executable),
+        committed_(0),
+        max_committed_(0) {}
 
   virtual ~Space() {}
 
@@ -828,6 +981,12 @@
   // Identity used in error reporting.
   AllocationSpace identity() { return id_; }
 
+  // Return the total amount of committed memory for this space, i.e.,
+  // allocatable memory and page headers.
+  virtual intptr_t CommittedMemory() { return committed_; }
+
+  virtual intptr_t MaximumCommittedMemory() { return max_committed_; }
+
   // Returns allocated size.
   virtual intptr_t Size() = 0;
 
@@ -835,6 +994,12 @@
   // (e.g. see LargeObjectSpace).
   virtual intptr_t SizeOfObjects() { return Size(); }
 
+  // Approximate amount of physical memory committed for this space.
+  virtual size_t CommittedPhysicalMemory() = 0;
+
+  // Return the available bytes without growing.
+  virtual intptr_t Available() = 0;
+
   virtual int RoundSizeDownToObjectAlignment(int size) {
     if (id_ == CODE_SPACE) {
       return RoundDown(size, kCodeAlignment);
@@ -847,10 +1012,46 @@
   virtual void Print() = 0;
 #endif
 
+ protected:
+  void AccountCommitted(intptr_t bytes) {
+    DCHECK_GE(bytes, 0);
+    committed_ += bytes;
+    if (committed_ > max_committed_) {
+      max_committed_ = committed_;
+    }
+  }
+
+  void AccountUncommitted(intptr_t bytes) {
+    DCHECK_GE(bytes, 0);
+    committed_ -= bytes;
+    DCHECK_GE(committed_, 0);
+  }
+
  private:
   Heap* heap_;
   AllocationSpace id_;
   Executability executable_;
+
+  // Keeps track of committed memory in a space.
+  intptr_t committed_;
+  intptr_t max_committed_;
+};
+
+
+class MemoryChunkValidator {
+  // Computed offsets should match the compiler generated ones.
+  STATIC_ASSERT(MemoryChunk::kSizeOffset == offsetof(MemoryChunk, size_));
+  STATIC_ASSERT(MemoryChunk::kLiveBytesOffset ==
+                offsetof(MemoryChunk, live_byte_count_));
+  STATIC_ASSERT(MemoryChunk::kSlotsBufferOffset ==
+                offsetof(MemoryChunk, slots_buffer_));
+  STATIC_ASSERT(MemoryChunk::kWriteBarrierCounterOffset ==
+                offsetof(MemoryChunk, write_barrier_counter_));
+
+  // Validate our estimates on the header size.
+  STATIC_ASSERT(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);
+  STATIC_ASSERT(sizeof(LargePage) <= MemoryChunk::kHeaderSize);
+  STATIC_ASSERT(sizeof(Page) <= MemoryChunk::kHeaderSize);
 };
 
 
@@ -871,10 +1072,6 @@
   // Returns false on failure.
   bool SetUp(size_t requested_size);
 
-  // Frees the range of virtual memory, and frees the data structures used to
-  // manage it.
-  void TearDown();
-
   bool valid() { return code_range_ != NULL; }
   Address start() {
     DCHECK(valid());
@@ -900,10 +1097,11 @@
   bool UncommitRawMemory(Address start, size_t length);
   void FreeRawMemory(Address buf, size_t length);
 
-  void ReserveEmergencyBlock();
-  void ReleaseEmergencyBlock();
-
  private:
+  // Frees the range of virtual memory, and frees the data structures used to
+  // manage it.
+  void TearDown();
+
   Isolate* isolate_;
 
   // The reserved range of virtual memory that all code objects are put in.
@@ -927,21 +1125,20 @@
     size_t size;
   };
 
+  // The global mutex guards free_list_ and allocation_list_ as GC threads may
+  // access both lists concurrently with the main thread.
+  base::Mutex code_range_mutex_;
+
   // Freed blocks of memory are added to the free list.  When the allocation
   // list is exhausted, the free list is sorted and merged to make the new
   // allocation list.
   List<FreeBlock> free_list_;
+
   // Memory is allocated from the free blocks on the allocation list.
   // The block at current_allocation_block_index_ is the current block.
   List<FreeBlock> allocation_list_;
   int current_allocation_block_index_;
 
-  // Emergency block guarantees that we can always allocate a page for
-  // evacuation candidates when code space is compacted. Emergency block is
-  // reserved immediately after GC and is released immedietely before
-  // allocating a page for evacuation.
-  FreeBlock emergency_block_;
-
   // Finds a block on the allocation list that contains at least the
   // requested amount of memory.  If none is found, sorts and merges
   // the existing free memory blocks, and searches again.
@@ -1026,33 +1223,46 @@
   LargePage* AllocateLargePage(intptr_t object_size, Space* owner,
                                Executability executable);
 
+  // PreFree logically frees the object, i.e., it takes care of the size
+  // bookkeeping and calls the allocation callback.
+  void PreFreeMemory(MemoryChunk* chunk);
+
+  // PerformFreeMemory can be called concurrently after PreFreeMemory has run.
+  void PerformFreeMemory(MemoryChunk* chunk);
+
+  // Free is a wrapper method that calls PreFreeMemory and PerformFreeMemory
+  // together.
   void Free(MemoryChunk* chunk);
 
-  // Returns the maximum available bytes of heaps.
-  intptr_t Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }
-
   // Returns allocated spaces in bytes.
-  intptr_t Size() { return size_; }
+  intptr_t Size() { return size_.Value(); }
+
+  // Returns allocated executable spaces in bytes.
+  intptr_t SizeExecutable() { return size_executable_.Value(); }
+
+  // Returns the maximum available bytes of heaps.
+  intptr_t Available() {
+    intptr_t size = Size();
+    return capacity_ < size ? 0 : capacity_ - size;
+  }
 
   // Returns the maximum available executable bytes of heaps.
   intptr_t AvailableExecutable() {
-    if (capacity_executable_ < size_executable_) return 0;
-    return capacity_executable_ - size_executable_;
+    intptr_t executable_size = SizeExecutable();
+    if (capacity_executable_ < executable_size) return 0;
+    return capacity_executable_ - executable_size;
   }
 
-  // Returns allocated executable spaces in bytes.
-  intptr_t SizeExecutable() { return size_executable_; }
-
   // Returns maximum available bytes that the old space can have.
   intptr_t MaxAvailable() {
-    return (Available() / Page::kPageSize) * Page::kMaxRegularHeapObjectSize;
+    return (Available() / Page::kPageSize) * Page::kAllocatableMemory;
   }
 
   // Returns an indication of whether a pointer is in a space that has
   // been allocated by this MemoryAllocator.
-  V8_INLINE bool IsOutsideAllocatedSpace(const void* address) const {
-    return address < lowest_ever_allocated_ ||
-           address >= highest_ever_allocated_;
+  V8_INLINE bool IsOutsideAllocatedSpace(const void* address) {
+    return address < lowest_ever_allocated_.Value() ||
+           address >= highest_ever_allocated_.Value();
   }
 
 #ifdef DEBUG
@@ -1075,6 +1285,8 @@
 
   bool CommitMemory(Address addr, size_t size, Executability executable);
 
+  void FreeNewSpaceMemory(Address addr, base::VirtualMemory* reservation,
+                          Executability executable);
   void FreeMemory(base::VirtualMemory* reservation, Executability executable);
   void FreeMemory(Address addr, size_t size, Executability executable);
 
@@ -1119,7 +1331,7 @@
   static int PageAreaSize(AllocationSpace space) {
     DCHECK_NE(LO_SPACE, space);
     return (space == CODE_SPACE) ? CodePageAreaSize()
-                                 : Page::kMaxRegularHeapObjectSize;
+                                 : Page::kAllocatableMemory;
   }
 
   MUST_USE_RESULT bool CommitExecutableMemory(base::VirtualMemory* vm,
@@ -1130,22 +1342,22 @@
   Isolate* isolate_;
 
   // Maximum space size in bytes.
-  size_t capacity_;
+  intptr_t capacity_;
   // Maximum subset of capacity_ that can be executable
-  size_t capacity_executable_;
+  intptr_t capacity_executable_;
 
   // Allocated space size in bytes.
-  size_t size_;
+  AtomicNumber<intptr_t> size_;
   // Allocated executable space size in bytes.
-  size_t size_executable_;
+  AtomicNumber<intptr_t> size_executable_;
 
   // We keep the lowest and highest addresses allocated as a quick way
   // of determining that pointers are outside the heap. The estimate is
   // conservative, i.e. not all addresses in 'allocated' space are allocated
   // to our heap. The range is [lowest, highest[, inclusive on the low end
   // and exclusive on the high end.
-  void* lowest_ever_allocated_;
-  void* highest_ever_allocated_;
+  AtomicValue<void*> lowest_ever_allocated_;
+  AtomicValue<void*> highest_ever_allocated_;
 
   struct MemoryAllocationCallbackRegistration {
     MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,
@@ -1168,8 +1380,16 @@
                                PagedSpace* owner);
 
   void UpdateAllocatedSpaceLimits(void* low, void* high) {
-    lowest_ever_allocated_ = Min(lowest_ever_allocated_, low);
-    highest_ever_allocated_ = Max(highest_ever_allocated_, high);
+    // The use of atomic primitives does not guarantee correctness (wrt.
+    // desired semantics) by default. The loop here ensures that we update the
+    // values only if they did not change in between.
+    void* ptr = nullptr;
+    do {
+      ptr = lowest_ever_allocated_.Value();
+    } while ((low < ptr) && !lowest_ever_allocated_.TrySetValue(ptr, low));
+    do {
+      ptr = highest_ever_allocated_.Value();
+    } while ((high > ptr) && !highest_ever_allocated_.TrySetValue(ptr, high));
   }
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
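
For context on UpdateAllocatedSpaceLimits() and IsOutsideAllocatedSpace()
above: lowest_ever_allocated_ and highest_ever_allocated_ form a conservative
envelope, so the check can only prove that an address is definitely not heap
memory. A single-threaded sketch of that envelope; the atomics and the retry
loop shown above are deliberately omitted, and the type is invented for the
example.

    #include <cassert>
    #include <cstdint>

    // Conservative envelope of everything ever allocated: an address outside
    // [lowest, highest) is definitely not in the heap; one inside might be.
    struct AllocatedEnvelope {
      uintptr_t lowest = UINTPTR_MAX;
      uintptr_t highest = 0;

      void OnAllocation(uintptr_t low, uintptr_t high) {
        if (low < lowest) lowest = low;
        if (high > highest) highest = high;
      }

      bool IsOutsideAllocatedSpace(uintptr_t address) const {
        return address < lowest || address >= highest;
      }
    };

    int main() {
      AllocatedEnvelope envelope;
      envelope.OnAllocation(0x1000, 0x2000);
      envelope.OnAllocation(0x8000, 0x9000);
      assert(envelope.IsOutsideAllocatedSpace(0x0800));   // below: surely out
      assert(!envelope.IsOutsideAllocatedSpace(0x1800));  // inside a chunk
      assert(!envelope.IsOutsideAllocatedSpace(0x5000));  // gap: still "maybe"
      return 0;
    }
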
@@ -1204,31 +1424,20 @@
 class HeapObjectIterator : public ObjectIterator {
  public:
   // Creates a new object iterator in a given space.
-  // If the size function is not given, the iterator calls the default
-  // Object::Size().
   explicit HeapObjectIterator(PagedSpace* space);
-  HeapObjectIterator(PagedSpace* space, HeapObjectCallback size_func);
-  HeapObjectIterator(Page* page, HeapObjectCallback size_func);
+  explicit HeapObjectIterator(Page* page);
 
   // Advance to the next object, skipping free spaces and other fillers and
   // skipping the special garbage section of which there is one per space.
   // Returns NULL when the iteration has ended.
-  inline HeapObject* Next() {
-    do {
-      HeapObject* next_obj = FromCurrentPage();
-      if (next_obj != NULL) return next_obj;
-    } while (AdvanceToNextPage());
-    return NULL;
-  }
-
-  virtual HeapObject* next_object() { return Next(); }
+  inline HeapObject* Next();
+  inline HeapObject* next_object() override;
 
  private:
   enum PageMode { kOnePageOnly, kAllPagesInSpace };
 
   Address cur_addr_;              // Current iteration point.
   Address cur_end_;               // End iteration point.
-  HeapObjectCallback size_func_;  // Size function or NULL.
   PagedSpace* space_;
   PageMode page_mode_;
 
@@ -1241,7 +1450,7 @@
 
   // Initializes fields.
   inline void Initialize(PagedSpace* owner, Address start, Address end,
-                         PageMode mode, HeapObjectCallback size_func);
+                         PageMode mode);
 };
 
 
@@ -1272,32 +1481,33 @@
 // space.
 class AllocationInfo {
  public:
-  AllocationInfo() : top_(NULL), limit_(NULL) {}
+  AllocationInfo() : top_(nullptr), limit_(nullptr) {}
+  AllocationInfo(Address top, Address limit) : top_(top), limit_(limit) {}
+
+  void Reset(Address top, Address limit) {
+    set_top(top);
+    set_limit(limit);
+  }
 
   INLINE(void set_top(Address top)) {
     SLOW_DCHECK(top == NULL ||
-                (reinterpret_cast<intptr_t>(top) & HeapObjectTagMask()) == 0);
+                (reinterpret_cast<intptr_t>(top) & kHeapObjectTagMask) == 0);
     top_ = top;
   }
 
   INLINE(Address top()) const {
     SLOW_DCHECK(top_ == NULL ||
-                (reinterpret_cast<intptr_t>(top_) & HeapObjectTagMask()) == 0);
+                (reinterpret_cast<intptr_t>(top_) & kHeapObjectTagMask) == 0);
     return top_;
   }
 
   Address* top_address() { return &top_; }
 
   INLINE(void set_limit(Address limit)) {
-    SLOW_DCHECK(limit == NULL ||
-                (reinterpret_cast<intptr_t>(limit) & HeapObjectTagMask()) == 0);
     limit_ = limit;
   }
 
   INLINE(Address limit()) const {
-    SLOW_DCHECK(limit_ == NULL ||
-                (reinterpret_cast<intptr_t>(limit_) & HeapObjectTagMask()) ==
-                    0);
     return limit_;
   }
 
@@ -1319,19 +1529,11 @@
 
 
 // An abstraction of the accounting statistics of a page-structured space.
-// The 'capacity' of a space is the number of object-area bytes (i.e., not
-// including page bookkeeping structures) currently in the space. The 'size'
-// of a space is the number of allocated bytes, the 'waste' in the space is
-// the number of bytes that are not allocated and not available to
-// allocation without reorganizing the space via a GC (e.g. small blocks due
-// to internal fragmentation, top of page areas in map space), and the bytes
-// 'available' is the number of unallocated bytes that are not waste.  The
-// capacity is the sum of size, waste, and available.
 //
 // The stats are only set by functions that ensure they stay balanced. These
-// functions increase or decrease one of the non-capacity stats in
-// conjunction with capacity, or else they always balance increases and
-// decreases to the non-capacity stats.
+// functions increase or decrease one of the non-capacity stats in conjunction
+// with capacity, or else they always balance increases and decreases to the
+// non-capacity stats.
 class AllocationStats BASE_EMBEDDED {
  public:
   AllocationStats() { Clear(); }
@@ -1341,26 +1543,23 @@
     capacity_ = 0;
     max_capacity_ = 0;
     size_ = 0;
-    waste_ = 0;
   }
 
-  void ClearSizeWaste() {
-    size_ = capacity_;
-    waste_ = 0;
-  }
+  void ClearSize() { size_ = capacity_; }
 
-  // Reset the allocation statistics (i.e., available = capacity with no
-  // wasted or allocated bytes).
+  // Reset the allocation statistics (i.e., available = capacity with no wasted
+  // or allocated bytes).
   void Reset() {
     size_ = 0;
-    waste_ = 0;
   }
 
   // Accessors for the allocation statistics.
   intptr_t Capacity() { return capacity_; }
   intptr_t MaxCapacity() { return max_capacity_; }
-  intptr_t Size() { return size_; }
-  intptr_t Waste() { return waste_; }
+  intptr_t Size() {
+    CHECK_GE(size_, 0);
+    return size_;
+  }
 
   // Grow the space by adding available bytes.  They are initially marked as
   // being in use (part of the size), but will normally be immediately freed,
@@ -1371,7 +1570,7 @@
     if (capacity_ > max_capacity_) {
       max_capacity_ = capacity_;
     }
-    DCHECK(size_ >= 0);
+    CHECK(size_ >= 0);
   }
 
   // Shrink the space by removing available bytes.  Since shrinking is done
@@ -1380,183 +1579,151 @@
   void ShrinkSpace(int size_in_bytes) {
     capacity_ -= size_in_bytes;
     size_ -= size_in_bytes;
-    DCHECK(size_ >= 0);
+    CHECK(size_ >= 0);
   }
 
   // Allocate from available bytes (available -> size).
   void AllocateBytes(intptr_t size_in_bytes) {
     size_ += size_in_bytes;
-    DCHECK(size_ >= 0);
+    CHECK(size_ >= 0);
   }
 
   // Free allocated bytes, making them available (size -> available).
   void DeallocateBytes(intptr_t size_in_bytes) {
     size_ -= size_in_bytes;
-    DCHECK(size_ >= 0);
+    CHECK_GE(size_, 0);
   }
 
-  // Waste free bytes (available -> waste).
-  void WasteBytes(int size_in_bytes) {
-    DCHECK(size_in_bytes >= 0);
-    waste_ += size_in_bytes;
+  // Merge {other} into {this}.
+  void Merge(const AllocationStats& other) {
+    capacity_ += other.capacity_;
+    size_ += other.size_;
+    if (other.max_capacity_ > max_capacity_) {
+      max_capacity_ = other.max_capacity_;
+    }
+    CHECK_GE(size_, 0);
   }
 
+  void DecreaseCapacity(intptr_t size_in_bytes) {
+    capacity_ -= size_in_bytes;
+    CHECK_GE(capacity_, 0);
+    CHECK_GE(capacity_, size_);
+  }
+
+  void IncreaseCapacity(intptr_t size_in_bytes) { capacity_ += size_in_bytes; }
+
  private:
+  // |capacity_|: The number of object-area bytes (i.e., not including page
+  // bookkeeping structures) currently in the space.
   intptr_t capacity_;
+
+  // |max_capacity_|: The maximum capacity ever observed.
   intptr_t max_capacity_;
+
+  // |size_|: The number of allocated bytes.
   intptr_t size_;
-  intptr_t waste_;
 };
 
 
-// -----------------------------------------------------------------------------
-// Free lists for old object spaces
-//
-// Free-list nodes are free blocks in the heap.  They look like heap objects
-// (free-list node pointers have the heap object tag, and they have a map like
-// a heap object).  They have a size and a next pointer.  The next pointer is
-// the raw address of the next free list node (or NULL).
-class FreeListNode : public HeapObject {
- public:
-  // Obtain a free-list node from a raw address.  This is not a cast because
-  // it does not check nor require that the first word at the address is a map
-  // pointer.
-  static FreeListNode* FromAddress(Address address) {
-    return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address));
-  }
-
-  static inline bool IsFreeListNode(HeapObject* object);
-
-  // Set the size in bytes, which can be read with HeapObject::Size().  This
-  // function also writes a map to the first word of the block so that it
-  // looks like a heap object to the garbage collector and heap iteration
-  // functions.
-  void set_size(Heap* heap, int size_in_bytes);
-
-  // Accessors for the next field.
-  inline FreeListNode* next();
-  inline FreeListNode** next_address();
-  inline void set_next(FreeListNode* next);
-
-  inline void Zap();
-
-  static inline FreeListNode* cast(Object* object) {
-    return reinterpret_cast<FreeListNode*>(object);
-  }
-
- private:
-  static const int kNextOffset = POINTER_SIZE_ALIGN(FreeSpace::kHeaderSize);
-
-  DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode);
-};
-
-
-// The free list category holds a pointer to the top element and a pointer to
-// the end element of the linked list of free memory blocks.
+// A free list category maintains a linked list of free memory blocks.
 class FreeListCategory {
  public:
-  FreeListCategory() : top_(0), end_(NULL), available_(0) {}
+  explicit FreeListCategory(FreeList* owner, FreeListCategoryType type)
+      : type_(type),
+        top_(nullptr),
+        end_(nullptr),
+        available_(0),
+        owner_(owner) {}
 
+  // Concatenates {category} into {this}.
+  //
+  // Note: Thread-safe.
   intptr_t Concatenate(FreeListCategory* category);
 
   void Reset();
 
-  void Free(FreeListNode* node, int size_in_bytes);
+  void Free(FreeSpace* node, int size_in_bytes);
 
-  FreeListNode* PickNodeFromList(int* node_size);
-  FreeListNode* PickNodeFromList(int size_in_bytes, int* node_size);
+  // Pick a node from the list.
+  FreeSpace* PickNodeFromList(int* node_size);
+
+  // Pick a node from the list and compare it against {size_in_bytes}. If the
+  // node's size is greater than or equal, return the node; otherwise return
+  // null.
+  FreeSpace* PickNodeFromList(int size_in_bytes, int* node_size);
+
+  // Search for a node of size {size_in_bytes}.
+  FreeSpace* SearchForNodeInList(int size_in_bytes, int* node_size);
 
   intptr_t EvictFreeListItemsInList(Page* p);
   bool ContainsPageFreeListItemsInList(Page* p);
 
   void RepairFreeList(Heap* heap);
 
-  FreeListNode* top() const {
-    return reinterpret_cast<FreeListNode*>(base::NoBarrier_Load(&top_));
-  }
+  bool IsEmpty() { return top() == nullptr; }
 
-  void set_top(FreeListNode* top) {
-    base::NoBarrier_Store(&top_, reinterpret_cast<base::AtomicWord>(top));
-  }
-
-  FreeListNode** GetEndAddress() { return &end_; }
-  FreeListNode* end() const { return end_; }
-  void set_end(FreeListNode* end) { end_ = end; }
-
-  int* GetAvailableAddress() { return &available_; }
+  FreeList* owner() { return owner_; }
   int available() const { return available_; }
-  void set_available(int available) { available_ = available; }
-
-  base::Mutex* mutex() { return &mutex_; }
-
-  bool IsEmpty() { return top() == 0; }
 
 #ifdef DEBUG
   intptr_t SumFreeList();
   int FreeListLength();
+  bool IsVeryLong();
 #endif
 
  private:
-  // top_ points to the top FreeListNode* in the free list category.
-  base::AtomicWord top_;
-  FreeListNode* end_;
-  base::Mutex mutex_;
+  // For debug builds we accurately compute free list lengths up until
+  // {kVeryLongFreeList} by manually walking the list.
+  static const int kVeryLongFreeList = 500;
 
-  // Total available bytes in all blocks of this free list category.
+  FreeSpace* top() { return top_.Value(); }
+  void set_top(FreeSpace* top) { top_.SetValue(top); }
+
+  FreeSpace* end() const { return end_; }
+  void set_end(FreeSpace* end) { end_ = end; }
+
+  // |type_|: The type of this free list category.
+  FreeListCategoryType type_;
+
+  // |top_|: Points to the top FreeSpace* in the free list category.
+  AtomicValue<FreeSpace*> top_;
+
+  // |end_|: Points to the end FreeSpace* in the free list category.
+  FreeSpace* end_;
+
+  // |available_|: Total available bytes in all blocks of this free list
+  //   category.
   int available_;
+
+  // |owner_|: The owning free list of this category.
+  FreeList* owner_;
 };
 
-
-// The free list for the old space.  The free list is organized in such a way
-// as to encourage objects allocated around the same time to be near each
-// other.  The normal way to allocate is intended to be by bumping a 'top'
+// A free list maintaining free blocks of memory. The free list is organized in
+// a way to encourage objects allocated around the same time to be near each
+// other. The normal way to allocate is intended to be by bumping a 'top'
 // pointer until it hits a 'limit' pointer.  When the limit is hit we need to
-// find a new space to allocate from.  This is done with the free list, which
-// is divided up into rough categories to cut down on waste.  Having finer
+// find a new space to allocate from. This is done with the free list, which is
+// divided up into rough categories to cut down on waste. Having finer
 // categories would scatter allocation more.
 
-// The old space free list is organized in categories.
-// 1-31 words:  Such small free areas are discarded for efficiency reasons.
-//     They can be reclaimed by the compactor.  However the distance between top
-//     and limit may be this small.
-// 32-255 words: There is a list of spaces this large.  It is used for top and
-//     limit when the object we need to allocate is 1-31 words in size.  These
-//     spaces are called small.
-// 256-2047 words: There is a list of spaces this large.  It is used for top and
-//     limit when the object we need to allocate is 32-255 words in size.  These
-//     spaces are called medium.
-// 1048-16383 words: There is a list of spaces this large.  It is used for top
-//     and limit when the object we need to allocate is 256-2047 words in size.
-//     These spaces are call large.
-// At least 16384 words.  This list is for objects of 2048 words or larger.
-//     Empty pages are added to this list.  These spaces are called huge.
+// The free list is organized in categories as follows:
+// 1-31 words (too small): Such small free areas are discarded for efficiency
+//   reasons. They can be reclaimed by the compactor. However the distance
+//   between top and limit may be this small.
+// 32-255 words (small): Used for allocating free space between 1-31 words in
+//   size.
+// 256-2047 words (medium): Used for allocating free space between 32-255 words
+//   in size.
+// 2048-16383 words (large): Used for allocating free space between 256-2047
+//   words in size.
+// At least 16384 words (huge): This list is for objects of 2048 words or
+//   larger. Empty pages are also added to this list.
 class FreeList {
  public:
-  explicit FreeList(PagedSpace* owner);
-
-  intptr_t Concatenate(FreeList* free_list);
-
-  // Clear the free list.
-  void Reset();
-
-  // Return the number of bytes available on the free list.
-  intptr_t available() {
-    return small_list_.available() + medium_list_.available() +
-           large_list_.available() + huge_list_.available();
-  }
-
-  // Place a node on the free list.  The block of size 'size_in_bytes'
-  // starting at 'start' is placed on the free list.  The return value is the
-  // number of bytes that have been lost due to internal fragmentation by
-  // freeing the block.  Bookkeeping information will be written to the block,
-  // i.e., its contents will be destroyed.  The start address should be word
-  // aligned, and the size should be a non-zero multiple of the word size.
-  int Free(Address start, int size_in_bytes);
-
   // This method returns how much memory can be allocated after freeing
   // maximum_freed memory.
   static inline int GuaranteedAllocatable(int maximum_freed) {
-    if (maximum_freed < kSmallListMin) {
+    if (maximum_freed <= kSmallListMin) {
       return 0;
     } else if (maximum_freed <= kSmallListMax) {
       return kSmallAllocationMax;
@@ -1568,51 +1735,103 @@
     return maximum_freed;
   }
 
-  // Allocate a block of size 'size_in_bytes' from the free list.  The block
-  // is unitialized.  A failure is returned if no block is available.  The
-  // number of bytes lost to fragmentation is returned in the output parameter
-  // 'wasted_bytes'.  The size should be a non-zero multiple of the word size.
+  explicit FreeList(PagedSpace* owner);
+
+  // The method concatenates {other} into {this} and returns the added bytes,
+  // including waste.
+  //
+  // Note: Thread-safe.
+  intptr_t Concatenate(FreeList* other);
+
+  // Adds a node on the free list. The block of size {size_in_bytes} starting
+  // at {start} is placed on the free list. The return value is the number of
+  // bytes that were not added to the free list because the freed memory block
+  // was too small. Bookkeeping information will be written to the block, i.e.,
+  // its contents will be destroyed. The start address should be word aligned,
+  // and the size should be a non-zero multiple of the word size.
+  int Free(Address start, int size_in_bytes);
+
+  // Allocate a block of size {size_in_bytes} from the free list. The block is
+  // uninitialized. A failure is returned if no block is available. The size
+  // should be a non-zero multiple of the word size.
   MUST_USE_RESULT HeapObject* Allocate(int size_in_bytes);
 
+  // Clear the free list.
+  void Reset();
+
+  void ResetStats() { wasted_bytes_ = 0; }
+
+  // Return the number of bytes available on the free list.
+  intptr_t Available() {
+    return small_list_.available() + medium_list_.available() +
+           large_list_.available() + huge_list_.available();
+  }
+
+  // The method tries to find a {FreeSpace} node of at least {size_in_bytes}
+  // in the free list category whose size class matches the request. If no
+  // suitable node is found, the method falls back to retrieving a {FreeSpace}
+  // from the large or huge free list category.
+  //
+  // Can be used concurrently.
+  MUST_USE_RESULT FreeSpace* TryRemoveMemory(intptr_t hint_size_in_bytes);
+
   bool IsEmpty() {
     return small_list_.IsEmpty() && medium_list_.IsEmpty() &&
            large_list_.IsEmpty() && huge_list_.IsEmpty();
   }
 
-#ifdef DEBUG
-  void Zap();
-  intptr_t SumFreeLists();
-  bool IsVeryLong();
-#endif
-
   // Used after booting the VM.
   void RepairLists(Heap* heap);
 
   intptr_t EvictFreeListItems(Page* p);
   bool ContainsPageFreeListItems(Page* p);
 
-  FreeListCategory* small_list() { return &small_list_; }
-  FreeListCategory* medium_list() { return &medium_list_; }
-  FreeListCategory* large_list() { return &large_list_; }
-  FreeListCategory* huge_list() { return &huge_list_; }
+  PagedSpace* owner() { return owner_; }
+  intptr_t wasted_bytes() { return wasted_bytes_; }
+  base::Mutex* mutex() { return &mutex_; }
+
+#ifdef DEBUG
+  void Zap();
+  intptr_t SumFreeLists();
+  bool IsVeryLong();
+#endif
 
  private:
   // The size range of blocks, in bytes.
   static const int kMinBlockSize = 3 * kPointerSize;
-  static const int kMaxBlockSize = Page::kMaxRegularHeapObjectSize;
+  static const int kMaxBlockSize = Page::kAllocatableMemory;
 
-  FreeListNode* FindNodeFor(int size_in_bytes, int* node_size);
-
-  PagedSpace* owner_;
-  Heap* heap_;
-
-  static const int kSmallListMin = 0x20 * kPointerSize;
+  static const int kSmallListMin = 0x1f * kPointerSize;
   static const int kSmallListMax = 0xff * kPointerSize;
   static const int kMediumListMax = 0x7ff * kPointerSize;
   static const int kLargeListMax = 0x3fff * kPointerSize;
-  static const int kSmallAllocationMax = kSmallListMin - kPointerSize;
+  static const int kSmallAllocationMax = kSmallListMin;
   static const int kMediumAllocationMax = kSmallListMax;
   static const int kLargeAllocationMax = kMediumListMax;
+
+  FreeSpace* FindNodeFor(int size_in_bytes, int* node_size);
+  FreeSpace* FindNodeIn(FreeListCategoryType category, int* node_size);
+
+  FreeListCategory* GetFreeListCategory(FreeListCategoryType category) {
+    switch (category) {
+      case kSmall:
+        return &small_list_;
+      case kMedium:
+        return &medium_list_;
+      case kLarge:
+        return &large_list_;
+      case kHuge:
+        return &huge_list_;
+      default:
+        UNREACHABLE();
+    }
+    UNREACHABLE();
+    return nullptr;
+  }
+
+  PagedSpace* owner_;
+  base::Mutex mutex_;
+  intptr_t wasted_bytes_;
   FreeListCategory small_list_;
   FreeListCategory medium_list_;
   FreeListCategory large_list_;
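For orientation, the size-class constants above (kSmallListMin = 0x1f words, kSmallListMax = 0xff words, kMediumListMax = 0x7ff words, kLargeListMax = 0x3fff words) imply the category selection sketched below. This is a standalone illustration with made-up names (PickCategory, kSmallCat, ...), assuming 8-byte pointers and the boundary semantics described in the class comment; it is not V8 code.

#include <cstdio>

enum Category { kTooSmall, kSmallCat, kMediumCat, kLargeCat, kHugeCat };

// Maps a free block size (in bytes) to the free list category it would land
// in, mirroring the thresholds documented above.
Category PickCategory(int size_in_bytes) {
  const int kPointerSize = 8;
  const int kSmallListMin = 0x1f * kPointerSize;    // 31 words: "too small".
  const int kSmallListMax = 0xff * kPointerSize;    // 255 words.
  const int kMediumListMax = 0x7ff * kPointerSize;  // 2047 words.
  const int kLargeListMax = 0x3fff * kPointerSize;  // 16383 words.
  if (size_in_bytes <= kSmallListMin) return kTooSmall;  // Discarded as waste.
  if (size_in_bytes <= kSmallListMax) return kSmallCat;
  if (size_in_bytes <= kMediumListMax) return kMediumCat;
  if (size_in_bytes <= kLargeListMax) return kLargeCat;
  return kHugeCat;
}

int main() {
  std::printf("%d\n", PickCategory(100 * 8));   // 100 words  -> kSmallCat.
  std::printf("%d\n", PickCategory(5000 * 8));  // 5000 words -> kLargeCat.
  return 0;
}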
@@ -1652,10 +1871,7 @@
     return object_;
   }
 
-  AllocationSpace RetrySpace() {
-    DCHECK(IsRetry());
-    return static_cast<AllocationSpace>(Smi::cast(object_)->value());
-  }
+  inline AllocationSpace RetrySpace();
 
  private:
   explicit AllocationResult(AllocationSpace space)
@@ -1668,13 +1884,68 @@
 STATIC_ASSERT(sizeof(AllocationResult) == kPointerSize);
 
 
+// LocalAllocationBuffer represents a linear allocation area that is created
+// from a given {AllocationResult} and can be used to allocate memory without
+// synchronization.
+//
+// The buffer is properly closed upon destruction and reassignment.
+// Example:
+//   {
+//     AllocationResult result = ...;
+//     LocalAllocationBuffer a(heap, result, size);
+//     LocalAllocationBuffer b = a;
+//     CHECK(!a.IsValid());
+//     CHECK(b.IsValid());
+//     // {a} is invalid now and cannot be used for further allocations.
+//   }
+//   // Since {b} went out of scope, the LAB is closed, resulting in creating a
+//   // filler object for the remaining area.
+class LocalAllocationBuffer {
+ public:
+  // Indicates that a buffer cannot be used for allocations anymore. Can result
+  // from either reassigning a buffer, or trying to construct it from an
+  // invalid {AllocationResult}.
+  static inline LocalAllocationBuffer InvalidBuffer();
+
+  // Creates a new LAB from a given {AllocationResult}. Results in
+  // InvalidBuffer if the result indicates a retry.
+  static inline LocalAllocationBuffer FromResult(Heap* heap,
+                                                 AllocationResult result,
+                                                 intptr_t size);
+
+  ~LocalAllocationBuffer() { Close(); }
+
+  // Convert to C++11 move-semantics once allowed by the style guide.
+  LocalAllocationBuffer(const LocalAllocationBuffer& other);
+  LocalAllocationBuffer& operator=(const LocalAllocationBuffer& other);
+
+  MUST_USE_RESULT inline AllocationResult AllocateRawAligned(
+      int size_in_bytes, AllocationAlignment alignment);
+
+  inline bool IsValid() { return allocation_info_.top() != nullptr; }
+
+  // Try to merge LABs, which is only possible when they are adjacent in memory.
+  // Returns true if the merge was successful, false otherwise.
+  inline bool TryMerge(LocalAllocationBuffer* other);
+
+ private:
+  LocalAllocationBuffer(Heap* heap, AllocationInfo allocation_info);
+
+  void Close();
+
+  Heap* heap_;
+  AllocationInfo allocation_info_;
+};
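A minimal standalone sketch of the ownership-transfer behavior documented in the class comment: copying a buffer invalidates the source, so at most one object owns the underlying linear allocation area, and allocation itself is a plain bump of the top pointer with no synchronization. The MiniLab type below is illustrative only and much simpler than the real class (for example, it does not write filler objects when closed).

#include <cassert>
#include <cstddef>

class MiniLab {
 public:
  MiniLab() : top_(nullptr), limit_(nullptr) {}
  MiniLab(char* top, char* limit) : top_(top), limit_(limit) {}

  // Copying transfers ownership (a pre-C++11 emulation of move semantics):
  // the source becomes invalid and can no longer allocate.
  MiniLab(const MiniLab& other) : top_(other.top_), limit_(other.limit_) {
    const_cast<MiniLab&>(other).top_ = nullptr;
  }

  bool IsValid() const { return top_ != nullptr; }

  // Unsynchronized bump-pointer allocation within the buffer.
  void* Allocate(size_t size) {
    if (!IsValid() || static_cast<size_t>(limit_ - top_) < size) return nullptr;
    void* result = top_;
    top_ += size;
    return result;
  }

 private:
  char* top_;
  char* limit_;
};

int main() {
  static char backing[256];
  MiniLab a(backing, backing + sizeof(backing));
  MiniLab b = a;  // |a| becomes invalid; |b| now owns the area.
  assert(!a.IsValid());
  assert(b.IsValid());
  void* p = b.Allocate(32);
  assert(p != nullptr);
  (void)p;
  return 0;
}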
+
+
 class PagedSpace : public Space {
  public:
-  // Creates a space with a maximum capacity, and an id.
-  PagedSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id,
-             Executability executable);
+  static const intptr_t kCompactionMemoryWanted = 500 * KB;
 
-  virtual ~PagedSpace() {}
+  // Creates a space with an id.
+  PagedSpace(Heap* heap, AllocationSpace id, Executability executable);
+
+  ~PagedSpace() override { TearDown(); }
 
   // Set up the space using the given address range of virtual memory (from
   // the memory allocator's initial chunk) if possible.  If the block of
@@ -1686,13 +1957,12 @@
   // subsequently torn down.
   bool HasBeenSetUp();
 
-  // Cleans up the space, frees all pages in this space except those belonging
-  // to the initial chunk, uncommits addresses in the initial chunk.
-  void TearDown();
-
   // Checks whether an object/address is in this space.
   inline bool Contains(Address a);
-  bool Contains(HeapObject* o) { return Contains(o->address()); }
+  inline bool Contains(HeapObject* o);
+  // Unlike the Contains() methods, it is safe to call this one even for
+  // addresses of unmapped memory.
+  bool ContainsSafe(Address addr);
 
   // Given an address occupied by a live object, return that object if it is
   // in this space, or a Smi if it is not.  The implementation iterates over
@@ -1702,7 +1972,7 @@
 
   // During boot the free_space_map is created, and afterwards we may need
   // to write it into the free list nodes that were already created.
-  void RepairFreeListsAfterBoot();
+  void RepairFreeListsAfterDeserialization();
 
   // Prepares for a mark-compact GC.
   void PrepareForMarkCompact();
@@ -1710,28 +1980,9 @@
   // Current capacity without growing (Size() + Available()).
   intptr_t Capacity() { return accounting_stats_.Capacity(); }
 
-  // Total amount of memory committed for this space.  For paged
-  // spaces this equals the capacity.
-  intptr_t CommittedMemory() { return Capacity(); }
-
-  // The maximum amount of memory ever committed for this space.
-  intptr_t MaximumCommittedMemory() { return accounting_stats_.MaxCapacity(); }
-
   // Approximate amount of physical memory committed for this space.
-  size_t CommittedPhysicalMemory();
+  size_t CommittedPhysicalMemory() override;
 
-  struct SizeStats {
-    intptr_t Total() {
-      return small_size_ + medium_size_ + large_size_ + huge_size_;
-    }
-
-    intptr_t small_size_;
-    intptr_t medium_size_;
-    intptr_t large_size_;
-    intptr_t huge_size_;
-  };
-
-  void ObtainFreeListStatistics(Page* p, SizeStats* sizes);
   void ResetFreeListStatistics();
 
   // Sets the capacity, the available space and the wasted space to zero.
@@ -1740,7 +1991,8 @@
   // discovered during the sweeping they are subtracted from the size and added
   // to the available and wasted totals.
   void ClearStats() {
-    accounting_stats_.ClearSizeWaste();
+    accounting_stats_.ClearSize();
+    free_list_.ResetStats();
     ResetFreeListStatistics();
   }
 
@@ -1753,22 +2005,21 @@
   // The bytes in the linear allocation area are not included in this total
   // because updating the stats would slow down allocation.  New pages are
   // immediately added to the free list so they show up here.
-  intptr_t Available() { return free_list_.available(); }
+  intptr_t Available() override { return free_list_.Available(); }
 
   // Allocated bytes in this space.  Garbage bytes that were not found due to
   // concurrent sweeping are counted as being allocated!  The bytes in the
   // current linear allocation area (between top and limit) are also counted
   // here.
-  virtual intptr_t Size() { return accounting_stats_.Size(); }
+  intptr_t Size() override { return accounting_stats_.Size(); }
 
   // As size, but the bytes in lazily swept pages are estimated and the bytes
   // in the current linear allocation area are not included.
-  virtual intptr_t SizeOfObjects();
+  intptr_t SizeOfObjects() override;
 
   // Wasted bytes in this space.  These are just the bytes that were thrown away
-  // due to being too small to use for allocation.  They do not include the
-  // free bytes that were not found at all due to lazy sweeping.
-  virtual intptr_t Waste() { return accounting_stats_.Waste(); }
+  // due to being too small to use for allocation.
+  virtual intptr_t Waste() { return free_list_.wasted_bytes(); }
 
   // Returns the allocation pointer in this space.
   Address top() { return allocation_info_.top(); }
@@ -1784,7 +2035,21 @@
 
   // Allocate the requested number of bytes in the space if possible, return a
   // failure object if not.
-  MUST_USE_RESULT inline AllocationResult AllocateRaw(int size_in_bytes);
+  MUST_USE_RESULT inline AllocationResult AllocateRawUnaligned(
+      int size_in_bytes);
+
+  MUST_USE_RESULT inline AllocationResult AllocateRawUnalignedSynchronized(
+      int size_in_bytes);
+
+  // Allocate the requested number of bytes in the space with the given
+  // alignment if possible, return a failure object if not.
+  MUST_USE_RESULT inline AllocationResult AllocateRawAligned(
+      int size_in_bytes, AllocationAlignment alignment);
+
+  // Allocate the requested number of bytes in the space and consider allocation
+  // alignment if needed.
+  MUST_USE_RESULT inline AllocationResult AllocateRaw(
+      int size_in_bytes, AllocationAlignment alignment);
 
   // Give a block of memory to the space's free list.  It might be added to
   // the free list or accounted as waste.
@@ -1793,7 +2058,6 @@
   int Free(Address start, int size_in_bytes) {
     int wasted = free_list_.Free(start, size_in_bytes);
     accounting_stats_.DeallocateBytes(size_in_bytes);
-    accounting_stats_.WasteBytes(wasted);
     return size_in_bytes - wasted;
   }
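A small worked example of the waste accounting in Free() above, under the assumption (taken from the FreeList comment) that blocks of at most kSmallListMin bytes are discarded entirely while larger blocks are kept in full; 8-byte pointers are assumed. This illustrates the arithmetic only, not the real free-list logic.

#include <cassert>

// Net number of bytes Free() would hand back to the free list for reuse.
int NetBytesAddedToFreeList(int size_in_bytes) {
  const int kPointerSize = 8;
  const int kSmallListMin = 0x1f * kPointerSize;  // 248 bytes (31 words).
  int wasted = (size_in_bytes <= kSmallListMin) ? size_in_bytes : 0;
  return size_in_bytes - wasted;
}

int main() {
  assert(NetBytesAddedToFreeList(128) == 0);      // 16 words: all wasted.
  assert(NetBytesAddedToFreeList(4096) == 4096);  // 512 words: fully reusable.
  return 0;
}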
 
@@ -1804,8 +2068,7 @@
     DCHECK(top == limit ||
            Page::FromAddress(top) == Page::FromAddress(limit - 1));
     MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
-    allocation_info_.set_top(top);
-    allocation_info_.set_limit(limit);
+    allocation_info_.Reset(top, limit);
   }
 
   // Empty space allocation info, returning unused area to free list.
@@ -1838,7 +2101,7 @@
 
 #ifdef DEBUG
   // Print meta info and objects in this space.
-  virtual void Print();
+  void Print() override;
 
   // Reports statistics for the space
   void ReportStatistics();
@@ -1856,22 +2119,6 @@
            !p->IsFlagSet(Page::RESCAN_ON_EVACUATION) && !p->WasSwept();
   }
 
-  void IncrementUnsweptFreeBytes(intptr_t by) { unswept_free_bytes_ += by; }
-
-  void IncreaseUnsweptFreeBytes(Page* p) {
-    DCHECK(ShouldBeSweptBySweeperThreads(p));
-    unswept_free_bytes_ += (p->area_size() - p->LiveBytes());
-  }
-
-  void DecrementUnsweptFreeBytes(intptr_t by) { unswept_free_bytes_ -= by; }
-
-  void DecreaseUnsweptFreeBytes(Page* p) {
-    DCHECK(ShouldBeSweptBySweeperThreads(p));
-    unswept_free_bytes_ -= (p->area_size() - p->LiveBytes());
-  }
-
-  void ResetUnsweptFreeBytes() { unswept_free_bytes_ = 0; }
-
   // This function tries to steal size_in_bytes memory from the sweeper threads
   // free-lists. If it does not succeed stealing enough memory, it will wait
   // for the sweeper threads to finish sweeping.
@@ -1885,9 +2132,9 @@
   Page* FirstPage() { return anchor_.next_page(); }
   Page* LastPage() { return anchor_.prev_page(); }
 
-  void EvictEvacuationCandidatesFromFreeLists();
+  void EvictEvacuationCandidatesFromLinearAllocationArea();
 
-  bool CanExpand();
+  bool CanExpand(size_t size);
 
   // Returns the number of total pages in this space.
   int CountTotalPages();
@@ -1895,22 +2142,64 @@
   // Return size of allocatable area on a page in this space.
   inline int AreaSize() { return area_size_; }
 
-  void CreateEmergencyMemory();
-  void FreeEmergencyMemory();
-  void UseEmergencyMemory();
+  virtual bool is_local() { return false; }
 
-  bool HasEmergencyMemory() { return emergency_memory_ != NULL; }
+  // Merges {other} into the current space. Note that this modifies {other},
+  // e.g., removes its bump pointer area and resets statistics.
+  void MergeCompactionSpace(CompactionSpace* other);
+
+  void DivideUponCompactionSpaces(CompactionSpaceCollection** other, int num,
+                                  intptr_t limit = kCompactionMemoryWanted);
+
+  // Refills the free list from the corresponding free list filled by the
+  // sweeper.
+  virtual void RefillFreeList();
 
  protected:
+  void AddMemory(Address start, intptr_t size);
+
+  FreeSpace* TryRemoveMemory(intptr_t size_in_bytes);
+
+  void MoveOverFreeMemory(PagedSpace* other);
+
+  // PagedSpaces that should be included in snapshots have different, i.e.,
+  // smaller, initial pages.
+  virtual bool snapshotable() { return true; }
+
   FreeList* free_list() { return &free_list_; }
 
+  bool HasPages() { return anchor_.next_page() != &anchor_; }
+
+  // Cleans up the space, frees all pages in this space except those belonging
+  // to the initial chunk, uncommits addresses in the initial chunk.
+  void TearDown();
+
+  // Expands the space by allocating a fixed number of pages. Returns false if
+  // it cannot allocate the requested number of pages from the OS, or if the
+  // hard heap size limit has been hit.
+  bool Expand();
+
+  // Generic fast case allocation function that tries linear allocation at the
+  // address denoted by top in allocation_info_.
+  inline HeapObject* AllocateLinearly(int size_in_bytes);
+
+  // Generic fast case allocation function that tries aligned linear allocation
+  // at the address denoted by top in allocation_info_. Writes the aligned
+  // allocation size, which includes the filler size, to size_in_bytes.
+  inline HeapObject* AllocateLinearlyAligned(int* size_in_bytes,
+                                             AllocationAlignment alignment);
+
+  // If sweeping is still in progress try to sweep unswept pages. If that is
+  // not successful, wait for the sweeper threads and re-try free-list
+  // allocation.
+  MUST_USE_RESULT virtual HeapObject* SweepAndRetryAllocation(
+      int size_in_bytes);
+
+  // Slow path of AllocateRaw.  This function is space-dependent.
+  MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);
+
   int area_size_;
 
-  // Maximum capacity of this space.
-  intptr_t max_capacity_;
-
-  intptr_t SizeOfFirstPage();
-
   // Accounting information for this space.
   AllocationStats accounting_stats_;
 
@@ -1923,41 +2212,19 @@
   // Normal allocation information.
   AllocationInfo allocation_info_;
 
-  // The number of free bytes which could be reclaimed by advancing the
-  // concurrent sweeper threads.
-  intptr_t unswept_free_bytes_;
-
   // The sweeper threads iterate over the list of pointer and data space pages
   // and sweep these pages concurrently. They will stop sweeping after the
   // end_of_unswept_pages_ page.
   Page* end_of_unswept_pages_;
 
-  // Emergency memory is the memory of a full page for a given space, allocated
-  // conservatively before evacuating a page. If compaction fails due to out
-  // of memory error the emergency memory can be used to complete compaction.
-  // If not used, the emergency memory is released after compaction.
-  MemoryChunk* emergency_memory_;
+  // Mutex guarding any concurrent access to the space.
+  base::Mutex space_mutex_;
 
-  // Expands the space by allocating a fixed number of pages. Returns false if
-  // it cannot allocate requested number of pages from OS, or if the hard heap
-  // size limit has been hit.
-  bool Expand();
-
-  // Generic fast case allocation function that tries linear allocation at the
-  // address denoted by top in allocation_info_.
-  inline HeapObject* AllocateLinearly(int size_in_bytes);
-
-  // If sweeping is still in progress try to sweep unswept pages. If that is
-  // not successful, wait for the sweeper threads and re-try free-list
-  // allocation.
-  MUST_USE_RESULT HeapObject* WaitForSweeperThreadsAndRetryAllocation(
-      int size_in_bytes);
-
-  // Slow path of AllocateRaw.  This function is space-dependent.
-  MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);
-
-  friend class PageIterator;
   friend class MarkCompactCollector;
+  friend class PageIterator;
+
+  // Used in cctest.
+  friend class HeapTester;
 };
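The fast path mentioned above (AllocateLinearly / AllocateLinearlyAligned) is bump-pointer allocation: advance top by the requested size, inserting a filler in front of the object when alignment demands it, and report the total including the filler. A standalone sketch, with plain pointers standing in for the allocation info and illustrative names only:

#include <cassert>
#include <cstdint>

// Bump-pointer allocation with alignment. On success, *size_in_bytes is
// updated to include the filler, mirroring what the comment on
// AllocateLinearlyAligned describes. Returns nullptr when the linear area is
// exhausted, in which case a real allocator would fall back to a slow path.
char* BumpAllocate(char** top, char* limit, int* size_in_bytes, int alignment) {
  std::uintptr_t addr = reinterpret_cast<std::uintptr_t>(*top);
  int filler = static_cast<int>((alignment - addr % alignment) % alignment);
  int total = *size_in_bytes + filler;
  if (limit - *top < total) return nullptr;
  char* object = *top + filler;
  *top += total;
  *size_in_bytes = total;
  return object;
}

int main() {
  alignas(16) static char area[64];
  char* top = area + 4;  // Deliberately misaligned start.
  int size = 16;
  char* obj = BumpAllocate(&top, area + sizeof(area), &size, 8);
  assert(obj != nullptr);
  assert(reinterpret_cast<std::uintptr_t>(obj) % 8 == 0);
  assert(size == 16 + 4);  // Four filler bytes were needed.
  return 0;
}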
 
 
@@ -2011,15 +2278,15 @@
       (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
       (1 << MemoryChunk::SCAN_ON_SCAVENGE);
 
-  static const int kAreaSize = Page::kMaxRegularHeapObjectSize;
+  static const int kAreaSize = Page::kAllocatableMemory;
 
-  inline NewSpacePage* next_page() const {
+  inline NewSpacePage* next_page() {
     return static_cast<NewSpacePage*>(next_chunk());
   }
 
   inline void set_next_page(NewSpacePage* page) { set_next_chunk(page); }
 
-  inline NewSpacePage* prev_page() const {
+  inline NewSpacePage* prev_page() {
     return static_cast<NewSpacePage*>(prev_chunk());
   }
 
@@ -2164,12 +2431,21 @@
   }
 
   // If we don't have these here then SemiSpace will be abstract.  However
-  // they should never be called.
-  virtual intptr_t Size() {
+  // they should never be called:
+
+  intptr_t Size() override {
     UNREACHABLE();
     return 0;
   }
 
+  intptr_t SizeOfObjects() override { return Size(); }
+
+  intptr_t Available() override {
+    UNREACHABLE();
+    return 0;
+  }
+
+
   bool is_committed() { return committed_; }
   bool Commit();
   bool Uncommit();
@@ -2182,7 +2458,7 @@
 #endif
 
 #ifdef DEBUG
-  virtual void Print();
+  void Print() override;
   // Validate a range of of addresses in a SemiSpace.
   // The "from" address must be on a page prior to the "to" address,
   // in the linked page order, or it must be earlier on the same page.
@@ -2208,11 +2484,8 @@
 
   static void Swap(SemiSpace* from, SemiSpace* to);
 
-  // Returns the maximum amount of memory ever committed by the semi space.
-  size_t MaximumCommittedMemory() { return maximum_committed_; }
-
   // Approximate amount of physical memory committed for this space.
-  size_t CommittedPhysicalMemory();
+  size_t CommittedPhysicalMemory() override;
 
  private:
   // Flips the semispace between being from-space and to-space.
@@ -2230,8 +2503,6 @@
   int maximum_total_capacity_;
   int initial_total_capacity_;
 
-  intptr_t maximum_committed_;
-
   // The start address of the space.
   Address start_;
   // Used to govern object promotion during mark-compact collection.
@@ -2250,9 +2521,6 @@
 
   friend class SemiSpaceIterator;
   friend class NewSpacePageIterator;
-
- public:
-  TRACK_MEMORY("SemiSpace")
 };
 
 
@@ -2263,49 +2531,21 @@
 // iterator is created are not iterated.
 class SemiSpaceIterator : public ObjectIterator {
  public:
-  // Create an iterator over the objects in the given space.  If no start
-  // address is given, the iterator starts from the bottom of the space.  If
-  // no size function is given, the iterator calls Object::Size().
-
-  // Iterate over all of allocated to-space.
+  // Create an iterator over the allocated objects in the given to-space.
   explicit SemiSpaceIterator(NewSpace* space);
-  // Iterate over all of allocated to-space, with a custome size function.
-  SemiSpaceIterator(NewSpace* space, HeapObjectCallback size_func);
-  // Iterate over part of allocated to-space, from start to the end
-  // of allocation.
-  SemiSpaceIterator(NewSpace* space, Address start);
-  // Iterate from one address to another in the same semi-space.
-  SemiSpaceIterator(Address from, Address to);
 
-  HeapObject* Next() {
-    if (current_ == limit_) return NULL;
-    if (NewSpacePage::IsAtEnd(current_)) {
-      NewSpacePage* page = NewSpacePage::FromLimit(current_);
-      page = page->next_page();
-      DCHECK(!page->is_anchor());
-      current_ = page->area_start();
-      if (current_ == limit_) return NULL;
-    }
-
-    HeapObject* object = HeapObject::FromAddress(current_);
-    int size = (size_func_ == NULL) ? object->Size() : size_func_(object);
-
-    current_ += size;
-    return object;
-  }
+  inline HeapObject* Next();
 
   // Implementation of the ObjectIterator functions.
-  virtual HeapObject* next_object() { return Next(); }
+  inline HeapObject* next_object() override;
 
  private:
-  void Initialize(Address start, Address end, HeapObjectCallback size_func);
+  void Initialize(Address start, Address end);
 
   // The current iteration point.
   Address current_;
   // The end of iteration.
   Address limit_;
-  // The callback function.
-  HeapObjectCallback size_func_;
 };
 
 
@@ -2336,6 +2576,54 @@
   NewSpacePage* last_page_;
 };
 
+// -----------------------------------------------------------------------------
+// Allows observation of inline allocation in the new space.
+class InlineAllocationObserver {
+ public:
+  explicit InlineAllocationObserver(intptr_t step_size)
+      : step_size_(step_size), bytes_to_next_step_(step_size) {
+    DCHECK(step_size >= kPointerSize);
+  }
+  virtual ~InlineAllocationObserver() {}
+
+ private:
+  intptr_t step_size() const { return step_size_; }
+  intptr_t bytes_to_next_step() const { return bytes_to_next_step_; }
+
+  // Pure virtual method provided by the subclasses that gets called when at
+  // least step_size bytes have been allocated. soon_object is the address just
+  // allocated (but not yet initialized.) size is the size of the object as
+  // requested (i.e. w/o the alignment fillers). Some complexities to be aware
+  // of:
+  // 1) soon_object will be nullptr in cases where we end up observing an
+  //    allocation that happens to be a filler space (e.g. page boundaries.)
+  // 2) size is the requested size at the time of allocation. Right-trimming
+  //    may change the object size dynamically.
+  // 3) soon_object may actually be the first object in an allocation-folding
+  //    group. In such a case size is the size of the group rather than the
+  //    first object.
+  virtual void Step(int bytes_allocated, Address soon_object, size_t size) = 0;
+
+  // Called each time the new space does an inline allocation step. This may
+  // happen more often than every step_size bytes (e.g. when there are multiple
+  // observers, or when a page or space boundary is encountered).
+  void InlineAllocationStep(int bytes_allocated, Address soon_object,
+                            size_t size) {
+    bytes_to_next_step_ -= bytes_allocated;
+    if (bytes_to_next_step_ <= 0) {
+      Step(static_cast<int>(step_size_ - bytes_to_next_step_), soon_object,
+           size);
+      bytes_to_next_step_ = step_size_;
+    }
+  }
+
+  intptr_t step_size_;
+  intptr_t bytes_to_next_step_;
+
+  friend class NewSpace;
+
+  DISALLOW_COPY_AND_ASSIGN(InlineAllocationObserver);
+};
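A standalone sketch of how the stepping above behaves from an observer's point of view. MiniObserver re-implements just the byte counting of InlineAllocationStep() (without the soon_object/size parameters), and PrintingObserver is a made-up subclass; neither is V8 code.

#include <cstdio>

class MiniObserver {
 public:
  explicit MiniObserver(long step_size)
      : step_size_(step_size), bytes_to_next_step_(step_size) {}
  virtual ~MiniObserver() {}

  // Called by the allocator after every allocation of |bytes_allocated| bytes.
  void AllocationStep(int bytes_allocated) {
    bytes_to_next_step_ -= bytes_allocated;
    if (bytes_to_next_step_ <= 0) {
      Step(static_cast<int>(step_size_ - bytes_to_next_step_));
      bytes_to_next_step_ = step_size_;
    }
  }

 protected:
  // Invoked once at least step_size bytes have accumulated since the last
  // notification; |bytes_allocated| is the accumulated amount.
  virtual void Step(int bytes_allocated) = 0;

 private:
  long step_size_;
  long bytes_to_next_step_;
};

class PrintingObserver : public MiniObserver {
 public:
  explicit PrintingObserver(long step_size) : MiniObserver(step_size) {}

 protected:
  void Step(int bytes_allocated) override {
    std::printf("observed %d bytes since the last step\n", bytes_allocated);
  }
};

int main() {
  PrintingObserver observer(1024);
  // Step() fires after the 4th and the 8th allocation, reporting 1200 bytes
  // each time (300 * 4 = 1200 >= 1024).
  for (int i = 0; i < 10; i++) observer.AllocationStep(300);
  return 0;
}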
 
 // -----------------------------------------------------------------------------
 // The young generation space.
@@ -2351,7 +2639,8 @@
         to_space_(heap, kToSpace),
         from_space_(heap, kFromSpace),
         reservation_(),
-        inline_allocation_limit_step_(0) {}
+        top_on_previous_step_(0),
+        inline_allocation_observers_paused_(false) {}
 
   // Sets up the new space using the given chunk.
   bool SetUp(int reserved_semispace_size_, int max_semi_space_size);
@@ -2391,7 +2680,7 @@
   }
 
   // Return the allocated bytes in the active semispace.
-  virtual intptr_t Size() {
+  intptr_t Size() override {
     return pages_used_ * NewSpacePage::kAreaSize +
            static_cast<int>(top() - to_space_.page_low());
   }
@@ -2415,23 +2704,41 @@
     return to_space_.TotalCapacity();
   }
 
-  // Return the total amount of memory committed for new space.
-  intptr_t CommittedMemory() {
-    if (from_space_.is_committed()) return 2 * Capacity();
-    return TotalCapacity();
+  // Committed memory for NewSpace is the committed memory of both semi-spaces
+  // combined.
+  intptr_t CommittedMemory() override {
+    return from_space_.CommittedMemory() + to_space_.CommittedMemory();
   }
 
-  // Return the total amount of memory committed for new space.
-  intptr_t MaximumCommittedMemory() {
-    return to_space_.MaximumCommittedMemory() +
-           from_space_.MaximumCommittedMemory();
+  intptr_t MaximumCommittedMemory() override {
+    return from_space_.MaximumCommittedMemory() +
+           to_space_.MaximumCommittedMemory();
   }
 
   // Approximate amount of physical memory committed for this space.
-  size_t CommittedPhysicalMemory();
+  size_t CommittedPhysicalMemory() override;
 
   // Return the available bytes without growing.
-  intptr_t Available() { return Capacity() - Size(); }
+  intptr_t Available() override { return Capacity() - Size(); }
+
+  intptr_t PagesFromStart(Address addr) {
+    return static_cast<intptr_t>(addr - bottom()) / Page::kPageSize;
+  }
+
+  size_t AllocatedSinceLastGC() {
+    intptr_t allocated = top() - to_space_.age_mark();
+    if (allocated < 0) {
+      // Runtime has lowered the top below the age mark.
+      return 0;
+    }
+    // Correctly account for non-allocatable regions at the beginning of
+    // each page from the age_mark() to the top().
+    intptr_t pages =
+        PagesFromStart(top()) - PagesFromStart(to_space_.age_mark());
+    allocated -= pages * (NewSpacePage::kObjectStartOffset);
+    DCHECK(0 <= allocated && allocated <= Size());
+    return static_cast<size_t>(allocated);
+  }
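A standalone arithmetic check of the accounting in AllocatedSinceLastGC() above. The page size and object start offset below are stand-in values, not V8's real constants, and bottom() is taken to be address 0 for simplicity.

#include <cassert>

int main() {
  const long kPageSize = 4096;
  const long kObjectStartOffset = 256;  // Non-allocatable header per page.
  long age_mark = 0 * kPageSize + 300;  // Age mark 300 bytes into page 0.
  long top = 2 * kPageSize + 100;       // Top 100 bytes into page 2.

  long allocated = top - age_mark;
  long pages_crossed = top / kPageSize - age_mark / kPageSize;  // == 2
  allocated -= pages_crossed * kObjectStartOffset;

  // Only the allocatable bytes are counted: the rest of page 0 after the age
  // mark, all of page 1, and the start of page 2, minus the two page headers
  // that were skipped over in between.
  assert(allocated ==
         (kPageSize - 300) + kPageSize + 100 - 2 * kObjectStartOffset);
  return 0;
}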
 
   // Return the maximum capacity of a semispace.
   int MaximumCapacity() {
@@ -2455,11 +2762,6 @@
     return allocation_info_.top();
   }
 
-  void set_top(Address top) {
-    DCHECK(to_space_.current_page()->ContainsLimit(top));
-    allocation_info_.set_top(top);
-  }
-
   // Return the address of the allocation pointer limit in the active semispace.
   Address limit() {
     DCHECK(to_space_.current_page()->ContainsLimit(allocation_info_.limit()));
@@ -2498,16 +2800,35 @@
     return allocation_info_.limit_address();
   }
 
-  MUST_USE_RESULT INLINE(AllocationResult AllocateRaw(int size_in_bytes));
+  MUST_USE_RESULT INLINE(AllocationResult AllocateRawAligned(
+      int size_in_bytes, AllocationAlignment alignment));
+
+  MUST_USE_RESULT INLINE(
+      AllocationResult AllocateRawUnaligned(int size_in_bytes));
+
+  MUST_USE_RESULT INLINE(AllocationResult AllocateRaw(
+      int size_in_bytes, AllocationAlignment alignment));
+
+  MUST_USE_RESULT inline AllocationResult AllocateRawSynchronized(
+      int size_in_bytes, AllocationAlignment alignment);
 
   // Reset the allocation pointer to the beginning of the active semispace.
   void ResetAllocationInfo();
 
   void UpdateInlineAllocationLimit(int size_in_bytes);
-  void LowerInlineAllocationLimit(intptr_t step) {
-    inline_allocation_limit_step_ = step;
+
+  // Allows observation of inline allocation. The observer->Step() method gets
+  // called after every step_size bytes have been allocated (approximately).
+  // This works by lowering the allocation limit and moving it up again
+  // after each step.
+  void AddInlineAllocationObserver(InlineAllocationObserver* observer);
+
+  // Removes a previously installed observer.
+  void RemoveInlineAllocationObserver(InlineAllocationObserver* observer);
+
+  void DisableInlineAllocationSteps() {
+    top_on_previous_step_ = 0;
     UpdateInlineAllocationLimit(0);
-    top_on_previous_step_ = allocation_info_.top();
   }
 
   // Get the extent of the inactive semispace (for use as a marking stack,
@@ -2540,6 +2861,7 @@
   // are no pages, or the current page is already empty), or true
   // if successful.
   bool AddFreshPage();
+  bool AddFreshPageSynchronized();
 
 #ifdef VERIFY_HEAP
   // Verify the active semispace.
@@ -2548,7 +2870,7 @@
 
 #ifdef DEBUG
   // Print the active semispace.
-  virtual void Print() { to_space_.Print(); }
+  void Print() override { to_space_.Print(); }
 #endif
 
   // Iterates the active semispace to collect statistics.
@@ -2575,9 +2897,7 @@
     return from_space_.Uncommit();
   }
 
-  inline intptr_t inline_allocation_limit_step() {
-    return inline_allocation_limit_step_;
-  }
+  bool IsFromSpaceCommitted() { return from_space_.is_committed(); }
 
   SemiSpace* active_space() { return &to_space_; }
 
@@ -2585,6 +2905,8 @@
   // Update allocation info to match the current to-space page.
   void UpdateAllocationInfo();
 
+  base::Mutex mutex_;
+
   Address chunk_base_;
   uintptr_t chunk_size_;
 
@@ -2604,39 +2926,128 @@
   // mark-compact collection.
   AllocationInfo allocation_info_;
 
-  // When incremental marking is active we will set allocation_info_.limit
-  // to be lower than actual limit and then will gradually increase it
-  // in steps to guarantee that we do incremental marking steps even
-  // when all allocation is performed from inlined generated code.
-  intptr_t inline_allocation_limit_step_;
-
+  // When inline allocation stepping is active, either because of incremental
+  // marking or because of idle scavenge, we 'interrupt' inline allocation every
+  // once in a while. This is done by setting allocation_info_.limit to be lower
+  // than the actual limit and increasing it in steps to guarantee that the
+  // observers are notified periodically.
+  List<InlineAllocationObserver*> inline_allocation_observers_;
   Address top_on_previous_step_;
+  bool inline_allocation_observers_paused_;
 
   HistogramInfo* allocated_histogram_;
   HistogramInfo* promoted_histogram_;
 
-  MUST_USE_RESULT AllocationResult SlowAllocateRaw(int size_in_bytes);
+  bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment);
 
+  // If we are doing inline allocation in steps, this method performs the 'step'
+  // operation. top is the memory address of the bump pointer at the last
+  // inline allocation (i.e. it determines the number of bytes actually
+  // allocated since the last step.) new_top is the address of the bump pointer
+  // where the next byte is going to be allocated from. top and new_top may be
+  // different when we cross a page boundary or reset the space.
+  void InlineAllocationStep(Address top, Address new_top, Address soon_object,
+                            size_t size);
+  intptr_t GetNextInlineAllocationStepSize();
+  void StartNextInlineAllocationStep();
+  void PauseInlineAllocationObservers();
+  void ResumeInlineAllocationObservers();
+
+  friend class PauseInlineAllocationObserversScope;
   friend class SemiSpaceIterator;
+};
 
+class PauseInlineAllocationObserversScope {
  public:
-  TRACK_MEMORY("NewSpace")
+  explicit PauseInlineAllocationObserversScope(NewSpace* new_space)
+      : new_space_(new_space) {
+    new_space_->PauseInlineAllocationObservers();
+  }
+  ~PauseInlineAllocationObserversScope() {
+    new_space_->ResumeInlineAllocationObservers();
+  }
+
+ private:
+  NewSpace* new_space_;
+  DISALLOW_COPY_AND_ASSIGN(PauseInlineAllocationObserversScope);
+};
+
+// -----------------------------------------------------------------------------
+// Compaction space that is used temporarily during compaction.
+
+class CompactionSpace : public PagedSpace {
+ public:
+  CompactionSpace(Heap* heap, AllocationSpace id, Executability executable)
+      : PagedSpace(heap, id, executable) {}
+
+  // Adds external memory of {size_in_bytes} bytes starting at {start} to the
+  // space.
+  void AddExternalMemory(Address start, int size_in_bytes) {
+    IncreaseCapacity(size_in_bytes);
+    Free(start, size_in_bytes);
+  }
+
+  bool is_local() override { return true; }
+
+  void RefillFreeList() override;
+
+ protected:
+  // The space is temporary and not included in any snapshots.
+  bool snapshotable() override { return false; }
+
+  MUST_USE_RESULT HeapObject* SweepAndRetryAllocation(
+      int size_in_bytes) override;
+};
+
+
+// A collection of |CompactionSpace|s used by a single compaction task.
+class CompactionSpaceCollection : public Malloced {
+ public:
+  explicit CompactionSpaceCollection(Heap* heap)
+      : old_space_(heap, OLD_SPACE, Executability::NOT_EXECUTABLE),
+        code_space_(heap, CODE_SPACE, Executability::EXECUTABLE),
+        duration_(0.0),
+        bytes_compacted_(0) {}
+
+  CompactionSpace* Get(AllocationSpace space) {
+    switch (space) {
+      case OLD_SPACE:
+        return &old_space_;
+      case CODE_SPACE:
+        return &code_space_;
+      default:
+        UNREACHABLE();
+    }
+    UNREACHABLE();
+    return nullptr;
+  }
+
+  void ReportCompactionProgress(double duration, intptr_t bytes_compacted) {
+    duration_ += duration;
+    bytes_compacted_ += bytes_compacted;
+  }
+
+  double duration() const { return duration_; }
+  intptr_t bytes_compacted() const { return bytes_compacted_; }
+
+ private:
+  CompactionSpace old_space_;
+  CompactionSpace code_space_;
+
+  // Bookkeeping.
+  double duration_;
+  intptr_t bytes_compacted_;
 };
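A standalone sketch of the divide / compact-in-parallel / merge flow implied by DivideUponCompactionSpaces() and MergeCompactionSpace() above: each task gets its own local space to allocate from without locking, and the bookkeeping is folded back into the owning space once the tasks are done. The MiniSpace type and the budget numbers are illustrative only.

#include <cstdio>
#include <vector>

struct MiniSpace {
  long capacity = 0;
  long size = 0;
  void Merge(const MiniSpace& other) {
    capacity += other.capacity;
    size += other.size;
  }
};

int main() {
  MiniSpace main_space;
  main_space.capacity = 4 << 20;  // Memory already owned by the main space.

  // Each compaction task works on its own local space, lock-free.
  std::vector<MiniSpace> locals(4);
  for (MiniSpace& local : locals) {
    local.capacity = 500 << 10;  // Per-task budget, kCompactionMemoryWanted-like.
    local.size = 300 << 10;      // Bytes the task ended up using.
  }

  // After the tasks finish, their accounting is merged back into the owner.
  for (const MiniSpace& local : locals) main_space.Merge(local);
  std::printf("capacity=%ld size=%ld\n", main_space.capacity, main_space.size);
  return 0;
}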
 
 
 // -----------------------------------------------------------------------------
-// Old object space (excluding map objects)
+// Old object space (includes the old space of objects and code space)
 
 class OldSpace : public PagedSpace {
  public:
-  // Creates an old space object with a given maximum capacity.
-  // The constructor does not allocate pages from OS.
-  OldSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id,
-           Executability executable)
-      : PagedSpace(heap, max_capacity, id, executable) {}
-
- public:
-  TRACK_MEMORY("OldSpace")
+  // Creates an old space object. The constructor does not allocate pages
+  // from OS.
+  OldSpace(Heap* heap, AllocationSpace id, Executability executable)
+      : PagedSpace(heap, id, executable) {}
 };
 
 
@@ -2653,16 +3064,16 @@
 
 class MapSpace : public PagedSpace {
  public:
-  // Creates a map space object with a maximum capacity.
-  MapSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
-      : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE),
+  // Creates a map space object.
+  MapSpace(Heap* heap, AllocationSpace id)
+      : PagedSpace(heap, id, NOT_EXECUTABLE),
         max_map_space_pages_(kMaxMapPageIndex - 1) {}
 
   // Given an index, returns the page address.
   // TODO(1600): this limit is artifical just to keep code compilable
   static const int kMaxMapPageIndex = 1 << 16;
 
-  virtual int RoundSizeDownToObjectAlignment(int size) {
+  int RoundSizeDownToObjectAlignment(int size) override {
     if (base::bits::IsPowerOfTwo32(Map::kSize)) {
       return RoundDown(size, Map::kSize);
     } else {
@@ -2670,11 +3081,12 @@
     }
   }
 
- protected:
-  virtual void VerifyObject(HeapObject* obj);
+#ifdef VERIFY_HEAP
+  void VerifyObject(HeapObject* obj) override;
+#endif
 
  private:
-  static const int kMapsPerPage = Page::kMaxRegularHeapObjectSize / Map::kSize;
+  static const int kMapsPerPage = Page::kAllocatableMemory / Map::kSize;
 
   // Do map space compaction if there is a page gap.
   int CompactionThreshold() {
@@ -2682,73 +3094,20 @@
   }
 
   const int max_map_space_pages_;
-
- public:
-  TRACK_MEMORY("MapSpace")
 };
 
 
 // -----------------------------------------------------------------------------
-// Old space for simple property cell objects
-
-class CellSpace : public PagedSpace {
- public:
-  // Creates a property cell space object with a maximum capacity.
-  CellSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
-      : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE) {}
-
-  virtual int RoundSizeDownToObjectAlignment(int size) {
-    if (base::bits::IsPowerOfTwo32(Cell::kSize)) {
-      return RoundDown(size, Cell::kSize);
-    } else {
-      return (size / Cell::kSize) * Cell::kSize;
-    }
-  }
-
- protected:
-  virtual void VerifyObject(HeapObject* obj);
-
- public:
-  TRACK_MEMORY("CellSpace")
-};
-
-
-// -----------------------------------------------------------------------------
-// Old space for all global object property cell objects
-
-class PropertyCellSpace : public PagedSpace {
- public:
-  // Creates a property cell space object with a maximum capacity.
-  PropertyCellSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
-      : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE) {}
-
-  virtual int RoundSizeDownToObjectAlignment(int size) {
-    if (base::bits::IsPowerOfTwo32(PropertyCell::kSize)) {
-      return RoundDown(size, PropertyCell::kSize);
-    } else {
-      return (size / PropertyCell::kSize) * PropertyCell::kSize;
-    }
-  }
-
- protected:
-  virtual void VerifyObject(HeapObject* obj);
-
- public:
-  TRACK_MEMORY("PropertyCellSpace")
-};
-
-
-// -----------------------------------------------------------------------------
-// Large objects ( > Page::kMaxHeapObjectSize ) are allocated and managed by
-// the large object space. A large object is allocated from OS heap with
-// extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
+// Large objects ( > Page::kMaxRegularHeapObjectSize ) are allocated and
+// managed by the large object space. A large object is allocated from OS
+// heap with extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
 // A large object always starts at Page::kObjectStartOffset to a page.
 // Large objects do not move during garbage collections.
 
 class LargeObjectSpace : public Space {
  public:
-  LargeObjectSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id);
-  virtual ~LargeObjectSpace() {}
+  LargeObjectSpace(Heap* heap, AllocationSpace id);
+  virtual ~LargeObjectSpace();
 
   // Initializes internal data structures.
   bool SetUp();
@@ -2766,21 +3125,15 @@
   MUST_USE_RESULT AllocationResult
       AllocateRaw(int object_size, Executability executable);
 
-  bool CanAllocateSize(int size) { return Size() + size <= max_capacity_; }
-
   // Available bytes for objects in this space.
-  inline intptr_t Available();
+  inline intptr_t Available() override;
 
-  virtual intptr_t Size() { return size_; }
+  intptr_t Size() override { return size_; }
 
-  virtual intptr_t SizeOfObjects() { return objects_size_; }
-
-  intptr_t MaximumCommittedMemory() { return maximum_committed_; }
-
-  intptr_t CommittedMemory() { return Size(); }
+  intptr_t SizeOfObjects() override { return objects_size_; }
 
   // Approximate amount of physical memory committed for this space.
-  size_t CommittedPhysicalMemory();
+  size_t CommittedPhysicalMemory() override;
 
   int PageCount() { return page_count_; }
 
@@ -2792,11 +3145,15 @@
   // if such a page doesn't exist.
   LargePage* FindPage(Address a);
 
+  // Clears the marking state of live objects.
+  void ClearMarkingStateOfLiveObjects();
+
   // Frees unmarked objects.
   void FreeUnmarkedObjects();
 
   // Checks whether a heap object is in this space; O(1).
   bool Contains(HeapObject* obj);
+  bool Contains(Address address);
 
   // Checks whether the space is empty.
   bool IsEmpty() { return first_page_ == NULL; }
@@ -2808,7 +3165,7 @@
 #endif
 
 #ifdef DEBUG
-  virtual void Print();
+  void Print() override;
   void ReportStatistics();
   void CollectCodeStatistics();
 #endif
@@ -2817,8 +3174,6 @@
   bool SlowContains(Address addr) { return FindObject(addr)->IsHeapObject(); }
 
  private:
-  intptr_t max_capacity_;
-  intptr_t maximum_committed_;
   // The head of the linked list of large object chunks.
   LargePage* first_page_;
   intptr_t size_;          // allocated bytes
@@ -2828,16 +3183,12 @@
   HashMap chunk_map_;
 
   friend class LargeObjectIterator;
-
- public:
-  TRACK_MEMORY("LargeObjectSpace")
 };
 
 
 class LargeObjectIterator : public ObjectIterator {
  public:
   explicit LargeObjectIterator(LargeObjectSpace* space);
-  LargeObjectIterator(LargeObjectSpace* space, HeapObjectCallback size_func);
 
   HeapObject* Next();
 
@@ -2846,7 +3197,6 @@
 
  private:
   LargePage* current_;
-  HeapObjectCallback size_func_;
 };
 
 
@@ -2857,50 +3207,12 @@
   inline explicit PointerChunkIterator(Heap* heap);
 
   // Return NULL when the iterator is done.
-  MemoryChunk* next() {
-    switch (state_) {
-      case kOldPointerState: {
-        if (old_pointer_iterator_.has_next()) {
-          return old_pointer_iterator_.next();
-        }
-        state_ = kMapState;
-        // Fall through.
-      }
-      case kMapState: {
-        if (map_iterator_.has_next()) {
-          return map_iterator_.next();
-        }
-        state_ = kLargeObjectState;
-        // Fall through.
-      }
-      case kLargeObjectState: {
-        HeapObject* heap_object;
-        do {
-          heap_object = lo_iterator_.Next();
-          if (heap_object == NULL) {
-            state_ = kFinishedState;
-            return NULL;
-          }
-          // Fixed arrays are the only pointer-containing objects in large
-          // object space.
-        } while (!heap_object->IsFixedArray());
-        MemoryChunk* answer = MemoryChunk::FromAddress(heap_object->address());
-        return answer;
-      }
-      case kFinishedState:
-        return NULL;
-      default:
-        break;
-    }
-    UNREACHABLE();
-    return NULL;
-  }
-
+  inline MemoryChunk* next();
 
  private:
-  enum State { kOldPointerState, kMapState, kLargeObjectState, kFinishedState };
+  enum State { kOldSpaceState, kMapState, kLargeObjectState, kFinishedState };
   State state_;
-  PageIterator old_pointer_iterator_;
+  PageIterator old_iterator_;
   PageIterator map_iterator_;
   LargeObjectIterator lo_iterator_;
 };
@@ -2920,7 +3232,7 @@
   static const int kMaxComments = 64;
 };
 #endif
-}
-}  // namespace v8::internal
+}  // namespace internal
+}  // namespace v8
 
 #endif  // V8_HEAP_SPACES_H_
diff --git a/src/heap/store-buffer-inl.h b/src/heap/store-buffer-inl.h
index 1606465..e11ad87 100644
--- a/src/heap/store-buffer-inl.h
+++ b/src/heap/store-buffer-inl.h
@@ -5,23 +5,18 @@
 #ifndef V8_STORE_BUFFER_INL_H_
 #define V8_STORE_BUFFER_INL_H_
 
+#include "src/heap/heap.h"
+#include "src/heap/spaces-inl.h"
 #include "src/heap/store-buffer.h"
 
 namespace v8 {
 namespace internal {
 
-Address StoreBuffer::TopAddress() {
-  return reinterpret_cast<Address>(heap_->store_buffer_top_address());
-}
-
-
 void StoreBuffer::Mark(Address addr) {
-  DCHECK(!heap_->cell_space()->Contains(addr));
   DCHECK(!heap_->code_space()->Contains(addr));
-  DCHECK(!heap_->old_data_space()->Contains(addr));
   Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
   *top++ = addr;
-  heap_->public_set_store_buffer_top(top);
+  heap_->set_store_buffer_top(reinterpret_cast<Smi*>(top));
   if ((reinterpret_cast<uintptr_t>(top) & kStoreBufferOverflowBit) != 0) {
     DCHECK(top == limit_);
     Compact();
@@ -31,11 +26,15 @@
 }
 
 
+inline void StoreBuffer::MarkSynchronized(Address addr) {
+  base::LockGuard<base::Mutex> lock_guard(&mutex_);
+  Mark(addr);
+}
+
+
 void StoreBuffer::EnterDirectlyIntoStoreBuffer(Address addr) {
   if (store_buffer_rebuilding_enabled_) {
-    SLOW_DCHECK(!heap_->cell_space()->Contains(addr) &&
-                !heap_->code_space()->Contains(addr) &&
-                !heap_->old_data_space()->Contains(addr) &&
+    SLOW_DCHECK(!heap_->code_space()->Contains(addr) &&
                 !heap_->new_space()->Contains(addr));
     Address* top = old_top_;
     *top++ = addr;
@@ -49,15 +48,7 @@
     }
   }
 }
-
-
-void StoreBuffer::ClearDeadObject(HeapObject* object) {
-  Address& map_field = Memory::Address_at(object->address());
-  if (heap_->map_space()->Contains(map_field)) {
-    map_field = NULL;
-  }
-}
-}
-}  // namespace v8::internal
+}  // namespace internal
+}  // namespace v8
 
 #endif  // V8_STORE_BUFFER_INL_H_
diff --git a/src/heap/store-buffer.cc b/src/heap/store-buffer.cc
index aac6811..a8a1e5b 100644
--- a/src/heap/store-buffer.cc
+++ b/src/heap/store-buffer.cc
@@ -2,13 +2,16 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+#include "src/heap/store-buffer.h"
+
 #include <algorithm>
 
-#include "src/v8.h"
-
-#include "src/base/atomicops.h"
 #include "src/counters.h"
+#include "src/heap/incremental-marking.h"
 #include "src/heap/store-buffer-inl.h"
+#include "src/isolate.h"
+#include "src/objects-inl.h"
+#include "src/v8.h"
 
 namespace v8 {
 namespace internal {
@@ -34,6 +37,9 @@
 
 
 void StoreBuffer::SetUp() {
+  // Allocate 3x the buffer size, so that we can start the new store buffer
+  // aligned to 2x the size.  This lets us use a bit test to detect the end of
+  // the area.
   virtual_memory_ = new base::VirtualMemory(kStoreBufferSize * 3);
   uintptr_t start_as_int =
       reinterpret_cast<uintptr_t>(virtual_memory_->address());
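
The comment above is what makes the cheap overflow test in Mark() possible: with start_ aligned to 2 * kStoreBufferSize, every address inside the kStoreBufferSize-byte buffer has the kStoreBufferSize bit clear, and the first address past the end has it set, so a single bit test replaces a limit comparison. A minimal standalone sketch of that invariant, with hypothetical constants and plain malloc in place of base::VirtualMemory:

// Hypothetical standalone check of the alignment/overflow-bit invariant.
#include <cassert>
#include <cstdint>
#include <cstdlib>

int main() {
  const std::uintptr_t kSize = 1u << 14;      // stand-in for kStoreBufferSize
  const std::uintptr_t kOverflowBit = kSize;  // stand-in for kStoreBufferOverflowBit
  // Reserve 3 * kSize so a start aligned to 2 * kSize always leaves room for
  // a full kSize-byte buffer inside the reservation.
  void* raw = std::malloc(3 * kSize);
  std::uintptr_t raw_int = reinterpret_cast<std::uintptr_t>(raw);
  std::uintptr_t start = (raw_int + 2 * kSize - 1) & ~(2 * kSize - 1);  // RoundUp
  std::uintptr_t limit = start + kSize;
  // Every address in [start, limit) has the overflow bit clear; the first
  // address past the buffer (== limit) has it set, so "top & overflow bit"
  // is a cheap end-of-buffer check.
  assert((start & kOverflowBit) == 0);
  assert(((limit - 1) & kOverflowBit) == 0);
  assert((limit & kOverflowBit) != 0);
  std::free(raw);
  return 0;
}
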
@@ -41,23 +47,30 @@
       reinterpret_cast<Address*>(RoundUp(start_as_int, kStoreBufferSize * 2));
   limit_ = start_ + (kStoreBufferSize / kPointerSize);
 
+  // Reserve space for the larger old buffer.
   old_virtual_memory_ =
       new base::VirtualMemory(kOldStoreBufferLength * kPointerSize);
   old_top_ = old_start_ =
       reinterpret_cast<Address*>(old_virtual_memory_->address());
   // Don't know the alignment requirements of the OS, but it is certainly not
   // less than 0xfff.
-  DCHECK((reinterpret_cast<uintptr_t>(old_start_) & 0xfff) == 0);
-  int initial_length =
-      static_cast<int>(base::OS::CommitPageSize() / kPointerSize);
-  DCHECK(initial_length > 0);
-  DCHECK(initial_length <= kOldStoreBufferLength);
+  CHECK((reinterpret_cast<uintptr_t>(old_start_) & 0xfff) == 0);
+  CHECK(kStoreBufferSize >= base::OS::CommitPageSize());
+  // Initial size of the old buffer is as big as the buffer for new pointers.
+  // This means even if we later fail to enlarge the old buffer due to OOM from
+  // the OS, we will still be able to empty the new pointer buffer into the old
+  // buffer.
+  int initial_length = static_cast<int>(kStoreBufferSize / kPointerSize);
+  CHECK(initial_length > 0);
+  CHECK(initial_length <= kOldStoreBufferLength);
   old_limit_ = old_start_ + initial_length;
   old_reserved_limit_ = old_start_ + kOldStoreBufferLength;
 
-  CHECK(old_virtual_memory_->Commit(reinterpret_cast<void*>(old_start_),
-                                    (old_limit_ - old_start_) * kPointerSize,
-                                    false));
+  if (!old_virtual_memory_->Commit(reinterpret_cast<void*>(old_start_),
+                                   (old_limit_ - old_start_) * kPointerSize,
+                                   false)) {
+    V8::FatalProcessOutOfMemory("StoreBuffer::SetUp");
+  }
 
   DCHECK(reinterpret_cast<Address>(start_) >= virtual_memory_->address());
   DCHECK(reinterpret_cast<Address>(limit_) >= virtual_memory_->address());
@@ -71,10 +84,12 @@
   DCHECK((reinterpret_cast<uintptr_t>(limit_ - 1) & kStoreBufferOverflowBit) ==
          0);
 
-  CHECK(virtual_memory_->Commit(reinterpret_cast<Address>(start_),
-                                kStoreBufferSize,
-                                false));  // Not executable.
-  heap_->public_set_store_buffer_top(start_);
+  if (!virtual_memory_->Commit(reinterpret_cast<Address>(start_),
+                               kStoreBufferSize,
+                               false)) {  // Not executable.
+    V8::FatalProcessOutOfMemory("StoreBuffer::SetUp");
+  }
+  heap_->set_store_buffer_top(reinterpret_cast<Smi*>(start_));
 
   hash_set_1_ = new uintptr_t[kHashSetLength];
   hash_set_2_ = new uintptr_t[kHashSetLength];
@@ -91,7 +106,7 @@
   delete[] hash_set_2_;
   old_start_ = old_top_ = old_limit_ = old_reserved_limit_ = NULL;
   start_ = limit_ = NULL;
-  heap_->public_set_store_buffer_top(start_);
+  heap_->set_store_buffer_top(reinterpret_cast<Smi*>(start_));
 }
 
 
@@ -101,26 +116,6 @@
 }
 
 
-void StoreBuffer::Uniq() {
-  // Remove adjacent duplicates and cells that do not point at new space.
-  Address previous = NULL;
-  Address* write = old_start_;
-  DCHECK(may_move_store_buffer_entries_);
-  for (Address* read = old_start_; read < old_top_; read++) {
-    Address current = *read;
-    if (current != previous) {
-      Object* object = reinterpret_cast<Object*>(
-          base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(current)));
-      if (heap_->InNewSpace(object)) {
-        *write++ = current;
-      }
-    }
-    previous = current;
-  }
-  old_top_ = write;
-}
-
-
 bool StoreBuffer::SpaceAvailable(intptr_t space_needed) {
   return old_limit_ - old_top_ >= space_needed;
 }
@@ -130,9 +125,12 @@
   while (old_limit_ - old_top_ < space_needed &&
          old_limit_ < old_reserved_limit_) {
     size_t grow = old_limit_ - old_start_;  // Double size.
-    CHECK(old_virtual_memory_->Commit(reinterpret_cast<void*>(old_limit_),
-                                      grow * kPointerSize, false));
-    old_limit_ += grow;
+    if (old_virtual_memory_->Commit(reinterpret_cast<void*>(old_limit_),
+                                    grow * kPointerSize, false)) {
+      old_limit_ += grow;
+    } else {
+      break;
+    }
   }
 
   if (SpaceAvailable(space_needed)) return;
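
The loop above doubles the committed portion of the old buffer and, unlike the CHECK it replaces, treats a failed Commit() as recoverable: growth simply stops and the caller falls back to other ways of making room. A small self-contained sketch of that policy, with a hypothetical CommitMemory() standing in for base::VirtualMemory::Commit() and made-up sizes:

#include <cstddef>
#include <iostream>

// Hypothetical stand-in for base::VirtualMemory::Commit(): pretend the OS
// refuses to commit more than kOsLimit bytes in total.
const std::size_t kOsLimit = 64 * 1024;
std::size_t g_committed_by_os = 0;

bool CommitMemory(std::size_t bytes) {
  if (g_committed_by_os + bytes > kOsLimit) return false;
  g_committed_by_os += bytes;
  return true;
}

int main() {
  std::size_t committed = 8 * 1024;          // initial buffer size (made up)
  const std::size_t reserved = 1024 * 1024;  // reserved address space (made up)
  const std::size_t needed = 512 * 1024;     // space we would like to have
  g_committed_by_os = committed;
  // Double the committed area until there is enough space or the OS refuses;
  // on refusal we stop growing instead of aborting (simplified: "committed"
  // stands in for the free part of the old buffer).
  while (committed < needed && committed < reserved) {
    std::size_t grow = committed;  // double the current size
    if (CommitMemory(grow)) {
      committed += grow;
    } else {
      break;
    }
  }
  std::cout << "committed " << committed << " bytes\n";  // 65536 with this OS limit
  return 0;
}
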
@@ -209,6 +207,8 @@
   }
   if (created_new_scan_on_scavenge_pages) {
     Filter(MemoryChunk::SCAN_ON_SCAVENGE);
+    heap_->isolate()->CountUsage(
+        v8::Isolate::UseCounterFeature::kStoreBufferOverflow);
   }
   old_buffer_is_filtered_ = true;
 }
@@ -238,20 +238,6 @@
 }
 
 
-void StoreBuffer::SortUniq() {
-  Compact();
-  if (old_buffer_is_sorted_) return;
-  std::sort(old_start_, old_top_);
-  Uniq();
-
-  old_buffer_is_sorted_ = true;
-
-  // Filtering hash sets are inconsistent with the store buffer after this
-  // operation.
-  ClearFilteringHashSets();
-}
-
-
 bool StoreBuffer::PrepareForIteration() {
   Compact();
   PointerChunkIterator it(heap_);
@@ -276,41 +262,6 @@
 }
 
 
-#ifdef DEBUG
-void StoreBuffer::Clean() {
-  ClearFilteringHashSets();
-  Uniq();  // Also removes things that no longer point to new space.
-  EnsureSpace(kStoreBufferSize / 2);
-}
-
-
-static Address* in_store_buffer_1_element_cache = NULL;
-
-
-bool StoreBuffer::CellIsInStoreBuffer(Address cell_address) {
-  if (!FLAG_enable_slow_asserts) return true;
-  if (in_store_buffer_1_element_cache != NULL &&
-      *in_store_buffer_1_element_cache == cell_address) {
-    return true;
-  }
-  Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
-  for (Address* current = top - 1; current >= start_; current--) {
-    if (*current == cell_address) {
-      in_store_buffer_1_element_cache = current;
-      return true;
-    }
-  }
-  for (Address* current = old_top_ - 1; current >= old_start_; current--) {
-    if (*current == cell_address) {
-      in_store_buffer_1_element_cache = current;
-      return true;
-    }
-  }
-  return false;
-}
-#endif
-
-
 void StoreBuffer::ClearFilteringHashSets() {
   if (!hash_sets_are_empty_) {
     memset(reinterpret_cast<void*>(hash_set_1_), 0,
@@ -341,8 +292,7 @@
         // When we are not in GC the Heap::InNewSpace() predicate
         // checks that pointers which satisfy predicate point into
         // the active semispace.
-        Object* object = reinterpret_cast<Object*>(
-            base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
+        Object* object = *slot;
         heap_->InNewSpace(object);
         slot_address += kPointerSize;
       }
@@ -369,33 +319,40 @@
 }
 
 
-void StoreBuffer::FindPointersToNewSpaceInRegion(
-    Address start, Address end, ObjectSlotCallback slot_callback,
-    bool clear_maps) {
-  for (Address slot_address = start; slot_address < end;
-       slot_address += kPointerSize) {
-    Object** slot = reinterpret_cast<Object**>(slot_address);
-    Object* object = reinterpret_cast<Object*>(
-        base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
-    if (heap_->InNewSpace(object)) {
-      HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
-      DCHECK(heap_object->IsHeapObject());
-      // The new space object was not promoted if it still contains a map
-      // pointer. Clear the map field now lazily.
-      if (clear_maps) ClearDeadObject(heap_object);
-      slot_callback(reinterpret_cast<HeapObject**>(slot), heap_object);
-      object = reinterpret_cast<Object*>(
-          base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
-      if (heap_->InNewSpace(object)) {
-        EnterDirectlyIntoStoreBuffer(slot_address);
-      }
+void StoreBuffer::ProcessOldToNewSlot(Address slot_address,
+                                      ObjectSlotCallback slot_callback) {
+  Object** slot = reinterpret_cast<Object**>(slot_address);
+  Object* object = *slot;
+
+  // If the object is not in from space, it must be a duplicate store buffer
+  // entry and the slot was already updated.
+  if (heap_->InFromSpace(object)) {
+    HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
+    DCHECK(heap_object->IsHeapObject());
+    slot_callback(reinterpret_cast<HeapObject**>(slot), heap_object);
+    object = *slot;
+    // If the object was in from space before the callback and is in to space
+    // after it ran, the object is still live.
+    // Unfortunately, we know nothing about the slot itself; it could be
+    // located in a just-freed free-space object.
+    if (heap_->InToSpace(object)) {
+      EnterDirectlyIntoStoreBuffer(reinterpret_cast<Address>(slot));
     }
   }
 }
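
Reading ProcessOldToNewSlot above: a slot whose target is no longer in from-space must be a stale duplicate entry that an earlier visit already updated, so it is skipped; otherwise the callback moves the target, and if the moved target is in to-space the slot still points into new space and is re-recorded. A minimal sketch of that decision with hypothetical Space and Slot types (not V8's):

#include <cassert>

enum class Space { kFromSpace, kToSpace, kOldSpace };

struct Slot { Space target; };

// Stand-in for the scavenge callback: it moves the target into to-space.
void ScavengeSlot(Slot* slot) { slot->target = Space::kToSpace; }

// Returns true when the slot must be re-recorded in the store buffer.
bool ProcessSlotSketch(Slot* slot) {
  if (slot->target != Space::kFromSpace) return false;  // stale duplicate entry
  ScavengeSlot(slot);
  return slot->target == Space::kToSpace;  // still points into new space
}

int main() {
  Slot live{Space::kFromSpace};
  Slot stale{Space::kOldSpace};  // already updated via a duplicate entry
  bool keep_live = ProcessSlotSketch(&live);
  bool keep_stale = ProcessSlotSketch(&stale);
  assert(keep_live);
  assert(!keep_stale);
  return keep_live && !keep_stale ? 0 : 1;
}
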
 
 
-void StoreBuffer::IteratePointersInStoreBuffer(ObjectSlotCallback slot_callback,
-                                               bool clear_maps) {
+void StoreBuffer::FindPointersToNewSpaceInRegion(
+    Address start, Address end, ObjectSlotCallback slot_callback) {
+  for (Address slot_address = start; slot_address < end;
+       slot_address += kPointerSize) {
+    ProcessOldToNewSlot(slot_address, slot_callback);
+  }
+}
+
+
+void StoreBuffer::IteratePointersInStoreBuffer(
+    ObjectSlotCallback slot_callback) {
   Address* limit = old_top_;
   old_top_ = old_start_;
   {
@@ -404,40 +361,78 @@
 #ifdef DEBUG
       Address* saved_top = old_top_;
 #endif
-      Object** slot = reinterpret_cast<Object**>(*current);
-      Object* object = reinterpret_cast<Object*>(
-          base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
-      if (heap_->InFromSpace(object)) {
-        HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
-        // The new space object was not promoted if it still contains a map
-        // pointer. Clear the map field now lazily.
-        if (clear_maps) ClearDeadObject(heap_object);
-        slot_callback(reinterpret_cast<HeapObject**>(slot), heap_object);
-        object = reinterpret_cast<Object*>(
-            base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
-        if (heap_->InNewSpace(object)) {
-          EnterDirectlyIntoStoreBuffer(reinterpret_cast<Address>(slot));
-        }
-      }
+      ProcessOldToNewSlot(*current, slot_callback);
       DCHECK(old_top_ == saved_top + 1 || old_top_ == saved_top);
     }
   }
 }
 
 
+void StoreBuffer::ClearInvalidStoreBufferEntries() {
+  Compact();
+  Address* new_top = old_start_;
+  for (Address* current = old_start_; current < old_top_; current++) {
+    Address addr = *current;
+    Object** slot = reinterpret_cast<Object**>(addr);
+    Object* object = *slot;
+    if (heap_->InNewSpace(object) && object->IsHeapObject()) {
+      // If the target object is not black, the source slot must be part
+      // of a non-black (dead) object.
+      HeapObject* heap_object = HeapObject::cast(object);
+      if (Marking::IsBlack(Marking::MarkBitFrom(heap_object)) &&
+          heap_->mark_compact_collector()->IsSlotInLiveObject(addr)) {
+        *new_top++ = addr;
+      }
+    }
+  }
+  old_top_ = new_top;
+  ClearFilteringHashSets();
+
+  // Don't scan on scavenge dead large objects.
+  LargeObjectIterator it(heap_->lo_space());
+  for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
+    MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
+    if (chunk->scan_on_scavenge() &&
+        Marking::IsWhite(Marking::MarkBitFrom(object))) {
+      chunk->set_scan_on_scavenge(false);
+    }
+  }
+}
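
The filtering loop above is the usual read/write-pointer compaction idiom: scan every recorded slot, copy the ones that pass the liveness check down to new_top, and truncate the buffer there. A generic sketch of the idiom, using a hypothetical FilterInPlace helper over a std::vector (the real predicate is the black-mark plus IsSlotInLiveObject check):

#include <cassert>
#include <vector>

template <typename T, typename Pred>
void FilterInPlace(std::vector<T>* buffer, Pred keep) {
  T* write = buffer->data();
  for (T* read = buffer->data(); read != buffer->data() + buffer->size();
       ++read) {
    if (keep(*read)) *write++ = *read;  // keep only entries that are still valid
  }
  buffer->resize(write - buffer->data());  // everything past write is dropped
}

int main() {
  std::vector<int> entries = {1, 2, 3, 4, 5, 6};
  // Pretend the even entries are the "live" ones.
  FilterInPlace(&entries, [](int e) { return e % 2 == 0; });
  assert((entries == std::vector<int>{2, 4, 6}));
  return 0;
}
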
+
+
+void StoreBuffer::VerifyValidStoreBufferEntries() {
+  for (Address* current = old_start_; current < old_top_; current++) {
+    Object** slot = reinterpret_cast<Object**>(*current);
+    Object* object = *slot;
+    CHECK(object->IsHeapObject());
+    CHECK(heap_->InNewSpace(object));
+    heap_->mark_compact_collector()->VerifyIsSlotInLiveObject(
+        reinterpret_cast<Address>(slot), HeapObject::cast(object));
+  }
+}
+
+
+class FindPointersToNewSpaceVisitor final : public ObjectVisitor {
+ public:
+  FindPointersToNewSpaceVisitor(StoreBuffer* store_buffer,
+                                ObjectSlotCallback callback)
+      : store_buffer_(store_buffer), callback_(callback) {}
+
+  V8_INLINE void VisitPointers(Object** start, Object** end) override {
+    store_buffer_->FindPointersToNewSpaceInRegion(
+        reinterpret_cast<Address>(start), reinterpret_cast<Address>(end),
+        callback_);
+  }
+
+  V8_INLINE void VisitCodeEntry(Address code_entry_slot) override {}
+
+ private:
+  StoreBuffer* store_buffer_;
+  ObjectSlotCallback callback_;
+};
+
+
 void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback) {
-  IteratePointersToNewSpace(slot_callback, false);
-}
-
-
-void StoreBuffer::IteratePointersToNewSpaceAndClearMaps(
-    ObjectSlotCallback slot_callback) {
-  IteratePointersToNewSpace(slot_callback, true);
-}
-
-
-void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback,
-                                            bool clear_maps) {
   // We do not sort or remove duplicated entries from the store buffer because
   // we expect that callback will rebuild the store buffer thus removing
   // all duplicates and pointers to old space.
@@ -446,7 +441,7 @@
   // TODO(gc): we want to skip slots on evacuation candidates
   // but we can't simply figure that out from slot address
   // because slot can belong to a large object.
-  IteratePointersInStoreBuffer(slot_callback, clear_maps);
+  IteratePointersInStoreBuffer(slot_callback);
 
   // We are done scanning all the pointers that were in the store buffer, but
   // there may be some pages marked scan_on_scavenge that have pointers to new
@@ -463,6 +458,7 @@
     }
     PointerChunkIterator it(heap_);
     MemoryChunk* chunk;
+    FindPointersToNewSpaceVisitor visitor(this, slot_callback);
     while ((chunk = it.next()) != NULL) {
       if (chunk->scan_on_scavenge()) {
         chunk->set_scan_on_scavenge(false);
@@ -475,13 +471,13 @@
           DCHECK(array->IsFixedArray());
           Address start = array->address();
           Address end = start + array->Size();
-          FindPointersToNewSpaceInRegion(start, end, slot_callback, clear_maps);
+          FindPointersToNewSpaceInRegion(start, end, slot_callback);
         } else {
           Page* page = reinterpret_cast<Page*>(chunk);
           PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner());
           if (owner == heap_->map_space()) {
             DCHECK(page->WasSwept());
-            HeapObjectIterator iterator(page, NULL);
+            HeapObjectIterator iterator(page);
             for (HeapObject* heap_object = iterator.Next(); heap_object != NULL;
                  heap_object = iterator.Next()) {
               // We skip free space objects.
@@ -490,56 +486,26 @@
                 FindPointersToNewSpaceInRegion(
                     heap_object->address() + Map::kPointerFieldsBeginOffset,
                     heap_object->address() + Map::kPointerFieldsEndOffset,
-                    slot_callback, clear_maps);
+                    slot_callback);
               }
             }
           } else {
-            if (!page->SweepingCompleted()) {
-              heap_->mark_compact_collector()->SweepInParallel(page, owner);
-              if (!page->SweepingCompleted()) {
-                // We were not able to sweep that page, i.e., a concurrent
-                // sweeper thread currently owns this page.
-                // TODO(hpayer): This may introduce a huge pause here. We
-                // just care about finish sweeping of the scan on scavenge page.
-                heap_->mark_compact_collector()->EnsureSweepingCompleted();
-              }
-            }
-            CHECK(page->owner() == heap_->old_pointer_space());
-            HeapObjectIterator iterator(page, NULL);
-            for (HeapObject* heap_object = iterator.Next(); heap_object != NULL;
-                 heap_object = iterator.Next()) {
-              // We iterate over objects that contain new space pointers only.
-              bool may_contain_raw_values = heap_object->MayContainRawValues();
-              if (!may_contain_raw_values) {
-                Address obj_address = heap_object->address();
-                const int start_offset = HeapObject::kHeaderSize;
-                const int end_offset = heap_object->Size();
-#if V8_DOUBLE_FIELDS_UNBOXING
-                LayoutDescriptorHelper helper(heap_object->map());
-                bool has_only_tagged_fields = helper.all_fields_tagged();
-
-                if (!has_only_tagged_fields) {
-                  for (int offset = start_offset; offset < end_offset;) {
-                    int end_of_region_offset;
-                    if (helper.IsTagged(offset, end_offset,
-                                        &end_of_region_offset)) {
-                      FindPointersToNewSpaceInRegion(
-                          obj_address + offset,
-                          obj_address + end_of_region_offset, slot_callback,
-                          clear_maps);
-                    }
-                    offset = end_of_region_offset;
-                  }
-                } else {
-#endif
-                  Address start_address = obj_address + start_offset;
-                  Address end_address = obj_address + end_offset;
-                  // Object has only tagged fields.
-                  FindPointersToNewSpaceInRegion(start_address, end_address,
-                                                 slot_callback, clear_maps);
-#if V8_DOUBLE_FIELDS_UNBOXING
-                }
-#endif
+            if (page->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
+              // Aborted pages require iterating using mark bits because they
+              // don't have an iterable object layout before sweeping (which can
+              // only happen later). Note that we can never reach an
+              // aborted page through the scavenger.
+              DCHECK_EQ(heap_->gc_state(), Heap::MARK_COMPACT);
+              heap_->mark_compact_collector()->VisitLiveObjectsBody(page,
+                                                                    &visitor);
+            } else {
+              heap_->mark_compact_collector()
+                  ->SweepOrWaitUntilSweepingCompleted(page);
+              HeapObjectIterator iterator(page);
+              for (HeapObject* heap_object = iterator.Next();
+                   heap_object != nullptr; heap_object = iterator.Next()) {
+                // We iterate over objects that contain new space pointers only.
+                heap_object->IterateBody(&visitor);
               }
             }
           }
@@ -561,7 +527,7 @@
   // There's no check of the limit in the loop below so we check here for
   // the worst case (compaction doesn't eliminate any pointers).
   DCHECK(top <= limit_);
-  heap_->public_set_store_buffer_top(start_);
+  heap_->set_store_buffer_top(reinterpret_cast<Smi*>(start_));
   EnsureSpace(top - start_);
   DCHECK(may_move_store_buffer_entries_);
   // Goes through the addresses in the store buffer attempting to remove
@@ -570,9 +536,7 @@
   // functions to reduce the number of unnecessary clashes.
   hash_sets_are_empty_ = false;  // Hash sets are in use.
   for (Address* current = start_; current < top; current++) {
-    DCHECK(!heap_->cell_space()->Contains(*current));
     DCHECK(!heap_->code_space()->Contains(*current));
-    DCHECK(!heap_->old_data_space()->Contains(*current));
     uintptr_t int_addr = reinterpret_cast<uintptr_t>(*current);
     // Shift out the last bits including any tags.
     int_addr >>= kPointerSizeLog2;
@@ -605,5 +569,56 @@
   }
   heap_->isolate()->counters()->store_buffer_compactions()->Increment();
 }
+
+
+void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
+  if (event == kStoreBufferStartScanningPagesEvent) {
+    start_of_current_page_ = NULL;
+    current_page_ = NULL;
+  } else if (event == kStoreBufferScanningPageEvent) {
+    if (current_page_ != NULL) {
+      // If this page already overflowed the store buffer during this iteration.
+      if (current_page_->scan_on_scavenge()) {
+        // Then we should wipe out the entries that have been added for it.
+        store_buffer_->SetTop(start_of_current_page_);
+      } else if (store_buffer_->Top() - start_of_current_page_ >=
+                 (store_buffer_->Limit() - store_buffer_->Top()) >> 2) {
+        // Did we find too many pointers in the previous page?  The heuristic is
+        // that no page can take more than 1/5 of the remaining slots in the
+        // store buffer.
+        current_page_->set_scan_on_scavenge(true);
+        store_buffer_->SetTop(start_of_current_page_);
+      } else {
+        // In this case the page we scanned took a reasonable number of slots in
+        // the store buffer.  It has now been rehabilitated and is no longer
+        // marked scan_on_scavenge.
+        DCHECK(!current_page_->scan_on_scavenge());
+      }
+    }
+    start_of_current_page_ = store_buffer_->Top();
+    current_page_ = page;
+  } else if (event == kStoreBufferFullEvent) {
+    // The current page overflowed the store buffer again.  Wipe out its entries
+    // in the store buffer and mark it scan-on-scavenge again.  This may happen
+    // several times while scanning.
+    if (current_page_ == NULL) {
+      // Store Buffer overflowed while scanning promoted objects.  These are not
+      // in any particular page, though they are likely to be clustered by the
+      // allocation routines.
+      store_buffer_->EnsureSpace(StoreBuffer::kStoreBufferSize / 2);
+    } else {
+      // Store Buffer overflowed while scanning a particular old space page for
+      // pointers to new space.
+      DCHECK(current_page_ == page);
+      DCHECK(page != NULL);
+      current_page_->set_scan_on_scavenge(true);
+      DCHECK(start_of_current_page_ != store_buffer_->Top());
+      store_buffer_->SetTop(start_of_current_page_);
+    }
+  } else {
+    UNREACHABLE();
+  }
 }
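
The comment and the code in the heuristic above say the same thing in two ways: if the page just scanned contributed x entries and r slots remain free, then x >= r/4 (the ">> 2" test) is equivalent to x >= (x + r)/5, i.e. roughly a fifth of the slots that were free when scanning of that page started. A small exhaustive check of the equivalence over hypothetical values, ignoring the integer rounding of the shift:

#include <cassert>

int main() {
  for (int x = 0; x < 1000; ++x) {       // entries contributed by the page
    for (int r = 1; r < 1000; ++r) {     // slots still free afterwards
      bool by_code = 4 * x >= r;         // the test in the code, without rounding
      bool by_comment = 5 * x >= x + r;  // ">= 1/5 of what was available"
      assert(by_code == by_comment);
    }
  }
  return 0;
}
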
-}  // namespace v8::internal
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/heap/store-buffer.h b/src/heap/store-buffer.h
index 5efd692..9eeb001 100644
--- a/src/heap/store-buffer.h
+++ b/src/heap/store-buffer.h
@@ -19,10 +19,6 @@
 
 typedef void (*ObjectSlotCallback)(HeapObject** from, HeapObject* to);
 
-typedef void (StoreBuffer::*RegionCallback)(Address start, Address end,
-                                            ObjectSlotCallback slot_callback,
-                                            bool clear_maps);
-
 // Used to implement the write barrier by collecting addresses of pointers
 // between spaces.
 class StoreBuffer {
@@ -31,14 +27,16 @@
 
   static void StoreBufferOverflow(Isolate* isolate);
 
-  inline Address TopAddress();
-
   void SetUp();
   void TearDown();
 
-  // This is used by the mutator to enter addresses into the store buffer.
+  // This is used to add addresses to the store buffer non-concurrently.
   inline void Mark(Address addr);
 
+  // This is used to add addresses to the store buffer when multiple threads
+  // may operate on the store buffer.
+  inline void MarkSynchronized(Address addr);
+
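
The two entry points declared above differ only in locking; the mutex makes MarkSynchronized safe when several threads record addresses at once, while the plain Mark keeps the cheap path for the single mutator thread. A minimal sketch of the same split, using a hypothetical RecorderSketch class and std::mutex in place of base::Mutex:

#include <cstddef>
#include <mutex>
#include <thread>
#include <vector>

class RecorderSketch {
 public:
  // Unsynchronized path, for the single mutator thread.
  void Record(void* addr) { entries_.push_back(addr); }
  // Synchronized path, safe to call from several threads at once.
  void RecordSynchronized(void* addr) {
    std::lock_guard<std::mutex> guard(mutex_);
    Record(addr);
  }
  std::size_t size() const { return entries_.size(); }

 private:
  std::mutex mutex_;
  std::vector<void*> entries_;
};

int main() {
  RecorderSketch recorder;
  int dummy = 0;
  std::thread t1([&] {
    for (int i = 0; i < 1000; i++) recorder.RecordSynchronized(&dummy);
  });
  std::thread t2([&] {
    for (int i = 0; i < 1000; i++) recorder.RecordSynchronized(&dummy);
  });
  t1.join();
  t2.join();
  return recorder.size() == 2000 ? 0 : 1;
}
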
   // This is used by the heap traversal to enter the addresses into the store
   // buffer that should still be in the store buffer after GC.  It enters
   // addresses directly into the old buffer because the GC starts by wiping the
@@ -60,10 +58,6 @@
   // surviving old-to-new pointers into the store buffer to rebuild it.
   void IteratePointersToNewSpace(ObjectSlotCallback callback);
 
-  // Same as IteratePointersToNewSpace but additonally clears maps in objects
-  // referenced from the store buffer that do not contain a forwarding pointer.
-  void IteratePointersToNewSpaceAndClearMaps(ObjectSlotCallback callback);
-
   static const int kStoreBufferOverflowBit = 1 << (14 + kPointerSizeLog2);
   static const int kStoreBufferSize = kStoreBufferOverflowBit;
   static const int kStoreBufferLength = kStoreBufferSize / sizeof(Address);
@@ -88,23 +82,20 @@
   bool old_buffer_is_sorted() { return old_buffer_is_sorted_; }
   bool old_buffer_is_filtered() { return old_buffer_is_filtered_; }
 
-  // Goes through the store buffer removing pointers to things that have
-  // been promoted.  Rebuilds the store buffer completely if it overflowed.
-  void SortUniq();
-
   void EnsureSpace(intptr_t space_needed);
   void Verify();
 
   bool PrepareForIteration();
 
-#ifdef DEBUG
-  void Clean();
-  // Slow, for asserts only.
-  bool CellIsInStoreBuffer(Address cell);
-#endif
-
   void Filter(int flag);
 
+  // Eliminates all stale store buffer entries from the store buffer, i.e.,
+  // slots that are not part of live objects anymore. This method must be
+  // called after marking, when the whole transitive closure is known, and
+  // before sweeping, while the mark bits are still intact.
+  void ClearInvalidStoreBufferEntries();
+  void VerifyValidStoreBufferEntries();
+
  private:
   Heap* heap_;
 
@@ -139,39 +130,49 @@
   uintptr_t* hash_set_2_;
   bool hash_sets_are_empty_;
 
+  // Used for synchronization of concurrent store buffer access.
+  base::Mutex mutex_;
+
   void ClearFilteringHashSets();
 
   bool SpaceAvailable(intptr_t space_needed);
-  void Uniq();
   void ExemptPopularPages(int prime_sample_step, int threshold);
 
-  // Set the map field of the object to NULL if contains a map.
-  inline void ClearDeadObject(HeapObject* object);
-
-  void IteratePointersToNewSpace(ObjectSlotCallback callback, bool clear_maps);
+  void ProcessOldToNewSlot(Address slot_address,
+                           ObjectSlotCallback slot_callback);
 
   void FindPointersToNewSpaceInRegion(Address start, Address end,
-                                      ObjectSlotCallback slot_callback,
-                                      bool clear_maps);
+                                      ObjectSlotCallback slot_callback);
 
-  // For each region of pointers on a page in use from an old space call
-  // visit_pointer_region callback.
-  // If either visit_pointer_region or callback can cause an allocation
-  // in old space and changes in allocation watermark then
-  // can_preallocate_during_iteration should be set to true.
-  void IteratePointersOnPage(PagedSpace* space, Page* page,
-                             RegionCallback region_callback,
-                             ObjectSlotCallback slot_callback);
-
-  void IteratePointersInStoreBuffer(ObjectSlotCallback slot_callback,
-                                    bool clear_maps);
+  void IteratePointersInStoreBuffer(ObjectSlotCallback slot_callback);
 
 #ifdef VERIFY_HEAP
   void VerifyPointers(LargeObjectSpace* space);
 #endif
 
-  friend class StoreBufferRebuildScope;
   friend class DontMoveStoreBufferEntriesScope;
+  friend class FindPointersToNewSpaceVisitor;
+  friend class StoreBufferRebuildScope;
+};
+
+
+class StoreBufferRebuilder {
+ public:
+  explicit StoreBufferRebuilder(StoreBuffer* store_buffer)
+      : store_buffer_(store_buffer) {}
+
+  void Callback(MemoryChunk* page, StoreBufferEvent event);
+
+ private:
+  StoreBuffer* store_buffer_;
+
+  // We record in this variable how full the store buffer was when we started
+  // iterating over the current page, finding pointers to new space.  If the
+  // store buffer overflows again we can exempt the page from the store buffer
+  // by rewinding to this point instead of having to search the store buffer.
+  Object*** start_of_current_page_;
+  // The current page we are scanning in the store buffer iterator.
+  MemoryChunk* current_page_;
 };
 
 
@@ -215,7 +216,7 @@
   StoreBuffer* store_buffer_;
   bool stored_state_;
 };
-}
-}  // namespace v8::internal
+}  // namespace internal
+}  // namespace v8
 
 #endif  // V8_STORE_BUFFER_H_