Upgrade to 3.29

Update V8 to 3.29.88.17 and update makefiles to support building on
all the relevant platforms.

Bug: 17370214

Change-Id: Ia3407c157fd8d72a93e23d8318ccaf6ecf77fa4e
diff --git a/src/heap/gc-idle-time-handler-unittest.cc b/src/heap/gc-idle-time-handler-unittest.cc
new file mode 100644
index 0000000..b4f2f74
--- /dev/null
+++ b/src/heap/gc-idle-time-handler-unittest.cc
@@ -0,0 +1,348 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <limits>
+
+#include "src/heap/gc-idle-time-handler.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
+class GCIdleTimeHandlerTest : public ::testing::Test {
+ public:
+  GCIdleTimeHandlerTest() {}
+  virtual ~GCIdleTimeHandlerTest() {}
+
+  GCIdleTimeHandler* handler() { return &handler_; }
+
+  GCIdleTimeHandler::HeapState DefaultHeapState() {
+    GCIdleTimeHandler::HeapState result;
+    result.contexts_disposed = 0;
+    result.size_of_objects = kSizeOfObjects;
+    result.incremental_marking_stopped = false;
+    result.can_start_incremental_marking = true;
+    result.sweeping_in_progress = false;
+    result.mark_compact_speed_in_bytes_per_ms = kMarkCompactSpeed;
+    result.incremental_marking_speed_in_bytes_per_ms = kMarkingSpeed;
+    result.scavenge_speed_in_bytes_per_ms = kScavengeSpeed;
+    result.available_new_space_memory = kNewSpaceCapacity;
+    result.new_space_capacity = kNewSpaceCapacity;
+    result.new_space_allocation_throughput_in_bytes_per_ms =
+        kNewSpaceAllocationThroughput;
+    return result;
+  }
+
+  static const size_t kSizeOfObjects = 100 * MB;
+  static const size_t kMarkCompactSpeed = 200 * KB;
+  static const size_t kMarkingSpeed = 200 * KB;
+  static const size_t kScavengeSpeed = 100 * KB;
+  static const size_t kNewSpaceCapacity = 1 * MB;
+  static const size_t kNewSpaceAllocationThroughput = 10 * KB;
+
+ private:
+  GCIdleTimeHandler handler_;
+};
+
+}  // namespace
+
+
+TEST(GCIdleTimeHandler, EstimateMarkingStepSizeInitial) {
+  size_t step_size = GCIdleTimeHandler::EstimateMarkingStepSize(1, 0);
+  EXPECT_EQ(
+      static_cast<size_t>(GCIdleTimeHandler::kInitialConservativeMarkingSpeed *
+                          GCIdleTimeHandler::kConservativeTimeRatio),
+      step_size);
+}
+
+
+TEST(GCIdleTimeHandler, EstimateMarkingStepSizeNonZero) {
+  size_t marking_speed_in_bytes_per_millisecond = 100;
+  size_t step_size = GCIdleTimeHandler::EstimateMarkingStepSize(
+      1, marking_speed_in_bytes_per_millisecond);
+  EXPECT_EQ(static_cast<size_t>(marking_speed_in_bytes_per_millisecond *
+                                GCIdleTimeHandler::kConservativeTimeRatio),
+            step_size);
+}
+
+
+TEST(GCIdleTimeHandler, EstimateMarkingStepSizeOverflow1) {
+  size_t step_size = GCIdleTimeHandler::EstimateMarkingStepSize(
+      10, std::numeric_limits<size_t>::max());
+  EXPECT_EQ(static_cast<size_t>(GCIdleTimeHandler::kMaximumMarkingStepSize),
+            step_size);
+}
+
+
+TEST(GCIdleTimeHandler, EstimateMarkingStepSizeOverflow2) {
+  size_t step_size = GCIdleTimeHandler::EstimateMarkingStepSize(
+      std::numeric_limits<size_t>::max(), 10);
+  EXPECT_EQ(static_cast<size_t>(GCIdleTimeHandler::kMaximumMarkingStepSize),
+            step_size);
+}
+
+
+TEST(GCIdleTimeHandler, EstimateMarkCompactTimeInitial) {
+  size_t size = 100 * MB;
+  size_t time = GCIdleTimeHandler::EstimateMarkCompactTime(size, 0);
+  EXPECT_EQ(size / GCIdleTimeHandler::kInitialConservativeMarkCompactSpeed,
+            time);
+}
+
+
+TEST(GCIdleTimeHandler, EstimateMarkCompactTimeNonZero) {
+  size_t size = 100 * MB;
+  size_t speed = 1 * MB;
+  size_t time = GCIdleTimeHandler::EstimateMarkCompactTime(size, speed);
+  EXPECT_EQ(size / speed, time);
+}
+
+
+TEST(GCIdleTimeHandler, EstimateMarkCompactTimeMax) {
+  size_t size = std::numeric_limits<size_t>::max();
+  size_t speed = 1;
+  size_t time = GCIdleTimeHandler::EstimateMarkCompactTime(size, speed);
+  EXPECT_EQ(GCIdleTimeHandler::kMaxMarkCompactTimeInMs, time);
+}
+
+
+TEST(GCIdleTimeHandler, EstimateScavengeTimeInitial) {
+  size_t size = 1 * MB;
+  size_t time = GCIdleTimeHandler::EstimateScavengeTime(size, 0);
+  EXPECT_EQ(size / GCIdleTimeHandler::kInitialConservativeScavengeSpeed, time);
+}
+
+
+TEST(GCIdleTimeHandler, EstimateScavengeTimeNonZero) {
+  size_t size = 1 * MB;
+  size_t speed = 1 * MB;
+  size_t time = GCIdleTimeHandler::EstimateScavengeTime(size, speed);
+  EXPECT_EQ(size / speed, time);
+}
+
+
+TEST(GCIdleTimeHandler, ScavangeMayHappenSoonInitial) {
+  size_t available = 100 * KB;
+  EXPECT_FALSE(GCIdleTimeHandler::ScavangeMayHappenSoon(available, 0));
+}
+
+
+TEST(GCIdleTimeHandler, ScavangeMayHappenSoonNonZeroFalse) {
+  size_t available = (GCIdleTimeHandler::kMaxFrameRenderingIdleTime + 1) * KB;
+  size_t speed = 1 * KB;
+  EXPECT_FALSE(GCIdleTimeHandler::ScavangeMayHappenSoon(available, speed));
+}
+
+
+TEST(GCIdleTimeHandler, ScavangeMayHappenSoonNonZeroTrue) {
+  size_t available = GCIdleTimeHandler::kMaxFrameRenderingIdleTime * KB;
+  size_t speed = 1 * KB;
+  EXPECT_TRUE(GCIdleTimeHandler::ScavangeMayHappenSoon(available, speed));
+}
+
+
+TEST_F(GCIdleTimeHandlerTest, AfterContextDisposeLargeIdleTime) {
+  GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+  heap_state.contexts_disposed = 1;
+  heap_state.incremental_marking_stopped = true;
+  size_t speed = heap_state.mark_compact_speed_in_bytes_per_ms;
+  int idle_time_ms =
+      static_cast<int>((heap_state.size_of_objects + speed - 1) / speed);
+  GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+  EXPECT_EQ(DO_FULL_GC, action.type);
+}
+
+
+TEST_F(GCIdleTimeHandlerTest, AfterContextDisposeSmallIdleTime1) {
+  GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+  heap_state.contexts_disposed = 1;
+  heap_state.incremental_marking_stopped = true;
+  size_t speed = heap_state.mark_compact_speed_in_bytes_per_ms;
+  int idle_time_ms = static_cast<int>(heap_state.size_of_objects / speed - 1);
+  GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+  EXPECT_EQ(DO_INCREMENTAL_MARKING, action.type);
+}
+
+
+TEST_F(GCIdleTimeHandlerTest, AfterContextDisposeSmallIdleTime2) {
+  GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+  heap_state.contexts_disposed = 1;
+  size_t speed = heap_state.mark_compact_speed_in_bytes_per_ms;
+  int idle_time_ms = static_cast<int>(heap_state.size_of_objects / speed - 1);
+  GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+  EXPECT_EQ(DO_INCREMENTAL_MARKING, action.type);
+}
+
+
+TEST_F(GCIdleTimeHandlerTest, IncrementalMarking1) {
+  GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+  size_t speed = heap_state.incremental_marking_speed_in_bytes_per_ms;
+  int idle_time_ms = 10;
+  GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+  EXPECT_EQ(DO_INCREMENTAL_MARKING, action.type);
+  EXPECT_GT(speed * static_cast<size_t>(idle_time_ms),
+            static_cast<size_t>(action.parameter));
+  EXPECT_LT(0, action.parameter);
+}
+
+
+TEST_F(GCIdleTimeHandlerTest, IncrementalMarking2) {
+  GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+  heap_state.incremental_marking_stopped = true;
+  size_t speed = heap_state.incremental_marking_speed_in_bytes_per_ms;
+  int idle_time_ms = 10;
+  GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+  EXPECT_EQ(DO_INCREMENTAL_MARKING, action.type);
+  EXPECT_GT(speed * static_cast<size_t>(idle_time_ms),
+            static_cast<size_t>(action.parameter));
+  EXPECT_LT(0, action.parameter);
+}
+
+
+TEST_F(GCIdleTimeHandlerTest, NotEnoughTime) {
+  GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+  heap_state.incremental_marking_stopped = true;
+  heap_state.can_start_incremental_marking = false;
+  size_t speed = heap_state.mark_compact_speed_in_bytes_per_ms;
+  int idle_time_ms = static_cast<int>(heap_state.size_of_objects / speed - 1);
+  GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+  EXPECT_EQ(DO_NOTHING, action.type);
+}
+
+
+TEST_F(GCIdleTimeHandlerTest, StopEventually1) {
+  GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+  heap_state.incremental_marking_stopped = true;
+  heap_state.can_start_incremental_marking = false;
+  size_t speed = heap_state.mark_compact_speed_in_bytes_per_ms;
+  int idle_time_ms = static_cast<int>(heap_state.size_of_objects / speed + 1);
+  for (int i = 0; i < GCIdleTimeHandler::kMaxMarkCompactsInIdleRound; i++) {
+    GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+    EXPECT_EQ(DO_FULL_GC, action.type);
+    handler()->NotifyIdleMarkCompact();
+  }
+  GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+  EXPECT_EQ(DONE, action.type);
+}
+
+
+TEST_F(GCIdleTimeHandlerTest, StopEventually2) {
+  GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+  int idle_time_ms = 10;
+  for (int i = 0; i < GCIdleTimeHandler::kMaxMarkCompactsInIdleRound; i++) {
+    GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+    EXPECT_EQ(DO_INCREMENTAL_MARKING, action.type);
+    // In this case we emulate incremental marking steps that finish with a
+    // full gc.
+    handler()->NotifyIdleMarkCompact();
+  }
+  heap_state.can_start_incremental_marking = false;
+  GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+  EXPECT_EQ(DONE, action.type);
+}
+
+
+TEST_F(GCIdleTimeHandlerTest, ContinueAfterStop1) {
+  GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+  heap_state.incremental_marking_stopped = true;
+  heap_state.can_start_incremental_marking = false;
+  size_t speed = heap_state.mark_compact_speed_in_bytes_per_ms;
+  int idle_time_ms = static_cast<int>(heap_state.size_of_objects / speed + 1);
+  for (int i = 0; i < GCIdleTimeHandler::kMaxMarkCompactsInIdleRound; i++) {
+    GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+    EXPECT_EQ(DO_FULL_GC, action.type);
+    handler()->NotifyIdleMarkCompact();
+  }
+  GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+  EXPECT_EQ(DONE, action.type);
+  // Emulate mutator work.
+  for (int i = 0; i < GCIdleTimeHandler::kIdleScavengeThreshold; i++) {
+    handler()->NotifyScavenge();
+  }
+  action = handler()->Compute(idle_time_ms, heap_state);
+  EXPECT_EQ(DO_FULL_GC, action.type);
+}
+
+
+TEST_F(GCIdleTimeHandlerTest, ContinueAfterStop2) {
+  GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+  int idle_time_ms = 10;
+  for (int i = 0; i < GCIdleTimeHandler::kMaxMarkCompactsInIdleRound; i++) {
+    GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+    if (action.type == DONE) break;
+    EXPECT_EQ(DO_INCREMENTAL_MARKING, action.type);
+    // In this case we try to emulate incremental marking steps that finish
+    // with a full gc.
+    handler()->NotifyIdleMarkCompact();
+  }
+  heap_state.can_start_incremental_marking = false;
+  GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+  EXPECT_EQ(DONE, action.type);
+  // Emulate mutator work.
+  for (int i = 0; i < GCIdleTimeHandler::kIdleScavengeThreshold; i++) {
+    handler()->NotifyScavenge();
+  }
+  heap_state.can_start_incremental_marking = true;
+  action = handler()->Compute(idle_time_ms, heap_state);
+  EXPECT_EQ(DO_INCREMENTAL_MARKING, action.type);
+}
+
+
+TEST_F(GCIdleTimeHandlerTest, Scavenge) {
+  GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+  int idle_time_ms = 10;
+  heap_state.available_new_space_memory =
+      kNewSpaceAllocationThroughput * idle_time_ms;
+  GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+  EXPECT_EQ(DO_SCAVENGE, action.type);
+}
+
+
+TEST_F(GCIdleTimeHandlerTest, ScavengeAndDone) {
+  GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+  int idle_time_ms = 10;
+  heap_state.can_start_incremental_marking = false;
+  heap_state.incremental_marking_stopped = true;
+  heap_state.available_new_space_memory =
+      kNewSpaceAllocationThroughput * idle_time_ms;
+  GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+  EXPECT_EQ(DO_SCAVENGE, action.type);
+  heap_state.available_new_space_memory = kNewSpaceCapacity;
+  action = handler()->Compute(idle_time_ms, heap_state);
+  EXPECT_EQ(DO_NOTHING, action.type);
+}
+
+
+TEST_F(GCIdleTimeHandlerTest, ZeroIdleTimeNothingToDo) {
+  GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+  int idle_time_ms = 0;
+  GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+  EXPECT_EQ(DO_NOTHING, action.type);
+}
+
+
+TEST_F(GCIdleTimeHandlerTest, ZeroIdleTimeDoNothingButStartIdleRound) {
+  GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+  int idle_time_ms = 10;
+  for (int i = 0; i < GCIdleTimeHandler::kMaxMarkCompactsInIdleRound; i++) {
+    GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+    if (action.type == DONE) break;
+    EXPECT_EQ(DO_INCREMENTAL_MARKING, action.type);
+    // In this case we try to emulate incremental marking steps that finish
+    // with a full gc.
+    handler()->NotifyIdleMarkCompact();
+  }
+  GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+  // Emulate mutator work.
+  for (int i = 0; i < GCIdleTimeHandler::kIdleScavengeThreshold; i++) {
+    handler()->NotifyScavenge();
+  }
+  action = handler()->Compute(0, heap_state);
+  EXPECT_EQ(DO_NOTHING, action.type);
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/heap/gc-idle-time-handler.cc b/src/heap/gc-idle-time-handler.cc
new file mode 100644
index 0000000..b9a99b2
--- /dev/null
+++ b/src/heap/gc-idle-time-handler.cc
@@ -0,0 +1,174 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/gc-idle-time-handler.h"
+#include "src/heap/gc-tracer.h"
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+
+const double GCIdleTimeHandler::kConservativeTimeRatio = 0.9;
+const size_t GCIdleTimeHandler::kMaxMarkCompactTimeInMs = 1000;
+const size_t GCIdleTimeHandler::kMinTimeForFinalizeSweeping = 100;
+const int GCIdleTimeHandler::kMaxMarkCompactsInIdleRound = 7;
+const int GCIdleTimeHandler::kIdleScavengeThreshold = 5;
+
+
+void GCIdleTimeAction::Print() {
+  switch (type) {
+    case DONE:
+      PrintF("done");
+      break;
+    case DO_NOTHING:
+      PrintF("no action");
+      break;
+    case DO_INCREMENTAL_MARKING:
+      PrintF("incremental marking with step %" V8_PTR_PREFIX "d", parameter);
+      break;
+    case DO_SCAVENGE:
+      PrintF("scavenge");
+      break;
+    case DO_FULL_GC:
+      PrintF("full GC");
+      break;
+    case DO_FINALIZE_SWEEPING:
+      PrintF("finalize sweeping");
+      break;
+  }
+}
+
+
+size_t GCIdleTimeHandler::EstimateMarkingStepSize(
+    size_t idle_time_in_ms, size_t marking_speed_in_bytes_per_ms) {
+  DCHECK(idle_time_in_ms > 0);
+
+  if (marking_speed_in_bytes_per_ms == 0) {
+    marking_speed_in_bytes_per_ms = kInitialConservativeMarkingSpeed;
+  }
+
+  size_t marking_step_size = marking_speed_in_bytes_per_ms * idle_time_in_ms;
+  if (marking_step_size / marking_speed_in_bytes_per_ms != idle_time_in_ms) {
+    // In the case of an overflow we return the maximum marking step size.
+    return kMaximumMarkingStepSize;
+  }
+
+  if (marking_step_size > kMaximumMarkingStepSize)
+    return kMaximumMarkingStepSize;
+
+  return static_cast<size_t>(marking_step_size * kConservativeTimeRatio);
+}
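+
+// Worked example (editorial illustration, not part of the upstream change):
+// with a recorded marking speed of 200 KB/ms and 10 ms of idle time, the raw
+// step would be 2000 KB; scaling by kConservativeTimeRatio (0.9) yields a
+// step size of about 1800 KB. A speed of zero falls back to
+// kInitialConservativeMarkingSpeed, and an overflow or an oversized result
+// is clamped to kMaximumMarkingStepSize.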
+
+
+size_t GCIdleTimeHandler::EstimateMarkCompactTime(
+    size_t size_of_objects, size_t mark_compact_speed_in_bytes_per_ms) {
+  if (mark_compact_speed_in_bytes_per_ms == 0) {
+    mark_compact_speed_in_bytes_per_ms = kInitialConservativeMarkCompactSpeed;
+  }
+  size_t result = size_of_objects / mark_compact_speed_in_bytes_per_ms;
+  return Min(result, kMaxMarkCompactTimeInMs);
+}
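+
+// Worked example (editorial illustration, not part of the upstream change):
+// a 100 MB heap at the initial conservative speed of 2 MB/ms is estimated at
+// 50 ms; estimates are capped at kMaxMarkCompactTimeInMs (1000 ms).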
+
+
+size_t GCIdleTimeHandler::EstimateScavengeTime(
+    size_t new_space_size, size_t scavenge_speed_in_bytes_per_ms) {
+  if (scavenge_speed_in_bytes_per_ms == 0) {
+    scavenge_speed_in_bytes_per_ms = kInitialConservativeScavengeSpeed;
+  }
+  return new_space_size / scavenge_speed_in_bytes_per_ms;
+}
+
+
+bool GCIdleTimeHandler::ScavangeMayHappenSoon(
+    size_t available_new_space_memory,
+    size_t new_space_allocation_throughput_in_bytes_per_ms) {
+  if (available_new_space_memory <=
+      new_space_allocation_throughput_in_bytes_per_ms *
+          kMaxFrameRenderingIdleTime) {
+    return true;
+  }
+  return false;
+}
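+
+// Worked example (editorial illustration, not part of the upstream change):
+// with an allocation throughput of 10 KB/ms -- an assumed figure, matching
+// the unit tests -- and kMaxFrameRenderingIdleTime of 16 ms, the new space
+// counts as filling up soon once 160 KB or less remain available.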
+
+
+// The following logic is implemented by the controller:
+// (1) If the new space is almost full and we can afford a Scavenge, then a
+// Scavenge is performed.
+// (2) If there is currently no MarkCompact idle round going on, we start a
+// new idle round if enough garbage was created or we received a context
+// disposal event. Otherwise we do not perform garbage collection to keep
+// system utilization low.
+// (3) If incremental marking is done, we perform a full garbage collection
+// if context was disposed or if we are allowed to still do full garbage
+// collections during this idle round or if we are not allowed to start
+// incremental marking. Otherwise we do not perform garbage collection to
+// keep system utilization low.
+// (4) If sweeping is in progress and we received a large enough idle time
+// request, we finalize sweeping here.
+// (5) If incremental marking is in progress, we perform a marking step. Note
+// that this currently may trigger a full garbage collection.
+GCIdleTimeAction GCIdleTimeHandler::Compute(size_t idle_time_in_ms,
+                                            HeapState heap_state) {
+  if (idle_time_in_ms <= kMaxFrameRenderingIdleTime &&
+      ScavangeMayHappenSoon(
+          heap_state.available_new_space_memory,
+          heap_state.new_space_allocation_throughput_in_bytes_per_ms) &&
+      idle_time_in_ms >=
+          EstimateScavengeTime(heap_state.new_space_capacity,
+                               heap_state.scavenge_speed_in_bytes_per_ms)) {
+    return GCIdleTimeAction::Scavenge();
+  }
+  if (IsMarkCompactIdleRoundFinished()) {
+    if (EnoughGarbageSinceLastIdleRound() || heap_state.contexts_disposed > 0) {
+      StartIdleRound();
+    } else {
+      return GCIdleTimeAction::Done();
+    }
+  }
+
+  if (idle_time_in_ms == 0) {
+    return GCIdleTimeAction::Nothing();
+  }
+
+  if (heap_state.incremental_marking_stopped) {
+    size_t estimated_time_in_ms =
+        EstimateMarkCompactTime(heap_state.size_of_objects,
+                                heap_state.mark_compact_speed_in_bytes_per_ms);
+    if (idle_time_in_ms >= estimated_time_in_ms ||
+        (heap_state.size_of_objects < kSmallHeapSize &&
+         heap_state.contexts_disposed > 0)) {
+      // If there are no more than two GCs left in this idle round and we are
+      // allowed to do a full GC, then make those GCs full in order to compact
+      // the code space.
+      // TODO(ulan): Once we enable code compaction for incremental marking, we
+      // can get rid of this special case and always start incremental marking.
+      int remaining_mark_sweeps =
+          kMaxMarkCompactsInIdleRound - mark_compacts_since_idle_round_started_;
+      if (heap_state.contexts_disposed > 0 ||
+          (idle_time_in_ms > kMaxFrameRenderingIdleTime &&
+           (remaining_mark_sweeps <= 2 ||
+            !heap_state.can_start_incremental_marking))) {
+        return GCIdleTimeAction::FullGC();
+      }
+    }
+    if (!heap_state.can_start_incremental_marking) {
+      return GCIdleTimeAction::Nothing();
+    }
+  }
+  // TODO(hpayer): Estimate finalize sweeping time.
+  if (heap_state.sweeping_in_progress &&
+      idle_time_in_ms >= kMinTimeForFinalizeSweeping) {
+    return GCIdleTimeAction::FinalizeSweeping();
+  }
+
+  if (heap_state.incremental_marking_stopped &&
+      !heap_state.can_start_incremental_marking) {
+    return GCIdleTimeAction::Nothing();
+  }
+  size_t step_size = EstimateMarkingStepSize(
+      idle_time_in_ms, heap_state.incremental_marking_speed_in_bytes_per_ms);
+  return GCIdleTimeAction::IncrementalMarking(step_size);
+}
+}  // namespace internal
+}  // namespace v8
diff --git a/src/heap/gc-idle-time-handler.h b/src/heap/gc-idle-time-handler.h
new file mode 100644
index 0000000..daab616
--- /dev/null
+++ b/src/heap/gc-idle-time-handler.h
@@ -0,0 +1,188 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_GC_IDLE_TIME_HANDLER_H_
+#define V8_HEAP_GC_IDLE_TIME_HANDLER_H_
+
+#include "src/globals.h"
+
+namespace v8 {
+namespace internal {
+
+enum GCIdleTimeActionType {
+  DONE,
+  DO_NOTHING,
+  DO_INCREMENTAL_MARKING,
+  DO_SCAVENGE,
+  DO_FULL_GC,
+  DO_FINALIZE_SWEEPING
+};
+
+
+class GCIdleTimeAction {
+ public:
+  static GCIdleTimeAction Done() {
+    GCIdleTimeAction result;
+    result.type = DONE;
+    result.parameter = 0;
+    return result;
+  }
+
+  static GCIdleTimeAction Nothing() {
+    GCIdleTimeAction result;
+    result.type = DO_NOTHING;
+    result.parameter = 0;
+    return result;
+  }
+
+  static GCIdleTimeAction IncrementalMarking(intptr_t step_size) {
+    GCIdleTimeAction result;
+    result.type = DO_INCREMENTAL_MARKING;
+    result.parameter = step_size;
+    return result;
+  }
+
+  static GCIdleTimeAction Scavenge() {
+    GCIdleTimeAction result;
+    result.type = DO_SCAVENGE;
+    result.parameter = 0;
+    return result;
+  }
+
+  static GCIdleTimeAction FullGC() {
+    GCIdleTimeAction result;
+    result.type = DO_FULL_GC;
+    result.parameter = 0;
+    return result;
+  }
+
+  static GCIdleTimeAction FinalizeSweeping() {
+    GCIdleTimeAction result;
+    result.type = DO_FINALIZE_SWEEPING;
+    result.parameter = 0;
+    return result;
+  }
+
+  void Print();
+
+  GCIdleTimeActionType type;
+  intptr_t parameter;
+};
+
+
+class GCTracer;
+
+// The idle time handler makes decisions about which garbage collection
+// operations to execute during an IdleNotification.
+class GCIdleTimeHandler {
+ public:
+  // If we haven't recorded any incremental marking events yet, we use a
+  // conservative lower bound for the marking speed.
+  static const size_t kInitialConservativeMarkingSpeed = 100 * KB;
+
+  // Maximum marking step size returned by EstimateMarkingStepSize.
+  static const size_t kMaximumMarkingStepSize = 700 * MB;
+
+  // We have to make sure that we finish the IdleNotification before
+  // idle_time_in_ms. Hence, we conservatively prune our workload estimate.
+  static const double kConservativeTimeRatio;
+
+  // If we haven't recorded any mark-compact events yet, we use a
+  // conservative lower bound for the mark-compact speed.
+  static const size_t kInitialConservativeMarkCompactSpeed = 2 * MB;
+
+  // Maximum mark-compact time returned by EstimateMarkCompactTime.
+  static const size_t kMaxMarkCompactTimeInMs;
+
+  // Minimum time to finalize the sweeping phase. The main thread may wait
+  // for sweeper threads.
+  static const size_t kMinTimeForFinalizeSweeping;
+
+  // Number of idle mark-compact events after which the idle handler will
+  // finish the idle round.
+  static const int kMaxMarkCompactsInIdleRound;
+
+  // Number of scavenges that will trigger the start of a new idle round.
+  static const int kIdleScavengeThreshold;
+
+  // Heap size threshold below which we prefer mark-compact over incremental
+  // step.
+  static const size_t kSmallHeapSize = 4 * kPointerSize * MB;
+
+  // This is the maximum idle time we will have during frame rendering.
+  static const size_t kMaxFrameRenderingIdleTime = 16;
+
+  // If less than this much memory is left in the new space, we consider it
+  // almost full and force a new space collection earlier in the idle time.
+  static const size_t kNewSpaceAlmostFullTreshold = 100 * KB;
+
+  // If we haven't recorded any scavenger events yet, we use a conservative
+  // lower bound for the scavenger speed.
+  static const size_t kInitialConservativeScavengeSpeed = 100 * KB;
+
+  struct HeapState {
+    int contexts_disposed;
+    size_t size_of_objects;
+    bool incremental_marking_stopped;
+    bool can_start_incremental_marking;
+    bool sweeping_in_progress;
+    size_t mark_compact_speed_in_bytes_per_ms;
+    size_t incremental_marking_speed_in_bytes_per_ms;
+    size_t scavenge_speed_in_bytes_per_ms;
+    size_t available_new_space_memory;
+    size_t new_space_capacity;
+    size_t new_space_allocation_throughput_in_bytes_per_ms;
+  };
+
+  GCIdleTimeHandler()
+      : mark_compacts_since_idle_round_started_(0),
+        scavenges_since_last_idle_round_(0) {}
+
+  GCIdleTimeAction Compute(size_t idle_time_in_ms, HeapState heap_state);
+
+  void NotifyIdleMarkCompact() {
+    if (mark_compacts_since_idle_round_started_ < kMaxMarkCompactsInIdleRound) {
+      ++mark_compacts_since_idle_round_started_;
+      if (mark_compacts_since_idle_round_started_ ==
+          kMaxMarkCompactsInIdleRound) {
+        scavenges_since_last_idle_round_ = 0;
+      }
+    }
+  }
+
+  void NotifyScavenge() { ++scavenges_since_last_idle_round_; }
+
+  static size_t EstimateMarkingStepSize(size_t idle_time_in_ms,
+                                        size_t marking_speed_in_bytes_per_ms);
+
+  static size_t EstimateMarkCompactTime(
+      size_t size_of_objects, size_t mark_compact_speed_in_bytes_per_ms);
+
+  static size_t EstimateScavengeTime(size_t new_space_size,
+                                     size_t scavenger_speed_in_bytes_per_ms);
+
+  static bool ScavangeMayHappenSoon(
+      size_t available_new_space_memory,
+      size_t new_space_allocation_throughput_in_bytes_per_ms);
+
+ private:
+  void StartIdleRound() { mark_compacts_since_idle_round_started_ = 0; }
+  bool IsMarkCompactIdleRoundFinished() {
+    return mark_compacts_since_idle_round_started_ ==
+           kMaxMarkCompactsInIdleRound;
+  }
+  bool EnoughGarbageSinceLastIdleRound() {
+    return scavenges_since_last_idle_round_ >= kIdleScavengeThreshold;
+  }
+
+  int mark_compacts_since_idle_round_started_;
+  int scavenges_since_last_idle_round_;
+
+  DISALLOW_COPY_AND_ASSIGN(GCIdleTimeHandler);
+};
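+
+// Illustrative usage sketch (editorial addition; the surrounding heap
+// integration is hypothetical and simplified, not the actual embedder code):
+//
+//   GCIdleTimeHandler handler;
+//   GCIdleTimeHandler::HeapState state = CollectHeapState();  // hypothetical
+//   GCIdleTimeAction action = handler.Compute(idle_time_in_ms, state);
+//   switch (action.type) {
+//     case DO_INCREMENTAL_MARKING:
+//       // Perform a marking step of at most action.parameter bytes.
+//       break;
+//     case DO_FULL_GC:
+//       // Run a full mark-compact, then report it so the handler can count
+//       // it against the current idle round.
+//       handler.NotifyIdleMarkCompact();
+//       break;
+//     case DO_SCAVENGE:
+//     case DO_FINALIZE_SWEEPING:
+//     case DO_NOTHING:
+//     case DONE:
+//       break;
+//   }
+//
+// NotifyScavenge() is expected to be called after every scavenge so that
+// EnoughGarbageSinceLastIdleRound() can eventually start a new idle round.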
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_HEAP_GC_IDLE_TIME_HANDLER_H_
diff --git a/src/heap/gc-tracer.cc b/src/heap/gc-tracer.cc
new file mode 100644
index 0000000..8a40b53
--- /dev/null
+++ b/src/heap/gc-tracer.cc
@@ -0,0 +1,480 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/heap/gc-tracer.h"
+
+namespace v8 {
+namespace internal {
+
+static intptr_t CountTotalHolesSize(Heap* heap) {
+  intptr_t holes_size = 0;
+  OldSpaces spaces(heap);
+  for (OldSpace* space = spaces.next(); space != NULL; space = spaces.next()) {
+    holes_size += space->Waste() + space->Available();
+  }
+  return holes_size;
+}
+
+
+GCTracer::AllocationEvent::AllocationEvent(double duration,
+                                           intptr_t allocation_in_bytes) {
+  duration_ = duration;
+  allocation_in_bytes_ = allocation_in_bytes;
+}
+
+
+GCTracer::Event::Event(Type type, const char* gc_reason,
+                       const char* collector_reason)
+    : type(type),
+      gc_reason(gc_reason),
+      collector_reason(collector_reason),
+      start_time(0.0),
+      end_time(0.0),
+      start_object_size(0),
+      end_object_size(0),
+      start_memory_size(0),
+      end_memory_size(0),
+      start_holes_size(0),
+      end_holes_size(0),
+      cumulative_incremental_marking_steps(0),
+      incremental_marking_steps(0),
+      cumulative_incremental_marking_bytes(0),
+      incremental_marking_bytes(0),
+      cumulative_incremental_marking_duration(0.0),
+      incremental_marking_duration(0.0),
+      cumulative_pure_incremental_marking_duration(0.0),
+      pure_incremental_marking_duration(0.0),
+      longest_incremental_marking_step(0.0) {
+  for (int i = 0; i < Scope::NUMBER_OF_SCOPES; i++) {
+    scopes[i] = 0;
+  }
+}
+
+
+const char* GCTracer::Event::TypeName(bool short_name) const {
+  switch (type) {
+    case SCAVENGER:
+      if (short_name) {
+        return "s";
+      } else {
+        return "Scavenge";
+      }
+    case MARK_COMPACTOR:
+      if (short_name) {
+        return "ms";
+      } else {
+        return "Mark-sweep";
+      }
+    case START:
+      if (short_name) {
+        return "st";
+      } else {
+        return "Start";
+      }
+  }
+  return "Unknown Event Type";
+}
+
+
+GCTracer::GCTracer(Heap* heap)
+    : heap_(heap),
+      cumulative_incremental_marking_steps_(0),
+      cumulative_incremental_marking_bytes_(0),
+      cumulative_incremental_marking_duration_(0.0),
+      cumulative_pure_incremental_marking_duration_(0.0),
+      longest_incremental_marking_step_(0.0),
+      cumulative_marking_duration_(0.0),
+      cumulative_sweeping_duration_(0.0),
+      new_space_top_after_gc_(0) {
+  current_ = Event(Event::START, NULL, NULL);
+  current_.end_time = base::OS::TimeCurrentMillis();
+  previous_ = previous_mark_compactor_event_ = current_;
+}
+
+
+void GCTracer::Start(GarbageCollector collector, const char* gc_reason,
+                     const char* collector_reason) {
+  previous_ = current_;
+  double start_time = base::OS::TimeCurrentMillis();
+  if (new_space_top_after_gc_ != 0) {
+    AddNewSpaceAllocationTime(
+        start_time - previous_.end_time,
+        reinterpret_cast<intptr_t>((heap_->new_space()->top()) -
+                                   new_space_top_after_gc_));
+  }
+  if (current_.type == Event::MARK_COMPACTOR)
+    previous_mark_compactor_event_ = current_;
+
+  if (collector == SCAVENGER) {
+    current_ = Event(Event::SCAVENGER, gc_reason, collector_reason);
+  } else {
+    current_ = Event(Event::MARK_COMPACTOR, gc_reason, collector_reason);
+  }
+
+  current_.start_time = start_time;
+  current_.start_object_size = heap_->SizeOfObjects();
+  current_.start_memory_size = heap_->isolate()->memory_allocator()->Size();
+  current_.start_holes_size = CountTotalHolesSize(heap_);
+  current_.new_space_object_size =
+      heap_->new_space()->top() - heap_->new_space()->bottom();
+
+  current_.cumulative_incremental_marking_steps =
+      cumulative_incremental_marking_steps_;
+  current_.cumulative_incremental_marking_bytes =
+      cumulative_incremental_marking_bytes_;
+  current_.cumulative_incremental_marking_duration =
+      cumulative_incremental_marking_duration_;
+  current_.cumulative_pure_incremental_marking_duration =
+      cumulative_pure_incremental_marking_duration_;
+  current_.longest_incremental_marking_step = longest_incremental_marking_step_;
+
+  for (int i = 0; i < Scope::NUMBER_OF_SCOPES; i++) {
+    current_.scopes[i] = 0;
+  }
+}
+
+
+void GCTracer::Stop() {
+  current_.end_time = base::OS::TimeCurrentMillis();
+  current_.end_object_size = heap_->SizeOfObjects();
+  current_.end_memory_size = heap_->isolate()->memory_allocator()->Size();
+  current_.end_holes_size = CountTotalHolesSize(heap_);
+  new_space_top_after_gc_ =
+      reinterpret_cast<intptr_t>(heap_->new_space()->top());
+
+  if (current_.type == Event::SCAVENGER) {
+    current_.incremental_marking_steps =
+        current_.cumulative_incremental_marking_steps -
+        previous_.cumulative_incremental_marking_steps;
+    current_.incremental_marking_bytes =
+        current_.cumulative_incremental_marking_bytes -
+        previous_.cumulative_incremental_marking_bytes;
+    current_.incremental_marking_duration =
+        current_.cumulative_incremental_marking_duration -
+        previous_.cumulative_incremental_marking_duration;
+    current_.pure_incremental_marking_duration =
+        current_.cumulative_pure_incremental_marking_duration -
+        previous_.cumulative_pure_incremental_marking_duration;
+    scavenger_events_.push_front(current_);
+  } else {
+    current_.incremental_marking_steps =
+        current_.cumulative_incremental_marking_steps -
+        previous_mark_compactor_event_.cumulative_incremental_marking_steps;
+    current_.incremental_marking_bytes =
+        current_.cumulative_incremental_marking_bytes -
+        previous_mark_compactor_event_.cumulative_incremental_marking_bytes;
+    current_.incremental_marking_duration =
+        current_.cumulative_incremental_marking_duration -
+        previous_mark_compactor_event_.cumulative_incremental_marking_duration;
+    current_.pure_incremental_marking_duration =
+        current_.cumulative_pure_incremental_marking_duration -
+        previous_mark_compactor_event_
+            .cumulative_pure_incremental_marking_duration;
+    longest_incremental_marking_step_ = 0.0;
+    mark_compactor_events_.push_front(current_);
+  }
+
+  // TODO(ernstm): move the code below out of GCTracer.
+
+  if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
+
+  double duration = current_.end_time - current_.start_time;
+  double spent_in_mutator = Max(current_.start_time - previous_.end_time, 0.0);
+
+  heap_->UpdateCumulativeGCStatistics(duration, spent_in_mutator,
+                                      current_.scopes[Scope::MC_MARK]);
+
+  if (current_.type == Event::SCAVENGER && FLAG_trace_gc_ignore_scavenger)
+    return;
+
+  if (FLAG_trace_gc) {
+    if (FLAG_trace_gc_nvp)
+      PrintNVP();
+    else
+      Print();
+
+    heap_->PrintShortHeapStatistics();
+  }
+}
+
+
+void GCTracer::AddNewSpaceAllocationTime(double duration,
+                                         intptr_t allocation_in_bytes) {
+  allocation_events_.push_front(AllocationEvent(duration, allocation_in_bytes));
+}
+
+
+void GCTracer::AddIncrementalMarkingStep(double duration, intptr_t bytes) {
+  cumulative_incremental_marking_steps_++;
+  cumulative_incremental_marking_bytes_ += bytes;
+  cumulative_incremental_marking_duration_ += duration;
+  longest_incremental_marking_step_ =
+      Max(longest_incremental_marking_step_, duration);
+  cumulative_marking_duration_ += duration;
+  if (bytes > 0) {
+    cumulative_pure_incremental_marking_duration_ += duration;
+  }
+}
+
+
+void GCTracer::Print() const {
+  PrintPID("%8.0f ms: ", heap_->isolate()->time_millis_since_init());
+
+  PrintF("%s %.1f (%.1f) -> %.1f (%.1f) MB, ", current_.TypeName(false),
+         static_cast<double>(current_.start_object_size) / MB,
+         static_cast<double>(current_.start_memory_size) / MB,
+         static_cast<double>(current_.end_object_size) / MB,
+         static_cast<double>(current_.end_memory_size) / MB);
+
+  int external_time = static_cast<int>(current_.scopes[Scope::EXTERNAL]);
+  if (external_time > 0) PrintF("%d / ", external_time);
+
+  double duration = current_.end_time - current_.start_time;
+  PrintF("%.1f ms", duration);
+  if (current_.type == Event::SCAVENGER) {
+    if (current_.incremental_marking_steps > 0) {
+      PrintF(" (+ %.1f ms in %d steps since last GC)",
+             current_.incremental_marking_duration,
+             current_.incremental_marking_steps);
+    }
+  } else {
+    if (current_.incremental_marking_steps > 0) {
+      PrintF(
+          " (+ %.1f ms in %d steps since start of marking, "
+          "biggest step %.1f ms)",
+          current_.incremental_marking_duration,
+          current_.incremental_marking_steps,
+          current_.longest_incremental_marking_step);
+    }
+  }
+
+  if (current_.gc_reason != NULL) {
+    PrintF(" [%s]", current_.gc_reason);
+  }
+
+  if (current_.collector_reason != NULL) {
+    PrintF(" [%s]", current_.collector_reason);
+  }
+
+  PrintF(".\n");
+}
+
+
+void GCTracer::PrintNVP() const {
+  PrintPID("%8.0f ms: ", heap_->isolate()->time_millis_since_init());
+
+  double duration = current_.end_time - current_.start_time;
+  double spent_in_mutator = current_.start_time - previous_.end_time;
+
+  PrintF("pause=%.1f ", duration);
+  PrintF("mutator=%.1f ", spent_in_mutator);
+  PrintF("gc=%s ", current_.TypeName(true));
+
+  PrintF("external=%.1f ", current_.scopes[Scope::EXTERNAL]);
+  PrintF("mark=%.1f ", current_.scopes[Scope::MC_MARK]);
+  PrintF("sweep=%.2f ", current_.scopes[Scope::MC_SWEEP]);
+  PrintF("sweepns=%.2f ", current_.scopes[Scope::MC_SWEEP_NEWSPACE]);
+  PrintF("sweepos=%.2f ", current_.scopes[Scope::MC_SWEEP_OLDSPACE]);
+  PrintF("sweepcode=%.2f ", current_.scopes[Scope::MC_SWEEP_CODE]);
+  PrintF("sweepcell=%.2f ", current_.scopes[Scope::MC_SWEEP_CELL]);
+  PrintF("sweepmap=%.2f ", current_.scopes[Scope::MC_SWEEP_MAP]);
+  PrintF("evacuate=%.1f ", current_.scopes[Scope::MC_EVACUATE_PAGES]);
+  PrintF("new_new=%.1f ",
+         current_.scopes[Scope::MC_UPDATE_NEW_TO_NEW_POINTERS]);
+  PrintF("root_new=%.1f ",
+         current_.scopes[Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS]);
+  PrintF("old_new=%.1f ",
+         current_.scopes[Scope::MC_UPDATE_OLD_TO_NEW_POINTERS]);
+  PrintF("compaction_ptrs=%.1f ",
+         current_.scopes[Scope::MC_UPDATE_POINTERS_TO_EVACUATED]);
+  PrintF("intracompaction_ptrs=%.1f ",
+         current_.scopes[Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED]);
+  PrintF("misc_compaction=%.1f ",
+         current_.scopes[Scope::MC_UPDATE_MISC_POINTERS]);
+  PrintF("weakcollection_process=%.1f ",
+         current_.scopes[Scope::MC_WEAKCOLLECTION_PROCESS]);
+  PrintF("weakcollection_clear=%.1f ",
+         current_.scopes[Scope::MC_WEAKCOLLECTION_CLEAR]);
+  PrintF("weakcollection_abort=%.1f ",
+         current_.scopes[Scope::MC_WEAKCOLLECTION_ABORT]);
+
+  PrintF("total_size_before=%" V8_PTR_PREFIX "d ", current_.start_object_size);
+  PrintF("total_size_after=%" V8_PTR_PREFIX "d ", current_.end_object_size);
+  PrintF("holes_size_before=%" V8_PTR_PREFIX "d ", current_.start_holes_size);
+  PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", current_.end_holes_size);
+
+  intptr_t allocated_since_last_gc =
+      current_.start_object_size - previous_.end_object_size;
+  PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc);
+  PrintF("promoted=%" V8_PTR_PREFIX "d ", heap_->promoted_objects_size_);
+  PrintF("semi_space_copied=%" V8_PTR_PREFIX "d ",
+         heap_->semi_space_copied_object_size_);
+  PrintF("nodes_died_in_new=%d ", heap_->nodes_died_in_new_space_);
+  PrintF("nodes_copied_in_new=%d ", heap_->nodes_copied_in_new_space_);
+  PrintF("nodes_promoted=%d ", heap_->nodes_promoted_);
+  PrintF("promotion_rate=%.1f%% ", heap_->promotion_rate_);
+  PrintF("semi_space_copy_rate=%.1f%% ", heap_->semi_space_copied_rate_);
+  PrintF("new_space_allocation_throughput=%" V8_PTR_PREFIX "d ",
+         NewSpaceAllocationThroughputInBytesPerMillisecond());
+
+  if (current_.type == Event::SCAVENGER) {
+    PrintF("steps_count=%d ", current_.incremental_marking_steps);
+    PrintF("steps_took=%.1f ", current_.incremental_marking_duration);
+    PrintF("scavenge_throughput=%" V8_PTR_PREFIX "d ",
+           ScavengeSpeedInBytesPerMillisecond());
+  } else {
+    PrintF("steps_count=%d ", current_.incremental_marking_steps);
+    PrintF("steps_took=%.1f ", current_.incremental_marking_duration);
+    PrintF("longest_step=%.1f ", current_.longest_incremental_marking_step);
+    PrintF("incremental_marking_throughput=%" V8_PTR_PREFIX "d ",
+           IncrementalMarkingSpeedInBytesPerMillisecond());
+  }
+
+  PrintF("\n");
+}
+
+
+double GCTracer::MeanDuration(const EventBuffer& events) const {
+  if (events.empty()) return 0.0;
+
+  double mean = 0.0;
+  EventBuffer::const_iterator iter = events.begin();
+  while (iter != events.end()) {
+    mean += iter->end_time - iter->start_time;
+    ++iter;
+  }
+
+  return mean / events.size();
+}
+
+
+double GCTracer::MaxDuration(const EventBuffer& events) const {
+  if (events.empty()) return 0.0;
+
+  double maximum = 0.0;
+  EventBuffer::const_iterator iter = events.begin();
+  while (iter != events.end()) {
+    maximum = Max(iter->end_time - iter->start_time, maximum);
+    ++iter;
+  }
+
+  return maximum;
+}
+
+
+double GCTracer::MeanIncrementalMarkingDuration() const {
+  if (cumulative_incremental_marking_steps_ == 0) return 0.0;
+
+  // We haven't completed an entire round of incremental marking yet.
+  // Use data from GCTracer instead of data from event buffers.
+  if (mark_compactor_events_.empty()) {
+    return cumulative_incremental_marking_duration_ /
+           cumulative_incremental_marking_steps_;
+  }
+
+  int steps = 0;
+  double durations = 0.0;
+  EventBuffer::const_iterator iter = mark_compactor_events_.begin();
+  while (iter != mark_compactor_events_.end()) {
+    steps += iter->incremental_marking_steps;
+    durations += iter->incremental_marking_duration;
+    ++iter;
+  }
+
+  if (steps == 0) return 0.0;
+
+  return durations / steps;
+}
+
+
+double GCTracer::MaxIncrementalMarkingDuration() const {
+  // We haven't completed an entire round of incremental marking yet.
+  // Use data from GCTracer instead of data from event buffers.
+  if (mark_compactor_events_.empty()) return longest_incremental_marking_step_;
+
+  double max_duration = 0.0;
+  EventBuffer::const_iterator iter = mark_compactor_events_.begin();
+  while (iter != mark_compactor_events_.end()) {
+    max_duration = Max(iter->longest_incremental_marking_step, max_duration);
+    ++iter;
+  }
+
+  return max_duration;
+}
+
+
+intptr_t GCTracer::IncrementalMarkingSpeedInBytesPerMillisecond() const {
+  if (cumulative_incremental_marking_duration_ == 0.0) return 0;
+
+  // We haven't completed an entire round of incremental marking yet.
+  // Use data from GCTracer instead of data from event buffers.
+  if (mark_compactor_events_.empty()) {
+    return static_cast<intptr_t>(cumulative_incremental_marking_bytes_ /
+                                 cumulative_pure_incremental_marking_duration_);
+  }
+
+  intptr_t bytes = 0;
+  double durations = 0.0;
+  EventBuffer::const_iterator iter = mark_compactor_events_.begin();
+  while (iter != mark_compactor_events_.end()) {
+    bytes += iter->incremental_marking_bytes;
+    durations += iter->pure_incremental_marking_duration;
+    ++iter;
+  }
+
+  if (durations == 0.0) return 0;
+
+  return static_cast<intptr_t>(bytes / durations);
+}
+
+
+intptr_t GCTracer::ScavengeSpeedInBytesPerMillisecond() const {
+  intptr_t bytes = 0;
+  double durations = 0.0;
+  EventBuffer::const_iterator iter = scavenger_events_.begin();
+  while (iter != scavenger_events_.end()) {
+    bytes += iter->new_space_object_size;
+    durations += iter->end_time - iter->start_time;
+    ++iter;
+  }
+
+  if (durations == 0.0) return 0;
+
+  return static_cast<intptr_t>(bytes / durations);
+}
+
+
+intptr_t GCTracer::MarkCompactSpeedInBytesPerMillisecond() const {
+  intptr_t bytes = 0;
+  double durations = 0.0;
+  EventBuffer::const_iterator iter = mark_compactor_events_.begin();
+  while (iter != mark_compactor_events_.end()) {
+    bytes += iter->start_object_size;
+    durations += iter->end_time - iter->start_time +
+                 iter->pure_incremental_marking_duration;
+    ++iter;
+  }
+
+  if (durations == 0.0) return 0;
+
+  return static_cast<intptr_t>(bytes / durations);
+}
+
+
+intptr_t GCTracer::NewSpaceAllocationThroughputInBytesPerMillisecond() const {
+  intptr_t bytes = 0;
+  double durations = 0.0;
+  AllocationEventBuffer::const_iterator iter = allocation_events_.begin();
+  while (iter != allocation_events_.end()) {
+    bytes += iter->allocation_in_bytes_;
+    durations += iter->duration_;
+    ++iter;
+  }
+
+  if (durations == 0.0) return 0;
+
+  return static_cast<intptr_t>(bytes / durations);
+}
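+
+// Worked example for the throughput computation above (editorial
+// illustration, not part of the upstream change): if the ring buffer holds
+// two allocation events, 100 KB allocated over 10 ms and 200 KB over 20 ms,
+// the reported throughput is (100 + 200) KB / (10 + 20) ms = 10 KB/ms.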
+}  // namespace internal
+}  // namespace v8
diff --git a/src/heap/gc-tracer.h b/src/heap/gc-tracer.h
new file mode 100644
index 0000000..4e70f07
--- /dev/null
+++ b/src/heap/gc-tracer.h
@@ -0,0 +1,401 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_GC_TRACER_H_
+#define V8_HEAP_GC_TRACER_H_
+
+#include "src/base/platform/platform.h"
+
+namespace v8 {
+namespace internal {
+
+// A simple ring buffer class with maximum size known at compile time.
+// The class only implements the functionality required in GCTracer.
+template <typename T, size_t MAX_SIZE>
+class RingBuffer {
+ public:
+  class const_iterator {
+   public:
+    const_iterator() : index_(0), elements_(NULL) {}
+
+    const_iterator(size_t index, const T* elements)
+        : index_(index), elements_(elements) {}
+
+    bool operator==(const const_iterator& rhs) const {
+      return elements_ == rhs.elements_ && index_ == rhs.index_;
+    }
+
+    bool operator!=(const const_iterator& rhs) const {
+      return elements_ != rhs.elements_ || index_ != rhs.index_;
+    }
+
+    operator const T*() const { return elements_ + index_; }
+
+    const T* operator->() const { return elements_ + index_; }
+
+    const T& operator*() const { return elements_[index_]; }
+
+    const_iterator& operator++() {
+      index_ = (index_ + 1) % (MAX_SIZE + 1);
+      return *this;
+    }
+
+    const_iterator& operator--() {
+      index_ = (index_ + MAX_SIZE) % (MAX_SIZE + 1);
+      return *this;
+    }
+
+   private:
+    size_t index_;
+    const T* elements_;
+  };
+
+  RingBuffer() : begin_(0), end_(0) {}
+
+  bool empty() const { return begin_ == end_; }
+  size_t size() const {
+    return (end_ - begin_ + MAX_SIZE + 1) % (MAX_SIZE + 1);
+  }
+  const_iterator begin() const { return const_iterator(begin_, elements_); }
+  const_iterator end() const { return const_iterator(end_, elements_); }
+  const_iterator back() const { return --end(); }
+  void push_back(const T& element) {
+    elements_[end_] = element;
+    end_ = (end_ + 1) % (MAX_SIZE + 1);
+    if (end_ == begin_) begin_ = (begin_ + 1) % (MAX_SIZE + 1);
+  }
+  void push_front(const T& element) {
+    begin_ = (begin_ + MAX_SIZE) % (MAX_SIZE + 1);
+    if (begin_ == end_) end_ = (end_ + MAX_SIZE) % (MAX_SIZE + 1);
+    elements_[begin_] = element;
+  }
+
+ private:
+  T elements_[MAX_SIZE + 1];
+  size_t begin_;
+  size_t end_;
+
+  DISALLOW_COPY_AND_ASSIGN(RingBuffer);
+};
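+
+// Illustrative sketch of the ring buffer semantics (editorial addition, not
+// part of the upstream change): at most MAX_SIZE elements are kept and the
+// oldest entry is silently dropped once the buffer is full.
+//
+//   RingBuffer<int, 3> buffer;
+//   buffer.push_front(1);
+//   buffer.push_front(2);
+//   buffer.push_front(3);
+//   buffer.push_front(4);          // Evicts 1; buffer now holds 4, 3, 2.
+//   CHECK_EQ(3u, buffer.size());
+//   CHECK_EQ(4, *buffer.begin());  // begin() is the most recently pushed.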
+
+
+// GCTracer collects and prints ONE line after each garbage collector
+// invocation IFF --trace_gc is used.
+// TODO(ernstm): Unit tests.
+class GCTracer {
+ public:
+  class Scope {
+   public:
+    enum ScopeId {
+      EXTERNAL,
+      MC_MARK,
+      MC_SWEEP,
+      MC_SWEEP_NEWSPACE,
+      MC_SWEEP_OLDSPACE,
+      MC_SWEEP_CODE,
+      MC_SWEEP_CELL,
+      MC_SWEEP_MAP,
+      MC_EVACUATE_PAGES,
+      MC_UPDATE_NEW_TO_NEW_POINTERS,
+      MC_UPDATE_ROOT_TO_NEW_POINTERS,
+      MC_UPDATE_OLD_TO_NEW_POINTERS,
+      MC_UPDATE_POINTERS_TO_EVACUATED,
+      MC_UPDATE_POINTERS_BETWEEN_EVACUATED,
+      MC_UPDATE_MISC_POINTERS,
+      MC_WEAKCOLLECTION_PROCESS,
+      MC_WEAKCOLLECTION_CLEAR,
+      MC_WEAKCOLLECTION_ABORT,
+      MC_FLUSH_CODE,
+      NUMBER_OF_SCOPES
+    };
+
+    Scope(GCTracer* tracer, ScopeId scope) : tracer_(tracer), scope_(scope) {
+      start_time_ = base::OS::TimeCurrentMillis();
+    }
+
+    ~Scope() {
+      DCHECK(scope_ < NUMBER_OF_SCOPES);  // scope_ is unsigned.
+      tracer_->current_.scopes[scope_] +=
+          base::OS::TimeCurrentMillis() - start_time_;
+    }
+
+   private:
+    GCTracer* tracer_;
+    ScopeId scope_;
+    double start_time_;
+
+    DISALLOW_COPY_AND_ASSIGN(Scope);
+  };
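+
+  // Illustrative sketch of how a scope is timed (editorial addition; the
+  // marking call shown is hypothetical):
+  //
+  //   {
+  //     GCTracer::Scope scope(tracer, GCTracer::Scope::MC_MARK);
+  //     MarkLiveObjects();  // hypothetical work being measured
+  //   }  // The destructor adds the elapsed time to current_.scopes[MC_MARK].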
+
+
+  class AllocationEvent {
+   public:
+    // Default constructor leaves the event uninitialized.
+    AllocationEvent() {}
+
+    AllocationEvent(double duration, intptr_t allocation_in_bytes);
+
+    // Time spent in the mutator between the end of the last garbage
+    // collection and the beginning of the next garbage collection.
+    double duration_;
+
+    // Memory allocated in the new space between the end of the last garbage
+    // collection and the beginning of the next garbage collection.
+    intptr_t allocation_in_bytes_;
+  };
+
+  class Event {
+   public:
+    enum Type { SCAVENGER = 0, MARK_COMPACTOR = 1, START = 2 };
+
+    // Default constructor leaves the event uninitialized.
+    Event() {}
+
+    Event(Type type, const char* gc_reason, const char* collector_reason);
+
+    // Returns a string describing the event type.
+    const char* TypeName(bool short_name) const;
+
+    // Type of event
+    Type type;
+
+    const char* gc_reason;
+    const char* collector_reason;
+
+    // Timestamp set in the constructor.
+    double start_time;
+
+    // Timestamp set in the destructor.
+    double end_time;
+
+    // Size of objects in heap set in constructor.
+    intptr_t start_object_size;
+
+    // Size of objects in heap set in destructor.
+    intptr_t end_object_size;
+
+    // Size of memory allocated from OS set in constructor.
+    intptr_t start_memory_size;
+
+    // Size of memory allocated from OS set in destructor.
+    intptr_t end_memory_size;
+
+    // Total amount of space either wasted or contained in one of the free
+    // lists before the current GC.
+    intptr_t start_holes_size;
+
+    // Total amount of space either wasted or contained in one of the free
+    // lists after the current GC.
+    intptr_t end_holes_size;
+
+    // Size of new space objects in constructor.
+    intptr_t new_space_object_size;
+
+    // Number of incremental marking steps since creation of tracer.
+    // (value at start of event)
+    int cumulative_incremental_marking_steps;
+
+    // Incremental marking steps since
+    // - last event for SCAVENGER events
+    // - last MARK_COMPACTOR event for MARK_COMPACTOR events
+    int incremental_marking_steps;
+
+    // Bytes marked since creation of tracer (value at start of event).
+    intptr_t cumulative_incremental_marking_bytes;
+
+    // Bytes marked since
+    // - last event for SCAVENGER events
+    // - last MARK_COMPACTOR event for MARK_COMPACTOR events
+    intptr_t incremental_marking_bytes;
+
+    // Cumulative duration of incremental marking steps since creation of
+    // tracer. (value at start of event)
+    double cumulative_incremental_marking_duration;
+
+    // Duration of incremental marking steps since
+    // - last event for SCAVENGER events
+    // - last MARK_COMPACTOR event for MARK_COMPACTOR events
+    double incremental_marking_duration;
+
+    // Cumulative pure duration of incremental marking steps since creation of
+    // tracer. (value at start of event)
+    double cumulative_pure_incremental_marking_duration;
+
+    // Duration of pure incremental marking steps since
+    // - last event for SCAVENGER events
+    // - last MARK_COMPACTOR event for MARK_COMPACTOR events
+    double pure_incremental_marking_duration;
+
+    // Longest incremental marking step since start of marking.
+    // (value at start of event)
+    double longest_incremental_marking_step;
+
+    // Amounts of time spent in different scopes during GC.
+    double scopes[Scope::NUMBER_OF_SCOPES];
+  };
+
+  static const int kRingBufferMaxSize = 10;
+
+  typedef RingBuffer<Event, kRingBufferMaxSize> EventBuffer;
+
+  typedef RingBuffer<AllocationEvent, kRingBufferMaxSize> AllocationEventBuffer;
+
+  explicit GCTracer(Heap* heap);
+
+  // Start collecting data.
+  void Start(GarbageCollector collector, const char* gc_reason,
+             const char* collector_reason);
+
+  // Stop collecting data and print results.
+  void Stop();
+
+  // Log an allocation throughput event.
+  void AddNewSpaceAllocationTime(double duration, intptr_t allocation_in_bytes);
+
+  // Log an incremental marking step.
+  void AddIncrementalMarkingStep(double duration, intptr_t bytes);
+
+  // Log time spent in marking.
+  void AddMarkingTime(double duration) {
+    cumulative_marking_duration_ += duration;
+  }
+
+  // Time spent in marking.
+  double cumulative_marking_duration() const {
+    return cumulative_marking_duration_;
+  }
+
+  // Log time spent in sweeping on main thread.
+  void AddSweepingTime(double duration) {
+    cumulative_sweeping_duration_ += duration;
+  }
+
+  // Time spent in sweeping on main thread.
+  double cumulative_sweeping_duration() const {
+    return cumulative_sweeping_duration_;
+  }
+
+  // Compute the mean duration of the last scavenger events. Returns 0 if no
+  // events have been recorded.
+  double MeanScavengerDuration() const {
+    return MeanDuration(scavenger_events_);
+  }
+
+  // Compute the max duration of the last scavenger events. Returns 0 if no
+  // events have been recorded.
+  double MaxScavengerDuration() const { return MaxDuration(scavenger_events_); }
+
+  // Compute the mean duration of the last mark compactor events. Returns 0 if
+  // no events have been recorded.
+  double MeanMarkCompactorDuration() const {
+    return MeanDuration(mark_compactor_events_);
+  }
+
+  // Compute the max duration of the last mark compactor events. Returns 0 if
+  // no events have been recorded.
+  double MaxMarkCompactorDuration() const {
+    return MaxDuration(mark_compactor_events_);
+  }
+
+  // Compute the mean step duration of the last incremental marking round.
+  // Returns 0 if no incremental marking round has been completed.
+  double MeanIncrementalMarkingDuration() const;
+
+  // Compute the max step duration of the last incremental marking round.
+  // Returns 0 if no incremental marking round has been completed.
+  double MaxIncrementalMarkingDuration() const;
+
+  // Compute the average incremental marking speed in bytes/millisecond.
+  // Returns 0 if no events have been recorded.
+  intptr_t IncrementalMarkingSpeedInBytesPerMillisecond() const;
+
+  // Compute the average scavenge speed in bytes/millisecond.
+  // Returns 0 if no events have been recorded.
+  intptr_t ScavengeSpeedInBytesPerMillisecond() const;
+
+  // Compute the average mark-compact speed in bytes/millisecond.
+  // Returns 0 if no events have been recorded.
+  intptr_t MarkCompactSpeedInBytesPerMillisecond() const;
+
+  // Allocation throughput in the new space in bytes/millisecond.
+  // Returns 0 if no events have been recorded.
+  intptr_t NewSpaceAllocationThroughputInBytesPerMillisecond() const;
+
+ private:
+  // Print one detailed trace line in name=value format.
+  // TODO(ernstm): Move to Heap.
+  void PrintNVP() const;
+
+  // Print one trace line.
+  // TODO(ernstm): Move to Heap.
+  void Print() const;
+
+  // Compute the mean duration of the events in the given ring buffer.
+  double MeanDuration(const EventBuffer& events) const;
+
+  // Compute the max duration of the events in the given ring buffer.
+  double MaxDuration(const EventBuffer& events) const;
+
+  // Pointer to the heap that owns this tracer.
+  Heap* heap_;
+
+  // Current tracer event. Populated during Start/Stop cycle. Valid after Stop()
+  // has returned.
+  Event current_;
+
+  // Previous tracer event.
+  Event previous_;
+
+  // Previous MARK_COMPACTOR event.
+  Event previous_mark_compactor_event_;
+
+  // RingBuffers for SCAVENGER events.
+  EventBuffer scavenger_events_;
+
+  // RingBuffers for MARK_COMPACTOR events.
+  EventBuffer mark_compactor_events_;
+
+  // RingBuffer for allocation events.
+  AllocationEventBuffer allocation_events_;
+
+  // Cumulative number of incremental marking steps since creation of tracer.
+  int cumulative_incremental_marking_steps_;
+
+  // Cumulative size of incremental marking steps (in bytes) since creation of
+  // tracer.
+  intptr_t cumulative_incremental_marking_bytes_;
+
+  // Cumulative duration of incremental marking steps since creation of tracer.
+  double cumulative_incremental_marking_duration_;
+
+  // Cumulative duration of pure incremental marking steps since creation of
+  // tracer.
+  double cumulative_pure_incremental_marking_duration_;
+
+  // Longest incremental marking step since start of marking.
+  double longest_incremental_marking_step_;
+
+  // Total marking time.
+  // This timer is precise when run with --print-cumulative-gc-stat
+  double cumulative_marking_duration_;
+
+  // Total sweeping time on the main thread.
+  // This timer is precise when run with --print-cumulative-gc-stat
+  // TODO(hpayer): Account for sweeping time on sweeper threads. Add a
+  // different field for that.
+  // TODO(hpayer): This timer right now just holds the sweeping time
+  // of the initial atomic sweeping pause. Make sure that it accumulates
+  // all sweeping operations performed on the main thread.
+  double cumulative_sweeping_duration_;
+
+  // Holds the new space top pointer recorded at the end of the last garbage
+  // collection.
+  intptr_t new_space_top_after_gc_;
+
+  DISALLOW_COPY_AND_ASSIGN(GCTracer);
+};
+}
+}  // namespace v8::internal
+
+#endif  // V8_HEAP_GC_TRACER_H_
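The declarations above aggregate statistics over bounded ring buffers of recorded GC events. As a rough, self-contained illustration of that aggregation (not the GCTracer implementation, whose Event type and buffers are defined elsewhere in this patch), the sketch below computes a mean, a max, and a bytes/ms speed that falls back to 0 when nothing has been recorded; the Event fields used here are assumptions.

#include <algorithm>
#include <cstddef>
#include <vector>

// Sketch only: Event fields are invented for the illustration.
struct Event {
  double duration_ms;
  std::size_t bytes_processed;
};

static double MeanDuration(const std::vector<Event>& events) {
  if (events.empty()) return 0.0;  // No events recorded.
  double sum = 0.0;
  for (const Event& e : events) sum += e.duration_ms;
  return sum / events.size();
}

static double MaxDuration(const std::vector<Event>& events) {
  double max = 0.0;
  for (const Event& e : events) max = std::max(max, e.duration_ms);
  return max;
}

static std::size_t SpeedInBytesPerMillisecond(const std::vector<Event>& events) {
  double total_ms = 0.0;
  std::size_t total_bytes = 0;
  for (const Event& e : events) {
    total_ms += e.duration_ms;
    total_bytes += e.bytes_processed;
  }
  if (total_ms == 0.0) return 0;  // Nothing (or zero time) recorded.
  return static_cast<std::size_t>(total_bytes / total_ms);
}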
diff --git a/src/heap/heap-inl.h b/src/heap/heap-inl.h
new file mode 100644
index 0000000..e658224
--- /dev/null
+++ b/src/heap/heap-inl.h
@@ -0,0 +1,780 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_HEAP_INL_H_
+#define V8_HEAP_HEAP_INL_H_
+
+#include <cmath>
+
+#include "src/base/platform/platform.h"
+#include "src/cpu-profiler.h"
+#include "src/heap/heap.h"
+#include "src/heap/store-buffer.h"
+#include "src/heap/store-buffer-inl.h"
+#include "src/heap-profiler.h"
+#include "src/isolate.h"
+#include "src/list-inl.h"
+#include "src/msan.h"
+#include "src/objects.h"
+
+namespace v8 {
+namespace internal {
+
+void PromotionQueue::insert(HeapObject* target, int size) {
+  if (emergency_stack_ != NULL) {
+    emergency_stack_->Add(Entry(target, size));
+    return;
+  }
+
+  if (NewSpacePage::IsAtStart(reinterpret_cast<Address>(rear_))) {
+    NewSpacePage* rear_page =
+        NewSpacePage::FromAddress(reinterpret_cast<Address>(rear_));
+    DCHECK(!rear_page->prev_page()->is_anchor());
+    rear_ = reinterpret_cast<intptr_t*>(rear_page->prev_page()->area_end());
+  }
+
+  if ((rear_ - 2) < limit_) {
+    RelocateQueueHead();
+    emergency_stack_->Add(Entry(target, size));
+    return;
+  }
+
+  *(--rear_) = reinterpret_cast<intptr_t>(target);
+  *(--rear_) = size;
+// Assert no overflow into live objects.
+#ifdef DEBUG
+  SemiSpace::AssertValidRange(target->GetIsolate()->heap()->new_space()->top(),
+                              reinterpret_cast<Address>(rear_));
+#endif
+}
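PromotionQueue::insert above grows the queue downward, two words per entry, and switches to an emergency stack once it would collide with the limit. The sketch below shows the same shape in isolation, simplified to a single flat buffer; the class name, capacity handling, and overflow vector are all illustrative, not the V8 data structure.

#include <cstddef>
#include <cstdint>
#include <utility>
#include <vector>

// Illustrative sketch only: a queue that grows downward through a fixed
// buffer and spills into an overflow vector when full.
class DownwardQueue {
 public:
  using Entry = std::pair<void*, int>;  // (target, size), as above.

  explicit DownwardQueue(std::size_t capacity_in_words)
      : buffer_(capacity_in_words),
        limit_(buffer_.data()),
        rear_(buffer_.data() + capacity_in_words) {}

  void insert(void* target, int size) {
    if (!emergency_stack_.empty() || rear_ - 2 < limit_) {
      // No room for two more words: fall back to the emergency stack.
      emergency_stack_.emplace_back(target, size);
      return;
    }
    *(--rear_) = reinterpret_cast<std::intptr_t>(target);
    *(--rear_) = size;
  }

 private:
  std::vector<std::intptr_t> buffer_;
  std::intptr_t* limit_;  // Lowest address the queue may grow down to.
  std::intptr_t* rear_;   // Next free slot; decremented on insert.
  std::vector<Entry> emergency_stack_;
};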
+
+
+template <>
+bool inline Heap::IsOneByte(Vector<const char> str, int chars) {
+  // TODO(dcarney): incorporate Latin-1 check when Latin-1 is supported?
+  return chars == str.length();
+}
+
+
+template <>
+bool inline Heap::IsOneByte(String* str, int chars) {
+  return str->IsOneByteRepresentation();
+}
+
+
+AllocationResult Heap::AllocateInternalizedStringFromUtf8(
+    Vector<const char> str, int chars, uint32_t hash_field) {
+  if (IsOneByte(str, chars)) {
+    return AllocateOneByteInternalizedString(Vector<const uint8_t>::cast(str),
+                                             hash_field);
+  }
+  return AllocateInternalizedStringImpl<false>(str, chars, hash_field);
+}
+
+
+template <typename T>
+AllocationResult Heap::AllocateInternalizedStringImpl(T t, int chars,
+                                                      uint32_t hash_field) {
+  if (IsOneByte(t, chars)) {
+    return AllocateInternalizedStringImpl<true>(t, chars, hash_field);
+  }
+  return AllocateInternalizedStringImpl<false>(t, chars, hash_field);
+}
+
+
+AllocationResult Heap::AllocateOneByteInternalizedString(
+    Vector<const uint8_t> str, uint32_t hash_field) {
+  CHECK_GE(String::kMaxLength, str.length());
+  // Compute map and object size.
+  Map* map = one_byte_internalized_string_map();
+  int size = SeqOneByteString::SizeFor(str.length());
+  AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, TENURED);
+
+  // Allocate string.
+  HeapObject* result;
+  {
+    AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
+    if (!allocation.To(&result)) return allocation;
+  }
+
+  // String maps are all immortal immovable objects.
+  result->set_map_no_write_barrier(map);
+  // Set length and hash fields of the allocated string.
+  String* answer = String::cast(result);
+  answer->set_length(str.length());
+  answer->set_hash_field(hash_field);
+
+  DCHECK_EQ(size, answer->Size());
+
+  // Fill in the characters.
+  MemCopy(answer->address() + SeqOneByteString::kHeaderSize, str.start(),
+          str.length());
+
+  return answer;
+}
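The allocation above sizes the string as a fixed header plus one byte per character via SeqOneByteString::SizeFor. A minimal sketch of that kind of size computation, using assumed header-size and word-size constants rather than V8's actual layout, looks like this:

// Sketch only: constants below are assumed values, not SeqOneByteString's.
constexpr int kAssumedHeaderSize = 16;  // object header, in bytes
constexpr int kAssumedWordSize = 8;     // pointer size, in bytes

static int SizeForOneByteString(int length) {
  int unaligned = kAssumedHeaderSize + length;  // header + one byte per char
  // Round up to the next word boundary, as heap object sizes must be aligned.
  return (unaligned + kAssumedWordSize - 1) & ~(kAssumedWordSize - 1);
}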
+
+
+AllocationResult Heap::AllocateTwoByteInternalizedString(Vector<const uc16> str,
+                                                         uint32_t hash_field) {
+  CHECK_GE(String::kMaxLength, str.length());
+  // Compute map and object size.
+  Map* map = internalized_string_map();
+  int size = SeqTwoByteString::SizeFor(str.length());
+  AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, TENURED);
+
+  // Allocate string.
+  HeapObject* result;
+  {
+    AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
+    if (!allocation.To(&result)) return allocation;
+  }
+
+  result->set_map(map);
+  // Set length and hash fields of the allocated string.
+  String* answer = String::cast(result);
+  answer->set_length(str.length());
+  answer->set_hash_field(hash_field);
+
+  DCHECK_EQ(size, answer->Size());
+
+  // Fill in the characters.
+  MemCopy(answer->address() + SeqTwoByteString::kHeaderSize, str.start(),
+          str.length() * kUC16Size);
+
+  return answer;
+}
+
+AllocationResult Heap::CopyFixedArray(FixedArray* src) {
+  if (src->length() == 0) return src;
+  return CopyFixedArrayWithMap(src, src->map());
+}
+
+
+AllocationResult Heap::CopyFixedDoubleArray(FixedDoubleArray* src) {
+  if (src->length() == 0) return src;
+  return CopyFixedDoubleArrayWithMap(src, src->map());
+}
+
+
+AllocationResult Heap::CopyConstantPoolArray(ConstantPoolArray* src) {
+  if (src->length() == 0) return src;
+  return CopyConstantPoolArrayWithMap(src, src->map());
+}
+
+
+AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationSpace space,
+                                   AllocationSpace retry_space) {
+  DCHECK(AllowHandleAllocation::IsAllowed());
+  DCHECK(AllowHeapAllocation::IsAllowed());
+  DCHECK(gc_state_ == NOT_IN_GC);
+#ifdef DEBUG
+  if (FLAG_gc_interval >= 0 && AllowAllocationFailure::IsAllowed(isolate_) &&
+      Heap::allocation_timeout_-- <= 0) {
+    return AllocationResult::Retry(space);
+  }
+  isolate_->counters()->objs_since_last_full()->Increment();
+  isolate_->counters()->objs_since_last_young()->Increment();
+#endif
+
+  HeapObject* object;
+  AllocationResult allocation;
+  if (NEW_SPACE == space) {
+    allocation = new_space_.AllocateRaw(size_in_bytes);
+    if (always_allocate() && allocation.IsRetry() && retry_space != NEW_SPACE) {
+      space = retry_space;
+    } else {
+      if (allocation.To(&object)) {
+        OnAllocationEvent(object, size_in_bytes);
+      }
+      return allocation;
+    }
+  }
+
+  if (OLD_POINTER_SPACE == space) {
+    allocation = old_pointer_space_->AllocateRaw(size_in_bytes);
+  } else if (OLD_DATA_SPACE == space) {
+    allocation = old_data_space_->AllocateRaw(size_in_bytes);
+  } else if (CODE_SPACE == space) {
+    if (size_in_bytes <= code_space()->AreaSize()) {
+      allocation = code_space_->AllocateRaw(size_in_bytes);
+    } else {
+      // Large code objects are allocated in large object space.
+      allocation = lo_space_->AllocateRaw(size_in_bytes, EXECUTABLE);
+    }
+  } else if (LO_SPACE == space) {
+    allocation = lo_space_->AllocateRaw(size_in_bytes, NOT_EXECUTABLE);
+  } else if (CELL_SPACE == space) {
+    allocation = cell_space_->AllocateRaw(size_in_bytes);
+  } else if (PROPERTY_CELL_SPACE == space) {
+    allocation = property_cell_space_->AllocateRaw(size_in_bytes);
+  } else {
+    DCHECK(MAP_SPACE == space);
+    allocation = map_space_->AllocateRaw(size_in_bytes);
+  }
+  if (allocation.To(&object)) {
+    OnAllocationEvent(object, size_in_bytes);
+  } else {
+    old_gen_exhausted_ = true;
+  }
+  return allocation;
+}
+
+
+void Heap::OnAllocationEvent(HeapObject* object, int size_in_bytes) {
+  HeapProfiler* profiler = isolate_->heap_profiler();
+  if (profiler->is_tracking_allocations()) {
+    profiler->AllocationEvent(object->address(), size_in_bytes);
+  }
+
+  if (FLAG_verify_predictable) {
+    ++allocations_count_;
+
+    UpdateAllocationsHash(object);
+    UpdateAllocationsHash(size_in_bytes);
+
+    if ((FLAG_dump_allocations_digest_at_alloc > 0) &&
+        (--dump_allocations_hash_countdown_ == 0)) {
+      dump_allocations_hash_countdown_ = FLAG_dump_allocations_digest_at_alloc;
+      PrintAlloctionsHash();
+    }
+  }
+}
+
+
+void Heap::OnMoveEvent(HeapObject* target, HeapObject* source,
+                       int size_in_bytes) {
+  HeapProfiler* heap_profiler = isolate_->heap_profiler();
+  if (heap_profiler->is_tracking_object_moves()) {
+    heap_profiler->ObjectMoveEvent(source->address(), target->address(),
+                                   size_in_bytes);
+  }
+
+  if (isolate_->logger()->is_logging_code_events() ||
+      isolate_->cpu_profiler()->is_profiling()) {
+    if (target->IsSharedFunctionInfo()) {
+      PROFILE(isolate_, SharedFunctionInfoMoveEvent(source->address(),
+                                                    target->address()));
+    }
+  }
+
+  if (FLAG_verify_predictable) {
+    ++allocations_count_;
+
+    UpdateAllocationsHash(source);
+    UpdateAllocationsHash(target);
+    UpdateAllocationsHash(size_in_bytes);
+
+    if ((FLAG_dump_allocations_digest_at_alloc > 0) &&
+        (--dump_allocations_hash_countdown_ == 0)) {
+      dump_allocations_hash_countdown_ = FLAG_dump_allocations_digest_at_alloc;
+      PrintAlloctionsHash();
+    }
+  }
+}
+
+
+void Heap::UpdateAllocationsHash(HeapObject* object) {
+  Address object_address = object->address();
+  MemoryChunk* memory_chunk = MemoryChunk::FromAddress(object_address);
+  AllocationSpace allocation_space = memory_chunk->owner()->identity();
+
+  STATIC_ASSERT(kSpaceTagSize + kPageSizeBits <= 32);
+  uint32_t value =
+      static_cast<uint32_t>(object_address - memory_chunk->address()) |
+      (static_cast<uint32_t>(allocation_space) << kPageSizeBits);
+
+  UpdateAllocationsHash(value);
+}
+
+
+void Heap::UpdateAllocationsHash(uint32_t value) {
+  uint16_t c1 = static_cast<uint16_t>(value);
+  uint16_t c2 = static_cast<uint16_t>(value >> 16);
+  raw_allocations_hash_ =
+      StringHasher::AddCharacterCore(raw_allocations_hash_, c1);
+  raw_allocations_hash_ =
+      StringHasher::AddCharacterCore(raw_allocations_hash_, c2);
+}
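UpdateAllocationsHash above feeds each 32-bit value into the running allocations hash as two 16-bit halves. The standalone sketch below reproduces that feeding pattern with a classic one-at-a-time mixing step; it is only assumed to resemble StringHasher::AddCharacterCore.

#include <cstdint>
#include <cstdio>

// Running hash fed with 16-bit "characters".
static std::uint32_t AddCharacter(std::uint32_t running, std::uint16_t c) {
  running += c;
  running += running << 10;
  running ^= running >> 6;
  return running;
}

static std::uint32_t UpdateHash(std::uint32_t running, std::uint32_t value) {
  running = AddCharacter(running, static_cast<std::uint16_t>(value));
  running = AddCharacter(running, static_cast<std::uint16_t>(value >> 16));
  return running;
}

int main() {
  std::uint32_t hash = 0;
  // Feed two synthetic (page offset | space tag) words, as the heap would.
  hash = UpdateHash(hash, 0x00000123u);
  hash = UpdateHash(hash, 0x00100040u);
  std::printf("hash = 0x%08x\n", static_cast<unsigned>(hash));
  return 0;
}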
+
+
+void Heap::PrintAlloctionsHash() {
+  uint32_t hash = StringHasher::GetHashCore(raw_allocations_hash_);
+  PrintF("\n### Allocations = %u, hash = 0x%08x\n", allocations_count_, hash);
+}
+
+
+void Heap::FinalizeExternalString(String* string) {
+  DCHECK(string->IsExternalString());
+  v8::String::ExternalStringResourceBase** resource_addr =
+      reinterpret_cast<v8::String::ExternalStringResourceBase**>(
+          reinterpret_cast<byte*>(string) + ExternalString::kResourceOffset -
+          kHeapObjectTag);
+
+  // Dispose of the C++ object if it has not already been disposed.
+  if (*resource_addr != NULL) {
+    (*resource_addr)->Dispose();
+    *resource_addr = NULL;
+  }
+}
+
+
+bool Heap::InNewSpace(Object* object) {
+  bool result = new_space_.Contains(object);
+  DCHECK(!result ||                 // Either not in new space
+         gc_state_ != NOT_IN_GC ||  // ... or in the middle of GC
+         InToSpace(object));        // ... or in to-space (where we allocate).
+  return result;
+}
+
+
+bool Heap::InNewSpace(Address address) { return new_space_.Contains(address); }
+
+
+bool Heap::InFromSpace(Object* object) {
+  return new_space_.FromSpaceContains(object);
+}
+
+
+bool Heap::InToSpace(Object* object) {
+  return new_space_.ToSpaceContains(object);
+}
+
+
+bool Heap::InOldPointerSpace(Address address) {
+  return old_pointer_space_->Contains(address);
+}
+
+
+bool Heap::InOldPointerSpace(Object* object) {
+  return InOldPointerSpace(reinterpret_cast<Address>(object));
+}
+
+
+bool Heap::InOldDataSpace(Address address) {
+  return old_data_space_->Contains(address);
+}
+
+
+bool Heap::InOldDataSpace(Object* object) {
+  return InOldDataSpace(reinterpret_cast<Address>(object));
+}
+
+
+bool Heap::OldGenerationAllocationLimitReached() {
+  if (!incremental_marking()->IsStopped()) return false;
+  return OldGenerationSpaceAvailable() < 0;
+}
+
+
+bool Heap::ShouldBePromoted(Address old_address, int object_size) {
+  NewSpacePage* page = NewSpacePage::FromAddress(old_address);
+  Address age_mark = new_space_.age_mark();
+  return page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) &&
+         (!page->ContainsLimit(age_mark) || old_address < age_mark);
+}
+
+
+void Heap::RecordWrite(Address address, int offset) {
+  if (!InNewSpace(address)) store_buffer_.Mark(address + offset);
+}
+
+
+void Heap::RecordWrites(Address address, int start, int len) {
+  if (!InNewSpace(address)) {
+    for (int i = 0; i < len; i++) {
+      store_buffer_.Mark(address + start + i * kPointerSize);
+    }
+  }
+}
+
+
+OldSpace* Heap::TargetSpace(HeapObject* object) {
+  InstanceType type = object->map()->instance_type();
+  AllocationSpace space = TargetSpaceId(type);
+  return (space == OLD_POINTER_SPACE) ? old_pointer_space_ : old_data_space_;
+}
+
+
+AllocationSpace Heap::TargetSpaceId(InstanceType type) {
+  // Heap numbers and sequential strings are promoted to old data space, all
+  // other object types are promoted to old pointer space.  We do not use
+  // object->IsHeapNumber() and object->IsSeqString() because we already
+  // know that object has the heap object tag.
+
+  // These objects are never allocated in new space.
+  DCHECK(type != MAP_TYPE);
+  DCHECK(type != CODE_TYPE);
+  DCHECK(type != ODDBALL_TYPE);
+  DCHECK(type != CELL_TYPE);
+  DCHECK(type != PROPERTY_CELL_TYPE);
+
+  if (type <= LAST_NAME_TYPE) {
+    if (type == SYMBOL_TYPE) return OLD_POINTER_SPACE;
+    DCHECK(type < FIRST_NONSTRING_TYPE);
+    // There are four string representations: sequential strings, external
+    // strings, cons strings, and sliced strings.
+    // Only the latter two contain non-map-word pointers to heap objects.
+    return ((type & kIsIndirectStringMask) == kIsIndirectStringTag)
+               ? OLD_POINTER_SPACE
+               : OLD_DATA_SPACE;
+  } else {
+    return (type <= LAST_DATA_TYPE) ? OLD_DATA_SPACE : OLD_POINTER_SPACE;
+  }
+}
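TargetSpaceId above decides the promotion target by testing a representation tag packed into the instance type. The toy example below shows the same mask-and-compare pattern with invented constants that do not match V8's instance-type encoding.

#include <cstdint>

// Illustrative constants only.
constexpr std::uint32_t kRepresentationMask = 0x03;
constexpr std::uint32_t kIndirectTag = 0x01;

enum class Target { kPointerSpace, kDataSpace };

static Target TargetFor(std::uint32_t type_code) {
  // Indirect (cons/sliced) representations hold pointers to other heap
  // objects, so in this toy model they go to the pointer space.
  return (type_code & kRepresentationMask) == kIndirectTag
             ? Target::kPointerSpace
             : Target::kDataSpace;
}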
+
+
+bool Heap::AllowedToBeMigrated(HeapObject* obj, AllocationSpace dst) {
+  // Object migration is governed by the following rules:
+  //
+  // 1) Objects in new-space can be migrated to one of the old spaces
+  //    that matches their target space or they stay in new-space.
+  // 2) Objects in old-space stay in the same space when migrating.
+  // 3) Fillers (two or more words) can migrate due to left-trimming of
+  //    fixed arrays in new-space, old-data-space and old-pointer-space.
+  // 4) Fillers (one word) can never migrate, they are skipped by
+  //    incremental marking explicitly to prevent invalid pattern.
+  // 5) Short external strings can end up in old pointer space when a cons
+  //    string in old pointer space is made external (String::MakeExternal).
+  //
+  // Since this function is used for debugging only, we do not place
+  // asserts here, but check everything explicitly.
+  if (obj->map() == one_pointer_filler_map()) return false;
+  InstanceType type = obj->map()->instance_type();
+  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
+  AllocationSpace src = chunk->owner()->identity();
+  switch (src) {
+    case NEW_SPACE:
+      return dst == src || dst == TargetSpaceId(type);
+    case OLD_POINTER_SPACE:
+      return dst == src && (dst == TargetSpaceId(type) || obj->IsFiller() ||
+                            obj->IsExternalString());
+    case OLD_DATA_SPACE:
+      return dst == src && dst == TargetSpaceId(type);
+    case CODE_SPACE:
+      return dst == src && type == CODE_TYPE;
+    case MAP_SPACE:
+    case CELL_SPACE:
+    case PROPERTY_CELL_SPACE:
+    case LO_SPACE:
+      return false;
+    case INVALID_SPACE:
+      break;
+  }
+  UNREACHABLE();
+  return false;
+}
+
+
+void Heap::CopyBlock(Address dst, Address src, int byte_size) {
+  CopyWords(reinterpret_cast<Object**>(dst), reinterpret_cast<Object**>(src),
+            static_cast<size_t>(byte_size / kPointerSize));
+}
+
+
+void Heap::MoveBlock(Address dst, Address src, int byte_size) {
+  DCHECK(IsAligned(byte_size, kPointerSize));
+
+  int size_in_words = byte_size / kPointerSize;
+
+  if ((dst < src) || (dst >= (src + byte_size))) {
+    Object** src_slot = reinterpret_cast<Object**>(src);
+    Object** dst_slot = reinterpret_cast<Object**>(dst);
+    Object** end_slot = src_slot + size_in_words;
+
+    while (src_slot != end_slot) {
+      *dst_slot++ = *src_slot++;
+    }
+  } else {
+    MemMove(dst, src, static_cast<size_t>(byte_size));
+  }
+}
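MoveBlock above copies forward word by word only when the destination does not begin inside the source range; otherwise it delegates to MemMove. A hedged, standalone version of that overlap test (assuming, as the DCHECK above does, that byte_size is pointer-aligned) could look like:

#include <cstddef>
#include <cstdint>
#include <cstring>

// Sketch only: assumes byte_size is a multiple of the word size.
static void MoveWords(void* dst, const void* src, std::size_t byte_size) {
  const char* s = static_cast<const char*>(src);
  char* d = static_cast<char*>(dst);
  if (d < s || d >= s + byte_size) {
    // Destination starts before the source or past its end, so a forward
    // word-by-word copy cannot clobber unread source words.
    for (std::size_t i = 0; i < byte_size; i += sizeof(std::intptr_t)) {
      std::memcpy(d + i, s + i, sizeof(std::intptr_t));
    }
  } else {
    // dst lies inside [src, src + byte_size): let memmove handle the overlap.
    std::memmove(dst, src, byte_size);
  }
}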
+
+
+void Heap::ScavengePointer(HeapObject** p) { ScavengeObject(p, *p); }
+
+
+AllocationMemento* Heap::FindAllocationMemento(HeapObject* object) {
+  // Check if there is potentially a memento behind the object. If
+  // the last word of the memento is on another page we return
+  // immediately.
+  Address object_address = object->address();
+  Address memento_address = object_address + object->Size();
+  Address last_memento_word_address = memento_address + kPointerSize;
+  if (!NewSpacePage::OnSamePage(object_address, last_memento_word_address)) {
+    return NULL;
+  }
+
+  HeapObject* candidate = HeapObject::FromAddress(memento_address);
+  Map* candidate_map = candidate->map();
+  // This fast check may peek at an uninitialized word. However, the slow check
+  // below (memento_address == top) ensures that this is safe. Mark the word as
+  // initialized to silence MemorySanitizer warnings.
+  MSAN_MEMORY_IS_INITIALIZED(&candidate_map, sizeof(candidate_map));
+  if (candidate_map != allocation_memento_map()) return NULL;
+
+  // Either the object is the last object in the new space, or there is another
+  // object of at least word size (the header map word) following it, so it
+  // suffices to compare memento_address and top here. Note that technically
+  // we do not have
+  // to compare with the current top pointer of the from space page during GC,
+  // since we always install filler objects above the top pointer of a from
+  // space page when performing a garbage collection. However, always performing
+  // the test makes it possible to have a single, unified version of
+  // FindAllocationMemento that is used both by the GC and the mutator.
+  Address top = NewSpaceTop();
+  DCHECK(memento_address == top ||
+         memento_address + HeapObject::kHeaderSize <= top ||
+         !NewSpacePage::OnSamePage(memento_address, top));
+  if (memento_address == top) return NULL;
+
+  AllocationMemento* memento = AllocationMemento::cast(candidate);
+  if (!memento->IsValid()) return NULL;
+  return memento;
+}
+
+
+void Heap::UpdateAllocationSiteFeedback(HeapObject* object,
+                                        ScratchpadSlotMode mode) {
+  Heap* heap = object->GetHeap();
+  DCHECK(heap->InFromSpace(object));
+
+  if (!FLAG_allocation_site_pretenuring ||
+      !AllocationSite::CanTrack(object->map()->instance_type()))
+    return;
+
+  AllocationMemento* memento = heap->FindAllocationMemento(object);
+  if (memento == NULL) return;
+
+  if (memento->GetAllocationSite()->IncrementMementoFoundCount()) {
+    heap->AddAllocationSiteToScratchpad(memento->GetAllocationSite(), mode);
+  }
+}
+
+
+void Heap::ScavengeObject(HeapObject** p, HeapObject* object) {
+  DCHECK(object->GetIsolate()->heap()->InFromSpace(object));
+
+  // We use the first word (where the map pointer usually is) of a heap
+  // object to record the forwarding pointer.  A forwarding pointer can
+  // point to an old space, the code space, or the to space of the new
+  // generation.
+  MapWord first_word = object->map_word();
+
+  // If the first word is a forwarding address, the object has already been
+  // copied.
+  if (first_word.IsForwardingAddress()) {
+    HeapObject* dest = first_word.ToForwardingAddress();
+    DCHECK(object->GetIsolate()->heap()->InFromSpace(*p));
+    *p = dest;
+    return;
+  }
+
+  UpdateAllocationSiteFeedback(object, IGNORE_SCRATCHPAD_SLOT);
+
+  // AllocationMementos are unrooted and shouldn't survive a scavenge
+  DCHECK(object->map() != object->GetHeap()->allocation_memento_map());
+  // Call the slow part of scavenge object.
+  return ScavengeObjectSlow(p, object);
+}
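ScavengeObject above relies on the copying-collector convention that an evacuated object's first word holds a forwarding address. The toy model below sketches that protocol with a low tag bit standing in for V8's MapWord encoding; the types and tagging are assumptions made for the illustration.

#include <cstdint>
#include <deque>

// Toy object: first word is a "map" word, or a forwarding address | 1 once
// the object has been copied.
struct ToyObject {
  std::uintptr_t first_word;
  int payload;
};

static bool IsForwarded(const ToyObject* o) { return (o->first_word & 1) != 0; }

static ToyObject* ForwardingAddress(const ToyObject* o) {
  return reinterpret_cast<ToyObject*>(o->first_word & ~std::uintptr_t{1});
}

// to_space is a deque so previously copied objects keep stable addresses.
static ToyObject* Evacuate(ToyObject* from, std::deque<ToyObject>* to_space) {
  if (IsForwarded(from)) return ForwardingAddress(from);  // Already copied.
  to_space->push_back(*from);                             // Copy the object.
  ToyObject* dest = &to_space->back();
  from->first_word = reinterpret_cast<std::uintptr_t>(dest) | 1;  // Forward.
  return dest;
}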
+
+
+bool Heap::CollectGarbage(AllocationSpace space, const char* gc_reason,
+                          const v8::GCCallbackFlags callbackFlags) {
+  const char* collector_reason = NULL;
+  GarbageCollector collector = SelectGarbageCollector(space, &collector_reason);
+  return CollectGarbage(collector, gc_reason, collector_reason, callbackFlags);
+}
+
+
+Isolate* Heap::isolate() {
+  return reinterpret_cast<Isolate*>(
+      reinterpret_cast<intptr_t>(this) -
+      reinterpret_cast<size_t>(reinterpret_cast<Isolate*>(4)->heap()) + 4);
+}
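Heap::isolate() above recovers the owning Isolate from the Heap pointer by subtracting the offset of the heap member; the offset is obtained by calling heap() on a dummy Isolate* at address 4 and correcting for that base. For a plain struct with a directly accessible member, the same idea is the familiar container_of idiom, sketched here with illustrative types:

#include <cstddef>

// Illustrative types only.
struct Container {
  int before;
  double member;
};

// Recover the enclosing object from a pointer to one of its members by
// subtracting the member's offset.
static Container* ContainerFromMember(double* m) {
  return reinterpret_cast<Container*>(
      reinterpret_cast<char*>(m) - offsetof(Container, member));
}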
+
+
+// Calls the FUNCTION_CALL function and retries it up to three times
+// to guarantee that any allocations performed during the call will
+// succeed if there's enough memory.
+
+// Warning: Do not use the identifiers __object__, __maybe_object__ or
+// __scope__ in a call to this macro.
+
+#define RETURN_OBJECT_UNLESS_RETRY(ISOLATE, RETURN_VALUE) \
+  if (__allocation__.To(&__object__)) {                   \
+    DCHECK(__object__ != (ISOLATE)->heap()->exception()); \
+    RETURN_VALUE;                                         \
+  }
+
+#define CALL_AND_RETRY(ISOLATE, FUNCTION_CALL, RETURN_VALUE, RETURN_EMPTY)    \
+  do {                                                                        \
+    AllocationResult __allocation__ = FUNCTION_CALL;                          \
+    Object* __object__ = NULL;                                                \
+    RETURN_OBJECT_UNLESS_RETRY(ISOLATE, RETURN_VALUE)                         \
+    (ISOLATE)->heap()->CollectGarbage(__allocation__.RetrySpace(),            \
+                                      "allocation failure");                  \
+    __allocation__ = FUNCTION_CALL;                                           \
+    RETURN_OBJECT_UNLESS_RETRY(ISOLATE, RETURN_VALUE)                         \
+    (ISOLATE)->counters()->gc_last_resort_from_handles()->Increment();        \
+    (ISOLATE)->heap()->CollectAllAvailableGarbage("last resort gc");          \
+    {                                                                         \
+      AlwaysAllocateScope __scope__(ISOLATE);                                 \
+      __allocation__ = FUNCTION_CALL;                                         \
+    }                                                                         \
+    RETURN_OBJECT_UNLESS_RETRY(ISOLATE, RETURN_VALUE)                         \
+    /* TODO(1181417): Fix this. */                                            \
+    v8::internal::Heap::FatalProcessOutOfMemory("CALL_AND_RETRY_LAST", true); \
+    RETURN_EMPTY;                                                             \
+  } while (false)
+
+#define CALL_AND_RETRY_OR_DIE(ISOLATE, FUNCTION_CALL, RETURN_VALUE, \
+                              RETURN_EMPTY)                         \
+  CALL_AND_RETRY(ISOLATE, FUNCTION_CALL, RETURN_VALUE, RETURN_EMPTY)
+
+#define CALL_HEAP_FUNCTION(ISOLATE, FUNCTION_CALL, TYPE)                      \
+  CALL_AND_RETRY_OR_DIE(ISOLATE, FUNCTION_CALL,                               \
+                        return Handle<TYPE>(TYPE::cast(__object__), ISOLATE), \
+                        return Handle<TYPE>())
+
+
+#define CALL_HEAP_FUNCTION_VOID(ISOLATE, FUNCTION_CALL) \
+  CALL_AND_RETRY_OR_DIE(ISOLATE, FUNCTION_CALL, return, return)
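The macros above encode the allocation retry protocol used throughout the heap: try once, garbage-collect the failing space and retry, fall back to a last-resort full collection with a third attempt under an always-allocate scope, and finally report a fatal out-of-memory. Rewritten as an ordinary function with callbacks, purely as a sketch of the control flow (the real code returns AllocationResult and Handle<TYPE>, not std::optional), it would look roughly like:

#include <functional>
#include <optional>

// Sketch of the retry control flow only; names are stand-ins for the macros.
template <typename T>
std::optional<T> AllocateWithRetry(std::function<std::optional<T>()> try_alloc,
                                   std::function<void()> collect_failing_space,
                                   std::function<void()> collect_all_available,
                                   std::function<void()> fatal_out_of_memory) {
  if (auto result = try_alloc()) return result;  // First attempt.
  collect_failing_space();                       // GC the space that failed.
  if (auto result = try_alloc()) return result;  // Second attempt.
  collect_all_available();                       // Last-resort full GC.
  // The real code retries this attempt inside an AlwaysAllocateScope.
  if (auto result = try_alloc()) return result;
  fatal_out_of_memory();                         // Give up.
  return std::nullopt;
}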
+
+
+void ExternalStringTable::AddString(String* string) {
+  DCHECK(string->IsExternalString());
+  if (heap_->InNewSpace(string)) {
+    new_space_strings_.Add(string);
+  } else {
+    old_space_strings_.Add(string);
+  }
+}
+
+
+void ExternalStringTable::Iterate(ObjectVisitor* v) {
+  if (!new_space_strings_.is_empty()) {
+    Object** start = &new_space_strings_[0];
+    v->VisitPointers(start, start + new_space_strings_.length());
+  }
+  if (!old_space_strings_.is_empty()) {
+    Object** start = &old_space_strings_[0];
+    v->VisitPointers(start, start + old_space_strings_.length());
+  }
+}
+
+
+// Verify() is inline to avoid ifdefs around its calls in release
+// mode.
+void ExternalStringTable::Verify() {
+#ifdef DEBUG
+  for (int i = 0; i < new_space_strings_.length(); ++i) {
+    Object* obj = Object::cast(new_space_strings_[i]);
+    DCHECK(heap_->InNewSpace(obj));
+    DCHECK(obj != heap_->the_hole_value());
+  }
+  for (int i = 0; i < old_space_strings_.length(); ++i) {
+    Object* obj = Object::cast(old_space_strings_[i]);
+    DCHECK(!heap_->InNewSpace(obj));
+    DCHECK(obj != heap_->the_hole_value());
+  }
+#endif
+}
+
+
+void ExternalStringTable::AddOldString(String* string) {
+  DCHECK(string->IsExternalString());
+  DCHECK(!heap_->InNewSpace(string));
+  old_space_strings_.Add(string);
+}
+
+
+void ExternalStringTable::ShrinkNewStrings(int position) {
+  new_space_strings_.Rewind(position);
+#ifdef VERIFY_HEAP
+  if (FLAG_verify_heap) {
+    Verify();
+  }
+#endif
+}
+
+
+void Heap::ClearInstanceofCache() {
+  set_instanceof_cache_function(the_hole_value());
+}
+
+
+Object* Heap::ToBoolean(bool condition) {
+  return condition ? true_value() : false_value();
+}
+
+
+void Heap::CompletelyClearInstanceofCache() {
+  set_instanceof_cache_map(the_hole_value());
+  set_instanceof_cache_function(the_hole_value());
+}
+
+
+AlwaysAllocateScope::AlwaysAllocateScope(Isolate* isolate)
+    : heap_(isolate->heap()), daf_(isolate) {
+  // We shouldn't hit any nested scopes, because that requires
+  // non-handle code to call handle code. The code still works but
+  // performance will degrade, so we want to catch this situation
+  // in debug mode.
+  DCHECK(heap_->always_allocate_scope_depth_ == 0);
+  heap_->always_allocate_scope_depth_++;
+}
+
+
+AlwaysAllocateScope::~AlwaysAllocateScope() {
+  heap_->always_allocate_scope_depth_--;
+  DCHECK(heap_->always_allocate_scope_depth_ == 0);
+}
+
+
+#ifdef VERIFY_HEAP
+NoWeakObjectVerificationScope::NoWeakObjectVerificationScope() {
+  Isolate* isolate = Isolate::Current();
+  isolate->heap()->no_weak_object_verification_scope_depth_++;
+}
+
+
+NoWeakObjectVerificationScope::~NoWeakObjectVerificationScope() {
+  Isolate* isolate = Isolate::Current();
+  isolate->heap()->no_weak_object_verification_scope_depth_--;
+}
+#endif
+
+
+GCCallbacksScope::GCCallbacksScope(Heap* heap) : heap_(heap) {
+  heap_->gc_callbacks_depth_++;
+}
+
+
+GCCallbacksScope::~GCCallbacksScope() { heap_->gc_callbacks_depth_--; }
+
+
+bool GCCallbacksScope::CheckReenter() {
+  return heap_->gc_callbacks_depth_ == 1;
+}
+
+
+void VerifyPointersVisitor::VisitPointers(Object** start, Object** end) {
+  for (Object** current = start; current < end; current++) {
+    if ((*current)->IsHeapObject()) {
+      HeapObject* object = HeapObject::cast(*current);
+      CHECK(object->GetIsolate()->heap()->Contains(object));
+      CHECK(object->map()->IsMap());
+    }
+  }
+}
+
+
+void VerifySmisVisitor::VisitPointers(Object** start, Object** end) {
+  for (Object** current = start; current < end; current++) {
+    CHECK((*current)->IsSmi());
+  }
+}
+}
+}  // namespace v8::internal
+
+#endif  // V8_HEAP_HEAP_INL_H_
diff --git a/src/heap/heap.cc b/src/heap/heap.cc
new file mode 100644
index 0000000..dfe60fe
--- /dev/null
+++ b/src/heap/heap.cc
@@ -0,0 +1,6159 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/accessors.h"
+#include "src/api.h"
+#include "src/base/bits.h"
+#include "src/base/once.h"
+#include "src/base/utils/random-number-generator.h"
+#include "src/bootstrapper.h"
+#include "src/codegen.h"
+#include "src/compilation-cache.h"
+#include "src/conversions.h"
+#include "src/cpu-profiler.h"
+#include "src/debug.h"
+#include "src/deoptimizer.h"
+#include "src/global-handles.h"
+#include "src/heap/gc-idle-time-handler.h"
+#include "src/heap/incremental-marking.h"
+#include "src/heap/mark-compact.h"
+#include "src/heap/objects-visiting-inl.h"
+#include "src/heap/objects-visiting.h"
+#include "src/heap/store-buffer.h"
+#include "src/heap-profiler.h"
+#include "src/isolate-inl.h"
+#include "src/natives.h"
+#include "src/runtime-profiler.h"
+#include "src/scopeinfo.h"
+#include "src/snapshot.h"
+#include "src/utils.h"
+#include "src/v8threads.h"
+#include "src/vm-state-inl.h"
+
+#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
+#include "src/regexp-macro-assembler.h"          // NOLINT
+#include "src/arm/regexp-macro-assembler-arm.h"  // NOLINT
+#endif
+#if V8_TARGET_ARCH_MIPS && !V8_INTERPRETED_REGEXP
+#include "src/regexp-macro-assembler.h"            // NOLINT
+#include "src/mips/regexp-macro-assembler-mips.h"  // NOLINT
+#endif
+#if V8_TARGET_ARCH_MIPS64 && !V8_INTERPRETED_REGEXP
+#include "src/regexp-macro-assembler.h"
+#include "src/mips64/regexp-macro-assembler-mips64.h"
+#endif
+
+namespace v8 {
+namespace internal {
+
+
+Heap::Heap()
+    : amount_of_external_allocated_memory_(0),
+      amount_of_external_allocated_memory_at_last_global_gc_(0),
+      isolate_(NULL),
+      code_range_size_(0),
+      // semispace_size_ should be a power of 2 and old_generation_size_ should
+      // be a multiple of Page::kPageSize.
+      reserved_semispace_size_(8 * (kPointerSize / 4) * MB),
+      max_semi_space_size_(8 * (kPointerSize / 4) * MB),
+      initial_semispace_size_(Page::kPageSize),
+      max_old_generation_size_(700ul * (kPointerSize / 4) * MB),
+      max_executable_size_(256ul * (kPointerSize / 4) * MB),
+      // Variables set based on semispace_size_ and old_generation_size_ in
+      // ConfigureHeap.
+      // Will be 4 * reserved_semispace_size_ to ensure that the young
+      // generation can be aligned to its size.
+      maximum_committed_(0),
+      survived_since_last_expansion_(0),
+      sweep_generation_(0),
+      always_allocate_scope_depth_(0),
+      contexts_disposed_(0),
+      global_ic_age_(0),
+      flush_monomorphic_ics_(false),
+      scan_on_scavenge_pages_(0),
+      new_space_(this),
+      old_pointer_space_(NULL),
+      old_data_space_(NULL),
+      code_space_(NULL),
+      map_space_(NULL),
+      cell_space_(NULL),
+      property_cell_space_(NULL),
+      lo_space_(NULL),
+      gc_state_(NOT_IN_GC),
+      gc_post_processing_depth_(0),
+      allocations_count_(0),
+      raw_allocations_hash_(0),
+      dump_allocations_hash_countdown_(FLAG_dump_allocations_digest_at_alloc),
+      ms_count_(0),
+      gc_count_(0),
+      remembered_unmapped_pages_index_(0),
+      unflattened_strings_length_(0),
+#ifdef DEBUG
+      allocation_timeout_(0),
+#endif  // DEBUG
+      old_generation_allocation_limit_(kMinimumOldGenerationAllocationLimit),
+      old_gen_exhausted_(false),
+      inline_allocation_disabled_(false),
+      store_buffer_rebuilder_(store_buffer()),
+      hidden_string_(NULL),
+      gc_safe_size_of_old_object_(NULL),
+      total_regexp_code_generated_(0),
+      tracer_(this),
+      high_survival_rate_period_length_(0),
+      promoted_objects_size_(0),
+      promotion_rate_(0),
+      semi_space_copied_object_size_(0),
+      semi_space_copied_rate_(0),
+      nodes_died_in_new_space_(0),
+      nodes_copied_in_new_space_(0),
+      nodes_promoted_(0),
+      maximum_size_scavenges_(0),
+      max_gc_pause_(0.0),
+      total_gc_time_ms_(0.0),
+      max_alive_after_gc_(0),
+      min_in_mutator_(kMaxInt),
+      marking_time_(0.0),
+      sweeping_time_(0.0),
+      mark_compact_collector_(this),
+      store_buffer_(this),
+      marking_(this),
+      incremental_marking_(this),
+      gc_count_at_last_idle_gc_(0),
+      full_codegen_bytes_generated_(0),
+      crankshaft_codegen_bytes_generated_(0),
+      gcs_since_last_deopt_(0),
+#ifdef VERIFY_HEAP
+      no_weak_object_verification_scope_depth_(0),
+#endif
+      allocation_sites_scratchpad_length_(0),
+      promotion_queue_(this),
+      configured_(false),
+      external_string_table_(this),
+      chunks_queued_for_free_(NULL),
+      gc_callbacks_depth_(0) {
+// Allow build-time customization of the max semispace size. Building
+// V8 with snapshots and a non-default max semispace size is much
+// easier if you can define it as part of the build environment.
+#if defined(V8_MAX_SEMISPACE_SIZE)
+  max_semi_space_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
+#endif
+
+  // Ensure old_generation_size_ is a multiple of kPageSize.
+  DCHECK(MB >= Page::kPageSize);
+
+  memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
+  set_native_contexts_list(NULL);
+  set_array_buffers_list(Smi::FromInt(0));
+  set_allocation_sites_list(Smi::FromInt(0));
+  set_encountered_weak_collections(Smi::FromInt(0));
+  // Put a dummy entry in the remembered pages so we can find the list in a
+  // minidump even if there are no real unmapped pages.
+  RememberUnmappedPage(NULL, false);
+
+  ClearObjectStats(true);
+}
+
+
+intptr_t Heap::Capacity() {
+  if (!HasBeenSetUp()) return 0;
+
+  return new_space_.Capacity() + old_pointer_space_->Capacity() +
+         old_data_space_->Capacity() + code_space_->Capacity() +
+         map_space_->Capacity() + cell_space_->Capacity() +
+         property_cell_space_->Capacity();
+}
+
+
+intptr_t Heap::CommittedMemory() {
+  if (!HasBeenSetUp()) return 0;
+
+  return new_space_.CommittedMemory() + old_pointer_space_->CommittedMemory() +
+         old_data_space_->CommittedMemory() + code_space_->CommittedMemory() +
+         map_space_->CommittedMemory() + cell_space_->CommittedMemory() +
+         property_cell_space_->CommittedMemory() + lo_space_->Size();
+}
+
+
+size_t Heap::CommittedPhysicalMemory() {
+  if (!HasBeenSetUp()) return 0;
+
+  return new_space_.CommittedPhysicalMemory() +
+         old_pointer_space_->CommittedPhysicalMemory() +
+         old_data_space_->CommittedPhysicalMemory() +
+         code_space_->CommittedPhysicalMemory() +
+         map_space_->CommittedPhysicalMemory() +
+         cell_space_->CommittedPhysicalMemory() +
+         property_cell_space_->CommittedPhysicalMemory() +
+         lo_space_->CommittedPhysicalMemory();
+}
+
+
+intptr_t Heap::CommittedMemoryExecutable() {
+  if (!HasBeenSetUp()) return 0;
+
+  return isolate()->memory_allocator()->SizeExecutable();
+}
+
+
+void Heap::UpdateMaximumCommitted() {
+  if (!HasBeenSetUp()) return;
+
+  intptr_t current_committed_memory = CommittedMemory();
+  if (current_committed_memory > maximum_committed_) {
+    maximum_committed_ = current_committed_memory;
+  }
+}
+
+
+intptr_t Heap::Available() {
+  if (!HasBeenSetUp()) return 0;
+
+  return new_space_.Available() + old_pointer_space_->Available() +
+         old_data_space_->Available() + code_space_->Available() +
+         map_space_->Available() + cell_space_->Available() +
+         property_cell_space_->Available();
+}
+
+
+bool Heap::HasBeenSetUp() {
+  return old_pointer_space_ != NULL && old_data_space_ != NULL &&
+         code_space_ != NULL && map_space_ != NULL && cell_space_ != NULL &&
+         property_cell_space_ != NULL && lo_space_ != NULL;
+}
+
+
+int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
+  if (IntrusiveMarking::IsMarked(object)) {
+    return IntrusiveMarking::SizeOfMarkedObject(object);
+  }
+  return object->SizeFromMap(object->map());
+}
+
+
+GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
+                                              const char** reason) {
+  // Is global GC requested?
+  if (space != NEW_SPACE) {
+    isolate_->counters()->gc_compactor_caused_by_request()->Increment();
+    *reason = "GC in old space requested";
+    return MARK_COMPACTOR;
+  }
+
+  if (FLAG_gc_global || (FLAG_stress_compaction && (gc_count_ & 1) != 0)) {
+    *reason = "GC in old space forced by flags";
+    return MARK_COMPACTOR;
+  }
+
+  // Is enough data promoted to justify a global GC?
+  if (OldGenerationAllocationLimitReached()) {
+    isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
+    *reason = "promotion limit reached";
+    return MARK_COMPACTOR;
+  }
+
+  // Have allocation in OLD and LO failed?
+  if (old_gen_exhausted_) {
+    isolate_->counters()
+        ->gc_compactor_caused_by_oldspace_exhaustion()
+        ->Increment();
+    *reason = "old generations exhausted";
+    return MARK_COMPACTOR;
+  }
+
+  // Is there enough space left in OLD to guarantee that a scavenge can
+  // succeed?
+  //
+  // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
+  // for object promotion. It counts only the bytes that the memory
+  // allocator has not yet allocated from the OS and assigned to any space,
+  // and does not count available bytes already in the old space or code
+  // space.  Undercounting is safe---we may get an unrequested full GC when
+  // a scavenge would have succeeded.
+  if (isolate_->memory_allocator()->MaxAvailable() <= new_space_.Size()) {
+    isolate_->counters()
+        ->gc_compactor_caused_by_oldspace_exhaustion()
+        ->Increment();
+    *reason = "scavenge might not succeed";
+    return MARK_COMPACTOR;
+  }
+
+  // Default
+  *reason = NULL;
+  return SCAVENGER;
+}
+
+
+// TODO(1238405): Combine the infrastructure for --heap-stats and
+// --log-gc to avoid the complicated preprocessor and flag testing.
+void Heap::ReportStatisticsBeforeGC() {
+// Heap::ReportHeapStatistics will also log NewSpace statistics when
+// --log-gc is set.  The following logic is used to avoid
+// double logging.
+#ifdef DEBUG
+  if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
+  if (FLAG_heap_stats) {
+    ReportHeapStatistics("Before GC");
+  } else if (FLAG_log_gc) {
+    new_space_.ReportStatistics();
+  }
+  if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
+#else
+  if (FLAG_log_gc) {
+    new_space_.CollectStatistics();
+    new_space_.ReportStatistics();
+    new_space_.ClearHistograms();
+  }
+#endif  // DEBUG
+}
+
+
+void Heap::PrintShortHeapStatistics() {
+  if (!FLAG_trace_gc_verbose) return;
+  PrintPID("Memory allocator,   used: %6" V8_PTR_PREFIX
+           "d KB"
+           ", available: %6" V8_PTR_PREFIX "d KB\n",
+           isolate_->memory_allocator()->Size() / KB,
+           isolate_->memory_allocator()->Available() / KB);
+  PrintPID("New space,          used: %6" V8_PTR_PREFIX
+           "d KB"
+           ", available: %6" V8_PTR_PREFIX
+           "d KB"
+           ", committed: %6" V8_PTR_PREFIX "d KB\n",
+           new_space_.Size() / KB, new_space_.Available() / KB,
+           new_space_.CommittedMemory() / KB);
+  PrintPID("Old pointers,       used: %6" V8_PTR_PREFIX
+           "d KB"
+           ", available: %6" V8_PTR_PREFIX
+           "d KB"
+           ", committed: %6" V8_PTR_PREFIX "d KB\n",
+           old_pointer_space_->SizeOfObjects() / KB,
+           old_pointer_space_->Available() / KB,
+           old_pointer_space_->CommittedMemory() / KB);
+  PrintPID("Old data space,     used: %6" V8_PTR_PREFIX
+           "d KB"
+           ", available: %6" V8_PTR_PREFIX
+           "d KB"
+           ", committed: %6" V8_PTR_PREFIX "d KB\n",
+           old_data_space_->SizeOfObjects() / KB,
+           old_data_space_->Available() / KB,
+           old_data_space_->CommittedMemory() / KB);
+  PrintPID("Code space,         used: %6" V8_PTR_PREFIX
+           "d KB"
+           ", available: %6" V8_PTR_PREFIX
+           "d KB"
+           ", committed: %6" V8_PTR_PREFIX "d KB\n",
+           code_space_->SizeOfObjects() / KB, code_space_->Available() / KB,
+           code_space_->CommittedMemory() / KB);
+  PrintPID("Map space,          used: %6" V8_PTR_PREFIX
+           "d KB"
+           ", available: %6" V8_PTR_PREFIX
+           "d KB"
+           ", committed: %6" V8_PTR_PREFIX "d KB\n",
+           map_space_->SizeOfObjects() / KB, map_space_->Available() / KB,
+           map_space_->CommittedMemory() / KB);
+  PrintPID("Cell space,         used: %6" V8_PTR_PREFIX
+           "d KB"
+           ", available: %6" V8_PTR_PREFIX
+           "d KB"
+           ", committed: %6" V8_PTR_PREFIX "d KB\n",
+           cell_space_->SizeOfObjects() / KB, cell_space_->Available() / KB,
+           cell_space_->CommittedMemory() / KB);
+  PrintPID("PropertyCell space, used: %6" V8_PTR_PREFIX
+           "d KB"
+           ", available: %6" V8_PTR_PREFIX
+           "d KB"
+           ", committed: %6" V8_PTR_PREFIX "d KB\n",
+           property_cell_space_->SizeOfObjects() / KB,
+           property_cell_space_->Available() / KB,
+           property_cell_space_->CommittedMemory() / KB);
+  PrintPID("Large object space, used: %6" V8_PTR_PREFIX
+           "d KB"
+           ", available: %6" V8_PTR_PREFIX
+           "d KB"
+           ", committed: %6" V8_PTR_PREFIX "d KB\n",
+           lo_space_->SizeOfObjects() / KB, lo_space_->Available() / KB,
+           lo_space_->CommittedMemory() / KB);
+  PrintPID("All spaces,         used: %6" V8_PTR_PREFIX
+           "d KB"
+           ", available: %6" V8_PTR_PREFIX
+           "d KB"
+           ", committed: %6" V8_PTR_PREFIX "d KB\n",
+           this->SizeOfObjects() / KB, this->Available() / KB,
+           this->CommittedMemory() / KB);
+  PrintPID("External memory reported: %6" V8_PTR_PREFIX "d KB\n",
+           static_cast<intptr_t>(amount_of_external_allocated_memory_ / KB));
+  PrintPID("Total time spent in GC  : %.1f ms\n", total_gc_time_ms_);
+}
+
+
+// TODO(1238405): Combine the infrastructure for --heap-stats and
+// --log-gc to avoid the complicated preprocessor and flag testing.
+void Heap::ReportStatisticsAfterGC() {
+// Similar to the pre-GC case, we use some complicated logic to ensure that
+// NewSpace statistics are logged exactly once when --log-gc is turned on.
+#if defined(DEBUG)
+  if (FLAG_heap_stats) {
+    new_space_.CollectStatistics();
+    ReportHeapStatistics("After GC");
+  } else if (FLAG_log_gc) {
+    new_space_.ReportStatistics();
+  }
+#else
+  if (FLAG_log_gc) new_space_.ReportStatistics();
+#endif  // DEBUG
+}
+
+
+void Heap::GarbageCollectionPrologue() {
+  {
+    AllowHeapAllocation for_the_first_part_of_prologue;
+    ClearJSFunctionResultCaches();
+    gc_count_++;
+    unflattened_strings_length_ = 0;
+
+    if (FLAG_flush_code && FLAG_flush_code_incrementally) {
+      mark_compact_collector()->EnableCodeFlushing(true);
+    }
+
+#ifdef VERIFY_HEAP
+    if (FLAG_verify_heap) {
+      Verify();
+    }
+#endif
+  }
+
+  // Reset GC statistics.
+  promoted_objects_size_ = 0;
+  semi_space_copied_object_size_ = 0;
+  nodes_died_in_new_space_ = 0;
+  nodes_copied_in_new_space_ = 0;
+  nodes_promoted_ = 0;
+
+  UpdateMaximumCommitted();
+
+#ifdef DEBUG
+  DCHECK(!AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);
+
+  if (FLAG_gc_verbose) Print();
+
+  ReportStatisticsBeforeGC();
+#endif  // DEBUG
+
+  store_buffer()->GCPrologue();
+
+  if (isolate()->concurrent_osr_enabled()) {
+    isolate()->optimizing_compiler_thread()->AgeBufferedOsrJobs();
+  }
+
+  if (new_space_.IsAtMaximumCapacity()) {
+    maximum_size_scavenges_++;
+  } else {
+    maximum_size_scavenges_ = 0;
+  }
+  CheckNewSpaceExpansionCriteria();
+}
+
+
+intptr_t Heap::SizeOfObjects() {
+  intptr_t total = 0;
+  AllSpaces spaces(this);
+  for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
+    total += space->SizeOfObjects();
+  }
+  return total;
+}
+
+
+void Heap::ClearAllICsByKind(Code::Kind kind) {
+  HeapObjectIterator it(code_space());
+
+  for (Object* object = it.Next(); object != NULL; object = it.Next()) {
+    Code* code = Code::cast(object);
+    Code::Kind current_kind = code->kind();
+    if (current_kind == Code::FUNCTION ||
+        current_kind == Code::OPTIMIZED_FUNCTION) {
+      code->ClearInlineCaches(kind);
+    }
+  }
+}
+
+
+void Heap::RepairFreeListsAfterBoot() {
+  PagedSpaces spaces(this);
+  for (PagedSpace* space = spaces.next(); space != NULL;
+       space = spaces.next()) {
+    space->RepairFreeListsAfterBoot();
+  }
+}
+
+
+void Heap::ProcessPretenuringFeedback() {
+  if (FLAG_allocation_site_pretenuring) {
+    int tenure_decisions = 0;
+    int dont_tenure_decisions = 0;
+    int allocation_mementos_found = 0;
+    int allocation_sites = 0;
+    int active_allocation_sites = 0;
+
+    // If the scratchpad overflowed, we have to iterate over the allocation
+    // sites list.
+    // TODO(hpayer): We iterate over the whole list of allocation sites when
+    // we have grown to the maximum semi-space size to deopt maybe tenured
+    // allocation sites. We could hold the maybe tenured allocation sites
+    // in a separate data structure if this is a performance problem.
+    bool deopt_maybe_tenured = DeoptMaybeTenuredAllocationSites();
+    bool use_scratchpad =
+        allocation_sites_scratchpad_length_ < kAllocationSiteScratchpadSize &&
+        !deopt_maybe_tenured;
+
+    int i = 0;
+    Object* list_element = allocation_sites_list();
+    bool trigger_deoptimization = false;
+    bool maximum_size_scavenge = MaximumSizeScavenge();
+    while (use_scratchpad ? i < allocation_sites_scratchpad_length_
+                          : list_element->IsAllocationSite()) {
+      AllocationSite* site =
+          use_scratchpad
+              ? AllocationSite::cast(allocation_sites_scratchpad()->get(i))
+              : AllocationSite::cast(list_element);
+      allocation_mementos_found += site->memento_found_count();
+      if (site->memento_found_count() > 0) {
+        active_allocation_sites++;
+        if (site->DigestPretenuringFeedback(maximum_size_scavenge)) {
+          trigger_deoptimization = true;
+        }
+        if (site->GetPretenureMode() == TENURED) {
+          tenure_decisions++;
+        } else {
+          dont_tenure_decisions++;
+        }
+        allocation_sites++;
+      }
+
+      if (deopt_maybe_tenured && site->IsMaybeTenure()) {
+        site->set_deopt_dependent_code(true);
+        trigger_deoptimization = true;
+      }
+
+      if (use_scratchpad) {
+        i++;
+      } else {
+        list_element = site->weak_next();
+      }
+    }
+
+    if (trigger_deoptimization) {
+      isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
+    }
+
+    FlushAllocationSitesScratchpad();
+
+    if (FLAG_trace_pretenuring_statistics &&
+        (allocation_mementos_found > 0 || tenure_decisions > 0 ||
+         dont_tenure_decisions > 0)) {
+      PrintF(
+          "GC: (mode, #visited allocation sites, #active allocation sites, "
+          "#mementos, #tenure decisions, #donttenure decisions) "
+          "(%s, %d, %d, %d, %d, %d)\n",
+          use_scratchpad ? "use scratchpad" : "use list", allocation_sites,
+          active_allocation_sites, allocation_mementos_found, tenure_decisions,
+          dont_tenure_decisions);
+    }
+  }
+}
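ProcessPretenuringFeedback above tallies mementos per allocation site and lets each site digest its own feedback; the decision logic itself (AllocationSite::DigestPretenuringFeedback) is not part of this hunk. As a loose, hypothetical illustration of the kind of ratio test such feedback typically drives, with an invented threshold:

// Hypothetical sketch, not the code in this hunk: tenure a site once the
// share of created mementos that were later found exceeds a threshold.
struct SiteFeedback {
  int memento_create_count = 0;
  int memento_found_count = 0;
};

enum class Decision { kDontTenure, kTenure };

static Decision DigestFeedback(const SiteFeedback& site,
                               double threshold = 0.85 /* assumed */) {
  if (site.memento_create_count == 0) return Decision::kDontTenure;
  double found_ratio = static_cast<double>(site.memento_found_count) /
                       site.memento_create_count;
  return found_ratio >= threshold ? Decision::kTenure : Decision::kDontTenure;
}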
+
+
+void Heap::DeoptMarkedAllocationSites() {
+  // TODO(hpayer): If iterating over the allocation sites list becomes a
+  // performance issue, use a cache heap data structure instead (similar to the
+  // allocation sites scratchpad).
+  Object* list_element = allocation_sites_list();
+  while (list_element->IsAllocationSite()) {
+    AllocationSite* site = AllocationSite::cast(list_element);
+    if (site->deopt_dependent_code()) {
+      site->dependent_code()->MarkCodeForDeoptimization(
+          isolate_, DependentCode::kAllocationSiteTenuringChangedGroup);
+      site->set_deopt_dependent_code(false);
+    }
+    list_element = site->weak_next();
+  }
+  Deoptimizer::DeoptimizeMarkedCode(isolate_);
+}
+
+
+void Heap::GarbageCollectionEpilogue() {
+  store_buffer()->GCEpilogue();
+
+  // In release mode, we only zap the from space under heap verification.
+  if (Heap::ShouldZapGarbage()) {
+    ZapFromSpace();
+  }
+
+  // Process pretenuring feedback and update allocation sites.
+  ProcessPretenuringFeedback();
+
+#ifdef VERIFY_HEAP
+  if (FLAG_verify_heap) {
+    Verify();
+  }
+#endif
+
+  AllowHeapAllocation for_the_rest_of_the_epilogue;
+
+#ifdef DEBUG
+  if (FLAG_print_global_handles) isolate_->global_handles()->Print();
+  if (FLAG_print_handles) PrintHandles();
+  if (FLAG_gc_verbose) Print();
+  if (FLAG_code_stats) ReportCodeStatistics("After GC");
+#endif
+  if (FLAG_deopt_every_n_garbage_collections > 0) {
+    // TODO(jkummerow/ulan/jarin): This is not safe! We can't assume that
+    // the topmost optimized frame can be deoptimized safely, because it
+    // might not have a lazy bailout point right after its current PC.
+    if (++gcs_since_last_deopt_ == FLAG_deopt_every_n_garbage_collections) {
+      Deoptimizer::DeoptimizeAll(isolate());
+      gcs_since_last_deopt_ = 0;
+    }
+  }
+
+  UpdateMaximumCommitted();
+
+  isolate_->counters()->alive_after_last_gc()->Set(
+      static_cast<int>(SizeOfObjects()));
+
+  isolate_->counters()->string_table_capacity()->Set(
+      string_table()->Capacity());
+  isolate_->counters()->number_of_symbols()->Set(
+      string_table()->NumberOfElements());
+
+  if (full_codegen_bytes_generated_ + crankshaft_codegen_bytes_generated_ > 0) {
+    isolate_->counters()->codegen_fraction_crankshaft()->AddSample(
+        static_cast<int>((crankshaft_codegen_bytes_generated_ * 100.0) /
+                         (crankshaft_codegen_bytes_generated_ +
+                          full_codegen_bytes_generated_)));
+  }
+
+  if (CommittedMemory() > 0) {
+    isolate_->counters()->external_fragmentation_total()->AddSample(
+        static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory()));
+
+    isolate_->counters()->heap_fraction_new_space()->AddSample(static_cast<int>(
+        (new_space()->CommittedMemory() * 100.0) / CommittedMemory()));
+    isolate_->counters()->heap_fraction_old_pointer_space()->AddSample(
+        static_cast<int>((old_pointer_space()->CommittedMemory() * 100.0) /
+                         CommittedMemory()));
+    isolate_->counters()->heap_fraction_old_data_space()->AddSample(
+        static_cast<int>((old_data_space()->CommittedMemory() * 100.0) /
+                         CommittedMemory()));
+    isolate_->counters()->heap_fraction_code_space()->AddSample(
+        static_cast<int>((code_space()->CommittedMemory() * 100.0) /
+                         CommittedMemory()));
+    isolate_->counters()->heap_fraction_map_space()->AddSample(static_cast<int>(
+        (map_space()->CommittedMemory() * 100.0) / CommittedMemory()));
+    isolate_->counters()->heap_fraction_cell_space()->AddSample(
+        static_cast<int>((cell_space()->CommittedMemory() * 100.0) /
+                         CommittedMemory()));
+    isolate_->counters()->heap_fraction_property_cell_space()->AddSample(
+        static_cast<int>((property_cell_space()->CommittedMemory() * 100.0) /
+                         CommittedMemory()));
+    isolate_->counters()->heap_fraction_lo_space()->AddSample(static_cast<int>(
+        (lo_space()->CommittedMemory() * 100.0) / CommittedMemory()));
+
+    isolate_->counters()->heap_sample_total_committed()->AddSample(
+        static_cast<int>(CommittedMemory() / KB));
+    isolate_->counters()->heap_sample_total_used()->AddSample(
+        static_cast<int>(SizeOfObjects() / KB));
+    isolate_->counters()->heap_sample_map_space_committed()->AddSample(
+        static_cast<int>(map_space()->CommittedMemory() / KB));
+    isolate_->counters()->heap_sample_cell_space_committed()->AddSample(
+        static_cast<int>(cell_space()->CommittedMemory() / KB));
+    isolate_->counters()
+        ->heap_sample_property_cell_space_committed()
+        ->AddSample(
+            static_cast<int>(property_cell_space()->CommittedMemory() / KB));
+    isolate_->counters()->heap_sample_code_space_committed()->AddSample(
+        static_cast<int>(code_space()->CommittedMemory() / KB));
+
+    isolate_->counters()->heap_sample_maximum_committed()->AddSample(
+        static_cast<int>(MaximumCommittedMemory() / KB));
+  }
+
+#define UPDATE_COUNTERS_FOR_SPACE(space)                \
+  isolate_->counters()->space##_bytes_available()->Set( \
+      static_cast<int>(space()->Available()));          \
+  isolate_->counters()->space##_bytes_committed()->Set( \
+      static_cast<int>(space()->CommittedMemory()));    \
+  isolate_->counters()->space##_bytes_used()->Set(      \
+      static_cast<int>(space()->SizeOfObjects()));
+#define UPDATE_FRAGMENTATION_FOR_SPACE(space)                          \
+  if (space()->CommittedMemory() > 0) {                                \
+    isolate_->counters()->external_fragmentation_##space()->AddSample( \
+        static_cast<int>(100 -                                         \
+                         (space()->SizeOfObjects() * 100.0) /          \
+                             space()->CommittedMemory()));             \
+  }
+#define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space) \
+  UPDATE_COUNTERS_FOR_SPACE(space)                         \
+  UPDATE_FRAGMENTATION_FOR_SPACE(space)
+
+  UPDATE_COUNTERS_FOR_SPACE(new_space)
+  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_pointer_space)
+  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_data_space)
+  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space)
+  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
+  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(cell_space)
+  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(property_cell_space)
+  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space)
+#undef UPDATE_COUNTERS_FOR_SPACE
+#undef UPDATE_FRAGMENTATION_FOR_SPACE
+#undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE
+
+#ifdef DEBUG
+  ReportStatisticsAfterGC();
+#endif  // DEBUG
+
+  // Remember the last top pointer so that we can later find out
+  // whether we allocated in new space since the last GC.
+  new_space_top_after_last_gc_ = new_space()->top();
+}
+
+
+void Heap::CollectAllGarbage(int flags, const char* gc_reason,
+                             const v8::GCCallbackFlags gc_callback_flags) {
+  // Since we are ignoring the return value, the exact choice of space does
+  // not matter, so long as we do not specify NEW_SPACE, which would not
+  // cause a full GC.
+  mark_compact_collector_.SetFlags(flags);
+  CollectGarbage(OLD_POINTER_SPACE, gc_reason, gc_callback_flags);
+  mark_compact_collector_.SetFlags(kNoGCFlags);
+}
+
+
+void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
+  // Since we are ignoring the return value, the exact choice of space does
+  // not matter, so long as we do not specify NEW_SPACE, which would not
+  // cause a full GC.
+  // Major GC would invoke weak handle callbacks on weakly reachable
+  // handles, but won't collect weakly reachable objects until next
+  // major GC.  Therefore if we collect aggressively and a weak handle
+  // callback has been invoked, we rerun a major GC to release objects which
+  // become garbage.
+  // Note: as weak callbacks can execute arbitrary code, we cannot
+  // hope that eventually there will be no weak callback invocations.
+  // Therefore stop recollecting after several attempts.
+  if (isolate()->concurrent_recompilation_enabled()) {
+    // The optimizing compiler may be unnecessarily holding on to memory.
+    DisallowHeapAllocation no_recursive_gc;
+    isolate()->optimizing_compiler_thread()->Flush();
+  }
+  mark_compact_collector()->SetFlags(kMakeHeapIterableMask |
+                                     kReduceMemoryFootprintMask);
+  isolate_->compilation_cache()->Clear();
+  const int kMaxNumberOfAttempts = 7;
+  const int kMinNumberOfAttempts = 2;
+  for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
+    if (!CollectGarbage(MARK_COMPACTOR, gc_reason, NULL) &&
+        attempt + 1 >= kMinNumberOfAttempts) {
+      break;
+    }
+  }
+  mark_compact_collector()->SetFlags(kNoGCFlags);
+  new_space_.Shrink();
+  UncommitFromSpace();
+  incremental_marking()->UncommitMarkingDeque();
+}
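CollectAllAvailableGarbage above runs at least kMinNumberOfAttempts collections and keeps going, up to kMaxNumberOfAttempts, while each round reports that another is likely to free more memory. The attempt policy in isolation, with assumed callback plumbing:

#include <functional>

// Sketch only: collect_garbage() returns true when the next round is likely
// to free more memory, mirroring CollectGarbage's return value above.
static int RunAggressiveCollections(
    const std::function<bool()>& collect_garbage, int min_attempts,
    int max_attempts) {
  int attempts = 0;
  for (int attempt = 0; attempt < max_attempts; attempt++) {
    ++attempts;
    bool next_gc_likely_to_collect_more = collect_garbage();
    if (!next_gc_likely_to_collect_more && attempt + 1 >= min_attempts) break;
  }
  return attempts;
}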
+
+
+void Heap::EnsureFillerObjectAtTop() {
+  // There may be an allocation memento behind every object in new space.
+  // If we evacuate a new space that is not full, or if we are on the last
+  // page of the new space, then there may be uninitialized memory behind
+  // the top pointer of the new space page. We store a filler object there to
+  // identify the unused space.
+  Address from_top = new_space_.top();
+  Address from_limit = new_space_.limit();
+  if (from_top < from_limit) {
+    int remaining_in_page = static_cast<int>(from_limit - from_top);
+    CreateFillerObjectAt(from_top, remaining_in_page);
+  }
+}
+
+
+bool Heap::CollectGarbage(GarbageCollector collector, const char* gc_reason,
+                          const char* collector_reason,
+                          const v8::GCCallbackFlags gc_callback_flags) {
+  // The VM is in the GC state until exiting this function.
+  VMState<GC> state(isolate_);
+
+#ifdef DEBUG
+  // Reset the allocation timeout to the GC interval, but make sure to
+  // allow at least a few allocations after a collection. The reason
+  // for this is that we have a lot of allocation sequences and we
+  // assume that a garbage collection will allow the subsequent
+  // allocation attempts to go through.
+  allocation_timeout_ = Max(6, FLAG_gc_interval);
+#endif
+
+  EnsureFillerObjectAtTop();
+
+  if (collector == SCAVENGER && !incremental_marking()->IsStopped()) {
+    if (FLAG_trace_incremental_marking) {
+      PrintF("[IncrementalMarking] Scavenge during marking.\n");
+    }
+  }
+
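+  // If a full GC was requested while incremental marking is in progress and
+  // marking is neither being aborted nor hurried, push marking forward by a
+  // step first; if that still does not complete marking, the collection is
+  // downgraded to a scavenge below so marking can finish incrementally.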
+  if (collector == MARK_COMPACTOR &&
+      !mark_compact_collector()->abort_incremental_marking() &&
+      !incremental_marking()->IsStopped() &&
+      !incremental_marking()->should_hurry() &&
+      FLAG_incremental_marking_steps) {
+    // Make progress in incremental marking.
+    const intptr_t kStepSizeWhenDelayedByScavenge = 1 * MB;
+    incremental_marking()->Step(kStepSizeWhenDelayedByScavenge,
+                                IncrementalMarking::NO_GC_VIA_STACK_GUARD);
+    if (!incremental_marking()->IsComplete() && !FLAG_gc_global) {
+      if (FLAG_trace_incremental_marking) {
+        PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
+      }
+      collector = SCAVENGER;
+      collector_reason = "incremental marking delaying mark-sweep";
+    }
+  }
+
+  bool next_gc_likely_to_collect_more = false;
+
+  {
+    tracer()->Start(collector, gc_reason, collector_reason);
+    DCHECK(AllowHeapAllocation::IsAllowed());
+    DisallowHeapAllocation no_allocation_during_gc;
+    GarbageCollectionPrologue();
+
+    {
+      HistogramTimerScope histogram_timer_scope(
+          (collector == SCAVENGER) ? isolate_->counters()->gc_scavenger()
+                                   : isolate_->counters()->gc_compactor());
+      next_gc_likely_to_collect_more =
+          PerformGarbageCollection(collector, gc_callback_flags);
+    }
+
+    GarbageCollectionEpilogue();
+    tracer()->Stop();
+  }
+
+  // Start incremental marking for the next cycle. The heap snapshot
+  // generator needs incremental marking to stay off after it aborted.
+  if (!mark_compact_collector()->abort_incremental_marking() &&
+      WorthActivatingIncrementalMarking()) {
+    incremental_marking()->Start();
+  }
+
+  return next_gc_likely_to_collect_more;
+}
+
+
+int Heap::NotifyContextDisposed() {
+  if (isolate()->concurrent_recompilation_enabled()) {
+    // Flush the queued recompilation tasks.
+    isolate()->optimizing_compiler_thread()->Flush();
+  }
+  flush_monomorphic_ics_ = true;
+  AgeInlineCaches();
+  return ++contexts_disposed_;
+}
+
+
+void Heap::MoveElements(FixedArray* array, int dst_index, int src_index,
+                        int len) {
+  if (len == 0) return;
+
+  DCHECK(array->map() != fixed_cow_array_map());
+  Object** dst_objects = array->data_start() + dst_index;
+  MemMove(dst_objects, array->data_start() + src_index, len * kPointerSize);
+  if (!InNewSpace(array)) {
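+    // The array is not in new space, so record a write for every moved slot
+    // that now points into new space.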
+    for (int i = 0; i < len; i++) {
+      // TODO(hpayer): check store buffer for entries
+      if (InNewSpace(dst_objects[i])) {
+        RecordWrite(array->address(), array->OffsetOfElementAt(dst_index + i));
+      }
+    }
+  }
+  incremental_marking()->RecordWrites(array);
+}
+
+
+#ifdef VERIFY_HEAP
+// Helper class for verifying the string table.
+class StringTableVerifier : public ObjectVisitor {
+ public:
+  void VisitPointers(Object** start, Object** end) {
+    // Visit all HeapObject pointers in [start, end).
+    for (Object** p = start; p < end; p++) {
+      if ((*p)->IsHeapObject()) {
+        // Check that the string is actually internalized.
+        CHECK((*p)->IsTheHole() || (*p)->IsUndefined() ||
+              (*p)->IsInternalizedString());
+      }
+    }
+  }
+};
+
+
+static void VerifyStringTable(Heap* heap) {
+  StringTableVerifier verifier;
+  heap->string_table()->IterateElements(&verifier);
+}
+#endif  // VERIFY_HEAP
+
+
+static bool AbortIncrementalMarkingAndCollectGarbage(
+    Heap* heap, AllocationSpace space, const char* gc_reason = NULL) {
+  heap->mark_compact_collector()->SetFlags(Heap::kAbortIncrementalMarkingMask);
+  bool result = heap->CollectGarbage(space, gc_reason);
+  heap->mark_compact_collector()->SetFlags(Heap::kNoGCFlags);
+  return result;
+}
+
+
+void Heap::ReserveSpace(int* sizes, Address* locations_out) {
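+  // Try to allocate a chunk of the requested size in each space, running GCs
+  // as needed, until every reservation succeeds or kThreshold attempts have
+  // been made; failing that is treated as an out-of-memory condition.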
+  bool gc_performed = true;
+  int counter = 0;
+  static const int kThreshold = 20;
+  while (gc_performed && counter++ < kThreshold) {
+    gc_performed = false;
+    DCHECK(NEW_SPACE == FIRST_PAGED_SPACE - 1);
+    for (int space = NEW_SPACE; space <= LAST_PAGED_SPACE; space++) {
+      if (sizes[space] != 0) {
+        AllocationResult allocation;
+        if (space == NEW_SPACE) {
+          allocation = new_space()->AllocateRaw(sizes[space]);
+        } else {
+          allocation = paged_space(space)->AllocateRaw(sizes[space]);
+        }
+        FreeListNode* node;
+        if (!allocation.To(&node)) {
+          if (space == NEW_SPACE) {
+            Heap::CollectGarbage(NEW_SPACE,
+                                 "failed to reserve space in the new space");
+          } else {
+            AbortIncrementalMarkingAndCollectGarbage(
+                this, static_cast<AllocationSpace>(space),
+                "failed to reserve space in paged space");
+          }
+          gc_performed = true;
+          break;
+        } else {
+          // Mark with a free list node, in case we have a GC before
+          // deserializing.
+          node->set_size(this, sizes[space]);
+          locations_out[space] = node->address();
+        }
+      }
+    }
+  }
+
+  if (gc_performed) {
+    // Failed to reserve the space after several attempts.
+    V8::FatalProcessOutOfMemory("Heap::ReserveSpace");
+  }
+}
+
+
+void Heap::EnsureFromSpaceIsCommitted() {
+  if (new_space_.CommitFromSpaceIfNeeded()) return;
+
+  // Committing memory to from space failed.
+  // Memory is exhausted and we will die.
+  V8::FatalProcessOutOfMemory("Committing semi space failed.");
+}
+
+
+void Heap::ClearJSFunctionResultCaches() {
+  if (isolate_->bootstrapper()->IsActive()) return;
+
+  Object* context = native_contexts_list();
+  while (!context->IsUndefined()) {
+    // Get the caches for this context. GC can happen when the context
+    // is not fully initialized, so the caches can be undefined.
+    Object* caches_or_undefined =
+        Context::cast(context)->get(Context::JSFUNCTION_RESULT_CACHES_INDEX);
+    if (!caches_or_undefined->IsUndefined()) {
+      FixedArray* caches = FixedArray::cast(caches_or_undefined);
+      // Clear the caches:
+      int length = caches->length();
+      for (int i = 0; i < length; i++) {
+        JSFunctionResultCache::cast(caches->get(i))->Clear();
+      }
+    }
+    // Get the next context:
+    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
+  }
+}
+
+
+void Heap::ClearNormalizedMapCaches() {
+  if (isolate_->bootstrapper()->IsActive() &&
+      !incremental_marking()->IsMarking()) {
+    return;
+  }
+
+  Object* context = native_contexts_list();
+  while (!context->IsUndefined()) {
+    // GC can happen when the context is not fully initialized,
+    // so the cache can be undefined.
+    Object* cache =
+        Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX);
+    if (!cache->IsUndefined()) {
+      NormalizedMapCache::cast(cache)->Clear();
+    }
+    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
+  }
+}
+
+
+void Heap::UpdateSurvivalStatistics(int start_new_space_size) {
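+  // The promotion and semi-space copy rates are percentages of the new space
+  // size at the start of this collection.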
+  if (start_new_space_size == 0) return;
+
+  promotion_rate_ = (static_cast<double>(promoted_objects_size_) /
+                     static_cast<double>(start_new_space_size) * 100);
+
+  semi_space_copied_rate_ =
+      (static_cast<double>(semi_space_copied_object_size_) /
+       static_cast<double>(start_new_space_size) * 100);
+
+  double survival_rate = promotion_rate_ + semi_space_copied_rate_;
+
+  if (survival_rate > kYoungSurvivalRateHighThreshold) {
+    high_survival_rate_period_length_++;
+  } else {
+    high_survival_rate_period_length_ = 0;
+  }
+}
+
+
+bool Heap::PerformGarbageCollection(
+    GarbageCollector collector, const v8::GCCallbackFlags gc_callback_flags) {
+  int freed_global_handles = 0;
+
+  if (collector != SCAVENGER) {
+    PROFILE(isolate_, CodeMovingGCEvent());
+  }
+
+#ifdef VERIFY_HEAP
+  if (FLAG_verify_heap) {
+    VerifyStringTable(this);
+  }
+#endif
+
+  GCType gc_type =
+      collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;
+
+  {
+    GCCallbacksScope scope(this);
+    if (scope.CheckReenter()) {
+      AllowHeapAllocation allow_allocation;
+      GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
+      VMState<EXTERNAL> state(isolate_);
+      HandleScope handle_scope(isolate_);
+      CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
+    }
+  }
+
+  EnsureFromSpaceIsCommitted();
+
+  int start_new_space_size = Heap::new_space()->SizeAsInt();
+
+  if (IsHighSurvivalRate()) {
+    // We speed up the incremental marker if it is running so that it
+    // does not fall behind the rate of promotion, which would cause a
+    // constantly growing old space.
+    incremental_marking()->NotifyOfHighPromotionRate();
+  }
+
+  if (collector == MARK_COMPACTOR) {
+    // Perform mark-sweep with optional compaction.
+    MarkCompact();
+    sweep_generation_++;
+    // Temporarily set the limit for the case when
+    // PostGarbageCollectionProcessing allocates and triggers a GC. The real
+    // limit is set again after PostGarbageCollectionProcessing.
+    old_generation_allocation_limit_ =
+        OldGenerationAllocationLimit(PromotedSpaceSizeOfObjects(), 0);
+    old_gen_exhausted_ = false;
+  } else {
+    Scavenge();
+  }
+
+  UpdateSurvivalStatistics(start_new_space_size);
+
+  isolate_->counters()->objs_since_last_young()->Set(0);
+
+  // Callbacks that fire after this point might trigger nested GCs and
+  // restart incremental marking, so the assertion can't be moved down.
+  DCHECK(collector == SCAVENGER || incremental_marking()->IsStopped());
+
+  gc_post_processing_depth_++;
+  {
+    AllowHeapAllocation allow_allocation;
+    GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
+    freed_global_handles =
+        isolate_->global_handles()->PostGarbageCollectionProcessing(collector);
+  }
+  gc_post_processing_depth_--;
+
+  isolate_->eternal_handles()->PostGarbageCollectionProcessing(this);
+
+  // Update relocatables.
+  Relocatable::PostGarbageCollectionProcessing(isolate_);
+
+  if (collector == MARK_COMPACTOR) {
+    // Register the amount of external allocated memory.
+    amount_of_external_allocated_memory_at_last_global_gc_ =
+        amount_of_external_allocated_memory_;
+    old_generation_allocation_limit_ = OldGenerationAllocationLimit(
+        PromotedSpaceSizeOfObjects(), freed_global_handles);
+  }
+
+  {
+    GCCallbacksScope scope(this);
+    if (scope.CheckReenter()) {
+      AllowHeapAllocation allow_allocation;
+      GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
+      VMState<EXTERNAL> state(isolate_);
+      HandleScope handle_scope(isolate_);
+      CallGCEpilogueCallbacks(gc_type, gc_callback_flags);
+    }
+  }
+
+#ifdef VERIFY_HEAP
+  if (FLAG_verify_heap) {
+    VerifyStringTable(this);
+  }
+#endif
+
+  return freed_global_handles > 0;
+}
+
+
+void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
+  for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
+    if (gc_type & gc_prologue_callbacks_[i].gc_type) {
+      if (!gc_prologue_callbacks_[i].pass_isolate_) {
+        v8::GCPrologueCallback callback =
+            reinterpret_cast<v8::GCPrologueCallback>(
+                gc_prologue_callbacks_[i].callback);
+        callback(gc_type, flags);
+      } else {
+        v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
+        gc_prologue_callbacks_[i].callback(isolate, gc_type, flags);
+      }
+    }
+  }
+}
+
+
+void Heap::CallGCEpilogueCallbacks(GCType gc_type,
+                                   GCCallbackFlags gc_callback_flags) {
+  for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
+    if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
+      if (!gc_epilogue_callbacks_[i].pass_isolate_) {
+        v8::GCPrologueCallback callback =
+            reinterpret_cast<v8::GCPrologueCallback>(
+                gc_epilogue_callbacks_[i].callback);
+        callback(gc_type, gc_callback_flags);
+      } else {
+        v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
+        gc_epilogue_callbacks_[i].callback(isolate, gc_type, gc_callback_flags);
+      }
+    }
+  }
+}
+
+
+void Heap::MarkCompact() {
+  gc_state_ = MARK_COMPACT;
+  LOG(isolate_, ResourceEvent("markcompact", "begin"));
+
+  uint64_t size_of_objects_before_gc = SizeOfObjects();
+
+  mark_compact_collector_.Prepare();
+
+  ms_count_++;
+
+  MarkCompactPrologue();
+
+  mark_compact_collector_.CollectGarbage();
+
+  LOG(isolate_, ResourceEvent("markcompact", "end"));
+
+  gc_state_ = NOT_IN_GC;
+
+  isolate_->counters()->objs_since_last_full()->Set(0);
+
+  flush_monomorphic_ics_ = false;
+
+  if (FLAG_allocation_site_pretenuring) {
+    EvaluateOldSpaceLocalPretenuring(size_of_objects_before_gc);
+  }
+}
+
+
+void Heap::MarkCompactPrologue() {
+  // At any old GC clear the keyed lookup cache to enable collection of unused
+  // maps.
+  isolate_->keyed_lookup_cache()->Clear();
+  isolate_->context_slot_cache()->Clear();
+  isolate_->descriptor_lookup_cache()->Clear();
+  RegExpResultsCache::Clear(string_split_cache());
+  RegExpResultsCache::Clear(regexp_multiple_cache());
+
+  isolate_->compilation_cache()->MarkCompactPrologue();
+
+  CompletelyClearInstanceofCache();
+
+  FlushNumberStringCache();
+  if (FLAG_cleanup_code_caches_at_gc) {
+    polymorphic_code_cache()->set_cache(undefined_value());
+  }
+
+  ClearNormalizedMapCaches();
+}
+
+
+// Helper class for copying HeapObjects
+class ScavengeVisitor : public ObjectVisitor {
+ public:
+  explicit ScavengeVisitor(Heap* heap) : heap_(heap) {}
+
+  void VisitPointer(Object** p) { ScavengePointer(p); }
+
+  void VisitPointers(Object** start, Object** end) {
+    // Copy all HeapObject pointers in [start, end)
+    for (Object** p = start; p < end; p++) ScavengePointer(p);
+  }
+
+ private:
+  void ScavengePointer(Object** p) {
+    Object* object = *p;
+    if (!heap_->InNewSpace(object)) return;
+    Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
+                         reinterpret_cast<HeapObject*>(object));
+  }
+
+  Heap* heap_;
+};
+
+
+#ifdef VERIFY_HEAP
+// Visitor class to verify pointers in code or data space do not point into
+// new space.
+class VerifyNonPointerSpacePointersVisitor : public ObjectVisitor {
+ public:
+  explicit VerifyNonPointerSpacePointersVisitor(Heap* heap) : heap_(heap) {}
+  void VisitPointers(Object** start, Object** end) {
+    for (Object** current = start; current < end; current++) {
+      if ((*current)->IsHeapObject()) {
+        CHECK(!heap_->InNewSpace(HeapObject::cast(*current)));
+      }
+    }
+  }
+
+ private:
+  Heap* heap_;
+};
+
+
+static void VerifyNonPointerSpacePointers(Heap* heap) {
+  // Verify that there are no pointers to new space in spaces where we
+  // do not expect them.
+  VerifyNonPointerSpacePointersVisitor v(heap);
+  HeapObjectIterator code_it(heap->code_space());
+  for (HeapObject* object = code_it.Next(); object != NULL;
+       object = code_it.Next())
+    object->Iterate(&v);
+
+  HeapObjectIterator data_it(heap->old_data_space());
+  for (HeapObject* object = data_it.Next(); object != NULL;
+       object = data_it.Next())
+    object->Iterate(&v);
+}
+#endif  // VERIFY_HEAP
+
+
+void Heap::CheckNewSpaceExpansionCriteria() {
+  if (new_space_.TotalCapacity() < new_space_.MaximumCapacity() &&
+      survived_since_last_expansion_ > new_space_.TotalCapacity()) {
+    // Grow the size of new space if there is room to grow, enough data
+    // has survived scavenge since the last expansion and we are not in
+    // high promotion mode.
+    new_space_.Grow();
+    survived_since_last_expansion_ = 0;
+  }
+}
+
+
+static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
+  return heap->InNewSpace(*p) &&
+         !HeapObject::cast(*p)->map_word().IsForwardingAddress();
+}
+
+
+void Heap::ScavengeStoreBufferCallback(Heap* heap, MemoryChunk* page,
+                                       StoreBufferEvent event) {
+  heap->store_buffer_rebuilder_.Callback(page, event);
+}
+
+
+void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
+  if (event == kStoreBufferStartScanningPagesEvent) {
+    start_of_current_page_ = NULL;
+    current_page_ = NULL;
+  } else if (event == kStoreBufferScanningPageEvent) {
+    if (current_page_ != NULL) {
+      // Check whether this page already overflowed the store buffer during
+      // this iteration.
+      if (current_page_->scan_on_scavenge()) {
+        // If so, wipe out the entries that have been added for it.
+        store_buffer_->SetTop(start_of_current_page_);
+      } else if (store_buffer_->Top() - start_of_current_page_ >=
+                 (store_buffer_->Limit() - store_buffer_->Top()) >> 2) {
+        // Did we find too many pointers in the previous page?  The heuristic is
+        // that no page can take more than 1/5 the remaining slots in the store
+        // buffer.
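+        // (Top() - start_of_current_page_ is the number of entries added for
+        // this page; requiring it to be at least a quarter of what remains
+        // afterwards is, up to rounding, the same as a fifth of what remained
+        // before this page was scanned.)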
+        current_page_->set_scan_on_scavenge(true);
+        store_buffer_->SetTop(start_of_current_page_);
+      } else {
+        // In this case the page we scanned took a reasonable number of slots in
+        // the store buffer.  It has now been rehabilitated and is no longer
+        // marked scan_on_scavenge.
+        DCHECK(!current_page_->scan_on_scavenge());
+      }
+    }
+    start_of_current_page_ = store_buffer_->Top();
+    current_page_ = page;
+  } else if (event == kStoreBufferFullEvent) {
+    // The current page overflowed the store buffer again.  Wipe out its entries
+    // in the store buffer and mark it scan-on-scavenge again.  This may happen
+    // several times while scanning.
+    if (current_page_ == NULL) {
+      // Store Buffer overflowed while scanning promoted objects.  These are not
+      // in any particular page, though they are likely to be clustered by the
+      // allocation routines.
+      store_buffer_->EnsureSpace(StoreBuffer::kStoreBufferSize / 2);
+    } else {
+      // Store Buffer overflowed while scanning a particular old space page for
+      // pointers to new space.
+      DCHECK(current_page_ == page);
+      DCHECK(page != NULL);
+      current_page_->set_scan_on_scavenge(true);
+      DCHECK(start_of_current_page_ != store_buffer_->Top());
+      store_buffer_->SetTop(start_of_current_page_);
+    }
+  } else {
+    UNREACHABLE();
+  }
+}
+
+
+void PromotionQueue::Initialize() {
+  // Assumes that a NewSpacePage exactly fits a whole number of promotion
+  // queue entries (where each is a pair of intptr_t). This allows us to
+  // simplify the test for when to switch pages.
+  DCHECK((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize) ==
+         0);
+  limit_ = reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceStart());
+  front_ = rear_ =
+      reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd());
+  emergency_stack_ = NULL;
+}
+
+
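+// Moves the queue entries between rear_ and either front_ or the end of the
+// page holding rear_ (whichever comes first) onto a heap-allocated emergency
+// stack.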
+void PromotionQueue::RelocateQueueHead() {
+  DCHECK(emergency_stack_ == NULL);
+
+  Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
+  intptr_t* head_start = rear_;
+  intptr_t* head_end = Min(front_, reinterpret_cast<intptr_t*>(p->area_end()));
+
+  int entries_count =
+      static_cast<int>(head_end - head_start) / kEntrySizeInWords;
+
+  emergency_stack_ = new List<Entry>(2 * entries_count);
+
+  while (head_start != head_end) {
+    int size = static_cast<int>(*(head_start++));
+    HeapObject* obj = reinterpret_cast<HeapObject*>(*(head_start++));
+    emergency_stack_->Add(Entry(obj, size));
+  }
+  rear_ = head_end;
+}
+
+
+class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
+ public:
+  explicit ScavengeWeakObjectRetainer(Heap* heap) : heap_(heap) {}
+
+  virtual Object* RetainAs(Object* object) {
+    if (!heap_->InFromSpace(object)) {
+      return object;
+    }
+
+    MapWord map_word = HeapObject::cast(object)->map_word();
+    if (map_word.IsForwardingAddress()) {
+      return map_word.ToForwardingAddress();
+    }
+    return NULL;
+  }
+
+ private:
+  Heap* heap_;
+};
+
+
+void Heap::Scavenge() {
+  RelocationLock relocation_lock(this);
+
+#ifdef VERIFY_HEAP
+  if (FLAG_verify_heap) VerifyNonPointerSpacePointers(this);
+#endif
+
+  gc_state_ = SCAVENGE;
+
+  // Implements Cheney's copying algorithm.
+  LOG(isolate_, ResourceEvent("scavenge", "begin"));
+
+  // Clear descriptor cache.
+  isolate_->descriptor_lookup_cache()->Clear();
+
+  // Used for updating survived_since_last_expansion_ at function end.
+  intptr_t survived_watermark = PromotedSpaceSizeOfObjects();
+
+  SelectScavengingVisitorsTable();
+
+  incremental_marking()->PrepareForScavenge();
+
+  // Flip the semispaces.  After flipping, to space is empty, from space has
+  // live objects.
+  new_space_.Flip();
+  new_space_.ResetAllocationInfo();
+
+  // We need to sweep newly copied objects which can be either in the
+  // to space or promoted to the old generation.  For to-space
+  // objects, we treat the bottom of the to space as a queue.  Newly
+  // copied and unswept objects lie between a 'front' mark and the
+  // allocation pointer.
+  //
+  // Promoted objects can go into various old-generation spaces, and
+  // can be allocated internally in the spaces (from the free list).
+  // We treat the top of the to space as a queue of addresses of
+  // promoted objects.  The addresses of newly promoted and unswept
+  // objects lie between a 'front' mark and a 'rear' mark that is
+  // updated as a side effect of promoting an object.
+  //
+  // There is guaranteed to be enough room at the top of the to space
+  // for the addresses of promoted objects: every object promoted
+  // frees up its size in bytes from the top of the new space, and
+  // objects are at least one pointer in size.
+  Address new_space_front = new_space_.ToSpaceStart();
+  promotion_queue_.Initialize();
+
+#ifdef DEBUG
+  store_buffer()->Clean();
+#endif
+
+  ScavengeVisitor scavenge_visitor(this);
+  // Copy roots.
+  IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
+
+  // Copy objects reachable from the old generation.
+  {
+    StoreBufferRebuildScope scope(this, store_buffer(),
+                                  &ScavengeStoreBufferCallback);
+    store_buffer()->IteratePointersToNewSpace(&ScavengeObject);
+  }
+
+  // Copy objects reachable from simple cells by scavenging cell values
+  // directly.
+  HeapObjectIterator cell_iterator(cell_space_);
+  for (HeapObject* heap_object = cell_iterator.Next(); heap_object != NULL;
+       heap_object = cell_iterator.Next()) {
+    if (heap_object->IsCell()) {
+      Cell* cell = Cell::cast(heap_object);
+      Address value_address = cell->ValueAddress();
+      scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
+    }
+  }
+
+  // Copy objects reachable from global property cells by scavenging global
+  // property cell values directly.
+  HeapObjectIterator js_global_property_cell_iterator(property_cell_space_);
+  for (HeapObject* heap_object = js_global_property_cell_iterator.Next();
+       heap_object != NULL;
+       heap_object = js_global_property_cell_iterator.Next()) {
+    if (heap_object->IsPropertyCell()) {
+      PropertyCell* cell = PropertyCell::cast(heap_object);
+      Address value_address = cell->ValueAddress();
+      scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
+      Address type_address = cell->TypeAddress();
+      scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(type_address));
+    }
+  }
+
+  // Copy objects reachable from the encountered weak collections list.
+  scavenge_visitor.VisitPointer(&encountered_weak_collections_);
+
+  // Copy objects reachable from the code flushing candidates list.
+  MarkCompactCollector* collector = mark_compact_collector();
+  if (collector->is_code_flushing_enabled()) {
+    collector->code_flusher()->IteratePointersToFromSpace(&scavenge_visitor);
+  }
+
+  new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
+
+  while (isolate()->global_handles()->IterateObjectGroups(
+      &scavenge_visitor, &IsUnscavengedHeapObject)) {
+    new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
+  }
+  isolate()->global_handles()->RemoveObjectGroups();
+  isolate()->global_handles()->RemoveImplicitRefGroups();
+
+  isolate_->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
+      &IsUnscavengedHeapObject);
+  isolate_->global_handles()->IterateNewSpaceWeakIndependentRoots(
+      &scavenge_visitor);
+  new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
+
+  UpdateNewSpaceReferencesInExternalStringTable(
+      &UpdateNewSpaceReferenceInExternalStringTableEntry);
+
+  promotion_queue_.Destroy();
+
+  incremental_marking()->UpdateMarkingDequeAfterScavenge();
+
+  ScavengeWeakObjectRetainer weak_object_retainer(this);
+  ProcessWeakReferences(&weak_object_retainer);
+
+  DCHECK(new_space_front == new_space_.top());
+
+  // Set age mark.
+  new_space_.set_age_mark(new_space_.top());
+
+  new_space_.LowerInlineAllocationLimit(
+      new_space_.inline_allocation_limit_step());
+
+  // Update how much has survived scavenge.
+  IncrementYoungSurvivorsCounter(static_cast<int>(
+      (PromotedSpaceSizeOfObjects() - survived_watermark) + new_space_.Size()));
+
+  LOG(isolate_, ResourceEvent("scavenge", "end"));
+
+  gc_state_ = NOT_IN_GC;
+
+  gc_idle_time_handler_.NotifyScavenge();
+}
+
+
+String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
+                                                                Object** p) {
+  MapWord first_word = HeapObject::cast(*p)->map_word();
+
+  if (!first_word.IsForwardingAddress()) {
+    // Unreachable external string can be finalized.
+    heap->FinalizeExternalString(String::cast(*p));
+    return NULL;
+  }
+
+  // String is still reachable.
+  return String::cast(first_word.ToForwardingAddress());
+}
+
+
+void Heap::UpdateNewSpaceReferencesInExternalStringTable(
+    ExternalStringTableUpdaterCallback updater_func) {
+#ifdef VERIFY_HEAP
+  if (FLAG_verify_heap) {
+    external_string_table_.Verify();
+  }
+#endif
+
+  if (external_string_table_.new_space_strings_.is_empty()) return;
+
+  Object** start = &external_string_table_.new_space_strings_[0];
+  Object** end = start + external_string_table_.new_space_strings_.length();
+  Object** last = start;
+
+  for (Object** p = start; p < end; ++p) {
+    DCHECK(InFromSpace(*p));
+    String* target = updater_func(this, p);
+
+    if (target == NULL) continue;
+
+    DCHECK(target->IsExternalString());
+
+    if (InNewSpace(target)) {
+      // String is still in new space.  Update the table entry.
+      *last = target;
+      ++last;
+    } else {
+      // String got promoted.  Move it to the old string list.
+      external_string_table_.AddOldString(target);
+    }
+  }
+
+  DCHECK(last <= end);
+  external_string_table_.ShrinkNewStrings(static_cast<int>(last - start));
+}
+
+
+void Heap::UpdateReferencesInExternalStringTable(
+    ExternalStringTableUpdaterCallback updater_func) {
+  // Update old space string references.
+  if (external_string_table_.old_space_strings_.length() > 0) {
+    Object** start = &external_string_table_.old_space_strings_[0];
+    Object** end = start + external_string_table_.old_space_strings_.length();
+    for (Object** p = start; p < end; ++p) *p = updater_func(this, p);
+  }
+
+  UpdateNewSpaceReferencesInExternalStringTable(updater_func);
+}
+
+
+void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
+  ProcessArrayBuffers(retainer);
+  ProcessNativeContexts(retainer);
+  // TODO(mvstanton): AllocationSites only need to be processed during
+  // MARK_COMPACT, as they live in old space. Verify and address.
+  ProcessAllocationSites(retainer);
+}
+
+
+void Heap::ProcessNativeContexts(WeakObjectRetainer* retainer) {
+  Object* head = VisitWeakList<Context>(this, native_contexts_list(), retainer);
+  // Update the head of the list of contexts.
+  set_native_contexts_list(head);
+}
+
+
+void Heap::ProcessArrayBuffers(WeakObjectRetainer* retainer) {
+  Object* array_buffer_obj =
+      VisitWeakList<JSArrayBuffer>(this, array_buffers_list(), retainer);
+  set_array_buffers_list(array_buffer_obj);
+}
+
+
+void Heap::TearDownArrayBuffers() {
+  Object* undefined = undefined_value();
+  for (Object* o = array_buffers_list(); o != undefined;) {
+    JSArrayBuffer* buffer = JSArrayBuffer::cast(o);
+    Runtime::FreeArrayBuffer(isolate(), buffer);
+    o = buffer->weak_next();
+  }
+  set_array_buffers_list(undefined);
+}
+
+
+void Heap::ProcessAllocationSites(WeakObjectRetainer* retainer) {
+  Object* allocation_site_obj =
+      VisitWeakList<AllocationSite>(this, allocation_sites_list(), retainer);
+  set_allocation_sites_list(allocation_site_obj);
+}
+
+
+void Heap::ResetAllAllocationSitesDependentCode(PretenureFlag flag) {
+  DisallowHeapAllocation no_allocation_scope;
+  Object* cur = allocation_sites_list();
+  bool marked = false;
+  while (cur->IsAllocationSite()) {
+    AllocationSite* casted = AllocationSite::cast(cur);
+    if (casted->GetPretenureMode() == flag) {
+      casted->ResetPretenureDecision();
+      casted->set_deopt_dependent_code(true);
+      marked = true;
+    }
+    cur = casted->weak_next();
+  }
+  if (marked) isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
+}
+
+
+void Heap::EvaluateOldSpaceLocalPretenuring(
+    uint64_t size_of_objects_before_gc) {
+  uint64_t size_of_objects_after_gc = SizeOfObjects();
+  double old_generation_survival_rate =
+      (static_cast<double>(size_of_objects_after_gc) * 100) /
+      static_cast<double>(size_of_objects_before_gc);
+
+  if (old_generation_survival_rate < kOldSurvivalRateLowThreshold) {
+    // Too many objects died in the old generation, pretenuring of wrong
+    // allocation sites may be the cause for that. We have to deopt all
+    // dependent code registered in the allocation sites to re-evaluate
+    // our pretenuring decisions.
+    ResetAllAllocationSitesDependentCode(TENURED);
+    if (FLAG_trace_pretenuring) {
+      PrintF(
+          "Deopt all allocation sites dependent code due to low survival "
+          "rate in the old generation %f\n",
+          old_generation_survival_rate);
+    }
+  }
+}
+
+
+void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
+  DisallowHeapAllocation no_allocation;
+  // All external strings are listed in the external string table.
+
+  class ExternalStringTableVisitorAdapter : public ObjectVisitor {
+   public:
+    explicit ExternalStringTableVisitorAdapter(
+        v8::ExternalResourceVisitor* visitor)
+        : visitor_(visitor) {}
+    virtual void VisitPointers(Object** start, Object** end) {
+      for (Object** p = start; p < end; p++) {
+        DCHECK((*p)->IsExternalString());
+        visitor_->VisitExternalString(
+            Utils::ToLocal(Handle<String>(String::cast(*p))));
+      }
+    }
+
+   private:
+    v8::ExternalResourceVisitor* visitor_;
+  } external_string_table_visitor(visitor);
+
+  external_string_table_.Iterate(&external_string_table_visitor);
+}
+
+
+class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
+ public:
+  static inline void VisitPointer(Heap* heap, Object** p) {
+    Object* object = *p;
+    if (!heap->InNewSpace(object)) return;
+    Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
+                         reinterpret_cast<HeapObject*>(object));
+  }
+};
+
+
+Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
+                         Address new_space_front) {
+  do {
+    SemiSpace::AssertValidRange(new_space_front, new_space_.top());
+    // The addresses new_space_front and new_space_.top() define a
+    // queue of unprocessed copied objects.  Process them until the
+    // queue is empty.
+    while (new_space_front != new_space_.top()) {
+      if (!NewSpacePage::IsAtEnd(new_space_front)) {
+        HeapObject* object = HeapObject::FromAddress(new_space_front);
+        new_space_front +=
+            NewSpaceScavenger::IterateBody(object->map(), object);
+      } else {
+        new_space_front =
+            NewSpacePage::FromLimit(new_space_front)->next_page()->area_start();
+      }
+    }
+
+    // Promote and process all the to-be-promoted objects.
+    {
+      StoreBufferRebuildScope scope(this, store_buffer(),
+                                    &ScavengeStoreBufferCallback);
+      while (!promotion_queue()->is_empty()) {
+        HeapObject* target;
+        int size;
+        promotion_queue()->remove(&target, &size);
+
+        // A promoted object might already have been partially visited during
+        // old space pointer iteration. Thus we search specifically for
+        // pointers into from space instead of looking for pointers into new
+        // space.
+        DCHECK(!target->IsMap());
+        IterateAndMarkPointersToFromSpace(
+            target->address(), target->address() + size, &ScavengeObject);
+      }
+    }
+
+    // Take another spin if there are now unswept objects in new space
+    // (there are currently no more unswept promoted objects).
+  } while (new_space_front != new_space_.top());
+
+  return new_space_front;
+}
+
+
+STATIC_ASSERT((FixedDoubleArray::kHeaderSize & kDoubleAlignmentMask) ==
+              0);  // NOLINT
+STATIC_ASSERT((ConstantPoolArray::kFirstEntryOffset & kDoubleAlignmentMask) ==
+              0);  // NOLINT
+STATIC_ASSERT((ConstantPoolArray::kExtendedFirstOffset &
+               kDoubleAlignmentMask) == 0);  // NOLINT
+
+
+INLINE(static HeapObject* EnsureDoubleAligned(Heap* heap, HeapObject* object,
+                                              int size));
+
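+// Returns a double-aligned version of |object|.  Callers over-allocate by
+// kPointerSize, so either the leading word (if the object starts misaligned)
+// or the trailing word (if it is already aligned) becomes a one-word filler.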
+static HeapObject* EnsureDoubleAligned(Heap* heap, HeapObject* object,
+                                       int size) {
+  if ((OffsetFrom(object->address()) & kDoubleAlignmentMask) != 0) {
+    heap->CreateFillerObjectAt(object->address(), kPointerSize);
+    return HeapObject::FromAddress(object->address() + kPointerSize);
+  } else {
+    heap->CreateFillerObjectAt(object->address() + size - kPointerSize,
+                               kPointerSize);
+    return object;
+  }
+}
+
+
+enum LoggingAndProfiling {
+  LOGGING_AND_PROFILING_ENABLED,
+  LOGGING_AND_PROFILING_DISABLED
+};
+
+
+enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS };
+
+
+template <MarksHandling marks_handling,
+          LoggingAndProfiling logging_and_profiling_mode>
+class ScavengingVisitor : public StaticVisitorBase {
+ public:
+  static void Initialize() {
+    table_.Register(kVisitSeqOneByteString, &EvacuateSeqOneByteString);
+    table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
+    table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
+    table_.Register(kVisitByteArray, &EvacuateByteArray);
+    table_.Register(kVisitFixedArray, &EvacuateFixedArray);
+    table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray);
+    table_.Register(kVisitFixedTypedArray, &EvacuateFixedTypedArray);
+    table_.Register(kVisitFixedFloat64Array, &EvacuateFixedFloat64Array);
+
+    table_.Register(
+        kVisitNativeContext,
+        &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
+            Context::kSize>);
+
+    table_.Register(
+        kVisitConsString,
+        &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
+            ConsString::kSize>);
+
+    table_.Register(
+        kVisitSlicedString,
+        &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
+            SlicedString::kSize>);
+
+    table_.Register(
+        kVisitSymbol,
+        &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
+            Symbol::kSize>);
+
+    table_.Register(
+        kVisitSharedFunctionInfo,
+        &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
+            SharedFunctionInfo::kSize>);
+
+    table_.Register(kVisitJSWeakCollection,
+                    &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
+
+    table_.Register(kVisitJSArrayBuffer,
+                    &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
+
+    table_.Register(kVisitJSTypedArray,
+                    &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
+
+    table_.Register(kVisitJSDataView,
+                    &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
+
+    table_.Register(kVisitJSRegExp,
+                    &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
+
+    if (marks_handling == IGNORE_MARKS) {
+      table_.Register(
+          kVisitJSFunction,
+          &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
+              JSFunction::kSize>);
+    } else {
+      table_.Register(kVisitJSFunction, &EvacuateJSFunction);
+    }
+
+    table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
+                                   kVisitDataObject, kVisitDataObjectGeneric>();
+
+    table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
+                                   kVisitJSObject, kVisitJSObjectGeneric>();
+
+    table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
+                                   kVisitStruct, kVisitStructGeneric>();
+  }
+
+  static VisitorDispatchTable<ScavengingCallback>* GetTable() {
+    return &table_;
+  }
+
+ private:
+  enum ObjectContents { DATA_OBJECT, POINTER_OBJECT };
+
+  static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
+    bool should_record = false;
+#ifdef DEBUG
+    should_record = FLAG_heap_stats;
+#endif
+    should_record = should_record || FLAG_log_gc;
+    if (should_record) {
+      if (heap->new_space()->Contains(obj)) {
+        heap->new_space()->RecordAllocation(obj);
+      } else {
+        heap->new_space()->RecordPromotion(obj);
+      }
+    }
+  }
+
+  // Helper function used by SemiSpaceCopyObject and PromoteObject to copy a
+  // source object to an allocated target object and update the forwarding
+  // pointer in the source object.
+  INLINE(static void MigrateObject(Heap* heap, HeapObject* source,
+                                   HeapObject* target, int size)) {
+    // If we migrate into to-space, then the to-space top pointer should be
+    // right after the target object. Incorporate double alignment
+    // over-allocation.
+    DCHECK(!heap->InToSpace(target) ||
+           target->address() + size == heap->new_space()->top() ||
+           target->address() + size + kPointerSize == heap->new_space()->top());
+
+    // Make sure that we do not overwrite the promotion queue which is at
+    // the end of to-space.
+    DCHECK(!heap->InToSpace(target) ||
+           heap->promotion_queue()->IsBelowPromotionQueue(
+               heap->new_space()->top()));
+
+    // Copy the content of source to target.
+    heap->CopyBlock(target->address(), source->address(), size);
+
+    // Set the forwarding address.
+    source->set_map_word(MapWord::FromForwardingAddress(target));
+
+    if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
+      // Update NewSpace stats if necessary.
+      RecordCopiedObject(heap, target);
+      heap->OnMoveEvent(target, source, size);
+    }
+
+    if (marks_handling == TRANSFER_MARKS) {
+      if (Marking::TransferColor(source, target)) {
+        MemoryChunk::IncrementLiveBytesFromGC(target->address(), size);
+      }
+    }
+  }
+
+  template <int alignment>
+  static inline bool SemiSpaceCopyObject(Map* map, HeapObject** slot,
+                                         HeapObject* object, int object_size) {
+    Heap* heap = map->GetHeap();
+
+    int allocation_size = object_size;
+    if (alignment != kObjectAlignment) {
+      DCHECK(alignment == kDoubleAlignment);
+      allocation_size += kPointerSize;
+    }
+
+    DCHECK(heap->AllowedToBeMigrated(object, NEW_SPACE));
+    AllocationResult allocation =
+        heap->new_space()->AllocateRaw(allocation_size);
+
+    HeapObject* target = NULL;  // Initialization to please compiler.
+    if (allocation.To(&target)) {
+      // Order is important here: Set the promotion limit before storing a
+      // filler for double alignment or migrating the object. Otherwise we
+      // may end up overwriting promotion queue entries when we migrate the
+      // object.
+      heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
+
+      if (alignment != kObjectAlignment) {
+        target = EnsureDoubleAligned(heap, target, allocation_size);
+      }
+
+      // Order is important: slot might be inside of the target if target
+      // was allocated over a dead object and slot comes from the store
+      // buffer.
+      *slot = target;
+      MigrateObject(heap, object, target, object_size);
+
+      heap->IncrementSemiSpaceCopiedObjectSize(object_size);
+      return true;
+    }
+    return false;
+  }
+
+
+  template <ObjectContents object_contents, int alignment>
+  static inline bool PromoteObject(Map* map, HeapObject** slot,
+                                   HeapObject* object, int object_size) {
+    Heap* heap = map->GetHeap();
+
+    int allocation_size = object_size;
+    if (alignment != kObjectAlignment) {
+      DCHECK(alignment == kDoubleAlignment);
+      allocation_size += kPointerSize;
+    }
+
+    AllocationResult allocation;
+    if (object_contents == DATA_OBJECT) {
+      DCHECK(heap->AllowedToBeMigrated(object, OLD_DATA_SPACE));
+      allocation = heap->old_data_space()->AllocateRaw(allocation_size);
+    } else {
+      DCHECK(heap->AllowedToBeMigrated(object, OLD_POINTER_SPACE));
+      allocation = heap->old_pointer_space()->AllocateRaw(allocation_size);
+    }
+
+    HeapObject* target = NULL;  // Initialization to please compiler.
+    if (allocation.To(&target)) {
+      if (alignment != kObjectAlignment) {
+        target = EnsureDoubleAligned(heap, target, allocation_size);
+      }
+
+      // Order is important: slot might be inside of the target if target
+      // was allocated over a dead object and slot comes from the store
+      // buffer.
+      *slot = target;
+      MigrateObject(heap, object, target, object_size);
+
+      if (object_contents == POINTER_OBJECT) {
+        if (map->instance_type() == JS_FUNCTION_TYPE) {
+          heap->promotion_queue()->insert(target,
+                                          JSFunction::kNonWeakFieldsEndOffset);
+        } else {
+          heap->promotion_queue()->insert(target, object_size);
+        }
+      }
+      heap->IncrementPromotedObjectsSize(object_size);
+      return true;
+    }
+    return false;
+  }
+
+
+  template <ObjectContents object_contents, int alignment>
+  static inline void EvacuateObject(Map* map, HeapObject** slot,
+                                    HeapObject* object, int object_size) {
+    SLOW_DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+    SLOW_DCHECK(object->Size() == object_size);
+    Heap* heap = map->GetHeap();
+
+    if (!heap->ShouldBePromoted(object->address(), object_size)) {
+      // A semi-space copy may fail due to fragmentation. In that case, we
+      // try to promote the object.
+      if (SemiSpaceCopyObject<alignment>(map, slot, object, object_size)) {
+        return;
+      }
+    }
+
+    if (PromoteObject<object_contents, alignment>(map, slot, object,
+                                                  object_size)) {
+      return;
+    }
+
+    // If promotion failed, we try to copy the object to the other semi-space.
+    if (SemiSpaceCopyObject<alignment>(map, slot, object, object_size)) return;
+
+    UNREACHABLE();
+  }
+
+
+  static inline void EvacuateJSFunction(Map* map, HeapObject** slot,
+                                        HeapObject* object) {
+    ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
+        JSFunction::kSize>(map, slot, object);
+
+    MapWord map_word = object->map_word();
+    DCHECK(map_word.IsForwardingAddress());
+    HeapObject* target = map_word.ToForwardingAddress();
+
+    MarkBit mark_bit = Marking::MarkBitFrom(target);
+    if (Marking::IsBlack(mark_bit)) {
+      // This object is black and it might not be rescanned by the marker.
+      // We should explicitly record the code entry slot for compaction
+      // because promotion queue processing (IterateAndMarkPointersToFromSpace)
+      // will miss it, as it is not HeapObject-tagged.
+      Address code_entry_slot =
+          target->address() + JSFunction::kCodeEntryOffset;
+      Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
+      map->GetHeap()->mark_compact_collector()->RecordCodeEntrySlot(
+          code_entry_slot, code);
+    }
+  }
+
+
+  static inline void EvacuateFixedArray(Map* map, HeapObject** slot,
+                                        HeapObject* object) {
+    int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
+    EvacuateObject<POINTER_OBJECT, kObjectAlignment>(map, slot, object,
+                                                     object_size);
+  }
+
+
+  static inline void EvacuateFixedDoubleArray(Map* map, HeapObject** slot,
+                                              HeapObject* object) {
+    int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
+    int object_size = FixedDoubleArray::SizeFor(length);
+    EvacuateObject<DATA_OBJECT, kDoubleAlignment>(map, slot, object,
+                                                  object_size);
+  }
+
+
+  static inline void EvacuateFixedTypedArray(Map* map, HeapObject** slot,
+                                             HeapObject* object) {
+    int object_size = reinterpret_cast<FixedTypedArrayBase*>(object)->size();
+    EvacuateObject<DATA_OBJECT, kObjectAlignment>(map, slot, object,
+                                                  object_size);
+  }
+
+
+  static inline void EvacuateFixedFloat64Array(Map* map, HeapObject** slot,
+                                               HeapObject* object) {
+    int object_size = reinterpret_cast<FixedFloat64Array*>(object)->size();
+    EvacuateObject<DATA_OBJECT, kDoubleAlignment>(map, slot, object,
+                                                  object_size);
+  }
+
+
+  static inline void EvacuateByteArray(Map* map, HeapObject** slot,
+                                       HeapObject* object) {
+    int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
+    EvacuateObject<DATA_OBJECT, kObjectAlignment>(map, slot, object,
+                                                  object_size);
+  }
+
+
+  static inline void EvacuateSeqOneByteString(Map* map, HeapObject** slot,
+                                              HeapObject* object) {
+    int object_size = SeqOneByteString::cast(object)
+                          ->SeqOneByteStringSize(map->instance_type());
+    EvacuateObject<DATA_OBJECT, kObjectAlignment>(map, slot, object,
+                                                  object_size);
+  }
+
+
+  static inline void EvacuateSeqTwoByteString(Map* map, HeapObject** slot,
+                                              HeapObject* object) {
+    int object_size = SeqTwoByteString::cast(object)
+                          ->SeqTwoByteStringSize(map->instance_type());
+    EvacuateObject<DATA_OBJECT, kObjectAlignment>(map, slot, object,
+                                                  object_size);
+  }
+
+
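+  // A shortcut candidate is a cons string.  If its second part is the empty
+  // string (and marks do not need to be transferred), references are
+  // forwarded directly to the first part instead of evacuating the wrapper.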
+  static inline void EvacuateShortcutCandidate(Map* map, HeapObject** slot,
+                                               HeapObject* object) {
+    DCHECK(IsShortcutCandidate(map->instance_type()));
+
+    Heap* heap = map->GetHeap();
+
+    if (marks_handling == IGNORE_MARKS &&
+        ConsString::cast(object)->unchecked_second() == heap->empty_string()) {
+      HeapObject* first =
+          HeapObject::cast(ConsString::cast(object)->unchecked_first());
+
+      *slot = first;
+
+      if (!heap->InNewSpace(first)) {
+        object->set_map_word(MapWord::FromForwardingAddress(first));
+        return;
+      }
+
+      MapWord first_word = first->map_word();
+      if (first_word.IsForwardingAddress()) {
+        HeapObject* target = first_word.ToForwardingAddress();
+
+        *slot = target;
+        object->set_map_word(MapWord::FromForwardingAddress(target));
+        return;
+      }
+
+      heap->DoScavengeObject(first->map(), slot, first);
+      object->set_map_word(MapWord::FromForwardingAddress(*slot));
+      return;
+    }
+
+    int object_size = ConsString::kSize;
+    EvacuateObject<POINTER_OBJECT, kObjectAlignment>(map, slot, object,
+                                                     object_size);
+  }
+
+  template <ObjectContents object_contents>
+  class ObjectEvacuationStrategy {
+   public:
+    template <int object_size>
+    static inline void VisitSpecialized(Map* map, HeapObject** slot,
+                                        HeapObject* object) {
+      EvacuateObject<object_contents, kObjectAlignment>(map, slot, object,
+                                                        object_size);
+    }
+
+    static inline void Visit(Map* map, HeapObject** slot, HeapObject* object) {
+      int object_size = map->instance_size();
+      EvacuateObject<object_contents, kObjectAlignment>(map, slot, object,
+                                                        object_size);
+    }
+  };
+
+  static VisitorDispatchTable<ScavengingCallback> table_;
+};
+
+
+template <MarksHandling marks_handling,
+          LoggingAndProfiling logging_and_profiling_mode>
+VisitorDispatchTable<ScavengingCallback>
+    ScavengingVisitor<marks_handling, logging_and_profiling_mode>::table_;
+
+
+static void InitializeScavengingVisitorsTables() {
+  ScavengingVisitor<TRANSFER_MARKS,
+                    LOGGING_AND_PROFILING_DISABLED>::Initialize();
+  ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::Initialize();
+  ScavengingVisitor<TRANSFER_MARKS,
+                    LOGGING_AND_PROFILING_ENABLED>::Initialize();
+  ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::Initialize();
+}
+
+
+void Heap::SelectScavengingVisitorsTable() {
+  bool logging_and_profiling =
+      FLAG_verify_predictable || isolate()->logger()->is_logging() ||
+      isolate()->cpu_profiler()->is_profiling() ||
+      (isolate()->heap_profiler() != NULL &&
+       isolate()->heap_profiler()->is_tracking_object_moves());
+
+  if (!incremental_marking()->IsMarking()) {
+    if (!logging_and_profiling) {
+      scavenging_visitors_table_.CopyFrom(ScavengingVisitor<
+          IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::GetTable());
+    } else {
+      scavenging_visitors_table_.CopyFrom(ScavengingVisitor<
+          IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::GetTable());
+    }
+  } else {
+    if (!logging_and_profiling) {
+      scavenging_visitors_table_.CopyFrom(ScavengingVisitor<
+          TRANSFER_MARKS, LOGGING_AND_PROFILING_DISABLED>::GetTable());
+    } else {
+      scavenging_visitors_table_.CopyFrom(ScavengingVisitor<
+          TRANSFER_MARKS, LOGGING_AND_PROFILING_ENABLED>::GetTable());
+    }
+
+    if (incremental_marking()->IsCompacting()) {
+      // When compacting, forbid short-circuiting of cons strings.
+      // The scavenging code relies on the fact that a new space object
+      // can't be evacuated into an evacuation candidate, but
+      // short-circuiting violates this assumption.
+      scavenging_visitors_table_.Register(
+          StaticVisitorBase::kVisitShortcutCandidate,
+          scavenging_visitors_table_.GetVisitorById(
+              StaticVisitorBase::kVisitConsString));
+    }
+  }
+}
+
+
+void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
+  SLOW_DCHECK(object->GetIsolate()->heap()->InFromSpace(object));
+  MapWord first_word = object->map_word();
+  SLOW_DCHECK(!first_word.IsForwardingAddress());
+  Map* map = first_word.ToMap();
+  map->GetHeap()->DoScavengeObject(map, p, object);
+}
+
+
+AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
+                                          int instance_size) {
+  Object* result;
+  AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE);
+  if (!allocation.To(&result)) return allocation;
+
+  // Map::cast cannot be used due to uninitialized map field.
+  reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
+  reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
+  reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
+  reinterpret_cast<Map*>(result)->set_visitor_id(
+      StaticVisitorBase::GetVisitorId(instance_type, instance_size));
+  reinterpret_cast<Map*>(result)->set_inobject_properties(0);
+  reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
+  reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
+  reinterpret_cast<Map*>(result)->set_bit_field(0);
+  reinterpret_cast<Map*>(result)->set_bit_field2(0);
+  int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
+                   Map::OwnsDescriptors::encode(true);
+  reinterpret_cast<Map*>(result)->set_bit_field3(bit_field3);
+  return result;
+}
+
+
+AllocationResult Heap::AllocateMap(InstanceType instance_type,
+                                   int instance_size,
+                                   ElementsKind elements_kind) {
+  HeapObject* result;
+  AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE);
+  if (!allocation.To(&result)) return allocation;
+
+  result->set_map_no_write_barrier(meta_map());
+  Map* map = Map::cast(result);
+  map->set_instance_type(instance_type);
+  map->set_visitor_id(
+      StaticVisitorBase::GetVisitorId(instance_type, instance_size));
+  map->set_prototype(null_value(), SKIP_WRITE_BARRIER);
+  map->set_constructor(null_value(), SKIP_WRITE_BARRIER);
+  map->set_instance_size(instance_size);
+  map->set_inobject_properties(0);
+  map->set_pre_allocated_property_fields(0);
+  map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
+  map->set_dependent_code(DependentCode::cast(empty_fixed_array()),
+                          SKIP_WRITE_BARRIER);
+  map->init_back_pointer(undefined_value());
+  map->set_unused_property_fields(0);
+  map->set_instance_descriptors(empty_descriptor_array());
+  map->set_bit_field(0);
+  map->set_bit_field2(1 << Map::kIsExtensible);
+  int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
+                   Map::OwnsDescriptors::encode(true);
+  map->set_bit_field3(bit_field3);
+  map->set_elements_kind(elements_kind);
+
+  return map;
+}
+
+
+AllocationResult Heap::AllocateFillerObject(int size, bool double_align,
+                                            AllocationSpace space) {
+  HeapObject* obj;
+  {
+    AllocationResult allocation = AllocateRaw(size, space, space);
+    if (!allocation.To(&obj)) return allocation;
+  }
+#ifdef DEBUG
+  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
+  DCHECK(chunk->owner()->identity() == space);
+#endif
+  CreateFillerObjectAt(obj->address(), size);
+  return obj;
+}
+
+
+const Heap::StringTypeTable Heap::string_type_table[] = {
+#define STRING_TYPE_ELEMENT(type, size, name, camel_name) \
+  { type, size, k##camel_name##MapRootIndex }             \
+  ,
+    STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
+#undef STRING_TYPE_ELEMENT
+};
+
+
+const Heap::ConstantStringTable Heap::constant_string_table[] = {
+#define CONSTANT_STRING_ELEMENT(name, contents) \
+  { contents, k##name##RootIndex }              \
+  ,
+    INTERNALIZED_STRING_LIST(CONSTANT_STRING_ELEMENT)
+#undef CONSTANT_STRING_ELEMENT
+};
+
+
+const Heap::StructTable Heap::struct_table[] = {
+#define STRUCT_TABLE_ELEMENT(NAME, Name, name)        \
+  { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex } \
+  ,
+    STRUCT_LIST(STRUCT_TABLE_ELEMENT)
+#undef STRUCT_TABLE_ELEMENT
+};
+
+
+bool Heap::CreateInitialMaps() {
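+  // Bootstrapping order matters: the meta map and a few partial maps are
+  // allocated before the arrays and oddballs they reference exist; their
+  // remaining fields are patched up further down.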
+  HeapObject* obj;
+  {
+    AllocationResult allocation = AllocatePartialMap(MAP_TYPE, Map::kSize);
+    if (!allocation.To(&obj)) return false;
+  }
+  // Map::cast cannot be used due to uninitialized map field.
+  Map* new_meta_map = reinterpret_cast<Map*>(obj);
+  set_meta_map(new_meta_map);
+  new_meta_map->set_map(new_meta_map);
+
+  {  // Partial map allocation
+#define ALLOCATE_PARTIAL_MAP(instance_type, size, field_name)                \
+  {                                                                          \
+    Map* map;                                                                \
+    if (!AllocatePartialMap((instance_type), (size)).To(&map)) return false; \
+    set_##field_name##_map(map);                                             \
+  }
+
+    ALLOCATE_PARTIAL_MAP(FIXED_ARRAY_TYPE, kVariableSizeSentinel, fixed_array);
+    ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, undefined);
+    ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, null);
+    ALLOCATE_PARTIAL_MAP(CONSTANT_POOL_ARRAY_TYPE, kVariableSizeSentinel,
+                         constant_pool_array);
+
+#undef ALLOCATE_PARTIAL_MAP
+  }
+
+  // Allocate the empty array.
+  {
+    AllocationResult allocation = AllocateEmptyFixedArray();
+    if (!allocation.To(&obj)) return false;
+  }
+  set_empty_fixed_array(FixedArray::cast(obj));
+
+  {
+    AllocationResult allocation = Allocate(null_map(), OLD_POINTER_SPACE);
+    if (!allocation.To(&obj)) return false;
+  }
+  set_null_value(Oddball::cast(obj));
+  Oddball::cast(obj)->set_kind(Oddball::kNull);
+
+  {
+    AllocationResult allocation = Allocate(undefined_map(), OLD_POINTER_SPACE);
+    if (!allocation.To(&obj)) return false;
+  }
+  set_undefined_value(Oddball::cast(obj));
+  Oddball::cast(obj)->set_kind(Oddball::kUndefined);
+  DCHECK(!InNewSpace(undefined_value()));
+
+  // Set preliminary exception sentinel value before actually initializing it.
+  set_exception(null_value());
+
+  // Allocate the empty descriptor array.
+  {
+    AllocationResult allocation = AllocateEmptyFixedArray();
+    if (!allocation.To(&obj)) return false;
+  }
+  set_empty_descriptor_array(DescriptorArray::cast(obj));
+
+  // Allocate the constant pool array.
+  {
+    AllocationResult allocation = AllocateEmptyConstantPoolArray();
+    if (!allocation.To(&obj)) return false;
+  }
+  set_empty_constant_pool_array(ConstantPoolArray::cast(obj));
+
+  // Fix the instance_descriptors for the existing maps.
+  meta_map()->set_code_cache(empty_fixed_array());
+  meta_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
+  meta_map()->init_back_pointer(undefined_value());
+  meta_map()->set_instance_descriptors(empty_descriptor_array());
+
+  fixed_array_map()->set_code_cache(empty_fixed_array());
+  fixed_array_map()->set_dependent_code(
+      DependentCode::cast(empty_fixed_array()));
+  fixed_array_map()->init_back_pointer(undefined_value());
+  fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
+
+  undefined_map()->set_code_cache(empty_fixed_array());
+  undefined_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
+  undefined_map()->init_back_pointer(undefined_value());
+  undefined_map()->set_instance_descriptors(empty_descriptor_array());
+
+  null_map()->set_code_cache(empty_fixed_array());
+  null_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
+  null_map()->init_back_pointer(undefined_value());
+  null_map()->set_instance_descriptors(empty_descriptor_array());
+
+  constant_pool_array_map()->set_code_cache(empty_fixed_array());
+  constant_pool_array_map()->set_dependent_code(
+      DependentCode::cast(empty_fixed_array()));
+  constant_pool_array_map()->init_back_pointer(undefined_value());
+  constant_pool_array_map()->set_instance_descriptors(empty_descriptor_array());
+
+  // Fix prototype object for existing maps.
+  meta_map()->set_prototype(null_value());
+  meta_map()->set_constructor(null_value());
+
+  fixed_array_map()->set_prototype(null_value());
+  fixed_array_map()->set_constructor(null_value());
+
+  undefined_map()->set_prototype(null_value());
+  undefined_map()->set_constructor(null_value());
+
+  null_map()->set_prototype(null_value());
+  null_map()->set_constructor(null_value());
+
+  constant_pool_array_map()->set_prototype(null_value());
+  constant_pool_array_map()->set_constructor(null_value());
+
+  {  // Map allocation
+#define ALLOCATE_MAP(instance_type, size, field_name)               \
+  {                                                                 \
+    Map* map;                                                       \
+    if (!AllocateMap((instance_type), size).To(&map)) return false; \
+    set_##field_name##_map(map);                                    \
+  }
+
+#define ALLOCATE_VARSIZE_MAP(instance_type, field_name) \
+  ALLOCATE_MAP(instance_type, kVariableSizeSentinel, field_name)
+
+    ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, fixed_cow_array)
+    DCHECK(fixed_array_map() != fixed_cow_array_map());
+
+    ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, scope_info)
+    ALLOCATE_MAP(HEAP_NUMBER_TYPE, HeapNumber::kSize, heap_number)
+    ALLOCATE_MAP(MUTABLE_HEAP_NUMBER_TYPE, HeapNumber::kSize,
+                 mutable_heap_number)
+    ALLOCATE_MAP(SYMBOL_TYPE, Symbol::kSize, symbol)
+    ALLOCATE_MAP(FOREIGN_TYPE, Foreign::kSize, foreign)
+
+    ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, the_hole);
+    ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, boolean);
+    ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, uninitialized);
+    ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, arguments_marker);
+    ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, no_interceptor_result_sentinel);
+    ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, exception);
+    ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, termination_exception);
+
+    for (unsigned i = 0; i < arraysize(string_type_table); i++) {
+      const StringTypeTable& entry = string_type_table[i];
+      {
+        AllocationResult allocation = AllocateMap(entry.type, entry.size);
+        if (!allocation.To(&obj)) return false;
+      }
+      // Mark cons string maps as unstable, because their objects can change
+      // maps during GC.
+      Map* map = Map::cast(obj);
+      if (StringShape(entry.type).IsCons()) map->mark_unstable();
+      roots_[entry.index] = map;
+    }
+
+    ALLOCATE_VARSIZE_MAP(STRING_TYPE, undetectable_string)
+    undetectable_string_map()->set_is_undetectable();
+
+    ALLOCATE_VARSIZE_MAP(ONE_BYTE_STRING_TYPE, undetectable_one_byte_string);
+    undetectable_one_byte_string_map()->set_is_undetectable();
+
+    ALLOCATE_VARSIZE_MAP(FIXED_DOUBLE_ARRAY_TYPE, fixed_double_array)
+    ALLOCATE_VARSIZE_MAP(BYTE_ARRAY_TYPE, byte_array)
+    ALLOCATE_VARSIZE_MAP(FREE_SPACE_TYPE, free_space)
+
+#define ALLOCATE_EXTERNAL_ARRAY_MAP(Type, type, TYPE, ctype, size)        \
+  ALLOCATE_MAP(EXTERNAL_##TYPE##_ARRAY_TYPE, ExternalArray::kAlignedSize, \
+               external_##type##_array)
+
+    TYPED_ARRAYS(ALLOCATE_EXTERNAL_ARRAY_MAP)
+#undef ALLOCATE_EXTERNAL_ARRAY_MAP
+
+#define ALLOCATE_FIXED_TYPED_ARRAY_MAP(Type, type, TYPE, ctype, size) \
+  ALLOCATE_VARSIZE_MAP(FIXED_##TYPE##_ARRAY_TYPE, fixed_##type##_array)
+
+    TYPED_ARRAYS(ALLOCATE_FIXED_TYPED_ARRAY_MAP)
+#undef ALLOCATE_FIXED_TYPED_ARRAY_MAP
+
+    ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, sloppy_arguments_elements)
+
+    ALLOCATE_VARSIZE_MAP(CODE_TYPE, code)
+
+    ALLOCATE_MAP(CELL_TYPE, Cell::kSize, cell)
+    ALLOCATE_MAP(PROPERTY_CELL_TYPE, PropertyCell::kSize, global_property_cell)
+    ALLOCATE_MAP(FILLER_TYPE, kPointerSize, one_pointer_filler)
+    ALLOCATE_MAP(FILLER_TYPE, 2 * kPointerSize, two_pointer_filler)
+
+
+    for (unsigned i = 0; i < arraysize(struct_table); i++) {
+      const StructTable& entry = struct_table[i];
+      Map* map;
+      if (!AllocateMap(entry.type, entry.size).To(&map)) return false;
+      roots_[entry.index] = map;
+    }
+
+    ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, hash_table)
+    ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, ordered_hash_table)
+
+    ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, function_context)
+    ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, catch_context)
+    ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, with_context)
+    ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, block_context)
+    ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, module_context)
+    ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, global_context)
+
+    ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, native_context)
+    native_context_map()->set_dictionary_map(true);
+    native_context_map()->set_visitor_id(
+        StaticVisitorBase::kVisitNativeContext);
+
+    ALLOCATE_MAP(SHARED_FUNCTION_INFO_TYPE, SharedFunctionInfo::kAlignedSize,
+                 shared_function_info)
+
+    ALLOCATE_MAP(JS_MESSAGE_OBJECT_TYPE, JSMessageObject::kSize, message_object)
+    ALLOCATE_MAP(JS_OBJECT_TYPE, JSObject::kHeaderSize + kPointerSize, external)
+    external_map()->set_is_extensible(false);
+#undef ALLOCATE_VARSIZE_MAP
+#undef ALLOCATE_MAP
+  }
+
+  {  // Empty arrays
+    {
+      ByteArray* byte_array;
+      if (!AllocateByteArray(0, TENURED).To(&byte_array)) return false;
+      set_empty_byte_array(byte_array);
+    }
+
+#define ALLOCATE_EMPTY_EXTERNAL_ARRAY(Type, type, TYPE, ctype, size)  \
+  {                                                                   \
+    ExternalArray* obj;                                               \
+    if (!AllocateEmptyExternalArray(kExternal##Type##Array).To(&obj)) \
+      return false;                                                   \
+    set_empty_external_##type##_array(obj);                           \
+  }
+
+    TYPED_ARRAYS(ALLOCATE_EMPTY_EXTERNAL_ARRAY)
+#undef ALLOCATE_EMPTY_EXTERNAL_ARRAY
+
+#define ALLOCATE_EMPTY_FIXED_TYPED_ARRAY(Type, type, TYPE, ctype, size) \
+  {                                                                     \
+    FixedTypedArrayBase* obj;                                           \
+    if (!AllocateEmptyFixedTypedArray(kExternal##Type##Array).To(&obj)) \
+      return false;                                                     \
+    set_empty_fixed_##type##_array(obj);                                \
+  }
+
+    TYPED_ARRAYS(ALLOCATE_EMPTY_FIXED_TYPED_ARRAY)
+#undef ALLOCATE_EMPTY_FIXED_TYPED_ARRAY
+  }
+  DCHECK(!InNewSpace(empty_fixed_array()));
+  return true;
+}
+
+
+AllocationResult Heap::AllocateHeapNumber(double value, MutableMode mode,
+                                          PretenureFlag pretenure) {
+  // Statically ensure that it is safe to allocate heap numbers in paged
+  // spaces.
+  int size = HeapNumber::kSize;
+  STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxRegularHeapObjectSize);
+
+  AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
+
+  HeapObject* result;
+  {
+    AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
+    if (!allocation.To(&result)) return allocation;
+  }
+
+  Map* map = mode == MUTABLE ? mutable_heap_number_map() : heap_number_map();
+  HeapObject::cast(result)->set_map_no_write_barrier(map);
+  HeapNumber::cast(result)->set_value(value);
+  return result;
+}
+
+
+AllocationResult Heap::AllocateCell(Object* value) {
+  int size = Cell::kSize;
+  STATIC_ASSERT(Cell::kSize <= Page::kMaxRegularHeapObjectSize);
+
+  HeapObject* result;
+  {
+    AllocationResult allocation = AllocateRaw(size, CELL_SPACE, CELL_SPACE);
+    if (!allocation.To(&result)) return allocation;
+  }
+  result->set_map_no_write_barrier(cell_map());
+  Cell::cast(result)->set_value(value);
+  return result;
+}
+
+
+AllocationResult Heap::AllocatePropertyCell() {
+  int size = PropertyCell::kSize;
+  STATIC_ASSERT(PropertyCell::kSize <= Page::kMaxRegularHeapObjectSize);
+
+  HeapObject* result;
+  AllocationResult allocation =
+      AllocateRaw(size, PROPERTY_CELL_SPACE, PROPERTY_CELL_SPACE);
+  if (!allocation.To(&result)) return allocation;
+
+  result->set_map_no_write_barrier(global_property_cell_map());
+  PropertyCell* cell = PropertyCell::cast(result);
+  cell->set_dependent_code(DependentCode::cast(empty_fixed_array()),
+                           SKIP_WRITE_BARRIER);
+  cell->set_value(the_hole_value());
+  cell->set_type(HeapType::None());
+  return result;
+}
+
+
+void Heap::CreateApiObjects() {
+  HandleScope scope(isolate());
+  Factory* factory = isolate()->factory();
+  Handle<Map> new_neander_map =
+      factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
+
+  // Don't use Smi-only elements optimizations for objects with the neander
+  // map. There are too many cases where element values are set directly,
+  // without a bottleneck to trap the Smi-only -> fast elements transition,
+  // and there appears to be no benefit in optimizing this case.
+  new_neander_map->set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND);
+  set_neander_map(*new_neander_map);
+
+  Handle<JSObject> listeners = factory->NewNeanderObject();
+  Handle<FixedArray> elements = factory->NewFixedArray(2);
+  elements->set(0, Smi::FromInt(0));
+  listeners->set_elements(*elements);
+  set_message_listeners(*listeners);
+}
+
+
+void Heap::CreateJSEntryStub() {
+  JSEntryStub stub(isolate(), StackFrame::ENTRY);
+  set_js_entry_code(*stub.GetCode());
+}
+
+
+void Heap::CreateJSConstructEntryStub() {
+  JSEntryStub stub(isolate(), StackFrame::ENTRY_CONSTRUCT);
+  set_js_construct_entry_code(*stub.GetCode());
+}
+
+
+void Heap::CreateFixedStubs() {
+  // Here we create roots for fixed stubs. They are needed at GC
+  // for cooking and uncooking (check out frames.cc).
+  // This eliminates the need for doing a dictionary lookup in the
+  // stub cache for these stubs.
+  HandleScope scope(isolate());
+
+  // Create stubs that should be there, so we don't unexpectedly have to
+  // create them if we need them during the creation of another stub.
+  // Stub creation mixes raw pointers and handles in an unsafe manner so
+  // we cannot create stubs while we are creating stubs.
+  CodeStub::GenerateStubsAheadOfTime(isolate());
+
+  // MacroAssembler::Abort calls (usually enabled with --debug-code) depend on
+  // CEntryStub, so we need to call GenerateStubsAheadOfTime before JSEntryStub
+  // is created.
+
+  // gcc-4.4 has a problem generating correct code for the following snippet:
+  // {  JSEntryStub stub;
+  //    js_entry_code_ = *stub.GetCode();
+  // }
+  // {  JSConstructEntryStub stub;
+  //    js_construct_entry_code_ = *stub.GetCode();
+  // }
+  // To work around the problem, make separate functions without inlining.
+  Heap::CreateJSEntryStub();
+  Heap::CreateJSConstructEntryStub();
+}
+
+
+void Heap::CreateInitialObjects() {
+  HandleScope scope(isolate());
+  Factory* factory = isolate()->factory();
+
+  // The -0 value must be set before NewNumber works.
+  set_minus_zero_value(*factory->NewHeapNumber(-0.0, IMMUTABLE, TENURED));
+  DCHECK(std::signbit(minus_zero_value()->Number()) != 0);
+
+  set_nan_value(
+      *factory->NewHeapNumber(base::OS::nan_value(), IMMUTABLE, TENURED));
+  set_infinity_value(*factory->NewHeapNumber(V8_INFINITY, IMMUTABLE, TENURED));
+
+  // The hole has not been created yet, but we want to put something
+  // predictable in the gaps in the string table, so let's make that Smi zero.
+  set_the_hole_value(reinterpret_cast<Oddball*>(Smi::FromInt(0)));
+
+  // Allocate initial string table.
+  set_string_table(*StringTable::New(isolate(), kInitialStringTableSize));
+
+  // Finish initializing oddballs after creating the string table.
+  Oddball::Initialize(isolate(), factory->undefined_value(), "undefined",
+                      factory->nan_value(), Oddball::kUndefined);
+
+  // Initialize the null_value.
+  Oddball::Initialize(isolate(), factory->null_value(), "null",
+                      handle(Smi::FromInt(0), isolate()), Oddball::kNull);
+
+  set_true_value(*factory->NewOddball(factory->boolean_map(), "true",
+                                      handle(Smi::FromInt(1), isolate()),
+                                      Oddball::kTrue));
+
+  set_false_value(*factory->NewOddball(factory->boolean_map(), "false",
+                                       handle(Smi::FromInt(0), isolate()),
+                                       Oddball::kFalse));
+
+  set_the_hole_value(*factory->NewOddball(factory->the_hole_map(), "hole",
+                                          handle(Smi::FromInt(-1), isolate()),
+                                          Oddball::kTheHole));
+
+  set_uninitialized_value(*factory->NewOddball(
+      factory->uninitialized_map(), "uninitialized",
+      handle(Smi::FromInt(-1), isolate()), Oddball::kUninitialized));
+
+  set_arguments_marker(*factory->NewOddball(
+      factory->arguments_marker_map(), "arguments_marker",
+      handle(Smi::FromInt(-4), isolate()), Oddball::kArgumentMarker));
+
+  set_no_interceptor_result_sentinel(*factory->NewOddball(
+      factory->no_interceptor_result_sentinel_map(),
+      "no_interceptor_result_sentinel", handle(Smi::FromInt(-2), isolate()),
+      Oddball::kOther));
+
+  set_termination_exception(*factory->NewOddball(
+      factory->termination_exception_map(), "termination_exception",
+      handle(Smi::FromInt(-3), isolate()), Oddball::kOther));
+
+  set_exception(*factory->NewOddball(factory->exception_map(), "exception",
+                                     handle(Smi::FromInt(-5), isolate()),
+                                     Oddball::kException));
+
+  for (unsigned i = 0; i < arraysize(constant_string_table); i++) {
+    Handle<String> str =
+        factory->InternalizeUtf8String(constant_string_table[i].contents);
+    roots_[constant_string_table[i].index] = *str;
+  }
+
+  // Allocate the hidden string which is used to identify the hidden properties
+  // in JSObjects. The hash code has a special value so that it will not match
+  // the empty string when searching for the property. It cannot be part of the
+  // loop above because it needs to be allocated manually with the special
+  // hash code in place. The hash code for the hidden_string is zero to ensure
+  // that it will always be at the first entry in property descriptors.
+  hidden_string_ = *factory->NewOneByteInternalizedString(
+      OneByteVector("", 0), String::kEmptyStringHash);
+
+  // Create the code_stubs dictionary. The initial size is set to avoid
+  // expanding the dictionary during bootstrapping.
+  set_code_stubs(*UnseededNumberDictionary::New(isolate(), 128));
+
+  // Create the non_monomorphic_cache used in stub-cache.cc. The initial size
+  // is set to avoid expanding the dictionary during bootstrapping.
+  set_non_monomorphic_cache(*UnseededNumberDictionary::New(isolate(), 64));
+
+  set_polymorphic_code_cache(PolymorphicCodeCache::cast(
+      *factory->NewStruct(POLYMORPHIC_CODE_CACHE_TYPE)));
+
+  set_instanceof_cache_function(Smi::FromInt(0));
+  set_instanceof_cache_map(Smi::FromInt(0));
+  set_instanceof_cache_answer(Smi::FromInt(0));
+
+  CreateFixedStubs();
+
+  // Allocate the dictionary of intrinsic function names.
+  Handle<NameDictionary> intrinsic_names =
+      NameDictionary::New(isolate(), Runtime::kNumFunctions, TENURED);
+  Runtime::InitializeIntrinsicFunctionNames(isolate(), intrinsic_names);
+  set_intrinsic_function_names(*intrinsic_names);
+
+  set_number_string_cache(
+      *factory->NewFixedArray(kInitialNumberStringCacheSize * 2, TENURED));
+
+  // Allocate cache for single character one byte strings.
+  set_single_character_string_cache(
+      *factory->NewFixedArray(String::kMaxOneByteCharCode + 1, TENURED));
+
+  // Allocate cache for string split and regexp-multiple.
+  set_string_split_cache(*factory->NewFixedArray(
+      RegExpResultsCache::kRegExpResultsCacheSize, TENURED));
+  set_regexp_multiple_cache(*factory->NewFixedArray(
+      RegExpResultsCache::kRegExpResultsCacheSize, TENURED));
+
+  // Allocate cache for external strings pointing to native source code.
+  set_natives_source_cache(
+      *factory->NewFixedArray(Natives::GetBuiltinsCount()));
+
+  set_undefined_cell(*factory->NewCell(factory->undefined_value()));
+
+  // The symbol registry is initialized lazily.
+  set_symbol_registry(undefined_value());
+
+  // Allocate object to hold object observation state.
+  set_observation_state(*factory->NewJSObjectFromMap(
+      factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize)));
+
+  // The microtask queue uses the empty fixed array as a sentinel for "empty".
+  // The number of queued microtasks is stored in
+  // Isolate::pending_microtask_count().
+  set_microtask_queue(empty_fixed_array());
+
+  set_detailed_stack_trace_symbol(*factory->NewPrivateOwnSymbol());
+  set_elements_transition_symbol(*factory->NewPrivateOwnSymbol());
+  set_frozen_symbol(*factory->NewPrivateOwnSymbol());
+  set_megamorphic_symbol(*factory->NewPrivateOwnSymbol());
+  set_premonomorphic_symbol(*factory->NewPrivateOwnSymbol());
+  set_generic_symbol(*factory->NewPrivateOwnSymbol());
+  set_nonexistent_symbol(*factory->NewPrivateOwnSymbol());
+  set_normal_ic_symbol(*factory->NewPrivateOwnSymbol());
+  set_observed_symbol(*factory->NewPrivateOwnSymbol());
+  set_stack_trace_symbol(*factory->NewPrivateOwnSymbol());
+  set_uninitialized_symbol(*factory->NewPrivateOwnSymbol());
+  set_home_object_symbol(*factory->NewPrivateOwnSymbol());
+
+  Handle<SeededNumberDictionary> slow_element_dictionary =
+      SeededNumberDictionary::New(isolate(), 0, TENURED);
+  slow_element_dictionary->set_requires_slow_elements();
+  set_empty_slow_element_dictionary(*slow_element_dictionary);
+
+  set_materialized_objects(*factory->NewFixedArray(0, TENURED));
+
+  // Handling of script id generation is in Factory::NewScript.
+  set_last_script_id(Smi::FromInt(v8::UnboundScript::kNoScriptId));
+
+  set_allocation_sites_scratchpad(
+      *factory->NewFixedArray(kAllocationSiteScratchpadSize, TENURED));
+  InitializeAllocationSitesScratchpad();
+
+  // Initialize keyed lookup cache.
+  isolate_->keyed_lookup_cache()->Clear();
+
+  // Initialize context slot cache.
+  isolate_->context_slot_cache()->Clear();
+
+  // Initialize descriptor cache.
+  isolate_->descriptor_lookup_cache()->Clear();
+
+  // Initialize compilation cache.
+  isolate_->compilation_cache()->Clear();
+}
+
+
+bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
+  RootListIndex writable_roots[] = {
+      kStoreBufferTopRootIndex,
+      kStackLimitRootIndex,
+      kNumberStringCacheRootIndex,
+      kInstanceofCacheFunctionRootIndex,
+      kInstanceofCacheMapRootIndex,
+      kInstanceofCacheAnswerRootIndex,
+      kCodeStubsRootIndex,
+      kNonMonomorphicCacheRootIndex,
+      kPolymorphicCodeCacheRootIndex,
+      kLastScriptIdRootIndex,
+      kEmptyScriptRootIndex,
+      kRealStackLimitRootIndex,
+      kArgumentsAdaptorDeoptPCOffsetRootIndex,
+      kConstructStubDeoptPCOffsetRootIndex,
+      kGetterStubDeoptPCOffsetRootIndex,
+      kSetterStubDeoptPCOffsetRootIndex,
+      kStringTableRootIndex,
+  };
+
+  for (unsigned int i = 0; i < arraysize(writable_roots); i++) {
+    if (root_index == writable_roots[i]) return true;
+  }
+  return false;
+}
+
+
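+// A root can be treated as a constant if it is never written again after
+// bootstrapping and does not live in new space, where it could be moved by a
+// scavenge.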
+bool Heap::RootCanBeTreatedAsConstant(RootListIndex root_index) {
+  return !RootCanBeWrittenAfterInitialization(root_index) &&
+         !InNewSpace(roots_array_start()[root_index]);
+}
+
+
+Object* RegExpResultsCache::Lookup(Heap* heap, String* key_string,
+                                   Object* key_pattern, ResultsCacheType type) {
+  FixedArray* cache;
+  if (!key_string->IsInternalizedString()) return Smi::FromInt(0);
+  if (type == STRING_SPLIT_SUBSTRINGS) {
+    DCHECK(key_pattern->IsString());
+    if (!key_pattern->IsInternalizedString()) return Smi::FromInt(0);
+    cache = heap->string_split_cache();
+  } else {
+    DCHECK(type == REGEXP_MULTIPLE_INDICES);
+    DCHECK(key_pattern->IsFixedArray());
+    cache = heap->regexp_multiple_cache();
+  }
+
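+  // The cache is two-way set-associative: probe the entry derived from the
+  // string's hash and then the entry immediately after it.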
+  uint32_t hash = key_string->Hash();
+  uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
+                    ~(kArrayEntriesPerCacheEntry - 1));
+  if (cache->get(index + kStringOffset) == key_string &&
+      cache->get(index + kPatternOffset) == key_pattern) {
+    return cache->get(index + kArrayOffset);
+  }
+  index =
+      ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
+  if (cache->get(index + kStringOffset) == key_string &&
+      cache->get(index + kPatternOffset) == key_pattern) {
+    return cache->get(index + kArrayOffset);
+  }
+  return Smi::FromInt(0);
+}
+
+
+void RegExpResultsCache::Enter(Isolate* isolate, Handle<String> key_string,
+                               Handle<Object> key_pattern,
+                               Handle<FixedArray> value_array,
+                               ResultsCacheType type) {
+  Factory* factory = isolate->factory();
+  Handle<FixedArray> cache;
+  if (!key_string->IsInternalizedString()) return;
+  if (type == STRING_SPLIT_SUBSTRINGS) {
+    DCHECK(key_pattern->IsString());
+    if (!key_pattern->IsInternalizedString()) return;
+    cache = factory->string_split_cache();
+  } else {
+    DCHECK(type == REGEXP_MULTIPLE_INDICES);
+    DCHECK(key_pattern->IsFixedArray());
+    cache = factory->regexp_multiple_cache();
+  }
+
+  uint32_t hash = key_string->Hash();
+  uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
+                    ~(kArrayEntriesPerCacheEntry - 1));
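+  // Install into the primary entry if it is free, otherwise into the
+  // secondary entry; if both are occupied, clear the secondary entry and
+  // overwrite the primary one.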
+  if (cache->get(index + kStringOffset) == Smi::FromInt(0)) {
+    cache->set(index + kStringOffset, *key_string);
+    cache->set(index + kPatternOffset, *key_pattern);
+    cache->set(index + kArrayOffset, *value_array);
+  } else {
+    uint32_t index2 =
+        ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
+    if (cache->get(index2 + kStringOffset) == Smi::FromInt(0)) {
+      cache->set(index2 + kStringOffset, *key_string);
+      cache->set(index2 + kPatternOffset, *key_pattern);
+      cache->set(index2 + kArrayOffset, *value_array);
+    } else {
+      cache->set(index2 + kStringOffset, Smi::FromInt(0));
+      cache->set(index2 + kPatternOffset, Smi::FromInt(0));
+      cache->set(index2 + kArrayOffset, Smi::FromInt(0));
+      cache->set(index + kStringOffset, *key_string);
+      cache->set(index + kPatternOffset, *key_pattern);
+      cache->set(index + kArrayOffset, *value_array);
+    }
+  }
+  // If the array is a reasonably short list of substrings, convert it into a
+  // list of internalized strings.
+  if (type == STRING_SPLIT_SUBSTRINGS && value_array->length() < 100) {
+    for (int i = 0; i < value_array->length(); i++) {
+      Handle<String> str(String::cast(value_array->get(i)), isolate);
+      Handle<String> internalized_str = factory->InternalizeString(str);
+      value_array->set(i, *internalized_str);
+    }
+  }
+  // Convert backing store to a copy-on-write array.
+  value_array->set_map_no_write_barrier(*factory->fixed_cow_array_map());
+}
+
+
+void RegExpResultsCache::Clear(FixedArray* cache) {
+  for (int i = 0; i < kRegExpResultsCacheSize; i++) {
+    cache->set(i, Smi::FromInt(0));
+  }
+}
+
+
+int Heap::FullSizeNumberStringCacheLength() {
+  // Compute the size of the number string cache based on the max newspace
+  // size. The number string cache has a minimum size based on twice the
+  // initial cache size to ensure that it is bigger after being made 'full
+  // size'.
+  int number_string_cache_size = max_semi_space_size_ / 512;
+  number_string_cache_size = Max(kInitialNumberStringCacheSize * 2,
+                                 Min(0x4000, number_string_cache_size));
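+  // E.g. max_semi_space_size_ == 8 * MB gives 8 * MB / 512 = 16384 entries,
+  // which is exactly the 0x4000 cap.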
+  // There is a string and a number per entry so the length is twice the number
+  // of entries.
+  return number_string_cache_size * 2;
+}
+
+
+void Heap::FlushNumberStringCache() {
+  // Flush the number to string cache.
+  int len = number_string_cache()->length();
+  for (int i = 0; i < len; i++) {
+    number_string_cache()->set_undefined(i);
+  }
+}
+
+
+void Heap::FlushAllocationSitesScratchpad() {
+  for (int i = 0; i < allocation_sites_scratchpad_length_; i++) {
+    allocation_sites_scratchpad()->set_undefined(i);
+  }
+  allocation_sites_scratchpad_length_ = 0;
+}
+
+
+void Heap::InitializeAllocationSitesScratchpad() {
+  DCHECK(allocation_sites_scratchpad()->length() ==
+         kAllocationSiteScratchpadSize);
+  for (int i = 0; i < kAllocationSiteScratchpadSize; i++) {
+    allocation_sites_scratchpad()->set_undefined(i);
+  }
+}
+
+
+void Heap::AddAllocationSiteToScratchpad(AllocationSite* site,
+                                         ScratchpadSlotMode mode) {
+  if (allocation_sites_scratchpad_length_ < kAllocationSiteScratchpadSize) {
+    // We cannot use the normal write-barrier because slots need to be
+    // recorded with non-incremental marking as well. We have to explicitly
+    // record the slot to take evacuation candidates into account.
+    allocation_sites_scratchpad()->set(allocation_sites_scratchpad_length_,
+                                       site, SKIP_WRITE_BARRIER);
+    Object** slot = allocation_sites_scratchpad()->RawFieldOfElementAt(
+        allocation_sites_scratchpad_length_);
+
+    if (mode == RECORD_SCRATCHPAD_SLOT) {
+      // We need to allow slots buffer overflow here since the evacuation
+      // candidates are not part of the global list of old space pages and
+      // releasing an evacuation candidate due to a slots buffer overflow
+      // results in lost pages.
+      mark_compact_collector()->RecordSlot(slot, slot, *slot,
+                                           SlotsBuffer::IGNORE_OVERFLOW);
+    }
+    allocation_sites_scratchpad_length_++;
+  }
+}
+
+
+Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
+  return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
+}
+
+
+Heap::RootListIndex Heap::RootIndexForExternalArrayType(
+    ExternalArrayType array_type) {
+  switch (array_type) {
+#define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
+  case kExternal##Type##Array:                                  \
+    return kExternal##Type##ArrayMapRootIndex;
+
+    TYPED_ARRAYS(ARRAY_TYPE_TO_ROOT_INDEX)
+#undef ARRAY_TYPE_TO_ROOT_INDEX
+
+    default:
+      UNREACHABLE();
+      return kUndefinedValueRootIndex;
+  }
+}
+
+
+Map* Heap::MapForFixedTypedArray(ExternalArrayType array_type) {
+  return Map::cast(roots_[RootIndexForFixedTypedArray(array_type)]);
+}
+
+
+Heap::RootListIndex Heap::RootIndexForFixedTypedArray(
+    ExternalArrayType array_type) {
+  switch (array_type) {
+#define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
+  case kExternal##Type##Array:                                  \
+    return kFixed##Type##ArrayMapRootIndex;
+
+    TYPED_ARRAYS(ARRAY_TYPE_TO_ROOT_INDEX)
+#undef ARRAY_TYPE_TO_ROOT_INDEX
+
+    default:
+      UNREACHABLE();
+      return kUndefinedValueRootIndex;
+  }
+}
+
+
+Heap::RootListIndex Heap::RootIndexForEmptyExternalArray(
+    ElementsKind elementsKind) {
+  switch (elementsKind) {
+#define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
+  case EXTERNAL_##TYPE##_ELEMENTS:                                \
+    return kEmptyExternal##Type##ArrayRootIndex;
+
+    TYPED_ARRAYS(ELEMENT_KIND_TO_ROOT_INDEX)
+#undef ELEMENT_KIND_TO_ROOT_INDEX
+
+    default:
+      UNREACHABLE();
+      return kUndefinedValueRootIndex;
+  }
+}
+
+
+Heap::RootListIndex Heap::RootIndexForEmptyFixedTypedArray(
+    ElementsKind elementsKind) {
+  switch (elementsKind) {
+#define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
+  case TYPE##_ELEMENTS:                                           \
+    return kEmptyFixed##Type##ArrayRootIndex;
+
+    TYPED_ARRAYS(ELEMENT_KIND_TO_ROOT_INDEX)
+#undef ELEMENT_KIND_TO_ROOT_INDEX
+    default:
+      UNREACHABLE();
+      return kUndefinedValueRootIndex;
+  }
+}
+
+
+ExternalArray* Heap::EmptyExternalArrayForMap(Map* map) {
+  return ExternalArray::cast(
+      roots_[RootIndexForEmptyExternalArray(map->elements_kind())]);
+}
+
+
+FixedTypedArrayBase* Heap::EmptyFixedTypedArrayForMap(Map* map) {
+  return FixedTypedArrayBase::cast(
+      roots_[RootIndexForEmptyFixedTypedArray(map->elements_kind())]);
+}
+
+
+AllocationResult Heap::AllocateForeign(Address address,
+                                       PretenureFlag pretenure) {
+  // Statically ensure that it is safe to allocate foreigns in paged spaces.
+  STATIC_ASSERT(Foreign::kSize <= Page::kMaxRegularHeapObjectSize);
+  AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
+  Foreign* result;
+  AllocationResult allocation = Allocate(foreign_map(), space);
+  if (!allocation.To(&result)) return allocation;
+  result->set_foreign_address(address);
+  return result;
+}
+
+
+AllocationResult Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
+  if (length < 0 || length > ByteArray::kMaxLength) {
+    v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
+  }
+  int size = ByteArray::SizeFor(length);
+  AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
+  HeapObject* result;
+  {
+    AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
+    if (!allocation.To(&result)) return allocation;
+  }
+
+  result->set_map_no_write_barrier(byte_array_map());
+  ByteArray::cast(result)->set_length(length);
+  return result;
+}
+
+
+void Heap::CreateFillerObjectAt(Address addr, int size) {
+  if (size == 0) return;
+  HeapObject* filler = HeapObject::FromAddress(addr);
+  if (size == kPointerSize) {
+    filler->set_map_no_write_barrier(one_pointer_filler_map());
+  } else if (size == 2 * kPointerSize) {
+    filler->set_map_no_write_barrier(two_pointer_filler_map());
+  } else {
+    filler->set_map_no_write_barrier(free_space_map());
+    FreeSpace::cast(filler)->set_size(size);
+  }
+}
+
+
+bool Heap::CanMoveObjectStart(HeapObject* object) {
+  Address address = object->address();
+  bool is_in_old_pointer_space = InOldPointerSpace(address);
+  bool is_in_old_data_space = InOldDataSpace(address);
+
+  if (lo_space()->Contains(object)) return false;
+
+  Page* page = Page::FromAddress(address);
+  // We can move the object start if:
+  // (1) the object is not in old pointer or old data space,
+  // (2) the page of the object was already swept,
+  // (3) the page was already concurrently swept. This case is an optimization
+  // for concurrent sweeping. The WasSwept predicate for concurrently swept
+  // pages is set after sweeping all pages.
+  return (!is_in_old_pointer_space && !is_in_old_data_space) ||
+         page->WasSwept() || page->SweepingCompleted();
+}
+
+
+void Heap::AdjustLiveBytes(Address address, int by, InvocationMode mode) {
+  if (incremental_marking()->IsMarking() &&
+      Marking::IsBlack(Marking::MarkBitFrom(address))) {
+    if (mode == FROM_GC) {
+      MemoryChunk::IncrementLiveBytesFromGC(address, by);
+    } else {
+      MemoryChunk::IncrementLiveBytesFromMutator(address, by);
+    }
+  }
+}
+
+
+FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
+                                         int elements_to_trim) {
+  const int element_size = object->IsFixedArray() ? kPointerSize : kDoubleSize;
+  const int bytes_to_trim = elements_to_trim * element_size;
+  Map* map = object->map();
+
+  // For now this trick is only applied to objects in new and paged space.
+  // In large object space the object's start must coincide with the chunk
+  // start, and thus the trick is just not applicable.
+  DCHECK(!lo_space()->Contains(object));
+  DCHECK(object->map() != fixed_cow_array_map());
+
+  STATIC_ASSERT(FixedArrayBase::kMapOffset == 0);
+  STATIC_ASSERT(FixedArrayBase::kLengthOffset == kPointerSize);
+  STATIC_ASSERT(FixedArrayBase::kHeaderSize == 2 * kPointerSize);
+
+  const int len = object->length();
+  DCHECK(elements_to_trim <= len);
+
+  // Calculate location of new array start.
+  Address new_start = object->address() + bytes_to_trim;
+
+  // Technically in new space this write might be omitted (except for
+  // debug mode which iterates through the heap), but to play it safe
+  // we still do it.
+  CreateFillerObjectAt(object->address(), bytes_to_trim);
+
+  // Initialize header of the trimmed array. Since left trimming is only
+  // performed on pages which are not concurrently swept, creating a filler
+  // object does not require synchronization.
+  DCHECK(CanMoveObjectStart(object));
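+  // The new map and length are written in place at the new start address,
+  // over what used to be array elements.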
+  Object** former_start = HeapObject::RawField(object, 0);
+  int new_start_index = elements_to_trim * (element_size / kPointerSize);
+  former_start[new_start_index] = map;
+  former_start[new_start_index + 1] = Smi::FromInt(len - elements_to_trim);
+  FixedArrayBase* new_object =
+      FixedArrayBase::cast(HeapObject::FromAddress(new_start));
+
+  // Maintain consistency of live bytes during incremental marking
+  marking()->TransferMark(object->address(), new_start);
+  AdjustLiveBytes(new_start, -bytes_to_trim, Heap::FROM_MUTATOR);
+
+  // Notify the heap profiler of the change in object layout.
+  OnMoveEvent(new_object, object, new_object->Size());
+  return new_object;
+}
+
+
+// Force instantiation of templatized method.
+template
+void Heap::RightTrimFixedArray<Heap::FROM_GC>(FixedArrayBase*, int);
+template
+void Heap::RightTrimFixedArray<Heap::FROM_MUTATOR>(FixedArrayBase*, int);
+
+
+template<Heap::InvocationMode mode>
+void Heap::RightTrimFixedArray(FixedArrayBase* object, int elements_to_trim) {
+  const int element_size = object->IsFixedArray() ? kPointerSize : kDoubleSize;
+  const int bytes_to_trim = elements_to_trim * element_size;
+
+  // For now this trick is only applied to objects in new and paged space.
+  DCHECK(object->map() != fixed_cow_array_map());
+
+  const int len = object->length();
+  DCHECK(elements_to_trim < len);
+
+  // Calculate location of new array end.
+  Address new_end = object->address() + object->Size() - bytes_to_trim;
+
+  // Technically in new space this write might be omitted (except for
+  // debug mode which iterates through the heap), but to play it safe
+  // we still do it.
+  // We do not create a filler for objects in large object space.
+  // TODO(hpayer): We should shrink the large object page if the size
+  // of the object changed significantly.
+  if (!lo_space()->Contains(object)) {
+    CreateFillerObjectAt(new_end, bytes_to_trim);
+  }
+
+  // Initialize header of the trimmed array. We are storing the new length
+  // using release store after creating a filler for the left-over space to
+  // avoid races with the sweeper thread.
+  object->synchronized_set_length(len - elements_to_trim);
+
+  // Maintain consistency of live bytes during incremental marking
+  AdjustLiveBytes(object->address(), -bytes_to_trim, mode);
+
+  // Notify the heap profiler of the change in object layout. The array may
+  // not be moved during GC, but its size still has to be adjusted.
+  HeapProfiler* profiler = isolate()->heap_profiler();
+  if (profiler->is_tracking_allocations()) {
+    profiler->UpdateObjectSizeEvent(object->address(), object->Size());
+  }
+}
+
+
+AllocationResult Heap::AllocateExternalArray(int length,
+                                             ExternalArrayType array_type,
+                                             void* external_pointer,
+                                             PretenureFlag pretenure) {
+  int size = ExternalArray::kAlignedSize;
+  AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
+  HeapObject* result;
+  {
+    AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
+    if (!allocation.To(&result)) return allocation;
+  }
+
+  result->set_map_no_write_barrier(MapForExternalArrayType(array_type));
+  ExternalArray::cast(result)->set_length(length);
+  ExternalArray::cast(result)->set_external_pointer(external_pointer);
+  return result;
+}
+
+static void ForFixedTypedArray(ExternalArrayType array_type, int* element_size,
+                               ElementsKind* element_kind) {
+  switch (array_type) {
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+  case kExternal##Type##Array:                          \
+    *element_size = size;                               \
+    *element_kind = TYPE##_ELEMENTS;                    \
+    return;
+
+    TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+
+    default:
+      *element_size = 0;               // Bogus
+      *element_kind = UINT8_ELEMENTS;  // Bogus
+      UNREACHABLE();
+  }
+}
+
+
+AllocationResult Heap::AllocateFixedTypedArray(int length,
+                                               ExternalArrayType array_type,
+                                               PretenureFlag pretenure) {
+  int element_size;
+  ElementsKind elements_kind;
+  ForFixedTypedArray(array_type, &element_size, &elements_kind);
+  int size = OBJECT_POINTER_ALIGN(length * element_size +
+                                  FixedTypedArrayBase::kDataOffset);
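+  // On 32-bit hosts reserve one extra word so that the float64 backing store
+  // can be aligned to a double boundary below.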
+#ifndef V8_HOST_ARCH_64_BIT
+  if (array_type == kExternalFloat64Array) {
+    size += kPointerSize;
+  }
+#endif
+  AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
+
+  HeapObject* object;
+  AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
+  if (!allocation.To(&object)) return allocation;
+
+  if (array_type == kExternalFloat64Array) {
+    object = EnsureDoubleAligned(this, object, size);
+  }
+
+  object->set_map(MapForFixedTypedArray(array_type));
+  FixedTypedArrayBase* elements = FixedTypedArrayBase::cast(object);
+  elements->set_length(length);
+  memset(elements->DataPtr(), 0, elements->DataSize());
+  return elements;
+}
+
+
+AllocationResult Heap::AllocateCode(int object_size, bool immovable) {
+  DCHECK(IsAligned(static_cast<intptr_t>(object_size), kCodeAlignment));
+  AllocationResult allocation =
+      AllocateRaw(object_size, CODE_SPACE, CODE_SPACE);
+
+  HeapObject* result;
+  if (!allocation.To(&result)) return allocation;
+
+  if (immovable) {
+    Address address = result->address();
+    // Code objects which should stay at a fixed address are allocated either
+    // in the first page of code space (objects on the first page of each space
+    // are never moved) or in large object space.
+    if (!code_space_->FirstPage()->Contains(address) &&
+        MemoryChunk::FromAddress(address)->owner()->identity() != LO_SPACE) {
+      // Discard the first code allocation, which was on a page where it could
+      // be moved.
+      CreateFillerObjectAt(result->address(), object_size);
+      allocation = lo_space_->AllocateRaw(object_size, EXECUTABLE);
+      if (!allocation.To(&result)) return allocation;
+      OnAllocationEvent(result, object_size);
+    }
+  }
+
+  result->set_map_no_write_barrier(code_map());
+  Code* code = Code::cast(result);
+  DCHECK(isolate_->code_range() == NULL || !isolate_->code_range()->valid() ||
+         isolate_->code_range()->contains(code->address()));
+  code->set_gc_metadata(Smi::FromInt(0));
+  code->set_ic_age(global_ic_age_);
+  return code;
+}
+
+
+AllocationResult Heap::CopyCode(Code* code) {
+  AllocationResult allocation;
+  HeapObject* new_constant_pool;
+  if (FLAG_enable_ool_constant_pool &&
+      code->constant_pool() != empty_constant_pool_array()) {
+    // Copy the constant pool, since edits to the copied code may modify
+    // the constant pool.
+    allocation = CopyConstantPoolArray(code->constant_pool());
+    if (!allocation.To(&new_constant_pool)) return allocation;
+  } else {
+    new_constant_pool = empty_constant_pool_array();
+  }
+
+  HeapObject* result;
+  // Allocate an object the same size as the code object.
+  int obj_size = code->Size();
+  allocation = AllocateRaw(obj_size, CODE_SPACE, CODE_SPACE);
+  if (!allocation.To(&result)) return allocation;
+
+  // Copy code object.
+  Address old_addr = code->address();
+  Address new_addr = result->address();
+  CopyBlock(new_addr, old_addr, obj_size);
+  Code* new_code = Code::cast(result);
+
+  // Update the constant pool.
+  new_code->set_constant_pool(new_constant_pool);
+
+  // Relocate the copy.
+  DCHECK(isolate_->code_range() == NULL || !isolate_->code_range()->valid() ||
+         isolate_->code_range()->contains(code->address()));
+  new_code->Relocate(new_addr - old_addr);
+  return new_code;
+}
+
+
+AllocationResult Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
+  // Allocate the ByteArray and ConstantPoolArray before the Code object, so
+  // that we do not risk leaving an uninitialized Code object (and breaking
+  // the heap).
+  ByteArray* reloc_info_array;
+  {
+    AllocationResult allocation =
+        AllocateByteArray(reloc_info.length(), TENURED);
+    if (!allocation.To(&reloc_info_array)) return allocation;
+  }
+  HeapObject* new_constant_pool;
+  if (FLAG_enable_ool_constant_pool &&
+      code->constant_pool() != empty_constant_pool_array()) {
+    // Copy the constant pool, since edits to the copied code may modify
+    // the constant pool.
+    AllocationResult allocation = CopyConstantPoolArray(code->constant_pool());
+    if (!allocation.To(&new_constant_pool)) return allocation;
+  } else {
+    new_constant_pool = empty_constant_pool_array();
+  }
+
+  int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);
+
+  int new_obj_size = Code::SizeFor(new_body_size);
+
+  Address old_addr = code->address();
+
+  size_t relocation_offset =
+      static_cast<size_t>(code->instruction_end() - old_addr);
+
+  HeapObject* result;
+  AllocationResult allocation =
+      AllocateRaw(new_obj_size, CODE_SPACE, CODE_SPACE);
+  if (!allocation.To(&result)) return allocation;
+
+  // Copy code object.
+  Address new_addr = result->address();
+
+  // Copy header and instructions.
+  CopyBytes(new_addr, old_addr, relocation_offset);
+
+  Code* new_code = Code::cast(result);
+  new_code->set_relocation_info(reloc_info_array);
+
+  // Update constant pool.
+  new_code->set_constant_pool(new_constant_pool);
+
+  // Copy patched rinfo.
+  CopyBytes(new_code->relocation_start(), reloc_info.start(),
+            static_cast<size_t>(reloc_info.length()));
+
+  // Relocate the copy.
+  DCHECK(isolate_->code_range() == NULL || !isolate_->code_range()->valid() ||
+         isolate_->code_range()->contains(code->address()));
+  new_code->Relocate(new_addr - old_addr);
+
+#ifdef VERIFY_HEAP
+  if (FLAG_verify_heap) code->ObjectVerify();
+#endif
+  return new_code;
+}
+
+
+void Heap::InitializeAllocationMemento(AllocationMemento* memento,
+                                       AllocationSite* allocation_site) {
+  memento->set_map_no_write_barrier(allocation_memento_map());
+  DCHECK(allocation_site->map() == allocation_site_map());
+  memento->set_allocation_site(allocation_site, SKIP_WRITE_BARRIER);
+  if (FLAG_allocation_site_pretenuring) {
+    allocation_site->IncrementMementoCreateCount();
+  }
+}
+
+
+AllocationResult Heap::Allocate(Map* map, AllocationSpace space,
+                                AllocationSite* allocation_site) {
+  DCHECK(gc_state_ == NOT_IN_GC);
+  DCHECK(map->instance_type() != MAP_TYPE);
+  // If allocation failures are disallowed, we may allocate in a different
+  // space when new space is full and the object is not a large object.
+  AllocationSpace retry_space =
+      (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
+  int size = map->instance_size();
+  if (allocation_site != NULL) {
+    size += AllocationMemento::kSize;
+  }
+  HeapObject* result;
+  AllocationResult allocation = AllocateRaw(size, space, retry_space);
+  if (!allocation.To(&result)) return allocation;
+  // No need for write barrier since object is white and map is in old space.
+  result->set_map_no_write_barrier(map);
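+  // An AllocationMemento, if requested, is placed directly behind the object.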
+  if (allocation_site != NULL) {
+    AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
+        reinterpret_cast<Address>(result) + map->instance_size());
+    InitializeAllocationMemento(alloc_memento, allocation_site);
+  }
+  return result;
+}
+
+
+void Heap::InitializeJSObjectFromMap(JSObject* obj, FixedArray* properties,
+                                     Map* map) {
+  obj->set_properties(properties);
+  obj->initialize_elements();
+  // TODO(1240798): Initialize the object's body using valid initial values
+  // according to the object's initial map.  For example, if the map's
+  // instance type is JS_ARRAY_TYPE, the length field should be initialized
+  // to a number (e.g. Smi::FromInt(0)) and the elements initialized to a
+  // fixed array (e.g. Heap::empty_fixed_array()).  Currently, the object
+  // verification code has to cope with (temporarily) invalid objects.  See,
+  // for example, JSArray::JSArrayVerify.
+  Object* filler;
+  // We cannot always fill with one_pointer_filler_map because objects
+  // created from API functions expect their internal fields to be initialized
+  // with undefined_value.
+  // Pre-allocated fields need to be initialized with undefined_value as well
+  // so that object accesses before the constructor completes (e.g. in the
+  // debugger) will not cause a crash.
+  if (map->constructor()->IsJSFunction() &&
+      JSFunction::cast(map->constructor())
+          ->IsInobjectSlackTrackingInProgress()) {
+    // We might want to shrink the object later.
+    DCHECK(obj->GetInternalFieldCount() == 0);
+    filler = Heap::one_pointer_filler_map();
+  } else {
+    filler = Heap::undefined_value();
+  }
+  obj->InitializeBody(map, Heap::undefined_value(), filler);
+}
+
+
+AllocationResult Heap::AllocateJSObjectFromMap(
+    Map* map, PretenureFlag pretenure, bool allocate_properties,
+    AllocationSite* allocation_site) {
+  // JSFunctions should be allocated using AllocateFunction to be
+  // properly initialized.
+  DCHECK(map->instance_type() != JS_FUNCTION_TYPE);
+
+  // Both types of global objects should be allocated using
+  // AllocateGlobalObject to be properly initialized.
+  DCHECK(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
+  DCHECK(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
+
+  // Allocate the backing storage for the properties.
+  FixedArray* properties;
+  if (allocate_properties) {
+    int prop_size = map->InitialPropertiesLength();
+    DCHECK(prop_size >= 0);
+    {
+      AllocationResult allocation = AllocateFixedArray(prop_size, pretenure);
+      if (!allocation.To(&properties)) return allocation;
+    }
+  } else {
+    properties = empty_fixed_array();
+  }
+
+  // Allocate the JSObject.
+  int size = map->instance_size();
+  AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, pretenure);
+  JSObject* js_obj;
+  AllocationResult allocation = Allocate(map, space, allocation_site);
+  if (!allocation.To(&js_obj)) return allocation;
+
+  // Initialize the JSObject.
+  InitializeJSObjectFromMap(js_obj, properties, map);
+  DCHECK(js_obj->HasFastElements() || js_obj->HasExternalArrayElements() ||
+         js_obj->HasFixedTypedArrayElements());
+  return js_obj;
+}
+
+
+AllocationResult Heap::AllocateJSObject(JSFunction* constructor,
+                                        PretenureFlag pretenure,
+                                        AllocationSite* allocation_site) {
+  DCHECK(constructor->has_initial_map());
+
+  // Allocate the object based on the constructor's initial map.
+  AllocationResult allocation = AllocateJSObjectFromMap(
+      constructor->initial_map(), pretenure, true, allocation_site);
+#ifdef DEBUG
+  // Make sure result is NOT a global object if valid.
+  HeapObject* obj;
+  DCHECK(!allocation.To(&obj) || !obj->IsGlobalObject());
+#endif
+  return allocation;
+}
+
+
+AllocationResult Heap::CopyJSObject(JSObject* source, AllocationSite* site) {
+  // Never used to copy functions.  If functions need to be copied we
+  // have to be careful to clear the literals array.
+  SLOW_DCHECK(!source->IsJSFunction());
+
+  // Make the clone.
+  Map* map = source->map();
+  int object_size = map->instance_size();
+  HeapObject* clone;
+
+  DCHECK(site == NULL || AllocationSite::CanTrack(map->instance_type()));
+
+  WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
+
+  // If we're forced to always allocate, we use the general allocation
+  // functions which may leave us with an object in old space.
+  if (always_allocate()) {
+    {
+      AllocationResult allocation =
+          AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
+      if (!allocation.To(&clone)) return allocation;
+    }
+    Address clone_address = clone->address();
+    CopyBlock(clone_address, source->address(), object_size);
+    // Update write barrier for all fields that lie beyond the header.
+    RecordWrites(clone_address, JSObject::kHeaderSize,
+                 (object_size - JSObject::kHeaderSize) / kPointerSize);
+  } else {
+    wb_mode = SKIP_WRITE_BARRIER;
+
+    {
+      int adjusted_object_size =
+          site != NULL ? object_size + AllocationMemento::kSize : object_size;
+      AllocationResult allocation =
+          AllocateRaw(adjusted_object_size, NEW_SPACE, NEW_SPACE);
+      if (!allocation.To(&clone)) return allocation;
+    }
+    SLOW_DCHECK(InNewSpace(clone));
+    // Since we know the clone is allocated in new space, we can copy
+    // the contents without worrying about updating the write barrier.
+    CopyBlock(clone->address(), source->address(), object_size);
+
+    if (site != NULL) {
+      AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
+          reinterpret_cast<Address>(clone) + object_size);
+      InitializeAllocationMemento(alloc_memento, site);
+    }
+  }
+
+  SLOW_DCHECK(JSObject::cast(clone)->GetElementsKind() ==
+              source->GetElementsKind());
+  FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
+  FixedArray* properties = FixedArray::cast(source->properties());
+  // Update elements if necessary.
+  if (elements->length() > 0) {
+    FixedArrayBase* elem;
+    {
+      AllocationResult allocation;
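+      // Copy-on-write arrays are shared with the source rather than copied.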
+      if (elements->map() == fixed_cow_array_map()) {
+        allocation = FixedArray::cast(elements);
+      } else if (source->HasFastDoubleElements()) {
+        allocation = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
+      } else {
+        allocation = CopyFixedArray(FixedArray::cast(elements));
+      }
+      if (!allocation.To(&elem)) return allocation;
+    }
+    JSObject::cast(clone)->set_elements(elem, wb_mode);
+  }
+  // Update properties if necessary.
+  if (properties->length() > 0) {
+    FixedArray* prop;
+    {
+      AllocationResult allocation = CopyFixedArray(properties);
+      if (!allocation.To(&prop)) return allocation;
+    }
+    JSObject::cast(clone)->set_properties(prop, wb_mode);
+  }
+  // Return the new clone.
+  return clone;
+}
+
+
+static inline void WriteOneByteData(Vector<const char> vector, uint8_t* chars,
+                                    int len) {
+  // Only works for one byte strings.
+  DCHECK(vector.length() == len);
+  MemCopy(chars, vector.start(), len);
+}
+
+static inline void WriteTwoByteData(Vector<const char> vector, uint16_t* chars,
+                                    int len) {
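+  // Decode the UTF-8 input and emit UTF-16 code units, writing surrogate
+  // pairs for characters outside the BMP.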
+  const uint8_t* stream = reinterpret_cast<const uint8_t*>(vector.start());
+  unsigned stream_length = vector.length();
+  while (stream_length != 0) {
+    unsigned consumed = 0;
+    uint32_t c = unibrow::Utf8::ValueOf(stream, stream_length, &consumed);
+    DCHECK(c != unibrow::Utf8::kBadChar);
+    DCHECK(consumed <= stream_length);
+    stream_length -= consumed;
+    stream += consumed;
+    if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) {
+      len -= 2;
+      if (len < 0) break;
+      *chars++ = unibrow::Utf16::LeadSurrogate(c);
+      *chars++ = unibrow::Utf16::TrailSurrogate(c);
+    } else {
+      len -= 1;
+      if (len < 0) break;
+      *chars++ = c;
+    }
+  }
+  DCHECK(stream_length == 0);
+  DCHECK(len == 0);
+}
+
+
+static inline void WriteOneByteData(String* s, uint8_t* chars, int len) {
+  DCHECK(s->length() == len);
+  String::WriteToFlat(s, chars, 0, len);
+}
+
+
+static inline void WriteTwoByteData(String* s, uint16_t* chars, int len) {
+  DCHECK(s->length() == len);
+  String::WriteToFlat(s, chars, 0, len);
+}
+
+
+template <bool is_one_byte, typename T>
+AllocationResult Heap::AllocateInternalizedStringImpl(T t, int chars,
+                                                      uint32_t hash_field) {
+  DCHECK(chars >= 0);
+  // Compute map and object size.
+  int size;
+  Map* map;
+
+  DCHECK_LE(0, chars);
+  DCHECK_GE(String::kMaxLength, chars);
+  if (is_one_byte) {
+    map = one_byte_internalized_string_map();
+    size = SeqOneByteString::SizeFor(chars);
+  } else {
+    map = internalized_string_map();
+    size = SeqTwoByteString::SizeFor(chars);
+  }
+  AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, TENURED);
+
+  // Allocate string.
+  HeapObject* result;
+  {
+    AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
+    if (!allocation.To(&result)) return allocation;
+  }
+
+  result->set_map_no_write_barrier(map);
+  // Set length and hash fields of the allocated string.
+  String* answer = String::cast(result);
+  answer->set_length(chars);
+  answer->set_hash_field(hash_field);
+
+  DCHECK_EQ(size, answer->Size());
+
+  if (is_one_byte) {
+    WriteOneByteData(t, SeqOneByteString::cast(answer)->GetChars(), chars);
+  } else {
+    WriteTwoByteData(t, SeqTwoByteString::cast(answer)->GetChars(), chars);
+  }
+  return answer;
+}
+
+
+// Need explicit instantiations.
+template AllocationResult Heap::AllocateInternalizedStringImpl<true>(String*,
+                                                                     int,
+                                                                     uint32_t);
+template AllocationResult Heap::AllocateInternalizedStringImpl<false>(String*,
+                                                                      int,
+                                                                      uint32_t);
+template AllocationResult Heap::AllocateInternalizedStringImpl<false>(
+    Vector<const char>, int, uint32_t);
+
+
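+// Allocates a SeqOneByteString of |length| characters with map, length and
+// hash field set up; the character payload is left uninitialized for the
+// caller to fill in.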
+AllocationResult Heap::AllocateRawOneByteString(int length,
+                                                PretenureFlag pretenure) {
+  DCHECK_LE(0, length);
+  DCHECK_GE(String::kMaxLength, length);
+  int size = SeqOneByteString::SizeFor(length);
+  DCHECK(size <= SeqOneByteString::kMaxSize);
+  AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
+
+  HeapObject* result;
+  {
+    AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
+    if (!allocation.To(&result)) return allocation;
+  }
+
+  // Partially initialize the object.
+  result->set_map_no_write_barrier(one_byte_string_map());
+  String::cast(result)->set_length(length);
+  String::cast(result)->set_hash_field(String::kEmptyHashField);
+  DCHECK_EQ(size, HeapObject::cast(result)->Size());
+
+  return result;
+}
+
+
+AllocationResult Heap::AllocateRawTwoByteString(int length,
+                                                PretenureFlag pretenure) {
+  DCHECK_LE(0, length);
+  DCHECK_GE(String::kMaxLength, length);
+  int size = SeqTwoByteString::SizeFor(length);
+  DCHECK(size <= SeqTwoByteString::kMaxSize);
+  AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
+
+  HeapObject* result;
+  {
+    AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
+    if (!allocation.To(&result)) return allocation;
+  }
+
+  // Partially initialize the object.
+  result->set_map_no_write_barrier(string_map());
+  String::cast(result)->set_length(length);
+  String::cast(result)->set_hash_field(String::kEmptyHashField);
+  DCHECK_EQ(size, HeapObject::cast(result)->Size());
+  return result;
+}
+
+
+AllocationResult Heap::AllocateEmptyFixedArray() {
+  int size = FixedArray::SizeFor(0);
+  HeapObject* result;
+  {
+    AllocationResult allocation =
+        AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
+    if (!allocation.To(&result)) return allocation;
+  }
+  // Initialize the object.
+  result->set_map_no_write_barrier(fixed_array_map());
+  FixedArray::cast(result)->set_length(0);
+  return result;
+}
+
+
+AllocationResult Heap::AllocateEmptyExternalArray(
+    ExternalArrayType array_type) {
+  return AllocateExternalArray(0, array_type, NULL, TENURED);
+}
+
+
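+// Returns |src| unchanged if it already lives outside new space; otherwise
+// copies its contents into a tenured FixedArray and installs the
+// copy-on-write map on the copy.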
+AllocationResult Heap::CopyAndTenureFixedCOWArray(FixedArray* src) {
+  if (!InNewSpace(src)) {
+    return src;
+  }
+
+  int len = src->length();
+  HeapObject* obj;
+  {
+    AllocationResult allocation = AllocateRawFixedArray(len, TENURED);
+    if (!allocation.To(&obj)) return allocation;
+  }
+  obj->set_map_no_write_barrier(fixed_array_map());
+  FixedArray* result = FixedArray::cast(obj);
+  result->set_length(len);
+
+  // Copy the content
+  DisallowHeapAllocation no_gc;
+  WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
+  for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
+
+  // TODO(mvstanton): The map is set twice because of protection against calling
+  // set() on a COW FixedArray. Issue v8:3221 created to track this, and
+  // we might then be able to remove this whole method.
+  HeapObject::cast(obj)->set_map_no_write_barrier(fixed_cow_array_map());
+  return result;
+}
+
+
+AllocationResult Heap::AllocateEmptyFixedTypedArray(
+    ExternalArrayType array_type) {
+  return AllocateFixedTypedArray(0, array_type, TENURED);
+}
+
+
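+// Copies |src| into a freshly allocated array with the given map. If the
+// copy ends up in new space its body is copied with a raw CopyBlock;
+// otherwise the elements are copied one by one using the appropriate write
+// barrier mode.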
+AllocationResult Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
+  int len = src->length();
+  HeapObject* obj;
+  {
+    AllocationResult allocation = AllocateRawFixedArray(len, NOT_TENURED);
+    if (!allocation.To(&obj)) return allocation;
+  }
+  if (InNewSpace(obj)) {
+    obj->set_map_no_write_barrier(map);
+    CopyBlock(obj->address() + kPointerSize, src->address() + kPointerSize,
+              FixedArray::SizeFor(len) - kPointerSize);
+    return obj;
+  }
+  obj->set_map_no_write_barrier(map);
+  FixedArray* result = FixedArray::cast(obj);
+  result->set_length(len);
+
+  // Copy the content
+  DisallowHeapAllocation no_gc;
+  WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
+  for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
+  return result;
+}
+
+
+AllocationResult Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
+                                                   Map* map) {
+  int len = src->length();
+  HeapObject* obj;
+  {
+    AllocationResult allocation = AllocateRawFixedDoubleArray(len, NOT_TENURED);
+    if (!allocation.To(&obj)) return allocation;
+  }
+  obj->set_map_no_write_barrier(map);
+  CopyBlock(obj->address() + FixedDoubleArray::kLengthOffset,
+            src->address() + FixedDoubleArray::kLengthOffset,
+            FixedDoubleArray::SizeFor(len) - FixedDoubleArray::kLengthOffset);
+  return obj;
+}
+
+
+AllocationResult Heap::CopyConstantPoolArrayWithMap(ConstantPoolArray* src,
+                                                    Map* map) {
+  HeapObject* obj;
+  if (src->is_extended_layout()) {
+    ConstantPoolArray::NumberOfEntries small(src,
+                                             ConstantPoolArray::SMALL_SECTION);
+    ConstantPoolArray::NumberOfEntries extended(
+        src, ConstantPoolArray::EXTENDED_SECTION);
+    AllocationResult allocation =
+        AllocateExtendedConstantPoolArray(small, extended);
+    if (!allocation.To(&obj)) return allocation;
+  } else {
+    ConstantPoolArray::NumberOfEntries small(src,
+                                             ConstantPoolArray::SMALL_SECTION);
+    AllocationResult allocation = AllocateConstantPoolArray(small);
+    if (!allocation.To(&obj)) return allocation;
+  }
+  obj->set_map_no_write_barrier(map);
+  CopyBlock(obj->address() + ConstantPoolArray::kFirstEntryOffset,
+            src->address() + ConstantPoolArray::kFirstEntryOffset,
+            src->size() - ConstantPoolArray::kFirstEntryOffset);
+  return obj;
+}
+
+
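+// Allocates the raw backing store for a FixedArray of |length| elements,
+// aborting the process if the length is out of range.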
+AllocationResult Heap::AllocateRawFixedArray(int length,
+                                             PretenureFlag pretenure) {
+  if (length < 0 || length > FixedArray::kMaxLength) {
+    v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
+  }
+  int size = FixedArray::SizeFor(length);
+  AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, pretenure);
+
+  return AllocateRaw(size, space, OLD_POINTER_SPACE);
+}
+
+
+AllocationResult Heap::AllocateFixedArrayWithFiller(int length,
+                                                    PretenureFlag pretenure,
+                                                    Object* filler) {
+  DCHECK(length >= 0);
+  DCHECK(empty_fixed_array()->IsFixedArray());
+  if (length == 0) return empty_fixed_array();
+
+  DCHECK(!InNewSpace(filler));
+  HeapObject* result;
+  {
+    AllocationResult allocation = AllocateRawFixedArray(length, pretenure);
+    if (!allocation.To(&result)) return allocation;
+  }
+
+  result->set_map_no_write_barrier(fixed_array_map());
+  FixedArray* array = FixedArray::cast(result);
+  array->set_length(length);
+  MemsetPointer(array->data_start(), filler, length);
+  return array;
+}
+
+
+AllocationResult Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
+  return AllocateFixedArrayWithFiller(length, pretenure, undefined_value());
+}
+
+
+AllocationResult Heap::AllocateUninitializedFixedArray(int length) {
+  if (length == 0) return empty_fixed_array();
+
+  HeapObject* obj;
+  {
+    AllocationResult allocation = AllocateRawFixedArray(length, NOT_TENURED);
+    if (!allocation.To(&obj)) return allocation;
+  }
+
+  obj->set_map_no_write_barrier(fixed_array_map());
+  FixedArray::cast(obj)->set_length(length);
+  return obj;
+}
+
+
+AllocationResult Heap::AllocateUninitializedFixedDoubleArray(
+    int length, PretenureFlag pretenure) {
+  if (length == 0) return empty_fixed_array();
+
+  HeapObject* elements;
+  AllocationResult allocation = AllocateRawFixedDoubleArray(length, pretenure);
+  if (!allocation.To(&elements)) return allocation;
+
+  elements->set_map_no_write_barrier(fixed_double_array_map());
+  FixedDoubleArray::cast(elements)->set_length(length);
+  return elements;
+}
+
+
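+// Allocates the raw backing store for a FixedDoubleArray. On 32-bit hosts an
+// extra pointer-sized slot is reserved so that the payload can be
+// double-aligned by EnsureDoubleAligned.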
+AllocationResult Heap::AllocateRawFixedDoubleArray(int length,
+                                                   PretenureFlag pretenure) {
+  if (length < 0 || length > FixedDoubleArray::kMaxLength) {
+    v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
+  }
+  int size = FixedDoubleArray::SizeFor(length);
+#ifndef V8_HOST_ARCH_64_BIT
+  size += kPointerSize;
+#endif
+  AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
+
+  HeapObject* object;
+  {
+    AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
+    if (!allocation.To(&object)) return allocation;
+  }
+
+  return EnsureDoubleAligned(this, object, size);
+}
+
+
+AllocationResult Heap::AllocateConstantPoolArray(
+    const ConstantPoolArray::NumberOfEntries& small) {
+  CHECK(small.are_in_range(0, ConstantPoolArray::kMaxSmallEntriesPerType));
+  int size = ConstantPoolArray::SizeFor(small);
+#ifndef V8_HOST_ARCH_64_BIT
+  size += kPointerSize;
+#endif
+  AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
+
+  HeapObject* object;
+  {
+    AllocationResult allocation = AllocateRaw(size, space, OLD_POINTER_SPACE);
+    if (!allocation.To(&object)) return allocation;
+  }
+  object = EnsureDoubleAligned(this, object, size);
+  object->set_map_no_write_barrier(constant_pool_array_map());
+
+  ConstantPoolArray* constant_pool = ConstantPoolArray::cast(object);
+  constant_pool->Init(small);
+  constant_pool->ClearPtrEntries(isolate());
+  return constant_pool;
+}
+
+
+AllocationResult Heap::AllocateExtendedConstantPoolArray(
+    const ConstantPoolArray::NumberOfEntries& small,
+    const ConstantPoolArray::NumberOfEntries& extended) {
+  CHECK(small.are_in_range(0, ConstantPoolArray::kMaxSmallEntriesPerType));
+  CHECK(extended.are_in_range(0, kMaxInt));
+  int size = ConstantPoolArray::SizeForExtended(small, extended);
+#ifndef V8_HOST_ARCH_64_BIT
+  size += kPointerSize;
+#endif
+  AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
+
+  HeapObject* object;
+  {
+    AllocationResult allocation = AllocateRaw(size, space, OLD_POINTER_SPACE);
+    if (!allocation.To(&object)) return allocation;
+  }
+  object = EnsureDoubleAligned(this, object, size);
+  object->set_map_no_write_barrier(constant_pool_array_map());
+
+  ConstantPoolArray* constant_pool = ConstantPoolArray::cast(object);
+  constant_pool->InitExtended(small, extended);
+  constant_pool->ClearPtrEntries(isolate());
+  return constant_pool;
+}
+
+
+AllocationResult Heap::AllocateEmptyConstantPoolArray() {
+  ConstantPoolArray::NumberOfEntries small(0, 0, 0, 0);
+  int size = ConstantPoolArray::SizeFor(small);
+  HeapObject* result;
+  {
+    AllocationResult allocation =
+        AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
+    if (!allocation.To(&result)) return allocation;
+  }
+  result->set_map_no_write_barrier(constant_pool_array_map());
+  ConstantPoolArray::cast(result)->Init(small);
+  return result;
+}
+
+
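+// Allocates a Symbol in old pointer space and gives it a non-zero random
+// hash; the name is initially undefined and no flags are set.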
+AllocationResult Heap::AllocateSymbol() {
+  // Statically ensure that it is safe to allocate symbols in paged spaces.
+  STATIC_ASSERT(Symbol::kSize <= Page::kMaxRegularHeapObjectSize);
+
+  HeapObject* result;
+  AllocationResult allocation =
+      AllocateRaw(Symbol::kSize, OLD_POINTER_SPACE, OLD_POINTER_SPACE);
+  if (!allocation.To(&result)) return allocation;
+
+  result->set_map_no_write_barrier(symbol_map());
+
+  // Generate a random hash value.
+  int hash;
+  int attempts = 0;
+  do {
+    hash = isolate()->random_number_generator()->NextInt() & Name::kHashBitMask;
+    attempts++;
+  } while (hash == 0 && attempts < 30);
+  if (hash == 0) hash = 1;  // never return 0
+
+  Symbol::cast(result)
+      ->set_hash_field(Name::kIsNotArrayIndexMask | (hash << Name::kHashShift));
+  Symbol::cast(result)->set_name(undefined_value());
+  Symbol::cast(result)->set_flags(Smi::FromInt(0));
+
+  DCHECK(!Symbol::cast(result)->is_private());
+  return result;
+}
+
+
+AllocationResult Heap::AllocateStruct(InstanceType type) {
+  Map* map;
+  switch (type) {
+#define MAKE_CASE(NAME, Name, name) \
+  case NAME##_TYPE:                 \
+    map = name##_map();             \
+    break;
+    STRUCT_LIST(MAKE_CASE)
+#undef MAKE_CASE
+    default:
+      UNREACHABLE();
+      return exception();
+  }
+  int size = map->instance_size();
+  AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
+  Struct* result;
+  {
+    AllocationResult allocation = Allocate(map, space);
+    if (!allocation.To(&result)) return allocation;
+  }
+  result->InitializeBody(size);
+  return result;
+}
+
+
+bool Heap::IsHeapIterable() {
+  // TODO(hpayer): This function is not correct. Allocation folding in old
+  // space breaks the iterability.
+  return new_space_top_after_last_gc_ == new_space()->top();
+}
+
+
+void Heap::MakeHeapIterable() {
+  DCHECK(AllowHeapAllocation::IsAllowed());
+  if (!IsHeapIterable()) {
+    CollectAllGarbage(kMakeHeapIterableMask, "Heap::MakeHeapIterable");
+  }
+  if (mark_compact_collector()->sweeping_in_progress()) {
+    mark_compact_collector()->EnsureSweepingCompleted();
+  }
+  DCHECK(IsHeapIterable());
+}
+
+
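+// Performs a memory-reducing full GC from an idle notification. If no GC has
+// happened since the last idle mark-compact, the compilation cache is
+// cleared and new space is shrunk and uncommitted afterwards.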
+void Heap::IdleMarkCompact(const char* message) {
+  bool uncommit = false;
+  if (gc_count_at_last_idle_gc_ == gc_count_) {
+    // No GC since the last full GC, the mutator is probably not active.
+    isolate_->compilation_cache()->Clear();
+    uncommit = true;
+  }
+  CollectAllGarbage(kReduceMemoryFootprintMask, message);
+  gc_idle_time_handler_.NotifyIdleMarkCompact();
+  gc_count_at_last_idle_gc_ = gc_count_;
+  if (uncommit) {
+    new_space_.Shrink();
+    UncommitFromSpace();
+  }
+}
+
+
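+// Advances incremental marking by |step_size| bytes of marking work during
+// idle time and finalizes it with an idle mark-compact once marking is
+// complete.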
+void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) {
+  incremental_marking()->Step(step_size,
+                              IncrementalMarking::NO_GC_VIA_STACK_GUARD, true);
+
+  if (incremental_marking()->IsComplete()) {
+    IdleMarkCompact("idle notification: finalize incremental");
+  }
+}
+
+
+bool Heap::WorthActivatingIncrementalMarking() {
+  return incremental_marking()->IsStopped() &&
+         incremental_marking()->WorthActivating() && NextGCIsLikelyToBeFull();
+}
+
+
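+// Handles an idle notification from the embedder: snapshots the current heap
+// state, asks the GCIdleTimeHandler what to do with the available idle time,
+// performs that action, and records how far the actual time taken under- or
+// overshot the allotted idle time.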
+bool Heap::IdleNotification(int idle_time_in_ms) {
+  // If incremental marking is off, we do not perform idle notification.
+  if (!FLAG_incremental_marking) return true;
+  base::ElapsedTimer timer;
+  timer.Start();
+  isolate()->counters()->gc_idle_time_allotted_in_ms()->AddSample(
+      idle_time_in_ms);
+  HistogramTimerScope idle_notification_scope(
+      isolate_->counters()->gc_idle_notification());
+
+  GCIdleTimeHandler::HeapState heap_state;
+  heap_state.contexts_disposed = contexts_disposed_;
+  heap_state.size_of_objects = static_cast<size_t>(SizeOfObjects());
+  heap_state.incremental_marking_stopped = incremental_marking()->IsStopped();
+  // TODO(ulan): Start incremental marking only for large heaps.
+  heap_state.can_start_incremental_marking =
+      incremental_marking()->ShouldActivate();
+  heap_state.sweeping_in_progress =
+      mark_compact_collector()->sweeping_in_progress();
+  heap_state.mark_compact_speed_in_bytes_per_ms =
+      static_cast<size_t>(tracer()->MarkCompactSpeedInBytesPerMillisecond());
+  heap_state.incremental_marking_speed_in_bytes_per_ms = static_cast<size_t>(
+      tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
+  heap_state.scavenge_speed_in_bytes_per_ms =
+      static_cast<size_t>(tracer()->ScavengeSpeedInBytesPerMillisecond());
+  heap_state.available_new_space_memory = new_space_.Available();
+  heap_state.new_space_capacity = new_space_.Capacity();
+  heap_state.new_space_allocation_throughput_in_bytes_per_ms =
+      static_cast<size_t>(
+          tracer()->NewSpaceAllocationThroughputInBytesPerMillisecond());
+
+  GCIdleTimeAction action =
+      gc_idle_time_handler_.Compute(idle_time_in_ms, heap_state);
+
+  bool result = false;
+  switch (action.type) {
+    case DONE:
+      result = true;
+      break;
+    case DO_INCREMENTAL_MARKING:
+      if (incremental_marking()->IsStopped()) {
+        incremental_marking()->Start();
+      }
+      AdvanceIdleIncrementalMarking(action.parameter);
+      break;
+    case DO_FULL_GC: {
+      HistogramTimerScope scope(isolate_->counters()->gc_context());
+      if (contexts_disposed_) {
+        CollectAllGarbage(kReduceMemoryFootprintMask,
+                          "idle notification: contexts disposed");
+        gc_idle_time_handler_.NotifyIdleMarkCompact();
+        gc_count_at_last_idle_gc_ = gc_count_;
+      } else {
+        IdleMarkCompact("idle notification: finalize idle round");
+      }
+      break;
+    }
+    case DO_SCAVENGE:
+      CollectGarbage(NEW_SPACE, "idle notification: scavenge");
+      break;
+    case DO_FINALIZE_SWEEPING:
+      mark_compact_collector()->EnsureSweepingCompleted();
+      break;
+    case DO_NOTHING:
+      break;
+  }
+
+  int actual_time_ms = static_cast<int>(timer.Elapsed().InMilliseconds());
+  if (actual_time_ms <= idle_time_in_ms) {
+    isolate()->counters()->gc_idle_time_limit_undershot()->AddSample(
+        idle_time_in_ms - actual_time_ms);
+  } else {
+    isolate()->counters()->gc_idle_time_limit_overshot()->AddSample(
+        actual_time_ms - idle_time_in_ms);
+  }
+
+  if (FLAG_trace_idle_notification) {
+    PrintF("Idle notification: requested idle time %d ms, actual time %d ms [",
+           idle_time_in_ms, actual_time_ms);
+    action.Print();
+    PrintF("]\n");
+  }
+
+  contexts_disposed_ = 0;
+  return result;
+}
+
+
+#ifdef DEBUG
+
+void Heap::Print() {
+  if (!HasBeenSetUp()) return;
+  isolate()->PrintStack(stdout);
+  AllSpaces spaces(this);
+  for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
+    space->Print();
+  }
+}
+
+
+void Heap::ReportCodeStatistics(const char* title) {
+  PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
+  PagedSpace::ResetCodeStatistics(isolate());
+  // We do not look for code in new space, map space, or old space.  If code
+  // somehow ends up in those spaces, we would miss it here.
+  code_space_->CollectCodeStatistics();
+  lo_space_->CollectCodeStatistics();
+  PagedSpace::ReportCodeStatistics(isolate());
+}
+
+
+// This function expects that NewSpace's allocated objects histogram is
+// populated (via a call to CollectStatistics or else as a side effect of a
+// just-completed scavenge collection).
+void Heap::ReportHeapStatistics(const char* title) {
+  USE(title);
+  PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n", title,
+         gc_count_);
+  PrintF("old_generation_allocation_limit_ %" V8_PTR_PREFIX "d\n",
+         old_generation_allocation_limit_);
+
+  PrintF("\n");
+  PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles(isolate_));
+  isolate_->global_handles()->PrintStats();
+  PrintF("\n");
+
+  PrintF("Heap statistics : ");
+  isolate_->memory_allocator()->ReportStatistics();
+  PrintF("To space : ");
+  new_space_.ReportStatistics();
+  PrintF("Old pointer space : ");
+  old_pointer_space_->ReportStatistics();
+  PrintF("Old data space : ");
+  old_data_space_->ReportStatistics();
+  PrintF("Code space : ");
+  code_space_->ReportStatistics();
+  PrintF("Map space : ");
+  map_space_->ReportStatistics();
+  PrintF("Cell space : ");
+  cell_space_->ReportStatistics();
+  PrintF("PropertyCell space : ");
+  property_cell_space_->ReportStatistics();
+  PrintF("Large object space : ");
+  lo_space_->ReportStatistics();
+  PrintF(">>>>>> ========================================= >>>>>>\n");
+}
+
+#endif  // DEBUG
+
+bool Heap::Contains(HeapObject* value) { return Contains(value->address()); }
+
+
+bool Heap::Contains(Address addr) {
+  if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) return false;
+  return HasBeenSetUp() &&
+         (new_space_.ToSpaceContains(addr) ||
+          old_pointer_space_->Contains(addr) ||
+          old_data_space_->Contains(addr) || code_space_->Contains(addr) ||
+          map_space_->Contains(addr) || cell_space_->Contains(addr) ||
+          property_cell_space_->Contains(addr) ||
+          lo_space_->SlowContains(addr));
+}
+
+
+bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
+  return InSpace(value->address(), space);
+}
+
+
+bool Heap::InSpace(Address addr, AllocationSpace space) {
+  if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) return false;
+  if (!HasBeenSetUp()) return false;
+
+  switch (space) {
+    case NEW_SPACE:
+      return new_space_.ToSpaceContains(addr);
+    case OLD_POINTER_SPACE:
+      return old_pointer_space_->Contains(addr);
+    case OLD_DATA_SPACE:
+      return old_data_space_->Contains(addr);
+    case CODE_SPACE:
+      return code_space_->Contains(addr);
+    case MAP_SPACE:
+      return map_space_->Contains(addr);
+    case CELL_SPACE:
+      return cell_space_->Contains(addr);
+    case PROPERTY_CELL_SPACE:
+      return property_cell_space_->Contains(addr);
+    case LO_SPACE:
+      return lo_space_->SlowContains(addr);
+    case INVALID_SPACE:
+      break;
+  }
+  UNREACHABLE();
+  return false;
+}
+
+
+#ifdef VERIFY_HEAP
+void Heap::Verify() {
+  CHECK(HasBeenSetUp());
+  HandleScope scope(isolate());
+
+  store_buffer()->Verify();
+
+  if (mark_compact_collector()->sweeping_in_progress()) {
+    // We have to wait here for the sweeper threads to have an iterable heap.
+    mark_compact_collector()->EnsureSweepingCompleted();
+  }
+
+  VerifyPointersVisitor visitor;
+  IterateRoots(&visitor, VISIT_ONLY_STRONG);
+
+  VerifySmisVisitor smis_visitor;
+  IterateSmiRoots(&smis_visitor);
+
+  new_space_.Verify();
+
+  old_pointer_space_->Verify(&visitor);
+  map_space_->Verify(&visitor);
+
+  VerifyPointersVisitor no_dirty_regions_visitor;
+  old_data_space_->Verify(&no_dirty_regions_visitor);
+  code_space_->Verify(&no_dirty_regions_visitor);
+  cell_space_->Verify(&no_dirty_regions_visitor);
+  property_cell_space_->Verify(&no_dirty_regions_visitor);
+
+  lo_space_->Verify();
+}
+#endif
+
+
+void Heap::ZapFromSpace() {
+  NewSpacePageIterator it(new_space_.FromSpaceStart(),
+                          new_space_.FromSpaceEnd());
+  while (it.has_next()) {
+    NewSpacePage* page = it.next();
+    for (Address cursor = page->area_start(), limit = page->area_end();
+         cursor < limit; cursor += kPointerSize) {
+      Memory::Address_at(cursor) = kFromSpaceZapValue;
+    }
+  }
+}
+
+
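+// Visits every pointer-sized slot in [start, end). Slots pointing into
+// from-space are passed to |callback|; if the updated slot still points into
+// new space it is re-entered into the store buffer, and slots pointing at
+// evacuation candidates are recorded when the holding object is black during
+// incremental compaction.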
+void Heap::IterateAndMarkPointersToFromSpace(Address start, Address end,
+                                             ObjectSlotCallback callback) {
+  Address slot_address = start;
+
+  // We are not collecting slots on new space objects during mutation, thus we
+  // have to scan for pointers to evacuation candidates when we promote
+  // objects. But we should not record any slots in non-black objects. Grey
+  // objects' slots would be rescanned anyway, and a white object might not
+  // survive until the end of the collection, so recording its slots would
+  // violate the invariant.
+  bool record_slots = false;
+  if (incremental_marking()->IsCompacting()) {
+    MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::FromAddress(start));
+    record_slots = Marking::IsBlack(mark_bit);
+  }
+
+  while (slot_address < end) {
+    Object** slot = reinterpret_cast<Object**>(slot_address);
+    Object* object = *slot;
+    // If the store buffer becomes overfull we mark pages as being exempt from
+    // the store buffer.  These pages are scanned to find pointers that point
+    // to the new space.  In that case we may hit newly promoted objects and
+    // fix the pointers before the promotion queue gets to them.  Thus the 'if'.
+    if (object->IsHeapObject()) {
+      if (Heap::InFromSpace(object)) {
+        callback(reinterpret_cast<HeapObject**>(slot),
+                 HeapObject::cast(object));
+        Object* new_object = *slot;
+        if (InNewSpace(new_object)) {
+          SLOW_DCHECK(Heap::InToSpace(new_object));
+          SLOW_DCHECK(new_object->IsHeapObject());
+          store_buffer_.EnterDirectlyIntoStoreBuffer(
+              reinterpret_cast<Address>(slot));
+        }
+        SLOW_DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_object));
+      } else if (record_slots &&
+                 MarkCompactCollector::IsOnEvacuationCandidate(object)) {
+        mark_compact_collector()->RecordSlot(slot, slot, object);
+      }
+    }
+    slot_address += kPointerSize;
+  }
+}
+
+
+#ifdef DEBUG
+typedef bool (*CheckStoreBufferFilter)(Object** addr);
+
+
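+// Heuristic filter for the store buffer checker: returns true if the offset
+// of |addr| modulo Map::kSize falls within the pointer-field range of a Map.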
+bool IsAMapPointerAddress(Object** addr) {
+  uintptr_t a = reinterpret_cast<uintptr_t>(addr);
+  int mod = a % Map::kSize;
+  return mod >= Map::kPointerFieldsBeginOffset &&
+         mod < Map::kPointerFieldsEndOffset;
+}
+
+
+bool EverythingsAPointer(Object** addr) { return true; }
+
+
+static void CheckStoreBuffer(Heap* heap, Object** current, Object** limit,
+                             Object**** store_buffer_position,
+                             Object*** store_buffer_top,
+                             CheckStoreBufferFilter filter,
+                             Address special_garbage_start,
+                             Address special_garbage_end) {
+  Map* free_space_map = heap->free_space_map();
+  for (; current < limit; current++) {
+    Object* o = *current;
+    Address current_address = reinterpret_cast<Address>(current);
+    // Skip free space.
+    if (o == free_space_map) {
+      FreeSpace* free_space =
+          FreeSpace::cast(HeapObject::FromAddress(current_address));
+      int skip = free_space->Size();
+      DCHECK(current_address + skip <= reinterpret_cast<Address>(limit));
+      DCHECK(skip > 0);
+      current_address += skip - kPointerSize;
+      current = reinterpret_cast<Object**>(current_address);
+      continue;
+    }
+    // Skip the current linear allocation space between top and limit which is
+    // unmarked with the free space map, but can contain junk.
+    if (current_address == special_garbage_start &&
+        special_garbage_end != special_garbage_start) {
+      current_address = special_garbage_end - kPointerSize;
+      current = reinterpret_cast<Object**>(current_address);
+      continue;
+    }
+    if (!(*filter)(current)) continue;
+    DCHECK(current_address < special_garbage_start ||
+           current_address >= special_garbage_end);
+    DCHECK(reinterpret_cast<uintptr_t>(o) != kFreeListZapValue);
+    // We have to check that the pointer does not point into new space
+    // without trying to cast it to a heap object since the hash field of
+    // a string can contain values like 1 and 3 which are tagged null
+    // pointers.
+    if (!heap->InNewSpace(o)) continue;
+    while (**store_buffer_position < current &&
+           *store_buffer_position < store_buffer_top) {
+      (*store_buffer_position)++;
+    }
+    if (**store_buffer_position != current ||
+        *store_buffer_position == store_buffer_top) {
+      Object** obj_start = current;
+      while (!(*obj_start)->IsMap()) obj_start--;
+      UNREACHABLE();
+    }
+  }
+}
+
+
+// Check that the store buffer contains all intergenerational pointers by
+// scanning a page and ensuring that all pointers to young space are in the
+// store buffer.
+void Heap::OldPointerSpaceCheckStoreBuffer() {
+  OldSpace* space = old_pointer_space();
+  PageIterator pages(space);
+
+  store_buffer()->SortUniq();
+
+  while (pages.has_next()) {
+    Page* page = pages.next();
+    Object** current = reinterpret_cast<Object**>(page->area_start());
+
+    Address end = page->area_end();
+
+    Object*** store_buffer_position = store_buffer()->Start();
+    Object*** store_buffer_top = store_buffer()->Top();
+
+    Object** limit = reinterpret_cast<Object**>(end);
+    CheckStoreBuffer(this, current, limit, &store_buffer_position,
+                     store_buffer_top, &EverythingsAPointer, space->top(),
+                     space->limit());
+  }
+}
+
+
+void Heap::MapSpaceCheckStoreBuffer() {
+  MapSpace* space = map_space();
+  PageIterator pages(space);
+
+  store_buffer()->SortUniq();
+
+  while (pages.has_next()) {
+    Page* page = pages.next();
+    Object** current = reinterpret_cast<Object**>(page->area_start());
+
+    Address end = page->area_end();
+
+    Object*** store_buffer_position = store_buffer()->Start();
+    Object*** store_buffer_top = store_buffer()->Top();
+
+    Object** limit = reinterpret_cast<Object**>(end);
+    CheckStoreBuffer(this, current, limit, &store_buffer_position,
+                     store_buffer_top, &IsAMapPointerAddress, space->top(),
+                     space->limit());
+  }
+}
+
+
+void Heap::LargeObjectSpaceCheckStoreBuffer() {
+  LargeObjectIterator it(lo_space());
+  for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
+    // We only have code, sequential strings, or fixed arrays in large
+    // object space, and only fixed arrays can possibly contain pointers to
+    // the young generation.
+    if (object->IsFixedArray()) {
+      Object*** store_buffer_position = store_buffer()->Start();
+      Object*** store_buffer_top = store_buffer()->Top();
+      Object** current = reinterpret_cast<Object**>(object->address());
+      Object** limit =
+          reinterpret_cast<Object**>(object->address() + object->Size());
+      CheckStoreBuffer(this, current, limit, &store_buffer_position,
+                       store_buffer_top, &EverythingsAPointer, NULL, NULL);
+    }
+  }
+}
+#endif
+
+
+void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
+  IterateStrongRoots(v, mode);
+  IterateWeakRoots(v, mode);
+}
+
+
+void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
+  v->VisitPointer(reinterpret_cast<Object**>(&roots_[kStringTableRootIndex]));
+  v->Synchronize(VisitorSynchronization::kStringTable);
+  if (mode != VISIT_ALL_IN_SCAVENGE && mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
+    // Scavenge collections have special processing for this.
+    external_string_table_.Iterate(v);
+  }
+  v->Synchronize(VisitorSynchronization::kExternalStringsTable);
+}
+
+
+void Heap::IterateSmiRoots(ObjectVisitor* v) {
+  // Acquire execution access since we are going to read stack limit values.
+  ExecutionAccess access(isolate());
+  v->VisitPointers(&roots_[kSmiRootsStart], &roots_[kRootListLength]);
+  v->Synchronize(VisitorSynchronization::kSmiRootList);
+}
+
+
+void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
+  v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
+  v->Synchronize(VisitorSynchronization::kStrongRootList);
+
+  v->VisitPointer(bit_cast<Object**>(&hidden_string_));
+  v->Synchronize(VisitorSynchronization::kInternalizedString);
+
+  isolate_->bootstrapper()->Iterate(v);
+  v->Synchronize(VisitorSynchronization::kBootstrapper);
+  isolate_->Iterate(v);
+  v->Synchronize(VisitorSynchronization::kTop);
+  Relocatable::Iterate(isolate_, v);
+  v->Synchronize(VisitorSynchronization::kRelocatable);
+
+  if (isolate_->deoptimizer_data() != NULL) {
+    isolate_->deoptimizer_data()->Iterate(v);
+  }
+  v->Synchronize(VisitorSynchronization::kDebug);
+  isolate_->compilation_cache()->Iterate(v);
+  v->Synchronize(VisitorSynchronization::kCompilationCache);
+
+  // Iterate over local handles in handle scopes.
+  isolate_->handle_scope_implementer()->Iterate(v);
+  isolate_->IterateDeferredHandles(v);
+  v->Synchronize(VisitorSynchronization::kHandleScope);
+
+  // Iterate over the builtin code objects and code stubs in the
+  // heap. Note that it is not necessary to iterate over code objects
+  // on scavenge collections.
+  if (mode != VISIT_ALL_IN_SCAVENGE) {
+    isolate_->builtins()->IterateBuiltins(v);
+  }
+  v->Synchronize(VisitorSynchronization::kBuiltins);
+
+  // Iterate over global handles.
+  switch (mode) {
+    case VISIT_ONLY_STRONG:
+      isolate_->global_handles()->IterateStrongRoots(v);
+      break;
+    case VISIT_ALL_IN_SCAVENGE:
+      isolate_->global_handles()->IterateNewSpaceStrongAndDependentRoots(v);
+      break;
+    case VISIT_ALL_IN_SWEEP_NEWSPACE:
+    case VISIT_ALL:
+      isolate_->global_handles()->IterateAllRoots(v);
+      break;
+  }
+  v->Synchronize(VisitorSynchronization::kGlobalHandles);
+
+  // Iterate over eternal handles.
+  if (mode == VISIT_ALL_IN_SCAVENGE) {
+    isolate_->eternal_handles()->IterateNewSpaceRoots(v);
+  } else {
+    isolate_->eternal_handles()->IterateAllRoots(v);
+  }
+  v->Synchronize(VisitorSynchronization::kEternalHandles);
+
+  // Iterate over pointers being held by inactive threads.
+  isolate_->thread_manager()->Iterate(v);
+  v->Synchronize(VisitorSynchronization::kThreadManager);
+
+  // Iterate over the pointers the Serialization/Deserialization code is
+  // holding.
+  // During garbage collection this keeps the partial snapshot cache alive.
+  // During deserialization of the startup snapshot this creates the partial
+  // snapshot cache and deserializes the objects it refers to.  During
+  // serialization this does nothing, since the partial snapshot cache is
+  // empty.  However the next thing we do is create the partial snapshot,
+  // filling up the partial snapshot cache with objects it needs as we go.
+  SerializerDeserializer::Iterate(isolate_, v);
+  // We don't do a v->Synchronize call here, because in debug mode that will
+  // output a flag to the snapshot.  However at this point the serializer and
+  // deserializer are deliberately a little unsynchronized (see above) so the
+  // checking of the sync flag in the snapshot would fail.
+}
+
+
+// TODO(1236194): Since the heap size is configurable on the command line
+// and through the API, we should gracefully handle the case that the heap
+// size is not big enough to fit all the initial objects.
+bool Heap::ConfigureHeap(int max_semi_space_size, int max_old_space_size,
+                         int max_executable_size, size_t code_range_size) {
+  if (HasBeenSetUp()) return false;
+
+  // Overwrite default configuration.
+  if (max_semi_space_size > 0) {
+    max_semi_space_size_ = max_semi_space_size * MB;
+  }
+  if (max_old_space_size > 0) {
+    max_old_generation_size_ = max_old_space_size * MB;
+  }
+  if (max_executable_size > 0) {
+    max_executable_size_ = max_executable_size * MB;
+  }
+
+  // If max space size flags are specified overwrite the configuration.
+  if (FLAG_max_semi_space_size > 0) {
+    max_semi_space_size_ = FLAG_max_semi_space_size * MB;
+  }
+  if (FLAG_max_old_space_size > 0) {
+    max_old_generation_size_ = FLAG_max_old_space_size * MB;
+  }
+  if (FLAG_max_executable_size > 0) {
+    max_executable_size_ = FLAG_max_executable_size * MB;
+  }
+
+  if (FLAG_stress_compaction) {
+    // This will cause more frequent GCs when stressing.
+    max_semi_space_size_ = Page::kPageSize;
+  }
+
+  if (Snapshot::HaveASnapshotToStartFrom()) {
+    // If we are using a snapshot we always reserve the default amount
+    // of memory for each semispace because code in the snapshot has
+    // write-barrier code that relies on the size and alignment of new
+    // space.  We therefore cannot use a larger max semispace size
+    // than the default reserved semispace size.
+    if (max_semi_space_size_ > reserved_semispace_size_) {
+      max_semi_space_size_ = reserved_semispace_size_;
+      if (FLAG_trace_gc) {
+        PrintPID("Max semi-space size cannot be more than %d kbytes\n",
+                 reserved_semispace_size_ >> 10);
+      }
+    }
+  } else {
+    // If we are not using snapshots we reserve space for the actual
+    // max semispace size.
+    reserved_semispace_size_ = max_semi_space_size_;
+  }
+
+  // The max executable size must be less than or equal to the max old
+  // generation size.
+  if (max_executable_size_ > max_old_generation_size_) {
+    max_executable_size_ = max_old_generation_size_;
+  }
+
+  // The new space size must be a power of two to support single-bit testing
+  // for containment.
+  max_semi_space_size_ =
+      base::bits::RoundUpToPowerOfTwo32(max_semi_space_size_);
+  reserved_semispace_size_ =
+      base::bits::RoundUpToPowerOfTwo32(reserved_semispace_size_);
+
+  if (FLAG_min_semi_space_size > 0) {
+    int initial_semispace_size = FLAG_min_semi_space_size * MB;
+    if (initial_semispace_size > max_semi_space_size_) {
+      initial_semispace_size_ = max_semi_space_size_;
+      if (FLAG_trace_gc) {
+        PrintPID(
+            "Min semi-space size cannot be more than the maximum "
+            "semi-space size of %d MB\n",
+            max_semi_space_size_ / MB);
+      }
+    } else {
+      initial_semispace_size_ = initial_semispace_size;
+    }
+  }
+
+  initial_semispace_size_ = Min(initial_semispace_size_, max_semi_space_size_);
+
+  // The old generation is paged and needs at least one page for each space.
+  int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
+  max_old_generation_size_ =
+      Max(static_cast<intptr_t>(paged_space_count * Page::kPageSize),
+          max_old_generation_size_);
+
+  // We rely on being able to allocate new arrays in paged spaces.
+  DCHECK(Page::kMaxRegularHeapObjectSize >=
+         (JSArray::kSize +
+          FixedArray::SizeFor(JSObject::kInitialMaxFastElementArray) +
+          AllocationMemento::kSize));
+
+  code_range_size_ = code_range_size * MB;
+
+  configured_ = true;
+  return true;
+}
+
+
+bool Heap::ConfigureHeapDefault() { return ConfigureHeap(0, 0, 0, 0); }
+
+
+void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
+  *stats->start_marker = HeapStats::kStartMarker;
+  *stats->end_marker = HeapStats::kEndMarker;
+  *stats->new_space_size = new_space_.SizeAsInt();
+  *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
+  *stats->old_pointer_space_size = old_pointer_space_->SizeOfObjects();
+  *stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
+  *stats->old_data_space_size = old_data_space_->SizeOfObjects();
+  *stats->old_data_space_capacity = old_data_space_->Capacity();
+  *stats->code_space_size = code_space_->SizeOfObjects();
+  *stats->code_space_capacity = code_space_->Capacity();
+  *stats->map_space_size = map_space_->SizeOfObjects();
+  *stats->map_space_capacity = map_space_->Capacity();
+  *stats->cell_space_size = cell_space_->SizeOfObjects();
+  *stats->cell_space_capacity = cell_space_->Capacity();
+  *stats->property_cell_space_size = property_cell_space_->SizeOfObjects();
+  *stats->property_cell_space_capacity = property_cell_space_->Capacity();
+  *stats->lo_space_size = lo_space_->Size();
+  isolate_->global_handles()->RecordStats(stats);
+  *stats->memory_allocator_size = isolate()->memory_allocator()->Size();
+  *stats->memory_allocator_capacity =
+      isolate()->memory_allocator()->Size() +
+      isolate()->memory_allocator()->Available();
+  *stats->os_error = base::OS::GetLastError();
+  if (take_snapshot) {
+    HeapIterator iterator(this);
+    for (HeapObject* obj = iterator.next(); obj != NULL;
+         obj = iterator.next()) {
+      InstanceType type = obj->map()->instance_type();
+      DCHECK(0 <= type && type <= LAST_TYPE);
+      stats->objects_per_type[type]++;
+      stats->size_per_type[type] += obj->Size();
+    }
+  }
+}
+
+
+intptr_t Heap::PromotedSpaceSizeOfObjects() {
+  return old_pointer_space_->SizeOfObjects() +
+         old_data_space_->SizeOfObjects() + code_space_->SizeOfObjects() +
+         map_space_->SizeOfObjects() + cell_space_->SizeOfObjects() +
+         property_cell_space_->SizeOfObjects() + lo_space_->SizeOfObjects();
+}
+
+
+int64_t Heap::PromotedExternalMemorySize() {
+  if (amount_of_external_allocated_memory_ <=
+      amount_of_external_allocated_memory_at_last_global_gc_)
+    return 0;
+  return amount_of_external_allocated_memory_ -
+         amount_of_external_allocated_memory_at_last_global_gc_;
+}
+
+
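+// Computes the next old generation allocation limit: the current old
+// generation size scaled by a growing factor that is interpolated from the
+// number of freed global handles, bounded below by the minimum limit, plus
+// the new space capacity, and capped at the halfway point between the
+// current and maximum old generation size.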
+intptr_t Heap::OldGenerationAllocationLimit(intptr_t old_gen_size,
+                                            int freed_global_handles) {
+  const int kMaxHandles = 1000;
+  const int kMinHandles = 100;
+  double min_factor = 1.1;
+  double max_factor = 4;
+  // We set the old generation growing factor to 2 to grow the heap slower on
+  // memory-constrained devices.
+  if (max_old_generation_size_ <= kMaxOldSpaceSizeMediumMemoryDevice) {
+    max_factor = 2;
+  }
+  // If there are many freed global handles, then the next full GC will
+  // likely collect a lot of garbage. Choose the heap growing factor
+  // depending on freed global handles.
+  // TODO(ulan, hpayer): Take into account mutator utilization.
+  double factor;
+  if (freed_global_handles <= kMinHandles) {
+    factor = max_factor;
+  } else if (freed_global_handles >= kMaxHandles) {
+    factor = min_factor;
+  } else {
+    // Compute factor using linear interpolation between points
+    // (kMinHandles, max_factor) and (kMaxHandles, min_factor).
+    factor = max_factor -
+             (freed_global_handles - kMinHandles) * (max_factor - min_factor) /
+                 (kMaxHandles - kMinHandles);
+  }
+
+  if (FLAG_stress_compaction ||
+      mark_compact_collector()->reduce_memory_footprint_) {
+    factor = min_factor;
+  }
+
+  intptr_t limit = static_cast<intptr_t>(old_gen_size * factor);
+  limit = Max(limit, kMinimumOldGenerationAllocationLimit);
+  limit += new_space_.Capacity();
+  intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
+  return Min(limit, halfway_to_the_max);
+}
+
+
+void Heap::EnableInlineAllocation() {
+  if (!inline_allocation_disabled_) return;
+  inline_allocation_disabled_ = false;
+
+  // Update inline allocation limit for new space.
+  new_space()->UpdateInlineAllocationLimit(0);
+}
+
+
+void Heap::DisableInlineAllocation() {
+  if (inline_allocation_disabled_) return;
+  inline_allocation_disabled_ = true;
+
+  // Update inline allocation limit for new space.
+  new_space()->UpdateInlineAllocationLimit(0);
+
+  // Update inline allocation limit for old spaces.
+  PagedSpaces spaces(this);
+  for (PagedSpace* space = spaces.next(); space != NULL;
+       space = spaces.next()) {
+    space->EmptyAllocationInfo();
+  }
+}
+
+
+V8_DECLARE_ONCE(initialize_gc_once);
+
+static void InitializeGCOnce() {
+  InitializeScavengingVisitorsTables();
+  NewSpaceScavenger::Initialize();
+  MarkCompactCollector::Initialize();
+}
+
+
+bool Heap::SetUp() {
+#ifdef DEBUG
+  allocation_timeout_ = FLAG_gc_interval;
+#endif
+
+  // Initialize heap spaces and initial maps and objects. Whenever something
+  // goes wrong, just return false. The caller should check the results and
+  // call Heap::TearDown() to release allocated memory.
+  //
+  // If the heap is not yet configured (e.g. through the API), configure it.
+  // Configuration is based on the flags new-space-size (really the semispace
+  // size) and old-space-size if set or the initial values of semispace_size_
+  // and old_generation_size_ otherwise.
+  if (!configured_) {
+    if (!ConfigureHeapDefault()) return false;
+  }
+
+  base::CallOnce(&initialize_gc_once, &InitializeGCOnce);
+
+  MarkMapPointersAsEncoded(false);
+
+  // Set up memory allocator.
+  if (!isolate_->memory_allocator()->SetUp(MaxReserved(), MaxExecutableSize()))
+    return false;
+
+  // Set up new space.
+  if (!new_space_.SetUp(reserved_semispace_size_, max_semi_space_size_)) {
+    return false;
+  }
+  new_space_top_after_last_gc_ = new_space()->top();
+
+  // Initialize old pointer space.
+  old_pointer_space_ = new OldSpace(this, max_old_generation_size_,
+                                    OLD_POINTER_SPACE, NOT_EXECUTABLE);
+  if (old_pointer_space_ == NULL) return false;
+  if (!old_pointer_space_->SetUp()) return false;
+
+  // Initialize old data space.
+  old_data_space_ = new OldSpace(this, max_old_generation_size_, OLD_DATA_SPACE,
+                                 NOT_EXECUTABLE);
+  if (old_data_space_ == NULL) return false;
+  if (!old_data_space_->SetUp()) return false;
+
+  if (!isolate_->code_range()->SetUp(code_range_size_)) return false;
+
+  // Initialize the code space, set its maximum capacity to the old
+  // generation size. It needs executable memory.
+  code_space_ =
+      new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
+  if (code_space_ == NULL) return false;
+  if (!code_space_->SetUp()) return false;
+
+  // Initialize map space.
+  map_space_ = new MapSpace(this, max_old_generation_size_, MAP_SPACE);
+  if (map_space_ == NULL) return false;
+  if (!map_space_->SetUp()) return false;
+
+  // Initialize simple cell space.
+  cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
+  if (cell_space_ == NULL) return false;
+  if (!cell_space_->SetUp()) return false;
+
+  // Initialize global property cell space.
+  property_cell_space_ = new PropertyCellSpace(this, max_old_generation_size_,
+                                               PROPERTY_CELL_SPACE);
+  if (property_cell_space_ == NULL) return false;
+  if (!property_cell_space_->SetUp()) return false;
+
+  // The large object code space may contain code or data.  We set the memory
+  // to be non-executable here for safety, but this means we need to enable it
+  // explicitly when allocating large code objects.
+  lo_space_ = new LargeObjectSpace(this, max_old_generation_size_, LO_SPACE);
+  if (lo_space_ == NULL) return false;
+  if (!lo_space_->SetUp()) return false;
+
+  // Set up the seed that is used to randomize the string hash function.
+  DCHECK(hash_seed() == 0);
+  if (FLAG_randomize_hashes) {
+    if (FLAG_hash_seed == 0) {
+      int rnd = isolate()->random_number_generator()->NextInt();
+      set_hash_seed(Smi::FromInt(rnd & Name::kHashBitMask));
+    } else {
+      set_hash_seed(Smi::FromInt(FLAG_hash_seed));
+    }
+  }
+
+  LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
+  LOG(isolate_, IntPtrTEvent("heap-available", Available()));
+
+  store_buffer()->SetUp();
+
+  mark_compact_collector()->SetUp();
+
+  return true;
+}
+
+
+bool Heap::CreateHeapObjects() {
+  // Create initial maps.
+  if (!CreateInitialMaps()) return false;
+  CreateApiObjects();
+
+  // Create initial objects
+  CreateInitialObjects();
+  CHECK_EQ(0, gc_count_);
+
+  set_native_contexts_list(undefined_value());
+  set_array_buffers_list(undefined_value());
+  set_allocation_sites_list(undefined_value());
+  weak_object_to_code_table_ = undefined_value();
+  return true;
+}
+
+
+void Heap::SetStackLimits() {
+  DCHECK(isolate_ != NULL);
+  DCHECK(isolate_ == isolate());
+  // On 64 bit machines, pointers are generally out of range of Smis.  We write
+  // something that looks like an out of range Smi to the GC.
+
+  // Set up the special root array entries containing the stack limits.
+  // These are actually addresses, but the tag makes the GC ignore it.
+  roots_[kStackLimitRootIndex] = reinterpret_cast<Object*>(
+      (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
+  roots_[kRealStackLimitRootIndex] = reinterpret_cast<Object*>(
+      (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
+}
+
+
+void Heap::TearDown() {
+#ifdef VERIFY_HEAP
+  if (FLAG_verify_heap) {
+    Verify();
+  }
+#endif
+
+  UpdateMaximumCommitted();
+
+  if (FLAG_print_cumulative_gc_stat) {
+    PrintF("\n");
+    PrintF("gc_count=%d ", gc_count_);
+    PrintF("mark_sweep_count=%d ", ms_count_);
+    PrintF("max_gc_pause=%.1f ", get_max_gc_pause());
+    PrintF("total_gc_time=%.1f ", total_gc_time_ms_);
+    PrintF("min_in_mutator=%.1f ", get_min_in_mutator());
+    PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ", get_max_alive_after_gc());
+    PrintF("total_marking_time=%.1f ", tracer_.cumulative_sweeping_duration());
+    PrintF("total_sweeping_time=%.1f ", tracer_.cumulative_sweeping_duration());
+    PrintF("\n\n");
+  }
+
+  if (FLAG_print_max_heap_committed) {
+    PrintF("\n");
+    PrintF("maximum_committed_by_heap=%" V8_PTR_PREFIX "d ",
+           MaximumCommittedMemory());
+    PrintF("maximum_committed_by_new_space=%" V8_PTR_PREFIX "d ",
+           new_space_.MaximumCommittedMemory());
+    PrintF("maximum_committed_by_old_pointer_space=%" V8_PTR_PREFIX "d ",
+           old_data_space_->MaximumCommittedMemory());
+    PrintF("maximum_committed_by_old_data_space=%" V8_PTR_PREFIX "d ",
+           old_pointer_space_->MaximumCommittedMemory());
+    PrintF("maximum_committed_by_old_data_space=%" V8_PTR_PREFIX "d ",
+           old_pointer_space_->MaximumCommittedMemory());
+    PrintF("maximum_committed_by_code_space=%" V8_PTR_PREFIX "d ",
+           code_space_->MaximumCommittedMemory());
+    PrintF("maximum_committed_by_map_space=%" V8_PTR_PREFIX "d ",
+           map_space_->MaximumCommittedMemory());
+    PrintF("maximum_committed_by_cell_space=%" V8_PTR_PREFIX "d ",
+           cell_space_->MaximumCommittedMemory());
+    PrintF("maximum_committed_by_property_space=%" V8_PTR_PREFIX "d ",
+           property_cell_space_->MaximumCommittedMemory());
+    PrintF("maximum_committed_by_lo_space=%" V8_PTR_PREFIX "d ",
+           lo_space_->MaximumCommittedMemory());
+    PrintF("\n\n");
+  }
+
+  if (FLAG_verify_predictable) {
+    PrintAlloctionsHash();
+  }
+
+  TearDownArrayBuffers();
+
+  isolate_->global_handles()->TearDown();
+
+  external_string_table_.TearDown();
+
+  mark_compact_collector()->TearDown();
+
+  new_space_.TearDown();
+
+  if (old_pointer_space_ != NULL) {
+    old_pointer_space_->TearDown();
+    delete old_pointer_space_;
+    old_pointer_space_ = NULL;
+  }
+
+  if (old_data_space_ != NULL) {
+    old_data_space_->TearDown();
+    delete old_data_space_;
+    old_data_space_ = NULL;
+  }
+
+  if (code_space_ != NULL) {
+    code_space_->TearDown();
+    delete code_space_;
+    code_space_ = NULL;
+  }
+
+  if (map_space_ != NULL) {
+    map_space_->TearDown();
+    delete map_space_;
+    map_space_ = NULL;
+  }
+
+  if (cell_space_ != NULL) {
+    cell_space_->TearDown();
+    delete cell_space_;
+    cell_space_ = NULL;
+  }
+
+  if (property_cell_space_ != NULL) {
+    property_cell_space_->TearDown();
+    delete property_cell_space_;
+    property_cell_space_ = NULL;
+  }
+
+  if (lo_space_ != NULL) {
+    lo_space_->TearDown();
+    delete lo_space_;
+    lo_space_ = NULL;
+  }
+
+  store_buffer()->TearDown();
+  incremental_marking()->TearDown();
+
+  isolate_->memory_allocator()->TearDown();
+}
+
+
+void Heap::AddGCPrologueCallback(v8::Isolate::GCPrologueCallback callback,
+                                 GCType gc_type, bool pass_isolate) {
+  DCHECK(callback != NULL);
+  GCPrologueCallbackPair pair(callback, gc_type, pass_isolate);
+  DCHECK(!gc_prologue_callbacks_.Contains(pair));
+  return gc_prologue_callbacks_.Add(pair);
+}
+
+
+void Heap::RemoveGCPrologueCallback(v8::Isolate::GCPrologueCallback callback) {
+  DCHECK(callback != NULL);
+  for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
+    if (gc_prologue_callbacks_[i].callback == callback) {
+      gc_prologue_callbacks_.Remove(i);
+      return;
+    }
+  }
+  UNREACHABLE();
+}
+
+
+void Heap::AddGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback,
+                                 GCType gc_type, bool pass_isolate) {
+  DCHECK(callback != NULL);
+  GCEpilogueCallbackPair pair(callback, gc_type, pass_isolate);
+  DCHECK(!gc_epilogue_callbacks_.Contains(pair));
+  return gc_epilogue_callbacks_.Add(pair);
+}
+
+
+void Heap::RemoveGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback) {
+  DCHECK(callback != NULL);
+  for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
+    if (gc_epilogue_callbacks_[i].callback == callback) {
+      gc_epilogue_callbacks_.Remove(i);
+      return;
+    }
+  }
+  UNREACHABLE();
+}
+
+
+// TODO(ishell): Find a better place for this.
+void Heap::AddWeakObjectToCodeDependency(Handle<Object> obj,
+                                         Handle<DependentCode> dep) {
+  DCHECK(!InNewSpace(*obj));
+  DCHECK(!InNewSpace(*dep));
+  // This handle scope keeps the table handle local to this function, which
+  // allows us to safely skip write barriers in table update operations.
+  HandleScope scope(isolate());
+  Handle<WeakHashTable> table(WeakHashTable::cast(weak_object_to_code_table_),
+                              isolate());
+  table = WeakHashTable::Put(table, obj, dep);
+
+  if (ShouldZapGarbage() && weak_object_to_code_table_ != *table) {
+    WeakHashTable::cast(weak_object_to_code_table_)->Zap(the_hole_value());
+  }
+  set_weak_object_to_code_table(*table);
+  DCHECK_EQ(*dep, table->Lookup(obj));
+}
+
+
+DependentCode* Heap::LookupWeakObjectToCodeDependency(Handle<Object> obj) {
+  Object* dep = WeakHashTable::cast(weak_object_to_code_table_)->Lookup(obj);
+  if (dep->IsDependentCode()) return DependentCode::cast(dep);
+  return DependentCode::cast(empty_fixed_array());
+}
+
+
+void Heap::EnsureWeakObjectToCodeTable() {
+  if (!weak_object_to_code_table()->IsHashTable()) {
+    set_weak_object_to_code_table(
+        *WeakHashTable::New(isolate(), 16, USE_DEFAULT_MINIMUM_CAPACITY,
+                            TENURED));
+  }
+}
+
+
+void Heap::FatalProcessOutOfMemory(const char* location, bool take_snapshot) {
+  v8::internal::V8::FatalProcessOutOfMemory(location, take_snapshot);
+}
+
+#ifdef DEBUG
+
+class PrintHandleVisitor : public ObjectVisitor {
+ public:
+  void VisitPointers(Object** start, Object** end) {
+    for (Object** p = start; p < end; p++)
+      PrintF("  handle %p to %p\n", reinterpret_cast<void*>(p),
+             reinterpret_cast<void*>(*p));
+  }
+};
+
+
+void Heap::PrintHandles() {
+  PrintF("Handles:\n");
+  PrintHandleVisitor v;
+  isolate_->handle_scope_implementer()->Iterate(&v);
+}
+
+#endif
+
+
+Space* AllSpaces::next() {
+  switch (counter_++) {
+    case NEW_SPACE:
+      return heap_->new_space();
+    case OLD_POINTER_SPACE:
+      return heap_->old_pointer_space();
+    case OLD_DATA_SPACE:
+      return heap_->old_data_space();
+    case CODE_SPACE:
+      return heap_->code_space();
+    case MAP_SPACE:
+      return heap_->map_space();
+    case CELL_SPACE:
+      return heap_->cell_space();
+    case PROPERTY_CELL_SPACE:
+      return heap_->property_cell_space();
+    case LO_SPACE:
+      return heap_->lo_space();
+    default:
+      return NULL;
+  }
+}
+
+
+PagedSpace* PagedSpaces::next() {
+  switch (counter_++) {
+    case OLD_POINTER_SPACE:
+      return heap_->old_pointer_space();
+    case OLD_DATA_SPACE:
+      return heap_->old_data_space();
+    case CODE_SPACE:
+      return heap_->code_space();
+    case MAP_SPACE:
+      return heap_->map_space();
+    case CELL_SPACE:
+      return heap_->cell_space();
+    case PROPERTY_CELL_SPACE:
+      return heap_->property_cell_space();
+    default:
+      return NULL;
+  }
+}
+
+
+OldSpace* OldSpaces::next() {
+  switch (counter_++) {
+    case OLD_POINTER_SPACE:
+      return heap_->old_pointer_space();
+    case OLD_DATA_SPACE:
+      return heap_->old_data_space();
+    case CODE_SPACE:
+      return heap_->code_space();
+    default:
+      return NULL;
+  }
+}
+
+
+SpaceIterator::SpaceIterator(Heap* heap)
+    : heap_(heap),
+      current_space_(FIRST_SPACE),
+      iterator_(NULL),
+      size_func_(NULL) {}
+
+
+SpaceIterator::SpaceIterator(Heap* heap, HeapObjectCallback size_func)
+    : heap_(heap),
+      current_space_(FIRST_SPACE),
+      iterator_(NULL),
+      size_func_(size_func) {}
+
+
+SpaceIterator::~SpaceIterator() {
+  // Delete active iterator if any.
+  delete iterator_;
+}
+
+
+bool SpaceIterator::has_next() {
+  // Iterate until no more spaces.
+  return current_space_ != LAST_SPACE;
+}
+
+
+ObjectIterator* SpaceIterator::next() {
+  if (iterator_ != NULL) {
+    delete iterator_;
+    iterator_ = NULL;
+    // Move to the next space
+    current_space_++;
+    if (current_space_ > LAST_SPACE) {
+      return NULL;
+    }
+  }
+
+  // Return iterator for the new current space.
+  return CreateIterator();
+}
+
+
+// Create an iterator for the space to iterate.
+ObjectIterator* SpaceIterator::CreateIterator() {
+  DCHECK(iterator_ == NULL);
+
+  switch (current_space_) {
+    case NEW_SPACE:
+      iterator_ = new SemiSpaceIterator(heap_->new_space(), size_func_);
+      break;
+    case OLD_POINTER_SPACE:
+      iterator_ =
+          new HeapObjectIterator(heap_->old_pointer_space(), size_func_);
+      break;
+    case OLD_DATA_SPACE:
+      iterator_ = new HeapObjectIterator(heap_->old_data_space(), size_func_);
+      break;
+    case CODE_SPACE:
+      iterator_ = new HeapObjectIterator(heap_->code_space(), size_func_);
+      break;
+    case MAP_SPACE:
+      iterator_ = new HeapObjectIterator(heap_->map_space(), size_func_);
+      break;
+    case CELL_SPACE:
+      iterator_ = new HeapObjectIterator(heap_->cell_space(), size_func_);
+      break;
+    case PROPERTY_CELL_SPACE:
+      iterator_ =
+          new HeapObjectIterator(heap_->property_cell_space(), size_func_);
+      break;
+    case LO_SPACE:
+      iterator_ = new LargeObjectIterator(heap_->lo_space(), size_func_);
+      break;
+  }
+
+  // Return the newly allocated iterator.
+  DCHECK(iterator_ != NULL);
+  return iterator_;
+}
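+// Illustrative usage (a sketch, not code from this change): a caller can walk
+// every object in every space by pairing a SpaceIterator with the per-space
+// ObjectIterator it hands out:
+//
+//   SpaceIterator spaces(heap);
+//   while (spaces.has_next()) {
+//     ObjectIterator* it = spaces.next();
+//     for (HeapObject* obj = it->next_object(); obj != NULL;
+//          obj = it->next_object()) {
+//       // ... inspect obj ...
+//     }
+//   }
+//
+// HeapIterator below is essentially this pattern plus optional filtering.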
+
+
+class HeapObjectsFilter {
+ public:
+  virtual ~HeapObjectsFilter() {}
+  virtual bool SkipObject(HeapObject* object) = 0;
+};
+
+
+class UnreachableObjectsFilter : public HeapObjectsFilter {
+ public:
+  explicit UnreachableObjectsFilter(Heap* heap) : heap_(heap) {
+    MarkReachableObjects();
+  }
+
+  ~UnreachableObjectsFilter() {
+    heap_->mark_compact_collector()->ClearMarkbits();
+  }
+
+  bool SkipObject(HeapObject* object) {
+    MarkBit mark_bit = Marking::MarkBitFrom(object);
+    return !mark_bit.Get();
+  }
+
+ private:
+  class MarkingVisitor : public ObjectVisitor {
+   public:
+    MarkingVisitor() : marking_stack_(10) {}
+
+    void VisitPointers(Object** start, Object** end) {
+      for (Object** p = start; p < end; p++) {
+        if (!(*p)->IsHeapObject()) continue;
+        HeapObject* obj = HeapObject::cast(*p);
+        MarkBit mark_bit = Marking::MarkBitFrom(obj);
+        if (!mark_bit.Get()) {
+          mark_bit.Set();
+          marking_stack_.Add(obj);
+        }
+      }
+    }
+
+    void TransitiveClosure() {
+      while (!marking_stack_.is_empty()) {
+        HeapObject* obj = marking_stack_.RemoveLast();
+        obj->Iterate(this);
+      }
+    }
+
+   private:
+    List<HeapObject*> marking_stack_;
+  };
+
+  void MarkReachableObjects() {
+    MarkingVisitor visitor;
+    heap_->IterateRoots(&visitor, VISIT_ALL);
+    visitor.TransitiveClosure();
+  }
+
+  Heap* heap_;
+  DisallowHeapAllocation no_allocation_;
+};
+
+
+HeapIterator::HeapIterator(Heap* heap)
+    : make_heap_iterable_helper_(heap),
+      no_heap_allocation_(),
+      heap_(heap),
+      filtering_(HeapIterator::kNoFiltering),
+      filter_(NULL) {
+  Init();
+}
+
+
+HeapIterator::HeapIterator(Heap* heap,
+                           HeapIterator::HeapObjectsFiltering filtering)
+    : make_heap_iterable_helper_(heap),
+      no_heap_allocation_(),
+      heap_(heap),
+      filtering_(filtering),
+      filter_(NULL) {
+  Init();
+}
+
+
+HeapIterator::~HeapIterator() { Shutdown(); }
+
+
+void HeapIterator::Init() {
+  // Start the iteration.
+  space_iterator_ = new SpaceIterator(heap_);
+  switch (filtering_) {
+    case kFilterUnreachable:
+      filter_ = new UnreachableObjectsFilter(heap_);
+      break;
+    default:
+      break;
+  }
+  object_iterator_ = space_iterator_->next();
+}
+
+
+void HeapIterator::Shutdown() {
+#ifdef DEBUG
+  // Assert that in filtering mode we have iterated through all
+  // objects. Otherwise, the heap will be left in an inconsistent state.
+  if (filtering_ != kNoFiltering) {
+    DCHECK(object_iterator_ == NULL);
+  }
+#endif
+  // Make sure the last iterator is deallocated.
+  delete space_iterator_;
+  space_iterator_ = NULL;
+  object_iterator_ = NULL;
+  delete filter_;
+  filter_ = NULL;
+}
+
+
+HeapObject* HeapIterator::next() {
+  if (filter_ == NULL) return NextObject();
+
+  HeapObject* obj = NextObject();
+  while (obj != NULL && filter_->SkipObject(obj)) obj = NextObject();
+  return obj;
+}
+
+
+HeapObject* HeapIterator::NextObject() {
+  // No iterator means we are done.
+  if (object_iterator_ == NULL) return NULL;
+
+  if (HeapObject* obj = object_iterator_->next_object()) {
+    // If the current iterator has more objects we are fine.
+    return obj;
+  } else {
+    // Go through the spaces looking for one that has objects.
+    while (space_iterator_->has_next()) {
+      object_iterator_ = space_iterator_->next();
+      if (HeapObject* obj = object_iterator_->next_object()) {
+        return obj;
+      }
+    }
+  }
+  // Done with the last space.
+  object_iterator_ = NULL;
+  return NULL;
+}
+
+
+void HeapIterator::reset() {
+  // Restart the iterator.
+  Shutdown();
+  Init();
+}
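+// Illustrative usage (a sketch, not code from this change): the typical way
+// to walk the heap, optionally skipping unreachable objects, is
+//
+//   HeapIterator iterator(heap, HeapIterator::kFilterUnreachable);
+//   for (HeapObject* obj = iterator.next(); obj != NULL;
+//        obj = iterator.next()) {
+//     // ... only objects reachable from the roots are returned here ...
+//   }
+//
+// In filtering mode the iteration must run to completion (see the DCHECK in
+// Shutdown above): the filter temporarily uses the mark bits, which are
+// cleared again when the filter is destroyed.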
+
+
+#ifdef DEBUG
+
+Object* const PathTracer::kAnyGlobalObject = NULL;
+
+class PathTracer::MarkVisitor : public ObjectVisitor {
+ public:
+  explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
+  void VisitPointers(Object** start, Object** end) {
+    // Scan all HeapObject pointers in [start, end)
+    for (Object** p = start; !tracer_->found() && (p < end); p++) {
+      if ((*p)->IsHeapObject()) tracer_->MarkRecursively(p, this);
+    }
+  }
+
+ private:
+  PathTracer* tracer_;
+};
+
+
+class PathTracer::UnmarkVisitor : public ObjectVisitor {
+ public:
+  explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
+  void VisitPointers(Object** start, Object** end) {
+    // Scan all HeapObject pointers in [start, end)
+    for (Object** p = start; p < end; p++) {
+      if ((*p)->IsHeapObject()) tracer_->UnmarkRecursively(p, this);
+    }
+  }
+
+ private:
+  PathTracer* tracer_;
+};
+
+
+void PathTracer::VisitPointers(Object** start, Object** end) {
+  bool done = ((what_to_find_ == FIND_FIRST) && found_target_);
+  // Visit all HeapObject pointers in [start, end)
+  for (Object** p = start; !done && (p < end); p++) {
+    if ((*p)->IsHeapObject()) {
+      TracePathFrom(p);
+      done = ((what_to_find_ == FIND_FIRST) && found_target_);
+    }
+  }
+}
+
+
+void PathTracer::Reset() {
+  found_target_ = false;
+  object_stack_.Clear();
+}
+
+
+void PathTracer::TracePathFrom(Object** root) {
+  DCHECK((search_target_ == kAnyGlobalObject) ||
+         search_target_->IsHeapObject());
+  found_target_in_trace_ = false;
+  Reset();
+
+  MarkVisitor mark_visitor(this);
+  MarkRecursively(root, &mark_visitor);
+
+  UnmarkVisitor unmark_visitor(this);
+  UnmarkRecursively(root, &unmark_visitor);
+
+  ProcessResults();
+}
+
+
+static bool SafeIsNativeContext(HeapObject* obj) {
+  return obj->map() == obj->GetHeap()->raw_unchecked_native_context_map();
+}
+
+
+void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
+  if (!(*p)->IsHeapObject()) return;
+
+  HeapObject* obj = HeapObject::cast(*p);
+
+  MapWord map_word = obj->map_word();
+  if (!map_word.ToMap()->IsHeapObject()) return;  // visited before
+
+  if (found_target_in_trace_) return;  // stop if target found
+  object_stack_.Add(obj);
+  if (((search_target_ == kAnyGlobalObject) && obj->IsJSGlobalObject()) ||
+      (obj == search_target_)) {
+    found_target_in_trace_ = true;
+    found_target_ = true;
+    return;
+  }
+
+  bool is_native_context = SafeIsNativeContext(obj);
+
+  // not visited yet
+  Map* map = Map::cast(map_word.ToMap());
+
+  MapWord marked_map_word =
+      MapWord::FromRawValue(obj->map_word().ToRawValue() + kMarkTag);
+  obj->set_map_word(marked_map_word);
+
+  // Scan the object body.
+  if (is_native_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
+    // This is specialized to scan Context's properly.
+    Object** start =
+        reinterpret_cast<Object**>(obj->address() + Context::kHeaderSize);
+    Object** end =
+        reinterpret_cast<Object**>(obj->address() + Context::kHeaderSize +
+                                   Context::FIRST_WEAK_SLOT * kPointerSize);
+    mark_visitor->VisitPointers(start, end);
+  } else {
+    obj->IterateBody(map->instance_type(), obj->SizeFromMap(map), mark_visitor);
+  }
+
+  // Scan the map after the body because the body is a lot more interesting
+  // when doing leak detection.
+  MarkRecursively(reinterpret_cast<Object**>(&map), mark_visitor);
+
+  if (!found_target_in_trace_) {  // don't pop if found the target
+    object_stack_.RemoveLast();
+  }
+}
+
+
+void PathTracer::UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor) {
+  if (!(*p)->IsHeapObject()) return;
+
+  HeapObject* obj = HeapObject::cast(*p);
+
+  MapWord map_word = obj->map_word();
+  if (map_word.ToMap()->IsHeapObject()) return;  // unmarked already
+
+  MapWord unmarked_map_word =
+      MapWord::FromRawValue(map_word.ToRawValue() - kMarkTag);
+  obj->set_map_word(unmarked_map_word);
+
+  Map* map = Map::cast(unmarked_map_word.ToMap());
+
+  UnmarkRecursively(reinterpret_cast<Object**>(&map), unmark_visitor);
+
+  obj->IterateBody(map->instance_type(), obj->SizeFromMap(map), unmark_visitor);
+}
+
+
+void PathTracer::ProcessResults() {
+  if (found_target_) {
+    OFStream os(stdout);
+    os << "=====================================\n"
+       << "====        Path to object       ====\n"
+       << "=====================================\n\n";
+
+    DCHECK(!object_stack_.is_empty());
+    for (int i = 0; i < object_stack_.length(); i++) {
+      if (i > 0) os << "\n     |\n     |\n     V\n\n";
+      object_stack_[i]->Print(os);
+    }
+    os << "=====================================\n";
+  }
+}
+
+
+// Triggers a depth-first traversal of reachable objects from one
+// given root object and finds a path to a specific heap object and
+// prints it.
+void Heap::TracePathToObjectFrom(Object* target, Object* root) {
+  PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
+  tracer.VisitPointer(&root);
+}
+
+
+// Triggers a depth-first traversal of reachable objects from roots
+// and finds a path to a specific heap object and prints it.
+void Heap::TracePathToObject(Object* target) {
+  PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
+  IterateRoots(&tracer, VISIT_ONLY_STRONG);
+}
+
+
+// Triggers a depth-first traversal of reachable objects from roots
+// and finds a path to any global object and prints it. Useful for
+// determining the source for leaks of global objects.
+void Heap::TracePathToGlobal() {
+  PathTracer tracer(PathTracer::kAnyGlobalObject, PathTracer::FIND_ALL,
+                    VISIT_ALL);
+  IterateRoots(&tracer, VISIT_ONLY_STRONG);
+}
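+// Illustrative usage (debug builds only, a sketch): from a debugger one can
+// call, for example,
+//
+//   heap->TracePathToObject(suspect);  // suspect is some Object* of interest
+//   heap->TracePathToGlobal();
+//
+// and PathTracer prints the chain of objects leading from the roots to the
+// target, which helps when hunting leaks of global objects.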
+#endif
+
+
+void Heap::UpdateCumulativeGCStatistics(double duration,
+                                        double spent_in_mutator,
+                                        double marking_time) {
+  if (FLAG_print_cumulative_gc_stat) {
+    total_gc_time_ms_ += duration;
+    max_gc_pause_ = Max(max_gc_pause_, duration);
+    max_alive_after_gc_ = Max(max_alive_after_gc_, SizeOfObjects());
+    min_in_mutator_ = Min(min_in_mutator_, spent_in_mutator);
+  } else if (FLAG_trace_gc_verbose) {
+    total_gc_time_ms_ += duration;
+  }
+
+  marking_time_ += marking_time;
+}
+
+
+int KeyedLookupCache::Hash(Handle<Map> map, Handle<Name> name) {
+  DisallowHeapAllocation no_gc;
+  // Uses only lower 32 bits if pointers are larger.
+  uintptr_t addr_hash =
+      static_cast<uint32_t>(reinterpret_cast<uintptr_t>(*map)) >> kMapHashShift;
+  return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
+}
+
+
+int KeyedLookupCache::Lookup(Handle<Map> map, Handle<Name> name) {
+  DisallowHeapAllocation no_gc;
+  int index = (Hash(map, name) & kHashMask);
+  for (int i = 0; i < kEntriesPerBucket; i++) {
+    Key& key = keys_[index + i];
+    if ((key.map == *map) && key.name->Equals(*name)) {
+      return field_offsets_[index + i];
+    }
+  }
+  return kNotFound;
+}
+
+
+void KeyedLookupCache::Update(Handle<Map> map, Handle<Name> name,
+                              int field_offset) {
+  DisallowHeapAllocation no_gc;
+  if (!name->IsUniqueName()) {
+    if (!StringTable::InternalizeStringIfExists(
+             name->GetIsolate(), Handle<String>::cast(name)).ToHandle(&name)) {
+      return;
+    }
+  }
+  // This cache is cleared only between mark compact passes, so we expect the
+  // cache to only contain old space names.
+  DCHECK(!map->GetIsolate()->heap()->InNewSpace(*name));
+
+  int index = (Hash(map, name) & kHashMask);
+  // After a GC there will be free slots, so we use them in order (this may
+  // help to get the most frequently used one in position 0).
+  for (int i = 0; i < kEntriesPerBucket; i++) {
+    Key& key = keys_[index];
+    Object* free_entry_indicator = NULL;
+    if (key.map == free_entry_indicator) {
+      key.map = *map;
+      key.name = *name;
+      field_offsets_[index + i] = field_offset;
+      return;
+    }
+  }
+  // No free entry found in this bucket, so we move them all down one and
+  // put the new entry at position zero.
+  for (int i = kEntriesPerBucket - 1; i > 0; i--) {
+    Key& key = keys_[index + i];
+    Key& key2 = keys_[index + i - 1];
+    key = key2;
+    field_offsets_[index + i] = field_offsets_[index + i - 1];
+  }
+
+  // Write the new first entry.
+  Key& key = keys_[index];
+  key.map = *map;
+  key.name = *name;
+  field_offsets_[index] = field_offset;
+}
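+// Illustrative usage (a sketch; the slow-path helper is hypothetical):
+// callers consult the cache before a slow keyed property lookup and install
+// the result on a miss:
+//
+//   int offset = isolate->keyed_lookup_cache()->Lookup(map, name);
+//   if (offset == KeyedLookupCache::kNotFound) {
+//     offset = ComputeFieldOffsetSlow(map, name);  // hypothetical slow path
+//     isolate->keyed_lookup_cache()->Update(map, name, offset);
+//   }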
+
+
+void KeyedLookupCache::Clear() {
+  for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
+}
+
+
+void DescriptorLookupCache::Clear() {
+  for (int index = 0; index < kLength; index++) keys_[index].source = NULL;
+}
+
+
+void ExternalStringTable::CleanUp() {
+  int last = 0;
+  for (int i = 0; i < new_space_strings_.length(); ++i) {
+    if (new_space_strings_[i] == heap_->the_hole_value()) {
+      continue;
+    }
+    DCHECK(new_space_strings_[i]->IsExternalString());
+    if (heap_->InNewSpace(new_space_strings_[i])) {
+      new_space_strings_[last++] = new_space_strings_[i];
+    } else {
+      old_space_strings_.Add(new_space_strings_[i]);
+    }
+  }
+  new_space_strings_.Rewind(last);
+  new_space_strings_.Trim();
+
+  last = 0;
+  for (int i = 0; i < old_space_strings_.length(); ++i) {
+    if (old_space_strings_[i] == heap_->the_hole_value()) {
+      continue;
+    }
+    DCHECK(old_space_strings_[i]->IsExternalString());
+    DCHECK(!heap_->InNewSpace(old_space_strings_[i]));
+    old_space_strings_[last++] = old_space_strings_[i];
+  }
+  old_space_strings_.Rewind(last);
+  old_space_strings_.Trim();
+#ifdef VERIFY_HEAP
+  if (FLAG_verify_heap) {
+    Verify();
+  }
+#endif
+}
+
+
+void ExternalStringTable::TearDown() {
+  for (int i = 0; i < new_space_strings_.length(); ++i) {
+    heap_->FinalizeExternalString(ExternalString::cast(new_space_strings_[i]));
+  }
+  new_space_strings_.Free();
+  for (int i = 0; i < old_space_strings_.length(); ++i) {
+    heap_->FinalizeExternalString(ExternalString::cast(old_space_strings_[i]));
+  }
+  old_space_strings_.Free();
+}
+
+
+void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
+  chunk->set_next_chunk(chunks_queued_for_free_);
+  chunks_queued_for_free_ = chunk;
+}
+
+
+void Heap::FreeQueuedChunks() {
+  if (chunks_queued_for_free_ == NULL) return;
+  MemoryChunk* next;
+  MemoryChunk* chunk;
+  for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
+    next = chunk->next_chunk();
+    chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
+
+    if (chunk->owner()->identity() == LO_SPACE) {
+      // StoreBuffer::Filter relies on MemoryChunk::FromAnyPointerAddress.
+      // If FromAnyPointerAddress encounters a slot that belongs to a large
+      // chunk queued for deletion it will fail to find the chunk, because it
+      // searches the list of pages owned by the large object space, and the
+      // queued chunks have been detached from that list.
+      // To work around this we split the large chunk into normal kPageSize
+      // aligned pieces and initialize the size, owner and flags field of
+      // every piece.
+      // If FromAnyPointerAddress encounters a slot that belongs to one of
+      // these smaller pieces it will treat it as a slot on a normal Page.
+      Address chunk_end = chunk->address() + chunk->size();
+      MemoryChunk* inner =
+          MemoryChunk::FromAddress(chunk->address() + Page::kPageSize);
+      MemoryChunk* inner_last = MemoryChunk::FromAddress(chunk_end - 1);
+      while (inner <= inner_last) {
+        // Size of a large chunk is always a multiple of
+        // OS::AllocateAlignment() so there is always
+        // enough space for a fake MemoryChunk header.
+        Address area_end = Min(inner->address() + Page::kPageSize, chunk_end);
+        // Guard against overflow.
+        if (area_end < inner->address()) area_end = chunk_end;
+        inner->SetArea(inner->address(), area_end);
+        inner->set_size(Page::kPageSize);
+        inner->set_owner(lo_space());
+        inner->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
+        inner = MemoryChunk::FromAddress(inner->address() + Page::kPageSize);
+      }
+    }
+  }
+  isolate_->heap()->store_buffer()->Compact();
+  isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
+  for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
+    next = chunk->next_chunk();
+    isolate_->memory_allocator()->Free(chunk);
+  }
+  chunks_queued_for_free_ = NULL;
+}
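+// Worked example of the splitting above (illustrative numbers, assuming 1 MB
+// pages): a 3.5 MB large-object chunk starting at address A gets fake
+// MemoryChunk headers at A + 1 MB, A + 2 MB and A + 3 MB; the last piece's
+// area end is clamped to A + 3.5 MB, the real chunk end. A store buffer slot
+// anywhere inside the chunk then resolves either to the original header or to
+// one of these pieces, all flagged ABOUT_TO_BE_FREED, so the Filter() call
+// above drops it.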
+
+
+void Heap::RememberUnmappedPage(Address page, bool compacted) {
+  uintptr_t p = reinterpret_cast<uintptr_t>(page);
+  // Tag the page pointer to make it findable in the dump file.
+  if (compacted) {
+    p ^= 0xc1ead & (Page::kPageSize - 1);  // Cleared.
+  } else {
+    p ^= 0x1d1ed & (Page::kPageSize - 1);  // I died.
+  }
+  remembered_unmapped_pages_[remembered_unmapped_pages_index_] =
+      reinterpret_cast<Address>(p);
+  remembered_unmapped_pages_index_++;
+  remembered_unmapped_pages_index_ %= kRememberedUnmappedPages;
+}
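+// Worked example (illustrative, assuming 1 MB pages, i.e. a low-bit mask of
+// 0xfffff): for a page-aligned address the low 20 bits are zero, so the XOR
+// simply stamps them with the tag. A compacted page at 0x3fe00000 is
+// remembered as 0x3fec1ead ("cleared"), an unmapped one as 0x3fe1d1ed
+// ("I died"), which makes these entries easy to spot in a crash dump.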
+
+
+void Heap::ClearObjectStats(bool clear_last_time_stats) {
+  memset(object_counts_, 0, sizeof(object_counts_));
+  memset(object_sizes_, 0, sizeof(object_sizes_));
+  if (clear_last_time_stats) {
+    memset(object_counts_last_time_, 0, sizeof(object_counts_last_time_));
+    memset(object_sizes_last_time_, 0, sizeof(object_sizes_last_time_));
+  }
+}
+
+
+static base::LazyMutex checkpoint_object_stats_mutex = LAZY_MUTEX_INITIALIZER;
+
+
+void Heap::CheckpointObjectStats() {
+  base::LockGuard<base::Mutex> lock_guard(
+      checkpoint_object_stats_mutex.Pointer());
+  Counters* counters = isolate()->counters();
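+  // The macro expansions below credit each counter with the delta since the
+  // previous checkpoint. Worked example (illustrative numbers): if 120
+  // JS_OBJECT_TYPE instances were counted during this GC and 100 at the last
+  // checkpoint, count_of_JS_OBJECT_TYPE is incremented by 120 and decremented
+  // by 100, a net change of +20.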
+#define ADJUST_LAST_TIME_OBJECT_COUNT(name)              \
+  counters->count_of_##name()->Increment(                \
+      static_cast<int>(object_counts_[name]));           \
+  counters->count_of_##name()->Decrement(                \
+      static_cast<int>(object_counts_last_time_[name])); \
+  counters->size_of_##name()->Increment(                 \
+      static_cast<int>(object_sizes_[name]));            \
+  counters->size_of_##name()->Decrement(                 \
+      static_cast<int>(object_sizes_last_time_[name]));
+  INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
+#undef ADJUST_LAST_TIME_OBJECT_COUNT
+  int index;
+#define ADJUST_LAST_TIME_OBJECT_COUNT(name)               \
+  index = FIRST_CODE_KIND_SUB_TYPE + Code::name;          \
+  counters->count_of_CODE_TYPE_##name()->Increment(       \
+      static_cast<int>(object_counts_[index]));           \
+  counters->count_of_CODE_TYPE_##name()->Decrement(       \
+      static_cast<int>(object_counts_last_time_[index])); \
+  counters->size_of_CODE_TYPE_##name()->Increment(        \
+      static_cast<int>(object_sizes_[index]));            \
+  counters->size_of_CODE_TYPE_##name()->Decrement(        \
+      static_cast<int>(object_sizes_last_time_[index]));
+  CODE_KIND_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
+#undef ADJUST_LAST_TIME_OBJECT_COUNT
+#define ADJUST_LAST_TIME_OBJECT_COUNT(name)               \
+  index = FIRST_FIXED_ARRAY_SUB_TYPE + name;              \
+  counters->count_of_FIXED_ARRAY_##name()->Increment(     \
+      static_cast<int>(object_counts_[index]));           \
+  counters->count_of_FIXED_ARRAY_##name()->Decrement(     \
+      static_cast<int>(object_counts_last_time_[index])); \
+  counters->size_of_FIXED_ARRAY_##name()->Increment(      \
+      static_cast<int>(object_sizes_[index]));            \
+  counters->size_of_FIXED_ARRAY_##name()->Decrement(      \
+      static_cast<int>(object_sizes_last_time_[index]));
+  FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
+#undef ADJUST_LAST_TIME_OBJECT_COUNT
+#define ADJUST_LAST_TIME_OBJECT_COUNT(name)                                   \
+  index =                                                                     \
+      FIRST_CODE_AGE_SUB_TYPE + Code::k##name##CodeAge - Code::kFirstCodeAge; \
+  counters->count_of_CODE_AGE_##name()->Increment(                            \
+      static_cast<int>(object_counts_[index]));                               \
+  counters->count_of_CODE_AGE_##name()->Decrement(                            \
+      static_cast<int>(object_counts_last_time_[index]));                     \
+  counters->size_of_CODE_AGE_##name()->Increment(                             \
+      static_cast<int>(object_sizes_[index]));                                \
+  counters->size_of_CODE_AGE_##name()->Decrement(                             \
+      static_cast<int>(object_sizes_last_time_[index]));
+  CODE_AGE_LIST_COMPLETE(ADJUST_LAST_TIME_OBJECT_COUNT)
+#undef ADJUST_LAST_TIME_OBJECT_COUNT
+
+  MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
+  MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
+  ClearObjectStats();
+}
+}
+}  // namespace v8::internal
diff --git a/src/heap/heap.gyp b/src/heap/heap.gyp
new file mode 100644
index 0000000..2970eb8
--- /dev/null
+++ b/src/heap/heap.gyp
@@ -0,0 +1,52 @@
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+  'variables': {
+    'v8_code': 1,
+  },
+  'includes': ['../../build/toolchain.gypi', '../../build/features.gypi'],
+  'targets': [
+    {
+      'target_name': 'heap-unittests',
+      'type': 'executable',
+      'dependencies': [
+        '../../testing/gtest.gyp:gtest',
+        '../../testing/gtest.gyp:gtest_main',
+        '../../tools/gyp/v8.gyp:v8_libplatform',
+      ],
+      'include_dirs': [
+        '../..',
+      ],
+      'sources': [  ### gcmole(all) ###
+        'gc-idle-time-handler-unittest.cc',
+      ],
+      'conditions': [
+        ['component=="shared_library"', {
+          # heap-unittests can't be built against a shared library, so we
+          # need to depend on the underlying static target in that case.
+          'conditions': [
+            ['v8_use_snapshot=="true"', {
+              'dependencies': ['../../tools/gyp/v8.gyp:v8_snapshot'],
+            },
+            {
+              'dependencies': [
+                '../../tools/gyp/v8.gyp:v8_nosnapshot',
+              ],
+            }],
+          ],
+        }, {
+          'dependencies': ['../../tools/gyp/v8.gyp:v8'],
+        }],
+        ['os_posix == 1', {
+          # TODO(svenpanne): This is a temporary work-around to fix the warnings
+          # that show up because we use -std=gnu++0x instead of -std=c++11.
+          'cflags!': [
+            '-pedantic',
+          ],
+        }],
+      ],
+    },
+  ],
+}
diff --git a/src/heap/heap.h b/src/heap/heap.h
new file mode 100644
index 0000000..c9d0f31
--- /dev/null
+++ b/src/heap/heap.h
@@ -0,0 +1,2503 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_HEAP_H_
+#define V8_HEAP_HEAP_H_
+
+#include <cmath>
+
+#include "src/allocation.h"
+#include "src/assert-scope.h"
+#include "src/counters.h"
+#include "src/globals.h"
+#include "src/heap/gc-idle-time-handler.h"
+#include "src/heap/gc-tracer.h"
+#include "src/heap/incremental-marking.h"
+#include "src/heap/mark-compact.h"
+#include "src/heap/objects-visiting.h"
+#include "src/heap/spaces.h"
+#include "src/heap/store-buffer.h"
+#include "src/list.h"
+#include "src/splay-tree-inl.h"
+
+namespace v8 {
+namespace internal {
+
+// Defines all the roots in Heap.
+#define STRONG_ROOT_LIST(V)                                                    \
+  V(Map, byte_array_map, ByteArrayMap)                                         \
+  V(Map, free_space_map, FreeSpaceMap)                                         \
+  V(Map, one_pointer_filler_map, OnePointerFillerMap)                          \
+  V(Map, two_pointer_filler_map, TwoPointerFillerMap)                          \
+  /* Cluster the most popular ones in a few cache lines here at the top.    */ \
+  V(Smi, store_buffer_top, StoreBufferTop)                                     \
+  V(Oddball, undefined_value, UndefinedValue)                                  \
+  V(Oddball, the_hole_value, TheHoleValue)                                     \
+  V(Oddball, null_value, NullValue)                                            \
+  V(Oddball, true_value, TrueValue)                                            \
+  V(Oddball, false_value, FalseValue)                                          \
+  V(Oddball, uninitialized_value, UninitializedValue)                          \
+  V(Oddball, exception, Exception)                                             \
+  V(Map, cell_map, CellMap)                                                    \
+  V(Map, global_property_cell_map, GlobalPropertyCellMap)                      \
+  V(Map, shared_function_info_map, SharedFunctionInfoMap)                      \
+  V(Map, meta_map, MetaMap)                                                    \
+  V(Map, heap_number_map, HeapNumberMap)                                       \
+  V(Map, mutable_heap_number_map, MutableHeapNumberMap)                        \
+  V(Map, native_context_map, NativeContextMap)                                 \
+  V(Map, fixed_array_map, FixedArrayMap)                                       \
+  V(Map, code_map, CodeMap)                                                    \
+  V(Map, scope_info_map, ScopeInfoMap)                                         \
+  V(Map, fixed_cow_array_map, FixedCOWArrayMap)                                \
+  V(Map, fixed_double_array_map, FixedDoubleArrayMap)                          \
+  V(Map, constant_pool_array_map, ConstantPoolArrayMap)                        \
+  V(Oddball, no_interceptor_result_sentinel, NoInterceptorResultSentinel)      \
+  V(Map, hash_table_map, HashTableMap)                                         \
+  V(Map, ordered_hash_table_map, OrderedHashTableMap)                          \
+  V(FixedArray, empty_fixed_array, EmptyFixedArray)                            \
+  V(ByteArray, empty_byte_array, EmptyByteArray)                               \
+  V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray)             \
+  V(ConstantPoolArray, empty_constant_pool_array, EmptyConstantPoolArray)      \
+  V(Oddball, arguments_marker, ArgumentsMarker)                                \
+  /* The roots above this line should be boring from a GC point of view.    */ \
+  /* This means they are never in new space and never on a page that is     */ \
+  /* being compacted.                                                       */ \
+  V(FixedArray, number_string_cache, NumberStringCache)                        \
+  V(Object, instanceof_cache_function, InstanceofCacheFunction)                \
+  V(Object, instanceof_cache_map, InstanceofCacheMap)                          \
+  V(Object, instanceof_cache_answer, InstanceofCacheAnswer)                    \
+  V(FixedArray, single_character_string_cache, SingleCharacterStringCache)     \
+  V(FixedArray, string_split_cache, StringSplitCache)                          \
+  V(FixedArray, regexp_multiple_cache, RegExpMultipleCache)                    \
+  V(Oddball, termination_exception, TerminationException)                      \
+  V(Smi, hash_seed, HashSeed)                                                  \
+  V(Map, symbol_map, SymbolMap)                                                \
+  V(Map, string_map, StringMap)                                                \
+  V(Map, one_byte_string_map, OneByteStringMap)                                \
+  V(Map, cons_string_map, ConsStringMap)                                       \
+  V(Map, cons_one_byte_string_map, ConsOneByteStringMap)                       \
+  V(Map, sliced_string_map, SlicedStringMap)                                   \
+  V(Map, sliced_one_byte_string_map, SlicedOneByteStringMap)                   \
+  V(Map, external_string_map, ExternalStringMap)                               \
+  V(Map, external_string_with_one_byte_data_map,                               \
+    ExternalStringWithOneByteDataMap)                                          \
+  V(Map, external_one_byte_string_map, ExternalOneByteStringMap)               \
+  V(Map, short_external_string_map, ShortExternalStringMap)                    \
+  V(Map, short_external_string_with_one_byte_data_map,                         \
+    ShortExternalStringWithOneByteDataMap)                                     \
+  V(Map, internalized_string_map, InternalizedStringMap)                       \
+  V(Map, one_byte_internalized_string_map, OneByteInternalizedStringMap)       \
+  V(Map, external_internalized_string_map, ExternalInternalizedStringMap)      \
+  V(Map, external_internalized_string_with_one_byte_data_map,                  \
+    ExternalInternalizedStringWithOneByteDataMap)                              \
+  V(Map, external_one_byte_internalized_string_map,                            \
+    ExternalOneByteInternalizedStringMap)                                      \
+  V(Map, short_external_internalized_string_map,                               \
+    ShortExternalInternalizedStringMap)                                        \
+  V(Map, short_external_internalized_string_with_one_byte_data_map,            \
+    ShortExternalInternalizedStringWithOneByteDataMap)                         \
+  V(Map, short_external_one_byte_internalized_string_map,                      \
+    ShortExternalOneByteInternalizedStringMap)                                 \
+  V(Map, short_external_one_byte_string_map, ShortExternalOneByteStringMap)    \
+  V(Map, undetectable_string_map, UndetectableStringMap)                       \
+  V(Map, undetectable_one_byte_string_map, UndetectableOneByteStringMap)       \
+  V(Map, external_int8_array_map, ExternalInt8ArrayMap)                        \
+  V(Map, external_uint8_array_map, ExternalUint8ArrayMap)                      \
+  V(Map, external_int16_array_map, ExternalInt16ArrayMap)                      \
+  V(Map, external_uint16_array_map, ExternalUint16ArrayMap)                    \
+  V(Map, external_int32_array_map, ExternalInt32ArrayMap)                      \
+  V(Map, external_uint32_array_map, ExternalUint32ArrayMap)                    \
+  V(Map, external_float32_array_map, ExternalFloat32ArrayMap)                  \
+  V(Map, external_float64_array_map, ExternalFloat64ArrayMap)                  \
+  V(Map, external_uint8_clamped_array_map, ExternalUint8ClampedArrayMap)       \
+  V(ExternalArray, empty_external_int8_array, EmptyExternalInt8Array)          \
+  V(ExternalArray, empty_external_uint8_array, EmptyExternalUint8Array)        \
+  V(ExternalArray, empty_external_int16_array, EmptyExternalInt16Array)        \
+  V(ExternalArray, empty_external_uint16_array, EmptyExternalUint16Array)      \
+  V(ExternalArray, empty_external_int32_array, EmptyExternalInt32Array)        \
+  V(ExternalArray, empty_external_uint32_array, EmptyExternalUint32Array)      \
+  V(ExternalArray, empty_external_float32_array, EmptyExternalFloat32Array)    \
+  V(ExternalArray, empty_external_float64_array, EmptyExternalFloat64Array)    \
+  V(ExternalArray, empty_external_uint8_clamped_array,                         \
+    EmptyExternalUint8ClampedArray)                                            \
+  V(Map, fixed_uint8_array_map, FixedUint8ArrayMap)                            \
+  V(Map, fixed_int8_array_map, FixedInt8ArrayMap)                              \
+  V(Map, fixed_uint16_array_map, FixedUint16ArrayMap)                          \
+  V(Map, fixed_int16_array_map, FixedInt16ArrayMap)                            \
+  V(Map, fixed_uint32_array_map, FixedUint32ArrayMap)                          \
+  V(Map, fixed_int32_array_map, FixedInt32ArrayMap)                            \
+  V(Map, fixed_float32_array_map, FixedFloat32ArrayMap)                        \
+  V(Map, fixed_float64_array_map, FixedFloat64ArrayMap)                        \
+  V(Map, fixed_uint8_clamped_array_map, FixedUint8ClampedArrayMap)             \
+  V(FixedTypedArrayBase, empty_fixed_uint8_array, EmptyFixedUint8Array)        \
+  V(FixedTypedArrayBase, empty_fixed_int8_array, EmptyFixedInt8Array)          \
+  V(FixedTypedArrayBase, empty_fixed_uint16_array, EmptyFixedUint16Array)      \
+  V(FixedTypedArrayBase, empty_fixed_int16_array, EmptyFixedInt16Array)        \
+  V(FixedTypedArrayBase, empty_fixed_uint32_array, EmptyFixedUint32Array)      \
+  V(FixedTypedArrayBase, empty_fixed_int32_array, EmptyFixedInt32Array)        \
+  V(FixedTypedArrayBase, empty_fixed_float32_array, EmptyFixedFloat32Array)    \
+  V(FixedTypedArrayBase, empty_fixed_float64_array, EmptyFixedFloat64Array)    \
+  V(FixedTypedArrayBase, empty_fixed_uint8_clamped_array,                      \
+    EmptyFixedUint8ClampedArray)                                               \
+  V(Map, sloppy_arguments_elements_map, SloppyArgumentsElementsMap)            \
+  V(Map, function_context_map, FunctionContextMap)                             \
+  V(Map, catch_context_map, CatchContextMap)                                   \
+  V(Map, with_context_map, WithContextMap)                                     \
+  V(Map, block_context_map, BlockContextMap)                                   \
+  V(Map, module_context_map, ModuleContextMap)                                 \
+  V(Map, global_context_map, GlobalContextMap)                                 \
+  V(Map, undefined_map, UndefinedMap)                                          \
+  V(Map, the_hole_map, TheHoleMap)                                             \
+  V(Map, null_map, NullMap)                                                    \
+  V(Map, boolean_map, BooleanMap)                                              \
+  V(Map, uninitialized_map, UninitializedMap)                                  \
+  V(Map, arguments_marker_map, ArgumentsMarkerMap)                             \
+  V(Map, no_interceptor_result_sentinel_map, NoInterceptorResultSentinelMap)   \
+  V(Map, exception_map, ExceptionMap)                                          \
+  V(Map, termination_exception_map, TerminationExceptionMap)                   \
+  V(Map, message_object_map, JSMessageObjectMap)                               \
+  V(Map, foreign_map, ForeignMap)                                              \
+  V(HeapNumber, nan_value, NanValue)                                           \
+  V(HeapNumber, infinity_value, InfinityValue)                                 \
+  V(HeapNumber, minus_zero_value, MinusZeroValue)                              \
+  V(Map, neander_map, NeanderMap)                                              \
+  V(JSObject, message_listeners, MessageListeners)                             \
+  V(UnseededNumberDictionary, code_stubs, CodeStubs)                           \
+  V(UnseededNumberDictionary, non_monomorphic_cache, NonMonomorphicCache)      \
+  V(PolymorphicCodeCache, polymorphic_code_cache, PolymorphicCodeCache)        \
+  V(Code, js_entry_code, JsEntryCode)                                          \
+  V(Code, js_construct_entry_code, JsConstructEntryCode)                       \
+  V(FixedArray, natives_source_cache, NativesSourceCache)                      \
+  V(Script, empty_script, EmptyScript)                                         \
+  V(NameDictionary, intrinsic_function_names, IntrinsicFunctionNames)          \
+  V(Cell, undefined_cell, UndefineCell)                                        \
+  V(JSObject, observation_state, ObservationState)                             \
+  V(Map, external_map, ExternalMap)                                            \
+  V(Object, symbol_registry, SymbolRegistry)                                   \
+  V(Symbol, frozen_symbol, FrozenSymbol)                                       \
+  V(Symbol, nonexistent_symbol, NonExistentSymbol)                             \
+  V(Symbol, elements_transition_symbol, ElementsTransitionSymbol)              \
+  V(SeededNumberDictionary, empty_slow_element_dictionary,                     \
+    EmptySlowElementDictionary)                                                \
+  V(Symbol, observed_symbol, ObservedSymbol)                                   \
+  V(Symbol, uninitialized_symbol, UninitializedSymbol)                         \
+  V(Symbol, megamorphic_symbol, MegamorphicSymbol)                             \
+  V(Symbol, premonomorphic_symbol, PremonomorphicSymbol)                       \
+  V(Symbol, generic_symbol, GenericSymbol)                                     \
+  V(Symbol, stack_trace_symbol, StackTraceSymbol)                              \
+  V(Symbol, detailed_stack_trace_symbol, DetailedStackTraceSymbol)             \
+  V(Symbol, normal_ic_symbol, NormalICSymbol)                                  \
+  V(Symbol, home_object_symbol, HomeObjectSymbol)                              \
+  V(FixedArray, materialized_objects, MaterializedObjects)                     \
+  V(FixedArray, allocation_sites_scratchpad, AllocationSitesScratchpad)        \
+  V(FixedArray, microtask_queue, MicrotaskQueue)
+
+// Entries in this list are limited to Smis and are not visited during GC.
+#define SMI_ROOT_LIST(V)                                                   \
+  V(Smi, stack_limit, StackLimit)                                          \
+  V(Smi, real_stack_limit, RealStackLimit)                                 \
+  V(Smi, last_script_id, LastScriptId)                                     \
+  V(Smi, arguments_adaptor_deopt_pc_offset, ArgumentsAdaptorDeoptPCOffset) \
+  V(Smi, construct_stub_deopt_pc_offset, ConstructStubDeoptPCOffset)       \
+  V(Smi, getter_stub_deopt_pc_offset, GetterStubDeoptPCOffset)             \
+  V(Smi, setter_stub_deopt_pc_offset, SetterStubDeoptPCOffset)
+
+#define ROOT_LIST(V)  \
+  STRONG_ROOT_LIST(V) \
+  SMI_ROOT_LIST(V)    \
+  V(StringTable, string_table, StringTable)
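+// The root lists above follow the X-macro pattern: a client supplies V and
+// expands the list. For example (an illustrative expansion, not part of this
+// change),
+//
+//   #define ROOT_ACCESSOR(type, name, camel_name) inline type* name();
+//   ROOT_LIST(ROOT_ACCESSOR)
+//   #undef ROOT_ACCESSOR
+//
+// declares one accessor per root, e.g. FixedArray* empty_fixed_array().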
+
+// Heap roots that are known to be immortal immovable, for which we can safely
+// skip write barriers.
+#define IMMORTAL_IMMOVABLE_ROOT_LIST(V) \
+  V(byte_array_map)                     \
+  V(free_space_map)                     \
+  V(one_pointer_filler_map)             \
+  V(two_pointer_filler_map)             \
+  V(undefined_value)                    \
+  V(the_hole_value)                     \
+  V(null_value)                         \
+  V(true_value)                         \
+  V(false_value)                        \
+  V(uninitialized_value)                \
+  V(cell_map)                           \
+  V(global_property_cell_map)           \
+  V(shared_function_info_map)           \
+  V(meta_map)                           \
+  V(heap_number_map)                    \
+  V(mutable_heap_number_map)            \
+  V(native_context_map)                 \
+  V(fixed_array_map)                    \
+  V(code_map)                           \
+  V(scope_info_map)                     \
+  V(fixed_cow_array_map)                \
+  V(fixed_double_array_map)             \
+  V(constant_pool_array_map)            \
+  V(no_interceptor_result_sentinel)     \
+  V(hash_table_map)                     \
+  V(ordered_hash_table_map)             \
+  V(empty_fixed_array)                  \
+  V(empty_byte_array)                   \
+  V(empty_descriptor_array)             \
+  V(empty_constant_pool_array)          \
+  V(arguments_marker)                   \
+  V(symbol_map)                         \
+  V(sloppy_arguments_elements_map)      \
+  V(function_context_map)               \
+  V(catch_context_map)                  \
+  V(with_context_map)                   \
+  V(block_context_map)                  \
+  V(module_context_map)                 \
+  V(global_context_map)                 \
+  V(undefined_map)                      \
+  V(the_hole_map)                       \
+  V(null_map)                           \
+  V(boolean_map)                        \
+  V(uninitialized_map)                  \
+  V(message_object_map)                 \
+  V(foreign_map)                        \
+  V(neander_map)
+
+#define INTERNALIZED_STRING_LIST(V)                                \
+  V(Object_string, "Object")                                       \
+  V(proto_string, "__proto__")                                     \
+  V(arguments_string, "arguments")                                 \
+  V(Arguments_string, "Arguments")                                 \
+  V(caller_string, "caller")                                       \
+  V(boolean_string, "boolean")                                     \
+  V(Boolean_string, "Boolean")                                     \
+  V(callee_string, "callee")                                       \
+  V(constructor_string, "constructor")                             \
+  V(dot_result_string, ".result")                                  \
+  V(dot_for_string, ".for.")                                       \
+  V(eval_string, "eval")                                           \
+  V(empty_string, "")                                              \
+  V(function_string, "function")                                   \
+  V(Function_string, "Function")                                   \
+  V(length_string, "length")                                       \
+  V(name_string, "name")                                           \
+  V(null_string, "null")                                           \
+  V(number_string, "number")                                       \
+  V(Number_string, "Number")                                       \
+  V(nan_string, "NaN")                                             \
+  V(source_string, "source")                                       \
+  V(source_url_string, "source_url")                               \
+  V(source_mapping_url_string, "source_mapping_url")               \
+  V(global_string, "global")                                       \
+  V(ignore_case_string, "ignoreCase")                              \
+  V(multiline_string, "multiline")                                 \
+  V(sticky_string, "sticky")                                       \
+  V(harmony_regexps_string, "harmony_regexps")                     \
+  V(input_string, "input")                                         \
+  V(index_string, "index")                                         \
+  V(last_index_string, "lastIndex")                                \
+  V(object_string, "object")                                       \
+  V(prototype_string, "prototype")                                 \
+  V(string_string, "string")                                       \
+  V(String_string, "String")                                       \
+  V(symbol_string, "symbol")                                       \
+  V(Symbol_string, "Symbol")                                       \
+  V(Map_string, "Map")                                             \
+  V(Set_string, "Set")                                             \
+  V(WeakMap_string, "WeakMap")                                     \
+  V(WeakSet_string, "WeakSet")                                     \
+  V(for_string, "for")                                             \
+  V(for_api_string, "for_api")                                     \
+  V(for_intern_string, "for_intern")                               \
+  V(private_api_string, "private_api")                             \
+  V(private_intern_string, "private_intern")                       \
+  V(Date_string, "Date")                                           \
+  V(char_at_string, "CharAt")                                      \
+  V(undefined_string, "undefined")                                 \
+  V(value_of_string, "valueOf")                                    \
+  V(stack_string, "stack")                                         \
+  V(toJSON_string, "toJSON")                                       \
+  V(KeyedLoadMonomorphic_string, "KeyedLoadMonomorphic")           \
+  V(KeyedStoreMonomorphic_string, "KeyedStoreMonomorphic")         \
+  V(stack_overflow_string, "kStackOverflowBoilerplate")            \
+  V(illegal_access_string, "illegal access")                       \
+  V(cell_value_string, "%cell_value")                              \
+  V(illegal_argument_string, "illegal argument")                   \
+  V(identity_hash_string, "v8::IdentityHash")                      \
+  V(closure_string, "(closure)")                                   \
+  V(dot_string, ".")                                               \
+  V(compare_ic_string, "==")                                       \
+  V(strict_compare_ic_string, "===")                               \
+  V(infinity_string, "Infinity")                                   \
+  V(minus_infinity_string, "-Infinity")                            \
+  V(query_colon_string, "(?:)")                                    \
+  V(Generator_string, "Generator")                                 \
+  V(throw_string, "throw")                                         \
+  V(done_string, "done")                                           \
+  V(value_string, "value")                                         \
+  V(next_string, "next")                                           \
+  V(byte_length_string, "byteLength")                              \
+  V(byte_offset_string, "byteOffset")                              \
+  V(intl_initialized_marker_string, "v8::intl_initialized_marker") \
+  V(intl_impl_object_string, "v8::intl_object")
+
+// Forward declarations.
+class HeapStats;
+class Isolate;
+class WeakObjectRetainer;
+
+
+typedef String* (*ExternalStringTableUpdaterCallback)(Heap* heap,
+                                                      Object** pointer);
+
+class StoreBufferRebuilder {
+ public:
+  explicit StoreBufferRebuilder(StoreBuffer* store_buffer)
+      : store_buffer_(store_buffer) {}
+
+  void Callback(MemoryChunk* page, StoreBufferEvent event);
+
+ private:
+  StoreBuffer* store_buffer_;
+
+  // We record in this variable how full the store buffer was when we started
+  // iterating over the current page, finding pointers to new space.  If the
+  // store buffer overflows again we can exempt the page from the store buffer
+  // by rewinding to this point instead of having to search the store buffer.
+  Object*** start_of_current_page_;
+  // The current page we are scanning in the store buffer iterator.
+  MemoryChunk* current_page_;
+};
+
+
+// A queue of objects promoted during scavenge. Each object is accompanied
+// by its size to avoid dereferencing a map pointer for scanning.
+class PromotionQueue {
+ public:
+  explicit PromotionQueue(Heap* heap)
+      : front_(NULL),
+        rear_(NULL),
+        limit_(NULL),
+        emergency_stack_(0),
+        heap_(heap) {}
+
+  void Initialize();
+
+  void Destroy() {
+    DCHECK(is_empty());
+    delete emergency_stack_;
+    emergency_stack_ = NULL;
+  }
+
+  Page* GetHeadPage() {
+    return Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
+  }
+
+  void SetNewLimit(Address limit) {
+    limit_ = reinterpret_cast<intptr_t*>(limit);
+
+    if (limit_ <= rear_) {
+      return;
+    }
+
+    RelocateQueueHead();
+  }
+
+  bool IsBelowPromotionQueue(Address to_space_top) {
+    // If the given to-space top pointer and the head of the promotion queue
+    // are not on the same page, then the to-space objects are below the
+    // promotion queue.
+    if (GetHeadPage() != Page::FromAddress(to_space_top)) {
+      return true;
+    }
+    // If the to-space top pointer is smaller than or equal to the promotion
+    // queue head, then the to-space objects are below the promotion queue.
+    return reinterpret_cast<intptr_t*>(to_space_top) <= rear_;
+  }
+
+  bool is_empty() {
+    return (front_ == rear_) &&
+           (emergency_stack_ == NULL || emergency_stack_->length() == 0);
+  }
+
+  inline void insert(HeapObject* target, int size);
+
+  void remove(HeapObject** target, int* size) {
+    DCHECK(!is_empty());
+    if (front_ == rear_) {
+      Entry e = emergency_stack_->RemoveLast();
+      *target = e.obj_;
+      *size = e.size_;
+      return;
+    }
+
+    if (NewSpacePage::IsAtStart(reinterpret_cast<Address>(front_))) {
+      NewSpacePage* front_page =
+          NewSpacePage::FromAddress(reinterpret_cast<Address>(front_));
+      DCHECK(!front_page->prev_page()->is_anchor());
+      front_ = reinterpret_cast<intptr_t*>(front_page->prev_page()->area_end());
+    }
+    *target = reinterpret_cast<HeapObject*>(*(--front_));
+    *size = static_cast<int>(*(--front_));
+    // Assert no underflow.
+    SemiSpace::AssertValidRange(reinterpret_cast<Address>(rear_),
+                                reinterpret_cast<Address>(front_));
+  }
+
+ private:
+  // The front of the queue is higher in the memory page chain than the rear.
+  intptr_t* front_;
+  intptr_t* rear_;
+  intptr_t* limit_;
+
+  static const int kEntrySizeInWords = 2;
+
+  struct Entry {
+    Entry(HeapObject* obj, int size) : obj_(obj), size_(size) {}
+
+    HeapObject* obj_;
+    int size_;
+  };
+  List<Entry>* emergency_stack_;
+
+  Heap* heap_;
+
+  void RelocateQueueHead();
+
+  DISALLOW_COPY_AND_ASSIGN(PromotionQueue);
+};
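+// Illustrative usage (a sketch of how the scavenger drives the queue, not
+// code from this change):
+//
+//   promotion_queue()->insert(target, size);      // while evacuating objects
+//   ...
+//   while (!promotion_queue()->is_empty()) {      // while draining the queue
+//     HeapObject* obj;
+//     int size;
+//     promotion_queue()->remove(&obj, &size);
+//     // ... re-scan the promoted object for pointers into new space ...
+//   }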
+
+
+typedef void (*ScavengingCallback)(Map* map, HeapObject** slot,
+                                   HeapObject* object);
+
+
+// The external string table is the place where all external strings are
+// registered.  We need to keep track of such strings in order to finalize
+// them properly.
+class ExternalStringTable {
+ public:
+  // Registers an external string.
+  inline void AddString(String* string);
+
+  inline void Iterate(ObjectVisitor* v);
+
+  // Restores internal invariant and gets rid of collected strings.
+  // Must be called after each Iterate() that modified the strings.
+  void CleanUp();
+
+  // Destroys all allocated memory.
+  void TearDown();
+
+ private:
+  explicit ExternalStringTable(Heap* heap) : heap_(heap) {}
+
+  friend class Heap;
+
+  inline void Verify();
+
+  inline void AddOldString(String* string);
+
+  // Notifies the table that only a prefix of the new list is valid.
+  inline void ShrinkNewStrings(int position);
+
+  // To speed up scavenge collections new space strings are kept
+  // separate from old space strings.
+  List<Object*> new_space_strings_;
+  List<Object*> old_space_strings_;
+
+  Heap* heap_;
+
+  DISALLOW_COPY_AND_ASSIGN(ExternalStringTable);
+};
+
+
+enum ArrayStorageAllocationMode {
+  DONT_INITIALIZE_ARRAY_ELEMENTS,
+  INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE
+};
+
+
+class Heap {
+ public:
+  // Configure heap size in MB before setup. Return false if the heap has been
+  // set up already.
+  bool ConfigureHeap(int max_semi_space_size, int max_old_space_size,
+                     int max_executable_size, size_t code_range_size);
+  bool ConfigureHeapDefault();
+
+  // Prepares the heap, setting up memory areas that are needed in the isolate
+  // without actually creating any objects.
+  bool SetUp();
+
+  // Bootstraps the object heap with the core set of objects required to run.
+  // Returns whether it succeeded.
+  bool CreateHeapObjects();
+
+  // Destroys all memory allocated by the heap.
+  void TearDown();
+
+  // Set the stack limit in the roots_ array.  Some architectures generate
+  // code that looks here, because it is faster than loading from the static
+  // jslimit_/real_jslimit_ variable in the StackGuard.
+  void SetStackLimits();
+
+  // Returns whether SetUp has been called.
+  bool HasBeenSetUp();
+
+  // Returns the maximum amount of memory reserved for the heap.  For
+  // the young generation, we reserve 4 times the amount needed for a
+  // semi space.  The young generation consists of two semi spaces and
+  // we reserve twice the amount needed for those in order to ensure
+  // that new space can be aligned to its size.
+  intptr_t MaxReserved() {
+    return 4 * reserved_semispace_size_ + max_old_generation_size_;
+  }
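+  // Worked example (hypothetical sizes): with an 8 MB reserved semispace and
+  // a 700 MB old generation limit, MaxReserved() yields
+  // 4 * 8 MB + 700 MB = 732 MB.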
+  int MaxSemiSpaceSize() { return max_semi_space_size_; }
+  int ReservedSemiSpaceSize() { return reserved_semispace_size_; }
+  int InitialSemiSpaceSize() { return initial_semispace_size_; }
+  intptr_t MaxOldGenerationSize() { return max_old_generation_size_; }
+  intptr_t MaxExecutableSize() { return max_executable_size_; }
+
+  // Returns the capacity of the heap in bytes w/o growing. Heap grows when
+  // more spaces are needed until it reaches the limit.
+  intptr_t Capacity();
+
+  // Returns the amount of memory currently committed for the heap.
+  intptr_t CommittedMemory();
+
+  // Returns the amount of executable memory currently committed for the heap.
+  intptr_t CommittedMemoryExecutable();
+
+  // Returns the amount of physical memory currently committed for the heap.
+  size_t CommittedPhysicalMemory();
+
+  // Returns the maximum amount of memory ever committed for the heap.
+  intptr_t MaximumCommittedMemory() { return maximum_committed_; }
+
+  // Updates the maximum committed memory for the heap. Should be called
+  // whenever a space grows.
+  void UpdateMaximumCommitted();
+
+  // Returns the available bytes in space w/o growing.
+  // Heap doesn't guarantee that it can allocate an object that requires
+  // all available bytes. Check MaxHeapObjectSize() instead.
+  intptr_t Available();
+
+  // Returns the size of all objects residing in the heap.
+  intptr_t SizeOfObjects();
+
+  // Return the starting address and a mask for the new space.  And-masking an
+  // address with the mask will result in the start address of the new space
+  // for all addresses in either semispace.
+  Address NewSpaceStart() { return new_space_.start(); }
+  uintptr_t NewSpaceMask() { return new_space_.mask(); }
+  Address NewSpaceTop() { return new_space_.top(); }
+
+  NewSpace* new_space() { return &new_space_; }
+  OldSpace* old_pointer_space() { return old_pointer_space_; }
+  OldSpace* old_data_space() { return old_data_space_; }
+  OldSpace* code_space() { return code_space_; }
+  MapSpace* map_space() { return map_space_; }
+  CellSpace* cell_space() { return cell_space_; }
+  PropertyCellSpace* property_cell_space() { return property_cell_space_; }
+  LargeObjectSpace* lo_space() { return lo_space_; }
+  PagedSpace* paged_space(int idx) {
+    switch (idx) {
+      case OLD_POINTER_SPACE:
+        return old_pointer_space();
+      case OLD_DATA_SPACE:
+        return old_data_space();
+      case MAP_SPACE:
+        return map_space();
+      case CELL_SPACE:
+        return cell_space();
+      case PROPERTY_CELL_SPACE:
+        return property_cell_space();
+      case CODE_SPACE:
+        return code_space();
+      case NEW_SPACE:
+      case LO_SPACE:
+        UNREACHABLE();
+    }
+    return NULL;
+  }
+
+  bool always_allocate() { return always_allocate_scope_depth_ != 0; }
+  Address always_allocate_scope_depth_address() {
+    return reinterpret_cast<Address>(&always_allocate_scope_depth_);
+  }
+
+  Address* NewSpaceAllocationTopAddress() {
+    return new_space_.allocation_top_address();
+  }
+  Address* NewSpaceAllocationLimitAddress() {
+    return new_space_.allocation_limit_address();
+  }
+
+  Address* OldPointerSpaceAllocationTopAddress() {
+    return old_pointer_space_->allocation_top_address();
+  }
+  Address* OldPointerSpaceAllocationLimitAddress() {
+    return old_pointer_space_->allocation_limit_address();
+  }
+
+  Address* OldDataSpaceAllocationTopAddress() {
+    return old_data_space_->allocation_top_address();
+  }
+  Address* OldDataSpaceAllocationLimitAddress() {
+    return old_data_space_->allocation_limit_address();
+  }
+
+  // Returns a deep copy of the JavaScript object.
+  // Properties and elements are copied too.
+  // Optionally takes an AllocationSite to be appended in an AllocationMemento.
+  MUST_USE_RESULT AllocationResult
+      CopyJSObject(JSObject* source, AllocationSite* site = NULL);
+
+  // Clear the Instanceof cache (used when a prototype changes).
+  inline void ClearInstanceofCache();
+
+  // Iterates the whole code space to clear all ICs of the given kind.
+  void ClearAllICsByKind(Code::Kind kind);
+
+  // For use during bootup.
+  void RepairFreeListsAfterBoot();
+
+  template <typename T>
+  static inline bool IsOneByte(T t, int chars);
+
+  // Moves len elements within the given array from src_index to dst_index.
+  void MoveElements(FixedArray* array, int dst_index, int src_index, int len);
+
+  // Sloppy mode arguments object size.
+  static const int kSloppyArgumentsObjectSize =
+      JSObject::kHeaderSize + 2 * kPointerSize;
+  // Strict mode arguments have no callee, so the object is smaller.
+  static const int kStrictArgumentsObjectSize =
+      JSObject::kHeaderSize + 1 * kPointerSize;
+  // Indices for direct access into argument objects.
+  static const int kArgumentsLengthIndex = 0;
+  // The callee is only valid in sloppy mode.
+  static const int kArgumentsCalleeIndex = 1;
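+  // That is, a sloppy-mode arguments object carries two in-object slots
+  // (length and callee) after the regular JSObject header, while a
+  // strict-mode arguments object carries only the length slot.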
+
+  // Finalizes an external string by deleting the associated external
+  // data and clearing the resource pointer.
+  inline void FinalizeExternalString(String* string);
+
+  // Initializes a filler object so that the heap stays iterable when gaps are
+  // introduced within pages.
+  void CreateFillerObjectAt(Address addr, int size);
+
+  bool CanMoveObjectStart(HeapObject* object);
+
+  // Indicates whether live bytes adjustment is triggered from within the GC
+  // code or from mutator code.
+  enum InvocationMode { FROM_GC, FROM_MUTATOR };
+
+  // Maintain consistency of live bytes during incremental marking.
+  void AdjustLiveBytes(Address address, int by, InvocationMode mode);
+
+  // Trim the given array from the left. Note that this relocates the object
+  // start and hence is only valid if there is only a single reference to it.
+  FixedArrayBase* LeftTrimFixedArray(FixedArrayBase* obj, int elements_to_trim);
+
+  // Trim the given array from the right.
+  template<Heap::InvocationMode mode>
+  void RightTrimFixedArray(FixedArrayBase* obj, int elements_to_trim);
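+  // Illustrative usage (sketch; |heap| and |array| are placeholders):
+  //   FixedArrayBase* trimmed = heap->LeftTrimFixedArray(array, 1);
+  //   heap->RightTrimFixedArray<Heap::FROM_MUTATOR>(trimmed, 2);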
+
+  // Converts the given boolean condition to JavaScript boolean value.
+  inline Object* ToBoolean(bool condition);
+
+  // Performs a garbage collection operation.
+  // Returns whether there is a chance that another major GC could
+  // collect more garbage.
+  inline bool CollectGarbage(
+      AllocationSpace space, const char* gc_reason = NULL,
+      const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
+
+  static const int kNoGCFlags = 0;
+  static const int kReduceMemoryFootprintMask = 1;
+  static const int kAbortIncrementalMarkingMask = 2;
+
+  // Making the heap iterable requires us to abort incremental marking.
+  static const int kMakeHeapIterableMask = kAbortIncrementalMarkingMask;
+
+  // Performs a full garbage collection.  If (flags & kMakeHeapIterableMask) is
+  // non-zero, then the slower precise sweeper is used, which leaves the heap
+  // in a state where we can iterate over the heap visiting all objects.
+  void CollectAllGarbage(
+      int flags, const char* gc_reason = NULL,
+      const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
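+  // Illustrative usage (sketch; the reason string is arbitrary):
+  //   heap->CollectAllGarbage(Heap::kMakeHeapIterableMask,
+  //                           "example: make heap iterable");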
+
+  // Last-resort GC; should try to squeeze out as much memory as possible.
+  void CollectAllAvailableGarbage(const char* gc_reason = NULL);
+
+  // Check whether the heap is currently iterable.
+  bool IsHeapIterable();
+
+  // Notify the heap that a context has been disposed.
+  int NotifyContextDisposed();
+
+  inline void increment_scan_on_scavenge_pages() {
+    scan_on_scavenge_pages_++;
+    if (FLAG_gc_verbose) {
+      PrintF("Scan-on-scavenge pages: %d\n", scan_on_scavenge_pages_);
+    }
+  }
+
+  inline void decrement_scan_on_scavenge_pages() {
+    scan_on_scavenge_pages_--;
+    if (FLAG_gc_verbose) {
+      PrintF("Scan-on-scavenge pages: %d\n", scan_on_scavenge_pages_);
+    }
+  }
+
+  PromotionQueue* promotion_queue() { return &promotion_queue_; }
+
+  void AddGCPrologueCallback(v8::Isolate::GCPrologueCallback callback,
+                             GCType gc_type_filter, bool pass_isolate = true);
+  void RemoveGCPrologueCallback(v8::Isolate::GCPrologueCallback callback);
+
+  void AddGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback,
+                             GCType gc_type_filter, bool pass_isolate = true);
+  void RemoveGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback);
+
+// Heap root getters.  We have versions with and without type::cast() here.
+// You can't use type::cast during GC because the assert fails.
+// TODO(1490): Try removing the unchecked accessors, now that GC marking does
+// not corrupt the map.
+#define ROOT_ACCESSOR(type, name, camel_name)                           \
+  type* name() { return type::cast(roots_[k##camel_name##RootIndex]); } \
+  type* raw_unchecked_##name() {                                        \
+    return reinterpret_cast<type*>(roots_[k##camel_name##RootIndex]);   \
+  }
+  ROOT_LIST(ROOT_ACCESSOR)
+#undef ROOT_ACCESSOR
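+// For example, assuming ROOT_LIST contains the entry
+// (Oddball, undefined_value, UndefinedValue), the macro above expands to:
+//   Oddball* undefined_value() {
+//     return Oddball::cast(roots_[kUndefinedValueRootIndex]);
+//   }
+//   Oddball* raw_unchecked_undefined_value() {
+//     return reinterpret_cast<Oddball*>(roots_[kUndefinedValueRootIndex]);
+//   }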
+
+// Utility type maps
+#define STRUCT_MAP_ACCESSOR(NAME, Name, name) \
+  Map* name##_map() { return Map::cast(roots_[k##Name##MapRootIndex]); }
+  STRUCT_LIST(STRUCT_MAP_ACCESSOR)
+#undef STRUCT_MAP_ACCESSOR
+
+#define STRING_ACCESSOR(name, str) \
+  String* name() { return String::cast(roots_[k##name##RootIndex]); }
+  INTERNALIZED_STRING_LIST(STRING_ACCESSOR)
+#undef STRING_ACCESSOR
+
+  // The hidden_string is special because it is the empty string, but does
+  // not match the empty string.
+  String* hidden_string() { return hidden_string_; }
+
+  void set_native_contexts_list(Object* object) {
+    native_contexts_list_ = object;
+  }
+  Object* native_contexts_list() const { return native_contexts_list_; }
+
+  void set_array_buffers_list(Object* object) { array_buffers_list_ = object; }
+  Object* array_buffers_list() const { return array_buffers_list_; }
+
+  void set_allocation_sites_list(Object* object) {
+    allocation_sites_list_ = object;
+  }
+  Object* allocation_sites_list() { return allocation_sites_list_; }
+
+  // Used in CreateAllocationSiteStub and the (de)serializer.
+  Object** allocation_sites_list_address() { return &allocation_sites_list_; }
+
+  Object* weak_object_to_code_table() { return weak_object_to_code_table_; }
+
+  void set_encountered_weak_collections(Object* weak_collection) {
+    encountered_weak_collections_ = weak_collection;
+  }
+  Object* encountered_weak_collections() const {
+    return encountered_weak_collections_;
+  }
+
+  // Number of mark-sweeps.
+  unsigned int ms_count() { return ms_count_; }
+
+  // Iterates over all roots in the heap.
+  void IterateRoots(ObjectVisitor* v, VisitMode mode);
+  // Iterates over all strong roots in the heap.
+  void IterateStrongRoots(ObjectVisitor* v, VisitMode mode);
+  // Iterates over entries in the smi roots list.  Only interesting to the
+  // serializer/deserializer, since GC does not care about smis.
+  void IterateSmiRoots(ObjectVisitor* v);
+  // Iterates over all the other roots in the heap.
+  void IterateWeakRoots(ObjectVisitor* v, VisitMode mode);
+
+  // Iterates over and marks all pointers into the from-semispace of new space
+  // found in the memory interval from start to end.
+  void IterateAndMarkPointersToFromSpace(Address start, Address end,
+                                         ObjectSlotCallback callback);
+
+  // Returns whether the object resides in new space.
+  inline bool InNewSpace(Object* object);
+  inline bool InNewSpace(Address address);
+  inline bool InNewSpacePage(Address address);
+  inline bool InFromSpace(Object* object);
+  inline bool InToSpace(Object* object);
+
+  // Returns whether the object resides in old pointer space.
+  inline bool InOldPointerSpace(Address address);
+  inline bool InOldPointerSpace(Object* object);
+
+  // Returns whether the object resides in old data space.
+  inline bool InOldDataSpace(Address address);
+  inline bool InOldDataSpace(Object* object);
+
+  // Checks whether an address/object is in the heap (including the auxiliary
+  // and unused areas).
+  bool Contains(Address addr);
+  bool Contains(HeapObject* value);
+
+  // Checks whether an address/object is in the given space.
+  // Currently used by tests, serialization and heap verification only.
+  bool InSpace(Address addr, AllocationSpace space);
+  bool InSpace(HeapObject* value, AllocationSpace space);
+
+  // Finds out which space an object should get promoted to based on its type.
+  inline OldSpace* TargetSpace(HeapObject* object);
+  static inline AllocationSpace TargetSpaceId(InstanceType type);
+
+  // Checks whether the given object is allowed to be migrated from its
+  // current space into the given destination space. Used for debugging.
+  inline bool AllowedToBeMigrated(HeapObject* object, AllocationSpace dest);
+
+  // Sets the stub_cache_ (only used when expanding the dictionary).
+  void public_set_code_stubs(UnseededNumberDictionary* value) {
+    roots_[kCodeStubsRootIndex] = value;
+  }
+
+  // Support for computing object sizes for old objects during GCs. Returns
+  // a function that is guaranteed to be safe for computing object sizes in
+  // the current GC phase.
+  HeapObjectCallback GcSafeSizeOfOldObjectFunction() {
+    return gc_safe_size_of_old_object_;
+  }
+
+  // Sets the non_monomorphic_cache_ (only used when expanding the dictionary).
+  void public_set_non_monomorphic_cache(UnseededNumberDictionary* value) {
+    roots_[kNonMonomorphicCacheRootIndex] = value;
+  }
+
+  void public_set_empty_script(Script* script) {
+    roots_[kEmptyScriptRootIndex] = script;
+  }
+
+  void public_set_store_buffer_top(Address* top) {
+    roots_[kStoreBufferTopRootIndex] = reinterpret_cast<Smi*>(top);
+  }
+
+  void public_set_materialized_objects(FixedArray* objects) {
+    roots_[kMaterializedObjectsRootIndex] = objects;
+  }
+
+  // Generated code can embed this address to get access to the roots.
+  Object** roots_array_start() { return roots_; }
+
+  Address* store_buffer_top_address() {
+    return reinterpret_cast<Address*>(&roots_[kStoreBufferTopRootIndex]);
+  }
+
+#ifdef VERIFY_HEAP
+  // Verify the heap is in its normal state before or after a GC.
+  void Verify();
+
+
+  bool weak_embedded_objects_verification_enabled() {
+    return no_weak_object_verification_scope_depth_ == 0;
+  }
+#endif
+
+#ifdef DEBUG
+  void Print();
+  void PrintHandles();
+
+  void OldPointerSpaceCheckStoreBuffer();
+  void MapSpaceCheckStoreBuffer();
+  void LargeObjectSpaceCheckStoreBuffer();
+
+  // Report heap statistics.
+  void ReportHeapStatistics(const char* title);
+  void ReportCodeStatistics(const char* title);
+#endif
+
+  // Zapping is needed for heap verification and always done in debug builds.
+  static inline bool ShouldZapGarbage() {
+#ifdef DEBUG
+    return true;
+#else
+#ifdef VERIFY_HEAP
+    return FLAG_verify_heap;
+#else
+    return false;
+#endif
+#endif
+  }
+
+  // Number of "runtime allocations" done so far.
+  uint32_t allocations_count() { return allocations_count_; }
+
+  // Returns deterministic "time" value in ms. Works only with
+  // FLAG_verify_predictable.
+  double synthetic_time() { return allocations_count_ / 2.0; }
+
+  // Print short heap statistics.
+  void PrintShortHeapStatistics();
+
+  // Write barrier support for address[offset] = o.
+  INLINE(void RecordWrite(Address address, int offset));
+
+  // Write barrier support for address[start : start + len[ = o.
+  INLINE(void RecordWrites(Address address, int start, int len));
+
+  enum HeapState { NOT_IN_GC, SCAVENGE, MARK_COMPACT };
+  inline HeapState gc_state() { return gc_state_; }
+
+  inline bool IsInGCPostProcessing() { return gc_post_processing_depth_ > 0; }
+
+#ifdef DEBUG
+  void set_allocation_timeout(int timeout) { allocation_timeout_ = timeout; }
+
+  void TracePathToObjectFrom(Object* target, Object* root);
+  void TracePathToObject(Object* target);
+  void TracePathToGlobal();
+#endif
+
+  // Callback function passed to Heap::Iterate etc.  Copies an object if
+  // necessary, the object might be promoted to an old space.  The caller must
+  // ensure the precondition that the object is (a) a heap object and (b) in
+  // the heap's from space.
+  static inline void ScavengePointer(HeapObject** p);
+  static inline void ScavengeObject(HeapObject** p, HeapObject* object);
+
+  enum ScratchpadSlotMode { IGNORE_SCRATCHPAD_SLOT, RECORD_SCRATCHPAD_SLOT };
+
+  // If an object has an AllocationMemento trailing it, return it, otherwise
+  // return NULL.
+  inline AllocationMemento* FindAllocationMemento(HeapObject* object);
+
+  // An object may have an AllocationSite associated with it through a trailing
+  // AllocationMemento. Its feedback should be updated when objects are found
+  // in the heap.
+  static inline void UpdateAllocationSiteFeedback(HeapObject* object,
+                                                  ScratchpadSlotMode mode);
+
+  // Support for partial snapshots.  After calling this we have a linear
+  // space to write objects in each space.
+  void ReserveSpace(int* sizes, Address* addresses);
+
+  //
+  // Support for the API.
+  //
+
+  void CreateApiObjects();
+
+  inline intptr_t PromotedTotalSize() {
+    int64_t total = PromotedSpaceSizeOfObjects() + PromotedExternalMemorySize();
+    if (total > kMaxInt) return static_cast<intptr_t>(kMaxInt);
+    if (total < 0) return 0;
+    return static_cast<intptr_t>(total);
+  }
+
+  inline intptr_t OldGenerationSpaceAvailable() {
+    return old_generation_allocation_limit_ - PromotedTotalSize();
+  }
+
+  inline intptr_t OldGenerationCapacityAvailable() {
+    return max_old_generation_size_ - PromotedTotalSize();
+  }
+
+  static const intptr_t kMinimumOldGenerationAllocationLimit =
+      8 * (Page::kPageSize > MB ? Page::kPageSize : MB);
+
+  static const int kPointerMultiplier = i::kPointerSize / 4;
+
+  // The new space size has to be a power of 2. Sizes are in MB.
+  static const int kMaxSemiSpaceSizeLowMemoryDevice = 1 * kPointerMultiplier;
+  static const int kMaxSemiSpaceSizeMediumMemoryDevice = 4 * kPointerMultiplier;
+  static const int kMaxSemiSpaceSizeHighMemoryDevice = 8 * kPointerMultiplier;
+  static const int kMaxSemiSpaceSizeHugeMemoryDevice = 8 * kPointerMultiplier;
+
+  // The old space size has to be a multiple of Page::kPageSize.
+  // Sizes are in MB.
+  static const int kMaxOldSpaceSizeLowMemoryDevice = 128 * kPointerMultiplier;
+  static const int kMaxOldSpaceSizeMediumMemoryDevice =
+      256 * kPointerMultiplier;
+  static const int kMaxOldSpaceSizeHighMemoryDevice = 512 * kPointerMultiplier;
+  static const int kMaxOldSpaceSizeHugeMemoryDevice = 700 * kPointerMultiplier;
+
+  // The executable size has to be a multiple of Page::kPageSize.
+  // Sizes are in MB.
+  static const int kMaxExecutableSizeLowMemoryDevice = 96 * kPointerMultiplier;
+  static const int kMaxExecutableSizeMediumMemoryDevice =
+      192 * kPointerMultiplier;
+  static const int kMaxExecutableSizeHighMemoryDevice =
+      256 * kPointerMultiplier;
+  static const int kMaxExecutableSizeHugeMemoryDevice =
+      256 * kPointerMultiplier;
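+  // Worked example: kPointerMultiplier is kPointerSize / 4, i.e. 1 on 32-bit
+  // and 2 on 64-bit targets, so e.g. kMaxSemiSpaceSizeHugeMemoryDevice is
+  // 8 MB / 16 MB and kMaxOldSpaceSizeHugeMemoryDevice is 700 MB / 1400 MB on
+  // 32-bit / 64-bit builds respectively.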
+
+  intptr_t OldGenerationAllocationLimit(intptr_t old_gen_size,
+                                        int freed_global_handles);
+
+  // Indicates whether inline bump-pointer allocation has been disabled.
+  bool inline_allocation_disabled() { return inline_allocation_disabled_; }
+
+  // Switch whether inline bump-pointer allocation should be used.
+  void EnableInlineAllocation();
+  void DisableInlineAllocation();
+
+  // Implements the corresponding V8 API function.
+  bool IdleNotification(int idle_time_in_ms);
+
+  // Declare all the root indices.  This defines the root list order.
+  enum RootListIndex {
+#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
+    STRONG_ROOT_LIST(ROOT_INDEX_DECLARATION)
+#undef ROOT_INDEX_DECLARATION
+
+#define STRING_INDEX_DECLARATION(name, str) k##name##RootIndex,
+    INTERNALIZED_STRING_LIST(STRING_INDEX_DECLARATION)
+#undef STRING_INDEX_DECLARATION
+
+// Utility type maps
+#define DECLARE_STRUCT_MAP(NAME, Name, name) k##Name##MapRootIndex,
+    STRUCT_LIST(DECLARE_STRUCT_MAP)
+#undef DECLARE_STRUCT_MAP
+    kStringTableRootIndex,
+
+#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
+    SMI_ROOT_LIST(ROOT_INDEX_DECLARATION)
+#undef ROOT_INDEX_DECLARATION
+    kRootListLength,
+    kStrongRootListLength = kStringTableRootIndex,
+    kSmiRootsStart = kStringTableRootIndex + 1
+  };
+
+  STATIC_ASSERT(kUndefinedValueRootIndex ==
+                Internals::kUndefinedValueRootIndex);
+  STATIC_ASSERT(kNullValueRootIndex == Internals::kNullValueRootIndex);
+  STATIC_ASSERT(kTrueValueRootIndex == Internals::kTrueValueRootIndex);
+  STATIC_ASSERT(kFalseValueRootIndex == Internals::kFalseValueRootIndex);
+  STATIC_ASSERT(kempty_stringRootIndex == Internals::kEmptyStringRootIndex);
+
+  // Generated code can embed direct references to non-writable roots if
+  // they are in new space.
+  static bool RootCanBeWrittenAfterInitialization(RootListIndex root_index);
+  // Generated code can treat direct references to this root as constant.
+  bool RootCanBeTreatedAsConstant(RootListIndex root_index);
+
+  Map* MapForFixedTypedArray(ExternalArrayType array_type);
+  RootListIndex RootIndexForFixedTypedArray(ExternalArrayType array_type);
+
+  Map* MapForExternalArrayType(ExternalArrayType array_type);
+  RootListIndex RootIndexForExternalArrayType(ExternalArrayType array_type);
+
+  RootListIndex RootIndexForEmptyExternalArray(ElementsKind kind);
+  RootListIndex RootIndexForEmptyFixedTypedArray(ElementsKind kind);
+  ExternalArray* EmptyExternalArrayForMap(Map* map);
+  FixedTypedArrayBase* EmptyFixedTypedArrayForMap(Map* map);
+
+  void RecordStats(HeapStats* stats, bool take_snapshot = false);
+
+  // Copies a block of memory from src to dst. The block size must be
+  // pointer-size aligned.
+  static inline void CopyBlock(Address dst, Address src, int byte_size);
+
+  // Optimized version of memmove for blocks with pointer size aligned sizes and
+  // pointer size aligned addresses.
+  static inline void MoveBlock(Address dst, Address src, int byte_size);
+
+  // Checks the new space expansion criteria and expands semispaces if met.
+  void CheckNewSpaceExpansionCriteria();
+
+  inline void IncrementPromotedObjectsSize(int object_size) {
+    DCHECK(object_size > 0);
+    promoted_objects_size_ += object_size;
+  }
+
+  inline void IncrementSemiSpaceCopiedObjectSize(int object_size) {
+    DCHECK(object_size > 0);
+    semi_space_copied_object_size_ += object_size;
+  }
+
+  inline void IncrementNodesDiedInNewSpace() { nodes_died_in_new_space_++; }
+
+  inline void IncrementNodesCopiedInNewSpace() { nodes_copied_in_new_space_++; }
+
+  inline void IncrementNodesPromoted() { nodes_promoted_++; }
+
+  inline void IncrementYoungSurvivorsCounter(int survived) {
+    DCHECK(survived >= 0);
+    survived_since_last_expansion_ += survived;
+  }
+
+  inline bool NextGCIsLikelyToBeFull() {
+    if (FLAG_gc_global) return true;
+
+    if (FLAG_stress_compaction && (gc_count_ & 1) != 0) return true;
+
+    intptr_t adjusted_allocation_limit =
+        old_generation_allocation_limit_ - new_space_.Capacity();
+
+    if (PromotedTotalSize() >= adjusted_allocation_limit) return true;
+
+    return false;
+  }
+
+  void UpdateNewSpaceReferencesInExternalStringTable(
+      ExternalStringTableUpdaterCallback updater_func);
+
+  void UpdateReferencesInExternalStringTable(
+      ExternalStringTableUpdaterCallback updater_func);
+
+  void ProcessWeakReferences(WeakObjectRetainer* retainer);
+
+  void VisitExternalResources(v8::ExternalResourceVisitor* visitor);
+
+  // An object should be promoted if the object has survived a
+  // scavenge operation.
+  inline bool ShouldBePromoted(Address old_address, int object_size);
+
+  void ClearJSFunctionResultCaches();
+
+  void ClearNormalizedMapCaches();
+
+  GCTracer* tracer() { return &tracer_; }
+
+  // Returns the size of objects residing in non new spaces.
+  intptr_t PromotedSpaceSizeOfObjects();
+
+  double total_regexp_code_generated() { return total_regexp_code_generated_; }
+  void IncreaseTotalRegexpCodeGenerated(int size) {
+    total_regexp_code_generated_ += size;
+  }
+
+  void IncrementCodeGeneratedBytes(bool is_crankshafted, int size) {
+    if (is_crankshafted) {
+      crankshaft_codegen_bytes_generated_ += size;
+    } else {
+      full_codegen_bytes_generated_ += size;
+    }
+  }
+
+  // Update GC statistics that are tracked on the Heap.
+  void UpdateCumulativeGCStatistics(double duration, double spent_in_mutator,
+                                    double marking_time);
+
+  // Returns maximum GC pause.
+  double get_max_gc_pause() { return max_gc_pause_; }
+
+  // Returns maximum size of objects alive after GC.
+  intptr_t get_max_alive_after_gc() { return max_alive_after_gc_; }
+
+  // Returns minimal interval between two subsequent collections.
+  double get_min_in_mutator() { return min_in_mutator_; }
+
+  MarkCompactCollector* mark_compact_collector() {
+    return &mark_compact_collector_;
+  }
+
+  StoreBuffer* store_buffer() { return &store_buffer_; }
+
+  Marking* marking() { return &marking_; }
+
+  IncrementalMarking* incremental_marking() { return &incremental_marking_; }
+
+  ExternalStringTable* external_string_table() {
+    return &external_string_table_;
+  }
+
+  // Returns the current sweep generation.
+  int sweep_generation() { return sweep_generation_; }
+
+  inline Isolate* isolate();
+
+  void CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags);
+  void CallGCEpilogueCallbacks(GCType gc_type, GCCallbackFlags flags);
+
+  inline bool OldGenerationAllocationLimitReached();
+
+  inline void DoScavengeObject(Map* map, HeapObject** slot, HeapObject* obj) {
+    scavenging_visitors_table_.GetVisitor(map)(map, slot, obj);
+  }
+
+  void QueueMemoryChunkForFree(MemoryChunk* chunk);
+  void FreeQueuedChunks();
+
+  int gc_count() const { return gc_count_; }
+
+  // Completely clear the Instanceof cache (to stop it keeping objects alive
+  // around a GC).
+  inline void CompletelyClearInstanceofCache();
+
+  // The roots that have an index less than this are always in old space.
+  static const int kOldSpaceRoots = 0x20;
+
+  uint32_t HashSeed() {
+    uint32_t seed = static_cast<uint32_t>(hash_seed()->value());
+    DCHECK(FLAG_randomize_hashes || seed == 0);
+    return seed;
+  }
+
+  void SetArgumentsAdaptorDeoptPCOffset(int pc_offset) {
+    DCHECK(arguments_adaptor_deopt_pc_offset() == Smi::FromInt(0));
+    set_arguments_adaptor_deopt_pc_offset(Smi::FromInt(pc_offset));
+  }
+
+  void SetConstructStubDeoptPCOffset(int pc_offset) {
+    DCHECK(construct_stub_deopt_pc_offset() == Smi::FromInt(0));
+    set_construct_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
+  }
+
+  void SetGetterStubDeoptPCOffset(int pc_offset) {
+    DCHECK(getter_stub_deopt_pc_offset() == Smi::FromInt(0));
+    set_getter_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
+  }
+
+  void SetSetterStubDeoptPCOffset(int pc_offset) {
+    DCHECK(setter_stub_deopt_pc_offset() == Smi::FromInt(0));
+    set_setter_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
+  }
+
+  // For post mortem debugging.
+  void RememberUnmappedPage(Address page, bool compacted);
+
+  // Global inline caching age: it is incremented on some GCs after context
+  // disposal. We use it to flush inline caches.
+  int global_ic_age() { return global_ic_age_; }
+
+  void AgeInlineCaches() {
+    global_ic_age_ = (global_ic_age_ + 1) & SharedFunctionInfo::ICAgeBits::kMax;
+  }
+
+  bool flush_monomorphic_ics() { return flush_monomorphic_ics_; }
+
+  int64_t amount_of_external_allocated_memory() {
+    return amount_of_external_allocated_memory_;
+  }
+
+  void DeoptMarkedAllocationSites();
+
+  bool MaximumSizeScavenge() { return maximum_size_scavenges_ > 0; }
+
+  bool DeoptMaybeTenuredAllocationSites() {
+    return new_space_.IsAtMaximumCapacity() && maximum_size_scavenges_ == 0;
+  }
+
+  // ObjectStats are kept in two arrays, counts and sizes. Related stats are
+  // stored in a contiguous linear buffer. Stats groups are stored one after
+  // another.
+  enum {
+    FIRST_CODE_KIND_SUB_TYPE = LAST_TYPE + 1,
+    FIRST_FIXED_ARRAY_SUB_TYPE =
+        FIRST_CODE_KIND_SUB_TYPE + Code::NUMBER_OF_KINDS,
+    FIRST_CODE_AGE_SUB_TYPE =
+        FIRST_FIXED_ARRAY_SUB_TYPE + LAST_FIXED_ARRAY_SUB_TYPE + 1,
+    OBJECT_STATS_COUNT = FIRST_CODE_AGE_SUB_TYPE + Code::kCodeAgeCount + 1
+  };
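+  // Illustrative layout of the counts/sizes buffers (fields declared below):
+  //   [0 .. LAST_TYPE]                one slot per InstanceType
+  //   [FIRST_CODE_KIND_SUB_TYPE ..)   one slot per Code::Kind
+  //   [FIRST_FIXED_ARRAY_SUB_TYPE ..) one slot per fixed array sub type
+  //   [FIRST_CODE_AGE_SUB_TYPE ..)    one slot per code age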
+
+  void RecordObjectStats(InstanceType type, size_t size) {
+    DCHECK(type <= LAST_TYPE);
+    object_counts_[type]++;
+    object_sizes_[type] += size;
+  }
+
+  void RecordCodeSubTypeStats(int code_sub_type, int code_age, size_t size) {
+    int code_sub_type_index = FIRST_CODE_KIND_SUB_TYPE + code_sub_type;
+    int code_age_index =
+        FIRST_CODE_AGE_SUB_TYPE + code_age - Code::kFirstCodeAge;
+    DCHECK(code_sub_type_index >= FIRST_CODE_KIND_SUB_TYPE &&
+           code_sub_type_index < FIRST_CODE_AGE_SUB_TYPE);
+    DCHECK(code_age_index >= FIRST_CODE_AGE_SUB_TYPE &&
+           code_age_index < OBJECT_STATS_COUNT);
+    object_counts_[code_sub_type_index]++;
+    object_sizes_[code_sub_type_index] += size;
+    object_counts_[code_age_index]++;
+    object_sizes_[code_age_index] += size;
+  }
+
+  void RecordFixedArraySubTypeStats(int array_sub_type, size_t size) {
+    DCHECK(array_sub_type <= LAST_FIXED_ARRAY_SUB_TYPE);
+    object_counts_[FIRST_FIXED_ARRAY_SUB_TYPE + array_sub_type]++;
+    object_sizes_[FIRST_FIXED_ARRAY_SUB_TYPE + array_sub_type] += size;
+  }
+
+  void CheckpointObjectStats();
+
+  // We don't use a LockGuard here since we want to lock the heap
+  // only when FLAG_concurrent_recompilation is true.
+  class RelocationLock {
+   public:
+    explicit RelocationLock(Heap* heap) : heap_(heap) {
+      heap_->relocation_mutex_.Lock();
+    }
+
+
+    ~RelocationLock() { heap_->relocation_mutex_.Unlock(); }
+
+   private:
+    Heap* heap_;
+  };
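+  // Illustrative usage (sketch; |heap| is a placeholder):
+  //   {
+  //     Heap::RelocationLock relocation_lock(heap);
+  //     // ... work that must not overlap with object relocation ...
+  //   }  // The mutex is released when the lock goes out of scope.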
+
+  void AddWeakObjectToCodeDependency(Handle<Object> obj,
+                                     Handle<DependentCode> dep);
+
+  DependentCode* LookupWeakObjectToCodeDependency(Handle<Object> obj);
+
+  void InitializeWeakObjectToCodeTable() {
+    set_weak_object_to_code_table(undefined_value());
+  }
+
+  void EnsureWeakObjectToCodeTable();
+
+  static void FatalProcessOutOfMemory(const char* location,
+                                      bool take_snapshot = false);
+
+  // This event is triggered after a successful allocation of a new object made
+  // by the runtime. Allocations of target space for object evacuation do not
+  // trigger the event. In order to track ALL allocations one must turn off
+  // FLAG_inline_new and FLAG_use_allocation_folding.
+  inline void OnAllocationEvent(HeapObject* object, int size_in_bytes);
+
+  // This event is triggered after an object is moved to a new place.
+  inline void OnMoveEvent(HeapObject* target, HeapObject* source,
+                          int size_in_bytes);
+
+ protected:
+  // Methods made available to tests.
+
+  // Allocates a JS Map in the heap.
+  MUST_USE_RESULT AllocationResult
+      AllocateMap(InstanceType instance_type, int instance_size,
+                  ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND);
+
+  // Allocates and initializes a new JavaScript object based on a
+  // constructor.
+  // If allocation_site is non-null, then a memento is emitted after the object
+  // that points to the site.
+  MUST_USE_RESULT AllocationResult
+      AllocateJSObject(JSFunction* constructor,
+                       PretenureFlag pretenure = NOT_TENURED,
+                       AllocationSite* allocation_site = NULL);
+
+  // Allocates and initializes a new JavaScript object based on a map.
+  // Passing an allocation site means that a memento will be created that
+  // points to the site.
+  MUST_USE_RESULT AllocationResult
+      AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure = NOT_TENURED,
+                              bool alloc_props = true,
+                              AllocationSite* allocation_site = NULL);
+
+  // Allocates a HeapNumber from value.
+  MUST_USE_RESULT AllocationResult
+      AllocateHeapNumber(double value, MutableMode mode = IMMUTABLE,
+                         PretenureFlag pretenure = NOT_TENURED);
+
+  // Allocate a byte array of the specified length
+  MUST_USE_RESULT AllocationResult
+      AllocateByteArray(int length, PretenureFlag pretenure = NOT_TENURED);
+
+  // Copy the code and scope info part of the code object, but insert
+  // the provided data as the relocation information.
+  MUST_USE_RESULT AllocationResult
+      CopyCode(Code* code, Vector<byte> reloc_info);
+
+  MUST_USE_RESULT AllocationResult CopyCode(Code* code);
+
+  // Allocates a fixed array initialized with undefined values
+  MUST_USE_RESULT AllocationResult
+      AllocateFixedArray(int length, PretenureFlag pretenure = NOT_TENURED);
+
+ private:
+  Heap();
+
+  // The amount of external memory registered through the API and kept alive
+  // by global handles.
+  int64_t amount_of_external_allocated_memory_;
+
+  // Caches the amount of external memory registered at the last global gc.
+  int64_t amount_of_external_allocated_memory_at_last_global_gc_;
+
+  // This can be calculated directly from a pointer to the heap; however, it is
+  // more expedient to get at the isolate directly from within Heap methods.
+  Isolate* isolate_;
+
+  Object* roots_[kRootListLength];
+
+  size_t code_range_size_;
+  int reserved_semispace_size_;
+  int max_semi_space_size_;
+  int initial_semispace_size_;
+  intptr_t max_old_generation_size_;
+  intptr_t max_executable_size_;
+  intptr_t maximum_committed_;
+
+  // For keeping track of how much data has survived
+  // scavenge since the last new space expansion.
+  int survived_since_last_expansion_;
+
+  // For keeping track of when to flush RegExp code.
+  int sweep_generation_;
+
+  int always_allocate_scope_depth_;
+
+  // For keeping track of context disposals.
+  int contexts_disposed_;
+
+  int global_ic_age_;
+
+  bool flush_monomorphic_ics_;
+
+  int scan_on_scavenge_pages_;
+
+  NewSpace new_space_;
+  OldSpace* old_pointer_space_;
+  OldSpace* old_data_space_;
+  OldSpace* code_space_;
+  MapSpace* map_space_;
+  CellSpace* cell_space_;
+  PropertyCellSpace* property_cell_space_;
+  LargeObjectSpace* lo_space_;
+  HeapState gc_state_;
+  int gc_post_processing_depth_;
+  Address new_space_top_after_last_gc_;
+
+  // Returns the amount of external memory registered since last global gc.
+  int64_t PromotedExternalMemorySize();
+
+  // How many "runtime allocations" happened.
+  uint32_t allocations_count_;
+
+  // Running hash over allocations performed.
+  uint32_t raw_allocations_hash_;
+
+  // Countdown counter, dumps allocation hash when 0.
+  uint32_t dump_allocations_hash_countdown_;
+
+  // How many mark-sweep collections happened.
+  unsigned int ms_count_;
+
+  // How many GCs have happened.
+  unsigned int gc_count_;
+
+  // For post mortem debugging.
+  static const int kRememberedUnmappedPages = 128;
+  int remembered_unmapped_pages_index_;
+  Address remembered_unmapped_pages_[kRememberedUnmappedPages];
+
+  // Total length of the strings we failed to flatten since the last GC.
+  int unflattened_strings_length_;
+
+#define ROOT_ACCESSOR(type, name, camel_name)                                 \
+  inline void set_##name(type* value) {                                       \
+    /* The deserializer makes use of the fact that these common roots are */  \
+    /* never in new space and never on a page that is being compacted.    */  \
+    DCHECK(k##camel_name##RootIndex >= kOldSpaceRoots || !InNewSpace(value)); \
+    roots_[k##camel_name##RootIndex] = value;                                 \
+  }
+  ROOT_LIST(ROOT_ACCESSOR)
+#undef ROOT_ACCESSOR
+
+#ifdef DEBUG
+  // If the --gc-interval flag is set to a positive value, this
+  // variable holds the number of allocations remaining until the
+  // next failure and garbage collection.
+  int allocation_timeout_;
+#endif  // DEBUG
+
+  // Limit that triggers a global GC on the next (normally caused) GC.  This
+  // is checked when we have already decided to do a GC to help determine
+  // which collector to invoke, before expanding a paged space in the old
+  // generation and on every allocation in large object space.
+  intptr_t old_generation_allocation_limit_;
+
+  // Indicates that an allocation has failed in the old generation since the
+  // last GC.
+  bool old_gen_exhausted_;
+
+  // Indicates that inline bump-pointer allocation has been globally disabled
+  // for all spaces. This is used to disable allocations in generated code.
+  bool inline_allocation_disabled_;
+
+  // Weak list heads, threaded through the objects.
+  // List heads are initialized lazily and contain the undefined_value at start.
+  Object* native_contexts_list_;
+  Object* array_buffers_list_;
+  Object* allocation_sites_list_;
+
+  // WeakHashTable that maps objects embedded in optimized code to dependent
+  // code list. It is initialized lazily and contains the undefined_value at
+  // start.
+  Object* weak_object_to_code_table_;
+
+  // List of encountered weak collections (JSWeakMap and JSWeakSet) during
+  // marking. It is initialized during marking, destroyed after marking and
+  // contains Smi(0) while marking is not active.
+  Object* encountered_weak_collections_;
+
+  StoreBufferRebuilder store_buffer_rebuilder_;
+
+  struct StringTypeTable {
+    InstanceType type;
+    int size;
+    RootListIndex index;
+  };
+
+  struct ConstantStringTable {
+    const char* contents;
+    RootListIndex index;
+  };
+
+  struct StructTable {
+    InstanceType type;
+    int size;
+    RootListIndex index;
+  };
+
+  static const StringTypeTable string_type_table[];
+  static const ConstantStringTable constant_string_table[];
+  static const StructTable struct_table[];
+
+  // The special hidden string which is an empty string, but does not match
+  // any string when looked up in properties.
+  String* hidden_string_;
+
+  // GC callback function, called before and after mark-compact GC.
+  // Allocations in the callback function are disallowed.
+  struct GCPrologueCallbackPair {
+    GCPrologueCallbackPair(v8::Isolate::GCPrologueCallback callback,
+                           GCType gc_type, bool pass_isolate)
+        : callback(callback), gc_type(gc_type), pass_isolate_(pass_isolate) {}
+    bool operator==(const GCPrologueCallbackPair& pair) const {
+      return pair.callback == callback;
+    }
+    v8::Isolate::GCPrologueCallback callback;
+    GCType gc_type;
+    // TODO(dcarney): remove variable
+    bool pass_isolate_;
+  };
+  List<GCPrologueCallbackPair> gc_prologue_callbacks_;
+
+  struct GCEpilogueCallbackPair {
+    GCEpilogueCallbackPair(v8::Isolate::GCPrologueCallback callback,
+                           GCType gc_type, bool pass_isolate)
+        : callback(callback), gc_type(gc_type), pass_isolate_(pass_isolate) {}
+    bool operator==(const GCEpilogueCallbackPair& pair) const {
+      return pair.callback == callback;
+    }
+    v8::Isolate::GCPrologueCallback callback;
+    GCType gc_type;
+    // TODO(dcarney): remove variable
+    bool pass_isolate_;
+  };
+  List<GCEpilogueCallbackPair> gc_epilogue_callbacks_;
+
+  // Support for computing object sizes during GC.
+  HeapObjectCallback gc_safe_size_of_old_object_;
+  static int GcSafeSizeOfOldObject(HeapObject* object);
+
+  // Update the GC state. Called from the mark-compact collector.
+  void MarkMapPointersAsEncoded(bool encoded) {
+    DCHECK(!encoded);
+    gc_safe_size_of_old_object_ = &GcSafeSizeOfOldObject;
+  }
+
+  // Code that should be run before and after each GC.  Includes some
+  // reporting/verification activities when compiled with DEBUG set.
+  void GarbageCollectionPrologue();
+  void GarbageCollectionEpilogue();
+
+  // Pretenuring decisions are made based on feedback collected during new
+  // space evacuation. Note that between feedback collection and calling this
+  // method, objects in old space must not move.
+  // Right now we only process pretenuring feedback in high promotion mode.
+  void ProcessPretenuringFeedback();
+
+  // Checks whether a global GC is necessary
+  GarbageCollector SelectGarbageCollector(AllocationSpace space,
+                                          const char** reason);
+
+  // Make sure there is a filler value behind the top of the new space
+  // so that the GC does not confuse some uninitialized/stale memory
+  // with the allocation memento of the object at the top.
+  void EnsureFillerObjectAtTop();
+
+  // Ensure that we have swept all spaces in such a way that we can iterate
+  // over all objects.  May cause a GC.
+  void MakeHeapIterable();
+
+  // Performs a garbage collection operation.
+  // Returns whether there is a chance that another major GC could
+  // collect more garbage.
+  bool CollectGarbage(
+      GarbageCollector collector, const char* gc_reason,
+      const char* collector_reason,
+      const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
+
+  // Performs a garbage collection.
+  // Returns whether there is a chance that another major GC could
+  // collect more garbage.
+  bool PerformGarbageCollection(
+      GarbageCollector collector,
+      const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
+
+  inline void UpdateOldSpaceLimits();
+
+  // Selects the proper allocation space depending on the given object
+  // size, pretenuring decision, and preferred old-space.
+  static AllocationSpace SelectSpace(int object_size,
+                                     AllocationSpace preferred_old_space,
+                                     PretenureFlag pretenure) {
+    DCHECK(preferred_old_space == OLD_POINTER_SPACE ||
+           preferred_old_space == OLD_DATA_SPACE);
+    if (object_size > Page::kMaxRegularHeapObjectSize) return LO_SPACE;
+    return (pretenure == TENURED) ? preferred_old_space : NEW_SPACE;
+  }
+
+  // Allocate an uninitialized object.  The memory is non-executable if the
+  // hardware and OS allow.  This is the single choke-point for allocations
+  // performed by the runtime and should not be bypassed (to extend this to
+  // inlined allocations, use the Heap::DisableInlineAllocation() support).
+  MUST_USE_RESULT inline AllocationResult AllocateRaw(
+      int size_in_bytes, AllocationSpace space, AllocationSpace retry_space);
+
+  // Allocates a heap object based on the map.
+  MUST_USE_RESULT AllocationResult
+      Allocate(Map* map, AllocationSpace space,
+               AllocationSite* allocation_site = NULL);
+
+  // Allocates a partial map for bootstrapping.
+  MUST_USE_RESULT AllocationResult
+      AllocatePartialMap(InstanceType instance_type, int instance_size);
+
+  // Initializes a JSObject based on its map.
+  void InitializeJSObjectFromMap(JSObject* obj, FixedArray* properties,
+                                 Map* map);
+  void InitializeAllocationMemento(AllocationMemento* memento,
+                                   AllocationSite* allocation_site);
+
+  // Allocate a block of memory in the given space (filled with a filler).
+  // Used as a fall-back for generated code when the space is full.
+  MUST_USE_RESULT AllocationResult
+      AllocateFillerObject(int size, bool double_align, AllocationSpace space);
+
+  // Allocate an uninitialized fixed array.
+  MUST_USE_RESULT AllocationResult
+      AllocateRawFixedArray(int length, PretenureFlag pretenure);
+
+  // Allocate an uninitialized fixed double array.
+  MUST_USE_RESULT AllocationResult
+      AllocateRawFixedDoubleArray(int length, PretenureFlag pretenure);
+
+  // Allocate an initialized fixed array with the given filler value.
+  MUST_USE_RESULT AllocationResult
+      AllocateFixedArrayWithFiller(int length, PretenureFlag pretenure,
+                                   Object* filler);
+
+  // Allocates and partially initializes a String.  There are two String
+  // encodings: one-byte and two-byte.  These functions allocate a string of
+  // the given length and set its map and length fields.  The characters of
+  // the string are uninitialized.
+  MUST_USE_RESULT AllocationResult
+      AllocateRawOneByteString(int length, PretenureFlag pretenure);
+  MUST_USE_RESULT AllocationResult
+      AllocateRawTwoByteString(int length, PretenureFlag pretenure);
+
+  bool CreateInitialMaps();
+  void CreateInitialObjects();
+
+  // Allocates an internalized string in old space based on the character
+  // stream.
+  MUST_USE_RESULT inline AllocationResult AllocateInternalizedStringFromUtf8(
+      Vector<const char> str, int chars, uint32_t hash_field);
+
+  MUST_USE_RESULT inline AllocationResult AllocateOneByteInternalizedString(
+      Vector<const uint8_t> str, uint32_t hash_field);
+
+  MUST_USE_RESULT inline AllocationResult AllocateTwoByteInternalizedString(
+      Vector<const uc16> str, uint32_t hash_field);
+
+  template <bool is_one_byte, typename T>
+  MUST_USE_RESULT AllocationResult
+      AllocateInternalizedStringImpl(T t, int chars, uint32_t hash_field);
+
+  template <typename T>
+  MUST_USE_RESULT inline AllocationResult AllocateInternalizedStringImpl(
+      T t, int chars, uint32_t hash_field);
+
+  // Allocates an uninitialized fixed array. It must be filled by the caller.
+  MUST_USE_RESULT AllocationResult AllocateUninitializedFixedArray(int length);
+
+  // Make a copy of src and return it. Returns
+  // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
+  MUST_USE_RESULT inline AllocationResult CopyFixedArray(FixedArray* src);
+
+  // Make a copy of src, set the map, and return the copy. Returns
+  // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
+  MUST_USE_RESULT AllocationResult
+      CopyFixedArrayWithMap(FixedArray* src, Map* map);
+
+  // Make a copy of src and return it. Returns
+  // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
+  MUST_USE_RESULT inline AllocationResult CopyFixedDoubleArray(
+      FixedDoubleArray* src);
+
+  // Make a copy of src and return it. Returns
+  // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
+  MUST_USE_RESULT inline AllocationResult CopyConstantPoolArray(
+      ConstantPoolArray* src);
+
+
+  // Computes a single-character string where the character has the given
+  // code. A cache is used for one-byte (Latin1) codes.
+  MUST_USE_RESULT AllocationResult
+      LookupSingleCharacterStringFromCode(uint16_t code);
+
+  // Allocate a symbol in old space.
+  MUST_USE_RESULT AllocationResult AllocateSymbol();
+
+  // Make a copy of src, set the map, and return the copy.
+  MUST_USE_RESULT AllocationResult
+      CopyConstantPoolArrayWithMap(ConstantPoolArray* src, Map* map);
+
+  MUST_USE_RESULT AllocationResult AllocateConstantPoolArray(
+      const ConstantPoolArray::NumberOfEntries& small);
+
+  MUST_USE_RESULT AllocationResult AllocateExtendedConstantPoolArray(
+      const ConstantPoolArray::NumberOfEntries& small,
+      const ConstantPoolArray::NumberOfEntries& extended);
+
+  // Allocates an external array of the specified length and type.
+  MUST_USE_RESULT AllocationResult
+      AllocateExternalArray(int length, ExternalArrayType array_type,
+                            void* external_pointer, PretenureFlag pretenure);
+
+  // Allocates a fixed typed array of the specified length and type.
+  MUST_USE_RESULT AllocationResult
+      AllocateFixedTypedArray(int length, ExternalArrayType array_type,
+                              PretenureFlag pretenure);
+
+  // Make a copy of src and return it.
+  MUST_USE_RESULT AllocationResult CopyAndTenureFixedCOWArray(FixedArray* src);
+
+  // Make a copy of src, set the map, and return the copy.
+  MUST_USE_RESULT AllocationResult
+      CopyFixedDoubleArrayWithMap(FixedDoubleArray* src, Map* map);
+
+  // Allocates a fixed double array with uninitialized values.
+  MUST_USE_RESULT AllocationResult AllocateUninitializedFixedDoubleArray(
+      int length, PretenureFlag pretenure = NOT_TENURED);
+
+  // These Create*EntryStub functions are here and forced to not be inlined
+  // because of a gcc-4.4 bug that assigns wrong vtable entries.
+  NO_INLINE(void CreateJSEntryStub());
+  NO_INLINE(void CreateJSConstructEntryStub());
+
+  void CreateFixedStubs();
+
+  // Allocate empty fixed array.
+  MUST_USE_RESULT AllocationResult AllocateEmptyFixedArray();
+
+  // Allocate empty external array of given type.
+  MUST_USE_RESULT AllocationResult
+      AllocateEmptyExternalArray(ExternalArrayType array_type);
+
+  // Allocate empty fixed typed array of given type.
+  MUST_USE_RESULT AllocationResult
+      AllocateEmptyFixedTypedArray(ExternalArrayType array_type);
+
+  // Allocate empty constant pool array.
+  MUST_USE_RESULT AllocationResult AllocateEmptyConstantPoolArray();
+
+  // Allocate a tenured simple cell.
+  MUST_USE_RESULT AllocationResult AllocateCell(Object* value);
+
+  // Allocate a tenured JS global property cell initialized with the hole.
+  MUST_USE_RESULT AllocationResult AllocatePropertyCell();
+
+  // Allocates a new utility object in the old generation.
+  MUST_USE_RESULT AllocationResult AllocateStruct(InstanceType type);
+
+  // Allocates a new foreign object.
+  MUST_USE_RESULT AllocationResult
+      AllocateForeign(Address address, PretenureFlag pretenure = NOT_TENURED);
+
+  MUST_USE_RESULT AllocationResult
+      AllocateCode(int object_size, bool immovable);
+
+  MUST_USE_RESULT AllocationResult InternalizeStringWithKey(HashTableKey* key);
+
+  MUST_USE_RESULT AllocationResult InternalizeString(String* str);
+
+  // Performs a minor collection in the new generation.
+  void Scavenge();
+
+  // Commits from space if it is uncommitted.
+  void EnsureFromSpaceIsCommitted();
+
+  // Uncommit unused semi space.
+  bool UncommitFromSpace() { return new_space_.UncommitFromSpace(); }
+
+  // Fills in bogus values in from space.
+  void ZapFromSpace();
+
+  static String* UpdateNewSpaceReferenceInExternalStringTableEntry(
+      Heap* heap, Object** pointer);
+
+  Address DoScavenge(ObjectVisitor* scavenge_visitor, Address new_space_front);
+  static void ScavengeStoreBufferCallback(Heap* heap, MemoryChunk* page,
+                                          StoreBufferEvent event);
+
+  // Performs a major collection in the whole heap.
+  void MarkCompact();
+
+  // Code to be run before and after mark-compact.
+  void MarkCompactPrologue();
+
+  void ProcessNativeContexts(WeakObjectRetainer* retainer);
+  void ProcessArrayBuffers(WeakObjectRetainer* retainer);
+  void ProcessAllocationSites(WeakObjectRetainer* retainer);
+
+  // Deopts all code that contains allocation instructions which are tenured
+  // or not tenured. Moreover, it clears the pretenuring allocation site
+  // statistics.
+  void ResetAllAllocationSitesDependentCode(PretenureFlag flag);
+
+  // Evaluates local pretenuring for the old space and calls
+  // ResetAllTenuredAllocationSitesDependentCode if too many objects died in
+  // the old space.
+  void EvaluateOldSpaceLocalPretenuring(uint64_t size_of_objects_before_gc);
+
+  // Called on heap tear-down.
+  void TearDownArrayBuffers();
+
+  // Record statistics before and after garbage collection.
+  void ReportStatisticsBeforeGC();
+  void ReportStatisticsAfterGC();
+
+  // Slow part of scavenge object.
+  static void ScavengeObjectSlow(HeapObject** p, HeapObject* object);
+
+  // Total RegExp code ever generated
+  double total_regexp_code_generated_;
+
+  GCTracer tracer_;
+
+  // Creates and installs the full-sized number string cache.
+  int FullSizeNumberStringCacheLength();
+  // Flush the number to string cache.
+  void FlushNumberStringCache();
+
+  // Sets used allocation sites entries to undefined.
+  void FlushAllocationSitesScratchpad();
+
+  // Initializes the allocation sites scratchpad with undefined values.
+  void InitializeAllocationSitesScratchpad();
+
+  // Adds an allocation site to the scratchpad if there is space left.
+  void AddAllocationSiteToScratchpad(AllocationSite* site,
+                                     ScratchpadSlotMode mode);
+
+  void UpdateSurvivalStatistics(int start_new_space_size);
+
+  static const int kYoungSurvivalRateHighThreshold = 90;
+  static const int kYoungSurvivalRateAllowedDeviation = 15;
+
+  static const int kOldSurvivalRateLowThreshold = 10;
+
+  int high_survival_rate_period_length_;
+  intptr_t promoted_objects_size_;
+  double promotion_rate_;
+  intptr_t semi_space_copied_object_size_;
+  double semi_space_copied_rate_;
+  int nodes_died_in_new_space_;
+  int nodes_copied_in_new_space_;
+  int nodes_promoted_;
+
+  // This is the pretenuring trigger for allocation sites that are in maybe
+  // tenure state. When we switch to the maximum new space size we deoptimize
+  // the code that belongs to the allocation site and derive the lifetime
+  // of the allocation site.
+  unsigned int maximum_size_scavenges_;
+
+  // TODO(hpayer): Allocation site pretenuring may make this method obsolete.
+  // Re-visit incremental marking heuristics.
+  bool IsHighSurvivalRate() { return high_survival_rate_period_length_ > 0; }
+
+  void SelectScavengingVisitorsTable();
+
+  void IdleMarkCompact(const char* message);
+
+  void AdvanceIdleIncrementalMarking(intptr_t step_size);
+
+  bool WorthActivatingIncrementalMarking();
+
+  void ClearObjectStats(bool clear_last_time_stats = false);
+
+  void set_weak_object_to_code_table(Object* value) {
+    DCHECK(!InNewSpace(value));
+    weak_object_to_code_table_ = value;
+  }
+
+  Object** weak_object_to_code_table_address() {
+    return &weak_object_to_code_table_;
+  }
+
+  inline void UpdateAllocationsHash(HeapObject* object);
+  inline void UpdateAllocationsHash(uint32_t value);
+  inline void PrintAlloctionsHash();
+
+  static const int kInitialStringTableSize = 2048;
+  static const int kInitialEvalCacheSize = 64;
+  static const int kInitialNumberStringCacheSize = 256;
+
+  // Object counts and used memory by InstanceType
+  size_t object_counts_[OBJECT_STATS_COUNT];
+  size_t object_counts_last_time_[OBJECT_STATS_COUNT];
+  size_t object_sizes_[OBJECT_STATS_COUNT];
+  size_t object_sizes_last_time_[OBJECT_STATS_COUNT];
+
+  // Maximum GC pause.
+  double max_gc_pause_;
+
+  // Total time spent in GC.
+  double total_gc_time_ms_;
+
+  // Maximum size of objects alive after GC.
+  intptr_t max_alive_after_gc_;
+
+  // Minimal interval between two subsequent collections.
+  double min_in_mutator_;
+
+  // Cumulative GC time spent in marking
+  double marking_time_;
+
+  // Cumulative GC time spent in sweeping
+  double sweeping_time_;
+
+  MarkCompactCollector mark_compact_collector_;
+
+  StoreBuffer store_buffer_;
+
+  Marking marking_;
+
+  IncrementalMarking incremental_marking_;
+
+  GCIdleTimeHandler gc_idle_time_handler_;
+  unsigned int gc_count_at_last_idle_gc_;
+
+  // These two counters are monotonically increasing and never reset.
+  size_t full_codegen_bytes_generated_;
+  size_t crankshaft_codegen_bytes_generated_;
+
+  // If the --deopt_every_n_garbage_collections flag is set to a positive value,
+  // this variable holds the number of garbage collections since the last
+  // deoptimization triggered by garbage collection.
+  int gcs_since_last_deopt_;
+
+#ifdef VERIFY_HEAP
+  int no_weak_object_verification_scope_depth_;
+#endif
+
+  static const int kAllocationSiteScratchpadSize = 256;
+  int allocation_sites_scratchpad_length_;
+
+  static const int kMaxMarkCompactsInIdleRound = 7;
+  static const int kIdleScavengeThreshold = 5;
+
+  // Shared state read by the scavenge collector and set by ScavengeObject.
+  PromotionQueue promotion_queue_;
+
+  // Flag is set when the heap has been configured.  The heap can be repeatedly
+  // configured through the API until it is set up.
+  bool configured_;
+
+  ExternalStringTable external_string_table_;
+
+  VisitorDispatchTable<ScavengingCallback> scavenging_visitors_table_;
+
+  MemoryChunk* chunks_queued_for_free_;
+
+  base::Mutex relocation_mutex_;
+
+  int gc_callbacks_depth_;
+
+  friend class AlwaysAllocateScope;
+  friend class Factory;
+  friend class GCCallbacksScope;
+  friend class GCTracer;
+  friend class HeapIterator;
+  friend class Isolate;
+  friend class MarkCompactCollector;
+  friend class MarkCompactMarkingVisitor;
+  friend class MapCompact;
+#ifdef VERIFY_HEAP
+  friend class NoWeakObjectVerificationScope;
+#endif
+  friend class Page;
+
+  DISALLOW_COPY_AND_ASSIGN(Heap);
+};
+
+
+class HeapStats {
+ public:
+  static const int kStartMarker = 0xDECADE00;
+  static const int kEndMarker = 0xDECADE01;
+
+  int* start_marker;                       //  0
+  int* new_space_size;                     //  1
+  int* new_space_capacity;                 //  2
+  intptr_t* old_pointer_space_size;        //  3
+  intptr_t* old_pointer_space_capacity;    //  4
+  intptr_t* old_data_space_size;           //  5
+  intptr_t* old_data_space_capacity;       //  6
+  intptr_t* code_space_size;               //  7
+  intptr_t* code_space_capacity;           //  8
+  intptr_t* map_space_size;                //  9
+  intptr_t* map_space_capacity;            // 10
+  intptr_t* cell_space_size;               // 11
+  intptr_t* cell_space_capacity;           // 12
+  intptr_t* lo_space_size;                 // 13
+  int* global_handle_count;                // 14
+  int* weak_global_handle_count;           // 15
+  int* pending_global_handle_count;        // 16
+  int* near_death_global_handle_count;     // 17
+  int* free_global_handle_count;           // 18
+  intptr_t* memory_allocator_size;         // 19
+  intptr_t* memory_allocator_capacity;     // 20
+  int* objects_per_type;                   // 21
+  int* size_per_type;                      // 22
+  int* os_error;                           // 23
+  int* end_marker;                         // 24
+  intptr_t* property_cell_space_size;      // 25
+  intptr_t* property_cell_space_capacity;  // 26
+};
+
+
+class AlwaysAllocateScope {
+ public:
+  explicit inline AlwaysAllocateScope(Isolate* isolate);
+  inline ~AlwaysAllocateScope();
+
+ private:
+  // Implicitly disable artificial allocation failures.
+  Heap* heap_;
+  DisallowAllocationFailure daf_;
+};
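+// Illustrative usage (sketch; |isolate| is a placeholder): within the scope,
+// artificial allocation failures are suppressed.
+//   {
+//     AlwaysAllocateScope always_allocate(isolate);
+//     // ... allocations that must not be failed artificially ...
+//   }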
+
+
+#ifdef VERIFY_HEAP
+class NoWeakObjectVerificationScope {
+ public:
+  inline NoWeakObjectVerificationScope();
+  inline ~NoWeakObjectVerificationScope();
+};
+#endif
+
+
+class GCCallbacksScope {
+ public:
+  explicit inline GCCallbacksScope(Heap* heap);
+  inline ~GCCallbacksScope();
+
+  inline bool CheckReenter();
+
+ private:
+  Heap* heap_;
+};
+
+
+// Visitor class to verify interior pointers in spaces that do not contain
+// or care about intergenerational references. All heap object pointers have to
+// point into the heap to a location that has a map pointer at its first word.
+// Caveat: Heap::Contains is an approximation because it can return true for
+// objects in a heap space but above the allocation pointer.
+class VerifyPointersVisitor : public ObjectVisitor {
+ public:
+  inline void VisitPointers(Object** start, Object** end);
+};
+
+
+// Verify that all objects are Smis.
+class VerifySmisVisitor : public ObjectVisitor {
+ public:
+  inline void VisitPointers(Object** start, Object** end);
+};
+
+
+// Space iterator for iterating over all spaces of the heap.  Returns each space
+// in turn, and null when it is done.
+class AllSpaces BASE_EMBEDDED {
+ public:
+  explicit AllSpaces(Heap* heap) : heap_(heap), counter_(FIRST_SPACE) {}
+  Space* next();
+
+ private:
+  Heap* heap_;
+  int counter_;
+};
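+// Illustrative usage (sketch; |heap| is a placeholder):
+//   AllSpaces spaces(heap);
+//   for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
+//     // ... inspect |space| ...
+//   }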
+
+
+// Space iterator for iterating over all old spaces of the heap: Old pointer
+// space, old data space and code space.  Returns each space in turn, and null
+// when it is done.
+class OldSpaces BASE_EMBEDDED {
+ public:
+  explicit OldSpaces(Heap* heap) : heap_(heap), counter_(OLD_POINTER_SPACE) {}
+  OldSpace* next();
+
+ private:
+  Heap* heap_;
+  int counter_;
+};
+
+
+// Space iterator for iterating over all the paged spaces of the heap: Map
+// space, old pointer space, old data space, code space and cell space.  Returns
+// each space in turn, and null when it is done.
+class PagedSpaces BASE_EMBEDDED {
+ public:
+  explicit PagedSpaces(Heap* heap) : heap_(heap), counter_(OLD_POINTER_SPACE) {}
+  PagedSpace* next();
+
+ private:
+  Heap* heap_;
+  int counter_;
+};
+
+
+// Space iterator for iterating over all spaces of the heap.
+// For each space an object iterator is provided. The deallocation of the
+// returned object iterators is handled by the space iterator.
+class SpaceIterator : public Malloced {
+ public:
+  explicit SpaceIterator(Heap* heap);
+  SpaceIterator(Heap* heap, HeapObjectCallback size_func);
+  virtual ~SpaceIterator();
+
+  bool has_next();
+  ObjectIterator* next();
+
+ private:
+  ObjectIterator* CreateIterator();
+
+  Heap* heap_;
+  int current_space_;         // from enum AllocationSpace.
+  ObjectIterator* iterator_;  // object iterator for the current space.
+  HeapObjectCallback size_func_;
+};
+
+
+// A HeapIterator provides iteration over the whole heap. It
+// aggregates the specific iterators for the different spaces as
+// these can only iterate over one space only.
+//
+// HeapIterator ensures there is no allocation during its lifetime
+// (using an embedded DisallowHeapAllocation instance).
+//
+// HeapIterator can skip free list nodes (that is, de-allocated heap
+// objects that still remain in the heap). As implementation of free
+// nodes filtering uses GC marks, it can't be used during MS/MC GC
+// phases. Also, it is forbidden to interrupt iteration in this mode,
+// as this will leave heap objects marked (and thus, unusable).
+class HeapObjectsFilter;
+
+class HeapIterator BASE_EMBEDDED {
+ public:
+  enum HeapObjectsFiltering { kNoFiltering, kFilterUnreachable };
+
+  explicit HeapIterator(Heap* heap);
+  HeapIterator(Heap* heap, HeapObjectsFiltering filtering);
+  ~HeapIterator();
+
+  HeapObject* next();
+  void reset();
+
+ private:
+  struct MakeHeapIterableHelper {
+    explicit MakeHeapIterableHelper(Heap* heap) { heap->MakeHeapIterable(); }
+  };
+
+  // Perform the initialization.
+  void Init();
+  // Perform all necessary shutdown (destruction) work.
+  void Shutdown();
+  HeapObject* NextObject();
+
+  MakeHeapIterableHelper make_heap_iterable_helper_;
+  DisallowHeapAllocation no_heap_allocation_;
+  Heap* heap_;
+  HeapObjectsFiltering filtering_;
+  HeapObjectsFilter* filter_;
+  // Space iterator for iterating all the spaces.
+  SpaceIterator* space_iterator_;
+  // Object iterator for the space currently being iterated.
+  ObjectIterator* object_iterator_;
+};
+
+
+// Cache for mapping (map, property name) into field offset.
+// Cleared at startup and prior to mark sweep collection.
+class KeyedLookupCache {
+ public:
+  // Lookup field offset for (map, name). If absent, -1 is returned.
+  int Lookup(Handle<Map> map, Handle<Name> name);
+
+  // Update an element in the cache.
+  void Update(Handle<Map> map, Handle<Name> name, int field_offset);
+
+  // Clear the cache.
+  void Clear();
+
+  static const int kLength = 256;
+  static const int kCapacityMask = kLength - 1;
+  static const int kMapHashShift = 5;
+  static const int kHashMask = -4;  // Zero the last two bits.
+  static const int kEntriesPerBucket = 4;
+  static const int kEntryLength = 2;
+  static const int kMapIndex = 0;
+  static const int kKeyIndex = 1;
+  static const int kNotFound = -1;
+
+  // kEntriesPerBucket should be a power of 2.
+  STATIC_ASSERT((kEntriesPerBucket & (kEntriesPerBucket - 1)) == 0);
+  STATIC_ASSERT(kEntriesPerBucket == -kHashMask);
+
+ private:
+  KeyedLookupCache() {
+    for (int i = 0; i < kLength; ++i) {
+      keys_[i].map = NULL;
+      keys_[i].name = NULL;
+      field_offsets_[i] = kNotFound;
+    }
+  }
+
+  static inline int Hash(Handle<Map> map, Handle<Name> name);
+
+  // Get the address of the keys and field_offsets arrays.  Used in
+  // generated code to perform cache lookups.
+  Address keys_address() { return reinterpret_cast<Address>(&keys_); }
+
+  Address field_offsets_address() {
+    return reinterpret_cast<Address>(&field_offsets_);
+  }
+
+  struct Key {
+    Map* map;
+    Name* name;
+  };
+
+  Key keys_[kLength];
+  int field_offsets_[kLength];
+
+  friend class ExternalReference;
+  friend class Isolate;
+  DISALLOW_COPY_AND_ASSIGN(KeyedLookupCache);
+};
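
The constants above imply the cache layout rather than stating it: entries live in four-slot buckets, and kHashMask = -4 rounds any hash down to the first slot of its bucket. A standalone sketch of that index arithmetic with plain integers; the BucketStart helper is invented for illustration.

    #include <cassert>
    #include <cstdint>

    // Constants mirrored from KeyedLookupCache above.
    static const int kLength = 256;
    static const int kCapacityMask = kLength - 1;
    static const int kHashMask = -4;  // Zero the last two bits.
    static const int kEntriesPerBucket = 4;

    // Hypothetical helper: round an arbitrary hash down to the first slot of
    // its four-entry bucket; a probe would then scan kEntriesPerBucket slots.
    static int BucketStart(uint32_t hash) {
      return static_cast<int>(hash) & kCapacityMask & kHashMask;
    }

    int main() {
      assert(BucketStart(0x2B) == 0x28);                   // 0x2B rounds down to 0x28.
      assert(BucketStart(0x2B) % kEntriesPerBucket == 0);  // Always bucket-aligned.
      // A probe would then try slots 0x28, 0x29, 0x2A and 0x2B for the entry.
      return 0;
    }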
+
+
+// Cache for mapping (map, property name) into descriptor index.
+// The cache contains both positive and negative results.
+// A descriptor index equal to kNotFound means the property is absent.
+// Cleared at startup and prior to any gc.
+class DescriptorLookupCache {
+ public:
+  // Lookup descriptor index for (map, name).
+  // If absent, kAbsent is returned.
+  int Lookup(Map* source, Name* name) {
+    if (!name->IsUniqueName()) return kAbsent;
+    int index = Hash(source, name);
+    Key& key = keys_[index];
+    if ((key.source == source) && (key.name == name)) return results_[index];
+    return kAbsent;
+  }
+
+  // Update an element in the cache.
+  void Update(Map* source, Name* name, int result) {
+    DCHECK(result != kAbsent);
+    if (name->IsUniqueName()) {
+      int index = Hash(source, name);
+      Key& key = keys_[index];
+      key.source = source;
+      key.name = name;
+      results_[index] = result;
+    }
+  }
+
+  // Clear the cache.
+  void Clear();
+
+  static const int kAbsent = -2;
+
+ private:
+  DescriptorLookupCache() {
+    for (int i = 0; i < kLength; ++i) {
+      keys_[i].source = NULL;
+      keys_[i].name = NULL;
+      results_[i] = kAbsent;
+    }
+  }
+
+  static int Hash(Object* source, Name* name) {
+    // Uses only lower 32 bits if pointers are larger.
+    uint32_t source_hash =
+        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(source)) >>
+        kPointerSizeLog2;
+    uint32_t name_hash =
+        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name)) >>
+        kPointerSizeLog2;
+    return (source_hash ^ name_hash) % kLength;
+  }
+
+  static const int kLength = 64;
+  struct Key {
+    Map* source;
+    Name* name;
+  };
+
+  Key keys_[kLength];
+  int results_[kLength];
+
+  friend class Isolate;
+  DISALLOW_COPY_AND_ASSIGN(DescriptorLookupCache);
+};
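
DescriptorLookupCache::Hash above drops each pointer's alignment bits, XORs the two values, and reduces modulo the table length. The same computation on raw integer addresses, as a standalone sketch; the pointer values and the assumption of 8-byte pointers (kPointerSizeLog2 == 3) are made up for the example.

    #include <cstdint>
    #include <cstdio>

    static const int kLength = 64;          // Matches DescriptorLookupCache::kLength.
    static const int kPointerSizeLog2 = 3;  // Assumes 8-byte pointers.

    // Same shape as DescriptorLookupCache::Hash: drop the alignment bits of
    // each pointer, XOR them, and reduce modulo the table length.
    static int Hash(uintptr_t source, uintptr_t name) {
      uint32_t source_hash = static_cast<uint32_t>(source) >> kPointerSizeLog2;
      uint32_t name_hash = static_cast<uint32_t>(name) >> kPointerSizeLog2;
      return (source_hash ^ name_hash) % kLength;
    }

    int main() {
      uintptr_t map_addr = 0x5c8a1001;   // Hypothetical tagged Map* value.
      uintptr_t name_addr = 0x5c8a2041;  // Hypothetical tagged Name* value.
      printf("cache index = %d\n", Hash(map_addr, name_addr));
      return 0;
    }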
+
+
+class RegExpResultsCache {
+ public:
+  enum ResultsCacheType { REGEXP_MULTIPLE_INDICES, STRING_SPLIT_SUBSTRINGS };
+
+  // Attempt to retrieve a cached result.  On failure, 0 is returned as a Smi.
+  // On success, the returned result is guaranteed to be a COW-array.
+  static Object* Lookup(Heap* heap, String* key_string, Object* key_pattern,
+                        ResultsCacheType type);
+  // Attempt to add value_array to the cache specified by type.  On success,
+  // value_array is turned into a COW-array.
+  static void Enter(Isolate* isolate, Handle<String> key_string,
+                    Handle<Object> key_pattern, Handle<FixedArray> value_array,
+                    ResultsCacheType type);
+  static void Clear(FixedArray* cache);
+  static const int kRegExpResultsCacheSize = 0x100;
+
+ private:
+  static const int kArrayEntriesPerCacheEntry = 4;
+  static const int kStringOffset = 0;
+  static const int kPatternOffset = 1;
+  static const int kArrayOffset = 2;
+};
+
+
+// Abstract base class for checking whether a weak object should be retained.
+class WeakObjectRetainer {
+ public:
+  virtual ~WeakObjectRetainer() {}
+
+  // Return whether this object should be retained. If NULL is returned the
+  // object has no references. Otherwise the address of the retained object
+  // should be returned as in some GC situations the object has been moved.
+  virtual Object* RetainAs(Object* object) = 0;
+};
+
+
+// Intrusive object marking uses the least significant bit of a
+// heap object's map word to mark objects.
+// Normally all map words have the least significant bit set
+// because they contain a tagged map pointer.
+// If the bit is not set, the object is marked.
+// All objects should be unmarked before resuming
+// JavaScript execution.
+class IntrusiveMarking {
+ public:
+  static bool IsMarked(HeapObject* object) {
+    return (object->map_word().ToRawValue() & kNotMarkedBit) == 0;
+  }
+
+  static void ClearMark(HeapObject* object) {
+    uintptr_t map_word = object->map_word().ToRawValue();
+    object->set_map_word(MapWord::FromRawValue(map_word | kNotMarkedBit));
+    DCHECK(!IsMarked(object));
+  }
+
+  static void SetMark(HeapObject* object) {
+    uintptr_t map_word = object->map_word().ToRawValue();
+    object->set_map_word(MapWord::FromRawValue(map_word & ~kNotMarkedBit));
+    DCHECK(IsMarked(object));
+  }
+
+  static Map* MapOfMarkedObject(HeapObject* object) {
+    uintptr_t map_word = object->map_word().ToRawValue();
+    return MapWord::FromRawValue(map_word | kNotMarkedBit).ToMap();
+  }
+
+  static int SizeOfMarkedObject(HeapObject* object) {
+    return object->SizeFromMap(MapOfMarkedObject(object));
+  }
+
+ private:
+  static const uintptr_t kNotMarkedBit = 0x1;
+  STATIC_ASSERT((kHeapObjectTag & kNotMarkedBit) != 0);  // NOLINT
+};
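
A standalone illustration of the map-word trick described in the comment above: because a tagged map pointer always has its low bit set, clearing that bit can double as a mark. The raw word value used here is invented.

    #include <cassert>
    #include <cstdint>

    static const uintptr_t kNotMarkedBit = 0x1;  // Same bit IntrusiveMarking uses.

    static bool IsMarked(uintptr_t map_word) { return (map_word & kNotMarkedBit) == 0; }
    static uintptr_t SetMark(uintptr_t map_word) { return map_word & ~kNotMarkedBit; }
    static uintptr_t ClearMark(uintptr_t map_word) { return map_word | kNotMarkedBit; }

    int main() {
      uintptr_t map_word = 0x12345679;        // Hypothetical tagged map pointer.
      assert(!IsMarked(map_word));            // Tag bit set => unmarked.
      uintptr_t marked = SetMark(map_word);
      assert(IsMarked(marked));               // Bit cleared => marked.
      assert(ClearMark(marked) == map_word);  // Restoring the bit recovers the map.
      return 0;
    }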
+
+
+#ifdef DEBUG
+// Helper class for tracing paths to a search target Object from all roots.
+// The TracePathFrom() method can be used to trace paths from a specific
+// object to the search target object.
+class PathTracer : public ObjectVisitor {
+ public:
+  enum WhatToFind {
+    FIND_ALL,   // Will find all matches.
+    FIND_FIRST  // Will stop the search after first match.
+  };
+
+  // Tags 0, 1, and 3 are used. Use 2 for marking visited HeapObject.
+  static const int kMarkTag = 2;
+
+  // For the WhatToFind arg, if FIND_FIRST is specified, tracing will stop
+  // after the first match.  If FIND_ALL is specified, then tracing will be
+  // done for all matches.
+  PathTracer(Object* search_target, WhatToFind what_to_find,
+             VisitMode visit_mode)
+      : search_target_(search_target),
+        found_target_(false),
+        found_target_in_trace_(false),
+        what_to_find_(what_to_find),
+        visit_mode_(visit_mode),
+        object_stack_(20),
+        no_allocation() {}
+
+  virtual void VisitPointers(Object** start, Object** end);
+
+  void Reset();
+  void TracePathFrom(Object** root);
+
+  bool found() const { return found_target_; }
+
+  static Object* const kAnyGlobalObject;
+
+ protected:
+  class MarkVisitor;
+  class UnmarkVisitor;
+
+  void MarkRecursively(Object** p, MarkVisitor* mark_visitor);
+  void UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor);
+  virtual void ProcessResults();
+
+  Object* search_target_;
+  bool found_target_;
+  bool found_target_in_trace_;
+  WhatToFind what_to_find_;
+  VisitMode visit_mode_;
+  List<Object*> object_stack_;
+
+  DisallowHeapAllocation no_allocation;  // i.e. no gc allowed.
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(PathTracer);
+};
+#endif  // DEBUG
+}
+}  // namespace v8::internal
+
+#endif  // V8_HEAP_HEAP_H_
diff --git a/src/heap/incremental-marking-inl.h b/src/heap/incremental-marking-inl.h
new file mode 100644
index 0000000..5258c5c
--- /dev/null
+++ b/src/heap/incremental-marking-inl.h
@@ -0,0 +1,117 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_INCREMENTAL_MARKING_INL_H_
+#define V8_HEAP_INCREMENTAL_MARKING_INL_H_
+
+#include "src/heap/incremental-marking.h"
+
+namespace v8 {
+namespace internal {
+
+
+bool IncrementalMarking::BaseRecordWrite(HeapObject* obj, Object** slot,
+                                         Object* value) {
+  HeapObject* value_heap_obj = HeapObject::cast(value);
+  MarkBit value_bit = Marking::MarkBitFrom(value_heap_obj);
+  if (Marking::IsWhite(value_bit)) {
+    MarkBit obj_bit = Marking::MarkBitFrom(obj);
+    if (Marking::IsBlack(obj_bit)) {
+      MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
+      if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
+        if (chunk->IsLeftOfProgressBar(slot)) {
+          WhiteToGreyAndPush(value_heap_obj, value_bit);
+          RestartIfNotMarking();
+        } else {
+          return false;
+        }
+      } else {
+        BlackToGreyAndUnshift(obj, obj_bit);
+        RestartIfNotMarking();
+        return false;
+      }
+    } else {
+      return false;
+    }
+  }
+  if (!is_compacting_) return false;
+  MarkBit obj_bit = Marking::MarkBitFrom(obj);
+  return Marking::IsBlack(obj_bit);
+}
+
+
+void IncrementalMarking::RecordWrite(HeapObject* obj, Object** slot,
+                                     Object* value) {
+  if (IsMarking() && value->IsHeapObject()) {
+    RecordWriteSlow(obj, slot, value);
+  }
+}
+
+
+void IncrementalMarking::RecordWriteOfCodeEntry(JSFunction* host, Object** slot,
+                                                Code* value) {
+  if (IsMarking()) RecordWriteOfCodeEntrySlow(host, slot, value);
+}
+
+
+void IncrementalMarking::RecordWriteIntoCode(HeapObject* obj, RelocInfo* rinfo,
+                                             Object* value) {
+  if (IsMarking() && value->IsHeapObject()) {
+    RecordWriteIntoCodeSlow(obj, rinfo, value);
+  }
+}
+
+
+void IncrementalMarking::RecordWrites(HeapObject* obj) {
+  if (IsMarking()) {
+    MarkBit obj_bit = Marking::MarkBitFrom(obj);
+    if (Marking::IsBlack(obj_bit)) {
+      MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
+      if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
+        chunk->set_progress_bar(0);
+      }
+      BlackToGreyAndUnshift(obj, obj_bit);
+      RestartIfNotMarking();
+    }
+  }
+}
+
+
+void IncrementalMarking::BlackToGreyAndUnshift(HeapObject* obj,
+                                               MarkBit mark_bit) {
+  DCHECK(Marking::MarkBitFrom(obj) == mark_bit);
+  DCHECK(obj->Size() >= 2 * kPointerSize);
+  DCHECK(IsMarking());
+  Marking::BlackToGrey(mark_bit);
+  int obj_size = obj->Size();
+  MemoryChunk::IncrementLiveBytesFromGC(obj->address(), -obj_size);
+  bytes_scanned_ -= obj_size;
+  int64_t old_bytes_rescanned = bytes_rescanned_;
+  bytes_rescanned_ = old_bytes_rescanned + obj_size;
+  if ((bytes_rescanned_ >> 20) != (old_bytes_rescanned >> 20)) {
+    if (bytes_rescanned_ > 2 * heap_->PromotedSpaceSizeOfObjects()) {
+      // If we have queued twice the heap size for rescanning then we are
+      // going around in circles, scanning the same objects again and again
+      // as the program mutates the heap faster than we can incrementally
+      // trace it.  In this case we switch to non-incremental marking in
+      // order to finish off this marking phase.
+      if (FLAG_trace_gc) {
+        PrintPID("Hurrying incremental marking because of lack of progress\n");
+      }
+      marking_speed_ = kMaxMarkingSpeed;
+    }
+  }
+
+  marking_deque_.UnshiftGrey(obj);
+}
+
+
+void IncrementalMarking::WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit) {
+  Marking::WhiteToGrey(mark_bit);
+  marking_deque_.PushGrey(obj);
+}
+}
+}  // namespace v8::internal
+
+#endif  // V8_HEAP_INCREMENTAL_MARKING_INL_H_
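
BaseRecordWrite above enforces the tri-color invariant: a black object must not end up pointing at a white one. A much-simplified standalone sketch of that rule, ignoring the progress-bar and compaction branches and using an enum in place of mark bits; SimplifiedRecordWrite is invented for the example.

    #include <cassert>

    enum Color { WHITE, GREY, BLACK };

    // Simplified incremental write barrier: if a BLACK holder is given a
    // pointer to a WHITE value, the holder goes back to GREY so it will be
    // rescanned, which is what BlackToGreyAndUnshift does in the common case.
    static void SimplifiedRecordWrite(Color* holder, Color value) {
      if (value == WHITE && *holder == BLACK) {
        *holder = GREY;  // Rescanning the holder will find the white value.
      }
    }

    int main() {
      Color holder = BLACK;
      SimplifiedRecordWrite(&holder, WHITE);
      assert(holder == GREY);  // Invariant restored: no black-to-white edge left.

      Color grey_holder = GREY;
      SimplifiedRecordWrite(&grey_holder, WHITE);
      assert(grey_holder == GREY);  // Grey holders will be scanned anyway.
      return 0;
    }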
diff --git a/src/heap/incremental-marking.cc b/src/heap/incremental-marking.cc
new file mode 100644
index 0000000..d72423a
--- /dev/null
+++ b/src/heap/incremental-marking.cc
@@ -0,0 +1,982 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/heap/incremental-marking.h"
+
+#include "src/code-stubs.h"
+#include "src/compilation-cache.h"
+#include "src/conversions.h"
+#include "src/heap/objects-visiting.h"
+#include "src/heap/objects-visiting-inl.h"
+
+namespace v8 {
+namespace internal {
+
+
+IncrementalMarking::IncrementalMarking(Heap* heap)
+    : heap_(heap),
+      state_(STOPPED),
+      marking_deque_memory_(NULL),
+      marking_deque_memory_committed_(false),
+      steps_count_(0),
+      old_generation_space_available_at_start_of_incremental_(0),
+      old_generation_space_used_at_start_of_incremental_(0),
+      should_hurry_(false),
+      marking_speed_(0),
+      allocated_(0),
+      no_marking_scope_depth_(0),
+      unscanned_bytes_of_large_object_(0) {}
+
+
+void IncrementalMarking::TearDown() { delete marking_deque_memory_; }
+
+
+void IncrementalMarking::RecordWriteSlow(HeapObject* obj, Object** slot,
+                                         Object* value) {
+  if (BaseRecordWrite(obj, slot, value) && slot != NULL) {
+    MarkBit obj_bit = Marking::MarkBitFrom(obj);
+    if (Marking::IsBlack(obj_bit)) {
+      // Object is not going to be rescanned.  We need to record the slot.
+      heap_->mark_compact_collector()->RecordSlot(HeapObject::RawField(obj, 0),
+                                                  slot, value);
+    }
+  }
+}
+
+
+void IncrementalMarking::RecordWriteFromCode(HeapObject* obj, Object** slot,
+                                             Isolate* isolate) {
+  DCHECK(obj->IsHeapObject());
+  IncrementalMarking* marking = isolate->heap()->incremental_marking();
+
+  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
+  int counter = chunk->write_barrier_counter();
+  if (counter < (MemoryChunk::kWriteBarrierCounterGranularity / 2)) {
+    marking->write_barriers_invoked_since_last_step_ +=
+        MemoryChunk::kWriteBarrierCounterGranularity -
+        chunk->write_barrier_counter();
+    chunk->set_write_barrier_counter(
+        MemoryChunk::kWriteBarrierCounterGranularity);
+  }
+
+  marking->RecordWrite(obj, slot, *slot);
+}
+
+
+void IncrementalMarking::RecordCodeTargetPatch(Code* host, Address pc,
+                                               HeapObject* value) {
+  if (IsMarking()) {
+    RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
+    RecordWriteIntoCode(host, &rinfo, value);
+  }
+}
+
+
+void IncrementalMarking::RecordCodeTargetPatch(Address pc, HeapObject* value) {
+  if (IsMarking()) {
+    Code* host = heap_->isolate()
+                     ->inner_pointer_to_code_cache()
+                     ->GcSafeFindCodeForInnerPointer(pc);
+    RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
+    RecordWriteIntoCode(host, &rinfo, value);
+  }
+}
+
+
+void IncrementalMarking::RecordWriteOfCodeEntrySlow(JSFunction* host,
+                                                    Object** slot,
+                                                    Code* value) {
+  if (BaseRecordWrite(host, slot, value)) {
+    DCHECK(slot != NULL);
+    heap_->mark_compact_collector()->RecordCodeEntrySlot(
+        reinterpret_cast<Address>(slot), value);
+  }
+}
+
+
+void IncrementalMarking::RecordWriteIntoCodeSlow(HeapObject* obj,
+                                                 RelocInfo* rinfo,
+                                                 Object* value) {
+  MarkBit value_bit = Marking::MarkBitFrom(HeapObject::cast(value));
+  if (Marking::IsWhite(value_bit)) {
+    MarkBit obj_bit = Marking::MarkBitFrom(obj);
+    if (Marking::IsBlack(obj_bit)) {
+      BlackToGreyAndUnshift(obj, obj_bit);
+      RestartIfNotMarking();
+    }
+    // Object is either grey or white.  It will be scanned if it survives.
+    return;
+  }
+
+  if (is_compacting_) {
+    MarkBit obj_bit = Marking::MarkBitFrom(obj);
+    if (Marking::IsBlack(obj_bit)) {
+      // Object is not going to be rescanned.  We need to record the slot.
+      heap_->mark_compact_collector()->RecordRelocSlot(rinfo,
+                                                       Code::cast(value));
+    }
+  }
+}
+
+
+static void MarkObjectGreyDoNotEnqueue(Object* obj) {
+  if (obj->IsHeapObject()) {
+    HeapObject* heap_obj = HeapObject::cast(obj);
+    MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::cast(obj));
+    if (Marking::IsBlack(mark_bit)) {
+      MemoryChunk::IncrementLiveBytesFromGC(heap_obj->address(),
+                                            -heap_obj->Size());
+    }
+    Marking::AnyToGrey(mark_bit);
+  }
+}
+
+
+static inline void MarkBlackOrKeepGrey(HeapObject* heap_object,
+                                       MarkBit mark_bit, int size) {
+  DCHECK(!Marking::IsImpossible(mark_bit));
+  if (mark_bit.Get()) return;
+  mark_bit.Set();
+  MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(), size);
+  DCHECK(Marking::IsBlack(mark_bit));
+}
+
+
+static inline void MarkBlackOrKeepBlack(HeapObject* heap_object,
+                                        MarkBit mark_bit, int size) {
+  DCHECK(!Marking::IsImpossible(mark_bit));
+  if (Marking::IsBlack(mark_bit)) return;
+  Marking::MarkBlack(mark_bit);
+  MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(), size);
+  DCHECK(Marking::IsBlack(mark_bit));
+}
+
+
+class IncrementalMarkingMarkingVisitor
+    : public StaticMarkingVisitor<IncrementalMarkingMarkingVisitor> {
+ public:
+  static void Initialize() {
+    StaticMarkingVisitor<IncrementalMarkingMarkingVisitor>::Initialize();
+    table_.Register(kVisitFixedArray, &VisitFixedArrayIncremental);
+    table_.Register(kVisitNativeContext, &VisitNativeContextIncremental);
+    table_.Register(kVisitJSRegExp, &VisitJSRegExp);
+  }
+
+  static const int kProgressBarScanningChunk = 32 * 1024;
+
+  static void VisitFixedArrayIncremental(Map* map, HeapObject* object) {
+    MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
+    // TODO(mstarzinger): Move setting of the flag to the allocation site of
+    // the array. The visitor should just check the flag.
+    if (FLAG_use_marking_progress_bar &&
+        chunk->owner()->identity() == LO_SPACE) {
+      chunk->SetFlag(MemoryChunk::HAS_PROGRESS_BAR);
+    }
+    if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
+      Heap* heap = map->GetHeap();
+      // When using a progress bar for large fixed arrays, scan only a chunk of
+      // the array and try to push it onto the marking deque again until it is
+      // fully scanned. Fall back to scanning it through to the end in case this
+      // fails because of a full deque.
+      int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
+      int start_offset =
+          Max(FixedArray::BodyDescriptor::kStartOffset, chunk->progress_bar());
+      int end_offset =
+          Min(object_size, start_offset + kProgressBarScanningChunk);
+      int already_scanned_offset = start_offset;
+      bool scan_until_end = false;
+      do {
+        VisitPointersWithAnchor(heap, HeapObject::RawField(object, 0),
+                                HeapObject::RawField(object, start_offset),
+                                HeapObject::RawField(object, end_offset));
+        start_offset = end_offset;
+        end_offset = Min(object_size, end_offset + kProgressBarScanningChunk);
+        scan_until_end = heap->incremental_marking()->marking_deque()->IsFull();
+      } while (scan_until_end && start_offset < object_size);
+      chunk->set_progress_bar(start_offset);
+      if (start_offset < object_size) {
+        heap->incremental_marking()->marking_deque()->UnshiftGrey(object);
+        heap->incremental_marking()->NotifyIncompleteScanOfObject(
+            object_size - (start_offset - already_scanned_offset));
+      }
+    } else {
+      FixedArrayVisitor::Visit(map, object);
+    }
+  }
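
The progress-bar path above is easier to see in isolation: scan at most one chunk, remember where scanning stopped, and requeue the object if anything is left. A standalone sketch with a plain vector standing in for the fixed array; the chunk size and ScanChunk helper are invented.

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    static const size_t kChunk = 4;  // Stand-in for kProgressBarScanningChunk.

    // Scans up to kChunk elements starting at *progress; returns true if the
    // whole array has been scanned, false if it must be requeued.
    static bool ScanChunk(const std::vector<int>& array, size_t* progress) {
      size_t end = std::min(array.size(), *progress + kChunk);
      for (size_t i = *progress; i < end; i++) {
        // "Visit" the element; the real code marks the objects it points to.
        printf("visit %d\n", array[i]);
      }
      *progress = end;
      return *progress == array.size();
    }

    int main() {
      std::vector<int> array = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
      size_t progress = 0;  // Stand-in for chunk->progress_bar().
      while (!ScanChunk(array, &progress)) {
        // The real code pushes the object back on the marking deque here, so
        // other work can run before the next chunk is scanned.
      }
      return 0;
    }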
+
+  static void VisitNativeContextIncremental(Map* map, HeapObject* object) {
+    Context* context = Context::cast(object);
+
+    // We will mark the cache black with a separate pass when we finish marking.
+    // Note that GC can happen when the context is not fully initialized,
+    // so the cache can be undefined.
+    Object* cache = context->get(Context::NORMALIZED_MAP_CACHE_INDEX);
+    if (!cache->IsUndefined()) {
+      MarkObjectGreyDoNotEnqueue(cache);
+    }
+    VisitNativeContext(map, context);
+  }
+
+  INLINE(static void VisitPointer(Heap* heap, Object** p)) {
+    Object* obj = *p;
+    if (obj->IsHeapObject()) {
+      heap->mark_compact_collector()->RecordSlot(p, p, obj);
+      MarkObject(heap, obj);
+    }
+  }
+
+  INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
+    for (Object** p = start; p < end; p++) {
+      Object* obj = *p;
+      if (obj->IsHeapObject()) {
+        heap->mark_compact_collector()->RecordSlot(start, p, obj);
+        MarkObject(heap, obj);
+      }
+    }
+  }
+
+  INLINE(static void VisitPointersWithAnchor(Heap* heap, Object** anchor,
+                                             Object** start, Object** end)) {
+    for (Object** p = start; p < end; p++) {
+      Object* obj = *p;
+      if (obj->IsHeapObject()) {
+        heap->mark_compact_collector()->RecordSlot(anchor, p, obj);
+        MarkObject(heap, obj);
+      }
+    }
+  }
+
+  // Marks the object grey and pushes it on the marking stack.
+  INLINE(static void MarkObject(Heap* heap, Object* obj)) {
+    HeapObject* heap_object = HeapObject::cast(obj);
+    MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
+    if (mark_bit.data_only()) {
+      MarkBlackOrKeepGrey(heap_object, mark_bit, heap_object->Size());
+    } else if (Marking::IsWhite(mark_bit)) {
+      heap->incremental_marking()->WhiteToGreyAndPush(heap_object, mark_bit);
+    }
+  }
+
+  // Marks the object black without pushing it on the marking stack.
+  // Returns true if object needed marking and false otherwise.
+  INLINE(static bool MarkObjectWithoutPush(Heap* heap, Object* obj)) {
+    HeapObject* heap_object = HeapObject::cast(obj);
+    MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
+    if (Marking::IsWhite(mark_bit)) {
+      mark_bit.Set();
+      MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(),
+                                            heap_object->Size());
+      return true;
+    }
+    return false;
+  }
+};
+
+
+class IncrementalMarkingRootMarkingVisitor : public ObjectVisitor {
+ public:
+  explicit IncrementalMarkingRootMarkingVisitor(
+      IncrementalMarking* incremental_marking)
+      : incremental_marking_(incremental_marking) {}
+
+  void VisitPointer(Object** p) { MarkObjectByPointer(p); }
+
+  void VisitPointers(Object** start, Object** end) {
+    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
+  }
+
+ private:
+  void MarkObjectByPointer(Object** p) {
+    Object* obj = *p;
+    if (!obj->IsHeapObject()) return;
+
+    HeapObject* heap_object = HeapObject::cast(obj);
+    MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
+    if (mark_bit.data_only()) {
+      MarkBlackOrKeepGrey(heap_object, mark_bit, heap_object->Size());
+    } else {
+      if (Marking::IsWhite(mark_bit)) {
+        incremental_marking_->WhiteToGreyAndPush(heap_object, mark_bit);
+      }
+    }
+  }
+
+  IncrementalMarking* incremental_marking_;
+};
+
+
+void IncrementalMarking::Initialize() {
+  IncrementalMarkingMarkingVisitor::Initialize();
+}
+
+
+void IncrementalMarking::SetOldSpacePageFlags(MemoryChunk* chunk,
+                                              bool is_marking,
+                                              bool is_compacting) {
+  if (is_marking) {
+    chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
+    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+
+    // It's difficult to filter out slots recorded for large objects.
+    if (chunk->owner()->identity() == LO_SPACE &&
+        chunk->size() > static_cast<size_t>(Page::kPageSize) && is_compacting) {
+      chunk->SetFlag(MemoryChunk::RESCAN_ON_EVACUATION);
+    }
+  } else if (chunk->owner()->identity() == CELL_SPACE ||
+             chunk->owner()->identity() == PROPERTY_CELL_SPACE ||
+             chunk->scan_on_scavenge()) {
+    chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
+    chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+  } else {
+    chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
+    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+  }
+}
+
+
+void IncrementalMarking::SetNewSpacePageFlags(NewSpacePage* chunk,
+                                              bool is_marking) {
+  chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
+  if (is_marking) {
+    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+  } else {
+    chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+  }
+  chunk->SetFlag(MemoryChunk::SCAN_ON_SCAVENGE);
+}
+
+
+void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
+    PagedSpace* space) {
+  PageIterator it(space);
+  while (it.has_next()) {
+    Page* p = it.next();
+    SetOldSpacePageFlags(p, false, false);
+  }
+}
+
+
+void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
+    NewSpace* space) {
+  NewSpacePageIterator it(space);
+  while (it.has_next()) {
+    NewSpacePage* p = it.next();
+    SetNewSpacePageFlags(p, false);
+  }
+}
+
+
+void IncrementalMarking::DeactivateIncrementalWriteBarrier() {
+  DeactivateIncrementalWriteBarrierForSpace(heap_->old_pointer_space());
+  DeactivateIncrementalWriteBarrierForSpace(heap_->old_data_space());
+  DeactivateIncrementalWriteBarrierForSpace(heap_->cell_space());
+  DeactivateIncrementalWriteBarrierForSpace(heap_->property_cell_space());
+  DeactivateIncrementalWriteBarrierForSpace(heap_->map_space());
+  DeactivateIncrementalWriteBarrierForSpace(heap_->code_space());
+  DeactivateIncrementalWriteBarrierForSpace(heap_->new_space());
+
+  LargePage* lop = heap_->lo_space()->first_page();
+  while (lop->is_valid()) {
+    SetOldSpacePageFlags(lop, false, false);
+    lop = lop->next_page();
+  }
+}
+
+
+void IncrementalMarking::ActivateIncrementalWriteBarrier(PagedSpace* space) {
+  PageIterator it(space);
+  while (it.has_next()) {
+    Page* p = it.next();
+    SetOldSpacePageFlags(p, true, is_compacting_);
+  }
+}
+
+
+void IncrementalMarking::ActivateIncrementalWriteBarrier(NewSpace* space) {
+  NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
+  while (it.has_next()) {
+    NewSpacePage* p = it.next();
+    SetNewSpacePageFlags(p, true);
+  }
+}
+
+
+void IncrementalMarking::ActivateIncrementalWriteBarrier() {
+  ActivateIncrementalWriteBarrier(heap_->old_pointer_space());
+  ActivateIncrementalWriteBarrier(heap_->old_data_space());
+  ActivateIncrementalWriteBarrier(heap_->cell_space());
+  ActivateIncrementalWriteBarrier(heap_->property_cell_space());
+  ActivateIncrementalWriteBarrier(heap_->map_space());
+  ActivateIncrementalWriteBarrier(heap_->code_space());
+  ActivateIncrementalWriteBarrier(heap_->new_space());
+
+  LargePage* lop = heap_->lo_space()->first_page();
+  while (lop->is_valid()) {
+    SetOldSpacePageFlags(lop, true, is_compacting_);
+    lop = lop->next_page();
+  }
+}
+
+
+bool IncrementalMarking::ShouldActivate() {
+  return WorthActivating() && heap_->NextGCIsLikelyToBeFull();
+}
+
+
+bool IncrementalMarking::WorthActivating() {
+#ifndef DEBUG
+  static const intptr_t kActivationThreshold = 8 * MB;
+#else
+  // TODO(gc) consider setting this to some low level so that some
+  // debug tests run with incremental marking and some without.
+  static const intptr_t kActivationThreshold = 0;
+#endif
+  // Only start incremental marking in a safe state: 1) when incremental
+  // marking is turned on, 2) when we are currently not in a GC, and
+  // 3) when we are currently not serializing or deserializing the heap.
+  return FLAG_incremental_marking && FLAG_incremental_marking_steps &&
+         heap_->gc_state() == Heap::NOT_IN_GC &&
+         !heap_->isolate()->serializer_enabled() &&
+         heap_->isolate()->IsInitialized() &&
+         heap_->PromotedSpaceSizeOfObjects() > kActivationThreshold;
+}
+
+
+void IncrementalMarking::ActivateGeneratedStub(Code* stub) {
+  DCHECK(RecordWriteStub::GetMode(stub) == RecordWriteStub::STORE_BUFFER_ONLY);
+
+  if (!IsMarking()) {
+    // Initially the stub is generated in STORE_BUFFER_ONLY mode, so
+    // we don't need to do anything if incremental marking is
+    // not active.
+  } else if (IsCompacting()) {
+    RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL_COMPACTION);
+  } else {
+    RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL);
+  }
+}
+
+
+static void PatchIncrementalMarkingRecordWriteStubs(
+    Heap* heap, RecordWriteStub::Mode mode) {
+  UnseededNumberDictionary* stubs = heap->code_stubs();
+
+  int capacity = stubs->Capacity();
+  for (int i = 0; i < capacity; i++) {
+    Object* k = stubs->KeyAt(i);
+    if (stubs->IsKey(k)) {
+      uint32_t key = NumberToUint32(k);
+
+      if (CodeStub::MajorKeyFromKey(key) == CodeStub::RecordWrite) {
+        Object* e = stubs->ValueAt(i);
+        if (e->IsCode()) {
+          RecordWriteStub::Patch(Code::cast(e), mode);
+        }
+      }
+    }
+  }
+}
+
+
+void IncrementalMarking::EnsureMarkingDequeIsCommitted() {
+  if (marking_deque_memory_ == NULL) {
+    marking_deque_memory_ = new base::VirtualMemory(4 * MB);
+  }
+  if (!marking_deque_memory_committed_) {
+    bool success = marking_deque_memory_->Commit(
+        reinterpret_cast<Address>(marking_deque_memory_->address()),
+        marking_deque_memory_->size(),
+        false);  // Not executable.
+    CHECK(success);
+    marking_deque_memory_committed_ = true;
+  }
+}
+
+
+void IncrementalMarking::UncommitMarkingDeque() {
+  if (state_ == STOPPED && marking_deque_memory_committed_) {
+    bool success = marking_deque_memory_->Uncommit(
+        reinterpret_cast<Address>(marking_deque_memory_->address()),
+        marking_deque_memory_->size());
+    CHECK(success);
+    marking_deque_memory_committed_ = false;
+  }
+}
+
+
+void IncrementalMarking::Start(CompactionFlag flag) {
+  if (FLAG_trace_incremental_marking) {
+    PrintF("[IncrementalMarking] Start\n");
+  }
+  DCHECK(FLAG_incremental_marking);
+  DCHECK(FLAG_incremental_marking_steps);
+  DCHECK(state_ == STOPPED);
+  DCHECK(heap_->gc_state() == Heap::NOT_IN_GC);
+  DCHECK(!heap_->isolate()->serializer_enabled());
+  DCHECK(heap_->isolate()->IsInitialized());
+
+  ResetStepCounters();
+
+  if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
+    StartMarking(flag);
+  } else {
+    if (FLAG_trace_incremental_marking) {
+      PrintF("[IncrementalMarking] Start sweeping.\n");
+    }
+    state_ = SWEEPING;
+  }
+
+  heap_->new_space()->LowerInlineAllocationLimit(kAllocatedThreshold);
+}
+
+
+void IncrementalMarking::StartMarking(CompactionFlag flag) {
+  if (FLAG_trace_incremental_marking) {
+    PrintF("[IncrementalMarking] Start marking\n");
+  }
+
+  is_compacting_ = !FLAG_never_compact && (flag == ALLOW_COMPACTION) &&
+                   heap_->mark_compact_collector()->StartCompaction(
+                       MarkCompactCollector::INCREMENTAL_COMPACTION);
+
+  state_ = MARKING;
+
+  RecordWriteStub::Mode mode = is_compacting_
+                                   ? RecordWriteStub::INCREMENTAL_COMPACTION
+                                   : RecordWriteStub::INCREMENTAL;
+
+  PatchIncrementalMarkingRecordWriteStubs(heap_, mode);
+
+  EnsureMarkingDequeIsCommitted();
+
+  // Initialize marking stack.
+  Address addr = static_cast<Address>(marking_deque_memory_->address());
+  size_t size = marking_deque_memory_->size();
+  if (FLAG_force_marking_deque_overflows) size = 64 * kPointerSize;
+  marking_deque_.Initialize(addr, addr + size);
+
+  ActivateIncrementalWriteBarrier();
+
+// Marking bits are cleared by the sweeper.
+#ifdef VERIFY_HEAP
+  if (FLAG_verify_heap) {
+    heap_->mark_compact_collector()->VerifyMarkbitsAreClean();
+  }
+#endif
+
+  heap_->CompletelyClearInstanceofCache();
+  heap_->isolate()->compilation_cache()->MarkCompactPrologue();
+
+  if (FLAG_cleanup_code_caches_at_gc) {
+    // We will mark the cache black with a separate pass
+    // when we finish marking.
+    MarkObjectGreyDoNotEnqueue(heap_->polymorphic_code_cache());
+  }
+
+  // Mark strong roots grey.
+  IncrementalMarkingRootMarkingVisitor visitor(this);
+  heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
+
+  heap_->mark_compact_collector()->MarkWeakObjectToCodeTable();
+
+  // Ready to start incremental marking.
+  if (FLAG_trace_incremental_marking) {
+    PrintF("[IncrementalMarking] Running\n");
+  }
+}
+
+
+void IncrementalMarking::PrepareForScavenge() {
+  if (!IsMarking()) return;
+  NewSpacePageIterator it(heap_->new_space()->FromSpaceStart(),
+                          heap_->new_space()->FromSpaceEnd());
+  while (it.has_next()) {
+    Bitmap::Clear(it.next());
+  }
+}
+
+
+void IncrementalMarking::UpdateMarkingDequeAfterScavenge() {
+  if (!IsMarking()) return;
+
+  int current = marking_deque_.bottom();
+  int mask = marking_deque_.mask();
+  int limit = marking_deque_.top();
+  HeapObject** array = marking_deque_.array();
+  int new_top = current;
+
+  Map* filler_map = heap_->one_pointer_filler_map();
+
+  while (current != limit) {
+    HeapObject* obj = array[current];
+    DCHECK(obj->IsHeapObject());
+    current = ((current + 1) & mask);
+    if (heap_->InNewSpace(obj)) {
+      MapWord map_word = obj->map_word();
+      if (map_word.IsForwardingAddress()) {
+        HeapObject* dest = map_word.ToForwardingAddress();
+        array[new_top] = dest;
+        new_top = ((new_top + 1) & mask);
+        DCHECK(new_top != marking_deque_.bottom());
+#ifdef DEBUG
+        MarkBit mark_bit = Marking::MarkBitFrom(obj);
+        DCHECK(Marking::IsGrey(mark_bit) ||
+               (obj->IsFiller() && Marking::IsWhite(mark_bit)));
+#endif
+      }
+    } else if (obj->map() != filler_map) {
+      // Skip one-word filler objects that appear on the
+      // stack when we perform an in-place array shift.
+      array[new_top] = obj;
+      new_top = ((new_top + 1) & mask);
+      DCHECK(new_top != marking_deque_.bottom());
+#ifdef DEBUG
+      MarkBit mark_bit = Marking::MarkBitFrom(obj);
+      MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
+      DCHECK(Marking::IsGrey(mark_bit) ||
+             (obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
+             (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) &&
+              Marking::IsBlack(mark_bit)));
+#endif
+    }
+  }
+  marking_deque_.set_top(new_top);
+}
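
The deque update above is an in-place filter over a circular buffer: walk from bottom to top with the index mask, copy surviving (possibly rewritten) entries to a second cursor, and store that cursor as the new top. A standalone sketch with small integers; the keep/rewrite rule is invented to stand in for forwarded objects versus dropped fillers.

    #include <cassert>

    static const int kSize = 8;  // Power of two, like the marking deque.
    static const int kMask = kSize - 1;

    // Keeps even values (rewriting them to value + 100) and drops odd values.
    // Returns the new top; the caller stores it back into the ring buffer.
    static int FilterRing(int* ring, int bottom, int top) {
      int current = bottom;
      int new_top = bottom;
      while (current != top) {
        int value = ring[current];
        current = (current + 1) & kMask;
        if (value % 2 == 0) {
          ring[new_top] = value + 100;  // "Rewrite" the surviving entry.
          new_top = (new_top + 1) & kMask;
        }
      }
      return new_top;
    }

    int main() {
      int ring[kSize] = {0};
      // Entries occupy slots 6, 7, 0, 1 (the buffer wraps around).
      ring[6] = 2; ring[7] = 3; ring[0] = 4; ring[1] = 5;
      int new_top = FilterRing(ring, /*bottom=*/6, /*top=*/2);
      assert(ring[6] == 102 && ring[7] == 104);  // Survivors compacted from bottom.
      assert(new_top == 0);                      // Two survivors remain.
      return 0;
    }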
+
+
+void IncrementalMarking::VisitObject(Map* map, HeapObject* obj, int size) {
+  MarkBit map_mark_bit = Marking::MarkBitFrom(map);
+  if (Marking::IsWhite(map_mark_bit)) {
+    WhiteToGreyAndPush(map, map_mark_bit);
+  }
+
+  IncrementalMarkingMarkingVisitor::IterateBody(map, obj);
+
+  MarkBit mark_bit = Marking::MarkBitFrom(obj);
+#if ENABLE_SLOW_DCHECKS
+  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
+  SLOW_DCHECK(Marking::IsGrey(mark_bit) ||
+              (obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
+              (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) &&
+               Marking::IsBlack(mark_bit)));
+#endif
+  MarkBlackOrKeepBlack(obj, mark_bit, size);
+}
+
+
+intptr_t IncrementalMarking::ProcessMarkingDeque(intptr_t bytes_to_process) {
+  intptr_t bytes_processed = 0;
+  Map* filler_map = heap_->one_pointer_filler_map();
+  while (!marking_deque_.IsEmpty() && bytes_processed < bytes_to_process) {
+    HeapObject* obj = marking_deque_.Pop();
+
+    // Explicitly skip one word fillers. Incremental markbit patterns are
+    // correct only for objects that occupy at least two words.
+    Map* map = obj->map();
+    if (map == filler_map) continue;
+
+    int size = obj->SizeFromMap(map);
+    unscanned_bytes_of_large_object_ = 0;
+    VisitObject(map, obj, size);
+    int delta = (size - unscanned_bytes_of_large_object_);
+    // TODO(jochen): remove after http://crbug.com/381820 is resolved.
+    CHECK_LT(0, delta);
+    bytes_processed += delta;
+  }
+  return bytes_processed;
+}
+
+
+void IncrementalMarking::ProcessMarkingDeque() {
+  Map* filler_map = heap_->one_pointer_filler_map();
+  while (!marking_deque_.IsEmpty()) {
+    HeapObject* obj = marking_deque_.Pop();
+
+    // Explicitly skip one word fillers. Incremental markbit patterns are
+    // correct only for objects that occupy at least two words.
+    Map* map = obj->map();
+    if (map == filler_map) continue;
+
+    VisitObject(map, obj, obj->SizeFromMap(map));
+  }
+}
+
+
+void IncrementalMarking::Hurry() {
+  if (state() == MARKING) {
+    double start = 0.0;
+    if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
+      start = base::OS::TimeCurrentMillis();
+      if (FLAG_trace_incremental_marking) {
+        PrintF("[IncrementalMarking] Hurry\n");
+      }
+    }
+    // TODO(gc) hurry can mark objects it encounters black as mutator
+    // was stopped.
+    ProcessMarkingDeque();
+    state_ = COMPLETE;
+    if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
+      double end = base::OS::TimeCurrentMillis();
+      double delta = end - start;
+      heap_->tracer()->AddMarkingTime(delta);
+      if (FLAG_trace_incremental_marking) {
+        PrintF("[IncrementalMarking] Complete (hurry), spent %d ms.\n",
+               static_cast<int>(delta));
+      }
+    }
+  }
+
+  if (FLAG_cleanup_code_caches_at_gc) {
+    PolymorphicCodeCache* poly_cache = heap_->polymorphic_code_cache();
+    Marking::GreyToBlack(Marking::MarkBitFrom(poly_cache));
+    MemoryChunk::IncrementLiveBytesFromGC(poly_cache->address(),
+                                          PolymorphicCodeCache::kSize);
+  }
+
+  Object* context = heap_->native_contexts_list();
+  while (!context->IsUndefined()) {
+    // GC can happen when the context is not fully initialized,
+    // so the cache can be undefined.
+    HeapObject* cache = HeapObject::cast(
+        Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX));
+    if (!cache->IsUndefined()) {
+      MarkBit mark_bit = Marking::MarkBitFrom(cache);
+      if (Marking::IsGrey(mark_bit)) {
+        Marking::GreyToBlack(mark_bit);
+        MemoryChunk::IncrementLiveBytesFromGC(cache->address(), cache->Size());
+      }
+    }
+    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
+  }
+}
+
+
+void IncrementalMarking::Abort() {
+  if (IsStopped()) return;
+  if (FLAG_trace_incremental_marking) {
+    PrintF("[IncrementalMarking] Aborting.\n");
+  }
+  heap_->new_space()->LowerInlineAllocationLimit(0);
+  IncrementalMarking::set_should_hurry(false);
+  ResetStepCounters();
+  if (IsMarking()) {
+    PatchIncrementalMarkingRecordWriteStubs(heap_,
+                                            RecordWriteStub::STORE_BUFFER_ONLY);
+    DeactivateIncrementalWriteBarrier();
+
+    if (is_compacting_) {
+      LargeObjectIterator it(heap_->lo_space());
+      for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
+        Page* p = Page::FromAddress(obj->address());
+        if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
+          p->ClearFlag(Page::RESCAN_ON_EVACUATION);
+        }
+      }
+    }
+  }
+  heap_->isolate()->stack_guard()->ClearGC();
+  state_ = STOPPED;
+  is_compacting_ = false;
+}
+
+
+void IncrementalMarking::Finalize() {
+  Hurry();
+  state_ = STOPPED;
+  is_compacting_ = false;
+  heap_->new_space()->LowerInlineAllocationLimit(0);
+  IncrementalMarking::set_should_hurry(false);
+  ResetStepCounters();
+  PatchIncrementalMarkingRecordWriteStubs(heap_,
+                                          RecordWriteStub::STORE_BUFFER_ONLY);
+  DeactivateIncrementalWriteBarrier();
+  DCHECK(marking_deque_.IsEmpty());
+  heap_->isolate()->stack_guard()->ClearGC();
+}
+
+
+void IncrementalMarking::MarkingComplete(CompletionAction action) {
+  state_ = COMPLETE;
+  // We will set the stack guard to request a GC now.  This will mean the rest
+  // of the GC gets performed as soon as possible (we can't do a GC here in a
+  // record-write context).  A few things may get allocated between now and
+  // then; that shouldn't make us do a scavenge and keep being incremental, so
+  // we set the should-hurry flag to indicate that there can't be much work
+  // left to do.
+  set_should_hurry(true);
+  if (FLAG_trace_incremental_marking) {
+    PrintF("[IncrementalMarking] Complete (normal).\n");
+  }
+  if (action == GC_VIA_STACK_GUARD) {
+    heap_->isolate()->stack_guard()->RequestGC();
+  }
+}
+
+
+void IncrementalMarking::OldSpaceStep(intptr_t allocated) {
+  if (IsStopped() && ShouldActivate()) {
+    // TODO(hpayer): Let's play it safe for now, but compaction should
+    // in principle be possible.
+    Start(PREVENT_COMPACTION);
+  } else {
+    Step(allocated * kFastMarking / kInitialMarkingSpeed, GC_VIA_STACK_GUARD);
+  }
+}
+
+
+void IncrementalMarking::SpeedUp() {
+  bool speed_up = false;
+
+  if ((steps_count_ % kMarkingSpeedAccellerationInterval) == 0) {
+    if (FLAG_trace_gc) {
+      PrintPID("Speed up marking after %d steps\n",
+               static_cast<int>(kMarkingSpeedAccellerationInterval));
+    }
+    speed_up = true;
+  }
+
+  bool space_left_is_very_small =
+      (old_generation_space_available_at_start_of_incremental_ < 10 * MB);
+
+  bool only_1_nth_of_space_that_was_available_still_left =
+      (SpaceLeftInOldSpace() * (marking_speed_ + 1) <
+       old_generation_space_available_at_start_of_incremental_);
+
+  if (space_left_is_very_small ||
+      only_1_nth_of_space_that_was_available_still_left) {
+    if (FLAG_trace_gc) PrintPID("Speed up marking because of low space left\n");
+    speed_up = true;
+  }
+
+  bool size_of_old_space_multiplied_by_n_during_marking =
+      (heap_->PromotedTotalSize() >
+       (marking_speed_ + 1) *
+           old_generation_space_used_at_start_of_incremental_);
+  if (size_of_old_space_multiplied_by_n_during_marking) {
+    speed_up = true;
+    if (FLAG_trace_gc) {
+      PrintPID("Speed up marking because of heap size increase\n");
+    }
+  }
+
+  int64_t promoted_during_marking =
+      heap_->PromotedTotalSize() -
+      old_generation_space_used_at_start_of_incremental_;
+  intptr_t delay = marking_speed_ * MB;
+  intptr_t scavenge_slack = heap_->MaxSemiSpaceSize();
+
+  // We try to scan at least twice as fast as we are allocating.
+  if (promoted_during_marking > bytes_scanned_ / 2 + scavenge_slack + delay) {
+    if (FLAG_trace_gc) {
+      PrintPID("Speed up marking because marker was not keeping up\n");
+    }
+    speed_up = true;
+  }
+
+  if (speed_up) {
+    if (state_ != MARKING) {
+      if (FLAG_trace_gc) {
+        PrintPID("Postponing speeding up marking until marking starts\n");
+      }
+    } else {
+      marking_speed_ += kMarkingSpeedAccelleration;
+      marking_speed_ = static_cast<int>(
+          Min(kMaxMarkingSpeed, static_cast<intptr_t>(marking_speed_ * 1.3)));
+      if (FLAG_trace_gc) {
+        PrintPID("Marking speed increased to %d\n", marking_speed_);
+      }
+    }
+  }
+}
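
When SpeedUp decides to accelerate, the new speed is (old + kMarkingSpeedAccelleration) scaled by 1.3 and capped at kMaxMarkingSpeed, so it grows roughly geometrically. A tiny sketch of that progression using the constants from the header; the 25-iteration loop is arbitrary.

    #include <algorithm>
    #include <cstdio>

    int main() {
      const int kInitialMarkingSpeed = 1;
      const int kMarkingSpeedAccelleration = 2;  // Spelling matches the source.
      const int kMaxMarkingSpeed = 1000;

      int speed = kInitialMarkingSpeed;
      for (int step = 1; step <= 25; step++) {
        speed = static_cast<int>(std::min<double>(
            kMaxMarkingSpeed, (speed + kMarkingSpeedAccelleration) * 1.3));
        printf("acceleration %2d: marking speed %d\n", step, speed);
      }
      // The speed saturates at kMaxMarkingSpeed in under twenty accelerations.
      return 0;
    }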
+
+
+void IncrementalMarking::Step(intptr_t allocated_bytes, CompletionAction action,
+                              bool force_marking) {
+  if (heap_->gc_state() != Heap::NOT_IN_GC || !FLAG_incremental_marking ||
+      !FLAG_incremental_marking_steps ||
+      (state_ != SWEEPING && state_ != MARKING)) {
+    return;
+  }
+
+  allocated_ += allocated_bytes;
+
+  if (!force_marking && allocated_ < kAllocatedThreshold &&
+      write_barriers_invoked_since_last_step_ <
+          kWriteBarriersInvokedThreshold) {
+    return;
+  }
+
+  if (state_ == MARKING && no_marking_scope_depth_ > 0) return;
+
+  {
+    HistogramTimerScope incremental_marking_scope(
+        heap_->isolate()->counters()->gc_incremental_marking());
+    double start = base::OS::TimeCurrentMillis();
+
+    // The marking speed is driven either by the allocation rate or by the rate
+    // at which we are having to check the color of objects in the write
+    // barrier.
+    // It is possible for a tight non-allocating loop to run a lot of write
+    // barriers before we get here and check them (marking can only take place
+    // on allocation), so to reduce the lumpiness we don't use the write
+    // barriers invoked since last step directly to determine the amount of
+    // work to do.
+    intptr_t bytes_to_process =
+        marking_speed_ *
+        Max(allocated_, write_barriers_invoked_since_last_step_);
+    allocated_ = 0;
+    write_barriers_invoked_since_last_step_ = 0;
+
+    bytes_scanned_ += bytes_to_process;
+    intptr_t bytes_processed = 0;
+
+    if (state_ == SWEEPING) {
+      if (heap_->mark_compact_collector()->sweeping_in_progress() &&
+          heap_->mark_compact_collector()->IsSweepingCompleted()) {
+        heap_->mark_compact_collector()->EnsureSweepingCompleted();
+      }
+      if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
+        bytes_scanned_ = 0;
+        StartMarking(PREVENT_COMPACTION);
+      }
+    } else if (state_ == MARKING) {
+      bytes_processed = ProcessMarkingDeque(bytes_to_process);
+      if (marking_deque_.IsEmpty()) MarkingComplete(action);
+    }
+
+    steps_count_++;
+
+    // Speed up marking if we are marking too slow or if we are almost done
+    // with marking.
+    SpeedUp();
+
+    double end = base::OS::TimeCurrentMillis();
+    double duration = (end - start);
+    // Note that we report zero bytes here when sweeping was in progress or
+    // when we just started incremental marking. In these cases we did not
+    // process the marking deque.
+    heap_->tracer()->AddIncrementalMarkingStep(duration, bytes_processed);
+  }
+}
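
The work budget in Step above is marking_speed_ * Max(allocated_, write_barriers_invoked_since_last_step_), and a non-forced step only runs once allocation or write-barrier counts cross their thresholds. A small worked sketch of that arithmetic; the byte counts are made up.

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    int main() {
      const intptr_t kAllocatedThreshold = 65536;  // From the header above.
      const intptr_t kWriteBarriersInvokedThreshold = 32768;

      intptr_t marking_speed = 3;           // e.g. after NotifyOfHighPromotionRate.
      intptr_t allocated = 80 * 1024;       // Bytes allocated since the last step.
      intptr_t write_barriers = 10 * 1024;  // Heavy write barriers since last step.

      bool step_due = allocated >= kAllocatedThreshold ||
                      write_barriers >= kWriteBarriersInvokedThreshold;
      intptr_t bytes_to_process =
          marking_speed * std::max(allocated, write_barriers);

      // Here: the allocation threshold is crossed, budget = 3 * 81920 = 245760.
      printf("step due: %d, budget: %ld bytes\n", static_cast<int>(step_due),
             static_cast<long>(bytes_to_process));
      return 0;
    }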
+
+
+void IncrementalMarking::ResetStepCounters() {
+  steps_count_ = 0;
+  old_generation_space_available_at_start_of_incremental_ =
+      SpaceLeftInOldSpace();
+  old_generation_space_used_at_start_of_incremental_ =
+      heap_->PromotedTotalSize();
+  bytes_rescanned_ = 0;
+  marking_speed_ = kInitialMarkingSpeed;
+  bytes_scanned_ = 0;
+  write_barriers_invoked_since_last_step_ = 0;
+}
+
+
+int64_t IncrementalMarking::SpaceLeftInOldSpace() {
+  return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSizeOfObjects();
+}
+}
+}  // namespace v8::internal
diff --git a/src/heap/incremental-marking.h b/src/heap/incremental-marking.h
new file mode 100644
index 0000000..e4a8e97
--- /dev/null
+++ b/src/heap/incremental-marking.h
@@ -0,0 +1,226 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_INCREMENTAL_MARKING_H_
+#define V8_HEAP_INCREMENTAL_MARKING_H_
+
+
+#include "src/execution.h"
+#include "src/heap/mark-compact.h"
+#include "src/objects.h"
+
+namespace v8 {
+namespace internal {
+
+
+class IncrementalMarking {
+ public:
+  enum State { STOPPED, SWEEPING, MARKING, COMPLETE };
+
+  enum CompletionAction { GC_VIA_STACK_GUARD, NO_GC_VIA_STACK_GUARD };
+
+  explicit IncrementalMarking(Heap* heap);
+
+  static void Initialize();
+
+  void TearDown();
+
+  State state() {
+    DCHECK(state_ == STOPPED || FLAG_incremental_marking);
+    return state_;
+  }
+
+  bool should_hurry() { return should_hurry_; }
+  void set_should_hurry(bool val) { should_hurry_ = val; }
+
+  inline bool IsStopped() { return state() == STOPPED; }
+
+  INLINE(bool IsMarking()) { return state() >= MARKING; }
+
+  inline bool IsMarkingIncomplete() { return state() == MARKING; }
+
+  inline bool IsComplete() { return state() == COMPLETE; }
+
+  bool WorthActivating();
+
+  bool ShouldActivate();
+
+  enum CompactionFlag { ALLOW_COMPACTION, PREVENT_COMPACTION };
+
+  void Start(CompactionFlag flag = ALLOW_COMPACTION);
+
+  void Stop();
+
+  void PrepareForScavenge();
+
+  void UpdateMarkingDequeAfterScavenge();
+
+  void Hurry();
+
+  void Finalize();
+
+  void Abort();
+
+  void MarkingComplete(CompletionAction action);
+
+  // It's hard to know how much work the incremental marker should do to make
+  // progress in the face of the mutator creating new work for it.  We start
+  // off at a moderate rate of work and gradually increase the speed of the
+  // incremental marker until it completes.
+  // Do some marking every time this much memory has been allocated or that many
+  // heavy (color-checking) write barriers have been invoked.
+  static const intptr_t kAllocatedThreshold = 65536;
+  static const intptr_t kWriteBarriersInvokedThreshold = 32768;
+  // Start off by marking this many times more memory than has been allocated.
+  static const intptr_t kInitialMarkingSpeed = 1;
+  // But if we are promoting a lot of data we need to mark faster to keep up
+  // with the data that is entering the old space through promotion.
+  static const intptr_t kFastMarking = 3;
+  // After this many steps we increase the marking/allocating factor.
+  static const intptr_t kMarkingSpeedAccellerationInterval = 1024;
+  // This is how much we increase the marking/allocating factor by.
+  static const intptr_t kMarkingSpeedAccelleration = 2;
+  static const intptr_t kMaxMarkingSpeed = 1000;
+
+  void OldSpaceStep(intptr_t allocated);
+
+  void Step(intptr_t allocated, CompletionAction action,
+            bool force_marking = false);
+
+  inline void RestartIfNotMarking() {
+    if (state_ == COMPLETE) {
+      state_ = MARKING;
+      if (FLAG_trace_incremental_marking) {
+        PrintF("[IncrementalMarking] Restarting (new grey objects)\n");
+      }
+    }
+  }
+
+  static void RecordWriteFromCode(HeapObject* obj, Object** slot,
+                                  Isolate* isolate);
+
+  // Record a slot for compaction.  Returns false for objects that are
+  // guaranteed to be rescanned or not guaranteed to survive.
+  //
+  // No slots in white objects should be recorded, as some slots are typed and
+  // cannot be interpreted correctly if the underlying object does not survive
+  // the incremental cycle (stays white).
+  INLINE(bool BaseRecordWrite(HeapObject* obj, Object** slot, Object* value));
+  INLINE(void RecordWrite(HeapObject* obj, Object** slot, Object* value));
+  INLINE(void RecordWriteIntoCode(HeapObject* obj, RelocInfo* rinfo,
+                                  Object* value));
+  INLINE(void RecordWriteOfCodeEntry(JSFunction* host, Object** slot,
+                                     Code* value));
+
+
+  void RecordWriteSlow(HeapObject* obj, Object** slot, Object* value);
+  void RecordWriteIntoCodeSlow(HeapObject* obj, RelocInfo* rinfo,
+                               Object* value);
+  void RecordWriteOfCodeEntrySlow(JSFunction* host, Object** slot, Code* value);
+  void RecordCodeTargetPatch(Code* host, Address pc, HeapObject* value);
+  void RecordCodeTargetPatch(Address pc, HeapObject* value);
+
+  inline void RecordWrites(HeapObject* obj);
+
+  inline void BlackToGreyAndUnshift(HeapObject* obj, MarkBit mark_bit);
+
+  inline void WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit);
+
+  inline void SetOldSpacePageFlags(MemoryChunk* chunk) {
+    SetOldSpacePageFlags(chunk, IsMarking(), IsCompacting());
+  }
+
+  inline void SetNewSpacePageFlags(NewSpacePage* chunk) {
+    SetNewSpacePageFlags(chunk, IsMarking());
+  }
+
+  MarkingDeque* marking_deque() { return &marking_deque_; }
+
+  bool IsCompacting() { return IsMarking() && is_compacting_; }
+
+  void ActivateGeneratedStub(Code* stub);
+
+  void NotifyOfHighPromotionRate() {
+    if (IsMarking()) {
+      if (marking_speed_ < kFastMarking) {
+        if (FLAG_trace_gc) {
+          PrintPID(
+              "Increasing marking speed to %d "
+              "due to high promotion rate\n",
+              static_cast<int>(kFastMarking));
+        }
+        marking_speed_ = kFastMarking;
+      }
+    }
+  }
+
+  void EnterNoMarkingScope() { no_marking_scope_depth_++; }
+
+  void LeaveNoMarkingScope() { no_marking_scope_depth_--; }
+
+  void UncommitMarkingDeque();
+
+  void NotifyIncompleteScanOfObject(int unscanned_bytes) {
+    unscanned_bytes_of_large_object_ = unscanned_bytes;
+  }
+
+ private:
+  int64_t SpaceLeftInOldSpace();
+
+  void SpeedUp();
+
+  void ResetStepCounters();
+
+  void StartMarking(CompactionFlag flag);
+
+  void ActivateIncrementalWriteBarrier(PagedSpace* space);
+  static void ActivateIncrementalWriteBarrier(NewSpace* space);
+  void ActivateIncrementalWriteBarrier();
+
+  static void DeactivateIncrementalWriteBarrierForSpace(PagedSpace* space);
+  static void DeactivateIncrementalWriteBarrierForSpace(NewSpace* space);
+  void DeactivateIncrementalWriteBarrier();
+
+  static void SetOldSpacePageFlags(MemoryChunk* chunk, bool is_marking,
+                                   bool is_compacting);
+
+  static void SetNewSpacePageFlags(NewSpacePage* chunk, bool is_marking);
+
+  void EnsureMarkingDequeIsCommitted();
+
+  INLINE(void ProcessMarkingDeque());
+
+  INLINE(intptr_t ProcessMarkingDeque(intptr_t bytes_to_process));
+
+  INLINE(void VisitObject(Map* map, HeapObject* obj, int size));
+
+  Heap* heap_;
+
+  State state_;
+  bool is_compacting_;
+
+  base::VirtualMemory* marking_deque_memory_;
+  bool marking_deque_memory_committed_;
+  MarkingDeque marking_deque_;
+
+  int steps_count_;
+  int64_t old_generation_space_available_at_start_of_incremental_;
+  int64_t old_generation_space_used_at_start_of_incremental_;
+  int64_t bytes_rescanned_;
+  bool should_hurry_;
+  int marking_speed_;
+  intptr_t bytes_scanned_;
+  intptr_t allocated_;
+  intptr_t write_barriers_invoked_since_last_step_;
+
+  int no_marking_scope_depth_;
+
+  int unscanned_bytes_of_large_object_;
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(IncrementalMarking);
+};
+}
+}  // namespace v8::internal
+
+#endif  // V8_HEAP_INCREMENTAL_MARKING_H_
diff --git a/src/heap/mark-compact-inl.h b/src/heap/mark-compact-inl.h
new file mode 100644
index 0000000..66b0a59
--- /dev/null
+++ b/src/heap/mark-compact-inl.h
@@ -0,0 +1,72 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_MARK_COMPACT_INL_H_
+#define V8_HEAP_MARK_COMPACT_INL_H_
+
+#include "src/heap/mark-compact.h"
+#include "src/isolate.h"
+
+
+namespace v8 {
+namespace internal {
+
+
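+// Looks up the mark bit for an arbitrary heap address via the marking bitmap
+// of the MemoryChunk that contains it.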
+MarkBit Marking::MarkBitFrom(Address addr) {
+  MemoryChunk* p = MemoryChunk::FromAddress(addr);
+  return p->markbits()->MarkBitFromIndex(p->AddressToMarkbitIndex(addr),
+                                         p->ContainsOnlyData());
+}
+
+
+void MarkCompactCollector::SetFlags(int flags) {
+  reduce_memory_footprint_ = ((flags & Heap::kReduceMemoryFootprintMask) != 0);
+  abort_incremental_marking_ =
+      ((flags & Heap::kAbortIncrementalMarkingMask) != 0);
+}
+
+
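+// Marks |obj| if it is still unmarked, accounts for its size in the page's
+// live-byte count, and pushes it onto the marking deque so that its body is
+// scanned later.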
+void MarkCompactCollector::MarkObject(HeapObject* obj, MarkBit mark_bit) {
+  DCHECK(Marking::MarkBitFrom(obj) == mark_bit);
+  if (!mark_bit.Get()) {
+    mark_bit.Set();
+    MemoryChunk::IncrementLiveBytesFromGC(obj->address(), obj->Size());
+    DCHECK(IsMarked(obj));
+    DCHECK(obj->GetIsolate()->heap()->Contains(obj));
+    marking_deque_.PushBlack(obj);
+  }
+}
+
+
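+// Marks an object that is known to be unmarked and updates the live-byte
+// count, but does not push it onto the marking deque.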
+void MarkCompactCollector::SetMark(HeapObject* obj, MarkBit mark_bit) {
+  DCHECK(!mark_bit.Get());
+  DCHECK(Marking::MarkBitFrom(obj) == mark_bit);
+  mark_bit.Set();
+  MemoryChunk::IncrementLiveBytesFromGC(obj->address(), obj->Size());
+}
+
+
+bool MarkCompactCollector::IsMarked(Object* obj) {
+  DCHECK(obj->IsHeapObject());
+  HeapObject* heap_object = HeapObject::cast(obj);
+  return Marking::MarkBitFrom(heap_object).Get();
+}
+
+
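+// Records a slot that points into an evacuation candidate page so that it can
+// be updated after evacuation; if the page's slots buffer cannot accept
+// another entry, the page is evicted from the candidate set instead.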
+void MarkCompactCollector::RecordSlot(Object** anchor_slot, Object** slot,
+                                      Object* object,
+                                      SlotsBuffer::AdditionMode mode) {
+  Page* object_page = Page::FromAddress(reinterpret_cast<Address>(object));
+  if (object_page->IsEvacuationCandidate() &&
+      !ShouldSkipEvacuationSlotRecording(anchor_slot)) {
+    if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
+                            object_page->slots_buffer_address(), slot, mode)) {
+      EvictEvacuationCandidate(object_page);
+    }
+  }
+}
+}
+}  // namespace v8::internal
+
+#endif  // V8_HEAP_MARK_COMPACT_INL_H_
diff --git a/src/heap/mark-compact.cc b/src/heap/mark-compact.cc
new file mode 100644
index 0000000..9f9a658
--- /dev/null
+++ b/src/heap/mark-compact.cc
@@ -0,0 +1,4562 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/base/atomicops.h"
+#include "src/base/bits.h"
+#include "src/code-stubs.h"
+#include "src/compilation-cache.h"
+#include "src/cpu-profiler.h"
+#include "src/deoptimizer.h"
+#include "src/execution.h"
+#include "src/gdb-jit.h"
+#include "src/global-handles.h"
+#include "src/heap/incremental-marking.h"
+#include "src/heap/mark-compact.h"
+#include "src/heap/objects-visiting.h"
+#include "src/heap/objects-visiting-inl.h"
+#include "src/heap/spaces-inl.h"
+#include "src/heap/sweeper-thread.h"
+#include "src/heap-profiler.h"
+#include "src/ic/ic.h"
+#include "src/ic/stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+
+const char* Marking::kWhiteBitPattern = "00";
+const char* Marking::kBlackBitPattern = "10";
+const char* Marking::kGreyBitPattern = "11";
+const char* Marking::kImpossibleBitPattern = "01";
+
+
+// -------------------------------------------------------------------------
+// MarkCompactCollector
+
+MarkCompactCollector::MarkCompactCollector(Heap* heap)
+    :  // NOLINT
+#ifdef DEBUG
+      state_(IDLE),
+#endif
+      reduce_memory_footprint_(false),
+      abort_incremental_marking_(false),
+      marking_parity_(ODD_MARKING_PARITY),
+      compacting_(false),
+      was_marked_incrementally_(false),
+      sweeping_in_progress_(false),
+      pending_sweeper_jobs_semaphore_(0),
+      sequential_sweeping_(false),
+      migration_slots_buffer_(NULL),
+      heap_(heap),
+      code_flusher_(NULL),
+      have_code_to_deoptimize_(false) {
+}
+
+#ifdef VERIFY_HEAP
+class VerifyMarkingVisitor : public ObjectVisitor {
+ public:
+  explicit VerifyMarkingVisitor(Heap* heap) : heap_(heap) {}
+
+  void VisitPointers(Object** start, Object** end) {
+    for (Object** current = start; current < end; current++) {
+      if ((*current)->IsHeapObject()) {
+        HeapObject* object = HeapObject::cast(*current);
+        CHECK(heap_->mark_compact_collector()->IsMarked(object));
+      }
+    }
+  }
+
+  void VisitEmbeddedPointer(RelocInfo* rinfo) {
+    DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
+    if (!rinfo->host()->IsWeakObject(rinfo->target_object())) {
+      Object* p = rinfo->target_object();
+      VisitPointer(&p);
+    }
+  }
+
+  void VisitCell(RelocInfo* rinfo) {
+    Code* code = rinfo->host();
+    DCHECK(rinfo->rmode() == RelocInfo::CELL);
+    if (!code->IsWeakObject(rinfo->target_cell())) {
+      ObjectVisitor::VisitCell(rinfo);
+    }
+  }
+
+ private:
+  Heap* heap_;
+};
+
+
+static void VerifyMarking(Heap* heap, Address bottom, Address top) {
+  VerifyMarkingVisitor visitor(heap);
+  HeapObject* object;
+  Address next_object_must_be_here_or_later = bottom;
+
+  for (Address current = bottom; current < top; current += kPointerSize) {
+    object = HeapObject::FromAddress(current);
+    if (MarkCompactCollector::IsMarked(object)) {
+      CHECK(current >= next_object_must_be_here_or_later);
+      object->Iterate(&visitor);
+      next_object_must_be_here_or_later = current + object->Size();
+    }
+  }
+}
+
+
+static void VerifyMarking(NewSpace* space) {
+  Address end = space->top();
+  NewSpacePageIterator it(space->bottom(), end);
+  // The bottom position is at the start of its page, which allows us to use
+  // page->area_start() as the start of the range on all pages.
+  CHECK_EQ(space->bottom(),
+           NewSpacePage::FromAddress(space->bottom())->area_start());
+  while (it.has_next()) {
+    NewSpacePage* page = it.next();
+    Address limit = it.has_next() ? page->area_end() : end;
+    CHECK(limit == end || !page->Contains(end));
+    VerifyMarking(space->heap(), page->area_start(), limit);
+  }
+}
+
+
+static void VerifyMarking(PagedSpace* space) {
+  PageIterator it(space);
+
+  while (it.has_next()) {
+    Page* p = it.next();
+    VerifyMarking(space->heap(), p->area_start(), p->area_end());
+  }
+}
+
+
+static void VerifyMarking(Heap* heap) {
+  VerifyMarking(heap->old_pointer_space());
+  VerifyMarking(heap->old_data_space());
+  VerifyMarking(heap->code_space());
+  VerifyMarking(heap->cell_space());
+  VerifyMarking(heap->property_cell_space());
+  VerifyMarking(heap->map_space());
+  VerifyMarking(heap->new_space());
+
+  VerifyMarkingVisitor visitor(heap);
+
+  LargeObjectIterator it(heap->lo_space());
+  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
+    if (MarkCompactCollector::IsMarked(obj)) {
+      obj->Iterate(&visitor);
+    }
+  }
+
+  heap->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
+}
+
+
+class VerifyEvacuationVisitor : public ObjectVisitor {
+ public:
+  void VisitPointers(Object** start, Object** end) {
+    for (Object** current = start; current < end; current++) {
+      if ((*current)->IsHeapObject()) {
+        HeapObject* object = HeapObject::cast(*current);
+        CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object));
+      }
+    }
+  }
+};
+
+
+static void VerifyEvacuation(Page* page) {
+  VerifyEvacuationVisitor visitor;
+  HeapObjectIterator iterator(page, NULL);
+  for (HeapObject* heap_object = iterator.Next(); heap_object != NULL;
+       heap_object = iterator.Next()) {
+    // We skip free space objects.
+    if (!heap_object->IsFiller()) {
+      heap_object->Iterate(&visitor);
+    }
+  }
+}
+
+
+static void VerifyEvacuation(NewSpace* space) {
+  NewSpacePageIterator it(space->bottom(), space->top());
+  VerifyEvacuationVisitor visitor;
+
+  while (it.has_next()) {
+    NewSpacePage* page = it.next();
+    Address current = page->area_start();
+    Address limit = it.has_next() ? page->area_end() : space->top();
+    CHECK(limit == space->top() || !page->Contains(space->top()));
+    while (current < limit) {
+      HeapObject* object = HeapObject::FromAddress(current);
+      object->Iterate(&visitor);
+      current += object->Size();
+    }
+  }
+}
+
+
+static void VerifyEvacuation(Heap* heap, PagedSpace* space) {
+  if (FLAG_use_allocation_folding &&
+      (space == heap->old_pointer_space() || space == heap->old_data_space())) {
+    return;
+  }
+  PageIterator it(space);
+
+  while (it.has_next()) {
+    Page* p = it.next();
+    if (p->IsEvacuationCandidate()) continue;
+    VerifyEvacuation(p);
+  }
+}
+
+
+static void VerifyEvacuation(Heap* heap) {
+  VerifyEvacuation(heap, heap->old_pointer_space());
+  VerifyEvacuation(heap, heap->old_data_space());
+  VerifyEvacuation(heap, heap->code_space());
+  VerifyEvacuation(heap, heap->cell_space());
+  VerifyEvacuation(heap, heap->property_cell_space());
+  VerifyEvacuation(heap, heap->map_space());
+  VerifyEvacuation(heap->new_space());
+
+  VerifyEvacuationVisitor visitor;
+  heap->IterateStrongRoots(&visitor, VISIT_ALL);
+}
+#endif  // VERIFY_HEAP
+
+
+#ifdef DEBUG
+class VerifyNativeContextSeparationVisitor : public ObjectVisitor {
+ public:
+  VerifyNativeContextSeparationVisitor() : current_native_context_(NULL) {}
+
+  void VisitPointers(Object** start, Object** end) {
+    for (Object** current = start; current < end; current++) {
+      if ((*current)->IsHeapObject()) {
+        HeapObject* object = HeapObject::cast(*current);
+        if (object->IsString()) continue;
+        switch (object->map()->instance_type()) {
+          case JS_FUNCTION_TYPE:
+            CheckContext(JSFunction::cast(object)->context());
+            break;
+          case JS_GLOBAL_PROXY_TYPE:
+            CheckContext(JSGlobalProxy::cast(object)->native_context());
+            break;
+          case JS_GLOBAL_OBJECT_TYPE:
+          case JS_BUILTINS_OBJECT_TYPE:
+            CheckContext(GlobalObject::cast(object)->native_context());
+            break;
+          case JS_ARRAY_TYPE:
+          case JS_DATE_TYPE:
+          case JS_OBJECT_TYPE:
+          case JS_REGEXP_TYPE:
+            VisitPointer(HeapObject::RawField(object, JSObject::kMapOffset));
+            break;
+          case MAP_TYPE:
+            VisitPointer(HeapObject::RawField(object, Map::kPrototypeOffset));
+            VisitPointer(HeapObject::RawField(object, Map::kConstructorOffset));
+            break;
+          case FIXED_ARRAY_TYPE:
+            if (object->IsContext()) {
+              CheckContext(object);
+            } else {
+              FixedArray* array = FixedArray::cast(object);
+              int length = array->length();
+              // Set array length to zero to prevent cycles while iterating
+              // over array bodies; this is easier than intrusive marking.
+              array->set_length(0);
+              array->IterateBody(FIXED_ARRAY_TYPE, FixedArray::SizeFor(length),
+                                 this);
+              array->set_length(length);
+            }
+            break;
+          case CELL_TYPE:
+          case JS_PROXY_TYPE:
+          case JS_VALUE_TYPE:
+          case TYPE_FEEDBACK_INFO_TYPE:
+            object->Iterate(this);
+            break;
+          case DECLARED_ACCESSOR_INFO_TYPE:
+          case EXECUTABLE_ACCESSOR_INFO_TYPE:
+          case BYTE_ARRAY_TYPE:
+          case CALL_HANDLER_INFO_TYPE:
+          case CODE_TYPE:
+          case FIXED_DOUBLE_ARRAY_TYPE:
+          case HEAP_NUMBER_TYPE:
+          case MUTABLE_HEAP_NUMBER_TYPE:
+          case INTERCEPTOR_INFO_TYPE:
+          case ODDBALL_TYPE:
+          case SCRIPT_TYPE:
+          case SHARED_FUNCTION_INFO_TYPE:
+            break;
+          default:
+            UNREACHABLE();
+        }
+      }
+    }
+  }
+
+ private:
+  void CheckContext(Object* context) {
+    if (!context->IsContext()) return;
+    Context* native_context = Context::cast(context)->native_context();
+    if (current_native_context_ == NULL) {
+      current_native_context_ = native_context;
+    } else {
+      CHECK_EQ(current_native_context_, native_context);
+    }
+  }
+
+  Context* current_native_context_;
+};
+
+
+static void VerifyNativeContextSeparation(Heap* heap) {
+  HeapObjectIterator it(heap->code_space());
+
+  for (Object* object = it.Next(); object != NULL; object = it.Next()) {
+    VerifyNativeContextSeparationVisitor visitor;
+    Code::cast(object)->CodeIterateBody(&visitor);
+  }
+}
+#endif
+
+
+void MarkCompactCollector::SetUp() {
+  free_list_old_data_space_.Reset(new FreeList(heap_->old_data_space()));
+  free_list_old_pointer_space_.Reset(new FreeList(heap_->old_pointer_space()));
+}
+
+
+void MarkCompactCollector::TearDown() { AbortCompaction(); }
+
+
+void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
+  p->MarkEvacuationCandidate();
+  evacuation_candidates_.Add(p);
+}
+
+
+static void TraceFragmentation(PagedSpace* space) {
+  int number_of_pages = space->CountTotalPages();
+  intptr_t reserved = (number_of_pages * space->AreaSize());
+  intptr_t free = reserved - space->SizeOfObjects();
+  PrintF("[%s]: %d pages, %d (%.1f%%) free\n",
+         AllocationSpaceName(space->identity()), number_of_pages,
+         static_cast<int>(free), static_cast<double>(free) * 100 / reserved);
+}
+
+
+bool MarkCompactCollector::StartCompaction(CompactionMode mode) {
+  if (!compacting_) {
+    DCHECK(evacuation_candidates_.length() == 0);
+
+#ifdef ENABLE_GDB_JIT_INTERFACE
+    // If the GDBJIT interface is active, disable compaction.
+    if (FLAG_gdbjit) return false;
+#endif
+
+    CollectEvacuationCandidates(heap()->old_pointer_space());
+    CollectEvacuationCandidates(heap()->old_data_space());
+
+    if (FLAG_compact_code_space && (mode == NON_INCREMENTAL_COMPACTION ||
+                                    FLAG_incremental_code_compaction)) {
+      CollectEvacuationCandidates(heap()->code_space());
+    } else if (FLAG_trace_fragmentation) {
+      TraceFragmentation(heap()->code_space());
+    }
+
+    if (FLAG_trace_fragmentation) {
+      TraceFragmentation(heap()->map_space());
+      TraceFragmentation(heap()->cell_space());
+      TraceFragmentation(heap()->property_cell_space());
+    }
+
+    heap()->old_pointer_space()->EvictEvacuationCandidatesFromFreeLists();
+    heap()->old_data_space()->EvictEvacuationCandidatesFromFreeLists();
+    heap()->code_space()->EvictEvacuationCandidatesFromFreeLists();
+
+    compacting_ = evacuation_candidates_.length() > 0;
+  }
+
+  return compacting_;
+}
+
+
+void MarkCompactCollector::CollectGarbage() {
+  // Make sure that Prepare() has been called. The individual steps below will
+  // update the state as they proceed.
+  DCHECK(state_ == PREPARE_GC);
+
+  MarkLiveObjects();
+  DCHECK(heap_->incremental_marking()->IsStopped());
+
+  if (FLAG_collect_maps) ClearNonLiveReferences();
+
+  ClearWeakCollections();
+
+#ifdef VERIFY_HEAP
+  if (FLAG_verify_heap) {
+    VerifyMarking(heap_);
+  }
+#endif
+
+  SweepSpaces();
+
+#ifdef DEBUG
+  if (FLAG_verify_native_context_separation) {
+    VerifyNativeContextSeparation(heap_);
+  }
+#endif
+
+#ifdef VERIFY_HEAP
+  if (heap()->weak_embedded_objects_verification_enabled()) {
+    VerifyWeakEmbeddedObjectsInCode();
+  }
+  if (FLAG_collect_maps && FLAG_omit_map_checks_for_leaf_maps) {
+    VerifyOmittedMapChecks();
+  }
+#endif
+
+  Finish();
+
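+  // Alternate the global marking parity after every full GC cycle.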
+  if (marking_parity_ == EVEN_MARKING_PARITY) {
+    marking_parity_ = ODD_MARKING_PARITY;
+  } else {
+    DCHECK(marking_parity_ == ODD_MARKING_PARITY);
+    marking_parity_ = EVEN_MARKING_PARITY;
+  }
+}
+
+
+#ifdef VERIFY_HEAP
+void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
+  PageIterator it(space);
+
+  while (it.has_next()) {
+    Page* p = it.next();
+    CHECK(p->markbits()->IsClean());
+    CHECK_EQ(0, p->LiveBytes());
+  }
+}
+
+
+void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
+  NewSpacePageIterator it(space->bottom(), space->top());
+
+  while (it.has_next()) {
+    NewSpacePage* p = it.next();
+    CHECK(p->markbits()->IsClean());
+    CHECK_EQ(0, p->LiveBytes());
+  }
+}
+
+
+void MarkCompactCollector::VerifyMarkbitsAreClean() {
+  VerifyMarkbitsAreClean(heap_->old_pointer_space());
+  VerifyMarkbitsAreClean(heap_->old_data_space());
+  VerifyMarkbitsAreClean(heap_->code_space());
+  VerifyMarkbitsAreClean(heap_->cell_space());
+  VerifyMarkbitsAreClean(heap_->property_cell_space());
+  VerifyMarkbitsAreClean(heap_->map_space());
+  VerifyMarkbitsAreClean(heap_->new_space());
+
+  LargeObjectIterator it(heap_->lo_space());
+  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
+    MarkBit mark_bit = Marking::MarkBitFrom(obj);
+    CHECK(Marking::IsWhite(mark_bit));
+    CHECK_EQ(0, Page::FromAddress(obj->address())->LiveBytes());
+  }
+}
+
+
+void MarkCompactCollector::VerifyWeakEmbeddedObjectsInCode() {
+  HeapObjectIterator code_iterator(heap()->code_space());
+  for (HeapObject* obj = code_iterator.Next(); obj != NULL;
+       obj = code_iterator.Next()) {
+    Code* code = Code::cast(obj);
+    if (!code->is_optimized_code() && !code->is_weak_stub()) continue;
+    if (WillBeDeoptimized(code)) continue;
+    code->VerifyEmbeddedObjectsDependency();
+  }
+}
+
+
+void MarkCompactCollector::VerifyOmittedMapChecks() {
+  HeapObjectIterator iterator(heap()->map_space());
+  for (HeapObject* obj = iterator.Next(); obj != NULL; obj = iterator.Next()) {
+    Map* map = Map::cast(obj);
+    map->VerifyOmittedMapChecks();
+  }
+}
+#endif  // VERIFY_HEAP
+
+
+static void ClearMarkbitsInPagedSpace(PagedSpace* space) {
+  PageIterator it(space);
+
+  while (it.has_next()) {
+    Bitmap::Clear(it.next());
+  }
+}
+
+
+static void ClearMarkbitsInNewSpace(NewSpace* space) {
+  NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
+
+  while (it.has_next()) {
+    Bitmap::Clear(it.next());
+  }
+}
+
+
+void MarkCompactCollector::ClearMarkbits() {
+  ClearMarkbitsInPagedSpace(heap_->code_space());
+  ClearMarkbitsInPagedSpace(heap_->map_space());
+  ClearMarkbitsInPagedSpace(heap_->old_pointer_space());
+  ClearMarkbitsInPagedSpace(heap_->old_data_space());
+  ClearMarkbitsInPagedSpace(heap_->cell_space());
+  ClearMarkbitsInPagedSpace(heap_->property_cell_space());
+  ClearMarkbitsInNewSpace(heap_->new_space());
+
+  LargeObjectIterator it(heap_->lo_space());
+  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
+    MarkBit mark_bit = Marking::MarkBitFrom(obj);
+    mark_bit.Clear();
+    mark_bit.Next().Clear();
+    Page::FromAddress(obj->address())->ResetProgressBar();
+    Page::FromAddress(obj->address())->ResetLiveBytes();
+  }
+}
+
+
+class MarkCompactCollector::SweeperTask : public v8::Task {
+ public:
+  SweeperTask(Heap* heap, PagedSpace* space) : heap_(heap), space_(space) {}
+
+  virtual ~SweeperTask() {}
+
+ private:
+  // v8::Task overrides.
+  virtual void Run() OVERRIDE {
+    heap_->mark_compact_collector()->SweepInParallel(space_, 0);
+    heap_->mark_compact_collector()->pending_sweeper_jobs_semaphore_.Signal();
+  }
+
+  Heap* heap_;
+  PagedSpace* space_;
+
+  DISALLOW_COPY_AND_ASSIGN(SweeperTask);
+};
+
+
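+// Starts concurrent sweeping of the old data and old pointer spaces on the
+// isolate's sweeper threads; with --job-based-sweeping an additional
+// background task is posted for each of the two spaces.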
+void MarkCompactCollector::StartSweeperThreads() {
+  DCHECK(free_list_old_pointer_space_.get()->IsEmpty());
+  DCHECK(free_list_old_data_space_.get()->IsEmpty());
+  sweeping_in_progress_ = true;
+  for (int i = 0; i < isolate()->num_sweeper_threads(); i++) {
+    isolate()->sweeper_threads()[i]->StartSweeping();
+  }
+  if (FLAG_job_based_sweeping) {
+    V8::GetCurrentPlatform()->CallOnBackgroundThread(
+        new SweeperTask(heap(), heap()->old_data_space()),
+        v8::Platform::kShortRunningTask);
+    V8::GetCurrentPlatform()->CallOnBackgroundThread(
+        new SweeperTask(heap(), heap()->old_pointer_space()),
+        v8::Platform::kShortRunningTask);
+  }
+}
+
+
+void MarkCompactCollector::EnsureSweepingCompleted() {
+  DCHECK(sweeping_in_progress_ == true);
+
+  // If sweeping is not completed, we try to complete it here. If we do not
+  // have sweeper threads, we have to complete the sweeping here, since we do
+  // not have a good indicator for a swept space in that case.
+  if (!AreSweeperThreadsActivated() || !IsSweepingCompleted()) {
+    SweepInParallel(heap()->paged_space(OLD_DATA_SPACE), 0);
+    SweepInParallel(heap()->paged_space(OLD_POINTER_SPACE), 0);
+  }
+
+  for (int i = 0; i < isolate()->num_sweeper_threads(); i++) {
+    isolate()->sweeper_threads()[i]->WaitForSweeperThread();
+  }
+  if (FLAG_job_based_sweeping) {
+    // Wait twice for both jobs.
+    pending_sweeper_jobs_semaphore_.Wait();
+    pending_sweeper_jobs_semaphore_.Wait();
+  }
+  ParallelSweepSpacesComplete();
+  sweeping_in_progress_ = false;
+  RefillFreeList(heap()->paged_space(OLD_DATA_SPACE));
+  RefillFreeList(heap()->paged_space(OLD_POINTER_SPACE));
+  heap()->paged_space(OLD_DATA_SPACE)->ResetUnsweptFreeBytes();
+  heap()->paged_space(OLD_POINTER_SPACE)->ResetUnsweptFreeBytes();
+
+#ifdef VERIFY_HEAP
+  if (FLAG_verify_heap) {
+    VerifyEvacuation(heap_);
+  }
+#endif
+}
+
+
+bool MarkCompactCollector::IsSweepingCompleted() {
+  for (int i = 0; i < isolate()->num_sweeper_threads(); i++) {
+    if (!isolate()->sweeper_threads()[i]->SweepingCompleted()) {
+      return false;
+    }
+  }
+
+  if (FLAG_job_based_sweeping) {
+    if (!pending_sweeper_jobs_semaphore_.WaitFor(
+            base::TimeDelta::FromSeconds(0))) {
+      return false;
+    }
+    pending_sweeper_jobs_semaphore_.Signal();
+  }
+
+  return true;
+}
+
+
+void MarkCompactCollector::RefillFreeList(PagedSpace* space) {
+  FreeList* free_list;
+
+  if (space == heap()->old_pointer_space()) {
+    free_list = free_list_old_pointer_space_.get();
+  } else if (space == heap()->old_data_space()) {
+    free_list = free_list_old_data_space_.get();
+  } else {
+    // Any PagedSpace might invoke RefillFreeList, so make sure we only refill
+    // free lists for the old data and old pointer spaces.
+    return;
+  }
+
+  intptr_t freed_bytes = space->free_list()->Concatenate(free_list);
+  space->AddToAccountingStats(freed_bytes);
+  space->DecrementUnsweptFreeBytes(freed_bytes);
+}
+
+
+bool MarkCompactCollector::AreSweeperThreadsActivated() {
+  return isolate()->sweeper_threads() != NULL || FLAG_job_based_sweeping;
+}
+
+
+void Marking::TransferMark(Address old_start, Address new_start) {
+  // This is only used when resizing an object.
+  DCHECK(MemoryChunk::FromAddress(old_start) ==
+         MemoryChunk::FromAddress(new_start));
+
+  if (!heap_->incremental_marking()->IsMarking()) return;
+
+  // If the mark doesn't move, we don't check the color of the object.
+  // It doesn't matter whether the object is black, since it hasn't changed
+  // size, so the adjustment to the live data count will be zero anyway.
+  if (old_start == new_start) return;
+
+  MarkBit new_mark_bit = MarkBitFrom(new_start);
+  MarkBit old_mark_bit = MarkBitFrom(old_start);
+
+#ifdef DEBUG
+  ObjectColor old_color = Color(old_mark_bit);
+#endif
+
+  if (Marking::IsBlack(old_mark_bit)) {
+    old_mark_bit.Clear();
+    DCHECK(IsWhite(old_mark_bit));
+    Marking::MarkBlack(new_mark_bit);
+    return;
+  } else if (Marking::IsGrey(old_mark_bit)) {
+    old_mark_bit.Clear();
+    old_mark_bit.Next().Clear();
+    DCHECK(IsWhite(old_mark_bit));
+    heap_->incremental_marking()->WhiteToGreyAndPush(
+        HeapObject::FromAddress(new_start), new_mark_bit);
+    heap_->incremental_marking()->RestartIfNotMarking();
+  }
+
+#ifdef DEBUG
+  ObjectColor new_color = Color(new_mark_bit);
+  DCHECK(new_color == old_color);
+#endif
+}
+
+
+const char* AllocationSpaceName(AllocationSpace space) {
+  switch (space) {
+    case NEW_SPACE:
+      return "NEW_SPACE";
+    case OLD_POINTER_SPACE:
+      return "OLD_POINTER_SPACE";
+    case OLD_DATA_SPACE:
+      return "OLD_DATA_SPACE";
+    case CODE_SPACE:
+      return "CODE_SPACE";
+    case MAP_SPACE:
+      return "MAP_SPACE";
+    case CELL_SPACE:
+      return "CELL_SPACE";
+    case PROPERTY_CELL_SPACE:
+      return "PROPERTY_CELL_SPACE";
+    case LO_SPACE:
+      return "LO_SPACE";
+    default:
+      UNREACHABLE();
+  }
+
+  return NULL;
+}
+
+
+// Returns zero for pages that have so little fragmentation that it is not
+// worth defragmenting them.  Otherwise returns a positive integer that gives
+// an estimate of fragmentation on an arbitrary scale.
+static int FreeListFragmentation(PagedSpace* space, Page* p) {
+  // If page was not swept then there are no free list items on it.
+  if (!p->WasSwept()) {
+    if (FLAG_trace_fragmentation) {
+      PrintF("%p [%s]: %d bytes live (unswept)\n", reinterpret_cast<void*>(p),
+             AllocationSpaceName(space->identity()), p->LiveBytes());
+    }
+    return 0;
+  }
+
+  PagedSpace::SizeStats sizes;
+  space->ObtainFreeListStatistics(p, &sizes);
+
+  intptr_t ratio;
+  intptr_t ratio_threshold;
+  intptr_t area_size = space->AreaSize();
+  if (space->identity() == CODE_SPACE) {
+    ratio = (sizes.medium_size_ * 10 + sizes.large_size_ * 2) * 100 / area_size;
+    ratio_threshold = 10;
+  } else {
+    ratio = (sizes.small_size_ * 5 + sizes.medium_size_) * 100 / area_size;
+    ratio_threshold = 15;
+  }
+
+  if (FLAG_trace_fragmentation) {
+    PrintF("%p [%s]: %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %s\n",
+           reinterpret_cast<void*>(p), AllocationSpaceName(space->identity()),
+           static_cast<int>(sizes.small_size_),
+           static_cast<double>(sizes.small_size_ * 100) / area_size,
+           static_cast<int>(sizes.medium_size_),
+           static_cast<double>(sizes.medium_size_ * 100) / area_size,
+           static_cast<int>(sizes.large_size_),
+           static_cast<double>(sizes.large_size_ * 100) / area_size,
+           static_cast<int>(sizes.huge_size_),
+           static_cast<double>(sizes.huge_size_ * 100) / area_size,
+           (ratio > ratio_threshold) ? "[fragmented]" : "");
+  }
+
+  if (FLAG_always_compact && sizes.Total() != area_size) {
+    return 1;
+  }
+
+  if (ratio <= ratio_threshold) return 0;  // Not fragmented.
+
+  return static_cast<int>(ratio - ratio_threshold);
+}
+
+
+void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
+  DCHECK(space->identity() == OLD_POINTER_SPACE ||
+         space->identity() == OLD_DATA_SPACE ||
+         space->identity() == CODE_SPACE);
+
+  static const int kMaxMaxEvacuationCandidates = 1000;
+  int number_of_pages = space->CountTotalPages();
+  int max_evacuation_candidates =
+      static_cast<int>(std::sqrt(number_of_pages / 2.0) + 1);
+
+  if (FLAG_stress_compaction || FLAG_always_compact) {
+    max_evacuation_candidates = kMaxMaxEvacuationCandidates;
+  }
+
+  class Candidate {
+   public:
+    Candidate() : fragmentation_(0), page_(NULL) {}
+    Candidate(int f, Page* p) : fragmentation_(f), page_(p) {}
+
+    int fragmentation() { return fragmentation_; }
+    Page* page() { return page_; }
+
+   private:
+    int fragmentation_;
+    Page* page_;
+  };
+
+  enum CompactionMode { COMPACT_FREE_LISTS, REDUCE_MEMORY_FOOTPRINT };
+
+  CompactionMode mode = COMPACT_FREE_LISTS;
+
+  intptr_t reserved = number_of_pages * space->AreaSize();
+  intptr_t over_reserved = reserved - space->SizeOfObjects();
+  static const intptr_t kFreenessThreshold = 50;
+
+  if (reduce_memory_footprint_ && over_reserved >= space->AreaSize()) {
+    // If reduction of memory footprint was requested, we are aggressive
+    // about choosing pages to free.  We expect that half-empty pages
+    // are easier to compact, so we slightly bump the limit.
+    mode = REDUCE_MEMORY_FOOTPRINT;
+    max_evacuation_candidates += 2;
+  }
+
+
+  if (over_reserved > reserved / 3 && over_reserved >= 2 * space->AreaSize()) {
+    // If over-usage is very high (more than a third of the space), we
+    // try to free all mostly empty pages.  We expect that almost empty
+    // pages are even easier to compact, so we bump the limit even more.
+    mode = REDUCE_MEMORY_FOOTPRINT;
+    max_evacuation_candidates *= 2;
+  }
+
+  if (FLAG_trace_fragmentation && mode == REDUCE_MEMORY_FOOTPRINT) {
+    PrintF(
+        "Estimated over reserved memory: %.1f / %.1f MB (threshold %d), "
+        "evacuation candidate limit: %d\n",
+        static_cast<double>(over_reserved) / MB,
+        static_cast<double>(reserved) / MB,
+        static_cast<int>(kFreenessThreshold), max_evacuation_candidates);
+  }
+
+  intptr_t estimated_release = 0;
+
+  Candidate candidates[kMaxMaxEvacuationCandidates];
+
+  max_evacuation_candidates =
+      Min(kMaxMaxEvacuationCandidates, max_evacuation_candidates);
+
+  int count = 0;
+  int fragmentation = 0;
+  Candidate* least = NULL;
+
+  PageIterator it(space);
+  if (it.has_next()) it.next();  // Never compact the first page.
+
+  while (it.has_next()) {
+    Page* p = it.next();
+    p->ClearEvacuationCandidate();
+
+    if (FLAG_stress_compaction) {
+      unsigned int counter = space->heap()->ms_count();
+      uintptr_t page_number = reinterpret_cast<uintptr_t>(p) >> kPageSizeBits;
+      if ((counter & 1) == (page_number & 1)) fragmentation = 1;
+    } else if (mode == REDUCE_MEMORY_FOOTPRINT) {
+      // Don't try to release too many pages.
+      if (estimated_release >= over_reserved) {
+        continue;
+      }
+
+      intptr_t free_bytes = 0;
+
+      if (!p->WasSwept()) {
+        free_bytes = (p->area_size() - p->LiveBytes());
+      } else {
+        PagedSpace::SizeStats sizes;
+        space->ObtainFreeListStatistics(p, &sizes);
+        free_bytes = sizes.Total();
+      }
+
+      int free_pct = static_cast<int>(free_bytes * 100) / p->area_size();
+
+      if (free_pct >= kFreenessThreshold) {
+        estimated_release += free_bytes;
+        fragmentation = free_pct;
+      } else {
+        fragmentation = 0;
+      }
+
+      if (FLAG_trace_fragmentation) {
+        PrintF("%p [%s]: %d (%.2f%%) free %s\n", reinterpret_cast<void*>(p),
+               AllocationSpaceName(space->identity()),
+               static_cast<int>(free_bytes),
+               static_cast<double>(free_bytes * 100) / p->area_size(),
+               (fragmentation > 0) ? "[fragmented]" : "");
+      }
+    } else {
+      fragmentation = FreeListFragmentation(space, p);
+    }
+
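+    // Keep at most max_evacuation_candidates pages: once the array is full,
+    // replace the least fragmented candidate whenever a page with higher
+    // fragmentation is found.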
+    if (fragmentation != 0) {
+      if (count < max_evacuation_candidates) {
+        candidates[count++] = Candidate(fragmentation, p);
+      } else {
+        if (least == NULL) {
+          for (int i = 0; i < max_evacuation_candidates; i++) {
+            if (least == NULL ||
+                candidates[i].fragmentation() < least->fragmentation()) {
+              least = candidates + i;
+            }
+          }
+        }
+        if (least->fragmentation() < fragmentation) {
+          *least = Candidate(fragmentation, p);
+          least = NULL;
+        }
+      }
+    }
+  }
+
+  for (int i = 0; i < count; i++) {
+    AddEvacuationCandidate(candidates[i].page());
+  }
+
+  if (count > 0 && FLAG_trace_fragmentation) {
+    PrintF("Collected %d evacuation candidates for space %s\n", count,
+           AllocationSpaceName(space->identity()));
+  }
+}
+
+
+void MarkCompactCollector::AbortCompaction() {
+  if (compacting_) {
+    int npages = evacuation_candidates_.length();
+    for (int i = 0; i < npages; i++) {
+      Page* p = evacuation_candidates_[i];
+      slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
+      p->ClearEvacuationCandidate();
+      p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
+    }
+    compacting_ = false;
+    evacuation_candidates_.Rewind(0);
+    invalidated_code_.Rewind(0);
+  }
+  DCHECK_EQ(0, evacuation_candidates_.length());
+}
+
+
+void MarkCompactCollector::Prepare() {
+  was_marked_incrementally_ = heap()->incremental_marking()->IsMarking();
+
+#ifdef DEBUG
+  DCHECK(state_ == IDLE);
+  state_ = PREPARE_GC;
+#endif
+
+  DCHECK(!FLAG_never_compact || !FLAG_always_compact);
+
+  if (sweeping_in_progress()) {
+    // Instead of waiting we could also abort the sweeper threads here.
+    EnsureSweepingCompleted();
+  }
+
+  // Clear marking bits if incremental marking is aborted.
+  if (was_marked_incrementally_ && abort_incremental_marking_) {
+    heap()->incremental_marking()->Abort();
+    ClearMarkbits();
+    AbortWeakCollections();
+    AbortCompaction();
+    was_marked_incrementally_ = false;
+  }
+
+  // Don't start compaction if we are in the middle of an incremental
+  // marking cycle: we did not collect any slots.
+  if (!FLAG_never_compact && !was_marked_incrementally_) {
+    StartCompaction(NON_INCREMENTAL_COMPACTION);
+  }
+
+  PagedSpaces spaces(heap());
+  for (PagedSpace* space = spaces.next(); space != NULL;
+       space = spaces.next()) {
+    space->PrepareForMarkCompact();
+  }
+
+#ifdef VERIFY_HEAP
+  if (!was_marked_incrementally_ && FLAG_verify_heap) {
+    VerifyMarkbitsAreClean();
+  }
+#endif
+}
+
+
+void MarkCompactCollector::Finish() {
+#ifdef DEBUG
+  DCHECK(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
+  state_ = IDLE;
+#endif
+  // The stub cache is not traversed during GC; clear the cache to
+  // force lazy re-initialization of it. This must be done after the
+  // GC, because it relies on the new address of certain old space
+  // objects (empty string, illegal builtin).
+  isolate()->stub_cache()->Clear();
+
+  if (have_code_to_deoptimize_) {
+    // Some code objects were marked for deoptimization during the GC.
+    Deoptimizer::DeoptimizeMarkedCode(isolate());
+    have_code_to_deoptimize_ = false;
+  }
+}
+
+
+// -------------------------------------------------------------------------
+// Phase 1: tracing and marking live objects.
+//   before: all objects are in normal state.
+//   after: a live object's map pointer is marked as '00'.
+
+// Marking all live objects in the heap as part of mark-sweep or mark-compact
+// collection.  Before marking, all objects are in their normal state.  After
+// marking, live objects' map pointers are marked indicating that the object
+// has been found reachable.
+//
+// The marking algorithm is a (mostly) depth-first (because of possible stack
+// overflow) traversal of the graph of objects reachable from the roots.  It
+// uses an explicit stack of pointers rather than recursion.  The young
+// generation's inactive ('from') space is used as a marking stack.  The
+// objects in the marking stack are the ones that have been reached and marked
+// but their children have not yet been visited.
+//
+// The marking stack can overflow during traversal.  In that case, we set an
+// overflow flag.  When the overflow flag is set, we continue marking objects
+// reachable from the objects on the marking stack, but no longer push them on
+// the marking stack.  Instead, we mark them as both marked and overflowed.
+// When the stack is in the overflowed state, objects marked as overflowed
+// have been reached and marked but their children have not been visited yet.
+// After emptying the marking stack, we clear the overflow flag and traverse
+// the heap looking for objects marked as overflowed, push them on the stack,
+// and continue with marking.  This process repeats until all reachable
+// objects have been marked.
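+//
+// A simplified sketch of the drain loop described above (illustrative only;
+// MarkObjectChildren and RefillMarkingDeque stand in for the corresponding
+// collector routines):
+//
+//   while (!marking_deque->IsEmpty() || marking_deque->overflowed()) {
+//     while (!marking_deque->IsEmpty()) {
+//       HeapObject* object = marking_deque->Pop();
+//       MarkObjectChildren(object);  // May overflow the deque again.
+//     }
+//     if (marking_deque->overflowed()) {
+//       marking_deque->ClearOverflowed();
+//       RefillMarkingDeque();  // Rescan the heap for overflowed objects.
+//     }
+//   }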
+
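+// Resets candidates whose code did not survive marking back to the
+// lazy-compile stub and re-records the affected code slots; candidates whose
+// code was marked keep it.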
+void CodeFlusher::ProcessJSFunctionCandidates() {
+  Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kCompileLazy);
+  Object* undefined = isolate_->heap()->undefined_value();
+
+  JSFunction* candidate = jsfunction_candidates_head_;
+  JSFunction* next_candidate;
+  while (candidate != NULL) {
+    next_candidate = GetNextCandidate(candidate);
+    ClearNextCandidate(candidate, undefined);
+
+    SharedFunctionInfo* shared = candidate->shared();
+
+    Code* code = shared->code();
+    MarkBit code_mark = Marking::MarkBitFrom(code);
+    if (!code_mark.Get()) {
+      if (FLAG_trace_code_flushing && shared->is_compiled()) {
+        PrintF("[code-flushing clears: ");
+        shared->ShortPrint();
+        PrintF(" - age: %d]\n", code->GetAge());
+      }
+      shared->set_code(lazy_compile);
+      candidate->set_code(lazy_compile);
+    } else {
+      candidate->set_code(code);
+    }
+
+    // We are in the middle of a GC cycle, so the write barrier in the code
+    // setter did not record the slot update, and we have to do that manually.
+    Address slot = candidate->address() + JSFunction::kCodeEntryOffset;
+    Code* target = Code::cast(Code::GetObjectFromEntryAddress(slot));
+    isolate_->heap()->mark_compact_collector()->RecordCodeEntrySlot(slot,
+                                                                    target);
+
+    Object** shared_code_slot =
+        HeapObject::RawField(shared, SharedFunctionInfo::kCodeOffset);
+    isolate_->heap()->mark_compact_collector()->RecordSlot(
+        shared_code_slot, shared_code_slot, *shared_code_slot);
+
+    candidate = next_candidate;
+  }
+
+  jsfunction_candidates_head_ = NULL;
+}
+
+
+void CodeFlusher::ProcessSharedFunctionInfoCandidates() {
+  Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kCompileLazy);
+
+  SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
+  SharedFunctionInfo* next_candidate;
+  while (candidate != NULL) {
+    next_candidate = GetNextCandidate(candidate);
+    ClearNextCandidate(candidate);
+
+    Code* code = candidate->code();
+    MarkBit code_mark = Marking::MarkBitFrom(code);
+    if (!code_mark.Get()) {
+      if (FLAG_trace_code_flushing && candidate->is_compiled()) {
+        PrintF("[code-flushing clears: ");
+        candidate->ShortPrint();
+        PrintF(" - age: %d]\n", code->GetAge());
+      }
+      candidate->set_code(lazy_compile);
+    }
+
+    Object** code_slot =
+        HeapObject::RawField(candidate, SharedFunctionInfo::kCodeOffset);
+    isolate_->heap()->mark_compact_collector()->RecordSlot(code_slot, code_slot,
+                                                           *code_slot);
+
+    candidate = next_candidate;
+  }
+
+  shared_function_info_candidates_head_ = NULL;
+}
+
+
+void CodeFlusher::ProcessOptimizedCodeMaps() {
+  STATIC_ASSERT(SharedFunctionInfo::kEntryLength == 4);
+
+  SharedFunctionInfo* holder = optimized_code_map_holder_head_;
+  SharedFunctionInfo* next_holder;
+
+  while (holder != NULL) {
+    next_holder = GetNextCodeMap(holder);
+    ClearNextCodeMap(holder);
+
+    FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
+    int new_length = SharedFunctionInfo::kEntriesStart;
+    int old_length = code_map->length();
+    for (int i = SharedFunctionInfo::kEntriesStart; i < old_length;
+         i += SharedFunctionInfo::kEntryLength) {
+      Code* code =
+          Code::cast(code_map->get(i + SharedFunctionInfo::kCachedCodeOffset));
+      if (!Marking::MarkBitFrom(code).Get()) continue;
+
+      // Move every slot in the entry.
+      for (int j = 0; j < SharedFunctionInfo::kEntryLength; j++) {
+        int dst_index = new_length++;
+        Object** slot = code_map->RawFieldOfElementAt(dst_index);
+        Object* object = code_map->get(i + j);
+        code_map->set(dst_index, object);
+        if (j == SharedFunctionInfo::kOsrAstIdOffset) {
+          DCHECK(object->IsSmi());
+        } else {
+          DCHECK(
+              Marking::IsBlack(Marking::MarkBitFrom(HeapObject::cast(*slot))));
+          isolate_->heap()->mark_compact_collector()->RecordSlot(slot, slot,
+                                                                 *slot);
+        }
+      }
+    }
+
+    // Trim the optimized code map if entries have been removed.
+    if (new_length < old_length) {
+      holder->TrimOptimizedCodeMap(old_length - new_length);
+    }
+
+    holder = next_holder;
+  }
+
+  optimized_code_map_holder_head_ = NULL;
+}
+
+
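+// Unlinks the given SharedFunctionInfo from the singly-linked list of
+// code-flushing candidates, handling both the head of the list and an
+// interior position.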
+void CodeFlusher::EvictCandidate(SharedFunctionInfo* shared_info) {
+  // Make sure previous flushing decisions are revisited.
+  isolate_->heap()->incremental_marking()->RecordWrites(shared_info);
+
+  if (FLAG_trace_code_flushing) {
+    PrintF("[code-flushing abandons function-info: ");
+    shared_info->ShortPrint();
+    PrintF("]\n");
+  }
+
+  SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
+  SharedFunctionInfo* next_candidate;
+  if (candidate == shared_info) {
+    next_candidate = GetNextCandidate(shared_info);
+    shared_function_info_candidates_head_ = next_candidate;
+    ClearNextCandidate(shared_info);
+  } else {
+    while (candidate != NULL) {
+      next_candidate = GetNextCandidate(candidate);
+
+      if (next_candidate == shared_info) {
+        next_candidate = GetNextCandidate(shared_info);
+        SetNextCandidate(candidate, next_candidate);
+        ClearNextCandidate(shared_info);
+        break;
+      }
+
+      candidate = next_candidate;
+    }
+  }
+}
+
+
+void CodeFlusher::EvictCandidate(JSFunction* function) {
+  DCHECK(!function->next_function_link()->IsUndefined());
+  Object* undefined = isolate_->heap()->undefined_value();
+
+  // Make sure previous flushing decisions are revisited.
+  isolate_->heap()->incremental_marking()->RecordWrites(function);
+  isolate_->heap()->incremental_marking()->RecordWrites(function->shared());
+
+  if (FLAG_trace_code_flushing) {
+    PrintF("[code-flushing abandons closure: ");
+    function->shared()->ShortPrint();
+    PrintF("]\n");
+  }
+
+  JSFunction* candidate = jsfunction_candidates_head_;
+  JSFunction* next_candidate;
+  if (candidate == function) {
+    next_candidate = GetNextCandidate(function);
+    jsfunction_candidates_head_ = next_candidate;
+    ClearNextCandidate(function, undefined);
+  } else {
+    while (candidate != NULL) {
+      next_candidate = GetNextCandidate(candidate);
+
+      if (next_candidate == function) {
+        next_candidate = GetNextCandidate(function);
+        SetNextCandidate(candidate, next_candidate);
+        ClearNextCandidate(function, undefined);
+        break;
+      }
+
+      candidate = next_candidate;
+    }
+  }
+}
+
+
+void CodeFlusher::EvictOptimizedCodeMap(SharedFunctionInfo* code_map_holder) {
+  DCHECK(!FixedArray::cast(code_map_holder->optimized_code_map())
+              ->get(SharedFunctionInfo::kNextMapIndex)
+              ->IsUndefined());
+
+  // Make sure previous flushing decisions are revisited.
+  isolate_->heap()->incremental_marking()->RecordWrites(code_map_holder);
+
+  if (FLAG_trace_code_flushing) {
+    PrintF("[code-flushing abandons code-map: ");
+    code_map_holder->ShortPrint();
+    PrintF("]\n");
+  }
+
+  SharedFunctionInfo* holder = optimized_code_map_holder_head_;
+  SharedFunctionInfo* next_holder;
+  if (holder == code_map_holder) {
+    next_holder = GetNextCodeMap(code_map_holder);
+    optimized_code_map_holder_head_ = next_holder;
+    ClearNextCodeMap(code_map_holder);
+  } else {
+    while (holder != NULL) {
+      next_holder = GetNextCodeMap(holder);
+
+      if (next_holder == code_map_holder) {
+        next_holder = GetNextCodeMap(code_map_holder);
+        SetNextCodeMap(holder, next_holder);
+        ClearNextCodeMap(code_map_holder);
+        break;
+      }
+
+      holder = next_holder;
+    }
+  }
+}
+
+
+void CodeFlusher::EvictJSFunctionCandidates() {
+  JSFunction* candidate = jsfunction_candidates_head_;
+  JSFunction* next_candidate;
+  while (candidate != NULL) {
+    next_candidate = GetNextCandidate(candidate);
+    EvictCandidate(candidate);
+    candidate = next_candidate;
+  }
+  DCHECK(jsfunction_candidates_head_ == NULL);
+}
+
+
+void CodeFlusher::EvictSharedFunctionInfoCandidates() {
+  SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
+  SharedFunctionInfo* next_candidate;
+  while (candidate != NULL) {
+    next_candidate = GetNextCandidate(candidate);
+    EvictCandidate(candidate);
+    candidate = next_candidate;
+  }
+  DCHECK(shared_function_info_candidates_head_ == NULL);
+}
+
+
+void CodeFlusher::EvictOptimizedCodeMaps() {
+  SharedFunctionInfo* holder = optimized_code_map_holder_head_;
+  SharedFunctionInfo* next_holder;
+  while (holder != NULL) {
+    next_holder = GetNextCodeMap(holder);
+    EvictOptimizedCodeMap(holder);
+    holder = next_holder;
+  }
+  DCHECK(optimized_code_map_holder_head_ == NULL);
+}
+
+
+void CodeFlusher::IteratePointersToFromSpace(ObjectVisitor* v) {
+  Heap* heap = isolate_->heap();
+
+  JSFunction** slot = &jsfunction_candidates_head_;
+  JSFunction* candidate = jsfunction_candidates_head_;
+  while (candidate != NULL) {
+    if (heap->InFromSpace(candidate)) {
+      v->VisitPointer(reinterpret_cast<Object**>(slot));
+    }
+    candidate = GetNextCandidate(*slot);
+    slot = GetNextCandidateSlot(*slot);
+  }
+}
+
+
+MarkCompactCollector::~MarkCompactCollector() {
+  if (code_flusher_ != NULL) {
+    delete code_flusher_;
+    code_flusher_ = NULL;
+  }
+}
+
+
+static inline HeapObject* ShortCircuitConsString(Object** p) {
+  // Optimization: If the heap object pointed to by p is a non-internalized
+  // cons string whose right substring is HEAP->empty_string, update
+  // it in place to its left substring.  Return the updated value.
+  //
+  // Here we assume that if we change *p, we replace it with a heap object
+  // (i.e., the left substring of a cons string is always a heap object).
+  //
+  // The check performed is:
+  //   object->IsConsString() && !object->IsInternalizedString() &&
+  //   (ConsString::cast(object)->second() == HEAP->empty_string())
+  // except the maps for the object and its possible substrings might be
+  // marked.
+  HeapObject* object = HeapObject::cast(*p);
+  if (!FLAG_clever_optimizations) return object;
+  Map* map = object->map();
+  InstanceType type = map->instance_type();
+  if (!IsShortcutCandidate(type)) return object;
+
+  Object* second = reinterpret_cast<ConsString*>(object)->second();
+  Heap* heap = map->GetHeap();
+  if (second != heap->empty_string()) {
+    return object;
+  }
+
+  // Since we don't have the object's start, it is impossible to update the
+  // page dirty marks. Therefore, we only replace the string with its left
+  // substring when page dirty marks do not change.
+  Object* first = reinterpret_cast<ConsString*>(object)->first();
+  if (!heap->InNewSpace(object) && heap->InNewSpace(first)) return object;
+
+  *p = first;
+  return HeapObject::cast(first);
+}
+
+
+class MarkCompactMarkingVisitor
+    : public StaticMarkingVisitor<MarkCompactMarkingVisitor> {
+ public:
+  static void ObjectStatsVisitBase(StaticVisitorBase::VisitorId id, Map* map,
+                                   HeapObject* obj);
+
+  static void ObjectStatsCountFixedArray(
+      FixedArrayBase* fixed_array, FixedArraySubInstanceType fast_type,
+      FixedArraySubInstanceType dictionary_type);
+
+  template <MarkCompactMarkingVisitor::VisitorId id>
+  class ObjectStatsTracker {
+   public:
+    static inline void Visit(Map* map, HeapObject* obj);
+  };
+
+  static void Initialize();
+
+  INLINE(static void VisitPointer(Heap* heap, Object** p)) {
+    MarkObjectByPointer(heap->mark_compact_collector(), p, p);
+  }
+
+  INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
+    // Mark all objects pointed to in [start, end).
+    const int kMinRangeForMarkingRecursion = 64;
+    if (end - start >= kMinRangeForMarkingRecursion) {
+      if (VisitUnmarkedObjects(heap, start, end)) return;
+      // We are close to a stack overflow, so just mark the objects.
+    }
+    MarkCompactCollector* collector = heap->mark_compact_collector();
+    for (Object** p = start; p < end; p++) {
+      MarkObjectByPointer(collector, start, p);
+    }
+  }
+
+  // Marks the object black and pushes it on the marking stack.
+  INLINE(static void MarkObject(Heap* heap, HeapObject* object)) {
+    MarkBit mark = Marking::MarkBitFrom(object);
+    heap->mark_compact_collector()->MarkObject(object, mark);
+  }
+
+  // Marks the object black without pushing it on the marking stack.
+  // Returns true if object needed marking and false otherwise.
+  INLINE(static bool MarkObjectWithoutPush(Heap* heap, HeapObject* object)) {
+    MarkBit mark_bit = Marking::MarkBitFrom(object);
+    if (!mark_bit.Get()) {
+      heap->mark_compact_collector()->SetMark(object, mark_bit);
+      return true;
+    }
+    return false;
+  }
+
+  // Mark object pointed to by p.
+  INLINE(static void MarkObjectByPointer(MarkCompactCollector* collector,
+                                         Object** anchor_slot, Object** p)) {
+    if (!(*p)->IsHeapObject()) return;
+    HeapObject* object = ShortCircuitConsString(p);
+    collector->RecordSlot(anchor_slot, p, object);
+    MarkBit mark = Marking::MarkBitFrom(object);
+    collector->MarkObject(object, mark);
+  }
+
+
+  // Visit an unmarked object.
+  INLINE(static void VisitUnmarkedObject(MarkCompactCollector* collector,
+                                         HeapObject* obj)) {
+#ifdef DEBUG
+    DCHECK(collector->heap()->Contains(obj));
+    DCHECK(!collector->heap()->mark_compact_collector()->IsMarked(obj));
+#endif
+    Map* map = obj->map();
+    Heap* heap = obj->GetHeap();
+    MarkBit mark = Marking::MarkBitFrom(obj);
+    heap->mark_compact_collector()->SetMark(obj, mark);
+    // Mark the map pointer and the body.
+    MarkBit map_mark = Marking::MarkBitFrom(map);
+    heap->mark_compact_collector()->MarkObject(map, map_mark);
+    IterateBody(map, obj);
+  }
+
+  // Visit all unmarked objects pointed to by [start, end).
+  // Returns false if the operation fails (lack of stack space).
+  INLINE(static bool VisitUnmarkedObjects(Heap* heap, Object** start,
+                                          Object** end)) {
+    // Return false if we are close to the stack limit.
+    StackLimitCheck check(heap->isolate());
+    if (check.HasOverflowed()) return false;
+
+    MarkCompactCollector* collector = heap->mark_compact_collector();
+    // Visit the unmarked objects.
+    for (Object** p = start; p < end; p++) {
+      Object* o = *p;
+      if (!o->IsHeapObject()) continue;
+      collector->RecordSlot(start, p, o);
+      HeapObject* obj = HeapObject::cast(o);
+      MarkBit mark = Marking::MarkBitFrom(obj);
+      if (mark.Get()) continue;
+      VisitUnmarkedObject(collector, obj);
+    }
+    return true;
+  }
+
+ private:
+  template <int id>
+  static inline void TrackObjectStatsAndVisit(Map* map, HeapObject* obj);
+
+  // Code flushing support.
+
+  static const int kRegExpCodeThreshold = 5;
+
+  static void UpdateRegExpCodeAgeAndFlush(Heap* heap, JSRegExp* re,
+                                          bool is_one_byte) {
+    // Make sure that the fixed array is in fact initialized on the RegExp.
+    // We could potentially trigger a GC when initializing the RegExp.
+    if (HeapObject::cast(re->data())->map()->instance_type() !=
+        FIXED_ARRAY_TYPE)
+      return;
+
+    // Make sure this is a RegExp that actually contains code.
+    if (re->TypeTag() != JSRegExp::IRREGEXP) return;
+
+    Object* code = re->DataAt(JSRegExp::code_index(is_one_byte));
+    if (!code->IsSmi() &&
+        HeapObject::cast(code)->map()->instance_type() == CODE_TYPE) {
+      // Save a copy that can be reinstated if we need the code again.
+      re->SetDataAt(JSRegExp::saved_code_index(is_one_byte), code);
+
+      // Saving a copy might create a pointer into a compaction candidate
+      // that was not observed by the marker.  This might happen if the
+      // JSRegExp data was marked through the compilation cache before the
+      // marker reached the JSRegExp object.
+      FixedArray* data = FixedArray::cast(re->data());
+      Object** slot =
+          data->data_start() + JSRegExp::saved_code_index(is_one_byte);
+      heap->mark_compact_collector()->RecordSlot(slot, slot, code);
+
+      // Set a number in the 0-255 range to guarantee no smi overflow.
+      re->SetDataAt(JSRegExp::code_index(is_one_byte),
+                    Smi::FromInt(heap->sweep_generation() & 0xff));
+    } else if (code->IsSmi()) {
+      int value = Smi::cast(code)->value();
+      // The regexp has not been compiled yet or there was a compilation error.
+      if (value == JSRegExp::kUninitializedValue ||
+          value == JSRegExp::kCompilationErrorValue) {
+        return;
+      }
+
+      // Check if we should flush now.
+      if (value == ((heap->sweep_generation() - kRegExpCodeThreshold) & 0xff)) {
+        re->SetDataAt(JSRegExp::code_index(is_one_byte),
+                      Smi::FromInt(JSRegExp::kUninitializedValue));
+        re->SetDataAt(JSRegExp::saved_code_index(is_one_byte),
+                      Smi::FromInt(JSRegExp::kUninitializedValue));
+      }
+    }
+  }
+
+
+  // Works by storing the current sweep_generation (as a smi) in the code
+  // slot of the RegExp's data array, while keeping a copy of the code around
+  // that can be reinstated if the RegExp is reused before flushing.  If the
+  // code has not been used for kRegExpCodeThreshold mark-sweep GCs, it is
+  // flushed.
+  static void VisitRegExpAndFlushCode(Map* map, HeapObject* object) {
+    Heap* heap = map->GetHeap();
+    MarkCompactCollector* collector = heap->mark_compact_collector();
+    if (!collector->is_code_flushing_enabled()) {
+      VisitJSRegExp(map, object);
+      return;
+    }
+    JSRegExp* re = reinterpret_cast<JSRegExp*>(object);
+    // Flush code or set age on both one byte and two byte code.
+    UpdateRegExpCodeAgeAndFlush(heap, re, true);
+    UpdateRegExpCodeAgeAndFlush(heap, re, false);
+    // Visit the fields of the RegExp, including the updated FixedArray.
+    VisitJSRegExp(map, object);
+  }
+
+  static VisitorDispatchTable<Callback> non_count_table_;
+};
+
+
+void MarkCompactMarkingVisitor::ObjectStatsCountFixedArray(
+    FixedArrayBase* fixed_array, FixedArraySubInstanceType fast_type,
+    FixedArraySubInstanceType dictionary_type) {
+  Heap* heap = fixed_array->map()->GetHeap();
+  if (fixed_array->map() != heap->fixed_cow_array_map() &&
+      fixed_array->map() != heap->fixed_double_array_map() &&
+      fixed_array != heap->empty_fixed_array()) {
+    if (fixed_array->IsDictionary()) {
+      heap->RecordFixedArraySubTypeStats(dictionary_type, fixed_array->Size());
+    } else {
+      heap->RecordFixedArraySubTypeStats(fast_type, fixed_array->Size());
+    }
+  }
+}
+
+
+void MarkCompactMarkingVisitor::ObjectStatsVisitBase(
+    MarkCompactMarkingVisitor::VisitorId id, Map* map, HeapObject* obj) {
+  Heap* heap = map->GetHeap();
+  int object_size = obj->Size();
+  heap->RecordObjectStats(map->instance_type(), object_size);
+  non_count_table_.GetVisitorById(id)(map, obj);
+  if (obj->IsJSObject()) {
+    JSObject* object = JSObject::cast(obj);
+    ObjectStatsCountFixedArray(object->elements(), DICTIONARY_ELEMENTS_SUB_TYPE,
+                               FAST_ELEMENTS_SUB_TYPE);
+    ObjectStatsCountFixedArray(object->properties(),
+                               DICTIONARY_PROPERTIES_SUB_TYPE,
+                               FAST_PROPERTIES_SUB_TYPE);
+  }
+}
+
+
+template <MarkCompactMarkingVisitor::VisitorId id>
+void MarkCompactMarkingVisitor::ObjectStatsTracker<id>::Visit(Map* map,
+                                                              HeapObject* obj) {
+  ObjectStatsVisitBase(id, map, obj);
+}
+
+
+template <>
+class MarkCompactMarkingVisitor::ObjectStatsTracker<
+    MarkCompactMarkingVisitor::kVisitMap> {
+ public:
+  static inline void Visit(Map* map, HeapObject* obj) {
+    Heap* heap = map->GetHeap();
+    Map* map_obj = Map::cast(obj);
+    DCHECK(map->instance_type() == MAP_TYPE);
+    DescriptorArray* array = map_obj->instance_descriptors();
+    if (map_obj->owns_descriptors() &&
+        array != heap->empty_descriptor_array()) {
+      int fixed_array_size = array->Size();
+      heap->RecordFixedArraySubTypeStats(DESCRIPTOR_ARRAY_SUB_TYPE,
+                                         fixed_array_size);
+    }
+    if (map_obj->HasTransitionArray()) {
+      int fixed_array_size = map_obj->transitions()->Size();
+      heap->RecordFixedArraySubTypeStats(TRANSITION_ARRAY_SUB_TYPE,
+                                         fixed_array_size);
+    }
+    if (map_obj->has_code_cache()) {
+      CodeCache* cache = CodeCache::cast(map_obj->code_cache());
+      heap->RecordFixedArraySubTypeStats(MAP_CODE_CACHE_SUB_TYPE,
+                                         cache->default_cache()->Size());
+      if (!cache->normal_type_cache()->IsUndefined()) {
+        heap->RecordFixedArraySubTypeStats(
+            MAP_CODE_CACHE_SUB_TYPE,
+            FixedArray::cast(cache->normal_type_cache())->Size());
+      }
+    }
+    ObjectStatsVisitBase(kVisitMap, map, obj);
+  }
+};
+
+
+template <>
+class MarkCompactMarkingVisitor::ObjectStatsTracker<
+    MarkCompactMarkingVisitor::kVisitCode> {
+ public:
+  static inline void Visit(Map* map, HeapObject* obj) {
+    Heap* heap = map->GetHeap();
+    int object_size = obj->Size();
+    DCHECK(map->instance_type() == CODE_TYPE);
+    Code* code_obj = Code::cast(obj);
+    heap->RecordCodeSubTypeStats(code_obj->kind(), code_obj->GetRawAge(),
+                                 object_size);
+    ObjectStatsVisitBase(kVisitCode, map, obj);
+  }
+};
+
+
+template <>
+class MarkCompactMarkingVisitor::ObjectStatsTracker<
+    MarkCompactMarkingVisitor::kVisitSharedFunctionInfo> {
+ public:
+  static inline void Visit(Map* map, HeapObject* obj) {
+    Heap* heap = map->GetHeap();
+    SharedFunctionInfo* sfi = SharedFunctionInfo::cast(obj);
+    if (sfi->scope_info() != heap->empty_fixed_array()) {
+      heap->RecordFixedArraySubTypeStats(
+          SCOPE_INFO_SUB_TYPE, FixedArray::cast(sfi->scope_info())->Size());
+    }
+    ObjectStatsVisitBase(kVisitSharedFunctionInfo, map, obj);
+  }
+};
+
+
+template <>
+class MarkCompactMarkingVisitor::ObjectStatsTracker<
+    MarkCompactMarkingVisitor::kVisitFixedArray> {
+ public:
+  static inline void Visit(Map* map, HeapObject* obj) {
+    Heap* heap = map->GetHeap();
+    FixedArray* fixed_array = FixedArray::cast(obj);
+    if (fixed_array == heap->string_table()) {
+      heap->RecordFixedArraySubTypeStats(STRING_TABLE_SUB_TYPE,
+                                         fixed_array->Size());
+    }
+    ObjectStatsVisitBase(kVisitFixedArray, map, obj);
+  }
+};
+
+
+void MarkCompactMarkingVisitor::Initialize() {
+  StaticMarkingVisitor<MarkCompactMarkingVisitor>::Initialize();
+
+  table_.Register(kVisitJSRegExp, &VisitRegExpAndFlushCode);
+
+  if (FLAG_track_gc_object_stats) {
+    // Copy the visitor table to make call-through possible.
+    non_count_table_.CopyFrom(&table_);
+#define VISITOR_ID_COUNT_FUNCTION(id) \
+  table_.Register(kVisit##id, ObjectStatsTracker<kVisit##id>::Visit);
+    VISITOR_ID_LIST(VISITOR_ID_COUNT_FUNCTION)
+#undef VISITOR_ID_COUNT_FUNCTION
+  }
+}
+
+
+VisitorDispatchTable<MarkCompactMarkingVisitor::Callback>
+    MarkCompactMarkingVisitor::non_count_table_;
+
+
+class CodeMarkingVisitor : public ThreadVisitor {
+ public:
+  explicit CodeMarkingVisitor(MarkCompactCollector* collector)
+      : collector_(collector) {}
+
+  void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
+    collector_->PrepareThreadForCodeFlushing(isolate, top);
+  }
+
+ private:
+  MarkCompactCollector* collector_;
+};
+
+
+class SharedFunctionInfoMarkingVisitor : public ObjectVisitor {
+ public:
+  explicit SharedFunctionInfoMarkingVisitor(MarkCompactCollector* collector)
+      : collector_(collector) {}
+
+  void VisitPointers(Object** start, Object** end) {
+    for (Object** p = start; p < end; p++) VisitPointer(p);
+  }
+
+  void VisitPointer(Object** slot) {
+    Object* obj = *slot;
+    if (obj->IsSharedFunctionInfo()) {
+      SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj);
+      MarkBit shared_mark = Marking::MarkBitFrom(shared);
+      MarkBit code_mark = Marking::MarkBitFrom(shared->code());
+      collector_->MarkObject(shared->code(), code_mark);
+      collector_->MarkObject(shared, shared_mark);
+    }
+  }
+
+ private:
+  MarkCompactCollector* collector_;
+};
+
+
+void MarkCompactCollector::PrepareThreadForCodeFlushing(Isolate* isolate,
+                                                        ThreadLocalTop* top) {
+  for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) {
+    // Note: for a frame that has a pending lazy deoptimization,
+    // StackFrame::unchecked_code will return a non-optimized code object for
+    // the outermost function, while StackFrame::LookupCode will return the
+    // actual optimized code object.
+    StackFrame* frame = it.frame();
+    Code* code = frame->unchecked_code();
+    MarkBit code_mark = Marking::MarkBitFrom(code);
+    MarkObject(code, code_mark);
+    if (frame->is_optimized()) {
+      MarkCompactMarkingVisitor::MarkInlinedFunctionsCode(heap(),
+                                                          frame->LookupCode());
+    }
+  }
+}
+
+
+void MarkCompactCollector::PrepareForCodeFlushing() {
+  // Enable code flushing for non-incremental cycles.
+  if (FLAG_flush_code && !FLAG_flush_code_incrementally) {
+    EnableCodeFlushing(!was_marked_incrementally_);
+  }
+
+  // If code flushing is disabled, there is no need to prepare for it.
+  if (!is_code_flushing_enabled()) return;
+
+  // Ensure that the empty descriptor array is marked. MarkDescriptorArray
+  // relies on it being marked before any other descriptor array.
+  HeapObject* descriptor_array = heap()->empty_descriptor_array();
+  MarkBit descriptor_array_mark = Marking::MarkBitFrom(descriptor_array);
+  MarkObject(descriptor_array, descriptor_array_mark);
+
+  // Make sure we are not referencing the code from the stack.
+  DCHECK(this == heap()->mark_compact_collector());
+  PrepareThreadForCodeFlushing(heap()->isolate(),
+                               heap()->isolate()->thread_local_top());
+
+  // Iterate the archived stacks in all threads to check if
+  // the code is referenced.
+  CodeMarkingVisitor code_marking_visitor(this);
+  heap()->isolate()->thread_manager()->IterateArchivedThreads(
+      &code_marking_visitor);
+
+  SharedFunctionInfoMarkingVisitor visitor(this);
+  heap()->isolate()->compilation_cache()->IterateFunctions(&visitor);
+  heap()->isolate()->handle_scope_implementer()->Iterate(&visitor);
+
+  ProcessMarkingDeque();
+}
+
+
+// Visitor class for marking heap roots.
+class RootMarkingVisitor : public ObjectVisitor {
+ public:
+  explicit RootMarkingVisitor(Heap* heap)
+      : collector_(heap->mark_compact_collector()) {}
+
+  void VisitPointer(Object** p) { MarkObjectByPointer(p); }
+
+  void VisitPointers(Object** start, Object** end) {
+    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
+  }
+
+  // Skip the weak next code link in a code object, which is visited in
+  // ProcessTopOptimizedFrame.
+  void VisitNextCodeLink(Object** p) {}
+
+ private:
+  void MarkObjectByPointer(Object** p) {
+    if (!(*p)->IsHeapObject()) return;
+
+    // Replace flat cons strings in place.
+    HeapObject* object = ShortCircuitConsString(p);
+    MarkBit mark_bit = Marking::MarkBitFrom(object);
+    if (mark_bit.Get()) return;
+
+    Map* map = object->map();
+    // Mark the object.
+    collector_->SetMark(object, mark_bit);
+
+    // Mark the map pointer and body, and push them on the marking stack.
+    MarkBit map_mark = Marking::MarkBitFrom(map);
+    collector_->MarkObject(map, map_mark);
+    MarkCompactMarkingVisitor::IterateBody(map, object);
+
+    // Mark all the objects reachable from the map and body.  May leave
+    // overflowed objects in the heap.
+    collector_->EmptyMarkingDeque();
+  }
+
+  MarkCompactCollector* collector_;
+};
+
+
+// Helper class for pruning the string table.
+template <bool finalize_external_strings>
+class StringTableCleaner : public ObjectVisitor {
+ public:
+  explicit StringTableCleaner(Heap* heap) : heap_(heap), pointers_removed_(0) {}
+
+  virtual void VisitPointers(Object** start, Object** end) {
+    // Visit all HeapObject pointers in [start, end).
+    for (Object** p = start; p < end; p++) {
+      Object* o = *p;
+      if (o->IsHeapObject() &&
+          !Marking::MarkBitFrom(HeapObject::cast(o)).Get()) {
+        if (finalize_external_strings) {
+          DCHECK(o->IsExternalString());
+          heap_->FinalizeExternalString(String::cast(*p));
+        } else {
+          pointers_removed_++;
+        }
+        // Set the entry to the_hole_value (as deleted).
+        *p = heap_->the_hole_value();
+      }
+    }
+  }
+
+  int PointersRemoved() {
+    DCHECK(!finalize_external_strings);
+    return pointers_removed_;
+  }
+
+ private:
+  Heap* heap_;
+  int pointers_removed_;
+};
+
+
+typedef StringTableCleaner<false> InternalizedStringTableCleaner;
+typedef StringTableCleaner<true> ExternalStringTableCleaner;
+
+
+// Implementation of WeakObjectRetainer for mark compact GCs. All marked objects
+// are retained.
+class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
+ public:
+  virtual Object* RetainAs(Object* object) {
+    if (Marking::MarkBitFrom(HeapObject::cast(object)).Get()) {
+      return object;
+    } else if (object->IsAllocationSite() &&
+               !(AllocationSite::cast(object)->IsZombie())) {
+      // "dead" AllocationSites need to live long enough for a traversal of new
+      // space. These sites get a one-time reprieve.
+      AllocationSite* site = AllocationSite::cast(object);
+      site->MarkZombie();
+      site->GetHeap()->mark_compact_collector()->MarkAllocationSite(site);
+      return object;
+    } else {
+      return NULL;
+    }
+  }
+};
+
+
+// Fill the marking stack with overflowed objects returned by the given
+// iterator.  Stop when the marking stack is filled or the end of the space
+// is reached, whichever comes first.
+template <class T>
+static void DiscoverGreyObjectsWithIterator(Heap* heap,
+                                            MarkingDeque* marking_deque,
+                                            T* it) {
+  // The caller should ensure that the marking stack is initially not full,
+  // so that we don't waste effort pointlessly scanning for objects.
+  DCHECK(!marking_deque->IsFull());
+
+  Map* filler_map = heap->one_pointer_filler_map();
+  for (HeapObject* object = it->Next(); object != NULL; object = it->Next()) {
+    MarkBit markbit = Marking::MarkBitFrom(object);
+    if ((object->map() != filler_map) && Marking::IsGrey(markbit)) {
+      Marking::GreyToBlack(markbit);
+      MemoryChunk::IncrementLiveBytesFromGC(object->address(), object->Size());
+      marking_deque->PushBlack(object);
+      if (marking_deque->IsFull()) return;
+    }
+  }
+}
+
+
+static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts);
+
+
+static void DiscoverGreyObjectsOnPage(MarkingDeque* marking_deque,
+                                      MemoryChunk* p) {
+  DCHECK(!marking_deque->IsFull());
+  DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+  DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
+  DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
+  DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
+
+  for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
+    Address cell_base = it.CurrentCellBase();
+    MarkBit::CellType* cell = it.CurrentCell();
+
+    const MarkBit::CellType current_cell = *cell;
+    if (current_cell == 0) continue;
+
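+    // Grey objects are marked with the bit pattern "11". ANDing a cell with
+    // itself shifted right by one bit therefore leaves a set bit exactly at
+    // the first mark bit of every grey object in the cell; the last object
+    // of a cell borrows its second mark bit from the following cell.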
+    MarkBit::CellType grey_objects;
+    if (it.HasNext()) {
+      const MarkBit::CellType next_cell = *(cell + 1);
+      grey_objects = current_cell & ((current_cell >> 1) |
+                                     (next_cell << (Bitmap::kBitsPerCell - 1)));
+    } else {
+      grey_objects = current_cell & (current_cell >> 1);
+    }
+
+    int offset = 0;
+    while (grey_objects != 0) {
+      int trailing_zeros = base::bits::CountTrailingZeros32(grey_objects);
+      grey_objects >>= trailing_zeros;
+      offset += trailing_zeros;
+      MarkBit markbit(cell, 1 << offset, false);
+      DCHECK(Marking::IsGrey(markbit));
+      Marking::GreyToBlack(markbit);
+      Address addr = cell_base + offset * kPointerSize;
+      HeapObject* object = HeapObject::FromAddress(addr);
+      MemoryChunk::IncrementLiveBytesFromGC(object->address(), object->Size());
+      marking_deque->PushBlack(object);
+      if (marking_deque->IsFull()) return;
+      offset += 2;
+      grey_objects >>= 2;
+    }
+
+    grey_objects >>= (Bitmap::kBitsPerCell - 1);
+  }
+}
+
+
+int MarkCompactCollector::DiscoverAndEvacuateBlackObjectsOnPage(
+    NewSpace* new_space, NewSpacePage* p) {
+  DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+  DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
+  DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
+  DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
+
+  MarkBit::CellType* cells = p->markbits()->cells();
+  int survivors_size = 0;
+
+  for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
+    Address cell_base = it.CurrentCellBase();
+    MarkBit::CellType* cell = it.CurrentCell();
+
+    MarkBit::CellType current_cell = *cell;
+    if (current_cell == 0) continue;
+
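+    // Each set bit in the cell is the first mark bit of a live object; walk
+    // the bits from least to most significant and evacuate the corresponding
+    // objects.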
+    int offset = 0;
+    while (current_cell != 0) {
+      int trailing_zeros = base::bits::CountTrailingZeros32(current_cell);
+      current_cell >>= trailing_zeros;
+      offset += trailing_zeros;
+      Address address = cell_base + offset * kPointerSize;
+      HeapObject* object = HeapObject::FromAddress(address);
+
+      int size = object->Size();
+      survivors_size += size;
+
+      Heap::UpdateAllocationSiteFeedback(object, Heap::RECORD_SCRATCHPAD_SLOT);
+
+      offset++;
+      current_cell >>= 1;
+
+      // TODO(hpayer): Refactor EvacuateObject and call this function instead.
+      if (heap()->ShouldBePromoted(object->address(), size) &&
+          TryPromoteObject(object, size)) {
+        continue;
+      }
+
+      AllocationResult allocation = new_space->AllocateRaw(size);
+      if (allocation.IsRetry()) {
+        if (!new_space->AddFreshPage()) {
+          // Shouldn't happen. We are sweeping linearly, and to-space
+          // has the same number of pages as from-space, so there is
+          // always room.
+          UNREACHABLE();
+        }
+        allocation = new_space->AllocateRaw(size);
+        DCHECK(!allocation.IsRetry());
+      }
+      Object* target = allocation.ToObjectChecked();
+
+      MigrateObject(HeapObject::cast(target), object, size, NEW_SPACE);
+      heap()->IncrementSemiSpaceCopiedObjectSize(size);
+    }
+    *cells = 0;
+  }
+  return survivors_size;
+}
+
+
+static void DiscoverGreyObjectsInSpace(Heap* heap, MarkingDeque* marking_deque,
+                                       PagedSpace* space) {
+  PageIterator it(space);
+  while (it.has_next()) {
+    Page* p = it.next();
+    DiscoverGreyObjectsOnPage(marking_deque, p);
+    if (marking_deque->IsFull()) return;
+  }
+}
+
+
+static void DiscoverGreyObjectsInNewSpace(Heap* heap,
+                                          MarkingDeque* marking_deque) {
+  NewSpace* space = heap->new_space();
+  NewSpacePageIterator it(space->bottom(), space->top());
+  while (it.has_next()) {
+    NewSpacePage* page = it.next();
+    DiscoverGreyObjectsOnPage(marking_deque, page);
+    if (marking_deque->IsFull()) return;
+  }
+}
+
+
+bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
+  Object* o = *p;
+  if (!o->IsHeapObject()) return false;
+  HeapObject* heap_object = HeapObject::cast(o);
+  MarkBit mark = Marking::MarkBitFrom(heap_object);
+  return !mark.Get();
+}
+
+
+bool MarkCompactCollector::IsUnmarkedHeapObjectWithHeap(Heap* heap,
+                                                        Object** p) {
+  Object* o = *p;
+  DCHECK(o->IsHeapObject());
+  HeapObject* heap_object = HeapObject::cast(o);
+  MarkBit mark = Marking::MarkBitFrom(heap_object);
+  return !mark.Get();
+}
+
+
+void MarkCompactCollector::MarkStringTable(RootMarkingVisitor* visitor) {
+  StringTable* string_table = heap()->string_table();
+  // Mark the string table itself.
+  MarkBit string_table_mark = Marking::MarkBitFrom(string_table);
+  if (!string_table_mark.Get()) {
+    // The string table could have already been marked by visiting the
+    // handles list.
+    SetMark(string_table, string_table_mark);
+  }
+  // Explicitly mark the prefix.
+  string_table->IteratePrefix(visitor);
+  ProcessMarkingDeque();
+}
+
+
+void MarkCompactCollector::MarkAllocationSite(AllocationSite* site) {
+  MarkBit mark_bit = Marking::MarkBitFrom(site);
+  SetMark(site, mark_bit);
+}
+
+
+void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
+  // Mark the heap roots including global variables, stack variables,
+  // etc., and all objects reachable from them.
+  heap()->IterateStrongRoots(visitor, VISIT_ONLY_STRONG);
+
+  // Handle the string table specially.
+  MarkStringTable(visitor);
+
+  MarkWeakObjectToCodeTable();
+
+  // There may be overflowed objects in the heap.  Visit them now.
+  while (marking_deque_.overflowed()) {
+    RefillMarkingDeque();
+    EmptyMarkingDeque();
+  }
+}
+
+
+void MarkCompactCollector::MarkImplicitRefGroups() {
+  List<ImplicitRefGroup*>* ref_groups =
+      isolate()->global_handles()->implicit_ref_groups();
+
+  int last = 0;
+  for (int i = 0; i < ref_groups->length(); i++) {
+    ImplicitRefGroup* entry = ref_groups->at(i);
+    DCHECK(entry != NULL);
+
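+    // Groups whose parent is not marked are compacted towards the front of
+    // the list and kept for a later iteration of ephemeral marking.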
+    if (!IsMarked(*entry->parent)) {
+      (*ref_groups)[last++] = entry;
+      continue;
+    }
+
+    Object*** children = entry->children;
+    // A parent object is marked, so mark all child heap objects.
+    for (size_t j = 0; j < entry->length; ++j) {
+      if ((*children[j])->IsHeapObject()) {
+        HeapObject* child = HeapObject::cast(*children[j]);
+        MarkBit mark = Marking::MarkBitFrom(child);
+        MarkObject(child, mark);
+      }
+    }
+
+    // Once the entire group has been marked, dispose it because it's
+    // not needed anymore.
+    delete entry;
+  }
+  ref_groups->Rewind(last);
+}
+
+
+void MarkCompactCollector::MarkWeakObjectToCodeTable() {
+  HeapObject* weak_object_to_code_table =
+      HeapObject::cast(heap()->weak_object_to_code_table());
+  if (!IsMarked(weak_object_to_code_table)) {
+    MarkBit mark = Marking::MarkBitFrom(weak_object_to_code_table);
+    SetMark(weak_object_to_code_table, mark);
+  }
+}
+
+
+// Mark all objects reachable from the objects on the marking stack.
+// Before: the marking stack contains zero or more heap object pointers.
+// After: the marking stack is empty, and all objects reachable from the
+// marking stack have been marked, or are overflowed in the heap.
+void MarkCompactCollector::EmptyMarkingDeque() {
+  while (!marking_deque_.IsEmpty()) {
+    HeapObject* object = marking_deque_.Pop();
+    DCHECK(object->IsHeapObject());
+    DCHECK(heap()->Contains(object));
+    DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
+
+    Map* map = object->map();
+    MarkBit map_mark = Marking::MarkBitFrom(map);
+    MarkObject(map, map_mark);
+
+    MarkCompactMarkingVisitor::IterateBody(map, object);
+  }
+}
+
+
+// Sweep the heap for overflowed objects, clear their overflow bits, and
+// push them on the marking stack.  Stop early if the marking stack fills
+// before sweeping completes.  If sweeping completes, there are no remaining
+// overflowed objects in the heap, so the overflow flag on the marking stack
+// is cleared.
+void MarkCompactCollector::RefillMarkingDeque() {
+  DCHECK(marking_deque_.overflowed());
+
+  DiscoverGreyObjectsInNewSpace(heap(), &marking_deque_);
+  if (marking_deque_.IsFull()) return;
+
+  DiscoverGreyObjectsInSpace(heap(), &marking_deque_,
+                             heap()->old_pointer_space());
+  if (marking_deque_.IsFull()) return;
+
+  DiscoverGreyObjectsInSpace(heap(), &marking_deque_, heap()->old_data_space());
+  if (marking_deque_.IsFull()) return;
+
+  DiscoverGreyObjectsInSpace(heap(), &marking_deque_, heap()->code_space());
+  if (marking_deque_.IsFull()) return;
+
+  DiscoverGreyObjectsInSpace(heap(), &marking_deque_, heap()->map_space());
+  if (marking_deque_.IsFull()) return;
+
+  DiscoverGreyObjectsInSpace(heap(), &marking_deque_, heap()->cell_space());
+  if (marking_deque_.IsFull()) return;
+
+  DiscoverGreyObjectsInSpace(heap(), &marking_deque_,
+                             heap()->property_cell_space());
+  if (marking_deque_.IsFull()) return;
+
+  LargeObjectIterator lo_it(heap()->lo_space());
+  DiscoverGreyObjectsWithIterator(heap(), &marking_deque_, &lo_it);
+  if (marking_deque_.IsFull()) return;
+
+  marking_deque_.ClearOverflowed();
+}
+
+
+// Mark all objects reachable (transitively) from objects on the marking
+// stack.  Before: the marking stack contains zero or more heap object
+// pointers.  After: the marking stack is empty and there are no overflowed
+// objects in the heap.
+void MarkCompactCollector::ProcessMarkingDeque() {
+  EmptyMarkingDeque();
+  while (marking_deque_.overflowed()) {
+    RefillMarkingDeque();
+    EmptyMarkingDeque();
+  }
+}
+
+
+// Mark all objects reachable (transitively) from objects on the marking
+// stack, including references that are only considered in the atomic marking
+// pause.
+void MarkCompactCollector::ProcessEphemeralMarking(ObjectVisitor* visitor) {
+  bool work_to_do = true;
+  DCHECK(marking_deque_.IsEmpty());
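+  // Object groups, implicit references and weak collections can make more
+  // objects reachable, so iterate until marking reaches a fixed point.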
+  while (work_to_do) {
+    isolate()->global_handles()->IterateObjectGroups(
+        visitor, &IsUnmarkedHeapObjectWithHeap);
+    MarkImplicitRefGroups();
+    ProcessWeakCollections();
+    work_to_do = !marking_deque_.IsEmpty();
+    ProcessMarkingDeque();
+  }
+}
+
+
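+// If the topmost JavaScript frame is an optimized frame whose code cannot be
+// deoptimized at its current pc, visit the body of that code object so that
+// its references are kept alive.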
+void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
+  for (StackFrameIterator it(isolate(), isolate()->thread_local_top());
+       !it.done(); it.Advance()) {
+    if (it.frame()->type() == StackFrame::JAVA_SCRIPT) {
+      return;
+    }
+    if (it.frame()->type() == StackFrame::OPTIMIZED) {
+      Code* code = it.frame()->LookupCode();
+      if (!code->CanDeoptAt(it.frame()->pc())) {
+        code->CodeIterateBody(visitor);
+      }
+      ProcessMarkingDeque();
+      return;
+    }
+  }
+}
+
+
+void MarkCompactCollector::MarkLiveObjects() {
+  GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_MARK);
+  double start_time = 0.0;
+  if (FLAG_print_cumulative_gc_stat) {
+    start_time = base::OS::TimeCurrentMillis();
+  }
+  // The recursive GC marker detects when it is nearing stack overflow,
+  // and switches to a different marking system.  JS interrupts interfere
+  // with the C stack limit check.
+  PostponeInterruptsScope postpone(isolate());
+
+  bool incremental_marking_overflowed = false;
+  IncrementalMarking* incremental_marking = heap_->incremental_marking();
+  if (was_marked_incrementally_) {
+    // Finalize the incremental marking and check whether we had an overflow.
+    // Both markers use the grey color to mark overflowed objects, so the
+    // non-incremental marker can deal with them as if the overflow had
+    // occurred during normal marking. But the incremental marker uses a
+    // separate marking deque, so we have to explicitly copy its overflow
+    // state.
+    incremental_marking->Finalize();
+    incremental_marking_overflowed =
+        incremental_marking->marking_deque()->overflowed();
+    incremental_marking->marking_deque()->ClearOverflowed();
+  } else {
+    // Abort any pending incremental activities, e.g. incremental sweeping.
+    incremental_marking->Abort();
+  }
+
+#ifdef DEBUG
+  DCHECK(state_ == PREPARE_GC);
+  state_ = MARK_LIVE_OBJECTS;
+#endif
+  // The to space contains live objects; a page in from space is used as the
+  // marking stack.
+  Address marking_deque_start = heap()->new_space()->FromSpacePageLow();
+  Address marking_deque_end = heap()->new_space()->FromSpacePageHigh();
+  if (FLAG_force_marking_deque_overflows) {
+    marking_deque_end = marking_deque_start + 64 * kPointerSize;
+  }
+  marking_deque_.Initialize(marking_deque_start, marking_deque_end);
+  DCHECK(!marking_deque_.overflowed());
+
+  if (incremental_marking_overflowed) {
+    // There are overflowed objects left in the heap after incremental marking.
+    marking_deque_.SetOverflowed();
+  }
+
+  PrepareForCodeFlushing();
+
+  if (was_marked_incrementally_) {
+    // There is no write barrier on cells, so we have to scan them now, at
+    // the end of incremental marking.
+    {
+      HeapObjectIterator cell_iterator(heap()->cell_space());
+      HeapObject* cell;
+      while ((cell = cell_iterator.Next()) != NULL) {
+        DCHECK(cell->IsCell());
+        if (IsMarked(cell)) {
+          int offset = Cell::kValueOffset;
+          MarkCompactMarkingVisitor::VisitPointer(
+              heap(), reinterpret_cast<Object**>(cell->address() + offset));
+        }
+      }
+    }
+    {
+      HeapObjectIterator js_global_property_cell_iterator(
+          heap()->property_cell_space());
+      HeapObject* cell;
+      while ((cell = js_global_property_cell_iterator.Next()) != NULL) {
+        DCHECK(cell->IsPropertyCell());
+        if (IsMarked(cell)) {
+          MarkCompactMarkingVisitor::VisitPropertyCell(cell->map(), cell);
+        }
+      }
+    }
+  }
+
+  RootMarkingVisitor root_visitor(heap());
+  MarkRoots(&root_visitor);
+
+  ProcessTopOptimizedFrame(&root_visitor);
+
+  // The objects reachable from the roots are marked, yet unreachable
+  // objects are unmarked.  Mark objects reachable due to host
+  // application specific logic or through Harmony weak maps.
+  ProcessEphemeralMarking(&root_visitor);
+
+  // The objects reachable from the roots, weak maps or object groups
+  // are marked, yet unreachable objects are unmarked.  Mark objects
+  // reachable only from weak global handles.
+  //
+  // First we identify nonlive weak handles and mark them as pending
+  // destruction.
+  heap()->isolate()->global_handles()->IdentifyWeakHandles(
+      &IsUnmarkedHeapObject);
+  // Then we mark the objects and process the transitive closure.
+  heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor);
+  while (marking_deque_.overflowed()) {
+    RefillMarkingDeque();
+    EmptyMarkingDeque();
+  }
+
+  // Repeat host application specific and Harmony weak maps marking to
+  // mark unmarked objects reachable from the weak roots.
+  ProcessEphemeralMarking(&root_visitor);
+
+  AfterMarking();
+
+  if (FLAG_print_cumulative_gc_stat) {
+    heap_->tracer()->AddMarkingTime(base::OS::TimeCurrentMillis() - start_time);
+  }
+}
+
+
+void MarkCompactCollector::AfterMarking() {
+  // Object literal map caches reference strings (cache keys) and maps
+  // (cache values). At this point, still-useful maps have already been
+  // marked. Mark the keys for the live values before we process the
+  // string table.
+  ProcessMapCaches();
+
+  // Prune the string table removing all strings only pointed to by the
+  // string table.  Cannot use string_table() here because the string
+  // table is marked.
+  StringTable* string_table = heap()->string_table();
+  InternalizedStringTableCleaner internalized_visitor(heap());
+  string_table->IterateElements(&internalized_visitor);
+  string_table->ElementsRemoved(internalized_visitor.PointersRemoved());
+
+  ExternalStringTableCleaner external_visitor(heap());
+  heap()->external_string_table_.Iterate(&external_visitor);
+  heap()->external_string_table_.CleanUp();
+
+  // Process the weak references.
+  MarkCompactWeakObjectRetainer mark_compact_object_retainer;
+  heap()->ProcessWeakReferences(&mark_compact_object_retainer);
+
+  // Remove object groups after marking phase.
+  heap()->isolate()->global_handles()->RemoveObjectGroups();
+  heap()->isolate()->global_handles()->RemoveImplicitRefGroups();
+
+  // Flush code from collected candidates.
+  if (is_code_flushing_enabled()) {
+    code_flusher_->ProcessCandidates();
+    // If the incremental marker does not support code flushing, we need to
+    // disable it before the incremental marking steps of the next cycle.
+    if (FLAG_flush_code && !FLAG_flush_code_incrementally) {
+      EnableCodeFlushing(false);
+    }
+  }
+
+  if (FLAG_track_gc_object_stats) {
+    heap()->CheckpointObjectStats();
+  }
+}
+
+
+void MarkCompactCollector::ProcessMapCaches() {
+  Object* raw_context = heap()->native_contexts_list();
+  while (raw_context != heap()->undefined_value()) {
+    Context* context = reinterpret_cast<Context*>(raw_context);
+    if (IsMarked(context)) {
+      HeapObject* raw_map_cache =
+          HeapObject::cast(context->get(Context::MAP_CACHE_INDEX));
+      // A map cache may be reachable from the stack. In this case
+      // it's already transitively marked and it's too late to clean
+      // up its parts.
+      if (!IsMarked(raw_map_cache) &&
+          raw_map_cache != heap()->undefined_value()) {
+        MapCache* map_cache = reinterpret_cast<MapCache*>(raw_map_cache);
+        int existing_elements = map_cache->NumberOfElements();
+        int used_elements = 0;
+        for (int i = MapCache::kElementsStartIndex; i < map_cache->length();
+             i += MapCache::kEntrySize) {
+          Object* raw_key = map_cache->get(i);
+          if (raw_key == heap()->undefined_value() ||
+              raw_key == heap()->the_hole_value())
+            continue;
+          STATIC_ASSERT(MapCache::kEntrySize == 2);
+          Object* raw_map = map_cache->get(i + 1);
+          if (raw_map->IsHeapObject() && IsMarked(raw_map)) {
+            ++used_elements;
+          } else {
+            // Delete useless entries with unmarked maps.
+            DCHECK(raw_map->IsMap());
+            map_cache->set_the_hole(i);
+            map_cache->set_the_hole(i + 1);
+          }
+        }
+        if (used_elements == 0) {
+          context->set(Context::MAP_CACHE_INDEX, heap()->undefined_value());
+        } else {
+          // Note: we don't actually shrink the cache here to avoid
+          // extra complexity during GC. We rely on subsequent cache
+          // usages (EnsureCapacity) to do this.
+          map_cache->ElementsRemoved(existing_elements - used_elements);
+          MarkBit map_cache_markbit = Marking::MarkBitFrom(map_cache);
+          MarkObject(map_cache, map_cache_markbit);
+        }
+      }
+    }
+    // Move to next element in the list.
+    raw_context = context->get(Context::NEXT_CONTEXT_LINK);
+  }
+  ProcessMarkingDeque();
+}
+
+
+void MarkCompactCollector::ClearNonLiveReferences() {
+  // Iterate over the map space, setting map transitions that go from
+  // a marked map to an unmarked map to null transitions.  This action
+  // is carried out only on maps of JSObjects and related subtypes.
+  HeapObjectIterator map_iterator(heap()->map_space());
+  for (HeapObject* obj = map_iterator.Next(); obj != NULL;
+       obj = map_iterator.Next()) {
+    Map* map = Map::cast(obj);
+
+    if (!map->CanTransition()) continue;
+
+    MarkBit map_mark = Marking::MarkBitFrom(map);
+    ClearNonLivePrototypeTransitions(map);
+    ClearNonLiveMapTransitions(map, map_mark);
+
+    if (map_mark.Get()) {
+      ClearNonLiveDependentCode(map->dependent_code());
+    } else {
+      ClearDependentCode(map->dependent_code());
+      map->set_dependent_code(DependentCode::cast(heap()->empty_fixed_array()));
+    }
+  }
+
+  // Iterate over property cell space, removing dependent code that is not
+  // otherwise kept alive by strong references.
+  HeapObjectIterator cell_iterator(heap_->property_cell_space());
+  for (HeapObject* cell = cell_iterator.Next(); cell != NULL;
+       cell = cell_iterator.Next()) {
+    if (IsMarked(cell)) {
+      ClearNonLiveDependentCode(PropertyCell::cast(cell)->dependent_code());
+    }
+  }
+
+  // Iterate over allocation sites, removing dependent code that is not
+  // otherwise kept alive by strong references.
+  Object* undefined = heap()->undefined_value();
+  for (Object* site = heap()->allocation_sites_list(); site != undefined;
+       site = AllocationSite::cast(site)->weak_next()) {
+    if (IsMarked(site)) {
+      ClearNonLiveDependentCode(AllocationSite::cast(site)->dependent_code());
+    }
+  }
+
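+  // Prune the table mapping weak objects to dependent code. A Cell key is
+  // revived if the object it points to is still marked. Entries whose key is
+  // dead have their dependent code cleared and are removed from the table;
+  // entries with a live key only have dead dependent code filtered out.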
+  if (heap_->weak_object_to_code_table()->IsHashTable()) {
+    WeakHashTable* table =
+        WeakHashTable::cast(heap_->weak_object_to_code_table());
+    uint32_t capacity = table->Capacity();
+    for (uint32_t i = 0; i < capacity; i++) {
+      uint32_t key_index = table->EntryToIndex(i);
+      Object* key = table->get(key_index);
+      if (!table->IsKey(key)) continue;
+      uint32_t value_index = table->EntryToValueIndex(i);
+      Object* value = table->get(value_index);
+      if (key->IsCell() && !IsMarked(key)) {
+        Cell* cell = Cell::cast(key);
+        Object* object = cell->value();
+        if (IsMarked(object)) {
+          MarkBit mark = Marking::MarkBitFrom(cell);
+          SetMark(cell, mark);
+          Object** value_slot = HeapObject::RawField(cell, Cell::kValueOffset);
+          RecordSlot(value_slot, value_slot, *value_slot);
+        }
+      }
+      if (IsMarked(key)) {
+        if (!IsMarked(value)) {
+          HeapObject* obj = HeapObject::cast(value);
+          MarkBit mark = Marking::MarkBitFrom(obj);
+          SetMark(obj, mark);
+        }
+        ClearNonLiveDependentCode(DependentCode::cast(value));
+      } else {
+        ClearDependentCode(DependentCode::cast(value));
+        table->set(key_index, heap_->the_hole_value());
+        table->set(value_index, heap_->the_hole_value());
+        table->ElementRemoved();
+      }
+    }
+  }
+}
+
+
+void MarkCompactCollector::ClearNonLivePrototypeTransitions(Map* map) {
+  int number_of_transitions = map->NumberOfProtoTransitions();
+  FixedArray* prototype_transitions = map->GetPrototypeTransitions();
+
+  int new_number_of_transitions = 0;
+  const int header = Map::kProtoTransitionHeaderSize;
+  const int proto_offset = header + Map::kProtoTransitionPrototypeOffset;
+  const int map_offset = header + Map::kProtoTransitionMapOffset;
+  const int step = Map::kProtoTransitionElementsPerEntry;
+  for (int i = 0; i < number_of_transitions; i++) {
+    Object* prototype = prototype_transitions->get(proto_offset + i * step);
+    Object* cached_map = prototype_transitions->get(map_offset + i * step);
+    if (IsMarked(prototype) && IsMarked(cached_map)) {
+      DCHECK(!prototype->IsUndefined());
+      int proto_index = proto_offset + new_number_of_transitions * step;
+      int map_index = map_offset + new_number_of_transitions * step;
+      if (new_number_of_transitions != i) {
+        prototype_transitions->set(proto_index, prototype,
+                                   UPDATE_WRITE_BARRIER);
+        prototype_transitions->set(map_index, cached_map, SKIP_WRITE_BARRIER);
+      }
+      Object** slot = prototype_transitions->RawFieldOfElementAt(proto_index);
+      RecordSlot(slot, slot, prototype);
+      new_number_of_transitions++;
+    }
+  }
+
+  if (new_number_of_transitions != number_of_transitions) {
+    map->SetNumberOfProtoTransitions(new_number_of_transitions);
+  }
+
+  // Fill slots that became free with undefined value.
+  for (int i = new_number_of_transitions * step;
+       i < number_of_transitions * step; i++) {
+    prototype_transitions->set_undefined(header + i);
+  }
+}
+
+
+void MarkCompactCollector::ClearNonLiveMapTransitions(Map* map,
+                                                      MarkBit map_mark) {
+  Object* potential_parent = map->GetBackPointer();
+  if (!potential_parent->IsMap()) return;
+  Map* parent = Map::cast(potential_parent);
+
+  // Follow the back pointer to check whether we are dealing with a map
+  // transition from a live map to a dead path; if so, clear the parent's
+  // transitions.
+  bool current_is_alive = map_mark.Get();
+  bool parent_is_alive = Marking::MarkBitFrom(parent).Get();
+  if (!current_is_alive && parent_is_alive) {
+    ClearMapTransitions(parent);
+  }
+}
+
+
+// Clear a possible back pointer in case the transition leads to a dead map.
+// Return true if the back pointer has been cleared and false otherwise.
+bool MarkCompactCollector::ClearMapBackPointer(Map* target) {
+  if (Marking::MarkBitFrom(target).Get()) return false;
+  target->SetBackPointer(heap_->undefined_value(), SKIP_WRITE_BARRIER);
+  return true;
+}
+
+
+void MarkCompactCollector::ClearMapTransitions(Map* map) {
+  // If there are no transitions to be cleared, return.
+  // TODO(verwaest) Should be an assert, otherwise back pointers are not
+  // properly cleared.
+  if (!map->HasTransitionArray()) return;
+
+  TransitionArray* t = map->transitions();
+
+  int transition_index = 0;
+
+  DescriptorArray* descriptors = map->instance_descriptors();
+  bool descriptors_owner_died = false;
+
+  // Compact all live descriptors to the left.
+  for (int i = 0; i < t->number_of_transitions(); ++i) {
+    Map* target = t->GetTarget(i);
+    if (ClearMapBackPointer(target)) {
+      if (target->instance_descriptors() == descriptors) {
+        descriptors_owner_died = true;
+      }
+    } else {
+      if (i != transition_index) {
+        Name* key = t->GetKey(i);
+        t->SetKey(transition_index, key);
+        Object** key_slot = t->GetKeySlot(transition_index);
+        RecordSlot(key_slot, key_slot, key);
+        // Target slots do not need to be recorded since maps are not compacted.
+        t->SetTarget(transition_index, t->GetTarget(i));
+      }
+      transition_index++;
+    }
+  }
+
+  // If no transitions were cleared, return.
+  // TODO(verwaest) Should be an assert, otherwise back pointers are not
+  // properly cleared.
+  if (transition_index == t->number_of_transitions()) return;
+
+  int number_of_own_descriptors = map->NumberOfOwnDescriptors();
+
+  if (descriptors_owner_died) {
+    if (number_of_own_descriptors > 0) {
+      TrimDescriptorArray(map, descriptors, number_of_own_descriptors);
+      DCHECK(descriptors->number_of_descriptors() == number_of_own_descriptors);
+      map->set_owns_descriptors(true);
+    } else {
+      DCHECK(descriptors == heap_->empty_descriptor_array());
+    }
+  }
+
+  // Note that we never eliminate a transition array, though we might right-trim
+  // such that number_of_transitions() == 0. If this assumption changes,
+  // TransitionArray::CopyInsert() will need to deal with the case that a
+  // transition array disappeared during GC.
+  int trim = t->number_of_transitions() - transition_index;
+  if (trim > 0) {
+    heap_->RightTrimFixedArray<Heap::FROM_GC>(
+        t, t->IsSimpleTransition() ? trim
+                                   : trim * TransitionArray::kTransitionSize);
+  }
+  DCHECK(map->HasTransitionArray());
+}
+
+
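+// Right-trim the descriptor array so that it only contains the descriptors
+// owned by the given map, and shrink its enum caches accordingly.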
+void MarkCompactCollector::TrimDescriptorArray(Map* map,
+                                               DescriptorArray* descriptors,
+                                               int number_of_own_descriptors) {
+  int number_of_descriptors = descriptors->number_of_descriptors_storage();
+  int to_trim = number_of_descriptors - number_of_own_descriptors;
+  if (to_trim == 0) return;
+
+  heap_->RightTrimFixedArray<Heap::FROM_GC>(
+      descriptors, to_trim * DescriptorArray::kDescriptorSize);
+  descriptors->SetNumberOfDescriptors(number_of_own_descriptors);
+
+  if (descriptors->HasEnumCache()) TrimEnumCache(map, descriptors);
+  descriptors->Sort();
+}
+
+
+void MarkCompactCollector::TrimEnumCache(Map* map,
+                                         DescriptorArray* descriptors) {
+  int live_enum = map->EnumLength();
+  if (live_enum == kInvalidEnumCacheSentinel) {
+    live_enum = map->NumberOfDescribedProperties(OWN_DESCRIPTORS, DONT_ENUM);
+  }
+  if (live_enum == 0) return descriptors->ClearEnumCache();
+
+  FixedArray* enum_cache = descriptors->GetEnumCache();
+
+  int to_trim = enum_cache->length() - live_enum;
+  if (to_trim <= 0) return;
+  heap_->RightTrimFixedArray<Heap::FROM_GC>(descriptors->GetEnumCache(),
+                                            to_trim);
+
+  if (!descriptors->HasEnumIndicesCache()) return;
+  FixedArray* enum_indices_cache = descriptors->GetEnumIndicesCache();
+  heap_->RightTrimFixedArray<Heap::FROM_GC>(enum_indices_cache, to_trim);
+}
+
+
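+// Walk a list of dependent weak IC stubs; stubs that are still alive are
+// invalidated (IC::InvalidateMaps), and the next-code links are cleared to
+// unlink the list.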
+void MarkCompactCollector::ClearDependentICList(Object* head) {
+  Object* current = head;
+  Object* undefined = heap()->undefined_value();
+  while (current != undefined) {
+    Code* code = Code::cast(current);
+    if (IsMarked(code)) {
+      DCHECK(code->is_weak_stub());
+      IC::InvalidateMaps(code);
+    }
+    current = code->next_code_link();
+    code->set_next_code_link(undefined);
+  }
+}
+
+
+void MarkCompactCollector::ClearDependentCode(DependentCode* entries) {
+  DisallowHeapAllocation no_allocation;
+  DependentCode::GroupStartIndexes starts(entries);
+  int number_of_entries = starts.number_of_entries();
+  if (number_of_entries == 0) return;
+  int g = DependentCode::kWeakICGroup;
+  if (starts.at(g) != starts.at(g + 1)) {
+    int i = starts.at(g);
+    DCHECK(i + 1 == starts.at(g + 1));
+    Object* head = entries->object_at(i);
+    ClearDependentICList(head);
+  }
+  g = DependentCode::kWeakCodeGroup;
+  for (int i = starts.at(g); i < starts.at(g + 1); i++) {
+    // If the entry is compilation info then the map must be alive,
+    // and ClearDependentCode shouldn't be called.
+    DCHECK(entries->is_code_at(i));
+    Code* code = entries->code_at(i);
+    if (IsMarked(code) && !code->marked_for_deoptimization()) {
+      DependentCode::SetMarkedForDeoptimization(
+          code, static_cast<DependentCode::DependencyGroup>(g));
+      code->InvalidateEmbeddedObjects();
+      have_code_to_deoptimize_ = true;
+    }
+  }
+  for (int i = 0; i < number_of_entries; i++) {
+    entries->clear_at(i);
+  }
+}
+
+
+int MarkCompactCollector::ClearNonLiveDependentCodeInGroup(
+    DependentCode* entries, int group, int start, int end, int new_start) {
+  int survived = 0;
+  if (group == DependentCode::kWeakICGroup) {
+    // Dependent weak IC stubs form a linked list and only the head is stored
+    // in the dependent code array.
+    if (start != end) {
+      DCHECK(start + 1 == end);
+      Object* old_head = entries->object_at(start);
+      MarkCompactWeakObjectRetainer retainer;
+      Object* head = VisitWeakList<Code>(heap(), old_head, &retainer);
+      entries->set_object_at(new_start, head);
+      Object** slot = entries->slot_at(new_start);
+      RecordSlot(slot, slot, head);
+      // We do not compact this group even if the head is undefined;
+      // more dependent ICs are likely to be added later.
+      survived = 1;
+    }
+  } else {
+    for (int i = start; i < end; i++) {
+      Object* obj = entries->object_at(i);
+      DCHECK(obj->IsCode() || IsMarked(obj));
+      if (IsMarked(obj) &&
+          (!obj->IsCode() || !WillBeDeoptimized(Code::cast(obj)))) {
+        if (new_start + survived != i) {
+          entries->set_object_at(new_start + survived, obj);
+        }
+        Object** slot = entries->slot_at(new_start + survived);
+        RecordSlot(slot, slot, obj);
+        survived++;
+      }
+    }
+  }
+  entries->set_number_of_entries(
+      static_cast<DependentCode::DependencyGroup>(group), survived);
+  return survived;
+}
+
+
+void MarkCompactCollector::ClearNonLiveDependentCode(DependentCode* entries) {
+  DisallowHeapAllocation no_allocation;
+  DependentCode::GroupStartIndexes starts(entries);
+  int number_of_entries = starts.number_of_entries();
+  if (number_of_entries == 0) return;
+  int new_number_of_entries = 0;
+  // Go through all groups, remove dead codes and compact.
+  for (int g = 0; g < DependentCode::kGroupCount; g++) {
+    int survived = ClearNonLiveDependentCodeInGroup(
+        entries, g, starts.at(g), starts.at(g + 1), new_number_of_entries);
+    new_number_of_entries += survived;
+  }
+  for (int i = new_number_of_entries; i < number_of_entries; i++) {
+    entries->clear_at(i);
+  }
+}
+
+
+void MarkCompactCollector::ProcessWeakCollections() {
+  GCTracer::Scope gc_scope(heap()->tracer(),
+                           GCTracer::Scope::MC_WEAKCOLLECTION_PROCESS);
+  Object* weak_collection_obj = heap()->encountered_weak_collections();
+  while (weak_collection_obj != Smi::FromInt(0)) {
+    JSWeakCollection* weak_collection =
+        reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
+    DCHECK(MarkCompactCollector::IsMarked(weak_collection));
+    if (weak_collection->table()->IsHashTable()) {
+      ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table());
+      Object** anchor = reinterpret_cast<Object**>(table->address());
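+      // For every entry whose key is marked, record the key slot and mark
+      // the value; entries with unmarked keys are handled later in
+      // ClearWeakCollections.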
+      for (int i = 0; i < table->Capacity(); i++) {
+        if (MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
+          Object** key_slot =
+              table->RawFieldOfElementAt(ObjectHashTable::EntryToIndex(i));
+          RecordSlot(anchor, key_slot, *key_slot);
+          Object** value_slot =
+              table->RawFieldOfElementAt(ObjectHashTable::EntryToValueIndex(i));
+          MarkCompactMarkingVisitor::MarkObjectByPointer(this, anchor,
+                                                         value_slot);
+        }
+      }
+    }
+    weak_collection_obj = weak_collection->next();
+  }
+}
+
+
+void MarkCompactCollector::ClearWeakCollections() {
+  GCTracer::Scope gc_scope(heap()->tracer(),
+                           GCTracer::Scope::MC_WEAKCOLLECTION_CLEAR);
+  Object* weak_collection_obj = heap()->encountered_weak_collections();
+  while (weak_collection_obj != Smi::FromInt(0)) {
+    JSWeakCollection* weak_collection =
+        reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
+    DCHECK(MarkCompactCollector::IsMarked(weak_collection));
+    if (weak_collection->table()->IsHashTable()) {
+      ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table());
+      for (int i = 0; i < table->Capacity(); i++) {
+        HeapObject* key = HeapObject::cast(table->KeyAt(i));
+        if (!MarkCompactCollector::IsMarked(key)) {
+          table->RemoveEntry(i);
+        }
+      }
+    }
+    weak_collection_obj = weak_collection->next();
+    weak_collection->set_next(heap()->undefined_value());
+  }
+  heap()->set_encountered_weak_collections(Smi::FromInt(0));
+}
+
+
+void MarkCompactCollector::AbortWeakCollections() {
+  GCTracer::Scope gc_scope(heap()->tracer(),
+                           GCTracer::Scope::MC_WEAKCOLLECTION_ABORT);
+  Object* weak_collection_obj = heap()->encountered_weak_collections();
+  while (weak_collection_obj != Smi::FromInt(0)) {
+    JSWeakCollection* weak_collection =
+        reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
+    weak_collection_obj = weak_collection->next();
+    weak_collection->set_next(heap()->undefined_value());
+  }
+  heap()->set_encountered_weak_collections(Smi::FromInt(0));
+}
+
+
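+// Remember a slot of a migrated object so that it can be updated later:
+// pointers to new space go into the store buffer, pointers to objects on
+// evacuation candidates go into the migration slots buffer.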
+void MarkCompactCollector::RecordMigratedSlot(Object* value, Address slot) {
+  if (heap_->InNewSpace(value)) {
+    heap_->store_buffer()->Mark(slot);
+  } else if (value->IsHeapObject() && IsOnEvacuationCandidate(value)) {
+    SlotsBuffer::AddTo(&slots_buffer_allocator_, &migration_slots_buffer_,
+                       reinterpret_cast<Object**>(slot),
+                       SlotsBuffer::IGNORE_OVERFLOW);
+  }
+}
+
+
+// We scavenge new space simultaneously with sweeping. This is done in two
+// passes.
+//
+// The first pass migrates all live objects from one semispace to another or
+// promotes them to old space.  The forwarding address is written directly
+// into the first word of the object without any encoding.  If the object is
+// dead we write NULL as the forwarding address.
+//
+// The second pass updates pointers to new space in all spaces.  It is possible
+// to encounter pointers to dead new space objects during the traversal of
+// pointers to new space.  We should clear them to avoid encountering them
+// during the next pointer iteration.  This is an issue if the store buffer
+// overflows and we have to scan the entire old space, including dead objects,
+// looking for pointers to new space.
+void MarkCompactCollector::MigrateObject(HeapObject* dst, HeapObject* src,
+                                         int size, AllocationSpace dest) {
+  Address dst_addr = dst->address();
+  Address src_addr = src->address();
+  DCHECK(heap()->AllowedToBeMigrated(src, dest));
+  DCHECK(dest != LO_SPACE && size <= Page::kMaxRegularHeapObjectSize);
+  if (dest == OLD_POINTER_SPACE) {
+    Address src_slot = src_addr;
+    Address dst_slot = dst_addr;
+    DCHECK(IsAligned(size, kPointerSize));
+
+    for (int remaining = size / kPointerSize; remaining > 0; remaining--) {
+      Object* value = Memory::Object_at(src_slot);
+
+      Memory::Object_at(dst_slot) = value;
+
+      if (!src->MayContainRawValues()) {
+        RecordMigratedSlot(value, dst_slot);
+      }
+
+      src_slot += kPointerSize;
+      dst_slot += kPointerSize;
+    }
+
+    if (compacting_ && dst->IsJSFunction()) {
+      Address code_entry_slot = dst_addr + JSFunction::kCodeEntryOffset;
+      Address code_entry = Memory::Address_at(code_entry_slot);
+
+      if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
+        SlotsBuffer::AddTo(&slots_buffer_allocator_, &migration_slots_buffer_,
+                           SlotsBuffer::CODE_ENTRY_SLOT, code_entry_slot,
+                           SlotsBuffer::IGNORE_OVERFLOW);
+      }
+    } else if (dst->IsConstantPoolArray()) {
+      // We special-case ConstantPoolArrays since they could contain integer
+      // value entries which look like tagged pointers.
+      // TODO(mstarzinger): restructure this code to avoid this special-casing.
+      ConstantPoolArray* array = ConstantPoolArray::cast(dst);
+      ConstantPoolArray::Iterator code_iter(array, ConstantPoolArray::CODE_PTR);
+      while (!code_iter.is_finished()) {
+        Address code_entry_slot =
+            dst_addr + array->OffsetOfElementAt(code_iter.next_index());
+        Address code_entry = Memory::Address_at(code_entry_slot);
+
+        if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
+          SlotsBuffer::AddTo(&slots_buffer_allocator_, &migration_slots_buffer_,
+                             SlotsBuffer::CODE_ENTRY_SLOT, code_entry_slot,
+                             SlotsBuffer::IGNORE_OVERFLOW);
+        }
+      }
+      ConstantPoolArray::Iterator heap_iter(array, ConstantPoolArray::HEAP_PTR);
+      while (!heap_iter.is_finished()) {
+        Address heap_slot =
+            dst_addr + array->OffsetOfElementAt(heap_iter.next_index());
+        Object* value = Memory::Object_at(heap_slot);
+        RecordMigratedSlot(value, heap_slot);
+      }
+    }
+  } else if (dest == CODE_SPACE) {
+    PROFILE(isolate(), CodeMoveEvent(src_addr, dst_addr));
+    heap()->MoveBlock(dst_addr, src_addr, size);
+    SlotsBuffer::AddTo(&slots_buffer_allocator_, &migration_slots_buffer_,
+                       SlotsBuffer::RELOCATED_CODE_OBJECT, dst_addr,
+                       SlotsBuffer::IGNORE_OVERFLOW);
+    Code::cast(dst)->Relocate(dst_addr - src_addr);
+  } else {
+    DCHECK(dest == OLD_DATA_SPACE || dest == NEW_SPACE);
+    heap()->MoveBlock(dst_addr, src_addr, size);
+  }
+  heap()->OnMoveEvent(dst, src, size);
+  Memory::Address_at(src_addr) = dst_addr;
+}
+
+
+// Visitor for updating pointers from live objects in old spaces to new space.
+// It does not expect to encounter pointers to dead objects.
+class PointersUpdatingVisitor : public ObjectVisitor {
+ public:
+  explicit PointersUpdatingVisitor(Heap* heap) : heap_(heap) {}
+
+  void VisitPointer(Object** p) { UpdatePointer(p); }
+
+  void VisitPointers(Object** start, Object** end) {
+    for (Object** p = start; p < end; p++) UpdatePointer(p);
+  }
+
+  void VisitEmbeddedPointer(RelocInfo* rinfo) {
+    DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
+    Object* target = rinfo->target_object();
+    Object* old_target = target;
+    VisitPointer(&target);
+    // Avoid unnecessary changes that might unnecessarily flush the
+    // instruction cache.
+    if (target != old_target) {
+      rinfo->set_target_object(target);
+    }
+  }
+
+  void VisitCodeTarget(RelocInfo* rinfo) {
+    DCHECK(RelocInfo::IsCodeTarget(rinfo->rmode()));
+    Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+    Object* old_target = target;
+    VisitPointer(&target);
+    if (target != old_target) {
+      rinfo->set_target_address(Code::cast(target)->instruction_start());
+    }
+  }
+
+  void VisitCodeAgeSequence(RelocInfo* rinfo) {
+    DCHECK(RelocInfo::IsCodeAgeSequence(rinfo->rmode()));
+    Object* stub = rinfo->code_age_stub();
+    DCHECK(stub != NULL);
+    VisitPointer(&stub);
+    if (stub != rinfo->code_age_stub()) {
+      rinfo->set_code_age_stub(Code::cast(stub));
+    }
+  }
+
+  void VisitDebugTarget(RelocInfo* rinfo) {
+    DCHECK((RelocInfo::IsJSReturn(rinfo->rmode()) &&
+            rinfo->IsPatchedReturnSequence()) ||
+           (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
+            rinfo->IsPatchedDebugBreakSlotSequence()));
+    Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
+    VisitPointer(&target);
+    rinfo->set_call_address(Code::cast(target)->instruction_start());
+  }
+
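+  // If the object pointed to by the slot has been relocated, its map word
+  // contains the forwarding address; redirect the slot to the new location.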
+  static inline void UpdateSlot(Heap* heap, Object** slot) {
+    Object* obj = *slot;
+
+    if (!obj->IsHeapObject()) return;
+
+    HeapObject* heap_obj = HeapObject::cast(obj);
+
+    MapWord map_word = heap_obj->map_word();
+    if (map_word.IsForwardingAddress()) {
+      DCHECK(heap->InFromSpace(heap_obj) ||
+             MarkCompactCollector::IsOnEvacuationCandidate(heap_obj));
+      HeapObject* target = map_word.ToForwardingAddress();
+      *slot = target;
+      DCHECK(!heap->InFromSpace(target) &&
+             !MarkCompactCollector::IsOnEvacuationCandidate(target));
+    }
+  }
+
+ private:
+  inline void UpdatePointer(Object** p) { UpdateSlot(heap_, p); }
+
+  Heap* heap_;
+};
+
+
+static void UpdatePointer(HeapObject** address, HeapObject* object) {
+  Address new_addr = Memory::Address_at(object->address());
+
+  // The new space sweep will overwrite the map word of dead objects
+  // with NULL. In this case we do not need to transfer this entry to
+  // the store buffer, which we are rebuilding.
+  // We perform the pointer update with a no-barrier compare-and-swap. The
+  // compare-and-swap may fail when the pointer update tries to update
+  // garbage memory that was concurrently accessed by the sweeper.
+  if (new_addr != NULL) {
+    base::NoBarrier_CompareAndSwap(
+        reinterpret_cast<base::AtomicWord*>(address),
+        reinterpret_cast<base::AtomicWord>(object),
+        reinterpret_cast<base::AtomicWord>(HeapObject::FromAddress(new_addr)));
+  }
+}
+
+
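+// Return the new location of an external string if it has been moved during
+// evacuation, or the string itself otherwise.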
+static String* UpdateReferenceInExternalStringTableEntry(Heap* heap,
+                                                         Object** p) {
+  MapWord map_word = HeapObject::cast(*p)->map_word();
+
+  if (map_word.IsForwardingAddress()) {
+    return String::cast(map_word.ToForwardingAddress());
+  }
+
+  return String::cast(*p);
+}
+
+
+bool MarkCompactCollector::TryPromoteObject(HeapObject* object,
+                                            int object_size) {
+  DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+
+  OldSpace* target_space = heap()->TargetSpace(object);
+
+  DCHECK(target_space == heap()->old_pointer_space() ||
+         target_space == heap()->old_data_space());
+  HeapObject* target;
+  AllocationResult allocation = target_space->AllocateRaw(object_size);
+  if (allocation.To(&target)) {
+    MigrateObject(target, object, object_size, target_space->identity());
+    heap()->IncrementPromotedObjectsSize(object_size);
+    return true;
+  }
+
+  return false;
+}
+
+
+void MarkCompactCollector::EvacuateNewSpace() {
+  // There are soft limits in the allocation code, designed to trigger a
+  // mark-sweep collection by failing allocations.  But since we are already
+  // in a mark-sweep allocation, there is no sense in trying to trigger one.
+  AlwaysAllocateScope scope(isolate());
+
+  NewSpace* new_space = heap()->new_space();
+
+  // Store allocation range before flipping semispaces.
+  Address from_bottom = new_space->bottom();
+  Address from_top = new_space->top();
+
+  // Flip the semispaces.  After flipping, to space is empty and from space
+  // has the live objects.
+  new_space->Flip();
+  new_space->ResetAllocationInfo();
+
+  int survivors_size = 0;
+
+  // First pass: traverse all objects in the inactive semispace, remove marks,
+  // migrate live objects and write forwarding addresses.  This stage puts
+  // new entries in the store buffer and may cause some pages to be marked
+  // scan-on-scavenge.
+  NewSpacePageIterator it(from_bottom, from_top);
+  while (it.has_next()) {
+    NewSpacePage* p = it.next();
+    survivors_size += DiscoverAndEvacuateBlackObjectsOnPage(new_space, p);
+  }
+
+  heap_->IncrementYoungSurvivorsCounter(survivors_size);
+  new_space->set_age_mark(new_space->top());
+}
+
+
+void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) {
+  AlwaysAllocateScope always_allocate(isolate());
+  PagedSpace* space = static_cast<PagedSpace*>(p->owner());
+  DCHECK(p->IsEvacuationCandidate() && !p->WasSwept());
+  p->SetWasSwept();
+
+  int offsets[16];
+
+  for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
+    Address cell_base = it.CurrentCellBase();
+    MarkBit::CellType* cell = it.CurrentCell();
+
+    if (*cell == 0) continue;
+
+    int live_objects = MarkWordToObjectStarts(*cell, offsets);
+    for (int i = 0; i < live_objects; i++) {
+      Address object_addr = cell_base + offsets[i] * kPointerSize;
+      HeapObject* object = HeapObject::FromAddress(object_addr);
+      DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
+
+      int size = object->Size();
+
+      HeapObject* target_object;
+      AllocationResult allocation = space->AllocateRaw(size);
+      if (!allocation.To(&target_object)) {
+        // If allocation failed, use emergency memory and retry allocation.
+        CHECK(space->HasEmergencyMemory());
+        space->UseEmergencyMemory();
+        allocation = space->AllocateRaw(size);
+      }
+      if (!allocation.To(&target_object)) {
+        // OS refused to give us memory.
+        V8::FatalProcessOutOfMemory("Evacuation");
+        return;
+      }
+
+      MigrateObject(target_object, object, size, space->identity());
+      DCHECK(object->map_word().IsForwardingAddress());
+    }
+
+    // Clear marking bits for current cell.
+    *cell = 0;
+  }
+  p->ResetLiveBytes();
+}
+
+
+void MarkCompactCollector::EvacuatePages() {
+  int npages = evacuation_candidates_.length();
+  for (int i = 0; i < npages; i++) {
+    Page* p = evacuation_candidates_[i];
+    DCHECK(p->IsEvacuationCandidate() ||
+           p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
+    DCHECK(static_cast<int>(p->parallel_sweeping()) ==
+           MemoryChunk::SWEEPING_DONE);
+    PagedSpace* space = static_cast<PagedSpace*>(p->owner());
+    // Allocate emergency memory for the case when compaction fails due to out
+    // of memory.
+    if (!space->HasEmergencyMemory()) {
+      space->CreateEmergencyMemory();
+    }
+    if (p->IsEvacuationCandidate()) {
+      // During compaction we might have to request a new page.  Check that
+      // we have an emergency page and that the space still has room to
+      // expand.
+      if (space->HasEmergencyMemory() && space->CanExpand()) {
+        EvacuateLiveObjectsFromPage(p);
+      } else {
+        // Without room for expansion, evacuation is not guaranteed to
+        // succeed.  Pessimistically abandon unevacuated pages.
+        for (int j = i; j < npages; j++) {
+          Page* page = evacuation_candidates_[j];
+          slots_buffer_allocator_.DeallocateChain(page->slots_buffer_address());
+          page->ClearEvacuationCandidate();
+          page->SetFlag(Page::RESCAN_ON_EVACUATION);
+        }
+        break;
+      }
+    }
+  }
+  if (npages > 0) {
+    // Release emergency memory.
+    PagedSpaces spaces(heap());
+    for (PagedSpace* space = spaces.next(); space != NULL;
+         space = spaces.next()) {
+      if (space->HasEmergencyMemory()) {
+        space->FreeEmergencyMemory();
+      }
+    }
+  }
+}
+
+
+class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
+ public:
+  virtual Object* RetainAs(Object* object) {
+    if (object->IsHeapObject()) {
+      HeapObject* heap_object = HeapObject::cast(object);
+      MapWord map_word = heap_object->map_word();
+      if (map_word.IsForwardingAddress()) {
+        return map_word.ToForwardingAddress();
+      }
+    }
+    return object;
+  }
+};
+
+
+static inline void UpdateSlot(Isolate* isolate, ObjectVisitor* v,
+                              SlotsBuffer::SlotType slot_type, Address addr) {
+  switch (slot_type) {
+    case SlotsBuffer::CODE_TARGET_SLOT: {
+      RelocInfo rinfo(addr, RelocInfo::CODE_TARGET, 0, NULL);
+      rinfo.Visit(isolate, v);
+      break;
+    }
+    case SlotsBuffer::CODE_ENTRY_SLOT: {
+      v->VisitCodeEntry(addr);
+      break;
+    }
+    case SlotsBuffer::RELOCATED_CODE_OBJECT: {
+      HeapObject* obj = HeapObject::FromAddress(addr);
+      Code::cast(obj)->CodeIterateBody(v);
+      break;
+    }
+    case SlotsBuffer::DEBUG_TARGET_SLOT: {
+      RelocInfo rinfo(addr, RelocInfo::DEBUG_BREAK_SLOT, 0, NULL);
+      if (rinfo.IsPatchedDebugBreakSlotSequence()) rinfo.Visit(isolate, v);
+      break;
+    }
+    case SlotsBuffer::JS_RETURN_SLOT: {
+      RelocInfo rinfo(addr, RelocInfo::JS_RETURN, 0, NULL);
+      if (rinfo.IsPatchedReturnSequence()) rinfo.Visit(isolate, v);
+      break;
+    }
+    case SlotsBuffer::EMBEDDED_OBJECT_SLOT: {
+      RelocInfo rinfo(addr, RelocInfo::EMBEDDED_OBJECT, 0, NULL);
+      rinfo.Visit(isolate, v);
+      break;
+    }
+    default:
+      UNREACHABLE();
+      break;
+  }
+}
+
+
+enum SweepingMode { SWEEP_ONLY, SWEEP_AND_VISIT_LIVE_OBJECTS };
+
+
+enum SkipListRebuildingMode { REBUILD_SKIP_LIST, IGNORE_SKIP_LIST };
+
+
+enum FreeSpaceTreatmentMode { IGNORE_FREE_SPACE, ZAP_FREE_SPACE };
+
+
+template <MarkCompactCollector::SweepingParallelism mode>
+static intptr_t Free(PagedSpace* space, FreeList* free_list, Address start,
+                     int size) {
+  if (mode == MarkCompactCollector::SWEEP_ON_MAIN_THREAD) {
+    DCHECK(free_list == NULL);
+    return space->Free(start, size);
+  } else {
+    // TODO(hpayer): account for wasted bytes in concurrent sweeping too.
+    return size - free_list->Free(start, size);
+  }
+}
+
+
+// Sweeps a page.  After sweeping, the page can be iterated.
+// Slots in live objects pointing into evacuation candidates are updated
+// if requested.
+// Returns the size of the biggest contiguous freed memory chunk in bytes.
+template <SweepingMode sweeping_mode,
+          MarkCompactCollector::SweepingParallelism parallelism,
+          SkipListRebuildingMode skip_list_mode,
+          FreeSpaceTreatmentMode free_space_mode>
+static int Sweep(PagedSpace* space, FreeList* free_list, Page* p,
+                 ObjectVisitor* v) {
+  DCHECK(!p->IsEvacuationCandidate() && !p->WasSwept());
+  DCHECK_EQ(skip_list_mode == REBUILD_SKIP_LIST,
+            space->identity() == CODE_SPACE);
+  DCHECK((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST));
+  DCHECK(parallelism == MarkCompactCollector::SWEEP_ON_MAIN_THREAD ||
+         sweeping_mode == SWEEP_ONLY);
+
+  Address free_start = p->area_start();
+  DCHECK(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
+  int offsets[16];
+
+  SkipList* skip_list = p->skip_list();
+  int curr_region = -1;
+  if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list) {
+    skip_list->Clear();
+  }
+
+  intptr_t freed_bytes = 0;
+  intptr_t max_freed_bytes = 0;
+
+  for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
+    Address cell_base = it.CurrentCellBase();
+    MarkBit::CellType* cell = it.CurrentCell();
+    int live_objects = MarkWordToObjectStarts(*cell, offsets);
+    int live_index = 0;
+    for (; live_objects != 0; live_objects--) {
+      Address free_end = cell_base + offsets[live_index++] * kPointerSize;
+      if (free_end != free_start) {
+        int size = static_cast<int>(free_end - free_start);
+        if (free_space_mode == ZAP_FREE_SPACE) {
+          memset(free_start, 0xcc, size);
+        }
+        freed_bytes = Free<parallelism>(space, free_list, free_start, size);
+        max_freed_bytes = Max(freed_bytes, max_freed_bytes);
+#ifdef ENABLE_GDB_JIT_INTERFACE
+        if (FLAG_gdbjit && space->identity() == CODE_SPACE) {
+          GDBJITInterface::RemoveCodeRange(free_start, free_end);
+        }
+#endif
+      }
+      HeapObject* live_object = HeapObject::FromAddress(free_end);
+      DCHECK(Marking::IsBlack(Marking::MarkBitFrom(live_object)));
+      Map* map = live_object->map();
+      int size = live_object->SizeFromMap(map);
+      if (sweeping_mode == SWEEP_AND_VISIT_LIVE_OBJECTS) {
+        live_object->IterateBody(map->instance_type(), size, v);
+      }
+      if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list != NULL) {
+        int new_region_start = SkipList::RegionNumber(free_end);
+        int new_region_end =
+            SkipList::RegionNumber(free_end + size - kPointerSize);
+        if (new_region_start != curr_region || new_region_end != curr_region) {
+          skip_list->AddObject(free_end, size);
+          curr_region = new_region_end;
+        }
+      }
+      free_start = free_end + size;
+    }
+    // Clear marking bits for current cell.
+    *cell = 0;
+  }
+  if (free_start != p->area_end()) {
+    int size = static_cast<int>(p->area_end() - free_start);
+    if (free_space_mode == ZAP_FREE_SPACE) {
+      memset(free_start, 0xcc, size);
+    }
+    freed_bytes = Free<parallelism>(space, free_list, free_start, size);
+    max_freed_bytes = Max(freed_bytes, max_freed_bytes);
+#ifdef ENABLE_GDB_JIT_INTERFACE
+    if (FLAG_gdbjit && space->identity() == CODE_SPACE) {
+      GDBJITInterface::RemoveCodeRange(free_start, p->area_end());
+    }
+#endif
+  }
+  p->ResetLiveBytes();
+
+  if (parallelism == MarkCompactCollector::SWEEP_IN_PARALLEL) {
+    // When concurrent sweeping is active, the page will be marked after
+    // sweeping by the main thread.
+    p->set_parallel_sweeping(MemoryChunk::SWEEPING_FINALIZE);
+  } else {
+    p->SetWasSwept();
+  }
+  return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes));
+}
+
+
+static bool SetMarkBitsUnderInvalidatedCode(Code* code, bool value) {
+  Page* p = Page::FromAddress(code->address());
+
+  if (p->IsEvacuationCandidate() || p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
+    return false;
+  }
+
+  Address code_start = code->address();
+  Address code_end = code_start + code->Size();
+
+  uint32_t start_index = MemoryChunk::FastAddressToMarkbitIndex(code_start);
+  uint32_t end_index =
+      MemoryChunk::FastAddressToMarkbitIndex(code_end - kPointerSize);
+
+  Bitmap* b = p->markbits();
+
+  MarkBit start_mark_bit = b->MarkBitFromIndex(start_index);
+  MarkBit end_mark_bit = b->MarkBitFromIndex(end_index);
+
+  MarkBit::CellType* start_cell = start_mark_bit.cell();
+  MarkBit::CellType* end_cell = end_mark_bit.cell();
+
+  if (value) {
+    MarkBit::CellType start_mask = ~(start_mark_bit.mask() - 1);
+    MarkBit::CellType end_mask = (end_mark_bit.mask() << 1) - 1;
+
+    if (start_cell == end_cell) {
+      *start_cell |= start_mask & end_mask;
+    } else {
+      *start_cell |= start_mask;
+      for (MarkBit::CellType* cell = start_cell + 1; cell < end_cell; cell++) {
+        *cell = ~0;
+      }
+      *end_cell |= end_mask;
+    }
+  } else {
+    for (MarkBit::CellType* cell = start_cell; cell <= end_cell; cell++) {
+      *cell = 0;
+    }
+  }
+
+  return true;
+}
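+
+
+// Worked example for the masks above (illustrative values): assume the code
+// object's first mark bit has mask 0x08 and its last mark bit has mask 0x20
+// within the same cell.  Then
+//   start_mask = ~(0x08 - 1)     == 0xFFFFFFF8  (bits 3 and up)
+//   end_mask   = (0x20 << 1) - 1 == 0x0000003F  (bits 0..5)
+//   start_mask & end_mask        == 0x00000038  (bits 3..5)
+// so the single write *start_cell |= start_mask & end_mask sets exactly the
+// mark bits covered by the invalidated code object and nothing else.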
+
+
+static bool IsOnInvalidatedCodeObject(Address addr) {
+  // We did not record any slots in large objects, thus we can safely go
+  // to the page from the slot address.
+  Page* p = Page::FromAddress(addr);
+
+  // First check owner's identity because old pointer and old data spaces
+  // are swept lazily and might still have non-zero mark-bits on some
+  // pages.
+  if (p->owner()->identity() != CODE_SPACE) return false;
+
+  // In code space, the only non-zero mark bits are on evacuation candidates
+  // (on which we do not record any slots) and under invalidated code objects.
+  MarkBit mark_bit =
+      p->markbits()->MarkBitFromIndex(Page::FastAddressToMarkbitIndex(addr));
+
+  return mark_bit.Get();
+}
+
+
+void MarkCompactCollector::InvalidateCode(Code* code) {
+  if (heap_->incremental_marking()->IsCompacting() &&
+      !ShouldSkipEvacuationSlotRecording(code)) {
+    DCHECK(compacting_);
+
+    // If the object is white, then no slots were recorded on it yet.
+    MarkBit mark_bit = Marking::MarkBitFrom(code);
+    if (Marking::IsWhite(mark_bit)) return;
+
+    invalidated_code_.Add(code);
+  }
+}
+
+
+// Return true if the given code is deoptimized or will be deoptimized.
+bool MarkCompactCollector::WillBeDeoptimized(Code* code) {
+  return code->is_optimized_code() && code->marked_for_deoptimization();
+}
+
+
+bool MarkCompactCollector::MarkInvalidatedCode() {
+  bool code_marked = false;
+
+  int length = invalidated_code_.length();
+  for (int i = 0; i < length; i++) {
+    Code* code = invalidated_code_[i];
+
+    if (SetMarkBitsUnderInvalidatedCode(code, true)) {
+      code_marked = true;
+    }
+  }
+
+  return code_marked;
+}
+
+
+void MarkCompactCollector::RemoveDeadInvalidatedCode() {
+  int length = invalidated_code_.length();
+  for (int i = 0; i < length; i++) {
+    if (!IsMarked(invalidated_code_[i])) invalidated_code_[i] = NULL;
+  }
+}
+
+
+void MarkCompactCollector::ProcessInvalidatedCode(ObjectVisitor* visitor) {
+  int length = invalidated_code_.length();
+  for (int i = 0; i < length; i++) {
+    Code* code = invalidated_code_[i];
+    if (code != NULL) {
+      code->Iterate(visitor);
+      SetMarkBitsUnderInvalidatedCode(code, false);
+    }
+  }
+  invalidated_code_.Rewind(0);
+}
+
+
+void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
+  Heap::RelocationLock relocation_lock(heap());
+
+  bool code_slots_filtering_required;
+  {
+    GCTracer::Scope gc_scope(heap()->tracer(),
+                             GCTracer::Scope::MC_SWEEP_NEWSPACE);
+    code_slots_filtering_required = MarkInvalidatedCode();
+    EvacuateNewSpace();
+  }
+
+  {
+    GCTracer::Scope gc_scope(heap()->tracer(),
+                             GCTracer::Scope::MC_EVACUATE_PAGES);
+    EvacuatePages();
+  }
+
+  // Second pass: find pointers to new space and update them.
+  PointersUpdatingVisitor updating_visitor(heap());
+
+  {
+    GCTracer::Scope gc_scope(heap()->tracer(),
+                             GCTracer::Scope::MC_UPDATE_NEW_TO_NEW_POINTERS);
+    // Update pointers in to space.
+    SemiSpaceIterator to_it(heap()->new_space()->bottom(),
+                            heap()->new_space()->top());
+    for (HeapObject* object = to_it.Next(); object != NULL;
+         object = to_it.Next()) {
+      Map* map = object->map();
+      object->IterateBody(map->instance_type(), object->SizeFromMap(map),
+                          &updating_visitor);
+    }
+  }
+
+  {
+    GCTracer::Scope gc_scope(heap()->tracer(),
+                             GCTracer::Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS);
+    // Update roots.
+    heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
+  }
+
+  {
+    GCTracer::Scope gc_scope(heap()->tracer(),
+                             GCTracer::Scope::MC_UPDATE_OLD_TO_NEW_POINTERS);
+    StoreBufferRebuildScope scope(heap_, heap_->store_buffer(),
+                                  &Heap::ScavengeStoreBufferCallback);
+    heap_->store_buffer()->IteratePointersToNewSpaceAndClearMaps(
+        &UpdatePointer);
+  }
+
+  {
+    GCTracer::Scope gc_scope(heap()->tracer(),
+                             GCTracer::Scope::MC_UPDATE_POINTERS_TO_EVACUATED);
+    SlotsBuffer::UpdateSlotsRecordedIn(heap_, migration_slots_buffer_,
+                                       code_slots_filtering_required);
+    if (FLAG_trace_fragmentation) {
+      PrintF("  migration slots buffer: %d\n",
+             SlotsBuffer::SizeOfChain(migration_slots_buffer_));
+    }
+
+    if (compacting_ && was_marked_incrementally_) {
+      // It's difficult to filter out slots recorded for large objects.
+      LargeObjectIterator it(heap_->lo_space());
+      for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
+        // LargeObjectSpace is not swept yet, thus we have to skip
+        // dead objects explicitly.
+        if (!IsMarked(obj)) continue;
+
+        Page* p = Page::FromAddress(obj->address());
+        if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
+          obj->Iterate(&updating_visitor);
+          p->ClearFlag(Page::RESCAN_ON_EVACUATION);
+        }
+      }
+    }
+  }
+
+  int npages = evacuation_candidates_.length();
+  {
+    GCTracer::Scope gc_scope(
+        heap()->tracer(),
+        GCTracer::Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED);
+    for (int i = 0; i < npages; i++) {
+      Page* p = evacuation_candidates_[i];
+      DCHECK(p->IsEvacuationCandidate() ||
+             p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
+
+      if (p->IsEvacuationCandidate()) {
+        SlotsBuffer::UpdateSlotsRecordedIn(heap_, p->slots_buffer(),
+                                           code_slots_filtering_required);
+        if (FLAG_trace_fragmentation) {
+          PrintF("  page %p slots buffer: %d\n", reinterpret_cast<void*>(p),
+                 SlotsBuffer::SizeOfChain(p->slots_buffer()));
+        }
+
+        // Important: the skip list should be cleared only after the roots
+        // have been updated, because root iteration traverses the stack and
+        // might have to find code objects from a non-updated pc pointing
+        // into an evacuation candidate.
+        SkipList* list = p->skip_list();
+        if (list != NULL) list->Clear();
+      } else {
+        if (FLAG_gc_verbose) {
+          PrintF("Sweeping 0x%" V8PRIxPTR " during evacuation.\n",
+                 reinterpret_cast<intptr_t>(p));
+        }
+        PagedSpace* space = static_cast<PagedSpace*>(p->owner());
+        p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
+
+        switch (space->identity()) {
+          case OLD_DATA_SPACE:
+            Sweep<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD,
+                  IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>(space, NULL, p,
+                                                       &updating_visitor);
+            break;
+          case OLD_POINTER_SPACE:
+            Sweep<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD,
+                  IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>(space, NULL, p,
+                                                       &updating_visitor);
+            break;
+          case CODE_SPACE:
+            if (FLAG_zap_code_space) {
+              Sweep<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD,
+                    REBUILD_SKIP_LIST, ZAP_FREE_SPACE>(space, NULL, p,
+                                                       &updating_visitor);
+            } else {
+              Sweep<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD,
+                    REBUILD_SKIP_LIST, IGNORE_FREE_SPACE>(space, NULL, p,
+                                                          &updating_visitor);
+            }
+            break;
+          default:
+            UNREACHABLE();
+            break;
+        }
+      }
+    }
+  }
+
+  GCTracer::Scope gc_scope(heap()->tracer(),
+                           GCTracer::Scope::MC_UPDATE_MISC_POINTERS);
+
+  // Update pointers from cells.
+  HeapObjectIterator cell_iterator(heap_->cell_space());
+  for (HeapObject* cell = cell_iterator.Next(); cell != NULL;
+       cell = cell_iterator.Next()) {
+    if (cell->IsCell()) {
+      Cell::BodyDescriptor::IterateBody(cell, &updating_visitor);
+    }
+  }
+
+  HeapObjectIterator js_global_property_cell_iterator(
+      heap_->property_cell_space());
+  for (HeapObject* cell = js_global_property_cell_iterator.Next(); cell != NULL;
+       cell = js_global_property_cell_iterator.Next()) {
+    if (cell->IsPropertyCell()) {
+      PropertyCell::BodyDescriptor::IterateBody(cell, &updating_visitor);
+    }
+  }
+
+  heap_->string_table()->Iterate(&updating_visitor);
+  updating_visitor.VisitPointer(heap_->weak_object_to_code_table_address());
+  if (heap_->weak_object_to_code_table()->IsHashTable()) {
+    WeakHashTable* table =
+        WeakHashTable::cast(heap_->weak_object_to_code_table());
+    table->Iterate(&updating_visitor);
+    table->Rehash(heap_->isolate()->factory()->undefined_value());
+  }
+
+  // Update pointers from external string table.
+  heap_->UpdateReferencesInExternalStringTable(
+      &UpdateReferenceInExternalStringTableEntry);
+
+  EvacuationWeakObjectRetainer evacuation_object_retainer;
+  heap()->ProcessWeakReferences(&evacuation_object_retainer);
+
+  // Visit invalidated code (we ignored all slots on it) and clear mark-bits
+  // under it.
+  ProcessInvalidatedCode(&updating_visitor);
+
+  heap_->isolate()->inner_pointer_to_code_cache()->Flush();
+
+  slots_buffer_allocator_.DeallocateChain(&migration_slots_buffer_);
+  DCHECK(migration_slots_buffer_ == NULL);
+}
+
+
+void MarkCompactCollector::MoveEvacuationCandidatesToEndOfPagesList() {
+  int npages = evacuation_candidates_.length();
+  for (int i = 0; i < npages; i++) {
+    Page* p = evacuation_candidates_[i];
+    if (!p->IsEvacuationCandidate()) continue;
+    p->Unlink();
+    PagedSpace* space = static_cast<PagedSpace*>(p->owner());
+    p->InsertAfter(space->LastPage());
+  }
+}
+
+
+void MarkCompactCollector::ReleaseEvacuationCandidates() {
+  int npages = evacuation_candidates_.length();
+  for (int i = 0; i < npages; i++) {
+    Page* p = evacuation_candidates_[i];
+    if (!p->IsEvacuationCandidate()) continue;
+    PagedSpace* space = static_cast<PagedSpace*>(p->owner());
+    space->Free(p->area_start(), p->area_size());
+    p->set_scan_on_scavenge(false);
+    slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
+    p->ResetLiveBytes();
+    space->ReleasePage(p);
+  }
+  evacuation_candidates_.Rewind(0);
+  compacting_ = false;
+  heap()->FreeQueuedChunks();
+}
+
+
+static const int kStartTableEntriesPerLine = 5;
+static const int kStartTableLines = 171;
+static const int kStartTableInvalidLine = 127;
+static const int kStartTableUnusedEntry = 126;
+
+#define _ kStartTableUnusedEntry
+#define X kStartTableInvalidLine
+// Mark-bit to object start offset table.
+//
+// Each line is indexed by the mark bits in a byte.  The first number on
+// the line gives the number of live object starts for that byte and the
+// remaining numbers give the offsets (in words) of those object starts.
+//
+// Since objects are at least 2 words long, we do not have entries for two
+// consecutive 1 bits.  All byte values above 170 contain at least 2
+// consecutive mark bits and therefore have no table line.
+char kStartTable[kStartTableLines * kStartTableEntriesPerLine] = {
+    0, _, _,
+    _, _,  // 0
+    1, 0, _,
+    _, _,  // 1
+    1, 1, _,
+    _, _,  // 2
+    X, _, _,
+    _, _,  // 3
+    1, 2, _,
+    _, _,  // 4
+    2, 0, 2,
+    _, _,  // 5
+    X, _, _,
+    _, _,  // 6
+    X, _, _,
+    _, _,  // 7
+    1, 3, _,
+    _, _,  // 8
+    2, 0, 3,
+    _, _,  // 9
+    2, 1, 3,
+    _, _,  // 10
+    X, _, _,
+    _, _,  // 11
+    X, _, _,
+    _, _,  // 12
+    X, _, _,
+    _, _,  // 13
+    X, _, _,
+    _, _,  // 14
+    X, _, _,
+    _, _,  // 15
+    1, 4, _,
+    _, _,  // 16
+    2, 0, 4,
+    _, _,  // 17
+    2, 1, 4,
+    _, _,  // 18
+    X, _, _,
+    _, _,  // 19
+    2, 2, 4,
+    _, _,  // 20
+    3, 0, 2,
+    4, _,  // 21
+    X, _, _,
+    _, _,  // 22
+    X, _, _,
+    _, _,  // 23
+    X, _, _,
+    _, _,  // 24
+    X, _, _,
+    _, _,  // 25
+    X, _, _,
+    _, _,  // 26
+    X, _, _,
+    _, _,  // 27
+    X, _, _,
+    _, _,  // 28
+    X, _, _,
+    _, _,  // 29
+    X, _, _,
+    _, _,  // 30
+    X, _, _,
+    _, _,  // 31
+    1, 5, _,
+    _, _,  // 32
+    2, 0, 5,
+    _, _,  // 33
+    2, 1, 5,
+    _, _,  // 34
+    X, _, _,
+    _, _,  // 35
+    2, 2, 5,
+    _, _,  // 36
+    3, 0, 2,
+    5, _,  // 37
+    X, _, _,
+    _, _,  // 38
+    X, _, _,
+    _, _,  // 39
+    2, 3, 5,
+    _, _,  // 40
+    3, 0, 3,
+    5, _,  // 41
+    3, 1, 3,
+    5, _,  // 42
+    X, _, _,
+    _, _,  // 43
+    X, _, _,
+    _, _,  // 44
+    X, _, _,
+    _, _,  // 45
+    X, _, _,
+    _, _,  // 46
+    X, _, _,
+    _, _,  // 47
+    X, _, _,
+    _, _,  // 48
+    X, _, _,
+    _, _,  // 49
+    X, _, _,
+    _, _,  // 50
+    X, _, _,
+    _, _,  // 51
+    X, _, _,
+    _, _,  // 52
+    X, _, _,
+    _, _,  // 53
+    X, _, _,
+    _, _,  // 54
+    X, _, _,
+    _, _,  // 55
+    X, _, _,
+    _, _,  // 56
+    X, _, _,
+    _, _,  // 57
+    X, _, _,
+    _, _,  // 58
+    X, _, _,
+    _, _,  // 59
+    X, _, _,
+    _, _,  // 60
+    X, _, _,
+    _, _,  // 61
+    X, _, _,
+    _, _,  // 62
+    X, _, _,
+    _, _,  // 63
+    1, 6, _,
+    _, _,  // 64
+    2, 0, 6,
+    _, _,  // 65
+    2, 1, 6,
+    _, _,  // 66
+    X, _, _,
+    _, _,  // 67
+    2, 2, 6,
+    _, _,  // 68
+    3, 0, 2,
+    6, _,  // 69
+    X, _, _,
+    _, _,  // 70
+    X, _, _,
+    _, _,  // 71
+    2, 3, 6,
+    _, _,  // 72
+    3, 0, 3,
+    6, _,  // 73
+    3, 1, 3,
+    6, _,  // 74
+    X, _, _,
+    _, _,  // 75
+    X, _, _,
+    _, _,  // 76
+    X, _, _,
+    _, _,  // 77
+    X, _, _,
+    _, _,  // 78
+    X, _, _,
+    _, _,  // 79
+    2, 4, 6,
+    _, _,  // 80
+    3, 0, 4,
+    6, _,  // 81
+    3, 1, 4,
+    6, _,  // 82
+    X, _, _,
+    _, _,  // 83
+    3, 2, 4,
+    6, _,  // 84
+    4, 0, 2,
+    4, 6,  // 85
+    X, _, _,
+    _, _,  // 86
+    X, _, _,
+    _, _,  // 87
+    X, _, _,
+    _, _,  // 88
+    X, _, _,
+    _, _,  // 89
+    X, _, _,
+    _, _,  // 90
+    X, _, _,
+    _, _,  // 91
+    X, _, _,
+    _, _,  // 92
+    X, _, _,
+    _, _,  // 93
+    X, _, _,
+    _, _,  // 94
+    X, _, _,
+    _, _,  // 95
+    X, _, _,
+    _, _,  // 96
+    X, _, _,
+    _, _,  // 97
+    X, _, _,
+    _, _,  // 98
+    X, _, _,
+    _, _,  // 99
+    X, _, _,
+    _, _,  // 100
+    X, _, _,
+    _, _,  // 101
+    X, _, _,
+    _, _,  // 102
+    X, _, _,
+    _, _,  // 103
+    X, _, _,
+    _, _,  // 104
+    X, _, _,
+    _, _,  // 105
+    X, _, _,
+    _, _,  // 106
+    X, _, _,
+    _, _,  // 107
+    X, _, _,
+    _, _,  // 108
+    X, _, _,
+    _, _,  // 109
+    X, _, _,
+    _, _,  // 110
+    X, _, _,
+    _, _,  // 111
+    X, _, _,
+    _, _,  // 112
+    X, _, _,
+    _, _,  // 113
+    X, _, _,
+    _, _,  // 114
+    X, _, _,
+    _, _,  // 115
+    X, _, _,
+    _, _,  // 116
+    X, _, _,
+    _, _,  // 117
+    X, _, _,
+    _, _,  // 118
+    X, _, _,
+    _, _,  // 119
+    X, _, _,
+    _, _,  // 120
+    X, _, _,
+    _, _,  // 121
+    X, _, _,
+    _, _,  // 122
+    X, _, _,
+    _, _,  // 123
+    X, _, _,
+    _, _,  // 124
+    X, _, _,
+    _, _,  // 125
+    X, _, _,
+    _, _,  // 126
+    X, _, _,
+    _, _,  // 127
+    1, 7, _,
+    _, _,  // 128
+    2, 0, 7,
+    _, _,  // 129
+    2, 1, 7,
+    _, _,  // 130
+    X, _, _,
+    _, _,  // 131
+    2, 2, 7,
+    _, _,  // 132
+    3, 0, 2,
+    7, _,  // 133
+    X, _, _,
+    _, _,  // 134
+    X, _, _,
+    _, _,  // 135
+    2, 3, 7,
+    _, _,  // 136
+    3, 0, 3,
+    7, _,  // 137
+    3, 1, 3,
+    7, _,  // 138
+    X, _, _,
+    _, _,  // 139
+    X, _, _,
+    _, _,  // 140
+    X, _, _,
+    _, _,  // 141
+    X, _, _,
+    _, _,  // 142
+    X, _, _,
+    _, _,  // 143
+    2, 4, 7,
+    _, _,  // 144
+    3, 0, 4,
+    7, _,  // 145
+    3, 1, 4,
+    7, _,  // 146
+    X, _, _,
+    _, _,  // 147
+    3, 2, 4,
+    7, _,  // 148
+    4, 0, 2,
+    4, 7,  // 149
+    X, _, _,
+    _, _,  // 150
+    X, _, _,
+    _, _,  // 151
+    X, _, _,
+    _, _,  // 152
+    X, _, _,
+    _, _,  // 153
+    X, _, _,
+    _, _,  // 154
+    X, _, _,
+    _, _,  // 155
+    X, _, _,
+    _, _,  // 156
+    X, _, _,
+    _, _,  // 157
+    X, _, _,
+    _, _,  // 158
+    X, _, _,
+    _, _,  // 159
+    2, 5, 7,
+    _, _,  // 160
+    3, 0, 5,
+    7, _,  // 161
+    3, 1, 5,
+    7, _,  // 162
+    X, _, _,
+    _, _,  // 163
+    3, 2, 5,
+    7, _,  // 164
+    4, 0, 2,
+    5, 7,  // 165
+    X, _, _,
+    _, _,  // 166
+    X, _, _,
+    _, _,  // 167
+    3, 3, 5,
+    7, _,  // 168
+    4, 0, 3,
+    5, 7,  // 169
+    4, 1, 3,
+    5, 7  // 170
+};
+#undef _
+#undef X
+
+
+// Takes a word of mark bits.  Returns the number of objects that start in
+// the range.  Puts the word offsets of those object starts in the supplied
+// array.
+static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts) {
+  int objects = 0;
+  int offset = 0;
+
+  // No consecutive 1 bits.
+  DCHECK((mark_bits & 0x180) != 0x180);
+  DCHECK((mark_bits & 0x18000) != 0x18000);
+  DCHECK((mark_bits & 0x1800000) != 0x1800000);
+
+  while (mark_bits != 0) {
+    int byte = (mark_bits & 0xff);
+    mark_bits >>= 8;
+    if (byte != 0) {
+      DCHECK(byte < kStartTableLines);  // No consecutive 1 bits.
+      char* table = kStartTable + byte * kStartTableEntriesPerLine;
+      int objects_in_these_8_words = table[0];
+      DCHECK(objects_in_these_8_words != kStartTableInvalidLine);
+      DCHECK(objects_in_these_8_words < kStartTableEntriesPerLine);
+      for (int i = 0; i < objects_in_these_8_words; i++) {
+        starts[objects++] = offset + table[1 + i];
+      }
+    }
+    offset += 8;
+  }
+  return objects;
+}
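+
+
+// Worked example (illustrative values): the byte 0x11 (binary 00010001,
+// decimal 17) selects line 17 of kStartTable, which begins {2, 0, 4}: two
+// objects start in this byte's 8-word region, at word offsets 0 and 4.  For
+// a mark word of 0x00001100 only its second byte (0x11) is non-zero, so the
+// loop above records starts[] = {8 + 0, 8 + 4} = {8, 12}; callers such as
+// EvacuateLiveObjectsFromPage multiply these word offsets by kPointerSize to
+// get object addresses relative to the cell base.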
+
+
+int MarkCompactCollector::SweepInParallel(PagedSpace* space,
+                                          int required_freed_bytes) {
+  int max_freed = 0;
+  int max_freed_overall = 0;
+  PageIterator it(space);
+  while (it.has_next()) {
+    Page* p = it.next();
+    max_freed = SweepInParallel(p, space);
+    DCHECK(max_freed >= 0);
+    if (required_freed_bytes > 0 && max_freed >= required_freed_bytes) {
+      return max_freed;
+    }
+    max_freed_overall = Max(max_freed, max_freed_overall);
+    if (p == space->end_of_unswept_pages()) break;
+  }
+  return max_freed_overall;
+}
+
+
+int MarkCompactCollector::SweepInParallel(Page* page, PagedSpace* space) {
+  int max_freed = 0;
+  if (page->TryParallelSweeping()) {
+    FreeList* free_list = space == heap()->old_pointer_space()
+                              ? free_list_old_pointer_space_.get()
+                              : free_list_old_data_space_.get();
+    FreeList private_free_list(space);
+    max_freed = Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
+                      IGNORE_FREE_SPACE>(space, &private_free_list, page, NULL);
+    free_list->Concatenate(&private_free_list);
+  }
+  return max_freed;
+}
+
+
+void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
+  space->ClearStats();
+
+  // We defensively initialize end_of_unswept_pages_ here with the first page
+  // of the pages list.
+  space->set_end_of_unswept_pages(space->FirstPage());
+
+  PageIterator it(space);
+
+  int pages_swept = 0;
+  bool unused_page_present = false;
+  bool parallel_sweeping_active = false;
+
+  while (it.has_next()) {
+    Page* p = it.next();
+    DCHECK(p->parallel_sweeping() == MemoryChunk::SWEEPING_DONE);
+
+    // Clear sweeping flags indicating that marking bits are still intact.
+    p->ClearWasSwept();
+
+    if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION) ||
+        p->IsEvacuationCandidate()) {
+      // Will be processed in EvacuateNewSpaceAndCandidates.
+      DCHECK(evacuation_candidates_.length() > 0);
+      continue;
+    }
+
+    // One unused page is kept; all further unused pages are released before
+    // being swept.
+    if (p->LiveBytes() == 0) {
+      if (unused_page_present) {
+        if (FLAG_gc_verbose) {
+          PrintF("Sweeping 0x%" V8PRIxPTR " released page.\n",
+                 reinterpret_cast<intptr_t>(p));
+        }
+        // Adjust unswept free bytes because releasing a page expects said
+        // counter to be accurate for unswept pages.
+        space->IncreaseUnsweptFreeBytes(p);
+        space->ReleasePage(p);
+        continue;
+      }
+      unused_page_present = true;
+    }
+
+    switch (sweeper) {
+      case CONCURRENT_SWEEPING:
+      case PARALLEL_SWEEPING:
+        if (!parallel_sweeping_active) {
+          if (FLAG_gc_verbose) {
+            PrintF("Sweeping 0x%" V8PRIxPTR ".\n",
+                   reinterpret_cast<intptr_t>(p));
+          }
+          Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
+                IGNORE_FREE_SPACE>(space, NULL, p, NULL);
+          pages_swept++;
+          parallel_sweeping_active = true;
+        } else {
+          if (FLAG_gc_verbose) {
+            PrintF("Sweeping 0x%" V8PRIxPTR " in parallel.\n",
+                   reinterpret_cast<intptr_t>(p));
+          }
+          p->set_parallel_sweeping(MemoryChunk::SWEEPING_PENDING);
+          space->IncreaseUnsweptFreeBytes(p);
+        }
+        space->set_end_of_unswept_pages(p);
+        break;
+      case SEQUENTIAL_SWEEPING: {
+        if (FLAG_gc_verbose) {
+          PrintF("Sweeping 0x%" V8PRIxPTR ".\n", reinterpret_cast<intptr_t>(p));
+        }
+        if (space->identity() == CODE_SPACE && FLAG_zap_code_space) {
+          Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
+                ZAP_FREE_SPACE>(space, NULL, p, NULL);
+        } else if (space->identity() == CODE_SPACE) {
+          Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
+                IGNORE_FREE_SPACE>(space, NULL, p, NULL);
+        } else {
+          Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
+                IGNORE_FREE_SPACE>(space, NULL, p, NULL);
+        }
+        pages_swept++;
+        break;
+      }
+      default: { UNREACHABLE(); }
+    }
+  }
+
+  if (FLAG_gc_verbose) {
+    PrintF("SweepSpace: %s (%d pages swept)\n",
+           AllocationSpaceName(space->identity()), pages_swept);
+  }
+
+  // Give pages that are queued to be freed back to the OS.
+  heap()->FreeQueuedChunks();
+}
+
+
+static bool ShouldStartSweeperThreads(MarkCompactCollector::SweeperType type) {
+  return type == MarkCompactCollector::PARALLEL_SWEEPING ||
+         type == MarkCompactCollector::CONCURRENT_SWEEPING;
+}
+
+
+static bool ShouldWaitForSweeperThreads(
+    MarkCompactCollector::SweeperType type) {
+  return type == MarkCompactCollector::PARALLEL_SWEEPING;
+}
+
+
+void MarkCompactCollector::SweepSpaces() {
+  GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_SWEEP);
+  double start_time = 0.0;
+  if (FLAG_print_cumulative_gc_stat) {
+    start_time = base::OS::TimeCurrentMillis();
+  }
+
+#ifdef DEBUG
+  state_ = SWEEP_SPACES;
+#endif
+  SweeperType how_to_sweep = CONCURRENT_SWEEPING;
+  if (FLAG_parallel_sweeping) how_to_sweep = PARALLEL_SWEEPING;
+  if (FLAG_concurrent_sweeping) how_to_sweep = CONCURRENT_SWEEPING;
+
+  MoveEvacuationCandidatesToEndOfPagesList();
+
+  // Noncompacting collections simply sweep the spaces to clear the mark
+  // bits and free the nonlive blocks (for old and map spaces).  We sweep
+  // the map space last because freeing non-live maps overwrites them and
+  // the other spaces rely on possibly non-live maps to get the sizes for
+  // non-live objects.
+  {
+    GCTracer::Scope sweep_scope(heap()->tracer(),
+                                GCTracer::Scope::MC_SWEEP_OLDSPACE);
+    {
+      SequentialSweepingScope scope(this);
+      SweepSpace(heap()->old_pointer_space(), how_to_sweep);
+      SweepSpace(heap()->old_data_space(), how_to_sweep);
+    }
+
+    if (ShouldStartSweeperThreads(how_to_sweep)) {
+      StartSweeperThreads();
+    }
+
+    if (ShouldWaitForSweeperThreads(how_to_sweep)) {
+      EnsureSweepingCompleted();
+    }
+  }
+  RemoveDeadInvalidatedCode();
+
+  {
+    GCTracer::Scope sweep_scope(heap()->tracer(),
+                                GCTracer::Scope::MC_SWEEP_CODE);
+    SweepSpace(heap()->code_space(), SEQUENTIAL_SWEEPING);
+  }
+
+  {
+    GCTracer::Scope sweep_scope(heap()->tracer(),
+                                GCTracer::Scope::MC_SWEEP_CELL);
+    SweepSpace(heap()->cell_space(), SEQUENTIAL_SWEEPING);
+    SweepSpace(heap()->property_cell_space(), SEQUENTIAL_SWEEPING);
+  }
+
+  EvacuateNewSpaceAndCandidates();
+
+  // ClearNonLiveTransitions depends on precise sweeping of the map space to
+  // detect whether an unmarked map became dead in this collection or in one
+  // of the previous ones.
+  {
+    GCTracer::Scope sweep_scope(heap()->tracer(),
+                                GCTracer::Scope::MC_SWEEP_MAP);
+    SweepSpace(heap()->map_space(), SEQUENTIAL_SWEEPING);
+  }
+
+  // Deallocate unmarked objects and clear marked bits for marked objects.
+  heap_->lo_space()->FreeUnmarkedObjects();
+
+  // Deallocate evacuated candidate pages.
+  ReleaseEvacuationCandidates();
+
+  if (FLAG_print_cumulative_gc_stat) {
+    heap_->tracer()->AddSweepingTime(base::OS::TimeCurrentMillis() -
+                                     start_time);
+  }
+}
+
+
+void MarkCompactCollector::ParallelSweepSpaceComplete(PagedSpace* space) {
+  PageIterator it(space);
+  while (it.has_next()) {
+    Page* p = it.next();
+    if (p->parallel_sweeping() == MemoryChunk::SWEEPING_FINALIZE) {
+      p->set_parallel_sweeping(MemoryChunk::SWEEPING_DONE);
+      p->SetWasSwept();
+    }
+    DCHECK(p->parallel_sweeping() == MemoryChunk::SWEEPING_DONE);
+  }
+}
+
+
+void MarkCompactCollector::ParallelSweepSpacesComplete() {
+  ParallelSweepSpaceComplete(heap()->old_pointer_space());
+  ParallelSweepSpaceComplete(heap()->old_data_space());
+}
+
+
+void MarkCompactCollector::EnableCodeFlushing(bool enable) {
+  if (isolate()->debug()->is_loaded() ||
+      isolate()->debug()->has_break_points()) {
+    enable = false;
+  }
+
+  if (enable) {
+    if (code_flusher_ != NULL) return;
+    code_flusher_ = new CodeFlusher(isolate());
+  } else {
+    if (code_flusher_ == NULL) return;
+    code_flusher_->EvictAllCandidates();
+    delete code_flusher_;
+    code_flusher_ = NULL;
+  }
+
+  if (FLAG_trace_code_flushing) {
+    PrintF("[code-flushing is now %s]\n", enable ? "on" : "off");
+  }
+}
+
+
+// TODO(1466) ReportDeleteIfNeeded is not called currently.
+// Our profiling tools do not expect intersections between
+// code objects. We should either reenable it or change our tools.
+void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj,
+                                                Isolate* isolate) {
+  if (obj->IsCode()) {
+    PROFILE(isolate, CodeDeleteEvent(obj->address()));
+  }
+}
+
+
+Isolate* MarkCompactCollector::isolate() const { return heap_->isolate(); }
+
+
+void MarkCompactCollector::Initialize() {
+  MarkCompactMarkingVisitor::Initialize();
+  IncrementalMarking::Initialize();
+}
+
+
+bool SlotsBuffer::IsTypedSlot(ObjectSlot slot) {
+  return reinterpret_cast<uintptr_t>(slot) < NUMBER_OF_SLOT_TYPES;
+}
+
+
+bool SlotsBuffer::AddTo(SlotsBufferAllocator* allocator,
+                        SlotsBuffer** buffer_address, SlotType type,
+                        Address addr, AdditionMode mode) {
+  SlotsBuffer* buffer = *buffer_address;
+  if (buffer == NULL || !buffer->HasSpaceForTypedSlot()) {
+    if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
+      allocator->DeallocateChain(buffer_address);
+      return false;
+    }
+    buffer = allocator->AllocateBuffer(buffer);
+    *buffer_address = buffer;
+  }
+  DCHECK(buffer->HasSpaceForTypedSlot());
+  buffer->Add(reinterpret_cast<ObjectSlot>(type));
+  buffer->Add(reinterpret_cast<ObjectSlot>(addr));
+  return true;
+}
+
+
+static inline SlotsBuffer::SlotType SlotTypeForRMode(RelocInfo::Mode rmode) {
+  if (RelocInfo::IsCodeTarget(rmode)) {
+    return SlotsBuffer::CODE_TARGET_SLOT;
+  } else if (RelocInfo::IsEmbeddedObject(rmode)) {
+    return SlotsBuffer::EMBEDDED_OBJECT_SLOT;
+  } else if (RelocInfo::IsDebugBreakSlot(rmode)) {
+    return SlotsBuffer::DEBUG_TARGET_SLOT;
+  } else if (RelocInfo::IsJSReturn(rmode)) {
+    return SlotsBuffer::JS_RETURN_SLOT;
+  }
+  UNREACHABLE();
+  return SlotsBuffer::NUMBER_OF_SLOT_TYPES;
+}
+
+
+void MarkCompactCollector::RecordRelocSlot(RelocInfo* rinfo, Object* target) {
+  Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
+  RelocInfo::Mode rmode = rinfo->rmode();
+  if (target_page->IsEvacuationCandidate() &&
+      (rinfo->host() == NULL ||
+       !ShouldSkipEvacuationSlotRecording(rinfo->host()))) {
+    bool success;
+    if (RelocInfo::IsEmbeddedObject(rmode) && rinfo->IsInConstantPool()) {
+      // This doesn't need to be typed since it is just a normal heap pointer.
+      Object** target_pointer =
+          reinterpret_cast<Object**>(rinfo->constant_pool_entry_address());
+      success = SlotsBuffer::AddTo(
+          &slots_buffer_allocator_, target_page->slots_buffer_address(),
+          target_pointer, SlotsBuffer::FAIL_ON_OVERFLOW);
+    } else if (RelocInfo::IsCodeTarget(rmode) && rinfo->IsInConstantPool()) {
+      success = SlotsBuffer::AddTo(
+          &slots_buffer_allocator_, target_page->slots_buffer_address(),
+          SlotsBuffer::CODE_ENTRY_SLOT, rinfo->constant_pool_entry_address(),
+          SlotsBuffer::FAIL_ON_OVERFLOW);
+    } else {
+      success = SlotsBuffer::AddTo(
+          &slots_buffer_allocator_, target_page->slots_buffer_address(),
+          SlotTypeForRMode(rmode), rinfo->pc(), SlotsBuffer::FAIL_ON_OVERFLOW);
+    }
+    if (!success) {
+      EvictEvacuationCandidate(target_page);
+    }
+  }
+}
+
+
+void MarkCompactCollector::RecordCodeEntrySlot(Address slot, Code* target) {
+  Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
+  if (target_page->IsEvacuationCandidate() &&
+      !ShouldSkipEvacuationSlotRecording(reinterpret_cast<Object**>(slot))) {
+    if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
+                            target_page->slots_buffer_address(),
+                            SlotsBuffer::CODE_ENTRY_SLOT, slot,
+                            SlotsBuffer::FAIL_ON_OVERFLOW)) {
+      EvictEvacuationCandidate(target_page);
+    }
+  }
+}
+
+
+void MarkCompactCollector::RecordCodeTargetPatch(Address pc, Code* target) {
+  DCHECK(heap()->gc_state() == Heap::MARK_COMPACT);
+  if (is_compacting()) {
+    Code* host =
+        isolate()->inner_pointer_to_code_cache()->GcSafeFindCodeForInnerPointer(
+            pc);
+    MarkBit mark_bit = Marking::MarkBitFrom(host);
+    if (Marking::IsBlack(mark_bit)) {
+      RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
+      RecordRelocSlot(&rinfo, target);
+    }
+  }
+}
+
+
+static inline SlotsBuffer::SlotType DecodeSlotType(
+    SlotsBuffer::ObjectSlot slot) {
+  return static_cast<SlotsBuffer::SlotType>(reinterpret_cast<intptr_t>(slot));
+}
+
+
+void SlotsBuffer::UpdateSlots(Heap* heap) {
+  PointersUpdatingVisitor v(heap);
+
+  for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
+    ObjectSlot slot = slots_[slot_idx];
+    if (!IsTypedSlot(slot)) {
+      PointersUpdatingVisitor::UpdateSlot(heap, slot);
+    } else {
+      ++slot_idx;
+      DCHECK(slot_idx < idx_);
+      UpdateSlot(heap->isolate(), &v, DecodeSlotType(slot),
+                 reinterpret_cast<Address>(slots_[slot_idx]));
+    }
+  }
+}
+
+
+void SlotsBuffer::UpdateSlotsWithFilter(Heap* heap) {
+  PointersUpdatingVisitor v(heap);
+
+  for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
+    ObjectSlot slot = slots_[slot_idx];
+    if (!IsTypedSlot(slot)) {
+      if (!IsOnInvalidatedCodeObject(reinterpret_cast<Address>(slot))) {
+        PointersUpdatingVisitor::UpdateSlot(heap, slot);
+      }
+    } else {
+      ++slot_idx;
+      DCHECK(slot_idx < idx_);
+      Address pc = reinterpret_cast<Address>(slots_[slot_idx]);
+      if (!IsOnInvalidatedCodeObject(pc)) {
+        UpdateSlot(heap->isolate(), &v, DecodeSlotType(slot),
+                   reinterpret_cast<Address>(slots_[slot_idx]));
+      }
+    }
+  }
+}
+
+
+SlotsBuffer* SlotsBufferAllocator::AllocateBuffer(SlotsBuffer* next_buffer) {
+  return new SlotsBuffer(next_buffer);
+}
+
+
+void SlotsBufferAllocator::DeallocateBuffer(SlotsBuffer* buffer) {
+  delete buffer;
+}
+
+
+void SlotsBufferAllocator::DeallocateChain(SlotsBuffer** buffer_address) {
+  SlotsBuffer* buffer = *buffer_address;
+  while (buffer != NULL) {
+    SlotsBuffer* next_buffer = buffer->next();
+    DeallocateBuffer(buffer);
+    buffer = next_buffer;
+  }
+  *buffer_address = NULL;
+}
+}
+}  // namespace v8::internal
diff --git a/src/heap/mark-compact.h b/src/heap/mark-compact.h
new file mode 100644
index 0000000..c5087b4
--- /dev/null
+++ b/src/heap/mark-compact.h
@@ -0,0 +1,956 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_MARK_COMPACT_H_
+#define V8_HEAP_MARK_COMPACT_H_
+
+#include "src/base/bits.h"
+#include "src/heap/spaces.h"
+
+namespace v8 {
+namespace internal {
+
+// Callback function that returns whether an object is alive.  The heap size
+// of the object is returned in |size|.  It optionally updates the offset
+// to the first live object in the page (only used for old and map objects).
+typedef bool (*IsAliveFunction)(HeapObject* obj, int* size, int* offset);
+
+// Forward declarations.
+class CodeFlusher;
+class MarkCompactCollector;
+class MarkingVisitor;
+class RootMarkingVisitor;
+
+
+class Marking {
+ public:
+  explicit Marking(Heap* heap) : heap_(heap) {}
+
+  INLINE(static MarkBit MarkBitFrom(Address addr));
+
+  INLINE(static MarkBit MarkBitFrom(HeapObject* obj)) {
+    return MarkBitFrom(reinterpret_cast<Address>(obj));
+  }
+
+  // Impossible markbits: 01
+  static const char* kImpossibleBitPattern;
+  INLINE(static bool IsImpossible(MarkBit mark_bit)) {
+    return !mark_bit.Get() && mark_bit.Next().Get();
+  }
+
+  // Black markbits: 10 - this is required by the sweeper.
+  static const char* kBlackBitPattern;
+  INLINE(static bool IsBlack(MarkBit mark_bit)) {
+    return mark_bit.Get() && !mark_bit.Next().Get();
+  }
+
+  // White markbits: 00 - this is required by the mark bit clearer.
+  static const char* kWhiteBitPattern;
+  INLINE(static bool IsWhite(MarkBit mark_bit)) { return !mark_bit.Get(); }
+
+  // Grey markbits: 11
+  static const char* kGreyBitPattern;
+  INLINE(static bool IsGrey(MarkBit mark_bit)) {
+    return mark_bit.Get() && mark_bit.Next().Get();
+  }
+
+  INLINE(static void MarkBlack(MarkBit mark_bit)) {
+    mark_bit.Set();
+    mark_bit.Next().Clear();
+  }
+
+  INLINE(static void BlackToGrey(MarkBit markbit)) { markbit.Next().Set(); }
+
+  INLINE(static void WhiteToGrey(MarkBit markbit)) {
+    markbit.Set();
+    markbit.Next().Set();
+  }
+
+  INLINE(static void GreyToBlack(MarkBit markbit)) { markbit.Next().Clear(); }
+
+  INLINE(static void BlackToGrey(HeapObject* obj)) {
+    BlackToGrey(MarkBitFrom(obj));
+  }
+
+  INLINE(static void AnyToGrey(MarkBit markbit)) {
+    markbit.Set();
+    markbit.Next().Set();
+  }
+
+  void TransferMark(Address old_start, Address new_start);
+
+#ifdef DEBUG
+  enum ObjectColor {
+    BLACK_OBJECT,
+    WHITE_OBJECT,
+    GREY_OBJECT,
+    IMPOSSIBLE_COLOR
+  };
+
+  static const char* ColorName(ObjectColor color) {
+    switch (color) {
+      case BLACK_OBJECT:
+        return "black";
+      case WHITE_OBJECT:
+        return "white";
+      case GREY_OBJECT:
+        return "grey";
+      case IMPOSSIBLE_COLOR:
+        return "impossible";
+    }
+    return "error";
+  }
+
+  static ObjectColor Color(HeapObject* obj) {
+    return Color(Marking::MarkBitFrom(obj));
+  }
+
+  static ObjectColor Color(MarkBit mark_bit) {
+    if (IsBlack(mark_bit)) return BLACK_OBJECT;
+    if (IsWhite(mark_bit)) return WHITE_OBJECT;
+    if (IsGrey(mark_bit)) return GREY_OBJECT;
+    UNREACHABLE();
+    return IMPOSSIBLE_COLOR;
+  }
+#endif
+
+  // Returns true if the transferred color is black.
+  INLINE(static bool TransferColor(HeapObject* from, HeapObject* to)) {
+    MarkBit from_mark_bit = MarkBitFrom(from);
+    MarkBit to_mark_bit = MarkBitFrom(to);
+    bool is_black = false;
+    if (from_mark_bit.Get()) {
+      to_mark_bit.Set();
+      is_black = true;  // Looks black so far.
+    }
+    if (from_mark_bit.Next().Get()) {
+      to_mark_bit.Next().Set();
+      is_black = false;  // Was actually gray.
+    }
+    return is_black;
+  }
+
+ private:
+  Heap* heap_;
+};
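+
+
+// Color encoding recap (a minimal illustration of the helpers above, not an
+// additional API): the two consecutive mark bits of an object encode
+//   white = 00, grey = 11, black = 10, impossible = 01,
+// so, for example, WhiteToGrey turns 00 into 11, GreyToBlack turns 11 into
+// 10, and BlackToGrey turns 10 back into 11.  MarkingDeque::PushBlack relies
+// on BlackToGrey to demote an object when the deque overflows so that it is
+// rediscovered during the later overflow scan.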
+
+// ----------------------------------------------------------------------------
+// Marking deque for tracing live objects.
+class MarkingDeque {
+ public:
+  MarkingDeque()
+      : array_(NULL), top_(0), bottom_(0), mask_(0), overflowed_(false) {}
+
+  void Initialize(Address low, Address high) {
+    HeapObject** obj_low = reinterpret_cast<HeapObject**>(low);
+    HeapObject** obj_high = reinterpret_cast<HeapObject**>(high);
+    array_ = obj_low;
+    mask_ = base::bits::RoundDownToPowerOfTwo32(
+                static_cast<uint32_t>(obj_high - obj_low)) -
+            1;
+    top_ = bottom_ = 0;
+    overflowed_ = false;
+  }
+
+  inline bool IsFull() { return ((top_ + 1) & mask_) == bottom_; }
+
+  inline bool IsEmpty() { return top_ == bottom_; }
+
+  bool overflowed() const { return overflowed_; }
+
+  void ClearOverflowed() { overflowed_ = false; }
+
+  void SetOverflowed() { overflowed_ = true; }
+
+  // Push the (marked) object on the marking stack if there is room,
+  // otherwise mark the object as overflowed and wait for a rescan of the
+  // heap.
+  INLINE(void PushBlack(HeapObject* object)) {
+    DCHECK(object->IsHeapObject());
+    if (IsFull()) {
+      Marking::BlackToGrey(object);
+      MemoryChunk::IncrementLiveBytesFromGC(object->address(), -object->Size());
+      SetOverflowed();
+    } else {
+      array_[top_] = object;
+      top_ = ((top_ + 1) & mask_);
+    }
+  }
+
+  INLINE(void PushGrey(HeapObject* object)) {
+    DCHECK(object->IsHeapObject());
+    if (IsFull()) {
+      SetOverflowed();
+    } else {
+      array_[top_] = object;
+      top_ = ((top_ + 1) & mask_);
+    }
+  }
+
+  INLINE(HeapObject* Pop()) {
+    DCHECK(!IsEmpty());
+    top_ = ((top_ - 1) & mask_);
+    HeapObject* object = array_[top_];
+    DCHECK(object->IsHeapObject());
+    return object;
+  }
+
+  INLINE(void UnshiftGrey(HeapObject* object)) {
+    DCHECK(object->IsHeapObject());
+    if (IsFull()) {
+      SetOverflowed();
+    } else {
+      bottom_ = ((bottom_ - 1) & mask_);
+      array_[bottom_] = object;
+    }
+  }
+
+  HeapObject** array() { return array_; }
+  int bottom() { return bottom_; }
+  int top() { return top_; }
+  int mask() { return mask_; }
+  void set_top(int top) { top_ = top; }
+
+ private:
+  HeapObject** array_;
+  // array_[(top_ - 1) & mask_] is the top element in the deque.  The deque
+  // is empty when top_ == bottom_.  It is full when top_ + 1 == bottom_
+  // (mod mask_ + 1).
+  int top_;
+  int bottom_;
+  int mask_;
+  bool overflowed_;
+
+  DISALLOW_COPY_AND_ASSIGN(MarkingDeque);
+};
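+
+
+// Sizing sketch (illustrative numbers): Initialize rounds the number of
+// available HeapObject* slots down to a power of two and stores that value
+// minus one in mask_.  A 4 KB backing region on a 64-bit build holds 512
+// slots, so mask_ == 511 and indices wrap via (index + 1) & mask_.  Since
+// IsFull() treats (top_ + 1) & mask_ == bottom_ as full, one slot always
+// stays unused, i.e. at most 511 objects can be queued before PushBlack and
+// PushGrey start setting the overflow flag.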
+
+
+class SlotsBufferAllocator {
+ public:
+  SlotsBuffer* AllocateBuffer(SlotsBuffer* next_buffer);
+  void DeallocateBuffer(SlotsBuffer* buffer);
+
+  void DeallocateChain(SlotsBuffer** buffer_address);
+};
+
+
+// SlotsBuffer records a sequence of slots that has to be updated
+// after live objects were relocated from evacuation candidates.
+// All slots are either untyped or typed:
+//    - Untyped slots are expected to contain a tagged object pointer.
+//      They are recorded by an address.
+//    - Typed slots are expected to contain an encoded pointer to a heap
+//      object where the way of encoding depends on the type of the slot.
+//      They are recorded as a pair (SlotType, slot address).
+// We assume that the zero page is never mapped; this allows us to
+// distinguish untyped slots from typed slots during iteration by a simple
+// comparison: if an element of the slots buffer is less than
+// NUMBER_OF_SLOT_TYPES, then it is the first element of a typed slot's pair.
+class SlotsBuffer {
+ public:
+  typedef Object** ObjectSlot;
+
+  explicit SlotsBuffer(SlotsBuffer* next_buffer)
+      : idx_(0), chain_length_(1), next_(next_buffer) {
+    if (next_ != NULL) {
+      chain_length_ = next_->chain_length_ + 1;
+    }
+  }
+
+  ~SlotsBuffer() {}
+
+  void Add(ObjectSlot slot) {
+    DCHECK(0 <= idx_ && idx_ < kNumberOfElements);
+    slots_[idx_++] = slot;
+  }
+
+  enum SlotType {
+    EMBEDDED_OBJECT_SLOT,
+    RELOCATED_CODE_OBJECT,
+    CODE_TARGET_SLOT,
+    CODE_ENTRY_SLOT,
+    DEBUG_TARGET_SLOT,
+    JS_RETURN_SLOT,
+    NUMBER_OF_SLOT_TYPES
+  };
+
+  static const char* SlotTypeToString(SlotType type) {
+    switch (type) {
+      case EMBEDDED_OBJECT_SLOT:
+        return "EMBEDDED_OBJECT_SLOT";
+      case RELOCATED_CODE_OBJECT:
+        return "RELOCATED_CODE_OBJECT";
+      case CODE_TARGET_SLOT:
+        return "CODE_TARGET_SLOT";
+      case CODE_ENTRY_SLOT:
+        return "CODE_ENTRY_SLOT";
+      case DEBUG_TARGET_SLOT:
+        return "DEBUG_TARGET_SLOT";
+      case JS_RETURN_SLOT:
+        return "JS_RETURN_SLOT";
+      case NUMBER_OF_SLOT_TYPES:
+        return "NUMBER_OF_SLOT_TYPES";
+    }
+    return "UNKNOWN SlotType";
+  }
+
+  void UpdateSlots(Heap* heap);
+
+  void UpdateSlotsWithFilter(Heap* heap);
+
+  SlotsBuffer* next() { return next_; }
+
+  static int SizeOfChain(SlotsBuffer* buffer) {
+    if (buffer == NULL) return 0;
+    return static_cast<int>(buffer->idx_ +
+                            (buffer->chain_length_ - 1) * kNumberOfElements);
+  }
+
+  inline bool IsFull() { return idx_ == kNumberOfElements; }
+
+  inline bool HasSpaceForTypedSlot() { return idx_ < kNumberOfElements - 1; }
+
+  static void UpdateSlotsRecordedIn(Heap* heap, SlotsBuffer* buffer,
+                                    bool code_slots_filtering_required) {
+    while (buffer != NULL) {
+      if (code_slots_filtering_required) {
+        buffer->UpdateSlotsWithFilter(heap);
+      } else {
+        buffer->UpdateSlots(heap);
+      }
+      buffer = buffer->next();
+    }
+  }
+
+  enum AdditionMode { FAIL_ON_OVERFLOW, IGNORE_OVERFLOW };
+
+  static bool ChainLengthThresholdReached(SlotsBuffer* buffer) {
+    return buffer != NULL && buffer->chain_length_ >= kChainLengthThreshold;
+  }
+
+  INLINE(static bool AddTo(SlotsBufferAllocator* allocator,
+                           SlotsBuffer** buffer_address, ObjectSlot slot,
+                           AdditionMode mode)) {
+    SlotsBuffer* buffer = *buffer_address;
+    if (buffer == NULL || buffer->IsFull()) {
+      if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
+        allocator->DeallocateChain(buffer_address);
+        return false;
+      }
+      buffer = allocator->AllocateBuffer(buffer);
+      *buffer_address = buffer;
+    }
+    buffer->Add(slot);
+    return true;
+  }
+
+  static bool IsTypedSlot(ObjectSlot slot);
+
+  static bool AddTo(SlotsBufferAllocator* allocator,
+                    SlotsBuffer** buffer_address, SlotType type, Address addr,
+                    AdditionMode mode);
+
+  static const int kNumberOfElements = 1021;
+
+ private:
+  static const int kChainLengthThreshold = 15;
+
+  intptr_t idx_;
+  intptr_t chain_length_;
+  SlotsBuffer* next_;
+  ObjectSlot slots_[kNumberOfElements];
+};
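+
+
+// Encoding sketch (illustrative, using the constants above): an untyped slot
+// occupies a single ObjectSlot entry holding the slot address itself, while
+// a typed slot occupies two consecutive entries.  Recording a
+// CODE_TARGET_SLOT at address pc, for example, stores
+//   slots_[i]     = reinterpret_cast<ObjectSlot>(CODE_TARGET_SLOT);  // == 2
+//   slots_[i + 1] = reinterpret_cast<ObjectSlot>(pc);
+// IsTypedSlot recognizes the first entry because its raw value is below
+// NUMBER_OF_SLOT_TYPES (6), which can never be a mapped address.  With
+// kNumberOfElements == 1021, a chain of length 3 whose head buffer holds 10
+// entries reports SizeOfChain == 10 + 2 * 1021 == 2052.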
+
+
+// CodeFlusher collects candidates for code flushing during marking and
+// processes those candidates after marking has completed in order to
+// reset those functions referencing code objects that would otherwise
+// be unreachable. Code objects can be referenced in three ways:
+//    - SharedFunctionInfo references unoptimized code.
+//    - JSFunction references either unoptimized or optimized code.
+//    - OptimizedCodeMap references optimized code.
+// We are not allowed to flush unoptimized code for functions that got
+// optimized or inlined into optimized code, because we might bailout
+// into the unoptimized code again during deoptimization.
+class CodeFlusher {
+ public:
+  explicit CodeFlusher(Isolate* isolate)
+      : isolate_(isolate),
+        jsfunction_candidates_head_(NULL),
+        shared_function_info_candidates_head_(NULL),
+        optimized_code_map_holder_head_(NULL) {}
+
+  void AddCandidate(SharedFunctionInfo* shared_info) {
+    if (GetNextCandidate(shared_info) == NULL) {
+      SetNextCandidate(shared_info, shared_function_info_candidates_head_);
+      shared_function_info_candidates_head_ = shared_info;
+    }
+  }
+
+  void AddCandidate(JSFunction* function) {
+    DCHECK(function->code() == function->shared()->code());
+    if (GetNextCandidate(function)->IsUndefined()) {
+      SetNextCandidate(function, jsfunction_candidates_head_);
+      jsfunction_candidates_head_ = function;
+    }
+  }
+
+  void AddOptimizedCodeMap(SharedFunctionInfo* code_map_holder) {
+    if (GetNextCodeMap(code_map_holder)->IsUndefined()) {
+      SetNextCodeMap(code_map_holder, optimized_code_map_holder_head_);
+      optimized_code_map_holder_head_ = code_map_holder;
+    }
+  }
+
+  void EvictOptimizedCodeMap(SharedFunctionInfo* code_map_holder);
+  void EvictCandidate(SharedFunctionInfo* shared_info);
+  void EvictCandidate(JSFunction* function);
+
+  void ProcessCandidates() {
+    ProcessOptimizedCodeMaps();
+    ProcessSharedFunctionInfoCandidates();
+    ProcessJSFunctionCandidates();
+  }
+
+  void EvictAllCandidates() {
+    EvictOptimizedCodeMaps();
+    EvictJSFunctionCandidates();
+    EvictSharedFunctionInfoCandidates();
+  }
+
+  void IteratePointersToFromSpace(ObjectVisitor* v);
+
+ private:
+  void ProcessOptimizedCodeMaps();
+  void ProcessJSFunctionCandidates();
+  void ProcessSharedFunctionInfoCandidates();
+  void EvictOptimizedCodeMaps();
+  void EvictJSFunctionCandidates();
+  void EvictSharedFunctionInfoCandidates();
+
+  static JSFunction** GetNextCandidateSlot(JSFunction* candidate) {
+    return reinterpret_cast<JSFunction**>(
+        HeapObject::RawField(candidate, JSFunction::kNextFunctionLinkOffset));
+  }
+
+  static JSFunction* GetNextCandidate(JSFunction* candidate) {
+    Object* next_candidate = candidate->next_function_link();
+    return reinterpret_cast<JSFunction*>(next_candidate);
+  }
+
+  static void SetNextCandidate(JSFunction* candidate,
+                               JSFunction* next_candidate) {
+    candidate->set_next_function_link(next_candidate);
+  }
+
+  static void ClearNextCandidate(JSFunction* candidate, Object* undefined) {
+    DCHECK(undefined->IsUndefined());
+    candidate->set_next_function_link(undefined, SKIP_WRITE_BARRIER);
+  }
+
+  static SharedFunctionInfo* GetNextCandidate(SharedFunctionInfo* candidate) {
+    Object* next_candidate = candidate->code()->gc_metadata();
+    return reinterpret_cast<SharedFunctionInfo*>(next_candidate);
+  }
+
+  static void SetNextCandidate(SharedFunctionInfo* candidate,
+                               SharedFunctionInfo* next_candidate) {
+    candidate->code()->set_gc_metadata(next_candidate);
+  }
+
+  static void ClearNextCandidate(SharedFunctionInfo* candidate) {
+    candidate->code()->set_gc_metadata(NULL, SKIP_WRITE_BARRIER);
+  }
+
+  static SharedFunctionInfo* GetNextCodeMap(SharedFunctionInfo* holder) {
+    FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
+    Object* next_map = code_map->get(SharedFunctionInfo::kNextMapIndex);
+    return reinterpret_cast<SharedFunctionInfo*>(next_map);
+  }
+
+  static void SetNextCodeMap(SharedFunctionInfo* holder,
+                             SharedFunctionInfo* next_holder) {
+    FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
+    code_map->set(SharedFunctionInfo::kNextMapIndex, next_holder);
+  }
+
+  static void ClearNextCodeMap(SharedFunctionInfo* holder) {
+    FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
+    code_map->set_undefined(SharedFunctionInfo::kNextMapIndex);
+  }
+
+  Isolate* isolate_;
+  JSFunction* jsfunction_candidates_head_;
+  SharedFunctionInfo* shared_function_info_candidates_head_;
+  SharedFunctionInfo* optimized_code_map_holder_head_;
+
+  DISALLOW_COPY_AND_ASSIGN(CodeFlusher);
+};
+
+
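+// Illustrative usage sketch (assumed flow, not part of the upstream file):
+// during marking the visitors register flushing candidates, and after marking
+// the collector processes them, e.g.:
+//
+//   CodeFlusher* flusher = collector->code_flusher();
+//   flusher->AddCandidate(shared_info);  // for a SharedFunctionInfo
+//   flusher->AddCandidate(function);     // for a JSFunction
+//   // ... marking completes ...
+//   flusher->ProcessCandidates();        // resets code of dead candidates
+
+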
+// Defined in isolate.h.
+class ThreadLocalTop;
+
+
+// -------------------------------------------------------------------------
+// Mark-Compact collector
+class MarkCompactCollector {
+ public:
+  // Sets the global flags; this must be called before Prepare to take effect.
+  inline void SetFlags(int flags);
+
+  static void Initialize();
+
+  void SetUp();
+
+  void TearDown();
+
+  void CollectEvacuationCandidates(PagedSpace* space);
+
+  void AddEvacuationCandidate(Page* p);
+
+  // Prepares for GC by resetting relocation info in old and map spaces and
+  // choosing spaces to compact.
+  void Prepare();
+
+  // Performs a global garbage collection.
+  void CollectGarbage();
+
+  enum CompactionMode { INCREMENTAL_COMPACTION, NON_INCREMENTAL_COMPACTION };
+
+  bool StartCompaction(CompactionMode mode);
+
+  void AbortCompaction();
+
+#ifdef DEBUG
+  // Checks whether a mark-compact collection is currently in progress.
+  bool in_use() { return state_ > PREPARE_GC; }
+  bool are_map_pointers_encoded() { return state_ == UPDATE_POINTERS; }
+#endif
+
+  // Determine type of object and emit deletion log event.
+  static void ReportDeleteIfNeeded(HeapObject* obj, Isolate* isolate);
+
+  // Distinguishable invalid map encodings (for single word and multiple words)
+  // that indicate free regions.
+  static const uint32_t kSingleFreeEncoding = 0;
+  static const uint32_t kMultiFreeEncoding = 1;
+
+  static inline bool IsMarked(Object* obj);
+
+  inline Heap* heap() const { return heap_; }
+  inline Isolate* isolate() const;
+
+  CodeFlusher* code_flusher() { return code_flusher_; }
+  inline bool is_code_flushing_enabled() const { return code_flusher_ != NULL; }
+  void EnableCodeFlushing(bool enable);
+
+  enum SweeperType {
+    PARALLEL_SWEEPING,
+    CONCURRENT_SWEEPING,
+    SEQUENTIAL_SWEEPING
+  };
+
+  enum SweepingParallelism { SWEEP_ON_MAIN_THREAD, SWEEP_IN_PARALLEL };
+
+#ifdef VERIFY_HEAP
+  void VerifyMarkbitsAreClean();
+  static void VerifyMarkbitsAreClean(PagedSpace* space);
+  static void VerifyMarkbitsAreClean(NewSpace* space);
+  void VerifyWeakEmbeddedObjectsInCode();
+  void VerifyOmittedMapChecks();
+#endif
+
+  INLINE(static bool ShouldSkipEvacuationSlotRecording(Object** anchor)) {
+    return Page::FromAddress(reinterpret_cast<Address>(anchor))
+        ->ShouldSkipEvacuationSlotRecording();
+  }
+
+  INLINE(static bool ShouldSkipEvacuationSlotRecording(Object* host)) {
+    return Page::FromAddress(reinterpret_cast<Address>(host))
+        ->ShouldSkipEvacuationSlotRecording();
+  }
+
+  INLINE(static bool IsOnEvacuationCandidate(Object* obj)) {
+    return Page::FromAddress(reinterpret_cast<Address>(obj))
+        ->IsEvacuationCandidate();
+  }
+
+  INLINE(void EvictEvacuationCandidate(Page* page)) {
+    if (FLAG_trace_fragmentation) {
+      PrintF("Page %p is too popular. Disabling evacuation.\n",
+             reinterpret_cast<void*>(page));
+    }
+
+    // TODO(gc) If all evacuation candidates are too popular we
+    // should stop slots recording entirely.
+    page->ClearEvacuationCandidate();
+
+    // We were not collecting slots on this page that point to other
+    // evacuation candidates, thus we have to rescan the page after
+    // evacuation to discover and update all pointers to evacuated objects.
+    if (page->owner()->identity() == OLD_DATA_SPACE) {
+      evacuation_candidates_.RemoveElement(page);
+    } else {
+      page->SetFlag(Page::RESCAN_ON_EVACUATION);
+    }
+  }
+
+  void RecordRelocSlot(RelocInfo* rinfo, Object* target);
+  void RecordCodeEntrySlot(Address slot, Code* target);
+  void RecordCodeTargetPatch(Address pc, Code* target);
+
+  INLINE(void RecordSlot(
+      Object** anchor_slot, Object** slot, Object* object,
+      SlotsBuffer::AdditionMode mode = SlotsBuffer::FAIL_ON_OVERFLOW));
+
+  void MigrateObject(HeapObject* dst, HeapObject* src, int size,
+                     AllocationSpace to_old_space);
+
+  bool TryPromoteObject(HeapObject* object, int object_size);
+
+  void InvalidateCode(Code* code);
+
+  void ClearMarkbits();
+
+  bool abort_incremental_marking() const { return abort_incremental_marking_; }
+
+  bool is_compacting() const { return compacting_; }
+
+  MarkingParity marking_parity() { return marking_parity_; }
+
+  // Concurrent and parallel sweeping support. If required_freed_bytes is
+  // larger than 0, sweeping returns after a block of at least
+  // required_freed_bytes has been freed. If required_freed_bytes is zero,
+  // the whole given space is swept. Returns the size of the largest
+  // contiguous freed memory chunk.
+  int SweepInParallel(PagedSpace* space, int required_freed_bytes);
+
+  // Sweeps a given page concurrently with the sweeper threads. Returns the
+  // size of the largest contiguous freed memory chunk.
+  int SweepInParallel(Page* page, PagedSpace* space);
+
+  void EnsureSweepingCompleted();
+
+  // Returns true if sweeper threads are not active, or if they are done
+  // processing all pages. If the unconditional early return for inactive
+  // threads turns out to be a latency issue we should be smarter here.
+  bool IsSweepingCompleted();
+
+  void RefillFreeList(PagedSpace* space);
+
+  bool AreSweeperThreadsActivated();
+
+  // Checks if sweeping is in progress right now on any space.
+  bool sweeping_in_progress() { return sweeping_in_progress_; }
+
+  void set_sequential_sweeping(bool sequential_sweeping) {
+    sequential_sweeping_ = sequential_sweeping;
+  }
+
+  bool sequential_sweeping() const { return sequential_sweeping_; }
+
+  // Mark the global table which maps weak objects to dependent code without
+  // marking its contents.
+  void MarkWeakObjectToCodeTable();
+
+  // Special case for processing weak references in a full collection. We need
+  // to artificially keep AllocationSites alive for a time.
+  void MarkAllocationSite(AllocationSite* site);
+
+ private:
+  class SweeperTask;
+
+  explicit MarkCompactCollector(Heap* heap);
+  ~MarkCompactCollector();
+
+  bool MarkInvalidatedCode();
+  bool WillBeDeoptimized(Code* code);
+  void RemoveDeadInvalidatedCode();
+  void ProcessInvalidatedCode(ObjectVisitor* visitor);
+
+  void StartSweeperThreads();
+
+#ifdef DEBUG
+  enum CollectorState {
+    IDLE,
+    PREPARE_GC,
+    MARK_LIVE_OBJECTS,
+    SWEEP_SPACES,
+    ENCODE_FORWARDING_ADDRESSES,
+    UPDATE_POINTERS,
+    RELOCATE_OBJECTS
+  };
+
+  // The current stage of the collector.
+  CollectorState state_;
+#endif
+
+  bool reduce_memory_footprint_;
+
+  bool abort_incremental_marking_;
+
+  MarkingParity marking_parity_;
+
+  // True if we are collecting slots to perform evacuation from evacuation
+  // candidates.
+  bool compacting_;
+
+  bool was_marked_incrementally_;
+
+  // True if concurrent or parallel sweeping is currently in progress.
+  bool sweeping_in_progress_;
+
+  base::Semaphore pending_sweeper_jobs_semaphore_;
+
+  bool sequential_sweeping_;
+
+  SlotsBufferAllocator slots_buffer_allocator_;
+
+  SlotsBuffer* migration_slots_buffer_;
+
+  // Finishes GC, performs heap verification if enabled.
+  void Finish();
+
+  // -----------------------------------------------------------------------
+  // Phase 1: Marking live objects.
+  //
+  //  Before: The heap has been prepared for garbage collection by
+  //          MarkCompactCollector::Prepare() and is otherwise in its
+  //          normal state.
+  //
+  //   After: Live objects are marked and non-live objects are unmarked.
+
+  friend class RootMarkingVisitor;
+  friend class MarkingVisitor;
+  friend class MarkCompactMarkingVisitor;
+  friend class CodeMarkingVisitor;
+  friend class SharedFunctionInfoMarkingVisitor;
+
+  // Mark code objects that are active on the stack to prevent them
+  // from being flushed.
+  void PrepareThreadForCodeFlushing(Isolate* isolate, ThreadLocalTop* top);
+
+  void PrepareForCodeFlushing();
+
+  // Marking operations for objects reachable from roots.
+  void MarkLiveObjects();
+
+  void AfterMarking();
+
+  // Marks the object black and pushes it on the marking stack.
+  // This is for non-incremental marking only.
+  INLINE(void MarkObject(HeapObject* obj, MarkBit mark_bit));
+
+  // Marks the object black assuming that it is not yet marked.
+  // This is for non-incremental marking only.
+  INLINE(void SetMark(HeapObject* obj, MarkBit mark_bit));
+
+  // Mark the heap roots and all objects reachable from them.
+  void MarkRoots(RootMarkingVisitor* visitor);
+
+  // Mark the string table specially.  References to internalized strings from
+  // the string table are weak.
+  void MarkStringTable(RootMarkingVisitor* visitor);
+
+  // Mark objects in implicit references groups if their parent object
+  // is marked.
+  void MarkImplicitRefGroups();
+
+  // Mark objects reachable (transitively) from objects in the marking stack
+  // or overflowed in the heap.
+  void ProcessMarkingDeque();
+
+  // Mark objects reachable (transitively) from objects in the marking stack
+  // or overflowed in the heap.  This respects references only considered in
+  // the final atomic marking pause including the following:
+  //    - Processing of objects reachable through Harmony WeakMaps.
+  //    - Objects reachable due to host application logic like object groups
+  //      or implicit references' groups.
+  void ProcessEphemeralMarking(ObjectVisitor* visitor);
+
+  // If the call-site of the top optimized code was not prepared for
+  // deoptimization, then treat the maps in the code as strong pointers,
+  // otherwise a map can die and deoptimize the code.
+  void ProcessTopOptimizedFrame(ObjectVisitor* visitor);
+
+  // Mark objects reachable (transitively) from objects in the marking
+  // stack.  This function empties the marking stack, but may leave
+  // overflowed objects in the heap, in which case the marking stack's
+  // overflow flag will be set.
+  void EmptyMarkingDeque();
+
+  // Refill the marking stack with overflowed objects from the heap.  This
+  // function either leaves the marking stack full or clears the overflow
+  // flag on the marking stack.
+  void RefillMarkingDeque();
+
+  // After reachable maps have been marked process per context object
+  // literal map caches removing unmarked entries.
+  void ProcessMapCaches();
+
+  // Callback function for telling whether the object *p is an unmarked
+  // heap object.
+  static bool IsUnmarkedHeapObject(Object** p);
+  static bool IsUnmarkedHeapObjectWithHeap(Heap* heap, Object** p);
+
+  // Map transitions from a live map to a dead map must be killed.
+  // We replace them with a null descriptor, with the same key.
+  void ClearNonLiveReferences();
+  void ClearNonLivePrototypeTransitions(Map* map);
+  void ClearNonLiveMapTransitions(Map* map, MarkBit map_mark);
+  void ClearMapTransitions(Map* map);
+  bool ClearMapBackPointer(Map* map);
+  void TrimDescriptorArray(Map* map, DescriptorArray* descriptors,
+                           int number_of_own_descriptors);
+  void TrimEnumCache(Map* map, DescriptorArray* descriptors);
+
+  void ClearDependentCode(DependentCode* dependent_code);
+  void ClearDependentICList(Object* head);
+  void ClearNonLiveDependentCode(DependentCode* dependent_code);
+  int ClearNonLiveDependentCodeInGroup(DependentCode* dependent_code, int group,
+                                       int start, int end, int new_start);
+
+  // Mark all values associated with reachable keys in weak collections
+  // encountered so far.  This might push new object or even new weak maps onto
+  // the marking stack.
+  void ProcessWeakCollections();
+
+  // After all reachable objects have been marked those weak map entries
+  // with an unreachable key are removed from all encountered weak maps.
+  // The linked list of all encountered weak maps is destroyed.
+  void ClearWeakCollections();
+
+  // We have to remove all encountered weak maps from the list of weak
+  // collections when incremental marking is aborted.
+  void AbortWeakCollections();
+
+  // -----------------------------------------------------------------------
+  // Phase 2: Sweeping to clear mark bits and free non-live objects for
+  // a non-compacting collection.
+  //
+  //  Before: Live objects are marked and non-live objects are unmarked.
+  //
+  //   After: Live objects are unmarked, non-live regions have been added to
+  //          their space's free list. Active eden semispace is compacted by
+  //          evacuation.
+  //
+
+  // If we are not compacting the heap, we simply sweep the spaces except
+  // for the large object space, clearing mark bits and adding unmarked
+  // regions to each space's free list.
+  void SweepSpaces();
+
+  int DiscoverAndEvacuateBlackObjectsOnPage(NewSpace* new_space,
+                                            NewSpacePage* p);
+
+  void EvacuateNewSpace();
+
+  void EvacuateLiveObjectsFromPage(Page* p);
+
+  void EvacuatePages();
+
+  void EvacuateNewSpaceAndCandidates();
+
+  void ReleaseEvacuationCandidates();
+
+  // Moves the pages of the evacuation_candidates_ list to the end of their
+  // corresponding space pages list.
+  void MoveEvacuationCandidatesToEndOfPagesList();
+
+  void SweepSpace(PagedSpace* space, SweeperType sweeper);
+
+  // Finalizes the parallel sweeping phase. Marks all the pages that were
+  // swept in parallel.
+  void ParallelSweepSpacesComplete();
+
+  void ParallelSweepSpaceComplete(PagedSpace* space);
+
+  // Updates store buffer and slot buffer for a pointer in a migrating object.
+  void RecordMigratedSlot(Object* value, Address slot);
+
+#ifdef DEBUG
+  friend class MarkObjectVisitor;
+  static void VisitObject(HeapObject* obj);
+
+  friend class UnmarkObjectVisitor;
+  static void UnmarkObject(HeapObject* obj);
+#endif
+
+  Heap* heap_;
+  MarkingDeque marking_deque_;
+  CodeFlusher* code_flusher_;
+  bool have_code_to_deoptimize_;
+
+  List<Page*> evacuation_candidates_;
+  List<Code*> invalidated_code_;
+
+  SmartPointer<FreeList> free_list_old_data_space_;
+  SmartPointer<FreeList> free_list_old_pointer_space_;
+
+  friend class Heap;
+};
+
+
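+// Rough call sequence for a full collection (a sketch inferred from the
+// comments above; in practice the heap drives these calls):
+//
+//   MarkCompactCollector* collector = heap->mark_compact_collector();
+//   collector->SetFlags(flags);    // must be called before Prepare
+//   collector->Prepare();          // reset relocation info, pick candidates
+//   collector->CollectGarbage();   // mark live objects, sweep and evacuate
+
+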
+class MarkBitCellIterator BASE_EMBEDDED {
+ public:
+  explicit MarkBitCellIterator(MemoryChunk* chunk) : chunk_(chunk) {
+    last_cell_index_ = Bitmap::IndexToCell(Bitmap::CellAlignIndex(
+        chunk_->AddressToMarkbitIndex(chunk_->area_end())));
+    cell_base_ = chunk_->area_start();
+    cell_index_ = Bitmap::IndexToCell(
+        Bitmap::CellAlignIndex(chunk_->AddressToMarkbitIndex(cell_base_)));
+    cells_ = chunk_->markbits()->cells();
+  }
+
+  inline bool Done() { return cell_index_ == last_cell_index_; }
+
+  inline bool HasNext() { return cell_index_ < last_cell_index_ - 1; }
+
+  inline MarkBit::CellType* CurrentCell() {
+    DCHECK(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex(
+                              chunk_->AddressToMarkbitIndex(cell_base_))));
+    return &cells_[cell_index_];
+  }
+
+  inline Address CurrentCellBase() {
+    DCHECK(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex(
+                              chunk_->AddressToMarkbitIndex(cell_base_))));
+    return cell_base_;
+  }
+
+  inline void Advance() {
+    cell_index_++;
+    cell_base_ += 32 * kPointerSize;
+  }
+
+ private:
+  MemoryChunk* chunk_;
+  MarkBit::CellType* cells_;
+  unsigned int last_cell_index_;
+  unsigned int cell_index_;
+  Address cell_base_;
+};
+
+
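+// Minimal usage sketch (an assumption, not from the upstream file): walk the
+// mark-bit cells of a chunk, one cell per 32 pointer-sized words:
+//
+//   MarkBitCellIterator it(chunk);
+//   while (!it.Done()) {
+//     MarkBit::CellType* cell = it.CurrentCell();
+//     Address cell_base = it.CurrentCellBase();
+//     // ... inspect the bits in *cell, which cover cell_base onwards ...
+//     it.Advance();
+//   }
+
+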
+class SequentialSweepingScope BASE_EMBEDDED {
+ public:
+  explicit SequentialSweepingScope(MarkCompactCollector* collector)
+      : collector_(collector) {
+    collector_->set_sequential_sweeping(true);
+  }
+
+  ~SequentialSweepingScope() { collector_->set_sequential_sweeping(false); }
+
+ private:
+  MarkCompactCollector* collector_;
+};
+
+
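+// Usage sketch (an assumption, not from the upstream file): an RAII scope
+// that forces sequential sweeping for its dynamic extent, e.g.
+//
+//   {
+//     SequentialSweepingScope scope(heap->mark_compact_collector());
+//     // ... work that must not overlap with the sweeper threads ...
+//   }  // sequential_sweeping() reverts to false here
+
+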
+const char* AllocationSpaceName(AllocationSpace space);
+}
+}  // namespace v8::internal
+
+#endif  // V8_HEAP_MARK_COMPACT_H_
diff --git a/src/heap/objects-visiting-inl.h b/src/heap/objects-visiting-inl.h
new file mode 100644
index 0000000..d220118
--- /dev/null
+++ b/src/heap/objects-visiting-inl.h
@@ -0,0 +1,934 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_VISITING_INL_H_
+#define V8_OBJECTS_VISITING_INL_H_
+
+
+namespace v8 {
+namespace internal {
+
+template <typename StaticVisitor>
+void StaticNewSpaceVisitor<StaticVisitor>::Initialize() {
+  table_.Register(
+      kVisitShortcutCandidate,
+      &FixedBodyVisitor<StaticVisitor, ConsString::BodyDescriptor, int>::Visit);
+
+  table_.Register(
+      kVisitConsString,
+      &FixedBodyVisitor<StaticVisitor, ConsString::BodyDescriptor, int>::Visit);
+
+  table_.Register(kVisitSlicedString,
+                  &FixedBodyVisitor<StaticVisitor, SlicedString::BodyDescriptor,
+                                    int>::Visit);
+
+  table_.Register(
+      kVisitSymbol,
+      &FixedBodyVisitor<StaticVisitor, Symbol::BodyDescriptor, int>::Visit);
+
+  table_.Register(kVisitFixedArray,
+                  &FlexibleBodyVisitor<StaticVisitor,
+                                       FixedArray::BodyDescriptor, int>::Visit);
+
+  table_.Register(kVisitFixedDoubleArray, &VisitFixedDoubleArray);
+  table_.Register(kVisitFixedTypedArray, &VisitFixedTypedArray);
+  table_.Register(kVisitFixedFloat64Array, &VisitFixedTypedArray);
+
+  table_.Register(
+      kVisitNativeContext,
+      &FixedBodyVisitor<StaticVisitor, Context::ScavengeBodyDescriptor,
+                        int>::Visit);
+
+  table_.Register(kVisitByteArray, &VisitByteArray);
+
+  table_.Register(
+      kVisitSharedFunctionInfo,
+      &FixedBodyVisitor<StaticVisitor, SharedFunctionInfo::BodyDescriptor,
+                        int>::Visit);
+
+  table_.Register(kVisitSeqOneByteString, &VisitSeqOneByteString);
+
+  table_.Register(kVisitSeqTwoByteString, &VisitSeqTwoByteString);
+
+  table_.Register(kVisitJSFunction, &VisitJSFunction);
+
+  table_.Register(kVisitJSArrayBuffer, &VisitJSArrayBuffer);
+
+  table_.Register(kVisitJSTypedArray, &VisitJSTypedArray);
+
+  table_.Register(kVisitJSDataView, &VisitJSDataView);
+
+  table_.Register(kVisitFreeSpace, &VisitFreeSpace);
+
+  table_.Register(kVisitJSWeakCollection, &JSObjectVisitor::Visit);
+
+  table_.Register(kVisitJSRegExp, &JSObjectVisitor::Visit);
+
+  table_.template RegisterSpecializations<DataObjectVisitor, kVisitDataObject,
+                                          kVisitDataObjectGeneric>();
+
+  table_.template RegisterSpecializations<JSObjectVisitor, kVisitJSObject,
+                                          kVisitJSObjectGeneric>();
+  table_.template RegisterSpecializations<StructVisitor, kVisitStruct,
+                                          kVisitStructGeneric>();
+}
+
+
+template <typename StaticVisitor>
+int StaticNewSpaceVisitor<StaticVisitor>::VisitJSArrayBuffer(
+    Map* map, HeapObject* object) {
+  Heap* heap = map->GetHeap();
+
+  STATIC_ASSERT(JSArrayBuffer::kWeakFirstViewOffset ==
+                JSArrayBuffer::kWeakNextOffset + kPointerSize);
+  VisitPointers(heap, HeapObject::RawField(
+                          object, JSArrayBuffer::BodyDescriptor::kStartOffset),
+                HeapObject::RawField(object, JSArrayBuffer::kWeakNextOffset));
+  VisitPointers(
+      heap, HeapObject::RawField(
+                object, JSArrayBuffer::kWeakNextOffset + 2 * kPointerSize),
+      HeapObject::RawField(object, JSArrayBuffer::kSizeWithInternalFields));
+  return JSArrayBuffer::kSizeWithInternalFields;
+}
+
+
+template <typename StaticVisitor>
+int StaticNewSpaceVisitor<StaticVisitor>::VisitJSTypedArray(
+    Map* map, HeapObject* object) {
+  VisitPointers(
+      map->GetHeap(),
+      HeapObject::RawField(object, JSTypedArray::BodyDescriptor::kStartOffset),
+      HeapObject::RawField(object, JSTypedArray::kWeakNextOffset));
+  VisitPointers(
+      map->GetHeap(), HeapObject::RawField(
+                          object, JSTypedArray::kWeakNextOffset + kPointerSize),
+      HeapObject::RawField(object, JSTypedArray::kSizeWithInternalFields));
+  return JSTypedArray::kSizeWithInternalFields;
+}
+
+
+template <typename StaticVisitor>
+int StaticNewSpaceVisitor<StaticVisitor>::VisitJSDataView(Map* map,
+                                                          HeapObject* object) {
+  VisitPointers(
+      map->GetHeap(),
+      HeapObject::RawField(object, JSDataView::BodyDescriptor::kStartOffset),
+      HeapObject::RawField(object, JSDataView::kWeakNextOffset));
+  VisitPointers(
+      map->GetHeap(),
+      HeapObject::RawField(object, JSDataView::kWeakNextOffset + kPointerSize),
+      HeapObject::RawField(object, JSDataView::kSizeWithInternalFields));
+  return JSDataView::kSizeWithInternalFields;
+}
+
+
+template <typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::Initialize() {
+  table_.Register(kVisitShortcutCandidate,
+                  &FixedBodyVisitor<StaticVisitor, ConsString::BodyDescriptor,
+                                    void>::Visit);
+
+  table_.Register(kVisitConsString,
+                  &FixedBodyVisitor<StaticVisitor, ConsString::BodyDescriptor,
+                                    void>::Visit);
+
+  table_.Register(kVisitSlicedString,
+                  &FixedBodyVisitor<StaticVisitor, SlicedString::BodyDescriptor,
+                                    void>::Visit);
+
+  table_.Register(
+      kVisitSymbol,
+      &FixedBodyVisitor<StaticVisitor, Symbol::BodyDescriptor, void>::Visit);
+
+  table_.Register(kVisitFixedArray, &FixedArrayVisitor::Visit);
+
+  table_.Register(kVisitFixedDoubleArray, &DataObjectVisitor::Visit);
+
+  table_.Register(kVisitFixedTypedArray, &DataObjectVisitor::Visit);
+
+  table_.Register(kVisitFixedFloat64Array, &DataObjectVisitor::Visit);
+
+  table_.Register(kVisitConstantPoolArray, &VisitConstantPoolArray);
+
+  table_.Register(kVisitNativeContext, &VisitNativeContext);
+
+  table_.Register(kVisitAllocationSite, &VisitAllocationSite);
+
+  table_.Register(kVisitByteArray, &DataObjectVisitor::Visit);
+
+  table_.Register(kVisitFreeSpace, &DataObjectVisitor::Visit);
+
+  table_.Register(kVisitSeqOneByteString, &DataObjectVisitor::Visit);
+
+  table_.Register(kVisitSeqTwoByteString, &DataObjectVisitor::Visit);
+
+  table_.Register(kVisitJSWeakCollection, &VisitWeakCollection);
+
+  table_.Register(
+      kVisitOddball,
+      &FixedBodyVisitor<StaticVisitor, Oddball::BodyDescriptor, void>::Visit);
+
+  table_.Register(kVisitMap, &VisitMap);
+
+  table_.Register(kVisitCode, &VisitCode);
+
+  table_.Register(kVisitSharedFunctionInfo, &VisitSharedFunctionInfo);
+
+  table_.Register(kVisitJSFunction, &VisitJSFunction);
+
+  table_.Register(kVisitJSArrayBuffer, &VisitJSArrayBuffer);
+
+  table_.Register(kVisitJSTypedArray, &VisitJSTypedArray);
+
+  table_.Register(kVisitJSDataView, &VisitJSDataView);
+
+  // Registration for kVisitJSRegExp is done by StaticVisitor.
+
+  table_.Register(
+      kVisitCell,
+      &FixedBodyVisitor<StaticVisitor, Cell::BodyDescriptor, void>::Visit);
+
+  table_.Register(kVisitPropertyCell, &VisitPropertyCell);
+
+  table_.template RegisterSpecializations<DataObjectVisitor, kVisitDataObject,
+                                          kVisitDataObjectGeneric>();
+
+  table_.template RegisterSpecializations<JSObjectVisitor, kVisitJSObject,
+                                          kVisitJSObjectGeneric>();
+
+  table_.template RegisterSpecializations<StructObjectVisitor, kVisitStruct,
+                                          kVisitStructGeneric>();
+}
+
+
+template <typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitCodeEntry(
+    Heap* heap, Address entry_address) {
+  Code* code = Code::cast(Code::GetObjectFromEntryAddress(entry_address));
+  heap->mark_compact_collector()->RecordCodeEntrySlot(entry_address, code);
+  StaticVisitor::MarkObject(heap, code);
+}
+
+
+template <typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitEmbeddedPointer(
+    Heap* heap, RelocInfo* rinfo) {
+  DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
+  HeapObject* object = HeapObject::cast(rinfo->target_object());
+  heap->mark_compact_collector()->RecordRelocSlot(rinfo, object);
+  // TODO(ulan): It could be better to record slots only for strongly embedded
+  // objects here and record slots for weakly embedded objects during clearing
+  // of non-live references in mark-compact.
+  if (!rinfo->host()->IsWeakObject(object)) {
+    StaticVisitor::MarkObject(heap, object);
+  }
+}
+
+
+template <typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitCell(Heap* heap,
+                                                    RelocInfo* rinfo) {
+  DCHECK(rinfo->rmode() == RelocInfo::CELL);
+  Cell* cell = rinfo->target_cell();
+  // No need to record slots because the cell space is not compacted during GC.
+  if (!rinfo->host()->IsWeakObject(cell)) {
+    StaticVisitor::MarkObject(heap, cell);
+  }
+}
+
+
+template <typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitDebugTarget(Heap* heap,
+                                                           RelocInfo* rinfo) {
+  DCHECK((RelocInfo::IsJSReturn(rinfo->rmode()) &&
+          rinfo->IsPatchedReturnSequence()) ||
+         (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
+          rinfo->IsPatchedDebugBreakSlotSequence()));
+  Code* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
+  heap->mark_compact_collector()->RecordRelocSlot(rinfo, target);
+  StaticVisitor::MarkObject(heap, target);
+}
+
+
+template <typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitCodeTarget(Heap* heap,
+                                                          RelocInfo* rinfo) {
+  DCHECK(RelocInfo::IsCodeTarget(rinfo->rmode()));
+  Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+  // Monomorphic ICs are preserved when possible, but need to be flushed
+  // when they might be keeping a Context alive, or when the heap is about
+  // to be serialized.
+  if (FLAG_cleanup_code_caches_at_gc && target->is_inline_cache_stub() &&
+      (target->ic_state() == MEGAMORPHIC || target->ic_state() == GENERIC ||
+       target->ic_state() == POLYMORPHIC ||
+       (heap->flush_monomorphic_ics() && !target->is_weak_stub()) ||
+       heap->isolate()->serializer_enabled() ||
+       target->ic_age() != heap->global_ic_age() ||
+       target->is_invalidated_weak_stub())) {
+    ICUtility::Clear(heap->isolate(), rinfo->pc(),
+                     rinfo->host()->constant_pool());
+    target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+  }
+  heap->mark_compact_collector()->RecordRelocSlot(rinfo, target);
+  StaticVisitor::MarkObject(heap, target);
+}
+
+
+template <typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitCodeAgeSequence(
+    Heap* heap, RelocInfo* rinfo) {
+  DCHECK(RelocInfo::IsCodeAgeSequence(rinfo->rmode()));
+  Code* target = rinfo->code_age_stub();
+  DCHECK(target != NULL);
+  heap->mark_compact_collector()->RecordRelocSlot(rinfo, target);
+  StaticVisitor::MarkObject(heap, target);
+}
+
+
+template <typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitNativeContext(
+    Map* map, HeapObject* object) {
+  FixedBodyVisitor<StaticVisitor, Context::MarkCompactBodyDescriptor,
+                   void>::Visit(map, object);
+
+  MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector();
+  for (int idx = Context::FIRST_WEAK_SLOT; idx < Context::NATIVE_CONTEXT_SLOTS;
+       ++idx) {
+    Object** slot = Context::cast(object)->RawFieldOfElementAt(idx);
+    collector->RecordSlot(slot, slot, *slot);
+  }
+}
+
+
+template <typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitMap(Map* map,
+                                                   HeapObject* object) {
+  Heap* heap = map->GetHeap();
+  Map* map_object = Map::cast(object);
+
+  // Clears the cache of ICs related to this map.
+  if (FLAG_cleanup_code_caches_at_gc) {
+    map_object->ClearCodeCache(heap);
+  }
+
+  // When map collection is enabled we have to mark through map's transitions
+  // and back pointers in a special way to make these links weak.
+  if (FLAG_collect_maps && map_object->CanTransition()) {
+    MarkMapContents(heap, map_object);
+  } else {
+    StaticVisitor::VisitPointers(
+        heap, HeapObject::RawField(object, Map::kPointerFieldsBeginOffset),
+        HeapObject::RawField(object, Map::kPointerFieldsEndOffset));
+  }
+}
+
+
+template <typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitPropertyCell(
+    Map* map, HeapObject* object) {
+  Heap* heap = map->GetHeap();
+
+  Object** slot =
+      HeapObject::RawField(object, PropertyCell::kDependentCodeOffset);
+  if (FLAG_collect_maps) {
+    // Mark the property cell's dependent code array but do not push it onto
+    // the marking stack; this will make references from it weak. We clean up
+    // dead code when we iterate over property cells in ClearNonLiveReferences.
+    HeapObject* obj = HeapObject::cast(*slot);
+    heap->mark_compact_collector()->RecordSlot(slot, slot, obj);
+    StaticVisitor::MarkObjectWithoutPush(heap, obj);
+  } else {
+    StaticVisitor::VisitPointer(heap, slot);
+  }
+
+  StaticVisitor::VisitPointers(
+      heap,
+      HeapObject::RawField(object, PropertyCell::kPointerFieldsBeginOffset),
+      HeapObject::RawField(object, PropertyCell::kPointerFieldsEndOffset));
+}
+
+
+template <typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitAllocationSite(
+    Map* map, HeapObject* object) {
+  Heap* heap = map->GetHeap();
+
+  Object** slot =
+      HeapObject::RawField(object, AllocationSite::kDependentCodeOffset);
+  if (FLAG_collect_maps) {
+    // Mark the allocation site's dependent code array but do not push it onto
+    // the marking stack; this will make references from it weak. We clean up
+    // dead code when we iterate over allocation sites in
+    // ClearNonLiveReferences.
+    HeapObject* obj = HeapObject::cast(*slot);
+    heap->mark_compact_collector()->RecordSlot(slot, slot, obj);
+    StaticVisitor::MarkObjectWithoutPush(heap, obj);
+  } else {
+    StaticVisitor::VisitPointer(heap, slot);
+  }
+
+  StaticVisitor::VisitPointers(
+      heap,
+      HeapObject::RawField(object, AllocationSite::kPointerFieldsBeginOffset),
+      HeapObject::RawField(object, AllocationSite::kPointerFieldsEndOffset));
+}
+
+
+template <typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitWeakCollection(
+    Map* map, HeapObject* object) {
+  Heap* heap = map->GetHeap();
+  JSWeakCollection* weak_collection =
+      reinterpret_cast<JSWeakCollection*>(object);
+
+  // Enqueue weak collection in linked list of encountered weak collections.
+  if (weak_collection->next() == heap->undefined_value()) {
+    weak_collection->set_next(heap->encountered_weak_collections());
+    heap->set_encountered_weak_collections(weak_collection);
+  }
+
+  // Skip visiting the backing hash table containing the mappings and the
+  // pointer to the other enqueued weak collections; both are post-processed.
+  StaticVisitor::VisitPointers(
+      heap, HeapObject::RawField(object, JSWeakCollection::kPropertiesOffset),
+      HeapObject::RawField(object, JSWeakCollection::kTableOffset));
+  STATIC_ASSERT(JSWeakCollection::kTableOffset + kPointerSize ==
+                JSWeakCollection::kNextOffset);
+  STATIC_ASSERT(JSWeakCollection::kNextOffset + kPointerSize ==
+                JSWeakCollection::kSize);
+
+  // Partially initialized weak collection is enqueued, but table is ignored.
+  if (!weak_collection->table()->IsHashTable()) return;
+
+  // Mark the backing hash table without pushing it on the marking stack.
+  Object** slot = HeapObject::RawField(object, JSWeakCollection::kTableOffset);
+  HeapObject* obj = HeapObject::cast(*slot);
+  heap->mark_compact_collector()->RecordSlot(slot, slot, obj);
+  StaticVisitor::MarkObjectWithoutPush(heap, obj);
+}
+
+
+template <typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitCode(Map* map,
+                                                    HeapObject* object) {
+  Heap* heap = map->GetHeap();
+  Code* code = Code::cast(object);
+  if (FLAG_age_code && !heap->isolate()->serializer_enabled()) {
+    code->MakeOlder(heap->mark_compact_collector()->marking_parity());
+  }
+  code->CodeIterateBody<StaticVisitor>(heap);
+}
+
+
+template <typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitSharedFunctionInfo(
+    Map* map, HeapObject* object) {
+  Heap* heap = map->GetHeap();
+  SharedFunctionInfo* shared = SharedFunctionInfo::cast(object);
+  if (shared->ic_age() != heap->global_ic_age()) {
+    shared->ResetForNewContext(heap->global_ic_age());
+  }
+  if (FLAG_cleanup_code_caches_at_gc) {
+    shared->ClearTypeFeedbackInfo();
+  }
+  if (FLAG_cache_optimized_code && FLAG_flush_optimized_code_cache &&
+      !shared->optimized_code_map()->IsSmi()) {
+    // Always flush the optimized code map if requested by flag.
+    shared->ClearOptimizedCodeMap();
+  }
+  MarkCompactCollector* collector = heap->mark_compact_collector();
+  if (collector->is_code_flushing_enabled()) {
+    if (FLAG_cache_optimized_code && !shared->optimized_code_map()->IsSmi()) {
+      // Add the shared function info holding an optimized code map to
+      // the code flusher for processing of code maps after marking.
+      collector->code_flusher()->AddOptimizedCodeMap(shared);
+      // Treat all references within the code map weakly by marking the
+      // code map itself but not pushing it onto the marking deque.
+      FixedArray* code_map = FixedArray::cast(shared->optimized_code_map());
+      StaticVisitor::MarkObjectWithoutPush(heap, code_map);
+    }
+    if (IsFlushable(heap, shared)) {
+      // This function's code looks flushable. But we have to postpone
+      // the decision until we see all functions that point to the same
+      // SharedFunctionInfo because some of them might be optimized.
+      // That would also make the non-optimized version of the code
+      // non-flushable, because it is required for bailing out from
+      // optimized code.
+      collector->code_flusher()->AddCandidate(shared);
+      // Treat the reference to the code object weakly.
+      VisitSharedFunctionInfoWeakCode(heap, object);
+      return;
+    }
+  } else {
+    if (FLAG_cache_optimized_code && !shared->optimized_code_map()->IsSmi()) {
+      // Flush optimized code map on major GCs without code flushing,
+      // needed because cached code doesn't contain breakpoints.
+      shared->ClearOptimizedCodeMap();
+    }
+  }
+  VisitSharedFunctionInfoStrongCode(heap, object);
+}
+
+
+template <typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitConstantPoolArray(
+    Map* map, HeapObject* object) {
+  Heap* heap = map->GetHeap();
+  ConstantPoolArray* array = ConstantPoolArray::cast(object);
+  ConstantPoolArray::Iterator code_iter(array, ConstantPoolArray::CODE_PTR);
+  while (!code_iter.is_finished()) {
+    Address code_entry = reinterpret_cast<Address>(
+        array->RawFieldOfElementAt(code_iter.next_index()));
+    StaticVisitor::VisitCodeEntry(heap, code_entry);
+  }
+
+  ConstantPoolArray::Iterator heap_iter(array, ConstantPoolArray::HEAP_PTR);
+  while (!heap_iter.is_finished()) {
+    Object** slot = array->RawFieldOfElementAt(heap_iter.next_index());
+    HeapObject* object = HeapObject::cast(*slot);
+    heap->mark_compact_collector()->RecordSlot(slot, slot, object);
+    bool is_weak_object =
+        (array->get_weak_object_state() ==
+             ConstantPoolArray::WEAK_OBJECTS_IN_OPTIMIZED_CODE &&
+         Code::IsWeakObjectInOptimizedCode(object)) ||
+        (array->get_weak_object_state() ==
+             ConstantPoolArray::WEAK_OBJECTS_IN_IC &&
+         Code::IsWeakObjectInIC(object));
+    if (!is_weak_object) {
+      StaticVisitor::MarkObject(heap, object);
+    }
+  }
+}
+
+
+template <typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitJSFunction(Map* map,
+                                                          HeapObject* object) {
+  Heap* heap = map->GetHeap();
+  JSFunction* function = JSFunction::cast(object);
+  MarkCompactCollector* collector = heap->mark_compact_collector();
+  if (collector->is_code_flushing_enabled()) {
+    if (IsFlushable(heap, function)) {
+      // This function's code looks flushable. But we have to postpone
+      // the decision until we see all functions that point to the same
+      // SharedFunctionInfo because some of them might be optimized.
+      // That would also make the non-optimized version of the code
+      // non-flushable, because it is required for bailing out from
+      // optimized code.
+      collector->code_flusher()->AddCandidate(function);
+      // Visit shared function info immediately to avoid double checking
+      // of its flushability later. This is just an optimization because
+      // the shared function info would eventually be visited.
+      SharedFunctionInfo* shared = function->shared();
+      if (StaticVisitor::MarkObjectWithoutPush(heap, shared)) {
+        StaticVisitor::MarkObject(heap, shared->map());
+        VisitSharedFunctionInfoWeakCode(heap, shared);
+      }
+      // Treat the reference to the code object weakly.
+      VisitJSFunctionWeakCode(heap, object);
+      return;
+    } else {
+      // Visit all unoptimized code objects to prevent flushing them.
+      StaticVisitor::MarkObject(heap, function->shared()->code());
+      if (function->code()->kind() == Code::OPTIMIZED_FUNCTION) {
+        MarkInlinedFunctionsCode(heap, function->code());
+      }
+    }
+  }
+  VisitJSFunctionStrongCode(heap, object);
+}
+
+
+template <typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitJSRegExp(Map* map,
+                                                        HeapObject* object) {
+  int last_property_offset =
+      JSRegExp::kSize + kPointerSize * map->inobject_properties();
+  StaticVisitor::VisitPointers(
+      map->GetHeap(), HeapObject::RawField(object, JSRegExp::kPropertiesOffset),
+      HeapObject::RawField(object, last_property_offset));
+}
+
+
+template <typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitJSArrayBuffer(
+    Map* map, HeapObject* object) {
+  Heap* heap = map->GetHeap();
+
+  STATIC_ASSERT(JSArrayBuffer::kWeakFirstViewOffset ==
+                JSArrayBuffer::kWeakNextOffset + kPointerSize);
+  StaticVisitor::VisitPointers(
+      heap,
+      HeapObject::RawField(object, JSArrayBuffer::BodyDescriptor::kStartOffset),
+      HeapObject::RawField(object, JSArrayBuffer::kWeakNextOffset));
+  StaticVisitor::VisitPointers(
+      heap, HeapObject::RawField(
+                object, JSArrayBuffer::kWeakNextOffset + 2 * kPointerSize),
+      HeapObject::RawField(object, JSArrayBuffer::kSizeWithInternalFields));
+}
+
+
+template <typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitJSTypedArray(
+    Map* map, HeapObject* object) {
+  StaticVisitor::VisitPointers(
+      map->GetHeap(),
+      HeapObject::RawField(object, JSTypedArray::BodyDescriptor::kStartOffset),
+      HeapObject::RawField(object, JSTypedArray::kWeakNextOffset));
+  StaticVisitor::VisitPointers(
+      map->GetHeap(), HeapObject::RawField(
+                          object, JSTypedArray::kWeakNextOffset + kPointerSize),
+      HeapObject::RawField(object, JSTypedArray::kSizeWithInternalFields));
+}
+
+
+template <typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitJSDataView(Map* map,
+                                                          HeapObject* object) {
+  StaticVisitor::VisitPointers(
+      map->GetHeap(),
+      HeapObject::RawField(object, JSDataView::BodyDescriptor::kStartOffset),
+      HeapObject::RawField(object, JSDataView::kWeakNextOffset));
+  StaticVisitor::VisitPointers(
+      map->GetHeap(),
+      HeapObject::RawField(object, JSDataView::kWeakNextOffset + kPointerSize),
+      HeapObject::RawField(object, JSDataView::kSizeWithInternalFields));
+}
+
+
+template <typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::MarkMapContents(Heap* heap,
+                                                          Map* map) {
+  // Make sure that the back pointer stored either in the map itself or
+  // inside its transitions array is marked. Skip recording the back
+  // pointer slot since map space is not compacted.
+  StaticVisitor::MarkObject(heap, HeapObject::cast(map->GetBackPointer()));
+
+  // Treat pointers in the transitions array as weak and also mark that
+  // array to prevent visiting it later. Skip recording the transition
+  // array slot, since it will be implicitly recorded when the pointer
+  // fields of this map are visited.
+  if (map->HasTransitionArray()) {
+    TransitionArray* transitions = map->transitions();
+    MarkTransitionArray(heap, transitions);
+  }
+
+  // Since descriptor arrays are potentially shared, ensure that only the
+  // descriptors that belong to this map are marked. The first time a
+  // non-empty descriptor array is marked, its header is also visited. The slot
+  // holding the descriptor array will be implicitly recorded when the pointer
+  // fields of this map are visited.
+  DescriptorArray* descriptors = map->instance_descriptors();
+  if (StaticVisitor::MarkObjectWithoutPush(heap, descriptors) &&
+      descriptors->length() > 0) {
+    StaticVisitor::VisitPointers(heap, descriptors->GetFirstElementAddress(),
+                                 descriptors->GetDescriptorEndSlot(0));
+  }
+  int start = 0;
+  int end = map->NumberOfOwnDescriptors();
+  if (start < end) {
+    StaticVisitor::VisitPointers(heap,
+                                 descriptors->GetDescriptorStartSlot(start),
+                                 descriptors->GetDescriptorEndSlot(end));
+  }
+
+  // Mark the prototype-dependent code array but do not push it onto the
+  // marking stack; this will make references from it weak. We clean up dead
+  // code when we iterate over maps in ClearNonLiveTransitions.
+  Object** slot = HeapObject::RawField(map, Map::kDependentCodeOffset);
+  HeapObject* obj = HeapObject::cast(*slot);
+  heap->mark_compact_collector()->RecordSlot(slot, slot, obj);
+  StaticVisitor::MarkObjectWithoutPush(heap, obj);
+
+  // Mark the pointer fields of the Map. Since the transitions array has
+  // been marked already, it is fine that one of these fields contains a
+  // pointer to it.
+  StaticVisitor::VisitPointers(
+      heap, HeapObject::RawField(map, Map::kPointerFieldsBeginOffset),
+      HeapObject::RawField(map, Map::kPointerFieldsEndOffset));
+}
+
+
+template <typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::MarkTransitionArray(
+    Heap* heap, TransitionArray* transitions) {
+  if (!StaticVisitor::MarkObjectWithoutPush(heap, transitions)) return;
+
+  // Simple transitions have neither keys nor prototype transitions.
+  if (transitions->IsSimpleTransition()) return;
+
+  if (transitions->HasPrototypeTransitions()) {
+    // Mark the prototype transitions array but do not push it onto the
+    // marking stack; this will make references from it weak. We clean up
+    // dead prototype transitions in ClearNonLiveTransitions.
+    Object** slot = transitions->GetPrototypeTransitionsSlot();
+    HeapObject* obj = HeapObject::cast(*slot);
+    heap->mark_compact_collector()->RecordSlot(slot, slot, obj);
+    StaticVisitor::MarkObjectWithoutPush(heap, obj);
+  }
+
+  for (int i = 0; i < transitions->number_of_transitions(); ++i) {
+    StaticVisitor::VisitPointer(heap, transitions->GetKeySlot(i));
+  }
+}
+
+
+template <typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::MarkInlinedFunctionsCode(Heap* heap,
+                                                                   Code* code) {
+  // Skip in absence of inlining.
+  // TODO(turbofan): Revisit once we support inlining.
+  if (code->is_turbofanned()) return;
+  // For an optimized function we should retain both the non-optimized version
+  // of its code and the non-optimized versions of all inlined functions.
+  // This is required to support bailing out from inlined code.
+  DeoptimizationInputData* data =
+      DeoptimizationInputData::cast(code->deoptimization_data());
+  FixedArray* literals = data->LiteralArray();
+  for (int i = 0, count = data->InlinedFunctionCount()->value(); i < count;
+       i++) {
+    JSFunction* inlined = JSFunction::cast(literals->get(i));
+    StaticVisitor::MarkObject(heap, inlined->shared()->code());
+  }
+}
+
+
+inline static bool IsValidNonBuiltinContext(Object* context) {
+  return context->IsContext() &&
+         !Context::cast(context)->global_object()->IsJSBuiltinsObject();
+}
+
+
+inline static bool HasSourceCode(Heap* heap, SharedFunctionInfo* info) {
+  Object* undefined = heap->undefined_value();
+  return (info->script() != undefined) &&
+         (reinterpret_cast<Script*>(info->script())->source() != undefined);
+}
+
+
+template <typename StaticVisitor>
+bool StaticMarkingVisitor<StaticVisitor>::IsFlushable(Heap* heap,
+                                                      JSFunction* function) {
+  SharedFunctionInfo* shared_info = function->shared();
+
+  // The code is either on the stack, in the compilation cache, or referenced
+  // by an optimized version of the function.
+  MarkBit code_mark = Marking::MarkBitFrom(function->code());
+  if (code_mark.Get()) {
+    return false;
+  }
+
+  // The function must have a valid context and not be a builtin.
+  if (!IsValidNonBuiltinContext(function->context())) {
+    return false;
+  }
+
+  // We do not (yet) flush code for optimized functions.
+  if (function->code() != shared_info->code()) {
+    return false;
+  }
+
+  // Check age of optimized code.
+  if (FLAG_age_code && !function->code()->IsOld()) {
+    return false;
+  }
+
+  return IsFlushable(heap, shared_info);
+}
+
+
+template <typename StaticVisitor>
+bool StaticMarkingVisitor<StaticVisitor>::IsFlushable(
+    Heap* heap, SharedFunctionInfo* shared_info) {
+  // The code is either on the stack, in the compilation cache, or referenced
+  // by an optimized version of the function.
+  MarkBit code_mark = Marking::MarkBitFrom(shared_info->code());
+  if (code_mark.Get()) {
+    return false;
+  }
+
+  // The function must be compiled and have the source code available so that
+  // it can be recompiled in case we need the function again.
+  if (!(shared_info->is_compiled() && HasSourceCode(heap, shared_info))) {
+    return false;
+  }
+
+  // We never flush code for API functions.
+  Object* function_data = shared_info->function_data();
+  if (function_data->IsFunctionTemplateInfo()) {
+    return false;
+  }
+
+  // Only flush code for functions.
+  if (shared_info->code()->kind() != Code::FUNCTION) {
+    return false;
+  }
+
+  // Function must be lazy compilable.
+  if (!shared_info->allows_lazy_compilation()) {
+    return false;
+  }
+
+  // We do not (yet?) flush code for generator functions, because we don't know
+  // if there are still live activations (generator objects) on the heap.
+  if (shared_info->is_generator()) {
+    return false;
+  }
+
+  // If this is a full script wrapped in a function we do not flush the code.
+  if (shared_info->is_toplevel()) {
+    return false;
+  }
+
+  // If this is a function initialized with %SetCode then the one-to-one
+  // relation between SharedFunctionInfo and Code is broken.
+  if (shared_info->dont_flush()) {
+    return false;
+  }
+
+  // Check age of code. If code aging is disabled we never flush.
+  if (!FLAG_age_code || !shared_info->code()->IsOld()) {
+    return false;
+  }
+
+  return true;
+}
+
+
+template <typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitSharedFunctionInfoStrongCode(
+    Heap* heap, HeapObject* object) {
+  Object** start_slot = HeapObject::RawField(
+      object, SharedFunctionInfo::BodyDescriptor::kStartOffset);
+  Object** end_slot = HeapObject::RawField(
+      object, SharedFunctionInfo::BodyDescriptor::kEndOffset);
+  StaticVisitor::VisitPointers(heap, start_slot, end_slot);
+}
+
+
+template <typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitSharedFunctionInfoWeakCode(
+    Heap* heap, HeapObject* object) {
+  Object** name_slot =
+      HeapObject::RawField(object, SharedFunctionInfo::kNameOffset);
+  StaticVisitor::VisitPointer(heap, name_slot);
+
+  // Skip visiting kCodeOffset as it is treated weakly here.
+  STATIC_ASSERT(SharedFunctionInfo::kNameOffset + kPointerSize ==
+                SharedFunctionInfo::kCodeOffset);
+  STATIC_ASSERT(SharedFunctionInfo::kCodeOffset + kPointerSize ==
+                SharedFunctionInfo::kOptimizedCodeMapOffset);
+
+  Object** start_slot =
+      HeapObject::RawField(object, SharedFunctionInfo::kOptimizedCodeMapOffset);
+  Object** end_slot = HeapObject::RawField(
+      object, SharedFunctionInfo::BodyDescriptor::kEndOffset);
+  StaticVisitor::VisitPointers(heap, start_slot, end_slot);
+}
+
+
+template <typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitJSFunctionStrongCode(
+    Heap* heap, HeapObject* object) {
+  Object** start_slot =
+      HeapObject::RawField(object, JSFunction::kPropertiesOffset);
+  Object** end_slot =
+      HeapObject::RawField(object, JSFunction::kCodeEntryOffset);
+  StaticVisitor::VisitPointers(heap, start_slot, end_slot);
+
+  VisitCodeEntry(heap, object->address() + JSFunction::kCodeEntryOffset);
+  STATIC_ASSERT(JSFunction::kCodeEntryOffset + kPointerSize ==
+                JSFunction::kPrototypeOrInitialMapOffset);
+
+  start_slot =
+      HeapObject::RawField(object, JSFunction::kPrototypeOrInitialMapOffset);
+  end_slot = HeapObject::RawField(object, JSFunction::kNonWeakFieldsEndOffset);
+  StaticVisitor::VisitPointers(heap, start_slot, end_slot);
+}
+
+
+template <typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitJSFunctionWeakCode(
+    Heap* heap, HeapObject* object) {
+  Object** start_slot =
+      HeapObject::RawField(object, JSFunction::kPropertiesOffset);
+  Object** end_slot =
+      HeapObject::RawField(object, JSFunction::kCodeEntryOffset);
+  StaticVisitor::VisitPointers(heap, start_slot, end_slot);
+
+  // Skip visiting kCodeEntryOffset as it is treated weakly here.
+  STATIC_ASSERT(JSFunction::kCodeEntryOffset + kPointerSize ==
+                JSFunction::kPrototypeOrInitialMapOffset);
+
+  start_slot =
+      HeapObject::RawField(object, JSFunction::kPrototypeOrInitialMapOffset);
+  end_slot = HeapObject::RawField(object, JSFunction::kNonWeakFieldsEndOffset);
+  StaticVisitor::VisitPointers(heap, start_slot, end_slot);
+}
+
+
+void Code::CodeIterateBody(ObjectVisitor* v) {
+  int mode_mask = RelocInfo::kCodeTargetMask |
+                  RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
+                  RelocInfo::ModeMask(RelocInfo::CELL) |
+                  RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
+                  RelocInfo::ModeMask(RelocInfo::JS_RETURN) |
+                  RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT) |
+                  RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
+
+  // There are two places where we iterate code bodies: here and the
+  // templated CodeIterateBody (below). They should be kept in sync.
+  IteratePointer(v, kRelocationInfoOffset);
+  IteratePointer(v, kHandlerTableOffset);
+  IteratePointer(v, kDeoptimizationDataOffset);
+  IteratePointer(v, kTypeFeedbackInfoOffset);
+  IterateNextCodeLink(v, kNextCodeLinkOffset);
+  IteratePointer(v, kConstantPoolOffset);
+
+  RelocIterator it(this, mode_mask);
+  Isolate* isolate = this->GetIsolate();
+  for (; !it.done(); it.next()) {
+    it.rinfo()->Visit(isolate, v);
+  }
+}
+
+
+template <typename StaticVisitor>
+void Code::CodeIterateBody(Heap* heap) {
+  int mode_mask = RelocInfo::kCodeTargetMask |
+                  RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
+                  RelocInfo::ModeMask(RelocInfo::CELL) |
+                  RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
+                  RelocInfo::ModeMask(RelocInfo::JS_RETURN) |
+                  RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT) |
+                  RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
+
+  // There are two places where we iterate code bodies: here and the non-
+  // templated CodeIterateBody (above). They should be kept in sync.
+  StaticVisitor::VisitPointer(
+      heap,
+      reinterpret_cast<Object**>(this->address() + kRelocationInfoOffset));
+  StaticVisitor::VisitPointer(
+      heap, reinterpret_cast<Object**>(this->address() + kHandlerTableOffset));
+  StaticVisitor::VisitPointer(
+      heap,
+      reinterpret_cast<Object**>(this->address() + kDeoptimizationDataOffset));
+  StaticVisitor::VisitPointer(
+      heap,
+      reinterpret_cast<Object**>(this->address() + kTypeFeedbackInfoOffset));
+  StaticVisitor::VisitNextCodeLink(
+      heap, reinterpret_cast<Object**>(this->address() + kNextCodeLinkOffset));
+  StaticVisitor::VisitPointer(
+      heap, reinterpret_cast<Object**>(this->address() + kConstantPoolOffset));
+
+
+  RelocIterator it(this, mode_mask);
+  for (; !it.done(); it.next()) {
+    it.rinfo()->template Visit<StaticVisitor>(heap);
+  }
+}
+}
+}  // namespace v8::internal
+
+#endif  // V8_OBJECTS_VISITING_INL_H_
diff --git a/src/heap/objects-visiting.cc b/src/heap/objects-visiting.cc
new file mode 100644
index 0000000..a0fc231
--- /dev/null
+++ b/src/heap/objects-visiting.cc
@@ -0,0 +1,413 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/heap/objects-visiting.h"
+
+namespace v8 {
+namespace internal {
+
+
+StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
+    int instance_type, int instance_size) {
+  if (instance_type < FIRST_NONSTRING_TYPE) {
+    switch (instance_type & kStringRepresentationMask) {
+      case kSeqStringTag:
+        if ((instance_type & kStringEncodingMask) == kOneByteStringTag) {
+          return kVisitSeqOneByteString;
+        } else {
+          return kVisitSeqTwoByteString;
+        }
+
+      case kConsStringTag:
+        if (IsShortcutCandidate(instance_type)) {
+          return kVisitShortcutCandidate;
+        } else {
+          return kVisitConsString;
+        }
+
+      case kSlicedStringTag:
+        return kVisitSlicedString;
+
+      case kExternalStringTag:
+        return GetVisitorIdForSize(kVisitDataObject, kVisitDataObjectGeneric,
+                                   instance_size);
+    }
+    UNREACHABLE();
+  }
+
+  switch (instance_type) {
+    case BYTE_ARRAY_TYPE:
+      return kVisitByteArray;
+
+    case FREE_SPACE_TYPE:
+      return kVisitFreeSpace;
+
+    case FIXED_ARRAY_TYPE:
+      return kVisitFixedArray;
+
+    case FIXED_DOUBLE_ARRAY_TYPE:
+      return kVisitFixedDoubleArray;
+
+    case CONSTANT_POOL_ARRAY_TYPE:
+      return kVisitConstantPoolArray;
+
+    case ODDBALL_TYPE:
+      return kVisitOddball;
+
+    case MAP_TYPE:
+      return kVisitMap;
+
+    case CODE_TYPE:
+      return kVisitCode;
+
+    case CELL_TYPE:
+      return kVisitCell;
+
+    case PROPERTY_CELL_TYPE:
+      return kVisitPropertyCell;
+
+    case JS_SET_TYPE:
+      return GetVisitorIdForSize(kVisitStruct, kVisitStructGeneric,
+                                 JSSet::kSize);
+
+    case JS_MAP_TYPE:
+      return GetVisitorIdForSize(kVisitStruct, kVisitStructGeneric,
+                                 JSMap::kSize);
+
+    case JS_WEAK_MAP_TYPE:
+    case JS_WEAK_SET_TYPE:
+      return kVisitJSWeakCollection;
+
+    case JS_REGEXP_TYPE:
+      return kVisitJSRegExp;
+
+    case SHARED_FUNCTION_INFO_TYPE:
+      return kVisitSharedFunctionInfo;
+
+    case JS_PROXY_TYPE:
+      return GetVisitorIdForSize(kVisitStruct, kVisitStructGeneric,
+                                 JSProxy::kSize);
+
+    case JS_FUNCTION_PROXY_TYPE:
+      return GetVisitorIdForSize(kVisitStruct, kVisitStructGeneric,
+                                 JSFunctionProxy::kSize);
+
+    case FOREIGN_TYPE:
+      return GetVisitorIdForSize(kVisitDataObject, kVisitDataObjectGeneric,
+                                 Foreign::kSize);
+
+    case SYMBOL_TYPE:
+      return kVisitSymbol;
+
+    case FILLER_TYPE:
+      return kVisitDataObjectGeneric;
+
+    case JS_ARRAY_BUFFER_TYPE:
+      return kVisitJSArrayBuffer;
+
+    case JS_TYPED_ARRAY_TYPE:
+      return kVisitJSTypedArray;
+
+    case JS_DATA_VIEW_TYPE:
+      return kVisitJSDataView;
+
+    case JS_OBJECT_TYPE:
+    case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
+    case JS_GENERATOR_OBJECT_TYPE:
+    case JS_MODULE_TYPE:
+    case JS_VALUE_TYPE:
+    case JS_DATE_TYPE:
+    case JS_ARRAY_TYPE:
+    case JS_GLOBAL_PROXY_TYPE:
+    case JS_GLOBAL_OBJECT_TYPE:
+    case JS_BUILTINS_OBJECT_TYPE:
+    case JS_MESSAGE_OBJECT_TYPE:
+    case JS_SET_ITERATOR_TYPE:
+    case JS_MAP_ITERATOR_TYPE:
+      return GetVisitorIdForSize(kVisitJSObject, kVisitJSObjectGeneric,
+                                 instance_size);
+
+    case JS_FUNCTION_TYPE:
+      return kVisitJSFunction;
+
+    case HEAP_NUMBER_TYPE:
+    case MUTABLE_HEAP_NUMBER_TYPE:
+#define EXTERNAL_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+  case EXTERNAL_##TYPE##_ARRAY_TYPE:
+
+      TYPED_ARRAYS(EXTERNAL_ARRAY_CASE)
+      return GetVisitorIdForSize(kVisitDataObject, kVisitDataObjectGeneric,
+                                 instance_size);
+#undef EXTERNAL_ARRAY_CASE
+
+    case FIXED_UINT8_ARRAY_TYPE:
+    case FIXED_INT8_ARRAY_TYPE:
+    case FIXED_UINT16_ARRAY_TYPE:
+    case FIXED_INT16_ARRAY_TYPE:
+    case FIXED_UINT32_ARRAY_TYPE:
+    case FIXED_INT32_ARRAY_TYPE:
+    case FIXED_FLOAT32_ARRAY_TYPE:
+    case FIXED_UINT8_CLAMPED_ARRAY_TYPE:
+      return kVisitFixedTypedArray;
+
+    case FIXED_FLOAT64_ARRAY_TYPE:
+      return kVisitFixedFloat64Array;
+
+#define MAKE_STRUCT_CASE(NAME, Name, name) case NAME##_TYPE:
+      STRUCT_LIST(MAKE_STRUCT_CASE)
+#undef MAKE_STRUCT_CASE
+      if (instance_type == ALLOCATION_SITE_TYPE) {
+        return kVisitAllocationSite;
+      }
+
+      return GetVisitorIdForSize(kVisitStruct, kVisitStructGeneric,
+                                 instance_size);
+
+    default:
+      UNREACHABLE();
+      return kVisitorIdCount;
+  }
+}
+
+
+// We don't record weak slots during marking or scavenges. Instead we do it
+// once when we complete the mark-compact cycle.  Note that the write barrier
+// has no effect if we are already in the middle of a compacting mark-sweep
+// cycle, so we have to record slots manually.
+static bool MustRecordSlots(Heap* heap) {
+  return heap->gc_state() == Heap::MARK_COMPACT &&
+         heap->mark_compact_collector()->is_compacting();
+}
+
+
+template <class T>
+struct WeakListVisitor;
+
+
+template <class T>
+Object* VisitWeakList(Heap* heap, Object* list, WeakObjectRetainer* retainer) {
+  Object* undefined = heap->undefined_value();
+  Object* head = undefined;
+  T* tail = NULL;
+  MarkCompactCollector* collector = heap->mark_compact_collector();
+  bool record_slots = MustRecordSlots(heap);
+  while (list != undefined) {
+    // Check whether to keep the candidate in the list.
+    T* candidate = reinterpret_cast<T*>(list);
+    Object* retained = retainer->RetainAs(list);
+    if (retained != NULL) {
+      if (head == undefined) {
+        // First element in the list.
+        head = retained;
+      } else {
+        // Subsequent elements in the list.
+        DCHECK(tail != NULL);
+        WeakListVisitor<T>::SetWeakNext(tail, retained);
+        if (record_slots) {
+          Object** next_slot =
+              HeapObject::RawField(tail, WeakListVisitor<T>::WeakNextOffset());
+          collector->RecordSlot(next_slot, next_slot, retained);
+        }
+      }
+      // Retained object is new tail.
+      DCHECK(!retained->IsUndefined());
+      candidate = reinterpret_cast<T*>(retained);
+      tail = candidate;
+
+
+      // tail is a live object, visit it.
+      WeakListVisitor<T>::VisitLiveObject(heap, tail, retainer);
+    } else {
+      WeakListVisitor<T>::VisitPhantomObject(heap, candidate);
+    }
+
+    // Move to next element in the list.
+    list = WeakListVisitor<T>::WeakNext(candidate);
+  }
+
+  // Terminate the list if there are one or more elements.
+  if (tail != NULL) {
+    WeakListVisitor<T>::SetWeakNext(tail, undefined);
+  }
+  return head;
+}
+
+
+template <class T>
+static void ClearWeakList(Heap* heap, Object* list) {
+  Object* undefined = heap->undefined_value();
+  while (list != undefined) {
+    T* candidate = reinterpret_cast<T*>(list);
+    list = WeakListVisitor<T>::WeakNext(candidate);
+    WeakListVisitor<T>::SetWeakNext(candidate, undefined);
+  }
+}
+
+
+template <>
+struct WeakListVisitor<JSFunction> {
+  static void SetWeakNext(JSFunction* function, Object* next) {
+    function->set_next_function_link(next);
+  }
+
+  static Object* WeakNext(JSFunction* function) {
+    return function->next_function_link();
+  }
+
+  static int WeakNextOffset() { return JSFunction::kNextFunctionLinkOffset; }
+
+  static void VisitLiveObject(Heap*, JSFunction*, WeakObjectRetainer*) {}
+
+  static void VisitPhantomObject(Heap*, JSFunction*) {}
+};
+
+
+template <>
+struct WeakListVisitor<Code> {
+  static void SetWeakNext(Code* code, Object* next) {
+    code->set_next_code_link(next);
+  }
+
+  static Object* WeakNext(Code* code) { return code->next_code_link(); }
+
+  static int WeakNextOffset() { return Code::kNextCodeLinkOffset; }
+
+  static void VisitLiveObject(Heap*, Code*, WeakObjectRetainer*) {}
+
+  static void VisitPhantomObject(Heap*, Code*) {}
+};
+
+
+template <>
+struct WeakListVisitor<Context> {
+  static void SetWeakNext(Context* context, Object* next) {
+    context->set(Context::NEXT_CONTEXT_LINK, next, UPDATE_WRITE_BARRIER);
+  }
+
+  static Object* WeakNext(Context* context) {
+    return context->get(Context::NEXT_CONTEXT_LINK);
+  }
+
+  static int WeakNextOffset() {
+    return FixedArray::SizeFor(Context::NEXT_CONTEXT_LINK);
+  }
+
+  static void VisitLiveObject(Heap* heap, Context* context,
+                              WeakObjectRetainer* retainer) {
+    // Process the three weak lists linked off the context.
+    DoWeakList<JSFunction>(heap, context, retainer,
+                           Context::OPTIMIZED_FUNCTIONS_LIST);
+    DoWeakList<Code>(heap, context, retainer, Context::OPTIMIZED_CODE_LIST);
+    DoWeakList<Code>(heap, context, retainer, Context::DEOPTIMIZED_CODE_LIST);
+  }
+
+  template <class T>
+  static void DoWeakList(Heap* heap, Context* context,
+                         WeakObjectRetainer* retainer, int index) {
+    // Visit the weak list, removing dead intermediate elements.
+    Object* list_head = VisitWeakList<T>(heap, context->get(index), retainer);
+
+    // Update the list head.
+    context->set(index, list_head, UPDATE_WRITE_BARRIER);
+
+    if (MustRecordSlots(heap)) {
+      // Record the updated slot if necessary.
+      Object** head_slot =
+          HeapObject::RawField(context, FixedArray::SizeFor(index));
+      heap->mark_compact_collector()->RecordSlot(head_slot, head_slot,
+                                                 list_head);
+    }
+  }
+
+  static void VisitPhantomObject(Heap* heap, Context* context) {
+    ClearWeakList<JSFunction>(heap,
+                              context->get(Context::OPTIMIZED_FUNCTIONS_LIST));
+    ClearWeakList<Code>(heap, context->get(Context::OPTIMIZED_CODE_LIST));
+    ClearWeakList<Code>(heap, context->get(Context::DEOPTIMIZED_CODE_LIST));
+  }
+};
+
+
+template <>
+struct WeakListVisitor<JSArrayBufferView> {
+  static void SetWeakNext(JSArrayBufferView* obj, Object* next) {
+    obj->set_weak_next(next);
+  }
+
+  static Object* WeakNext(JSArrayBufferView* obj) { return obj->weak_next(); }
+
+  static int WeakNextOffset() { return JSArrayBufferView::kWeakNextOffset; }
+
+  static void VisitLiveObject(Heap*, JSArrayBufferView*, WeakObjectRetainer*) {}
+
+  static void VisitPhantomObject(Heap*, JSArrayBufferView*) {}
+};
+
+
+template <>
+struct WeakListVisitor<JSArrayBuffer> {
+  static void SetWeakNext(JSArrayBuffer* obj, Object* next) {
+    obj->set_weak_next(next);
+  }
+
+  static Object* WeakNext(JSArrayBuffer* obj) { return obj->weak_next(); }
+
+  static int WeakNextOffset() { return JSArrayBuffer::kWeakNextOffset; }
+
+  static void VisitLiveObject(Heap* heap, JSArrayBuffer* array_buffer,
+                              WeakObjectRetainer* retainer) {
+    Object* typed_array_obj = VisitWeakList<JSArrayBufferView>(
+        heap, array_buffer->weak_first_view(), retainer);
+    array_buffer->set_weak_first_view(typed_array_obj);
+    if (typed_array_obj != heap->undefined_value() && MustRecordSlots(heap)) {
+      Object** slot = HeapObject::RawField(array_buffer,
+                                           JSArrayBuffer::kWeakFirstViewOffset);
+      heap->mark_compact_collector()->RecordSlot(slot, slot, typed_array_obj);
+    }
+  }
+
+  static void VisitPhantomObject(Heap* heap, JSArrayBuffer* phantom) {
+    Runtime::FreeArrayBuffer(heap->isolate(), phantom);
+  }
+};
+
+
+template <>
+struct WeakListVisitor<AllocationSite> {
+  static void SetWeakNext(AllocationSite* obj, Object* next) {
+    obj->set_weak_next(next);
+  }
+
+  static Object* WeakNext(AllocationSite* obj) { return obj->weak_next(); }
+
+  static int WeakNextOffset() { return AllocationSite::kWeakNextOffset; }
+
+  static void VisitLiveObject(Heap*, AllocationSite*, WeakObjectRetainer*) {}
+
+  static void VisitPhantomObject(Heap*, AllocationSite*) {}
+};
+
+
+template Object* VisitWeakList<Code>(Heap* heap, Object* list,
+                                     WeakObjectRetainer* retainer);
+
+
+template Object* VisitWeakList<JSFunction>(Heap* heap, Object* list,
+                                           WeakObjectRetainer* retainer);
+
+
+template Object* VisitWeakList<Context>(Heap* heap, Object* list,
+                                        WeakObjectRetainer* retainer);
+
+
+template Object* VisitWeakList<JSArrayBuffer>(Heap* heap, Object* list,
+                                              WeakObjectRetainer* retainer);
+
+
+template Object* VisitWeakList<AllocationSite>(Heap* heap, Object* list,
+                                               WeakObjectRetainer* retainer);
+}
+}  // namespace v8::internal
diff --git a/src/heap/objects-visiting.h b/src/heap/objects-visiting.h
new file mode 100644
index 0000000..919a800
--- /dev/null
+++ b/src/heap/objects-visiting.h
@@ -0,0 +1,452 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_VISITING_H_
+#define V8_OBJECTS_VISITING_H_
+
+#include "src/allocation.h"
+
+// This file provides base classes and auxiliary methods for defining
+// static object visitors used during GC.
+// Visiting a HeapObject body with a normal ObjectVisitor requires performing
+// two switches on the object's instance type to determine object size and
+// layout, plus one or more virtual method calls on the visitor itself.
+// A static visitor is different: it provides a dispatch table containing
+// pointers to specialized visit functions. Each map has a visitor_id field
+// which holds the index of the specialized visitor to use.
+
+namespace v8 {
+namespace internal {
+
+
+// Base class for all static visitors.
+class StaticVisitorBase : public AllStatic {
+ public:
+#define VISITOR_ID_LIST(V) \
+  V(SeqOneByteString)      \
+  V(SeqTwoByteString)      \
+  V(ShortcutCandidate)     \
+  V(ByteArray)             \
+  V(FreeSpace)             \
+  V(FixedArray)            \
+  V(FixedDoubleArray)      \
+  V(FixedTypedArray)       \
+  V(FixedFloat64Array)     \
+  V(ConstantPoolArray)     \
+  V(NativeContext)         \
+  V(AllocationSite)        \
+  V(DataObject2)           \
+  V(DataObject3)           \
+  V(DataObject4)           \
+  V(DataObject5)           \
+  V(DataObject6)           \
+  V(DataObject7)           \
+  V(DataObject8)           \
+  V(DataObject9)           \
+  V(DataObjectGeneric)     \
+  V(JSObject2)             \
+  V(JSObject3)             \
+  V(JSObject4)             \
+  V(JSObject5)             \
+  V(JSObject6)             \
+  V(JSObject7)             \
+  V(JSObject8)             \
+  V(JSObject9)             \
+  V(JSObjectGeneric)       \
+  V(Struct2)               \
+  V(Struct3)               \
+  V(Struct4)               \
+  V(Struct5)               \
+  V(Struct6)               \
+  V(Struct7)               \
+  V(Struct8)               \
+  V(Struct9)               \
+  V(StructGeneric)         \
+  V(ConsString)            \
+  V(SlicedString)          \
+  V(Symbol)                \
+  V(Oddball)               \
+  V(Code)                  \
+  V(Map)                   \
+  V(Cell)                  \
+  V(PropertyCell)          \
+  V(SharedFunctionInfo)    \
+  V(JSFunction)            \
+  V(JSWeakCollection)      \
+  V(JSArrayBuffer)         \
+  V(JSTypedArray)          \
+  V(JSDataView)            \
+  V(JSRegExp)
+
+  // For data objects, JS objects and structs we provide visitors specialized
+  // by object size in words, along with a generic visitor that can visit an
+  // object of any size.
+  // Ids of specialized visitors are declared in a linear order (without
+  // holes), starting from the id of the visitor specialized for 2-word
+  // objects (the base visitor id) and ending with the id of the generic
+  // visitor.
+  // GetVisitorIdForSize depends on this ordering to calculate the visitor id
+  // of a specialized visitor from a given instance size, the base visitor id
+  // and the generic visitor's id.
+  enum VisitorId {
+#define VISITOR_ID_ENUM_DECL(id) kVisit##id,
+    VISITOR_ID_LIST(VISITOR_ID_ENUM_DECL)
+#undef VISITOR_ID_ENUM_DECL
+    kVisitorIdCount,
+    kVisitDataObject = kVisitDataObject2,
+    kVisitJSObject = kVisitJSObject2,
+    kVisitStruct = kVisitStruct2,
+    kMinObjectSizeInWords = 2
+  };
+
+  // Visitor ID should fit in one byte.
+  STATIC_ASSERT(kVisitorIdCount <= 256);
+
+  // Determine which specialized visitor should be used for the given instance
+  // type and instance size.
+  static VisitorId GetVisitorId(int instance_type, int instance_size);
+
+  static VisitorId GetVisitorId(Map* map) {
+    return GetVisitorId(map->instance_type(), map->instance_size());
+  }
+
+  // For visitors that allow specialization by size, calculate the VisitorId
+  // based on the object size, the base visitor id and the generic visitor id.
+  static VisitorId GetVisitorIdForSize(VisitorId base, VisitorId generic,
+                                       int object_size) {
+    DCHECK((base == kVisitDataObject) || (base == kVisitStruct) ||
+           (base == kVisitJSObject));
+    DCHECK(IsAligned(object_size, kPointerSize));
+    DCHECK(kMinObjectSizeInWords * kPointerSize <= object_size);
+    DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+
+    const VisitorId specialization = static_cast<VisitorId>(
+        base + (object_size >> kPointerSizeLog2) - kMinObjectSizeInWords);
+
+    return Min(specialization, generic);
+  }
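+
+  // For example (illustrative only), with kMinObjectSizeInWords == 2 and the
+  // linear visitor id layout above,
+  //   GetVisitorIdForSize(kVisitJSObject, kVisitJSObjectGeneric,
+  //                       5 * kPointerSize)
+  // yields kVisitJSObject2 + 5 - 2 == kVisitJSObject5, while any object of
+  // more than 9 words clamps to kVisitJSObjectGeneric via the Min() above.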
+};
+
+
+template <typename Callback>
+class VisitorDispatchTable {
+ public:
+  void CopyFrom(VisitorDispatchTable* other) {
+    // We are not using memcpy, to guarantee that during the update every
+    // element of the callbacks_ array remains a valid pointer (memcpy might
+    // be implemented as a byte-copying loop).
+    for (int i = 0; i < StaticVisitorBase::kVisitorIdCount; i++) {
+      base::NoBarrier_Store(&callbacks_[i], other->callbacks_[i]);
+    }
+  }
+
+  inline Callback GetVisitorById(StaticVisitorBase::VisitorId id) {
+    return reinterpret_cast<Callback>(callbacks_[id]);
+  }
+
+  inline Callback GetVisitor(Map* map) {
+    return reinterpret_cast<Callback>(callbacks_[map->visitor_id()]);
+  }
+
+  void Register(StaticVisitorBase::VisitorId id, Callback callback) {
+    DCHECK(id < StaticVisitorBase::kVisitorIdCount);  // id is unsigned.
+    callbacks_[id] = reinterpret_cast<base::AtomicWord>(callback);
+  }
+
+  template <typename Visitor, StaticVisitorBase::VisitorId base,
+            StaticVisitorBase::VisitorId generic, int object_size_in_words>
+  void RegisterSpecialization() {
+    static const int size = object_size_in_words * kPointerSize;
+    Register(StaticVisitorBase::GetVisitorIdForSize(base, generic, size),
+             &Visitor::template VisitSpecialized<size>);
+  }
+
+
+  template <typename Visitor, StaticVisitorBase::VisitorId base,
+            StaticVisitorBase::VisitorId generic>
+  void RegisterSpecializations() {
+    STATIC_ASSERT((generic - base + StaticVisitorBase::kMinObjectSizeInWords) ==
+                  10);
+    RegisterSpecialization<Visitor, base, generic, 2>();
+    RegisterSpecialization<Visitor, base, generic, 3>();
+    RegisterSpecialization<Visitor, base, generic, 4>();
+    RegisterSpecialization<Visitor, base, generic, 5>();
+    RegisterSpecialization<Visitor, base, generic, 6>();
+    RegisterSpecialization<Visitor, base, generic, 7>();
+    RegisterSpecialization<Visitor, base, generic, 8>();
+    RegisterSpecialization<Visitor, base, generic, 9>();
+    Register(generic, &Visitor::Visit);
+  }
+
+ private:
+  base::AtomicWord callbacks_[StaticVisitorBase::kVisitorIdCount];
+};
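+
+
+// A minimal sketch of how the dispatch table is meant to be used (SomeVisitor
+// stands in for a CRTP subclass like the ones declared below; the names here
+// are illustrative only):
+//
+//   VisitorDispatchTable<int (*)(Map*, HeapObject*)> table;
+//   table.RegisterSpecializations<SomeVisitor,
+//                                 StaticVisitorBase::kVisitJSObject,
+//                                 StaticVisitorBase::kVisitJSObjectGeneric>();
+//   int size = table.GetVisitor(map)(map, obj);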
+
+
+template <typename StaticVisitor>
+class BodyVisitorBase : public AllStatic {
+ public:
+  INLINE(static void IteratePointers(Heap* heap, HeapObject* object,
+                                     int start_offset, int end_offset)) {
+    Object** start_slot =
+        reinterpret_cast<Object**>(object->address() + start_offset);
+    Object** end_slot =
+        reinterpret_cast<Object**>(object->address() + end_offset);
+    StaticVisitor::VisitPointers(heap, start_slot, end_slot);
+  }
+};
+
+
+template <typename StaticVisitor, typename BodyDescriptor, typename ReturnType>
+class FlexibleBodyVisitor : public BodyVisitorBase<StaticVisitor> {
+ public:
+  INLINE(static ReturnType Visit(Map* map, HeapObject* object)) {
+    int object_size = BodyDescriptor::SizeOf(map, object);
+    BodyVisitorBase<StaticVisitor>::IteratePointers(
+        map->GetHeap(), object, BodyDescriptor::kStartOffset, object_size);
+    return static_cast<ReturnType>(object_size);
+  }
+
+  template <int object_size>
+  static inline ReturnType VisitSpecialized(Map* map, HeapObject* object) {
+    DCHECK(BodyDescriptor::SizeOf(map, object) == object_size);
+    BodyVisitorBase<StaticVisitor>::IteratePointers(
+        map->GetHeap(), object, BodyDescriptor::kStartOffset, object_size);
+    return static_cast<ReturnType>(object_size);
+  }
+};
+
+
+template <typename StaticVisitor, typename BodyDescriptor, typename ReturnType>
+class FixedBodyVisitor : public BodyVisitorBase<StaticVisitor> {
+ public:
+  INLINE(static ReturnType Visit(Map* map, HeapObject* object)) {
+    BodyVisitorBase<StaticVisitor>::IteratePointers(
+        map->GetHeap(), object, BodyDescriptor::kStartOffset,
+        BodyDescriptor::kEndOffset);
+    return static_cast<ReturnType>(BodyDescriptor::kSize);
+  }
+};
+
+
+// Base class for visitors used for a linear new space iteration.
+// IterateBody returns the size of the visited object.
+// Certain types of objects (e.g. Code objects) are not handled
+// by the dispatch table of this visitor because they cannot appear
+// in the new space.
+//
+// This class is intended to be used in the following way:
+//
+//   class SomeVisitor : public StaticNewSpaceVisitor<SomeVisitor> {
+//     ...
+//   }
+//
+// This is an example of the curiously recurring template pattern
+// (see http://en.wikipedia.org/wiki/Curiously_recurring_template_pattern).
+// We use CRTP to enable aggressive compile-time optimizations (e.g.
+// inlining and specialization of StaticVisitor::VisitPointers methods).
+template <typename StaticVisitor>
+class StaticNewSpaceVisitor : public StaticVisitorBase {
+ public:
+  static void Initialize();
+
+  INLINE(static int IterateBody(Map* map, HeapObject* obj)) {
+    return table_.GetVisitor(map)(map, obj);
+  }
+
+  INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
+    for (Object** p = start; p < end; p++) StaticVisitor::VisitPointer(heap, p);
+  }
+
+ private:
+  INLINE(static int VisitJSFunction(Map* map, HeapObject* object)) {
+    Heap* heap = map->GetHeap();
+    VisitPointers(heap,
+                  HeapObject::RawField(object, JSFunction::kPropertiesOffset),
+                  HeapObject::RawField(object, JSFunction::kCodeEntryOffset));
+
+    // Don't visit code entry. We are using this visitor only during scavenges.
+
+    VisitPointers(
+        heap, HeapObject::RawField(object,
+                                   JSFunction::kCodeEntryOffset + kPointerSize),
+        HeapObject::RawField(object, JSFunction::kNonWeakFieldsEndOffset));
+    return JSFunction::kSize;
+  }
+
+  INLINE(static int VisitByteArray(Map* map, HeapObject* object)) {
+    return reinterpret_cast<ByteArray*>(object)->ByteArraySize();
+  }
+
+  INLINE(static int VisitFixedDoubleArray(Map* map, HeapObject* object)) {
+    int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
+    return FixedDoubleArray::SizeFor(length);
+  }
+
+  INLINE(static int VisitFixedTypedArray(Map* map, HeapObject* object)) {
+    return reinterpret_cast<FixedTypedArrayBase*>(object)->size();
+  }
+
+  INLINE(static int VisitJSObject(Map* map, HeapObject* object)) {
+    return JSObjectVisitor::Visit(map, object);
+  }
+
+  INLINE(static int VisitSeqOneByteString(Map* map, HeapObject* object)) {
+    return SeqOneByteString::cast(object)
+        ->SeqOneByteStringSize(map->instance_type());
+  }
+
+  INLINE(static int VisitSeqTwoByteString(Map* map, HeapObject* object)) {
+    return SeqTwoByteString::cast(object)
+        ->SeqTwoByteStringSize(map->instance_type());
+  }
+
+  INLINE(static int VisitFreeSpace(Map* map, HeapObject* object)) {
+    return FreeSpace::cast(object)->Size();
+  }
+
+  INLINE(static int VisitJSArrayBuffer(Map* map, HeapObject* object));
+  INLINE(static int VisitJSTypedArray(Map* map, HeapObject* object));
+  INLINE(static int VisitJSDataView(Map* map, HeapObject* object));
+
+  class DataObjectVisitor {
+   public:
+    template <int object_size>
+    static inline int VisitSpecialized(Map* map, HeapObject* object) {
+      return object_size;
+    }
+
+    INLINE(static int Visit(Map* map, HeapObject* object)) {
+      return map->instance_size();
+    }
+  };
+
+  typedef FlexibleBodyVisitor<StaticVisitor, StructBodyDescriptor, int>
+      StructVisitor;
+
+  typedef FlexibleBodyVisitor<StaticVisitor, JSObject::BodyDescriptor, int>
+      JSObjectVisitor;
+
+  typedef int (*Callback)(Map* map, HeapObject* object);
+
+  static VisitorDispatchTable<Callback> table_;
+};
+
+
+template <typename StaticVisitor>
+VisitorDispatchTable<typename StaticNewSpaceVisitor<StaticVisitor>::Callback>
+    StaticNewSpaceVisitor<StaticVisitor>::table_;
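+
+
+// A minimal sketch of the intended CRTP usage (NewSpaceExampleVisitor is a
+// hypothetical name, not a class defined in this code):
+//
+//   class NewSpaceExampleVisitor
+//       : public StaticNewSpaceVisitor<NewSpaceExampleVisitor> {
+//    public:
+//     static void VisitPointer(Heap* heap, Object** p) {
+//       // Process a single slot of the visited object.
+//     }
+//   };
+//
+//   NewSpaceExampleVisitor::Initialize();
+//   int size = NewSpaceExampleVisitor::IterateBody(object->map(), object);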
+
+
+// Base class for visitors used to transitively mark the entire heap.
+// IterateBody returns nothing.
+// Certain types of objects might not be handled by this base class and
+// no visitor function is registered by the generic initialization. A
+// specialized visitor function needs to be provided by the inheriting
+// class itself for those cases.
+//
+// This class is intended to be used in the following way:
+//
+//   class SomeVisitor : public StaticMarkingVisitor<SomeVisitor> {
+//     ...
+//   }
+//
+// This is an example of the curiously recurring template pattern.
+template <typename StaticVisitor>
+class StaticMarkingVisitor : public StaticVisitorBase {
+ public:
+  static void Initialize();
+
+  INLINE(static void IterateBody(Map* map, HeapObject* obj)) {
+    table_.GetVisitor(map)(map, obj);
+  }
+
+  INLINE(static void VisitPropertyCell(Map* map, HeapObject* object));
+  INLINE(static void VisitCodeEntry(Heap* heap, Address entry_address));
+  INLINE(static void VisitEmbeddedPointer(Heap* heap, RelocInfo* rinfo));
+  INLINE(static void VisitCell(Heap* heap, RelocInfo* rinfo));
+  INLINE(static void VisitDebugTarget(Heap* heap, RelocInfo* rinfo));
+  INLINE(static void VisitCodeTarget(Heap* heap, RelocInfo* rinfo));
+  INLINE(static void VisitCodeAgeSequence(Heap* heap, RelocInfo* rinfo));
+  INLINE(static void VisitExternalReference(RelocInfo* rinfo)) {}
+  INLINE(static void VisitRuntimeEntry(RelocInfo* rinfo)) {}
+  // Skip the weak next code link in a code object.
+  INLINE(static void VisitNextCodeLink(Heap* heap, Object** slot)) {}
+
+  // TODO(mstarzinger): This should be made protected once refactoring is done.
+  // Mark non-optimized code for functions inlined into the given optimized
+  // code. This will prevent it from being flushed.
+  static void MarkInlinedFunctionsCode(Heap* heap, Code* code);
+
+ protected:
+  INLINE(static void VisitMap(Map* map, HeapObject* object));
+  INLINE(static void VisitCode(Map* map, HeapObject* object));
+  INLINE(static void VisitSharedFunctionInfo(Map* map, HeapObject* object));
+  INLINE(static void VisitConstantPoolArray(Map* map, HeapObject* object));
+  INLINE(static void VisitAllocationSite(Map* map, HeapObject* object));
+  INLINE(static void VisitWeakCollection(Map* map, HeapObject* object));
+  INLINE(static void VisitJSFunction(Map* map, HeapObject* object));
+  INLINE(static void VisitJSRegExp(Map* map, HeapObject* object));
+  INLINE(static void VisitJSArrayBuffer(Map* map, HeapObject* object));
+  INLINE(static void VisitJSTypedArray(Map* map, HeapObject* object));
+  INLINE(static void VisitJSDataView(Map* map, HeapObject* object));
+  INLINE(static void VisitNativeContext(Map* map, HeapObject* object));
+
+  // Mark pointers in a Map and its TransitionArray together, possibly
+  // treating transitions or back pointers as weak.
+  static void MarkMapContents(Heap* heap, Map* map);
+  static void MarkTransitionArray(Heap* heap, TransitionArray* transitions);
+
+  // Code flushing support.
+  INLINE(static bool IsFlushable(Heap* heap, JSFunction* function));
+  INLINE(static bool IsFlushable(Heap* heap, SharedFunctionInfo* shared_info));
+
+  // Helpers used by code flushing support that visit pointer fields and treat
+  // references to code objects either strongly or weakly.
+  static void VisitSharedFunctionInfoStrongCode(Heap* heap, HeapObject* object);
+  static void VisitSharedFunctionInfoWeakCode(Heap* heap, HeapObject* object);
+  static void VisitJSFunctionStrongCode(Heap* heap, HeapObject* object);
+  static void VisitJSFunctionWeakCode(Heap* heap, HeapObject* object);
+
+  class DataObjectVisitor {
+   public:
+    template <int size>
+    static inline void VisitSpecialized(Map* map, HeapObject* object) {}
+
+    INLINE(static void Visit(Map* map, HeapObject* object)) {}
+  };
+
+  typedef FlexibleBodyVisitor<StaticVisitor, FixedArray::BodyDescriptor, void>
+      FixedArrayVisitor;
+
+  typedef FlexibleBodyVisitor<StaticVisitor, JSObject::BodyDescriptor, void>
+      JSObjectVisitor;
+
+  typedef FlexibleBodyVisitor<StaticVisitor, StructBodyDescriptor, void>
+      StructObjectVisitor;
+
+  typedef void (*Callback)(Map* map, HeapObject* object);
+
+  static VisitorDispatchTable<Callback> table_;
+};
+
+
+template <typename StaticVisitor>
+VisitorDispatchTable<typename StaticMarkingVisitor<StaticVisitor>::Callback>
+    StaticMarkingVisitor<StaticVisitor>::table_;
+
+
+class WeakObjectRetainer;
+
+
+// A weak list is a singly linked list where each element has a weak pointer
+// to the next element. Given the head of the list, this function removes dead
+// elements from the list and, if requested, records slots for the
+// next-element pointers. The template parameter T selects the WeakListVisitor
+// specialization that defines how to access the next-element pointers.
+template <class T>
+Object* VisitWeakList(Heap* heap, Object* list, WeakObjectRetainer* retainer);
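+
+// VisitWeakList<T> relies on a WeakListVisitor<T> specialization (defined in
+// objects-visiting.cc) that provides roughly the following interface; this is
+// a sketch of the expected contract, not a declaration used by this header:
+//
+//   template <>
+//   struct WeakListVisitor<T> {
+//     static void SetWeakNext(T* obj, Object* next);
+//     static Object* WeakNext(T* obj);
+//     static int WeakNextOffset();
+//     static void VisitLiveObject(Heap* heap, T* obj, WeakObjectRetainer* r);
+//     static void VisitPhantomObject(Heap* heap, T* obj);
+//   };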
+}
+}  // namespace v8::internal
+
+#endif  // V8_OBJECTS_VISITING_H_
diff --git a/src/heap/spaces-inl.h b/src/heap/spaces-inl.h
new file mode 100644
index 0000000..d81d253
--- /dev/null
+++ b/src/heap/spaces-inl.h
@@ -0,0 +1,313 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_SPACES_INL_H_
+#define V8_HEAP_SPACES_INL_H_
+
+#include "src/heap/spaces.h"
+#include "src/heap-profiler.h"
+#include "src/isolate.h"
+#include "src/msan.h"
+#include "src/v8memory.h"
+
+namespace v8 {
+namespace internal {
+
+
+// -----------------------------------------------------------------------------
+// Bitmap
+
+void Bitmap::Clear(MemoryChunk* chunk) {
+  Bitmap* bitmap = chunk->markbits();
+  for (int i = 0; i < bitmap->CellsCount(); i++) bitmap->cells()[i] = 0;
+  chunk->ResetLiveBytes();
+}
+
+
+// -----------------------------------------------------------------------------
+// PageIterator
+
+
+PageIterator::PageIterator(PagedSpace* space)
+    : space_(space),
+      prev_page_(&space->anchor_),
+      next_page_(prev_page_->next_page()) {}
+
+
+bool PageIterator::has_next() { return next_page_ != &space_->anchor_; }
+
+
+Page* PageIterator::next() {
+  DCHECK(has_next());
+  prev_page_ = next_page_;
+  next_page_ = next_page_->next_page();
+  return prev_page_;
+}
+
+
+// -----------------------------------------------------------------------------
+// NewSpacePageIterator
+
+
+NewSpacePageIterator::NewSpacePageIterator(NewSpace* space)
+    : prev_page_(NewSpacePage::FromAddress(space->ToSpaceStart())->prev_page()),
+      next_page_(NewSpacePage::FromAddress(space->ToSpaceStart())),
+      last_page_(NewSpacePage::FromLimit(space->ToSpaceEnd())) {}
+
+NewSpacePageIterator::NewSpacePageIterator(SemiSpace* space)
+    : prev_page_(space->anchor()),
+      next_page_(prev_page_->next_page()),
+      last_page_(prev_page_->prev_page()) {}
+
+NewSpacePageIterator::NewSpacePageIterator(Address start, Address limit)
+    : prev_page_(NewSpacePage::FromAddress(start)->prev_page()),
+      next_page_(NewSpacePage::FromAddress(start)),
+      last_page_(NewSpacePage::FromLimit(limit)) {
+  SemiSpace::AssertValidRange(start, limit);
+}
+
+
+bool NewSpacePageIterator::has_next() { return prev_page_ != last_page_; }
+
+
+NewSpacePage* NewSpacePageIterator::next() {
+  DCHECK(has_next());
+  prev_page_ = next_page_;
+  next_page_ = next_page_->next_page();
+  return prev_page_;
+}
+
+
+// -----------------------------------------------------------------------------
+// HeapObjectIterator
+HeapObject* HeapObjectIterator::FromCurrentPage() {
+  while (cur_addr_ != cur_end_) {
+    if (cur_addr_ == space_->top() && cur_addr_ != space_->limit()) {
+      cur_addr_ = space_->limit();
+      continue;
+    }
+    HeapObject* obj = HeapObject::FromAddress(cur_addr_);
+    int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj);
+    cur_addr_ += obj_size;
+    DCHECK(cur_addr_ <= cur_end_);
+    if (!obj->IsFiller()) {
+      DCHECK_OBJECT_SIZE(obj_size);
+      return obj;
+    }
+  }
+  return NULL;
+}
+
+
+// -----------------------------------------------------------------------------
+// MemoryAllocator
+
+#ifdef ENABLE_HEAP_PROTECTION
+
+void MemoryAllocator::Protect(Address start, size_t size) {
+  base::OS::Protect(start, size);
+}
+
+
+void MemoryAllocator::Unprotect(Address start, size_t size,
+                                Executability executable) {
+  base::OS::Unprotect(start, size, executable);
+}
+
+
+void MemoryAllocator::ProtectChunkFromPage(Page* page) {
+  int id = GetChunkId(page);
+  base::OS::Protect(chunks_[id].address(), chunks_[id].size());
+}
+
+
+void MemoryAllocator::UnprotectChunkFromPage(Page* page) {
+  int id = GetChunkId(page);
+  base::OS::Unprotect(chunks_[id].address(), chunks_[id].size(),
+                      chunks_[id].owner()->executable() == EXECUTABLE);
+}
+
+#endif
+
+
+// --------------------------------------------------------------------------
+// PagedSpace
+Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
+                       PagedSpace* owner) {
+  Page* page = reinterpret_cast<Page*>(chunk);
+  DCHECK(page->area_size() <= kMaxRegularHeapObjectSize);
+  DCHECK(chunk->owner() == owner);
+  owner->IncreaseCapacity(page->area_size());
+  owner->Free(page->area_start(), page->area_size());
+
+  heap->incremental_marking()->SetOldSpacePageFlags(chunk);
+
+  return page;
+}
+
+
+bool PagedSpace::Contains(Address addr) {
+  Page* p = Page::FromAddress(addr);
+  if (!p->is_valid()) return false;
+  return p->owner() == this;
+}
+
+
+void MemoryChunk::set_scan_on_scavenge(bool scan) {
+  if (scan) {
+    if (!scan_on_scavenge()) heap_->increment_scan_on_scavenge_pages();
+    SetFlag(SCAN_ON_SCAVENGE);
+  } else {
+    if (scan_on_scavenge()) heap_->decrement_scan_on_scavenge_pages();
+    ClearFlag(SCAN_ON_SCAVENGE);
+  }
+  heap_->incremental_marking()->SetOldSpacePageFlags(this);
+}
+
+
+MemoryChunk* MemoryChunk::FromAnyPointerAddress(Heap* heap, Address addr) {
+  MemoryChunk* maybe = reinterpret_cast<MemoryChunk*>(
+      OffsetFrom(addr) & ~Page::kPageAlignmentMask);
+  if (maybe->owner() != NULL) return maybe;
+  LargeObjectIterator iterator(heap->lo_space());
+  for (HeapObject* o = iterator.Next(); o != NULL; o = iterator.Next()) {
+    // Fixed arrays are the only pointer-containing objects in large object
+    // space.
+    if (o->IsFixedArray()) {
+      MemoryChunk* chunk = MemoryChunk::FromAddress(o->address());
+      if (chunk->Contains(addr)) {
+        return chunk;
+      }
+    }
+  }
+  UNREACHABLE();
+  return NULL;
+}
+
+
+void MemoryChunk::UpdateHighWaterMark(Address mark) {
+  if (mark == NULL) return;
+  // Need to subtract one from the mark because when a chunk is full the
+  // top points to the next address after the chunk, which effectively belongs
+  // to another chunk. See the comment to Page::FromAllocationTop.
+  MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1);
+  int new_mark = static_cast<int>(mark - chunk->address());
+  if (new_mark > chunk->high_water_mark_) {
+    chunk->high_water_mark_ = new_mark;
+  }
+}
+
+
+PointerChunkIterator::PointerChunkIterator(Heap* heap)
+    : state_(kOldPointerState),
+      old_pointer_iterator_(heap->old_pointer_space()),
+      map_iterator_(heap->map_space()),
+      lo_iterator_(heap->lo_space()) {}
+
+
+Page* Page::next_page() {
+  DCHECK(next_chunk()->owner() == owner());
+  return static_cast<Page*>(next_chunk());
+}
+
+
+Page* Page::prev_page() {
+  DCHECK(prev_chunk()->owner() == owner());
+  return static_cast<Page*>(prev_chunk());
+}
+
+
+void Page::set_next_page(Page* page) {
+  DCHECK(page->owner() == owner());
+  set_next_chunk(page);
+}
+
+
+void Page::set_prev_page(Page* page) {
+  DCHECK(page->owner() == owner());
+  set_prev_chunk(page);
+}
+
+
+// Try linear allocation in the page of alloc_info's allocation top.  Does
+// not contain slow-case logic (e.g. moving to the next page or trying free
+// list allocation), so it can be used by all the allocation functions and for
+// all the paged spaces.
+HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
+  Address current_top = allocation_info_.top();
+  Address new_top = current_top + size_in_bytes;
+  if (new_top > allocation_info_.limit()) return NULL;
+
+  allocation_info_.set_top(new_top);
+  return HeapObject::FromAddress(current_top);
+}
+
+
+// Raw allocation: try the linear allocation area first, then the free list,
+// and finally the slow path.
+AllocationResult PagedSpace::AllocateRaw(int size_in_bytes) {
+  HeapObject* object = AllocateLinearly(size_in_bytes);
+
+  if (object == NULL) {
+    object = free_list_.Allocate(size_in_bytes);
+    if (object == NULL) {
+      object = SlowAllocateRaw(size_in_bytes);
+    }
+  }
+
+  if (object != NULL) {
+    if (identity() == CODE_SPACE) {
+      SkipList::Update(object->address(), size_in_bytes);
+    }
+    MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), size_in_bytes);
+    return object;
+  }
+
+  return AllocationResult::Retry(identity());
+}
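+
+
+// Callers typically unwrap the result roughly as follows (a sketch only; the
+// AllocationResult helpers are assumed from the allocation interface rather
+// than defined in this file):
+//
+//   HeapObject* object = NULL;
+//   AllocationResult allocation = space->AllocateRaw(size_in_bytes);
+//   if (!allocation.To(&object)) {
+//     // Allocation failed; trigger a GC for the space reported by the result
+//     // and retry.
+//   }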
+
+
+// -----------------------------------------------------------------------------
+// NewSpace
+
+
+AllocationResult NewSpace::AllocateRaw(int size_in_bytes) {
+  Address old_top = allocation_info_.top();
+
+  if (allocation_info_.limit() - old_top < size_in_bytes) {
+    return SlowAllocateRaw(size_in_bytes);
+  }
+
+  HeapObject* obj = HeapObject::FromAddress(old_top);
+  allocation_info_.set_top(allocation_info_.top() + size_in_bytes);
+  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
+
+  // The slow path above ultimately goes through AllocateRaw, so this suffices.
+  MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj->address(), size_in_bytes);
+
+  return obj;
+}
+
+
+LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk) {
+  heap->incremental_marking()->SetOldSpacePageFlags(chunk);
+  return static_cast<LargePage*>(chunk);
+}
+
+
+intptr_t LargeObjectSpace::Available() {
+  return ObjectSizeFor(heap()->isolate()->memory_allocator()->Available());
+}
+
+
+bool FreeListNode::IsFreeListNode(HeapObject* object) {
+  Map* map = object->map();
+  Heap* heap = object->GetHeap();
+  return map == heap->raw_unchecked_free_space_map() ||
+         map == heap->raw_unchecked_one_pointer_filler_map() ||
+         map == heap->raw_unchecked_two_pointer_filler_map();
+}
+}
+}  // namespace v8::internal
+
+#endif  // V8_HEAP_SPACES_INL_H_
diff --git a/src/heap/spaces.cc b/src/heap/spaces.cc
new file mode 100644
index 0000000..f8d340f
--- /dev/null
+++ b/src/heap/spaces.cc
@@ -0,0 +1,3107 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/base/bits.h"
+#include "src/base/platform/platform.h"
+#include "src/full-codegen.h"
+#include "src/heap/mark-compact.h"
+#include "src/macro-assembler.h"
+#include "src/msan.h"
+
+namespace v8 {
+namespace internal {
+
+
+// ----------------------------------------------------------------------------
+// HeapObjectIterator
+
+HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
+  // You can't actually iterate over the anchor page.  It is not a real page,
+  // just an anchor for the doubly linked page list.  Initialize as if we have
+  // reached the end of the anchor page, then the first iteration will move on
+  // to the first page.
+  Initialize(space, NULL, NULL, kAllPagesInSpace, NULL);
+}
+
+
+HeapObjectIterator::HeapObjectIterator(PagedSpace* space,
+                                       HeapObjectCallback size_func) {
+  // You can't actually iterate over the anchor page.  It is not a real page,
+  // just an anchor for the doubly linked page list.  Initialize the current
+  // address and end as NULL, then the first iteration will move on
+  // to the first page.
+  Initialize(space, NULL, NULL, kAllPagesInSpace, size_func);
+}
+
+
+HeapObjectIterator::HeapObjectIterator(Page* page,
+                                       HeapObjectCallback size_func) {
+  Space* owner = page->owner();
+  DCHECK(owner == page->heap()->old_pointer_space() ||
+         owner == page->heap()->old_data_space() ||
+         owner == page->heap()->map_space() ||
+         owner == page->heap()->cell_space() ||
+         owner == page->heap()->property_cell_space() ||
+         owner == page->heap()->code_space());
+  Initialize(reinterpret_cast<PagedSpace*>(owner), page->area_start(),
+             page->area_end(), kOnePageOnly, size_func);
+  DCHECK(page->WasSwept() || page->SweepingCompleted());
+}
+
+
+void HeapObjectIterator::Initialize(PagedSpace* space, Address cur, Address end,
+                                    HeapObjectIterator::PageMode mode,
+                                    HeapObjectCallback size_f) {
+  space_ = space;
+  cur_addr_ = cur;
+  cur_end_ = end;
+  page_mode_ = mode;
+  size_func_ = size_f;
+}
+
+
+// We have hit the end of the current page's object area and should advance
+// to the next page of objects.
+bool HeapObjectIterator::AdvanceToNextPage() {
+  DCHECK(cur_addr_ == cur_end_);
+  if (page_mode_ == kOnePageOnly) return false;
+  Page* cur_page;
+  if (cur_addr_ == NULL) {
+    cur_page = space_->anchor();
+  } else {
+    cur_page = Page::FromAddress(cur_addr_ - 1);
+    DCHECK(cur_addr_ == cur_page->area_end());
+  }
+  cur_page = cur_page->next_page();
+  if (cur_page == space_->anchor()) return false;
+  cur_addr_ = cur_page->area_start();
+  cur_end_ = cur_page->area_end();
+  DCHECK(cur_page->WasSwept() || cur_page->SweepingCompleted());
+  return true;
+}
+
+
+// -----------------------------------------------------------------------------
+// CodeRange
+
+
+CodeRange::CodeRange(Isolate* isolate)
+    : isolate_(isolate),
+      code_range_(NULL),
+      free_list_(0),
+      allocation_list_(0),
+      current_allocation_block_index_(0) {}
+
+
+bool CodeRange::SetUp(size_t requested) {
+  DCHECK(code_range_ == NULL);
+
+  if (requested == 0) {
+    // When a target requires the code range feature, we put all code objects
+    // in a kMaximalCodeRangeSize range of virtual address space, so that
+    // they can call each other with near calls.
+    if (kRequiresCodeRange) {
+      requested = kMaximalCodeRangeSize;
+    } else {
+      return true;
+    }
+  }
+
+  DCHECK(!kRequiresCodeRange || requested <= kMaximalCodeRangeSize);
+  code_range_ = new base::VirtualMemory(requested);
+  CHECK(code_range_ != NULL);
+  if (!code_range_->IsReserved()) {
+    delete code_range_;
+    code_range_ = NULL;
+    return false;
+  }
+
+  // We are sure that we have mapped a block of requested addresses.
+  DCHECK(code_range_->size() == requested);
+  LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested));
+  Address base = reinterpret_cast<Address>(code_range_->address());
+  Address aligned_base =
+      RoundUp(reinterpret_cast<Address>(code_range_->address()),
+              MemoryChunk::kAlignment);
+  size_t size = code_range_->size() - (aligned_base - base);
+  allocation_list_.Add(FreeBlock(aligned_base, size));
+  current_allocation_block_index_ = 0;
+  return true;
+}
+
+
+int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
+                                       const FreeBlock* right) {
+  // The entire point of CodeRange is that the difference between two
+  // addresses in the range can be represented as a signed 32-bit int,
+  // so the cast is semantically correct.
+  return static_cast<int>(left->start - right->start);
+}
+
+
+bool CodeRange::GetNextAllocationBlock(size_t requested) {
+  for (current_allocation_block_index_++;
+       current_allocation_block_index_ < allocation_list_.length();
+       current_allocation_block_index_++) {
+    if (requested <= allocation_list_[current_allocation_block_index_].size) {
+      return true;  // Found a large enough allocation block.
+    }
+  }
+
+  // Sort and merge the free blocks on the free list and the allocation list.
+  free_list_.AddAll(allocation_list_);
+  allocation_list_.Clear();
+  free_list_.Sort(&CompareFreeBlockAddress);
+  for (int i = 0; i < free_list_.length();) {
+    FreeBlock merged = free_list_[i];
+    i++;
+    // Add adjacent free blocks to the current merged block.
+    while (i < free_list_.length() &&
+           free_list_[i].start == merged.start + merged.size) {
+      merged.size += free_list_[i].size;
+      i++;
+    }
+    if (merged.size > 0) {
+      allocation_list_.Add(merged);
+    }
+  }
+  free_list_.Clear();
+
+  for (current_allocation_block_index_ = 0;
+       current_allocation_block_index_ < allocation_list_.length();
+       current_allocation_block_index_++) {
+    if (requested <= allocation_list_[current_allocation_block_index_].size) {
+      return true;  // Found a large enough allocation block.
+    }
+  }
+  current_allocation_block_index_ = 0;
+  // Code range is full or too fragmented.
+  return false;
+}
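+
+
+// For example, previously freed blocks [base, base + 1 * MB) and
+// [base + 1 * MB, base + 3 * MB) become adjacent after sorting by start
+// address, so the loop above merges them into a single [base, base + 3 * MB)
+// allocation block before the search for a large enough block is retried.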
+
+
+Address CodeRange::AllocateRawMemory(const size_t requested_size,
+                                     const size_t commit_size,
+                                     size_t* allocated) {
+  DCHECK(commit_size <= requested_size);
+  DCHECK(allocation_list_.length() == 0 ||
+         current_allocation_block_index_ < allocation_list_.length());
+  if (allocation_list_.length() == 0 ||
+      requested_size > allocation_list_[current_allocation_block_index_].size) {
+    // Find an allocation block large enough.
+    if (!GetNextAllocationBlock(requested_size)) return NULL;
+  }
+  // Commit the requested memory at the start of the current allocation block.
+  size_t aligned_requested = RoundUp(requested_size, MemoryChunk::kAlignment);
+  FreeBlock current = allocation_list_[current_allocation_block_index_];
+  if (aligned_requested >= (current.size - Page::kPageSize)) {
+    // Don't leave a small free block; it would be useless for a large object
+    // or chunk.
+    *allocated = current.size;
+  } else {
+    *allocated = aligned_requested;
+  }
+  DCHECK(*allocated <= current.size);
+  DCHECK(IsAddressAligned(current.start, MemoryChunk::kAlignment));
+  if (!isolate_->memory_allocator()->CommitExecutableMemory(
+          code_range_, current.start, commit_size, *allocated)) {
+    *allocated = 0;
+    return NULL;
+  }
+  allocation_list_[current_allocation_block_index_].start += *allocated;
+  allocation_list_[current_allocation_block_index_].size -= *allocated;
+  if (*allocated == current.size) {
+    // This block is used up, get the next one.
+    GetNextAllocationBlock(0);
+  }
+  return current.start;
+}
+
+
+bool CodeRange::CommitRawMemory(Address start, size_t length) {
+  return isolate_->memory_allocator()->CommitMemory(start, length, EXECUTABLE);
+}
+
+
+bool CodeRange::UncommitRawMemory(Address start, size_t length) {
+  return code_range_->Uncommit(start, length);
+}
+
+
+void CodeRange::FreeRawMemory(Address address, size_t length) {
+  DCHECK(IsAddressAligned(address, MemoryChunk::kAlignment));
+  free_list_.Add(FreeBlock(address, length));
+  code_range_->Uncommit(address, length);
+}
+
+
+void CodeRange::TearDown() {
+  delete code_range_;  // Frees all memory in the virtual memory range.
+  code_range_ = NULL;
+  free_list_.Free();
+  allocation_list_.Free();
+}
+
+
+// -----------------------------------------------------------------------------
+// MemoryAllocator
+//
+
+MemoryAllocator::MemoryAllocator(Isolate* isolate)
+    : isolate_(isolate),
+      capacity_(0),
+      capacity_executable_(0),
+      size_(0),
+      size_executable_(0),
+      lowest_ever_allocated_(reinterpret_cast<void*>(-1)),
+      highest_ever_allocated_(reinterpret_cast<void*>(0)) {}
+
+
+bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) {
+  capacity_ = RoundUp(capacity, Page::kPageSize);
+  capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
+  DCHECK_GE(capacity_, capacity_executable_);
+
+  size_ = 0;
+  size_executable_ = 0;
+
+  return true;
+}
+
+
+void MemoryAllocator::TearDown() {
+  // Check that spaces were torn down before MemoryAllocator.
+  DCHECK(size_ == 0);
+  // TODO(gc) this will be true again when we fix FreeMemory.
+  // DCHECK(size_executable_ == 0);
+  capacity_ = 0;
+  capacity_executable_ = 0;
+}
+
+
+bool MemoryAllocator::CommitMemory(Address base, size_t size,
+                                   Executability executable) {
+  if (!base::VirtualMemory::CommitRegion(base, size,
+                                         executable == EXECUTABLE)) {
+    return false;
+  }
+  UpdateAllocatedSpaceLimits(base, base + size);
+  return true;
+}
+
+
+void MemoryAllocator::FreeMemory(base::VirtualMemory* reservation,
+                                 Executability executable) {
+  // TODO(gc) make code_range part of memory allocator?
+  DCHECK(reservation->IsReserved());
+  size_t size = reservation->size();
+  DCHECK(size_ >= size);
+  size_ -= size;
+
+  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
+
+  if (executable == EXECUTABLE) {
+    DCHECK(size_executable_ >= size);
+    size_executable_ -= size;
+  }
+  // Code which is part of the code-range does not have its own VirtualMemory.
+  DCHECK(isolate_->code_range() == NULL ||
+         !isolate_->code_range()->contains(
+             static_cast<Address>(reservation->address())));
+  DCHECK(executable == NOT_EXECUTABLE || isolate_->code_range() == NULL ||
+         !isolate_->code_range()->valid());
+  reservation->Release();
+}
+
+
+void MemoryAllocator::FreeMemory(Address base, size_t size,
+                                 Executability executable) {
+  // TODO(gc) make code_range part of memory allocator?
+  DCHECK(size_ >= size);
+  size_ -= size;
+
+  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
+
+  if (executable == EXECUTABLE) {
+    DCHECK(size_executable_ >= size);
+    size_executable_ -= size;
+  }
+  if (isolate_->code_range() != NULL &&
+      isolate_->code_range()->contains(static_cast<Address>(base))) {
+    DCHECK(executable == EXECUTABLE);
+    isolate_->code_range()->FreeRawMemory(base, size);
+  } else {
+    DCHECK(executable == NOT_EXECUTABLE || isolate_->code_range() == NULL ||
+           !isolate_->code_range()->valid());
+    bool result = base::VirtualMemory::ReleaseRegion(base, size);
+    USE(result);
+    DCHECK(result);
+  }
+}
+
+
+Address MemoryAllocator::ReserveAlignedMemory(size_t size, size_t alignment,
+                                              base::VirtualMemory* controller) {
+  base::VirtualMemory reservation(size, alignment);
+
+  if (!reservation.IsReserved()) return NULL;
+  size_ += reservation.size();
+  Address base =
+      RoundUp(static_cast<Address>(reservation.address()), alignment);
+  controller->TakeControl(&reservation);
+  return base;
+}
+
+
+Address MemoryAllocator::AllocateAlignedMemory(
+    size_t reserve_size, size_t commit_size, size_t alignment,
+    Executability executable, base::VirtualMemory* controller) {
+  DCHECK(commit_size <= reserve_size);
+  base::VirtualMemory reservation;
+  Address base = ReserveAlignedMemory(reserve_size, alignment, &reservation);
+  if (base == NULL) return NULL;
+
+  if (executable == EXECUTABLE) {
+    if (!CommitExecutableMemory(&reservation, base, commit_size,
+                                reserve_size)) {
+      base = NULL;
+    }
+  } else {
+    if (reservation.Commit(base, commit_size, false)) {
+      UpdateAllocatedSpaceLimits(base, base + commit_size);
+    } else {
+      base = NULL;
+    }
+  }
+
+  if (base == NULL) {
+    // Failed to commit the body. Release the mapping and any partially
+    // committed regions inside it.
+    reservation.Release();
+    return NULL;
+  }
+
+  controller->TakeControl(&reservation);
+  return base;
+}
+
+
+void Page::InitializeAsAnchor(PagedSpace* owner) {
+  set_owner(owner);
+  set_prev_page(this);
+  set_next_page(this);
+}
+
+
+NewSpacePage* NewSpacePage::Initialize(Heap* heap, Address start,
+                                       SemiSpace* semi_space) {
+  Address area_start = start + NewSpacePage::kObjectStartOffset;
+  Address area_end = start + Page::kPageSize;
+
+  MemoryChunk* chunk =
+      MemoryChunk::Initialize(heap, start, Page::kPageSize, area_start,
+                              area_end, NOT_EXECUTABLE, semi_space);
+  chunk->set_next_chunk(NULL);
+  chunk->set_prev_chunk(NULL);
+  chunk->initialize_scan_on_scavenge(true);
+  bool in_to_space = (semi_space->id() != kFromSpace);
+  chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
+                             : MemoryChunk::IN_FROM_SPACE);
+  DCHECK(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
+                                       : MemoryChunk::IN_TO_SPACE));
+  NewSpacePage* page = static_cast<NewSpacePage*>(chunk);
+  heap->incremental_marking()->SetNewSpacePageFlags(page);
+  return page;
+}
+
+
+void NewSpacePage::InitializeAsAnchor(SemiSpace* semi_space) {
+  set_owner(semi_space);
+  set_next_chunk(this);
+  set_prev_chunk(this);
+  // The flags mark this invalid page as not being in new-space.
+  // All real new-space pages will be in new-space.
+  SetFlags(0, ~0);
+}
+
+
+MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
+                                     Address area_start, Address area_end,
+                                     Executability executable, Space* owner) {
+  MemoryChunk* chunk = FromAddress(base);
+
+  DCHECK(base == chunk->address());
+
+  chunk->heap_ = heap;
+  chunk->size_ = size;
+  chunk->area_start_ = area_start;
+  chunk->area_end_ = area_end;
+  chunk->flags_ = 0;
+  chunk->set_owner(owner);
+  chunk->InitializeReservedMemory();
+  chunk->slots_buffer_ = NULL;
+  chunk->skip_list_ = NULL;
+  chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
+  chunk->progress_bar_ = 0;
+  chunk->high_water_mark_ = static_cast<int>(area_start - base);
+  chunk->set_parallel_sweeping(SWEEPING_DONE);
+  chunk->available_in_small_free_list_ = 0;
+  chunk->available_in_medium_free_list_ = 0;
+  chunk->available_in_large_free_list_ = 0;
+  chunk->available_in_huge_free_list_ = 0;
+  chunk->non_available_small_blocks_ = 0;
+  chunk->ResetLiveBytes();
+  Bitmap::Clear(chunk);
+  chunk->initialize_scan_on_scavenge(false);
+  chunk->SetFlag(WAS_SWEPT);
+
+  DCHECK(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset);
+  DCHECK(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset);
+
+  if (executable == EXECUTABLE) {
+    chunk->SetFlag(IS_EXECUTABLE);
+  }
+
+  if (owner == heap->old_data_space()) {
+    chunk->SetFlag(CONTAINS_ONLY_DATA);
+  }
+
+  return chunk;
+}
+
+
+// Commit MemoryChunk area to the requested size.
+bool MemoryChunk::CommitArea(size_t requested) {
+  size_t guard_size =
+      IsFlagSet(IS_EXECUTABLE) ? MemoryAllocator::CodePageGuardSize() : 0;
+  size_t header_size = area_start() - address() - guard_size;
+  size_t commit_size =
+      RoundUp(header_size + requested, base::OS::CommitPageSize());
+  size_t committed_size = RoundUp(header_size + (area_end() - area_start()),
+                                  base::OS::CommitPageSize());
+
+  if (commit_size > committed_size) {
+    // The commit size should be less than or equal to the reserved size.
+    DCHECK(commit_size <= size() - 2 * guard_size);
+    // Append the committed area.
+    Address start = address() + committed_size + guard_size;
+    size_t length = commit_size - committed_size;
+    if (reservation_.IsReserved()) {
+      Executability executable =
+          IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
+      if (!heap()->isolate()->memory_allocator()->CommitMemory(start, length,
+                                                               executable)) {
+        return false;
+      }
+    } else {
+      CodeRange* code_range = heap_->isolate()->code_range();
+      DCHECK(code_range != NULL && code_range->valid() &&
+             IsFlagSet(IS_EXECUTABLE));
+      if (!code_range->CommitRawMemory(start, length)) return false;
+    }
+
+    if (Heap::ShouldZapGarbage()) {
+      heap_->isolate()->memory_allocator()->ZapBlock(start, length);
+    }
+  } else if (commit_size < committed_size) {
+    DCHECK(commit_size > 0);
+    // Shrink the committed area.
+    size_t length = committed_size - commit_size;
+    Address start = address() + committed_size + guard_size - length;
+    if (reservation_.IsReserved()) {
+      if (!reservation_.Uncommit(start, length)) return false;
+    } else {
+      CodeRange* code_range = heap_->isolate()->code_range();
+      DCHECK(code_range != NULL && code_range->valid() &&
+             IsFlagSet(IS_EXECUTABLE));
+      if (!code_range->UncommitRawMemory(start, length)) return false;
+    }
+  }
+
+  area_end_ = area_start_ + requested;
+  return true;
+}
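+
+
+// For example, if the header plus the currently committed area occupies three
+// OS pages and the newly requested size needs five, the two extra pages in
+// [address() + committed_size + guard_size, start + length) are committed
+// (and zapped when Heap::ShouldZapGarbage() is true); shrinking works the
+// same way in reverse via Uncommit.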
+
+
+void MemoryChunk::InsertAfter(MemoryChunk* other) {
+  MemoryChunk* other_next = other->next_chunk();
+
+  set_next_chunk(other_next);
+  set_prev_chunk(other);
+  other_next->set_prev_chunk(this);
+  other->set_next_chunk(this);
+}
+
+
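+// Remove this chunk from its chunk list and clear both of its links.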
+void MemoryChunk::Unlink() {
+  MemoryChunk* next_element = next_chunk();
+  MemoryChunk* prev_element = prev_chunk();
+  next_element->set_prev_chunk(prev_element);
+  prev_element->set_next_chunk(next_element);
+  set_prev_chunk(NULL);
+  set_next_chunk(NULL);
+}
+
+
+MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
+                                            intptr_t commit_area_size,
+                                            Executability executable,
+                                            Space* owner) {
+  DCHECK(commit_area_size <= reserve_area_size);
+
+  size_t chunk_size;
+  Heap* heap = isolate_->heap();
+  Address base = NULL;
+  base::VirtualMemory reservation;
+  Address area_start = NULL;
+  Address area_end = NULL;
+
+  //
+  // MemoryChunk layout:
+  //
+  //             Executable
+  // +----------------------------+<- base aligned with MemoryChunk::kAlignment
+  // |           Header           |
+  // +----------------------------+<- base + CodePageGuardStartOffset
+  // |           Guard            |
+  // +----------------------------+<- area_start_
+  // |           Area             |
+  // +----------------------------+<- area_end_ (area_start + commit_area_size)
+  // |   Committed but not used   |
+  // +----------------------------+<- aligned at OS page boundary
+  // | Reserved but not committed |
+  // +----------------------------+<- aligned at OS page boundary
+  // |           Guard            |
+  // +----------------------------+<- base + chunk_size
+  //
+  //           Non-executable
+  // +----------------------------+<- base aligned with MemoryChunk::kAlignment
+  // |          Header            |
+  // +----------------------------+<- area_start_ (base + kObjectStartOffset)
+  // |           Area             |
+  // +----------------------------+<- area_end_ (area_start + commit_area_size)
+  // |  Committed but not used    |
+  // +----------------------------+<- aligned at OS page boundary
+  // | Reserved but not committed |
+  // +----------------------------+<- base + chunk_size
+  //
+
+  if (executable == EXECUTABLE) {
+    chunk_size = RoundUp(CodePageAreaStartOffset() + reserve_area_size,
+                         base::OS::CommitPageSize()) +
+                 CodePageGuardSize();
+
+    // Check executable memory limit.
+    if (size_executable_ + chunk_size > capacity_executable_) {
+      LOG(isolate_, StringEvent("MemoryAllocator::AllocateRawMemory",
+                                "V8 Executable Allocation capacity exceeded"));
+      return NULL;
+    }
+
+    // Size of header (not executable) plus area (executable).
+    size_t commit_size = RoundUp(CodePageGuardStartOffset() + commit_area_size,
+                                 base::OS::CommitPageSize());
+    // Allocate executable memory either from code range or from the
+    // OS.
+    if (isolate_->code_range() != NULL && isolate_->code_range()->valid()) {
+      base = isolate_->code_range()->AllocateRawMemory(chunk_size, commit_size,
+                                                       &chunk_size);
+      DCHECK(
+          IsAligned(reinterpret_cast<intptr_t>(base), MemoryChunk::kAlignment));
+      if (base == NULL) return NULL;
+      size_ += chunk_size;
+      // Update executable memory size.
+      size_executable_ += chunk_size;
+    } else {
+      base = AllocateAlignedMemory(chunk_size, commit_size,
+                                   MemoryChunk::kAlignment, executable,
+                                   &reservation);
+      if (base == NULL) return NULL;
+      // Update executable memory size.
+      size_executable_ += reservation.size();
+    }
+
+    if (Heap::ShouldZapGarbage()) {
+      ZapBlock(base, CodePageGuardStartOffset());
+      ZapBlock(base + CodePageAreaStartOffset(), commit_area_size);
+    }
+
+    area_start = base + CodePageAreaStartOffset();
+    area_end = area_start + commit_area_size;
+  } else {
+    chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + reserve_area_size,
+                         base::OS::CommitPageSize());
+    size_t commit_size =
+        RoundUp(MemoryChunk::kObjectStartOffset + commit_area_size,
+                base::OS::CommitPageSize());
+    base =
+        AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
+                              executable, &reservation);
+
+    if (base == NULL) return NULL;
+
+    if (Heap::ShouldZapGarbage()) {
+      ZapBlock(base, Page::kObjectStartOffset + commit_area_size);
+    }
+
+    area_start = base + Page::kObjectStartOffset;
+    area_end = area_start + commit_area_size;
+  }
+
+  // Use chunk_size for statistics and callbacks because we assume that they
+  // treat reserved but not-yet committed memory regions of chunks as allocated.
+  isolate_->counters()->memory_allocated()->Increment(
+      static_cast<int>(chunk_size));
+
+  LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size));
+  if (owner != NULL) {
+    ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
+    PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
+  }
+
+  MemoryChunk* result = MemoryChunk::Initialize(
+      heap, base, chunk_size, area_start, area_end, executable, owner);
+  result->set_reserved_memory(&reservation);
+  return result;
+}
+
+
+void Page::ResetFreeListStatistics() {
+  non_available_small_blocks_ = 0;
+  available_in_small_free_list_ = 0;
+  available_in_medium_free_list_ = 0;
+  available_in_large_free_list_ = 0;
+  available_in_huge_free_list_ = 0;
+}
+
+
+Page* MemoryAllocator::AllocatePage(intptr_t size, PagedSpace* owner,
+                                    Executability executable) {
+  MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);
+
+  if (chunk == NULL) return NULL;
+
+  return Page::Initialize(isolate_->heap(), chunk, executable, owner);
+}
+
+
+LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
+                                              Space* owner,
+                                              Executability executable) {
+  MemoryChunk* chunk =
+      AllocateChunk(object_size, object_size, executable, owner);
+  if (chunk == NULL) return NULL;
+  return LargePage::Initialize(isolate_->heap(), chunk);
+}
+
+
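+// Release a chunk: notify allocation callbacks, remember the unmapped page,
+// delete its auxiliary structures and return its memory.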
+void MemoryAllocator::Free(MemoryChunk* chunk) {
+  LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
+  if (chunk->owner() != NULL) {
+    ObjectSpace space =
+        static_cast<ObjectSpace>(1 << chunk->owner()->identity());
+    PerformAllocationCallback(space, kAllocationActionFree, chunk->size());
+  }
+
+  isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk),
+                                         chunk->IsEvacuationCandidate());
+
+  delete chunk->slots_buffer();
+  delete chunk->skip_list();
+
+  base::VirtualMemory* reservation = chunk->reserved_memory();
+  if (reservation->IsReserved()) {
+    FreeMemory(reservation, chunk->executable());
+  } else {
+    FreeMemory(chunk->address(), chunk->size(), chunk->executable());
+  }
+}
+
+
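+// Commit a block of memory, zap it if garbage zapping is enabled and account
+// for it in the memory_allocated counter.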
+bool MemoryAllocator::CommitBlock(Address start, size_t size,
+                                  Executability executable) {
+  if (!CommitMemory(start, size, executable)) return false;
+
+  if (Heap::ShouldZapGarbage()) {
+    ZapBlock(start, size);
+  }
+
+  isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
+  return true;
+}
+
+
+bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
+  if (!base::VirtualMemory::UncommitRegion(start, size)) return false;
+  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
+  return true;
+}
+
+
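+// Overwrite the block with kZapValue, one pointer-sized word at a time, so
+// that stale memory is easier to recognize.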
+void MemoryAllocator::ZapBlock(Address start, size_t size) {
+  for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
+    Memory::Address_at(start + s) = kZapValue;
+  }
+}
+
+
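+// Invoke every registered callback whose space and action filters match.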
+void MemoryAllocator::PerformAllocationCallback(ObjectSpace space,
+                                                AllocationAction action,
+                                                size_t size) {
+  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
+    MemoryAllocationCallbackRegistration registration =
+        memory_allocation_callbacks_[i];
+    if ((registration.space & space) == space &&
+        (registration.action & action) == action)
+      registration.callback(space, action, static_cast<int>(size));
+  }
+}
+
+
+bool MemoryAllocator::MemoryAllocationCallbackRegistered(
+    MemoryAllocationCallback callback) {
+  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
+    if (memory_allocation_callbacks_[i].callback == callback) return true;
+  }
+  return false;
+}
+
+
+void MemoryAllocator::AddMemoryAllocationCallback(
+    MemoryAllocationCallback callback, ObjectSpace space,
+    AllocationAction action) {
+  DCHECK(callback != NULL);
+  MemoryAllocationCallbackRegistration registration(callback, space, action);
+  DCHECK(!MemoryAllocator::MemoryAllocationCallbackRegistered(callback));
+  return memory_allocation_callbacks_.Add(registration);
+}
+
+
+void MemoryAllocator::RemoveMemoryAllocationCallback(
+    MemoryAllocationCallback callback) {
+  DCHECK(callback != NULL);
+  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
+    if (memory_allocation_callbacks_[i].callback == callback) {
+      memory_allocation_callbacks_.Remove(i);
+      return;
+    }
+  }
+  UNREACHABLE();
+}
+
+
+#ifdef DEBUG
+void MemoryAllocator::ReportStatistics() {
+  float pct = static_cast<float>(capacity_ - size_) / capacity_;
+  PrintF("  capacity: %" V8_PTR_PREFIX
+         "d"
+         ", used: %" V8_PTR_PREFIX
+         "d"
+         ", available: %%%d\n\n",
+         capacity_, size_, static_cast<int>(pct * 100));
+}
+#endif
+
+
+int MemoryAllocator::CodePageGuardStartOffset() {
+  // We are guarding code pages: the first OS page after the header
+  // will be protected as non-writable.
+  return RoundUp(Page::kObjectStartOffset, base::OS::CommitPageSize());
+}
+
+
+int MemoryAllocator::CodePageGuardSize() {
+  return static_cast<int>(base::OS::CommitPageSize());
+}
+
+
+int MemoryAllocator::CodePageAreaStartOffset() {
+  // We are guarding code pages: the first OS page after the header
+  // will be protected as non-writable.
+  return CodePageGuardStartOffset() + CodePageGuardSize();
+}
+
+
+int MemoryAllocator::CodePageAreaEndOffset() {
+  // We are guarding code pages: the last OS page will be protected as
+  // non-writable.
+  return Page::kPageSize - static_cast<int>(base::OS::CommitPageSize());
+}
+
+
+bool MemoryAllocator::CommitExecutableMemory(base::VirtualMemory* vm,
+                                             Address start, size_t commit_size,
+                                             size_t reserved_size) {
+  // Commit page header (not executable).
+  if (!vm->Commit(start, CodePageGuardStartOffset(), false)) {
+    return false;
+  }
+
+  // Create guard page after the header.
+  if (!vm->Guard(start + CodePageGuardStartOffset())) {
+    return false;
+  }
+
+  // Commit page body (executable).
+  if (!vm->Commit(start + CodePageAreaStartOffset(),
+                  commit_size - CodePageGuardStartOffset(), true)) {
+    return false;
+  }
+
+  // Create guard page before the end.
+  if (!vm->Guard(start + reserved_size - CodePageGuardSize())) {
+    return false;
+  }
+
+  UpdateAllocatedSpaceLimits(start, start + CodePageAreaStartOffset() +
+                                        commit_size -
+                                        CodePageGuardStartOffset());
+  return true;
+}
+
+
+// -----------------------------------------------------------------------------
+// MemoryChunk implementation
+
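+// For old-space pages that have not been swept yet, the live-bytes delta
+// must also be reflected in the owner's unswept free bytes.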
+void MemoryChunk::IncrementLiveBytesFromMutator(Address address, int by) {
+  MemoryChunk* chunk = MemoryChunk::FromAddress(address);
+  if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->WasSwept()) {
+    static_cast<PagedSpace*>(chunk->owner())->IncrementUnsweptFreeBytes(-by);
+  }
+  chunk->IncrementLiveBytes(by);
+}
+
+
+// -----------------------------------------------------------------------------
+// PagedSpace implementation
+
+PagedSpace::PagedSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id,
+                       Executability executable)
+    : Space(heap, id, executable),
+      free_list_(this),
+      unswept_free_bytes_(0),
+      end_of_unswept_pages_(NULL),
+      emergency_memory_(NULL) {
+  if (id == CODE_SPACE) {
+    area_size_ = heap->isolate()->memory_allocator()->CodePageAreaSize();
+  } else {
+    area_size_ = Page::kPageSize - Page::kObjectStartOffset;
+  }
+  max_capacity_ =
+      (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize) * AreaSize();
+  accounting_stats_.Clear();
+
+  allocation_info_.set_top(NULL);
+  allocation_info_.set_limit(NULL);
+
+  anchor_.InitializeAsAnchor(this);
+}
+
+
+bool PagedSpace::SetUp() { return true; }
+
+
+bool PagedSpace::HasBeenSetUp() { return true; }
+
+
+void PagedSpace::TearDown() {
+  PageIterator iterator(this);
+  while (iterator.has_next()) {
+    heap()->isolate()->memory_allocator()->Free(iterator.next());
+  }
+  anchor_.set_next_page(&anchor_);
+  anchor_.set_prev_page(&anchor_);
+  accounting_stats_.Clear();
+}
+
+
+size_t PagedSpace::CommittedPhysicalMemory() {
+  if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
+  MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
+  size_t size = 0;
+  PageIterator it(this);
+  while (it.has_next()) {
+    size += it.next()->CommittedPhysicalMemory();
+  }
+  return size;
+}
+
+
+Object* PagedSpace::FindObject(Address addr) {
+  // Note: this function can only be called on iterable spaces.
+  DCHECK(!heap()->mark_compact_collector()->in_use());
+
+  if (!Contains(addr)) return Smi::FromInt(0);  // Signaling not found.
+
+  Page* p = Page::FromAddress(addr);
+  HeapObjectIterator it(p, NULL);
+  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
+    Address cur = obj->address();
+    Address next = cur + obj->Size();
+    if ((cur <= addr) && (addr < next)) return obj;
+  }
+
+  UNREACHABLE();
+  return Smi::FromInt(0);
+}
+
+
+bool PagedSpace::CanExpand() {
+  DCHECK(max_capacity_ % AreaSize() == 0);
+
+  if (Capacity() == max_capacity_) return false;
+
+  DCHECK(Capacity() < max_capacity_);
+
+  // Are we going to exceed capacity for this space?
+  if ((Capacity() + Page::kPageSize) > max_capacity_) return false;
+
+  return true;
+}
+
+
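+// Allocate one more page for this space and link it in. The very first page
+// may be smaller than a regular page area.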
+bool PagedSpace::Expand() {
+  if (!CanExpand()) return false;
+
+  intptr_t size = AreaSize();
+
+  if (anchor_.next_page() == &anchor_) {
+    size = SizeOfFirstPage();
+  }
+
+  Page* p = heap()->isolate()->memory_allocator()->AllocatePage(size, this,
+                                                                executable());
+  if (p == NULL) return false;
+
+  DCHECK(Capacity() <= max_capacity_);
+
+  p->InsertAfter(anchor_.prev_page());
+
+  return true;
+}
+
+
+intptr_t PagedSpace::SizeOfFirstPage() {
+  // If using an ool constant pool then transfer the constant pool allowance
+  // from the code space to the old pointer space.
+  static const int constant_pool_delta = FLAG_enable_ool_constant_pool ? 48 : 0;
+  int size = 0;
+  switch (identity()) {
+    case OLD_POINTER_SPACE:
+      size = (112 + constant_pool_delta) * kPointerSize * KB;
+      break;
+    case OLD_DATA_SPACE:
+      size = 192 * KB;
+      break;
+    case MAP_SPACE:
+      size = 16 * kPointerSize * KB;
+      break;
+    case CELL_SPACE:
+      size = 16 * kPointerSize * KB;
+      break;
+    case PROPERTY_CELL_SPACE:
+      size = 8 * kPointerSize * KB;
+      break;
+    case CODE_SPACE: {
+      CodeRange* code_range = heap()->isolate()->code_range();
+      if (code_range != NULL && code_range->valid()) {
+        // When code range exists, code pages are allocated in a special way
+        // (from the reserved code range). That part of the code is not yet
+        // upgraded to handle small pages.
+        size = AreaSize();
+      } else {
+        size = RoundUp((480 - constant_pool_delta) * KB *
+                           FullCodeGenerator::kBootCodeSizeMultiplier / 100,
+                       kPointerSize);
+      }
+      break;
+    }
+    default:
+      UNREACHABLE();
+  }
+  return Min(size, AreaSize());
+}
+
+
+int PagedSpace::CountTotalPages() {
+  PageIterator it(this);
+  int count = 0;
+  while (it.has_next()) {
+    it.next();
+    count++;
+  }
+  return count;
+}
+
+
+void PagedSpace::ObtainFreeListStatistics(Page* page, SizeStats* sizes) {
+  sizes->huge_size_ = page->available_in_huge_free_list();
+  sizes->small_size_ = page->available_in_small_free_list();
+  sizes->medium_size_ = page->available_in_medium_free_list();
+  sizes->large_size_ = page->available_in_large_free_list();
+}
+
+
+void PagedSpace::ResetFreeListStatistics() {
+  PageIterator page_iterator(this);
+  while (page_iterator.has_next()) {
+    Page* page = page_iterator.next();
+    page->ResetFreeListStatistics();
+  }
+}
+
+
+void PagedSpace::IncreaseCapacity(int size) {
+  accounting_stats_.ExpandSpace(size);
+}
+
+
+void PagedSpace::ReleasePage(Page* page) {
+  DCHECK(page->LiveBytes() == 0);
+  DCHECK(AreaSize() == page->area_size());
+
+  if (page->WasSwept()) {
+    intptr_t size = free_list_.EvictFreeListItems(page);
+    accounting_stats_.AllocateBytes(size);
+    DCHECK_EQ(AreaSize(), static_cast<int>(size));
+  } else {
+    DecreaseUnsweptFreeBytes(page);
+  }
+
+  if (page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE)) {
+    heap()->decrement_scan_on_scavenge_pages();
+    page->ClearFlag(MemoryChunk::SCAN_ON_SCAVENGE);
+  }
+
+  DCHECK(!free_list_.ContainsPageFreeListItems(page));
+
+  if (Page::FromAllocationTop(allocation_info_.top()) == page) {
+    allocation_info_.set_top(NULL);
+    allocation_info_.set_limit(NULL);
+  }
+
+  page->Unlink();
+  if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) {
+    heap()->isolate()->memory_allocator()->Free(page);
+  } else {
+    heap()->QueueMemoryChunkForFree(page);
+  }
+
+  DCHECK(Capacity() > 0);
+  accounting_stats_.ShrinkSpace(AreaSize());
+}
+
+
+void PagedSpace::CreateEmergencyMemory() {
+  emergency_memory_ = heap()->isolate()->memory_allocator()->AllocateChunk(
+      AreaSize(), AreaSize(), executable(), this);
+}
+
+
+void PagedSpace::FreeEmergencyMemory() {
+  Page* page = static_cast<Page*>(emergency_memory_);
+  DCHECK(page->LiveBytes() == 0);
+  DCHECK(AreaSize() == page->area_size());
+  DCHECK(!free_list_.ContainsPageFreeListItems(page));
+  heap()->isolate()->memory_allocator()->Free(page);
+  emergency_memory_ = NULL;
+}
+
+
+void PagedSpace::UseEmergencyMemory() {
+  Page* page = Page::Initialize(heap(), emergency_memory_, executable(), this);
+  page->InsertAfter(anchor_.prev_page());
+  emergency_memory_ = NULL;
+}
+
+
+#ifdef DEBUG
+void PagedSpace::Print() {}
+#endif
+
+#ifdef VERIFY_HEAP
+void PagedSpace::Verify(ObjectVisitor* visitor) {
+  bool allocation_pointer_found_in_space =
+      (allocation_info_.top() == allocation_info_.limit());
+  PageIterator page_iterator(this);
+  while (page_iterator.has_next()) {
+    Page* page = page_iterator.next();
+    CHECK(page->owner() == this);
+    if (page == Page::FromAllocationTop(allocation_info_.top())) {
+      allocation_pointer_found_in_space = true;
+    }
+    CHECK(page->WasSwept());
+    HeapObjectIterator it(page, NULL);
+    Address end_of_previous_object = page->area_start();
+    Address top = page->area_end();
+    int black_size = 0;
+    for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
+      CHECK(end_of_previous_object <= object->address());
+
+      // The first word should be a map, and we expect all map pointers to
+      // be in map space.
+      Map* map = object->map();
+      CHECK(map->IsMap());
+      CHECK(heap()->map_space()->Contains(map));
+
+      // Perform space-specific object verification.
+      VerifyObject(object);
+
+      // The object itself should look OK.
+      object->ObjectVerify();
+
+      // All the interior pointers should be contained in the heap.
+      int size = object->Size();
+      object->IterateBody(map->instance_type(), size, visitor);
+      if (Marking::IsBlack(Marking::MarkBitFrom(object))) {
+        black_size += size;
+      }
+
+      CHECK(object->address() + size <= top);
+      end_of_previous_object = object->address() + size;
+    }
+    CHECK_LE(black_size, page->LiveBytes());
+  }
+  CHECK(allocation_pointer_found_in_space);
+}
+#endif  // VERIFY_HEAP
+
+// -----------------------------------------------------------------------------
+// NewSpace implementation
+
+
+bool NewSpace::SetUp(int reserved_semispace_capacity,
+                     int maximum_semispace_capacity) {
+  // Set up new space based on the preallocated memory block defined by
+  // start and size. The provided space is divided into two semi-spaces.
+  // To support fast containment testing in the new space, the size of
+  // this chunk must be a power of two and it must be aligned to its size.
+  int initial_semispace_capacity = heap()->InitialSemiSpaceSize();
+
+  size_t size = 2 * reserved_semispace_capacity;
+  Address base = heap()->isolate()->memory_allocator()->ReserveAlignedMemory(
+      size, size, &reservation_);
+  if (base == NULL) return false;
+
+  chunk_base_ = base;
+  chunk_size_ = static_cast<uintptr_t>(size);
+  LOG(heap()->isolate(), NewEvent("InitialChunk", chunk_base_, chunk_size_));
+
+  DCHECK(initial_semispace_capacity <= maximum_semispace_capacity);
+  DCHECK(base::bits::IsPowerOfTwo32(maximum_semispace_capacity));
+
+  // Allocate and set up the histogram arrays if necessary.
+  allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
+  promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
+
+#define SET_NAME(name)                        \
+  allocated_histogram_[name].set_name(#name); \
+  promoted_histogram_[name].set_name(#name);
+  INSTANCE_TYPE_LIST(SET_NAME)
+#undef SET_NAME
+
+  DCHECK(reserved_semispace_capacity == heap()->ReservedSemiSpaceSize());
+  DCHECK(static_cast<intptr_t>(chunk_size_) >=
+         2 * heap()->ReservedSemiSpaceSize());
+  DCHECK(IsAddressAligned(chunk_base_, 2 * reserved_semispace_capacity, 0));
+
+  to_space_.SetUp(chunk_base_, initial_semispace_capacity,
+                  maximum_semispace_capacity);
+  from_space_.SetUp(chunk_base_ + reserved_semispace_capacity,
+                    initial_semispace_capacity, maximum_semispace_capacity);
+  if (!to_space_.Commit()) {
+    return false;
+  }
+  DCHECK(!from_space_.is_committed());  // No need to use memory yet.
+
+  start_ = chunk_base_;
+  address_mask_ = ~(2 * reserved_semispace_capacity - 1);
+  object_mask_ = address_mask_ | kHeapObjectTagMask;
+  object_expected_ = reinterpret_cast<uintptr_t>(start_) | kHeapObjectTag;
+
+  ResetAllocationInfo();
+
+  return true;
+}
+
+
+void NewSpace::TearDown() {
+  if (allocated_histogram_) {
+    DeleteArray(allocated_histogram_);
+    allocated_histogram_ = NULL;
+  }
+  if (promoted_histogram_) {
+    DeleteArray(promoted_histogram_);
+    promoted_histogram_ = NULL;
+  }
+
+  start_ = NULL;
+  allocation_info_.set_top(NULL);
+  allocation_info_.set_limit(NULL);
+
+  to_space_.TearDown();
+  from_space_.TearDown();
+
+  LOG(heap()->isolate(), DeleteEvent("InitialChunk", chunk_base_));
+
+  DCHECK(reservation_.IsReserved());
+  heap()->isolate()->memory_allocator()->FreeMemory(&reservation_,
+                                                    NOT_EXECUTABLE);
+  chunk_base_ = NULL;
+  chunk_size_ = 0;
+}
+
+
+void NewSpace::Flip() { SemiSpace::Swap(&from_space_, &to_space_); }
+
+
+void NewSpace::Grow() {
+  // Double the semispace size but only up to maximum capacity.
+  DCHECK(TotalCapacity() < MaximumCapacity());
+  int new_capacity =
+      Min(MaximumCapacity(), 2 * static_cast<int>(TotalCapacity()));
+  if (to_space_.GrowTo(new_capacity)) {
+    // Only grow from space if we managed to grow to-space.
+    if (!from_space_.GrowTo(new_capacity)) {
+      // If we managed to grow to-space but couldn't grow from-space,
+      // attempt to shrink to-space.
+      if (!to_space_.ShrinkTo(from_space_.TotalCapacity())) {
+        // We are in an inconsistent state because we could not
+        // commit/uncommit memory from new space.
+        V8::FatalProcessOutOfMemory("Failed to grow new space.");
+      }
+    }
+  }
+  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
+}
+
+
+void NewSpace::Shrink() {
+  int new_capacity = Max(InitialTotalCapacity(), 2 * SizeAsInt());
+  int rounded_new_capacity = RoundUp(new_capacity, Page::kPageSize);
+  if (rounded_new_capacity < TotalCapacity() &&
+      to_space_.ShrinkTo(rounded_new_capacity)) {
+    // Only shrink from-space if we managed to shrink to-space.
+    from_space_.Reset();
+    if (!from_space_.ShrinkTo(rounded_new_capacity)) {
+      // If we managed to shrink to-space but couldn't shrink from
+      // space, attempt to grow to-space again.
+      if (!to_space_.GrowTo(from_space_.TotalCapacity())) {
+        // We are in an inconsistent state because we could not
+        // commit/uncommit memory from new space.
+        V8::FatalProcessOutOfMemory("Failed to shrink new space.");
+      }
+    }
+  }
+  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
+}
+
+
+void NewSpace::UpdateAllocationInfo() {
+  MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
+  allocation_info_.set_top(to_space_.page_low());
+  allocation_info_.set_limit(to_space_.page_high());
+  UpdateInlineAllocationLimit(0);
+  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
+}
+
+
+void NewSpace::ResetAllocationInfo() {
+  to_space_.Reset();
+  UpdateAllocationInfo();
+  pages_used_ = 0;
+  // Clear all mark-bits in the to-space.
+  NewSpacePageIterator it(&to_space_);
+  while (it.has_next()) {
+    Bitmap::Clear(it.next());
+  }
+}
+
+
+void NewSpace::UpdateInlineAllocationLimit(int size_in_bytes) {
+  if (heap()->inline_allocation_disabled()) {
+    // Lowest limit when linear allocation was disabled.
+    Address high = to_space_.page_high();
+    Address new_top = allocation_info_.top() + size_in_bytes;
+    allocation_info_.set_limit(Min(new_top, high));
+  } else if (inline_allocation_limit_step() == 0) {
+    // Normal limit is the end of the current page.
+    allocation_info_.set_limit(to_space_.page_high());
+  } else {
+    // Lower limit during incremental marking.
+    Address high = to_space_.page_high();
+    Address new_top = allocation_info_.top() + size_in_bytes;
+    Address new_limit = new_top + inline_allocation_limit_step_;
+    allocation_info_.set_limit(Min(new_limit, high));
+  }
+  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
+}
+
+
+bool NewSpace::AddFreshPage() {
+  Address top = allocation_info_.top();
+  if (NewSpacePage::IsAtStart(top)) {
+    // The current page is already empty. Don't try to make another.
+
+    // We should only get here if someone asks to allocate more
+    // than what can be stored in a single page.
+    // TODO(gc): Change the limit on new-space allocation to prevent this
+    // from happening (all such allocations should go directly to LOSpace).
+    return false;
+  }
+  if (!to_space_.AdvancePage()) {
+    // Failed to get a new page in to-space.
+    return false;
+  }
+
+  // Clear remainder of current page.
+  Address limit = NewSpacePage::FromLimit(top)->area_end();
+  if (heap()->gc_state() == Heap::SCAVENGE) {
+    heap()->promotion_queue()->SetNewLimit(limit);
+  }
+
+  int remaining_in_page = static_cast<int>(limit - top);
+  heap()->CreateFillerObjectAt(top, remaining_in_page);
+  pages_used_++;
+  UpdateAllocationInfo();
+
+  return true;
+}
+
+
+AllocationResult NewSpace::SlowAllocateRaw(int size_in_bytes) {
+  Address old_top = allocation_info_.top();
+  Address high = to_space_.page_high();
+  if (allocation_info_.limit() < high) {
+    // Either the limit has been lowered because linear allocation was disabled
+    // or because incremental marking wants to get a chance to do a step. Set
+    // the new limit accordingly.
+    Address new_top = old_top + size_in_bytes;
+    int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
+    heap()->incremental_marking()->Step(bytes_allocated,
+                                        IncrementalMarking::GC_VIA_STACK_GUARD);
+    UpdateInlineAllocationLimit(size_in_bytes);
+    top_on_previous_step_ = new_top;
+    return AllocateRaw(size_in_bytes);
+  } else if (AddFreshPage()) {
+    // Switched to new page. Try allocating again.
+    int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_);
+    heap()->incremental_marking()->Step(bytes_allocated,
+                                        IncrementalMarking::GC_VIA_STACK_GUARD);
+    top_on_previous_step_ = to_space_.page_low();
+    return AllocateRaw(size_in_bytes);
+  } else {
+    return AllocationResult::Retry();
+  }
+}
+
+
+#ifdef VERIFY_HEAP
+// We do not use the SemiSpaceIterator because verification doesn't assume
+// that it works (it depends on the invariants we are checking).
+void NewSpace::Verify() {
+  // The allocation pointer should be in the space or at the very end.
+  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
+
+  // There should be objects packed in from the low address up to the
+  // allocation pointer.
+  Address current = to_space_.first_page()->area_start();
+  CHECK_EQ(current, to_space_.space_start());
+
+  while (current != top()) {
+    if (!NewSpacePage::IsAtEnd(current)) {
+      // The allocation pointer should not be in the middle of an object.
+      CHECK(!NewSpacePage::FromLimit(current)->ContainsLimit(top()) ||
+            current < top());
+
+      HeapObject* object = HeapObject::FromAddress(current);
+
+      // The first word should be a map, and we expect all map pointers to
+      // be in map space.
+      Map* map = object->map();
+      CHECK(map->IsMap());
+      CHECK(heap()->map_space()->Contains(map));
+
+      // The object should not be code or a map.
+      CHECK(!object->IsMap());
+      CHECK(!object->IsCode());
+
+      // The object itself should look OK.
+      object->ObjectVerify();
+
+      // All the interior pointers should be contained in the heap.
+      VerifyPointersVisitor visitor;
+      int size = object->Size();
+      object->IterateBody(map->instance_type(), size, &visitor);
+
+      current += size;
+    } else {
+      // At end of page, switch to next page.
+      NewSpacePage* page = NewSpacePage::FromLimit(current)->next_page();
+      // Next page should be valid.
+      CHECK(!page->is_anchor());
+      current = page->area_start();
+    }
+  }
+
+  // Check semi-spaces.
+  CHECK_EQ(from_space_.id(), kFromSpace);
+  CHECK_EQ(to_space_.id(), kToSpace);
+  from_space_.Verify();
+  to_space_.Verify();
+}
+#endif
+
+// -----------------------------------------------------------------------------
+// SemiSpace implementation
+
+void SemiSpace::SetUp(Address start, int initial_capacity,
+                      int maximum_capacity) {
+  // Creates a space in the young generation. The constructor does not
+  // allocate memory from the OS.  A SemiSpace is given a contiguous chunk of
+  // memory of size 'capacity' when set up, and does not grow or shrink
+  // otherwise.  In the mark-compact collector, the memory region of the from
+  // space is used as the marking stack. It requires contiguous memory
+  // addresses.
+  DCHECK(maximum_capacity >= Page::kPageSize);
+  initial_total_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
+  total_capacity_ = initial_capacity;
+  maximum_total_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
+  maximum_committed_ = 0;
+  committed_ = false;
+  start_ = start;
+  address_mask_ = ~(maximum_capacity - 1);
+  object_mask_ = address_mask_ | kHeapObjectTagMask;
+  object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
+  age_mark_ = start_;
+}
+
+
+void SemiSpace::TearDown() {
+  start_ = NULL;
+  total_capacity_ = 0;
+}
+
+
+bool SemiSpace::Commit() {
+  DCHECK(!is_committed());
+  int pages = total_capacity_ / Page::kPageSize;
+  if (!heap()->isolate()->memory_allocator()->CommitBlock(
+          start_, total_capacity_, executable())) {
+    return false;
+  }
+
+  NewSpacePage* current = anchor();
+  for (int i = 0; i < pages; i++) {
+    NewSpacePage* new_page =
+        NewSpacePage::Initialize(heap(), start_ + i * Page::kPageSize, this);
+    new_page->InsertAfter(current);
+    current = new_page;
+  }
+
+  SetCapacity(total_capacity_);
+  committed_ = true;
+  Reset();
+  return true;
+}
+
+
+bool SemiSpace::Uncommit() {
+  DCHECK(is_committed());
+  Address start = start_ + maximum_total_capacity_ - total_capacity_;
+  if (!heap()->isolate()->memory_allocator()->UncommitBlock(start,
+                                                            total_capacity_)) {
+    return false;
+  }
+  anchor()->set_next_page(anchor());
+  anchor()->set_prev_page(anchor());
+
+  committed_ = false;
+  return true;
+}
+
+
+size_t SemiSpace::CommittedPhysicalMemory() {
+  if (!is_committed()) return 0;
+  size_t size = 0;
+  NewSpacePageIterator it(this);
+  while (it.has_next()) {
+    size += it.next()->CommittedPhysicalMemory();
+  }
+  return size;
+}
+
+
+bool SemiSpace::GrowTo(int new_capacity) {
+  if (!is_committed()) {
+    if (!Commit()) return false;
+  }
+  DCHECK((new_capacity & Page::kPageAlignmentMask) == 0);
+  DCHECK(new_capacity <= maximum_total_capacity_);
+  DCHECK(new_capacity > total_capacity_);
+  int pages_before = total_capacity_ / Page::kPageSize;
+  int pages_after = new_capacity / Page::kPageSize;
+
+  size_t delta = new_capacity - total_capacity_;
+
+  DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
+  if (!heap()->isolate()->memory_allocator()->CommitBlock(
+          start_ + total_capacity_, delta, executable())) {
+    return false;
+  }
+  SetCapacity(new_capacity);
+  NewSpacePage* last_page = anchor()->prev_page();
+  DCHECK(last_page != anchor());
+  for (int i = pages_before; i < pages_after; i++) {
+    Address page_address = start_ + i * Page::kPageSize;
+    NewSpacePage* new_page =
+        NewSpacePage::Initialize(heap(), page_address, this);
+    new_page->InsertAfter(last_page);
+    Bitmap::Clear(new_page);
+    // Duplicate the flags that were set on the old page.
+    new_page->SetFlags(last_page->GetFlags(),
+                       NewSpacePage::kCopyOnFlipFlagsMask);
+    last_page = new_page;
+  }
+  return true;
+}
+
+
+bool SemiSpace::ShrinkTo(int new_capacity) {
+  DCHECK((new_capacity & Page::kPageAlignmentMask) == 0);
+  DCHECK(new_capacity >= initial_total_capacity_);
+  DCHECK(new_capacity < total_capacity_);
+  if (is_committed()) {
+    size_t delta = total_capacity_ - new_capacity;
+    DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
+
+    MemoryAllocator* allocator = heap()->isolate()->memory_allocator();
+    if (!allocator->UncommitBlock(start_ + new_capacity, delta)) {
+      return false;
+    }
+
+    int pages_after = new_capacity / Page::kPageSize;
+    NewSpacePage* new_last_page =
+        NewSpacePage::FromAddress(start_ + (pages_after - 1) * Page::kPageSize);
+    new_last_page->set_next_page(anchor());
+    anchor()->set_prev_page(new_last_page);
+    DCHECK((current_page_ >= first_page()) && (current_page_ <= new_last_page));
+  }
+
+  SetCapacity(new_capacity);
+
+  return true;
+}
+
+
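+// After a Swap the anchor's address has changed: re-own all pages, restore
+// the anchor links and update the to/from-space flags on every page.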
+void SemiSpace::FlipPages(intptr_t flags, intptr_t mask) {
+  anchor_.set_owner(this);
+  // Fix up back-pointers to the anchor. The anchor's address changes
+  // when we swap.
+  anchor_.prev_page()->set_next_page(&anchor_);
+  anchor_.next_page()->set_prev_page(&anchor_);
+
+  bool becomes_to_space = (id_ == kFromSpace);
+  id_ = becomes_to_space ? kToSpace : kFromSpace;
+  NewSpacePage* page = anchor_.next_page();
+  while (page != &anchor_) {
+    page->set_owner(this);
+    page->SetFlags(flags, mask);
+    if (becomes_to_space) {
+      page->ClearFlag(MemoryChunk::IN_FROM_SPACE);
+      page->SetFlag(MemoryChunk::IN_TO_SPACE);
+      page->ClearFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
+      page->ResetLiveBytes();
+    } else {
+      page->SetFlag(MemoryChunk::IN_FROM_SPACE);
+      page->ClearFlag(MemoryChunk::IN_TO_SPACE);
+    }
+    DCHECK(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
+    DCHECK(page->IsFlagSet(MemoryChunk::IN_TO_SPACE) ||
+           page->IsFlagSet(MemoryChunk::IN_FROM_SPACE));
+    page = page->next_page();
+  }
+}
+
+
+void SemiSpace::Reset() {
+  DCHECK(anchor_.next_page() != &anchor_);
+  current_page_ = anchor_.next_page();
+}
+
+
+void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
+  // We won't be swapping semispaces without data in them.
+  DCHECK(from->anchor_.next_page() != &from->anchor_);
+  DCHECK(to->anchor_.next_page() != &to->anchor_);
+
+  // Swap bits.
+  SemiSpace tmp = *from;
+  *from = *to;
+  *to = tmp;
+
+  // Fix up back-pointers to the page list anchor now that its address
+  // has changed.
+  // Swap to/from-space bits on pages.
+  // Copy GC flags from old active space (from-space) to new (to-space).
+  intptr_t flags = from->current_page()->GetFlags();
+  to->FlipPages(flags, NewSpacePage::kCopyOnFlipFlagsMask);
+
+  from->FlipPages(0, 0);
+}
+
+
+void SemiSpace::SetCapacity(int new_capacity) {
+  total_capacity_ = new_capacity;
+  if (total_capacity_ > maximum_committed_) {
+    maximum_committed_ = total_capacity_;
+  }
+}
+
+
+void SemiSpace::set_age_mark(Address mark) {
+  DCHECK(NewSpacePage::FromLimit(mark)->semi_space() == this);
+  age_mark_ = mark;
+  // Mark all pages up to the one containing mark.
+  NewSpacePageIterator it(space_start(), mark);
+  while (it.has_next()) {
+    it.next()->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
+  }
+}
+
+
+#ifdef DEBUG
+void SemiSpace::Print() {}
+#endif
+
+#ifdef VERIFY_HEAP
+void SemiSpace::Verify() {
+  bool is_from_space = (id_ == kFromSpace);
+  NewSpacePage* page = anchor_.next_page();
+  CHECK(anchor_.semi_space() == this);
+  while (page != &anchor_) {
+    CHECK(page->semi_space() == this);
+    CHECK(page->InNewSpace());
+    CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE
+                                        : MemoryChunk::IN_TO_SPACE));
+    CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::IN_TO_SPACE
+                                         : MemoryChunk::IN_FROM_SPACE));
+    CHECK(page->IsFlagSet(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING));
+    if (!is_from_space) {
+      // The pointers-from-here-are-interesting flag isn't updated dynamically
+      // on from-space pages, so it might be out of sync with the marking state.
+      if (page->heap()->incremental_marking()->IsMarking()) {
+        CHECK(page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
+      } else {
+        CHECK(
+            !page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
+      }
+      // TODO(gc): Check that the live_bytes_count_ field matches the
+      // black marking on the page (if we make it match in new-space).
+    }
+    CHECK(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
+    CHECK(page->prev_page()->next_page() == page);
+    page = page->next_page();
+  }
+}
+#endif
+
+#ifdef DEBUG
+void SemiSpace::AssertValidRange(Address start, Address end) {
+  // Addresses must belong to the same semi-space.
+  NewSpacePage* page = NewSpacePage::FromLimit(start);
+  NewSpacePage* end_page = NewSpacePage::FromLimit(end);
+  SemiSpace* space = page->semi_space();
+  CHECK_EQ(space, end_page->semi_space());
+  // Start address is before end address, either on same page,
+  // or end address is on a later page in the linked list of
+  // semi-space pages.
+  if (page == end_page) {
+    CHECK(start <= end);
+  } else {
+    while (page != end_page) {
+      page = page->next_page();
+      CHECK_NE(page, space->anchor());
+    }
+  }
+}
+#endif
+
+
+// -----------------------------------------------------------------------------
+// SemiSpaceIterator implementation.
+SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
+  Initialize(space->bottom(), space->top(), NULL);
+}
+
+
+SemiSpaceIterator::SemiSpaceIterator(NewSpace* space,
+                                     HeapObjectCallback size_func) {
+  Initialize(space->bottom(), space->top(), size_func);
+}
+
+
+SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, Address start) {
+  Initialize(start, space->top(), NULL);
+}
+
+
+SemiSpaceIterator::SemiSpaceIterator(Address from, Address to) {
+  Initialize(from, to, NULL);
+}
+
+
+void SemiSpaceIterator::Initialize(Address start, Address end,
+                                   HeapObjectCallback size_func) {
+  SemiSpace::AssertValidRange(start, end);
+  current_ = start;
+  limit_ = end;
+  size_func_ = size_func;
+}
+
+
+#ifdef DEBUG
+// heap_histograms is shared, always clear it before using it.
+static void ClearHistograms(Isolate* isolate) {
+// We reset the name each time, though it hasn't changed.
+#define DEF_TYPE_NAME(name) isolate->heap_histograms()[name].set_name(#name);
+  INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
+#undef DEF_TYPE_NAME
+
+#define CLEAR_HISTOGRAM(name) isolate->heap_histograms()[name].clear();
+  INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM)
+#undef CLEAR_HISTOGRAM
+
+  isolate->js_spill_information()->Clear();
+}
+
+
+static void ClearCodeKindStatistics(int* code_kind_statistics) {
+  for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
+    code_kind_statistics[i] = 0;
+  }
+}
+
+
+static void ReportCodeKindStatistics(int* code_kind_statistics) {
+  PrintF("\n   Code kind histograms: \n");
+  for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
+    if (code_kind_statistics[i] > 0) {
+      PrintF("     %-20s: %10d bytes\n",
+             Code::Kind2String(static_cast<Code::Kind>(i)),
+             code_kind_statistics[i]);
+    }
+  }
+  PrintF("\n");
+}
+
+
+static int CollectHistogramInfo(HeapObject* obj) {
+  Isolate* isolate = obj->GetIsolate();
+  InstanceType type = obj->map()->instance_type();
+  DCHECK(0 <= type && type <= LAST_TYPE);
+  DCHECK(isolate->heap_histograms()[type].name() != NULL);
+  isolate->heap_histograms()[type].increment_number(1);
+  isolate->heap_histograms()[type].increment_bytes(obj->Size());
+
+  if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) {
+    JSObject::cast(obj)
+        ->IncrementSpillStatistics(isolate->js_spill_information());
+  }
+
+  return obj->Size();
+}
+
+
+static void ReportHistogram(Isolate* isolate, bool print_spill) {
+  PrintF("\n  Object Histogram:\n");
+  for (int i = 0; i <= LAST_TYPE; i++) {
+    if (isolate->heap_histograms()[i].number() > 0) {
+      PrintF("    %-34s%10d (%10d bytes)\n",
+             isolate->heap_histograms()[i].name(),
+             isolate->heap_histograms()[i].number(),
+             isolate->heap_histograms()[i].bytes());
+    }
+  }
+  PrintF("\n");
+
+  // Summarize string types.
+  int string_number = 0;
+  int string_bytes = 0;
+#define INCREMENT(type, size, name, camel_name)               \
+  string_number += isolate->heap_histograms()[type].number(); \
+  string_bytes += isolate->heap_histograms()[type].bytes();
+  STRING_TYPE_LIST(INCREMENT)
+#undef INCREMENT
+  if (string_number > 0) {
+    PrintF("    %-34s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number,
+           string_bytes);
+  }
+
+  if (FLAG_collect_heap_spill_statistics && print_spill) {
+    isolate->js_spill_information()->Print();
+  }
+}
+#endif  // DEBUG
+
+
+// Support for statistics gathering for --heap-stats and --log-gc.
+void NewSpace::ClearHistograms() {
+  for (int i = 0; i <= LAST_TYPE; i++) {
+    allocated_histogram_[i].clear();
+    promoted_histogram_[i].clear();
+  }
+}
+
+
+// Because the copying collector does not touch garbage objects, we iterate
+// the new space before a collection to get a histogram of allocated objects.
+// This only happens when the --log-gc flag is set.
+void NewSpace::CollectStatistics() {
+  ClearHistograms();
+  SemiSpaceIterator it(this);
+  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next())
+    RecordAllocation(obj);
+}
+
+
+static void DoReportStatistics(Isolate* isolate, HistogramInfo* info,
+                               const char* description) {
+  LOG(isolate, HeapSampleBeginEvent("NewSpace", description));
+  // Lump all the string types together.
+  int string_number = 0;
+  int string_bytes = 0;
+#define INCREMENT(type, size, name, camel_name) \
+  string_number += info[type].number();         \
+  string_bytes += info[type].bytes();
+  STRING_TYPE_LIST(INCREMENT)
+#undef INCREMENT
+  if (string_number > 0) {
+    LOG(isolate,
+        HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
+  }
+
+  // Then do the other types.
+  for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
+    if (info[i].number() > 0) {
+      LOG(isolate, HeapSampleItemEvent(info[i].name(), info[i].number(),
+                                       info[i].bytes()));
+    }
+  }
+  LOG(isolate, HeapSampleEndEvent("NewSpace", description));
+}
+
+
+void NewSpace::ReportStatistics() {
+#ifdef DEBUG
+  if (FLAG_heap_stats) {
+    float pct = static_cast<float>(Available()) / TotalCapacity();
+    PrintF("  capacity: %" V8_PTR_PREFIX
+           "d"
+           ", available: %" V8_PTR_PREFIX "d, %%%d\n",
+           TotalCapacity(), Available(), static_cast<int>(pct * 100));
+    PrintF("\n  Object Histogram:\n");
+    for (int i = 0; i <= LAST_TYPE; i++) {
+      if (allocated_histogram_[i].number() > 0) {
+        PrintF("    %-34s%10d (%10d bytes)\n", allocated_histogram_[i].name(),
+               allocated_histogram_[i].number(),
+               allocated_histogram_[i].bytes());
+      }
+    }
+    PrintF("\n");
+  }
+#endif  // DEBUG
+
+  if (FLAG_log_gc) {
+    Isolate* isolate = heap()->isolate();
+    DoReportStatistics(isolate, allocated_histogram_, "allocated");
+    DoReportStatistics(isolate, promoted_histogram_, "promoted");
+  }
+}
+
+
+void NewSpace::RecordAllocation(HeapObject* obj) {
+  InstanceType type = obj->map()->instance_type();
+  DCHECK(0 <= type && type <= LAST_TYPE);
+  allocated_histogram_[type].increment_number(1);
+  allocated_histogram_[type].increment_bytes(obj->Size());
+}
+
+
+void NewSpace::RecordPromotion(HeapObject* obj) {
+  InstanceType type = obj->map()->instance_type();
+  DCHECK(0 <= type && type <= LAST_TYPE);
+  promoted_histogram_[type].increment_number(1);
+  promoted_histogram_[type].increment_bytes(obj->Size());
+}
+
+
+size_t NewSpace::CommittedPhysicalMemory() {
+  if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
+  MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
+  size_t size = to_space_.CommittedPhysicalMemory();
+  if (from_space_.is_committed()) {
+    size += from_space_.CommittedPhysicalMemory();
+  }
+  return size;
+}
+
+
+// -----------------------------------------------------------------------------
+// Free lists for old object spaces implementation
+
+void FreeListNode::set_size(Heap* heap, int size_in_bytes) {
+  DCHECK(size_in_bytes > 0);
+  DCHECK(IsAligned(size_in_bytes, kPointerSize));
+
+  // We write a map and possibly size information to the block.  If the block
+  // is big enough to be a FreeSpace with at least one extra word (the next
+  // pointer), we set its map to be the free space map and its size to an
+  // appropriate array length for the desired size from HeapObject::Size().
+  // If the block is too small (e.g., one or two words) to hold both a size
+  // field and a next pointer, we give it a filler map that gives it the
+  // correct size.
+  if (size_in_bytes > FreeSpace::kHeaderSize) {
+    // Can't use FreeSpace::cast because it fails during deserialization.
+    // We have to set the size first with a release store before we store
+    // the map because a concurrent store buffer scan on scavenge must not
+    // observe a map with an invalid size.
+    FreeSpace* this_as_free_space = reinterpret_cast<FreeSpace*>(this);
+    this_as_free_space->nobarrier_set_size(size_in_bytes);
+    synchronized_set_map_no_write_barrier(heap->raw_unchecked_free_space_map());
+  } else if (size_in_bytes == kPointerSize) {
+    set_map_no_write_barrier(heap->raw_unchecked_one_pointer_filler_map());
+  } else if (size_in_bytes == 2 * kPointerSize) {
+    set_map_no_write_barrier(heap->raw_unchecked_two_pointer_filler_map());
+  } else {
+    UNREACHABLE();
+  }
+  // We would like to DCHECK(Size() == size_in_bytes) but this would fail during
+  // deserialization because the free space map is not done yet.
+}
+
+
+FreeListNode* FreeListNode::next() {
+  DCHECK(IsFreeListNode(this));
+  if (map() == GetHeap()->raw_unchecked_free_space_map()) {
+    DCHECK(map() == NULL || Size() >= kNextOffset + kPointerSize);
+    return reinterpret_cast<FreeListNode*>(
+        Memory::Address_at(address() + kNextOffset));
+  } else {
+    return reinterpret_cast<FreeListNode*>(
+        Memory::Address_at(address() + kPointerSize));
+  }
+}
+
+
+FreeListNode** FreeListNode::next_address() {
+  DCHECK(IsFreeListNode(this));
+  if (map() == GetHeap()->raw_unchecked_free_space_map()) {
+    DCHECK(Size() >= kNextOffset + kPointerSize);
+    return reinterpret_cast<FreeListNode**>(address() + kNextOffset);
+  } else {
+    return reinterpret_cast<FreeListNode**>(address() + kPointerSize);
+  }
+}
+
+
+void FreeListNode::set_next(FreeListNode* next) {
+  DCHECK(IsFreeListNode(this));
+  // While we are booting the VM the free space map will actually be null.  So
+  // we have to make sure that we don't try to use it for anything at that
+  // stage.
+  if (map() == GetHeap()->raw_unchecked_free_space_map()) {
+    DCHECK(map() == NULL || Size() >= kNextOffset + kPointerSize);
+    base::NoBarrier_Store(
+        reinterpret_cast<base::AtomicWord*>(address() + kNextOffset),
+        reinterpret_cast<base::AtomicWord>(next));
+  } else {
+    base::NoBarrier_Store(
+        reinterpret_cast<base::AtomicWord*>(address() + kPointerSize),
+        reinterpret_cast<base::AtomicWord>(next));
+  }
+}
+
+
+intptr_t FreeListCategory::Concatenate(FreeListCategory* category) {
+  intptr_t free_bytes = 0;
+  if (category->top() != NULL) {
+    // This is safe (not going to deadlock) since Concatenate operations
+    // are never performed on the same free lists at the same time in
+    // reverse order.
+    base::LockGuard<base::Mutex> target_lock_guard(mutex());
+    base::LockGuard<base::Mutex> source_lock_guard(category->mutex());
+    DCHECK(category->end_ != NULL);
+    free_bytes = category->available();
+    if (end_ == NULL) {
+      end_ = category->end();
+    } else {
+      category->end()->set_next(top());
+    }
+    set_top(category->top());
+    base::NoBarrier_Store(&top_, category->top_);
+    available_ += category->available();
+    category->Reset();
+  }
+  return free_bytes;
+}
+
+
+void FreeListCategory::Reset() {
+  set_top(NULL);
+  set_end(NULL);
+  set_available(0);
+}
+
+
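+// Unlink every node that lives on page |p| and subtract the evicted bytes
+// from the available counter.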
+intptr_t FreeListCategory::EvictFreeListItemsInList(Page* p) {
+  int sum = 0;
+  FreeListNode* t = top();
+  FreeListNode** n = &t;
+  while (*n != NULL) {
+    if (Page::FromAddress((*n)->address()) == p) {
+      FreeSpace* free_space = reinterpret_cast<FreeSpace*>(*n);
+      sum += free_space->Size();
+      *n = (*n)->next();
+    } else {
+      n = (*n)->next_address();
+    }
+  }
+  set_top(t);
+  if (top() == NULL) {
+    set_end(NULL);
+  }
+  available_ -= sum;
+  return sum;
+}
+
+
+bool FreeListCategory::ContainsPageFreeListItemsInList(Page* p) {
+  FreeListNode* node = top();
+  while (node != NULL) {
+    if (Page::FromAddress(node->address()) == p) return true;
+    node = node->next();
+  }
+  return false;
+}
+
+
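+// Return the first node that is not on an evacuation candidate page; nodes
+// on such pages are skipped and dropped from the list.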
+FreeListNode* FreeListCategory::PickNodeFromList(int* node_size) {
+  FreeListNode* node = top();
+
+  if (node == NULL) return NULL;
+
+  while (node != NULL &&
+         Page::FromAddress(node->address())->IsEvacuationCandidate()) {
+    available_ -= reinterpret_cast<FreeSpace*>(node)->Size();
+    node = node->next();
+  }
+
+  if (node != NULL) {
+    set_top(node->next());
+    *node_size = reinterpret_cast<FreeSpace*>(node)->Size();
+    available_ -= *node_size;
+  } else {
+    set_top(NULL);
+  }
+
+  if (top() == NULL) {
+    set_end(NULL);
+  }
+
+  return node;
+}
+
+
+FreeListNode* FreeListCategory::PickNodeFromList(int size_in_bytes,
+                                                 int* node_size) {
+  FreeListNode* node = PickNodeFromList(node_size);
+  if (node != NULL && *node_size < size_in_bytes) {
+    Free(node, *node_size);
+    *node_size = 0;
+    return NULL;
+  }
+  return node;
+}
+
+
+void FreeListCategory::Free(FreeListNode* node, int size_in_bytes) {
+  node->set_next(top());
+  set_top(node);
+  if (end_ == NULL) {
+    end_ = node;
+  }
+  available_ += size_in_bytes;
+}
+
+
+void FreeListCategory::RepairFreeList(Heap* heap) {
+  FreeListNode* n = top();
+  while (n != NULL) {
+    Map** map_location = reinterpret_cast<Map**>(n->address());
+    if (*map_location == NULL) {
+      *map_location = heap->free_space_map();
+    } else {
+      DCHECK(*map_location == heap->free_space_map());
+    }
+    n = n->next();
+  }
+}
+
+
+FreeList::FreeList(PagedSpace* owner) : owner_(owner), heap_(owner->heap()) {
+  Reset();
+}
+
+
+intptr_t FreeList::Concatenate(FreeList* free_list) {
+  intptr_t free_bytes = 0;
+  free_bytes += small_list_.Concatenate(free_list->small_list());
+  free_bytes += medium_list_.Concatenate(free_list->medium_list());
+  free_bytes += large_list_.Concatenate(free_list->large_list());
+  free_bytes += huge_list_.Concatenate(free_list->huge_list());
+  return free_bytes;
+}
+
+
+void FreeList::Reset() {
+  small_list_.Reset();
+  medium_list_.Reset();
+  large_list_.Reset();
+  huge_list_.Reset();
+}
+
+
+int FreeList::Free(Address start, int size_in_bytes) {
+  if (size_in_bytes == 0) return 0;
+
+  FreeListNode* node = FreeListNode::FromAddress(start);
+  node->set_size(heap_, size_in_bytes);
+  Page* page = Page::FromAddress(start);
+
+  // Early return to drop too-small blocks on the floor.
+  if (size_in_bytes < kSmallListMin) {
+    page->add_non_available_small_blocks(size_in_bytes);
+    return size_in_bytes;
+  }
+
+  // Insert other blocks at the head of a free list of the appropriate
+  // magnitude.
+  if (size_in_bytes <= kSmallListMax) {
+    small_list_.Free(node, size_in_bytes);
+    page->add_available_in_small_free_list(size_in_bytes);
+  } else if (size_in_bytes <= kMediumListMax) {
+    medium_list_.Free(node, size_in_bytes);
+    page->add_available_in_medium_free_list(size_in_bytes);
+  } else if (size_in_bytes <= kLargeListMax) {
+    large_list_.Free(node, size_in_bytes);
+    page->add_available_in_large_free_list(size_in_bytes);
+  } else {
+    huge_list_.Free(node, size_in_bytes);
+    page->add_available_in_huge_free_list(size_in_bytes);
+  }
+
+  DCHECK(IsVeryLong() || available() == SumFreeLists());
+  return 0;
+}
+
+
+FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
+  FreeListNode* node = NULL;
+  Page* page = NULL;
+
+  if (size_in_bytes <= kSmallAllocationMax) {
+    node = small_list_.PickNodeFromList(node_size);
+    if (node != NULL) {
+      DCHECK(size_in_bytes <= *node_size);
+      page = Page::FromAddress(node->address());
+      page->add_available_in_small_free_list(-(*node_size));
+      DCHECK(IsVeryLong() || available() == SumFreeLists());
+      return node;
+    }
+  }
+
+  if (size_in_bytes <= kMediumAllocationMax) {
+    node = medium_list_.PickNodeFromList(node_size);
+    if (node != NULL) {
+      DCHECK(size_in_bytes <= *node_size);
+      page = Page::FromAddress(node->address());
+      page->add_available_in_medium_free_list(-(*node_size));
+      DCHECK(IsVeryLong() || available() == SumFreeLists());
+      return node;
+    }
+  }
+
+  if (size_in_bytes <= kLargeAllocationMax) {
+    node = large_list_.PickNodeFromList(node_size);
+    if (node != NULL) {
+      DCHECK(size_in_bytes <= *node_size);
+      page = Page::FromAddress(node->address());
+      page->add_available_in_large_free_list(-(*node_size));
+      DCHECK(IsVeryLong() || available() == SumFreeLists());
+      return node;
+    }
+  }
+
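+  // Search the huge list: prune nodes on evacuation candidate pages and take
+  // the first node that is large enough.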
+  int huge_list_available = huge_list_.available();
+  FreeListNode* top_node = huge_list_.top();
+  for (FreeListNode** cur = &top_node; *cur != NULL;
+       cur = (*cur)->next_address()) {
+    FreeListNode* cur_node = *cur;
+    while (cur_node != NULL &&
+           Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) {
+      int size = reinterpret_cast<FreeSpace*>(cur_node)->Size();
+      huge_list_available -= size;
+      page = Page::FromAddress(cur_node->address());
+      page->add_available_in_huge_free_list(-size);
+      cur_node = cur_node->next();
+    }
+
+    *cur = cur_node;
+    if (cur_node == NULL) {
+      huge_list_.set_end(NULL);
+      break;
+    }
+
+    DCHECK((*cur)->map() == heap_->raw_unchecked_free_space_map());
+    FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(*cur);
+    int size = cur_as_free_space->Size();
+    if (size >= size_in_bytes) {
+      // Large enough node found.  Unlink it from the list.
+      node = *cur;
+      *cur = node->next();
+      *node_size = size;
+      huge_list_available -= size;
+      page = Page::FromAddress(node->address());
+      page->add_available_in_huge_free_list(-size);
+      break;
+    }
+  }
+
+  huge_list_.set_top(top_node);
+  if (huge_list_.top() == NULL) {
+    huge_list_.set_end(NULL);
+  }
+  huge_list_.set_available(huge_list_available);
+
+  if (node != NULL) {
+    DCHECK(IsVeryLong() || available() == SumFreeLists());
+    return node;
+  }
+
+  if (size_in_bytes <= kSmallListMax) {
+    node = small_list_.PickNodeFromList(size_in_bytes, node_size);
+    if (node != NULL) {
+      DCHECK(size_in_bytes <= *node_size);
+      page = Page::FromAddress(node->address());
+      page->add_available_in_small_free_list(-(*node_size));
+    }
+  } else if (size_in_bytes <= kMediumListMax) {
+    node = medium_list_.PickNodeFromList(size_in_bytes, node_size);
+    if (node != NULL) {
+      DCHECK(size_in_bytes <= *node_size);
+      page = Page::FromAddress(node->address());
+      page->add_available_in_medium_free_list(-(*node_size));
+    }
+  } else if (size_in_bytes <= kLargeListMax) {
+    node = large_list_.PickNodeFromList(size_in_bytes, node_size);
+    if (node != NULL) {
+      DCHECK(size_in_bytes <= *node_size);
+      page = Page::FromAddress(node->address());
+      page->add_available_in_large_free_list(-(*node_size));
+    }
+  }
+
+  DCHECK(IsVeryLong() || available() == SumFreeLists());
+  return node;
+}
+
+
+// Allocation on the old space free list.  If it succeeds then a new linear
+// allocation space has been set up with the top and limit of the space.  If
+// the allocation fails then NULL is returned, and the caller can perform a GC
+// or allocate a new page before retrying.
+HeapObject* FreeList::Allocate(int size_in_bytes) {
+  DCHECK(0 < size_in_bytes);
+  DCHECK(size_in_bytes <= kMaxBlockSize);
+  DCHECK(IsAligned(size_in_bytes, kPointerSize));
+  // Don't free list allocate if there is linear space available.
+  DCHECK(owner_->limit() - owner_->top() < size_in_bytes);
+
+  int old_linear_size = static_cast<int>(owner_->limit() - owner_->top());
+  // Mark the old linear allocation area with a free space map so it can be
+  // skipped when scanning the heap.  This also puts it back in the free list
+  // if it is big enough.
+  owner_->Free(owner_->top(), old_linear_size);
+
+  owner_->heap()->incremental_marking()->OldSpaceStep(size_in_bytes -
+                                                      old_linear_size);
+
+  int new_node_size = 0;
+  FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size);
+  if (new_node == NULL) {
+    owner_->SetTopAndLimit(NULL, NULL);
+    return NULL;
+  }
+
+  int bytes_left = new_node_size - size_in_bytes;
+  DCHECK(bytes_left >= 0);
+
+#ifdef DEBUG
+  for (int i = 0; i < size_in_bytes / kPointerSize; i++) {
+    reinterpret_cast<Object**>(new_node->address())[i] =
+        Smi::FromInt(kCodeZapValue);
+  }
+#endif
+
+  // The old-space-step might have finished sweeping and restarted marking.
+  // Verify that it did not turn the page of the new node into an evacuation
+  // candidate.
+  DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
+
+  const int kThreshold = IncrementalMarking::kAllocatedThreshold;
+
+  // Memory in the linear allocation area is counted as allocated.  We may free
+  // a little of this again immediately - see below.
+  owner_->Allocate(new_node_size);
+
+  if (owner_->heap()->inline_allocation_disabled()) {
+    // Keep the linear allocation area empty if requested to do so, just
+    // return area back to the free list instead.
+    owner_->Free(new_node->address() + size_in_bytes, bytes_left);
+    DCHECK(owner_->top() == NULL && owner_->limit() == NULL);
+  } else if (bytes_left > kThreshold &&
+             owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
+             FLAG_incremental_marking_steps) {
+    int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
+    // We don't want to give too large linear areas to the allocator while
+    // incremental marking is going on, because we won't check again whether
+    // we want to do another increment until the linear area is used up.
+    owner_->Free(new_node->address() + size_in_bytes + linear_size,
+                 new_node_size - size_in_bytes - linear_size);
+    owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
+                           new_node->address() + size_in_bytes + linear_size);
+  } else if (bytes_left > 0) {
+    // Normally we give the rest of the node to the allocator as its new
+    // linear allocation area.
+    owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
+                           new_node->address() + new_node_size);
+  } else {
+    // TODO(gc) Try not freeing linear allocation region when bytes_left
+    // are zero.
+    owner_->SetTopAndLimit(NULL, NULL);
+  }
+
+  return new_node;
+}
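+
+// Worked example for the carving above (illustrative numbers): requesting
+// size_in_bytes == 64 from a 256-byte node leaves bytes_left == 192.  In the
+// common case (inline allocation enabled, no incremental marking cap) the
+// whole remainder becomes the new linear area:
+//
+//   top   == new_node->address() + 64
+//   limit == new_node->address() + 256
+//
+// so subsequent small allocations just bump 'top' without consulting the free
+// list again.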
+
+
+intptr_t FreeList::EvictFreeListItems(Page* p) {
+  intptr_t sum = huge_list_.EvictFreeListItemsInList(p);
+  p->set_available_in_huge_free_list(0);
+
+  if (sum < p->area_size()) {
+    sum += small_list_.EvictFreeListItemsInList(p) +
+           medium_list_.EvictFreeListItemsInList(p) +
+           large_list_.EvictFreeListItemsInList(p);
+    p->set_available_in_small_free_list(0);
+    p->set_available_in_medium_free_list(0);
+    p->set_available_in_large_free_list(0);
+  }
+
+  return sum;
+}
+
+
+bool FreeList::ContainsPageFreeListItems(Page* p) {
+  return huge_list_.ContainsPageFreeListItemsInList(p) ||
+         small_list_.ContainsPageFreeListItemsInList(p) ||
+         medium_list_.ContainsPageFreeListItemsInList(p) ||
+         large_list_.ContainsPageFreeListItemsInList(p);
+}
+
+
+void FreeList::RepairLists(Heap* heap) {
+  small_list_.RepairFreeList(heap);
+  medium_list_.RepairFreeList(heap);
+  large_list_.RepairFreeList(heap);
+  huge_list_.RepairFreeList(heap);
+}
+
+
+#ifdef DEBUG
+intptr_t FreeListCategory::SumFreeList() {
+  intptr_t sum = 0;
+  FreeListNode* cur = top();
+  while (cur != NULL) {
+    DCHECK(cur->map() == cur->GetHeap()->raw_unchecked_free_space_map());
+    FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(cur);
+    sum += cur_as_free_space->nobarrier_size();
+    cur = cur->next();
+  }
+  return sum;
+}
+
+
+static const int kVeryLongFreeList = 500;
+
+
+int FreeListCategory::FreeListLength() {
+  int length = 0;
+  FreeListNode* cur = top();
+  while (cur != NULL) {
+    length++;
+    cur = cur->next();
+    if (length == kVeryLongFreeList) return length;
+  }
+  return length;
+}
+
+
+bool FreeList::IsVeryLong() {
+  if (small_list_.FreeListLength() == kVeryLongFreeList) return true;
+  if (medium_list_.FreeListLength() == kVeryLongFreeList) return true;
+  if (large_list_.FreeListLength() == kVeryLongFreeList) return true;
+  if (huge_list_.FreeListLength() == kVeryLongFreeList) return true;
+  return false;
+}
+
+
+// This can take a very long time because it is linear in the number of entries
+// on the free list, so it should not be called if FreeListLength returns
+// kVeryLongFreeList.
+intptr_t FreeList::SumFreeLists() {
+  intptr_t sum = small_list_.SumFreeList();
+  sum += medium_list_.SumFreeList();
+  sum += large_list_.SumFreeList();
+  sum += huge_list_.SumFreeList();
+  return sum;
+}
+#endif
+
+
+// -----------------------------------------------------------------------------
+// OldSpace implementation
+
+void PagedSpace::PrepareForMarkCompact() {
+  // We don't have a linear allocation area while sweeping.  It will be restored
+  // on the first allocation after the sweep.
+  EmptyAllocationInfo();
+
+  // This counter will be increased for pages which will be swept by the
+  // sweeper threads.
+  unswept_free_bytes_ = 0;
+
+  // Clear the free list before a full GC---it will be rebuilt afterward.
+  free_list_.Reset();
+}
+
+
+intptr_t PagedSpace::SizeOfObjects() {
+  DCHECK(heap()->mark_compact_collector()->sweeping_in_progress() ||
+         (unswept_free_bytes_ == 0));
+  return Size() - unswept_free_bytes_ - (limit() - top());
+}
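+
+// Example with illustrative numbers: if Size() is 4 MB, 1 MB of that is still
+// unswept free space and the current linear area has 16 KB left, then
+// SizeOfObjects() reports 4 MB - 1 MB - 16 KB, i.e. only memory that is (or
+// recently was) occupied by objects.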
+
+
+// After we have booted, we have created a map which represents free space
+// on the heap.  If there was already a free list then the elements on it
+// were created with the wrong FreeSpaceMap (normally NULL), so we need to
+// fix them.
+void PagedSpace::RepairFreeListsAfterBoot() { free_list_.RepairLists(heap()); }
+
+
+void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
+  if (allocation_info_.top() >= allocation_info_.limit()) return;
+
+  if (Page::FromAllocationTop(allocation_info_.top())
+          ->IsEvacuationCandidate()) {
+    // Create filler object to keep page iterable if it was iterable.
+    int remaining =
+        static_cast<int>(allocation_info_.limit() - allocation_info_.top());
+    heap()->CreateFillerObjectAt(allocation_info_.top(), remaining);
+
+    allocation_info_.set_top(NULL);
+    allocation_info_.set_limit(NULL);
+  }
+}
+
+
+HeapObject* PagedSpace::WaitForSweeperThreadsAndRetryAllocation(
+    int size_in_bytes) {
+  MarkCompactCollector* collector = heap()->mark_compact_collector();
+  if (collector->sweeping_in_progress()) {
+    // Wait for the sweeper threads here and complete the sweeping phase.
+    collector->EnsureSweepingCompleted();
+
+    // After waiting for the sweeper threads, there may be new free-list
+    // entries.
+    return free_list_.Allocate(size_in_bytes);
+  }
+  return NULL;
+}
+
+
+HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
+  // Allocation in this space has failed.
+
+  MarkCompactCollector* collector = heap()->mark_compact_collector();
+  // Sweeping is still in progress.
+  if (collector->sweeping_in_progress()) {
+    // First try to refill the free-list, concurrent sweeper threads
+    // may have freed some objects in the meantime.
+    collector->RefillFreeList(this);
+
+    // Retry the free list allocation.
+    HeapObject* object = free_list_.Allocate(size_in_bytes);
+    if (object != NULL) return object;
+
+    // If sweeping is still in progress try to sweep pages on the main thread.
+    int free_chunk = collector->SweepInParallel(this, size_in_bytes);
+    collector->RefillFreeList(this);
+    if (free_chunk >= size_in_bytes) {
+      HeapObject* object = free_list_.Allocate(size_in_bytes);
+      // We should be able to allocate an object here since we just freed that
+      // much memory.
+      DCHECK(object != NULL);
+      if (object != NULL) return object;
+    }
+  }
+
+  // Free list allocation failed and there is no next page.  Fail if we have
+  // hit the old generation size limit that should cause a garbage
+  // collection.
+  if (!heap()->always_allocate() &&
+      heap()->OldGenerationAllocationLimitReached()) {
+    // If sweeper threads are active, wait for them at that point and steal
+    // elements from their free-lists.
+    HeapObject* object = WaitForSweeperThreadsAndRetryAllocation(size_in_bytes);
+    if (object != NULL) return object;
+  }
+
+  // Try to expand the space and allocate in the new next page.
+  if (Expand()) {
+    DCHECK(CountTotalPages() > 1 || size_in_bytes <= free_list_.available());
+    return free_list_.Allocate(size_in_bytes);
+  }
+
+  // If sweeper threads are active, wait for them at that point and steal
+  // elements from their free-lists. Allocation may still fail here, which
+  // would indicate that there is not enough memory for the given allocation.
+  return WaitForSweeperThreadsAndRetryAllocation(size_in_bytes);
+}
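+
+// A sketch of how the inline fast path (defined in spaces-inl.h; shape assumed
+// here) reaches this slow path:
+//
+//   HeapObject* object = AllocateLinearly(size_in_bytes);         // bump top
+//   if (object == NULL) object = free_list_.Allocate(size_in_bytes);
+//   if (object == NULL) object = SlowAllocateRaw(size_in_bytes);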
+
+
+#ifdef DEBUG
+void PagedSpace::ReportCodeStatistics(Isolate* isolate) {
+  CommentStatistic* comments_statistics =
+      isolate->paged_space_comments_statistics();
+  ReportCodeKindStatistics(isolate->code_kind_statistics());
+  PrintF(
+      "Code comment statistics (\"   [ comment-txt   :    size/   "
+      "count  (average)\"):\n");
+  for (int i = 0; i <= CommentStatistic::kMaxComments; i++) {
+    const CommentStatistic& cs = comments_statistics[i];
+    if (cs.size > 0) {
+      PrintF("   %-30s: %10d/%6d     (%d)\n", cs.comment, cs.size, cs.count,
+             cs.size / cs.count);
+    }
+  }
+  PrintF("\n");
+}
+
+
+void PagedSpace::ResetCodeStatistics(Isolate* isolate) {
+  CommentStatistic* comments_statistics =
+      isolate->paged_space_comments_statistics();
+  ClearCodeKindStatistics(isolate->code_kind_statistics());
+  for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
+    comments_statistics[i].Clear();
+  }
+  comments_statistics[CommentStatistic::kMaxComments].comment = "Unknown";
+  comments_statistics[CommentStatistic::kMaxComments].size = 0;
+  comments_statistics[CommentStatistic::kMaxComments].count = 0;
+}
+
+
+// Adds a comment to the 'comment_statistics' table. Performance is OK as long
+// as 'kMaxComments' is small.
+static void EnterComment(Isolate* isolate, const char* comment, int delta) {
+  CommentStatistic* comments_statistics =
+      isolate->paged_space_comments_statistics();
+  // Do not count empty comments
+  if (delta <= 0) return;
+  CommentStatistic* cs = &comments_statistics[CommentStatistic::kMaxComments];
+  // Search for a free or matching entry in 'comments_statistics': 'cs'
+  // points to result.
+  for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
+    if (comments_statistics[i].comment == NULL) {
+      cs = &comments_statistics[i];
+      cs->comment = comment;
+      break;
+    } else if (strcmp(comments_statistics[i].comment, comment) == 0) {
+      cs = &comments_statistics[i];
+      break;
+    }
+  }
+  // Update entry for 'comment'
+  cs->size += delta;
+  cs->count += 1;
+}
+
+
+// Call for each nested comment start (start marked with '[ xxx', end marked
+// with ']').  RelocIterator 'it' must point to a comment reloc info.
+static void CollectCommentStatistics(Isolate* isolate, RelocIterator* it) {
+  DCHECK(!it->done());
+  DCHECK(it->rinfo()->rmode() == RelocInfo::COMMENT);
+  const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data());
+  if (tmp[0] != '[') {
+    // Not a nested comment; skip
+    return;
+  }
+
+  // Search for end of nested comment or a new nested comment
+  const char* const comment_txt =
+      reinterpret_cast<const char*>(it->rinfo()->data());
+  const byte* prev_pc = it->rinfo()->pc();
+  int flat_delta = 0;
+  it->next();
+  while (true) {
+    // All nested comments must be terminated properly, so we will eventually
+    // exit this loop.
+    DCHECK(!it->done());
+    if (it->rinfo()->rmode() == RelocInfo::COMMENT) {
+      const char* const txt =
+          reinterpret_cast<const char*>(it->rinfo()->data());
+      flat_delta += static_cast<int>(it->rinfo()->pc() - prev_pc);
+      if (txt[0] == ']') break;  // End of nested  comment
+      // A new comment
+      CollectCommentStatistics(isolate, it);
+      // Skip code that was covered with previous comment
+      prev_pc = it->rinfo()->pc();
+    }
+    it->next();
+  }
+  EnterComment(isolate, comment_txt, flat_delta);
+}
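+
+// Example of the comment nesting this parser expects (illustrative):
+//
+//   [ outer comment
+//     ...code...
+//     [ inner comment ...code... ]
+//     ...code...
+//   ]
+//
+// Each '[' starts a recursive call and the matching ']' attributes the code
+// covered at that nesting level ('flat_delta') to the enclosing comment text.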
+
+
+// Collects code size statistics:
+// - by code kind
+// - by code comment
+void PagedSpace::CollectCodeStatistics() {
+  Isolate* isolate = heap()->isolate();
+  HeapObjectIterator obj_it(this);
+  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
+    if (obj->IsCode()) {
+      Code* code = Code::cast(obj);
+      isolate->code_kind_statistics()[code->kind()] += code->Size();
+      RelocIterator it(code);
+      int delta = 0;
+      const byte* prev_pc = code->instruction_start();
+      while (!it.done()) {
+        if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
+          delta += static_cast<int>(it.rinfo()->pc() - prev_pc);
+          CollectCommentStatistics(isolate, &it);
+          prev_pc = it.rinfo()->pc();
+        }
+        it.next();
+      }
+
+      DCHECK(code->instruction_start() <= prev_pc &&
+             prev_pc <= code->instruction_end());
+      delta += static_cast<int>(code->instruction_end() - prev_pc);
+      EnterComment(isolate, "NoComment", delta);
+    }
+  }
+}
+
+
+void PagedSpace::ReportStatistics() {
+  int pct = static_cast<int>(Available() * 100 / Capacity());
+  PrintF("  capacity: %" V8_PTR_PREFIX
+         "d"
+         ", waste: %" V8_PTR_PREFIX
+         "d"
+         ", available: %" V8_PTR_PREFIX "d, %%%d\n",
+         Capacity(), Waste(), Available(), pct);
+
+  if (heap()->mark_compact_collector()->sweeping_in_progress()) {
+    heap()->mark_compact_collector()->EnsureSweepingCompleted();
+  }
+  ClearHistograms(heap()->isolate());
+  HeapObjectIterator obj_it(this);
+  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next())
+    CollectHistogramInfo(obj);
+  ReportHistogram(heap()->isolate(), true);
+}
+#endif
+
+
+// -----------------------------------------------------------------------------
+// MapSpace implementation
+// TODO(mvstanton): this is weird...the compiler can't make a vtable unless
+// there is at least one non-inlined virtual function. I would prefer to hide
+// the VerifyObject definition behind VERIFY_HEAP.
+
+void MapSpace::VerifyObject(HeapObject* object) { CHECK(object->IsMap()); }
+
+
+// -----------------------------------------------------------------------------
+// CellSpace and PropertyCellSpace implementation
+// TODO(mvstanton): this is weird...the compiler can't make a vtable unless
+// there is at least one non-inlined virtual function. I would prefer to hide
+// the VerifyObject definition behind VERIFY_HEAP.
+
+void CellSpace::VerifyObject(HeapObject* object) { CHECK(object->IsCell()); }
+
+
+void PropertyCellSpace::VerifyObject(HeapObject* object) {
+  CHECK(object->IsPropertyCell());
+}
+
+
+// -----------------------------------------------------------------------------
+// LargeObjectIterator
+
+LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
+  current_ = space->first_page_;
+  size_func_ = NULL;
+}
+
+
+LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space,
+                                         HeapObjectCallback size_func) {
+  current_ = space->first_page_;
+  size_func_ = size_func;
+}
+
+
+HeapObject* LargeObjectIterator::Next() {
+  if (current_ == NULL) return NULL;
+
+  HeapObject* object = current_->GetObject();
+  current_ = current_->next_page();
+  return object;
+}
+
+
+// -----------------------------------------------------------------------------
+// LargeObjectSpace
+static bool ComparePointers(void* key1, void* key2) { return key1 == key2; }
+
+
+LargeObjectSpace::LargeObjectSpace(Heap* heap, intptr_t max_capacity,
+                                   AllocationSpace id)
+    : Space(heap, id, NOT_EXECUTABLE),  // Managed on a per-allocation basis
+      max_capacity_(max_capacity),
+      first_page_(NULL),
+      size_(0),
+      page_count_(0),
+      objects_size_(0),
+      chunk_map_(ComparePointers, 1024) {}
+
+
+bool LargeObjectSpace::SetUp() {
+  first_page_ = NULL;
+  size_ = 0;
+  maximum_committed_ = 0;
+  page_count_ = 0;
+  objects_size_ = 0;
+  chunk_map_.Clear();
+  return true;
+}
+
+
+void LargeObjectSpace::TearDown() {
+  while (first_page_ != NULL) {
+    LargePage* page = first_page_;
+    first_page_ = first_page_->next_page();
+    LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", page->address()));
+
+    ObjectSpace space = static_cast<ObjectSpace>(1 << identity());
+    heap()->isolate()->memory_allocator()->PerformAllocationCallback(
+        space, kAllocationActionFree, page->size());
+    heap()->isolate()->memory_allocator()->Free(page);
+  }
+  SetUp();
+}
+
+
+AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
+                                               Executability executable) {
+  // Check if we want to force a GC before growing the old space further.
+  // If so, fail the allocation.
+  if (!heap()->always_allocate() &&
+      heap()->OldGenerationAllocationLimitReached()) {
+    return AllocationResult::Retry(identity());
+  }
+
+  if (Size() + object_size > max_capacity_) {
+    return AllocationResult::Retry(identity());
+  }
+
+  LargePage* page = heap()->isolate()->memory_allocator()->AllocateLargePage(
+      object_size, this, executable);
+  if (page == NULL) return AllocationResult::Retry(identity());
+  DCHECK(page->area_size() >= object_size);
+
+  size_ += static_cast<int>(page->size());
+  objects_size_ += object_size;
+  page_count_++;
+  page->set_next_page(first_page_);
+  first_page_ = page;
+
+  if (size_ > maximum_committed_) {
+    maximum_committed_ = size_;
+  }
+
+  // Register all MemoryChunk::kAlignment-aligned chunks covered by
+  // this large page in the chunk map.
+  uintptr_t base = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment;
+  uintptr_t limit = base + (page->size() - 1) / MemoryChunk::kAlignment;
+  for (uintptr_t key = base; key <= limit; key++) {
+    HashMap::Entry* entry = chunk_map_.Lookup(reinterpret_cast<void*>(key),
+                                              static_cast<uint32_t>(key), true);
+    DCHECK(entry != NULL);
+    entry->value = page;
+  }
+
+  HeapObject* object = page->GetObject();
+
+  MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), object_size);
+
+  if (Heap::ShouldZapGarbage()) {
+    // Make the object consistent so the heap can be verified in OldSpaceStep.
+    // We only need to do this in debug builds or if verify_heap is on.
+    reinterpret_cast<Object**>(object->address())[0] =
+        heap()->fixed_array_map();
+    reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0);
+  }
+
+  heap()->incremental_marking()->OldSpaceStep(object_size);
+  return object;
+}
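+
+// Worked example for the chunk map registration above (illustrative numbers):
+// a 3 MB large page at 0x4000000 with 1 MB chunk alignment registers the keys
+// 0x4000000 / 1 MB .. (0x4000000 + 3 MB - 1) / 1 MB, i.e. three consecutive
+// keys, so FindPage() below can map any interior address to its page with a
+// single hash lookup.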
+
+
+size_t LargeObjectSpace::CommittedPhysicalMemory() {
+  if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
+  size_t size = 0;
+  LargePage* current = first_page_;
+  while (current != NULL) {
+    size += current->CommittedPhysicalMemory();
+    current = current->next_page();
+  }
+  return size;
+}
+
+
+// GC support
+Object* LargeObjectSpace::FindObject(Address a) {
+  LargePage* page = FindPage(a);
+  if (page != NULL) {
+    return page->GetObject();
+  }
+  return Smi::FromInt(0);  // Signaling not found.
+}
+
+
+LargePage* LargeObjectSpace::FindPage(Address a) {
+  uintptr_t key = reinterpret_cast<uintptr_t>(a) / MemoryChunk::kAlignment;
+  HashMap::Entry* e = chunk_map_.Lookup(reinterpret_cast<void*>(key),
+                                        static_cast<uint32_t>(key), false);
+  if (e != NULL) {
+    DCHECK(e->value != NULL);
+    LargePage* page = reinterpret_cast<LargePage*>(e->value);
+    DCHECK(page->is_valid());
+    if (page->Contains(a)) {
+      return page;
+    }
+  }
+  return NULL;
+}
+
+
+void LargeObjectSpace::FreeUnmarkedObjects() {
+  LargePage* previous = NULL;
+  LargePage* current = first_page_;
+  while (current != NULL) {
+    HeapObject* object = current->GetObject();
+    // Can this large page contain pointers to non-trivial objects?  No other
+    // pointer object is this big.
+    bool is_pointer_object = object->IsFixedArray();
+    MarkBit mark_bit = Marking::MarkBitFrom(object);
+    if (mark_bit.Get()) {
+      mark_bit.Clear();
+      Page::FromAddress(object->address())->ResetProgressBar();
+      Page::FromAddress(object->address())->ResetLiveBytes();
+      previous = current;
+      current = current->next_page();
+    } else {
+      LargePage* page = current;
+      // Cut the chunk out from the chunk list.
+      current = current->next_page();
+      if (previous == NULL) {
+        first_page_ = current;
+      } else {
+        previous->set_next_page(current);
+      }
+
+      // Free the chunk.
+      heap()->mark_compact_collector()->ReportDeleteIfNeeded(object,
+                                                             heap()->isolate());
+      size_ -= static_cast<int>(page->size());
+      objects_size_ -= object->Size();
+      page_count_--;
+
+      // Remove entries belonging to this page.
+      // Use variable alignment to help pass length check (<= 80 characters)
+      // of single line in tools/presubmit.py.
+      const intptr_t alignment = MemoryChunk::kAlignment;
+      uintptr_t base = reinterpret_cast<uintptr_t>(page) / alignment;
+      uintptr_t limit = base + (page->size() - 1) / alignment;
+      for (uintptr_t key = base; key <= limit; key++) {
+        chunk_map_.Remove(reinterpret_cast<void*>(key),
+                          static_cast<uint32_t>(key));
+      }
+
+      if (is_pointer_object) {
+        heap()->QueueMemoryChunkForFree(page);
+      } else {
+        heap()->isolate()->memory_allocator()->Free(page);
+      }
+    }
+  }
+  heap()->FreeQueuedChunks();
+}
+
+
+bool LargeObjectSpace::Contains(HeapObject* object) {
+  Address address = object->address();
+  MemoryChunk* chunk = MemoryChunk::FromAddress(address);
+
+  bool owned = (chunk->owner() == this);
+
+  SLOW_DCHECK(!owned || FindObject(address)->IsHeapObject());
+
+  return owned;
+}
+
+
+#ifdef VERIFY_HEAP
+// We do not assume that the large object iterator works, because it depends
+// on the invariants we are checking during verification.
+void LargeObjectSpace::Verify() {
+  for (LargePage* chunk = first_page_; chunk != NULL;
+       chunk = chunk->next_page()) {
+    // Each chunk contains an object that starts at the large object page's
+    // object area start.
+    HeapObject* object = chunk->GetObject();
+    Page* page = Page::FromAddress(object->address());
+    CHECK(object->address() == page->area_start());
+
+    // The first word should be a map, and we expect all map pointers to be
+    // in map space.
+    Map* map = object->map();
+    CHECK(map->IsMap());
+    CHECK(heap()->map_space()->Contains(map));
+
+    // We have only code, sequential strings, external strings
+    // (sequential strings that have been morphed into external
+    // strings), fixed arrays, byte arrays, and constant pool arrays in the
+    // large object space.
+    CHECK(object->IsCode() || object->IsSeqString() ||
+          object->IsExternalString() || object->IsFixedArray() ||
+          object->IsFixedDoubleArray() || object->IsByteArray() ||
+          object->IsConstantPoolArray());
+
+    // The object itself should look OK.
+    object->ObjectVerify();
+
+    // Byte arrays and strings don't have interior pointers.
+    if (object->IsCode()) {
+      VerifyPointersVisitor code_visitor;
+      object->IterateBody(map->instance_type(), object->Size(), &code_visitor);
+    } else if (object->IsFixedArray()) {
+      FixedArray* array = FixedArray::cast(object);
+      for (int j = 0; j < array->length(); j++) {
+        Object* element = array->get(j);
+        if (element->IsHeapObject()) {
+          HeapObject* element_object = HeapObject::cast(element);
+          CHECK(heap()->Contains(element_object));
+          CHECK(element_object->map()->IsMap());
+        }
+      }
+    }
+  }
+}
+#endif
+
+
+#ifdef DEBUG
+void LargeObjectSpace::Print() {
+  OFStream os(stdout);
+  LargeObjectIterator it(this);
+  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
+    obj->Print(os);
+  }
+}
+
+
+void LargeObjectSpace::ReportStatistics() {
+  PrintF("  size: %" V8_PTR_PREFIX "d\n", size_);
+  int num_objects = 0;
+  ClearHistograms(heap()->isolate());
+  LargeObjectIterator it(this);
+  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
+    num_objects++;
+    CollectHistogramInfo(obj);
+  }
+
+  PrintF(
+      "  number of objects %d, "
+      "size of objects %" V8_PTR_PREFIX "d\n",
+      num_objects, objects_size_);
+  if (num_objects > 0) ReportHistogram(heap()->isolate(), false);
+}
+
+
+void LargeObjectSpace::CollectCodeStatistics() {
+  Isolate* isolate = heap()->isolate();
+  LargeObjectIterator obj_it(this);
+  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
+    if (obj->IsCode()) {
+      Code* code = Code::cast(obj);
+      isolate->code_kind_statistics()[code->kind()] += code->Size();
+    }
+  }
+}
+
+
+void Page::Print() {
+  // Make a best-effort to print the objects in the page.
+  PrintF("Page@%p in %s\n", this->address(),
+         AllocationSpaceName(this->owner()->identity()));
+  printf(" --------------------------------------\n");
+  HeapObjectIterator objects(this, heap()->GcSafeSizeOfOldObjectFunction());
+  unsigned mark_size = 0;
+  for (HeapObject* object = objects.Next(); object != NULL;
+       object = objects.Next()) {
+    bool is_marked = Marking::MarkBitFrom(object).Get();
+    PrintF(" %c ", (is_marked ? '!' : ' '));  // Indent a little.
+    if (is_marked) {
+      mark_size += heap()->GcSafeSizeOfOldObjectFunction()(object);
+    }
+    object->ShortPrint();
+    PrintF("\n");
+  }
+  printf(" --------------------------------------\n");
+  printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
+}
+
+#endif  // DEBUG
+}
+}  // namespace v8::internal
diff --git a/src/heap/spaces.h b/src/heap/spaces.h
new file mode 100644
index 0000000..9ecb3c4
--- /dev/null
+++ b/src/heap/spaces.h
@@ -0,0 +1,2886 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_SPACES_H_
+#define V8_HEAP_SPACES_H_
+
+#include "src/allocation.h"
+#include "src/base/atomicops.h"
+#include "src/base/bits.h"
+#include "src/base/platform/mutex.h"
+#include "src/hashmap.h"
+#include "src/list.h"
+#include "src/log.h"
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+
+class Isolate;
+
+// -----------------------------------------------------------------------------
+// Heap structures:
+//
+// A JS heap consists of a young generation, an old generation, and a large
+// object space. The young generation is divided into two semispaces. A
+// scavenger implements Cheney's copying algorithm. The old generation is
+// separated into a map space and an old object space. The map space contains
+// all (and only) map objects, the rest of old objects go into the old space.
+// The old generation is collected by a mark-sweep-compact collector.
+//
+// The semispaces of the young generation are contiguous.  The old and map
+// spaces consist of lists of pages. A page has a page header and an object
+// area.
+//
+// There is a separate large object space for objects larger than
+// Page::kMaxHeapObjectSize, so that they do not have to move during
+// collection. The large object space is paged. Pages in large object space
+// may be larger than the page size.
+//
+// A store-buffer based write barrier is used to keep track of intergenerational
+// references.  See heap/store-buffer.h.
+//
+// During scavenges and mark-sweep collections we sometimes (after a store
+// buffer overflow) iterate intergenerational pointers without decoding heap
+// object maps so if the page belongs to old pointer space or large object
+// space it is essential to guarantee that the page does not contain any
+// garbage pointers to new space: every pointer aligned word which satisfies
+// the Heap::InNewSpace() predicate must be a pointer to a live heap object in
+// new space. Thus objects in old pointer and large object spaces should have a
+// special layout (e.g. no bare integer fields). This requirement does not
+// apply to map space which is iterated in a special fashion. However we still
+// require pointer fields of dead maps to be cleaned.
+//
+// To enable lazy cleaning of old space pages we can mark chunks of the page
+// as being garbage.  Garbage sections are marked with a special map.  These
+// sections are skipped when scanning the page, even if we are otherwise
+// scanning without regard for object boundaries.  Garbage sections are chained
+// together to form a free list after a GC.  Garbage sections created outside
+// of GCs by object truncation etc. may not be in the free list chain.  Very
+// small free spaces are ignored, they need only be cleaned of bogus pointers
+// into new space.
+//
+// Each page may have up to one special garbage section.  The start of this
+// section is denoted by the top field in the space.  The end of the section
+// is denoted by the limit field in the space.  This special garbage section
+// is not marked with a free space map in the data.  The point of this section
+// is to enable linear allocation without having to constantly update the byte
+// array every time the top field is updated and a new object is created.  The
+// special garbage section is not in the chain of garbage sections.
+//
+// Since the top and limit fields are in the space, not the page, only one page
+// has a special garbage section, and if the top and limit are equal then there
+// is no special garbage section.
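+//
+// A rough picture of the layout described above:
+//
+//   new space:    [ to-space | from-space ]       (contiguous semispaces)
+//   old gen:      map space + old object space    (lists of pages)
+//   large space:  one object per page; pages may exceed the normal page size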
+
+// Some assertion macros used in the debugging mode.
+
+#define DCHECK_PAGE_ALIGNED(address) \
+  DCHECK((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)
+
+#define DCHECK_OBJECT_ALIGNED(address) \
+  DCHECK((OffsetFrom(address) & kObjectAlignmentMask) == 0)
+
+#define DCHECK_OBJECT_SIZE(size) \
+  DCHECK((0 < size) && (size <= Page::kMaxRegularHeapObjectSize))
+
+#define DCHECK_PAGE_OFFSET(offset) \
+  DCHECK((Page::kObjectStartOffset <= offset) && (offset <= Page::kPageSize))
+
+#define DCHECK_MAP_PAGE_INDEX(index) \
+  DCHECK((0 <= index) && (index <= MapSpace::kMaxMapPageIndex))
+
+
+class PagedSpace;
+class MemoryAllocator;
+class AllocationInfo;
+class Space;
+class FreeList;
+class MemoryChunk;
+
+class MarkBit {
+ public:
+  typedef uint32_t CellType;
+
+  inline MarkBit(CellType* cell, CellType mask, bool data_only)
+      : cell_(cell), mask_(mask), data_only_(data_only) {}
+
+  inline CellType* cell() { return cell_; }
+  inline CellType mask() { return mask_; }
+
+#ifdef DEBUG
+  bool operator==(const MarkBit& other) {
+    return cell_ == other.cell_ && mask_ == other.mask_;
+  }
+#endif
+
+  inline void Set() { *cell_ |= mask_; }
+  inline bool Get() { return (*cell_ & mask_) != 0; }
+  inline void Clear() { *cell_ &= ~mask_; }
+
+  inline bool data_only() { return data_only_; }
+
+  inline MarkBit Next() {
+    CellType new_mask = mask_ << 1;
+    if (new_mask == 0) {
+      return MarkBit(cell_ + 1, 1, data_only_);
+    } else {
+      return MarkBit(cell_, new_mask, data_only_);
+    }
+  }
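+
+  // Example: a mark bit whose mask is 0x80000000 (the last bit of its 32-bit
+  // cell) rolls over on Next() to mask 0x1 of the following cell; any other
+  // mask is simply shifted one bit to the left within the same cell.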
+
+ private:
+  CellType* cell_;
+  CellType mask_;
+  // This boolean indicates that the object is in a data-only space with no
+  // pointers.  This enables some optimizations when marking.
+  // It is expected that this field is inlined and turned into control flow
+  // at the place where the MarkBit object is created.
+  bool data_only_;
+};
+
+
+// Bitmap is a sequence of cells each containing fixed number of bits.
+class Bitmap {
+ public:
+  static const uint32_t kBitsPerCell = 32;
+  static const uint32_t kBitsPerCellLog2 = 5;
+  static const uint32_t kBitIndexMask = kBitsPerCell - 1;
+  static const uint32_t kBytesPerCell = kBitsPerCell / kBitsPerByte;
+  static const uint32_t kBytesPerCellLog2 = kBitsPerCellLog2 - kBitsPerByteLog2;
+
+  static const size_t kLength = (1 << kPageSizeBits) >> (kPointerSizeLog2);
+
+  static const size_t kSize =
+      (1 << kPageSizeBits) >> (kPointerSizeLog2 + kBitsPerByteLog2);
+
+
+  static int CellsForLength(int length) {
+    return (length + kBitsPerCell - 1) >> kBitsPerCellLog2;
+  }
+
+  int CellsCount() { return CellsForLength(kLength); }
+
+  static int SizeFor(int cells_count) {
+    return sizeof(MarkBit::CellType) * cells_count;
+  }
+
+  INLINE(static uint32_t IndexToCell(uint32_t index)) {
+    return index >> kBitsPerCellLog2;
+  }
+
+  INLINE(static uint32_t CellToIndex(uint32_t index)) {
+    return index << kBitsPerCellLog2;
+  }
+
+  INLINE(static uint32_t CellAlignIndex(uint32_t index)) {
+    return (index + kBitIndexMask) & ~kBitIndexMask;
+  }
+
+  INLINE(MarkBit::CellType* cells()) {
+    return reinterpret_cast<MarkBit::CellType*>(this);
+  }
+
+  INLINE(Address address()) { return reinterpret_cast<Address>(this); }
+
+  INLINE(static Bitmap* FromAddress(Address addr)) {
+    return reinterpret_cast<Bitmap*>(addr);
+  }
+
+  inline MarkBit MarkBitFromIndex(uint32_t index, bool data_only = false) {
+    MarkBit::CellType mask = 1 << (index & kBitIndexMask);
+    MarkBit::CellType* cell = this->cells() + (index >> kBitsPerCellLog2);
+    return MarkBit(cell, mask, data_only);
+  }
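+
+  // Example: mark bit index 37 lives in cell 37 >> kBitsPerCellLog2 == 1 at
+  // bit 37 & kBitIndexMask == 5, so MarkBitFromIndex(37) returns
+  // MarkBit(cells() + 1, 1 << 5, false).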
+
+  static inline void Clear(MemoryChunk* chunk);
+
+  static void PrintWord(uint32_t word, uint32_t himask = 0) {
+    for (uint32_t mask = 1; mask != 0; mask <<= 1) {
+      if ((mask & himask) != 0) PrintF("[");
+      PrintF((mask & word) ? "1" : "0");
+      if ((mask & himask) != 0) PrintF("]");
+    }
+  }
+
+  class CellPrinter {
+   public:
+    CellPrinter() : seq_start(0), seq_type(0), seq_length(0) {}
+
+    void Print(uint32_t pos, uint32_t cell) {
+      if (cell == seq_type) {
+        seq_length++;
+        return;
+      }
+
+      Flush();
+
+      if (IsSeq(cell)) {
+        seq_start = pos;
+        seq_length = 0;
+        seq_type = cell;
+        return;
+      }
+
+      PrintF("%d: ", pos);
+      PrintWord(cell);
+      PrintF("\n");
+    }
+
+    void Flush() {
+      if (seq_length > 0) {
+        PrintF("%d: %dx%d\n", seq_start, seq_type == 0 ? 0 : 1,
+               seq_length * kBitsPerCell);
+        seq_length = 0;
+      }
+    }
+
+    static bool IsSeq(uint32_t cell) { return cell == 0 || cell == 0xFFFFFFFF; }
+
+   private:
+    uint32_t seq_start;
+    uint32_t seq_type;
+    uint32_t seq_length;
+  };
+
+  void Print() {
+    CellPrinter printer;
+    for (int i = 0; i < CellsCount(); i++) {
+      printer.Print(i, cells()[i]);
+    }
+    printer.Flush();
+    PrintF("\n");
+  }
+
+  bool IsClean() {
+    for (int i = 0; i < CellsCount(); i++) {
+      if (cells()[i] != 0) {
+        return false;
+      }
+    }
+    return true;
+  }
+};
+
+
+class SkipList;
+class SlotsBuffer;
+
+// MemoryChunk represents a memory region owned by a specific space.
+// It is divided into the header and the body. Chunk start is always
+// 1MB aligned. Start of the body is aligned so it can accommodate
+// any heap object.
+class MemoryChunk {
+ public:
+  // Only works if the pointer is in the first kPageSize of the MemoryChunk.
+  static MemoryChunk* FromAddress(Address a) {
+    return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask);
+  }
+  static const MemoryChunk* FromAddress(const byte* a) {
+    return reinterpret_cast<const MemoryChunk*>(OffsetFrom(a) &
+                                                ~kAlignmentMask);
+  }
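+
+  // Example: with 1 MB aligned chunks the header of the chunk containing an
+  // address is found by masking off the low bits, e.g.
+  // 0x40A1234 & ~kAlignmentMask == 0x4000000.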
+
+  // Only works for addresses in pointer spaces, not data or code spaces.
+  static inline MemoryChunk* FromAnyPointerAddress(Heap* heap, Address addr);
+
+  Address address() { return reinterpret_cast<Address>(this); }
+
+  bool is_valid() { return address() != NULL; }
+
+  MemoryChunk* next_chunk() const {
+    return reinterpret_cast<MemoryChunk*>(base::Acquire_Load(&next_chunk_));
+  }
+
+  MemoryChunk* prev_chunk() const {
+    return reinterpret_cast<MemoryChunk*>(base::Acquire_Load(&prev_chunk_));
+  }
+
+  void set_next_chunk(MemoryChunk* next) {
+    base::Release_Store(&next_chunk_, reinterpret_cast<base::AtomicWord>(next));
+  }
+
+  void set_prev_chunk(MemoryChunk* prev) {
+    base::Release_Store(&prev_chunk_, reinterpret_cast<base::AtomicWord>(prev));
+  }
+
+  Space* owner() const {
+    if ((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) ==
+        kPageHeaderTag) {
+      return reinterpret_cast<Space*>(reinterpret_cast<intptr_t>(owner_) -
+                                      kPageHeaderTag);
+    } else {
+      return NULL;
+    }
+  }
+
+  void set_owner(Space* space) {
+    DCHECK((reinterpret_cast<intptr_t>(space) & kPageHeaderTagMask) == 0);
+    owner_ = reinterpret_cast<Address>(space) + kPageHeaderTag;
+    DCHECK((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) ==
+           kPageHeaderTag);
+  }
+
+  base::VirtualMemory* reserved_memory() { return &reservation_; }
+
+  void InitializeReservedMemory() { reservation_.Reset(); }
+
+  void set_reserved_memory(base::VirtualMemory* reservation) {
+    DCHECK_NOT_NULL(reservation);
+    reservation_.TakeControl(reservation);
+  }
+
+  bool scan_on_scavenge() { return IsFlagSet(SCAN_ON_SCAVENGE); }
+  void initialize_scan_on_scavenge(bool scan) {
+    if (scan) {
+      SetFlag(SCAN_ON_SCAVENGE);
+    } else {
+      ClearFlag(SCAN_ON_SCAVENGE);
+    }
+  }
+  inline void set_scan_on_scavenge(bool scan);
+
+  int store_buffer_counter() { return store_buffer_counter_; }
+  void set_store_buffer_counter(int counter) {
+    store_buffer_counter_ = counter;
+  }
+
+  bool Contains(Address addr) {
+    return addr >= area_start() && addr < area_end();
+  }
+
+  // Checks whether addr can be a limit of addresses in this page.
+  // It's a limit if it's in the page, or if it's just after the
+  // last byte of the page.
+  bool ContainsLimit(Address addr) {
+    return addr >= area_start() && addr <= area_end();
+  }
+
+  // Every n write barrier invocations we go to runtime even though
+  // we could have handled it in generated code.  This lets us check
+  // whether we have hit the limit and should do some more marking.
+  static const int kWriteBarrierCounterGranularity = 500;
+
+  enum MemoryChunkFlags {
+    IS_EXECUTABLE,
+    ABOUT_TO_BE_FREED,
+    POINTERS_TO_HERE_ARE_INTERESTING,
+    POINTERS_FROM_HERE_ARE_INTERESTING,
+    SCAN_ON_SCAVENGE,
+    IN_FROM_SPACE,  // Mutually exclusive with IN_TO_SPACE.
+    IN_TO_SPACE,    // All pages in new space have one of these two set.
+    NEW_SPACE_BELOW_AGE_MARK,
+    CONTAINS_ONLY_DATA,
+    EVACUATION_CANDIDATE,
+    RESCAN_ON_EVACUATION,
+
+    // WAS_SWEPT indicates that marking bits have been cleared by the sweeper,
+    // otherwise marking bits are still intact.
+    WAS_SWEPT,
+
+    // Large objects can have a progress bar in their page header. These
+    // objects are scanned in increments and will be kept black while being
+    // scanned.
+    // Even if the mutator writes to them they will be kept black and a white
+    // to grey transition is performed in the value.
+    HAS_PROGRESS_BAR,
+
+    // Last flag, keep at bottom.
+    NUM_MEMORY_CHUNK_FLAGS
+  };
+
+
+  static const int kPointersToHereAreInterestingMask =
+      1 << POINTERS_TO_HERE_ARE_INTERESTING;
+
+  static const int kPointersFromHereAreInterestingMask =
+      1 << POINTERS_FROM_HERE_ARE_INTERESTING;
+
+  static const int kEvacuationCandidateMask = 1 << EVACUATION_CANDIDATE;
+
+  static const int kSkipEvacuationSlotsRecordingMask =
+      (1 << EVACUATION_CANDIDATE) | (1 << RESCAN_ON_EVACUATION) |
+      (1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE);
+
+
+  void SetFlag(int flag) { flags_ |= static_cast<uintptr_t>(1) << flag; }
+
+  void ClearFlag(int flag) { flags_ &= ~(static_cast<uintptr_t>(1) << flag); }
+
+  void SetFlagTo(int flag, bool value) {
+    if (value) {
+      SetFlag(flag);
+    } else {
+      ClearFlag(flag);
+    }
+  }
+
+  bool IsFlagSet(int flag) {
+    return (flags_ & (static_cast<uintptr_t>(1) << flag)) != 0;
+  }
+
+  // Set or clear multiple flags at a time. The flags in the mask
+  // are set to the value in "flags", the rest retain the current value
+  // in flags_.
+  void SetFlags(intptr_t flags, intptr_t mask) {
+    flags_ = (flags_ & ~mask) | (flags & mask);
+  }
+
+  // Return all current flags.
+  intptr_t GetFlags() { return flags_; }
+
+
+  // SWEEPING_DONE - The page state when sweeping is complete or sweeping must
+  // not be performed on that page.
+  // SWEEPING_FINALIZE - A sweeper thread is done sweeping this page and will
+  // not touch the page memory anymore.
+  // SWEEPING_IN_PROGRESS - This page is currently swept by a sweeper thread.
+  // SWEEPING_PENDING - This page is ready for parallel sweeping.
+  enum ParallelSweepingState {
+    SWEEPING_DONE,
+    SWEEPING_FINALIZE,
+    SWEEPING_IN_PROGRESS,
+    SWEEPING_PENDING
+  };
+
+  ParallelSweepingState parallel_sweeping() {
+    return static_cast<ParallelSweepingState>(
+        base::Acquire_Load(&parallel_sweeping_));
+  }
+
+  void set_parallel_sweeping(ParallelSweepingState state) {
+    base::Release_Store(&parallel_sweeping_, state);
+  }
+
+  bool TryParallelSweeping() {
+    return base::Acquire_CompareAndSwap(&parallel_sweeping_, SWEEPING_PENDING,
+                                        SWEEPING_IN_PROGRESS) ==
+           SWEEPING_PENDING;
+  }
+
+  bool SweepingCompleted() { return parallel_sweeping() <= SWEEPING_FINALIZE; }
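+
+  // Typical lifecycle of parallel_sweeping_ for a page, based on the states
+  // documented above (a sketch): SWEEPING_DONE -> SWEEPING_PENDING when the
+  // page is queued for sweeping, -> SWEEPING_IN_PROGRESS once a sweeper claims
+  // it via TryParallelSweeping(), -> SWEEPING_FINALIZE when that sweeper is
+  // done, and back to SWEEPING_DONE after the main thread finalizes the page.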
+
+  // Manage live byte count (count of bytes known to be live,
+  // because they are marked black).
+  void ResetLiveBytes() {
+    if (FLAG_gc_verbose) {
+      PrintF("ResetLiveBytes:%p:%x->0\n", static_cast<void*>(this),
+             live_byte_count_);
+    }
+    live_byte_count_ = 0;
+  }
+  void IncrementLiveBytes(int by) {
+    if (FLAG_gc_verbose) {
+      printf("UpdateLiveBytes:%p:%x%c=%x->%x\n", static_cast<void*>(this),
+             live_byte_count_, ((by < 0) ? '-' : '+'), ((by < 0) ? -by : by),
+             live_byte_count_ + by);
+    }
+    live_byte_count_ += by;
+    DCHECK_LE(static_cast<unsigned>(live_byte_count_), size_);
+  }
+  int LiveBytes() {
+    DCHECK(static_cast<unsigned>(live_byte_count_) <= size_);
+    return live_byte_count_;
+  }
+
+  int write_barrier_counter() {
+    return static_cast<int>(write_barrier_counter_);
+  }
+
+  void set_write_barrier_counter(int counter) {
+    write_barrier_counter_ = counter;
+  }
+
+  int progress_bar() {
+    DCHECK(IsFlagSet(HAS_PROGRESS_BAR));
+    return progress_bar_;
+  }
+
+  void set_progress_bar(int progress_bar) {
+    DCHECK(IsFlagSet(HAS_PROGRESS_BAR));
+    progress_bar_ = progress_bar;
+  }
+
+  void ResetProgressBar() {
+    if (IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
+      set_progress_bar(0);
+      ClearFlag(MemoryChunk::HAS_PROGRESS_BAR);
+    }
+  }
+
+  bool IsLeftOfProgressBar(Object** slot) {
+    Address slot_address = reinterpret_cast<Address>(slot);
+    DCHECK(slot_address > this->address());
+    return (slot_address - (this->address() + kObjectStartOffset)) <
+           progress_bar();
+  }
+
+  static void IncrementLiveBytesFromGC(Address address, int by) {
+    MemoryChunk::FromAddress(address)->IncrementLiveBytes(by);
+  }
+
+  static void IncrementLiveBytesFromMutator(Address address, int by);
+
+  static const intptr_t kAlignment =
+      (static_cast<uintptr_t>(1) << kPageSizeBits);
+
+  static const intptr_t kAlignmentMask = kAlignment - 1;
+
+  static const intptr_t kSizeOffset = 0;
+
+  static const intptr_t kLiveBytesOffset =
+      kSizeOffset + kPointerSize + kPointerSize + kPointerSize + kPointerSize +
+      kPointerSize + kPointerSize + kPointerSize + kPointerSize + kIntSize;
+
+  static const size_t kSlotsBufferOffset = kLiveBytesOffset + kIntSize;
+
+  static const size_t kWriteBarrierCounterOffset =
+      kSlotsBufferOffset + kPointerSize + kPointerSize;
+
+  static const size_t kHeaderSize =
+      kWriteBarrierCounterOffset + kPointerSize + kIntSize + kIntSize +
+      kPointerSize + 5 * kPointerSize + kPointerSize + kPointerSize;
+
+  static const int kBodyOffset =
+      CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize);
+
+  // The start offset of the object area in a page. Aligned to both maps and
+  // code alignment to be suitable for both.  Also aligned to 32 words because
+  // the marking bitmap is arranged in 32 bit chunks.
+  static const int kObjectStartAlignment = 32 * kPointerSize;
+  static const int kObjectStartOffset =
+      kBodyOffset - 1 +
+      (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment);
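+
+  // Worked example of the rounding above (illustrative numbers): on a 32-bit
+  // build kObjectStartAlignment is 32 * 4 == 128 bytes, so a kBodyOffset of,
+  // say, 120 yields 119 + (128 - 119 % 128) == 128, the first multiple of 128
+  // that is >= kBodyOffset.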
+
+  size_t size() const { return size_; }
+
+  void set_size(size_t size) { size_ = size; }
+
+  void SetArea(Address area_start, Address area_end) {
+    area_start_ = area_start;
+    area_end_ = area_end;
+  }
+
+  Executability executable() {
+    return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
+  }
+
+  bool ContainsOnlyData() { return IsFlagSet(CONTAINS_ONLY_DATA); }
+
+  bool InNewSpace() {
+    return (flags_ & ((1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE))) != 0;
+  }
+
+  bool InToSpace() { return IsFlagSet(IN_TO_SPACE); }
+
+  bool InFromSpace() { return IsFlagSet(IN_FROM_SPACE); }
+
+  // ---------------------------------------------------------------------
+  // Markbits support
+
+  inline Bitmap* markbits() {
+    return Bitmap::FromAddress(address() + kHeaderSize);
+  }
+
+  void PrintMarkbits() { markbits()->Print(); }
+
+  inline uint32_t AddressToMarkbitIndex(Address addr) {
+    return static_cast<uint32_t>(addr - this->address()) >> kPointerSizeLog2;
+  }
+
+  inline static uint32_t FastAddressToMarkbitIndex(Address addr) {
+    const intptr_t offset = reinterpret_cast<intptr_t>(addr) & kAlignmentMask;
+
+    return static_cast<uint32_t>(offset) >> kPointerSizeLog2;
+  }
+
+  inline Address MarkbitIndexToAddress(uint32_t index) {
+    return this->address() + (index << kPointerSizeLog2);
+  }
+
+  void InsertAfter(MemoryChunk* other);
+  void Unlink();
+
+  inline Heap* heap() const { return heap_; }
+
+  static const int kFlagsOffset = kPointerSize;
+
+  bool IsEvacuationCandidate() { return IsFlagSet(EVACUATION_CANDIDATE); }
+
+  bool ShouldSkipEvacuationSlotRecording() {
+    return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0;
+  }
+
+  inline SkipList* skip_list() { return skip_list_; }
+
+  inline void set_skip_list(SkipList* skip_list) { skip_list_ = skip_list; }
+
+  inline SlotsBuffer* slots_buffer() { return slots_buffer_; }
+
+  inline SlotsBuffer** slots_buffer_address() { return &slots_buffer_; }
+
+  void MarkEvacuationCandidate() {
+    DCHECK(slots_buffer_ == NULL);
+    SetFlag(EVACUATION_CANDIDATE);
+  }
+
+  void ClearEvacuationCandidate() {
+    DCHECK(slots_buffer_ == NULL);
+    ClearFlag(EVACUATION_CANDIDATE);
+  }
+
+  Address area_start() { return area_start_; }
+  Address area_end() { return area_end_; }
+  int area_size() { return static_cast<int>(area_end() - area_start()); }
+  bool CommitArea(size_t requested);
+
+  // Approximate amount of physical memory committed for this chunk.
+  size_t CommittedPhysicalMemory() { return high_water_mark_; }
+
+  static inline void UpdateHighWaterMark(Address mark);
+
+ protected:
+  size_t size_;
+  intptr_t flags_;
+
+  // Start and end of allocatable memory on this chunk.
+  Address area_start_;
+  Address area_end_;
+
+  // If the chunk needs to remember its memory reservation, it is stored here.
+  base::VirtualMemory reservation_;
+  // The identity of the owning space.  This is tagged as a failure pointer, but
+  // no failure can be in an object, so this can be distinguished from any entry
+  // in a fixed array.
+  Address owner_;
+  Heap* heap_;
+  // Used by the store buffer to keep track of which pages to mark scan-on-
+  // scavenge.
+  int store_buffer_counter_;
+  // Count of bytes marked black on page.
+  int live_byte_count_;
+  SlotsBuffer* slots_buffer_;
+  SkipList* skip_list_;
+  intptr_t write_barrier_counter_;
+  // Used by the incremental marker to keep track of the scanning progress in
+  // large objects that have a progress bar and are scanned in increments.
+  int progress_bar_;
+  // Assuming the initial allocation on a page is sequential,
+  // count highest number of bytes ever allocated on the page.
+  int high_water_mark_;
+
+  base::AtomicWord parallel_sweeping_;
+
+  // PagedSpace free-list statistics.
+  intptr_t available_in_small_free_list_;
+  intptr_t available_in_medium_free_list_;
+  intptr_t available_in_large_free_list_;
+  intptr_t available_in_huge_free_list_;
+  intptr_t non_available_small_blocks_;
+
+  static MemoryChunk* Initialize(Heap* heap, Address base, size_t size,
+                                 Address area_start, Address area_end,
+                                 Executability executable, Space* owner);
+
+ private:
+  // next_chunk_ holds a pointer of type MemoryChunk
+  base::AtomicWord next_chunk_;
+  // prev_chunk_ holds a pointer of type MemoryChunk
+  base::AtomicWord prev_chunk_;
+
+  friend class MemoryAllocator;
+};
+
+
+STATIC_ASSERT(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);
+
+
+// -----------------------------------------------------------------------------
+// A page is a memory chunk of a size 1MB. Large object pages may be larger.
+//
+// The only way to get a page pointer is by calling factory methods:
+//   Page* p = Page::FromAddress(addr); or
+//   Page* p = Page::FromAllocationTop(top);
+class Page : public MemoryChunk {
+ public:
+  // Returns the page containing a given address. The address ranges
+  // from [page_addr .. page_addr + kPageSize[
+  // This only works if the object is in fact in a page.  See also MemoryChunk::
+  // FromAddress() and FromAnyPointerAddress().
+  INLINE(static Page* FromAddress(Address a)) {
+    return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask);
+  }
+
+  // Returns the page containing an allocation top. Because an allocation
+  // top address can be the upper bound of the page, we need to subtract
+  // kPointerSize from it first. The address ranges from
+  // [page_addr + kObjectStartOffset .. page_addr + kPageSize].
+  INLINE(static Page* FromAllocationTop(Address top)) {
+    Page* p = FromAddress(top - kPointerSize);
+    return p;
+  }
+
+  // Returns the next page in the chain of pages owned by a space.
+  inline Page* next_page();
+  inline Page* prev_page();
+  inline void set_next_page(Page* page);
+  inline void set_prev_page(Page* page);
+
+  // Checks whether an address is page aligned.
+  static bool IsAlignedToPageSize(Address a) {
+    return 0 == (OffsetFrom(a) & kPageAlignmentMask);
+  }
+
+  // Returns the offset of a given address to this page.
+  INLINE(int Offset(Address a)) {
+    int offset = static_cast<int>(a - address());
+    return offset;
+  }
+
+  // Returns the address for a given offset into this page.
+  Address OffsetToAddress(int offset) {
+    DCHECK_PAGE_OFFSET(offset);
+    return address() + offset;
+  }
+
+  // ---------------------------------------------------------------------
+
+  // Page size in bytes.  This must be a multiple of the OS page size.
+  static const int kPageSize = 1 << kPageSizeBits;
+
+  // Maximum object size that fits in a page. Objects larger than that size
+  // are allocated in large object space and are never moved in memory. This
+  // also applies to new space allocation, since objects are never migrated
+  // from new space to large object space.  Takes double alignment into account.
+  static const int kMaxRegularHeapObjectSize = kPageSize - kObjectStartOffset;
+
+  // Page size mask.
+  static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
+
+  inline void ClearGCFields();
+
+  static inline Page* Initialize(Heap* heap, MemoryChunk* chunk,
+                                 Executability executable, PagedSpace* owner);
+
+  void InitializeAsAnchor(PagedSpace* owner);
+
+  bool WasSwept() { return IsFlagSet(WAS_SWEPT); }
+  void SetWasSwept() { SetFlag(WAS_SWEPT); }
+  void ClearWasSwept() { ClearFlag(WAS_SWEPT); }
+
+  void ResetFreeListStatistics();
+
+#define FRAGMENTATION_STATS_ACCESSORS(type, name) \
+  type name() { return name##_; }                 \
+  void set_##name(type name) { name##_ = name; }  \
+  void add_##name(type name) { name##_ += name; }
+
+  FRAGMENTATION_STATS_ACCESSORS(intptr_t, non_available_small_blocks)
+  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_small_free_list)
+  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_medium_free_list)
+  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_large_free_list)
+  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_huge_free_list)
+
+#undef FRAGMENTATION_STATS_ACCESSORS
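+
+  // For example, FRAGMENTATION_STATS_ACCESSORS(intptr_t,
+  // non_available_small_blocks) above expands (roughly) to
+  //
+  //   intptr_t non_available_small_blocks() {
+  //     return non_available_small_blocks_;
+  //   }
+  //   void add_non_available_small_blocks(intptr_t n) {
+  //     non_available_small_blocks_ += n;
+  //   }
+  //
+  // plus the corresponding setter; all of these operate on the free-list
+  // statistics fields declared in MemoryChunk.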
+
+#ifdef DEBUG
+  void Print();
+#endif  // DEBUG
+
+  friend class MemoryAllocator;
+};
+
+
+STATIC_ASSERT(sizeof(Page) <= MemoryChunk::kHeaderSize);
+
+
+class LargePage : public MemoryChunk {
+ public:
+  HeapObject* GetObject() { return HeapObject::FromAddress(area_start()); }
+
+  inline LargePage* next_page() const {
+    return static_cast<LargePage*>(next_chunk());
+  }
+
+  inline void set_next_page(LargePage* page) { set_next_chunk(page); }
+
+ private:
+  static inline LargePage* Initialize(Heap* heap, MemoryChunk* chunk);
+
+  friend class MemoryAllocator;
+};
+
+STATIC_ASSERT(sizeof(LargePage) <= MemoryChunk::kHeaderSize);
+
+// ----------------------------------------------------------------------------
+// Space is the abstract superclass for all allocation spaces.
+class Space : public Malloced {
+ public:
+  Space(Heap* heap, AllocationSpace id, Executability executable)
+      : heap_(heap), id_(id), executable_(executable) {}
+
+  virtual ~Space() {}
+
+  Heap* heap() const { return heap_; }
+
+  // Does the space need executable memory?
+  Executability executable() { return executable_; }
+
+  // Identity used in error reporting.
+  AllocationSpace identity() { return id_; }
+
+  // Returns allocated size.
+  virtual intptr_t Size() = 0;
+
+  // Returns size of objects. Can differ from the allocated size
+  // (e.g. see LargeObjectSpace).
+  virtual intptr_t SizeOfObjects() { return Size(); }
+
+  virtual int RoundSizeDownToObjectAlignment(int size) {
+    if (id_ == CODE_SPACE) {
+      return RoundDown(size, kCodeAlignment);
+    } else {
+      return RoundDown(size, kPointerSize);
+    }
+  }
+
+#ifdef DEBUG
+  virtual void Print() = 0;
+#endif
+
+ private:
+  Heap* heap_;
+  AllocationSpace id_;
+  Executability executable_;
+};
+
+
+// ----------------------------------------------------------------------------
+// All heap objects containing executable code (code objects) must be allocated
+// from a 2 GB range of memory, so that they can call each other using 32-bit
+// displacements.  This happens automatically on 32-bit platforms, where 32-bit
+// displacements cover the entire 4GB virtual address space.  On 64-bit
+// platforms, we support this using the CodeRange object, which reserves and
+// manages a range of virtual memory.
+class CodeRange {
+ public:
+  explicit CodeRange(Isolate* isolate);
+  ~CodeRange() { TearDown(); }
+
+  // Reserves a range of virtual memory, but does not commit any of it.
+  // Can only be called once, at heap initialization time.
+  // Returns false on failure.
+  bool SetUp(size_t requested_size);
+
+  // Frees the range of virtual memory, and frees the data structures used to
+  // manage it.
+  void TearDown();
+
+  bool valid() { return code_range_ != NULL; }
+  Address start() {
+    DCHECK(valid());
+    return static_cast<Address>(code_range_->address());
+  }
+  bool contains(Address address) {
+    if (!valid()) return false;
+    Address start = static_cast<Address>(code_range_->address());
+    return start <= address && address < start + code_range_->size();
+  }
+
+  // Allocates a chunk of memory from the large-object portion of
+  // the code range.  On platforms with no separate code range, should
+  // not be called.
+  MUST_USE_RESULT Address AllocateRawMemory(const size_t requested_size,
+                                            const size_t commit_size,
+                                            size_t* allocated);
+  bool CommitRawMemory(Address start, size_t length);
+  bool UncommitRawMemory(Address start, size_t length);
+  void FreeRawMemory(Address buf, size_t length);
+
+ private:
+  Isolate* isolate_;
+
+  // The reserved range of virtual memory that all code objects are put in.
+  base::VirtualMemory* code_range_;
+  // Plain old data class, just a struct plus a constructor.
+  class FreeBlock {
+   public:
+    FreeBlock(Address start_arg, size_t size_arg)
+        : start(start_arg), size(size_arg) {
+      DCHECK(IsAddressAligned(start, MemoryChunk::kAlignment));
+      DCHECK(size >= static_cast<size_t>(Page::kPageSize));
+    }
+    FreeBlock(void* start_arg, size_t size_arg)
+        : start(static_cast<Address>(start_arg)), size(size_arg) {
+      DCHECK(IsAddressAligned(start, MemoryChunk::kAlignment));
+      DCHECK(size >= static_cast<size_t>(Page::kPageSize));
+    }
+
+    Address start;
+    size_t size;
+  };
+
+  // Freed blocks of memory are added to the free list.  When the allocation
+  // list is exhausted, the free list is sorted and merged to make the new
+  // allocation list.
+  List<FreeBlock> free_list_;
+  // Memory is allocated from the free blocks on the allocation list.
+  // The block at current_allocation_block_index_ is the current block.
+  List<FreeBlock> allocation_list_;
+  int current_allocation_block_index_;
+
+  // Finds a block on the allocation list that contains at least the
+  // requested amount of memory.  If none is found, sorts and merges
+  // the existing free memory blocks, and searches again.
+  // If none can be found, returns false.
+  bool GetNextAllocationBlock(size_t requested);
+  // Compares the start addresses of two free blocks.
+  static int CompareFreeBlockAddress(const FreeBlock* left,
+                                     const FreeBlock* right);
+
+  DISALLOW_COPY_AND_ASSIGN(CodeRange);
+};
+
+
+class SkipList {
+ public:
+  SkipList() { Clear(); }
+
+  void Clear() {
+    for (int idx = 0; idx < kSize; idx++) {
+      starts_[idx] = reinterpret_cast<Address>(-1);
+    }
+  }
+
+  Address StartFor(Address addr) { return starts_[RegionNumber(addr)]; }
+
+  void AddObject(Address addr, int size) {
+    int start_region = RegionNumber(addr);
+    int end_region = RegionNumber(addr + size - kPointerSize);
+    for (int idx = start_region; idx <= end_region; idx++) {
+      if (starts_[idx] > addr) starts_[idx] = addr;
+    }
+  }
+
+  static inline int RegionNumber(Address addr) {
+    return (OffsetFrom(addr) & Page::kPageAlignmentMask) >> kRegionSizeLog2;
+  }
+
+  static void Update(Address addr, int size) {
+    Page* page = Page::FromAddress(addr);
+    SkipList* list = page->skip_list();
+    if (list == NULL) {
+      list = new SkipList();
+      page->set_skip_list(list);
+    }
+
+    list->AddObject(addr, size);
+  }
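+  // Illustrative example (assuming kPointerSize == 8): with kRegionSizeLog2
+  // == 13 each region covers 8 KB, so recording a 24-byte object at page
+  // offset 0x2008 only updates starts_[1], because both its first and last
+  // word fall into region 1 (page offsets 0x2000..0x3fff).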
+
+ private:
+  static const int kRegionSizeLog2 = 13;
+  static const int kRegionSize = 1 << kRegionSizeLog2;
+  static const int kSize = Page::kPageSize / kRegionSize;
+
+  STATIC_ASSERT(Page::kPageSize % kRegionSize == 0);
+
+  Address starts_[kSize];
+};
+
+
+// ----------------------------------------------------------------------------
+// A space acquires chunks of memory from the operating system. The memory
+// allocator allocates and deallocates pages for the paged heap spaces and
+// large pages for the large object space.
+//
+// Each space has to manage its own pages.
+//
+class MemoryAllocator {
+ public:
+  explicit MemoryAllocator(Isolate* isolate);
+
+  // Initializes its internal bookkeeping structures.
+  // Max capacity of the total space and executable memory limit.
+  bool SetUp(intptr_t max_capacity, intptr_t capacity_executable);
+
+  void TearDown();
+
+  Page* AllocatePage(intptr_t size, PagedSpace* owner,
+                     Executability executable);
+
+  LargePage* AllocateLargePage(intptr_t object_size, Space* owner,
+                               Executability executable);
+
+  void Free(MemoryChunk* chunk);
+
+  // Returns the maximum available bytes of heaps.
+  intptr_t Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }
+
+  // Returns allocated spaces in bytes.
+  intptr_t Size() { return size_; }
+
+  // Returns the maximum available executable bytes of heaps.
+  intptr_t AvailableExecutable() {
+    if (capacity_executable_ < size_executable_) return 0;
+    return capacity_executable_ - size_executable_;
+  }
+
+  // Returns allocated executable spaces in bytes.
+  intptr_t SizeExecutable() { return size_executable_; }
+
+  // Returns maximum available bytes that the old space can have.
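+  // For example, assuming 1 MB pages (kPageSize is platform-configured) and
+  // 8 MB of Available() memory, this is 8 * (Page::kPageSize -
+  // Page::kObjectStartOffset) bytes: slightly less than 8 MB, because every
+  // page reserves its first kObjectStartOffset bytes for bookkeeping.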
+  intptr_t MaxAvailable() {
+    return (Available() / Page::kPageSize) * Page::kMaxRegularHeapObjectSize;
+  }
+
+  // Returns an indication of whether a pointer is in a space that has
+  // been allocated by this MemoryAllocator.
+  V8_INLINE bool IsOutsideAllocatedSpace(const void* address) const {
+    return address < lowest_ever_allocated_ ||
+           address >= highest_ever_allocated_;
+  }
+
+#ifdef DEBUG
+  // Reports statistic info of the space.
+  void ReportStatistics();
+#endif
+
+  // Returns a MemoryChunk in which the memory region from commit_area_size to
+  // reserve_area_size of the chunk area is reserved but not committed; it
+  // can be committed later by calling MemoryChunk::CommitArea.
+  MemoryChunk* AllocateChunk(intptr_t reserve_area_size,
+                             intptr_t commit_area_size,
+                             Executability executable, Space* space);
+
+  Address ReserveAlignedMemory(size_t requested, size_t alignment,
+                               base::VirtualMemory* controller);
+  Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size,
+                                size_t alignment, Executability executable,
+                                base::VirtualMemory* controller);
+
+  bool CommitMemory(Address addr, size_t size, Executability executable);
+
+  void FreeMemory(base::VirtualMemory* reservation, Executability executable);
+  void FreeMemory(Address addr, size_t size, Executability executable);
+
+  // Commit a contiguous block of memory from the initial chunk.  Assumes that
+  // the address is not NULL, the size is greater than zero, and that the
+  // block is contained in the initial chunk.  Returns true if it succeeded
+  // and false otherwise.
+  bool CommitBlock(Address start, size_t size, Executability executable);
+
+  // Uncommit a contiguous block of memory [start..(start+size)[.
+  // Assumes that start is not NULL, the size is greater than zero, and the
+  // block is contained in the initial chunk.  Returns true if it succeeded
+  // and false otherwise.
+  bool UncommitBlock(Address start, size_t size);
+
+  // Zaps a contiguous block of memory [start..(start+size)[ thus
+  // filling it up with a recognizable non-NULL bit pattern.
+  void ZapBlock(Address start, size_t size);
+
+  void PerformAllocationCallback(ObjectSpace space, AllocationAction action,
+                                 size_t size);
+
+  void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
+                                   ObjectSpace space, AllocationAction action);
+
+  void RemoveMemoryAllocationCallback(MemoryAllocationCallback callback);
+
+  bool MemoryAllocationCallbackRegistered(MemoryAllocationCallback callback);
+
+  static int CodePageGuardStartOffset();
+
+  static int CodePageGuardSize();
+
+  static int CodePageAreaStartOffset();
+
+  static int CodePageAreaEndOffset();
+
+  static int CodePageAreaSize() {
+    return CodePageAreaEndOffset() - CodePageAreaStartOffset();
+  }
+
+  MUST_USE_RESULT bool CommitExecutableMemory(base::VirtualMemory* vm,
+                                              Address start, size_t commit_size,
+                                              size_t reserved_size);
+
+ private:
+  Isolate* isolate_;
+
+  // Maximum space size in bytes.
+  size_t capacity_;
+  // Maximum subset of capacity_ that can be executable
+  size_t capacity_executable_;
+
+  // Allocated space size in bytes.
+  size_t size_;
+  // Allocated executable space size in bytes.
+  size_t size_executable_;
+
+  // We keep the lowest and highest addresses allocated as a quick way
+  // of determining that pointers are outside the heap. The estimate is
+  // conservative, i.e. not all addresses in 'allocated' space are allocated
+  // to our heap. The range is [lowest, highest[, inclusive on the low end
+  // and exclusive on the high end.
+  void* lowest_ever_allocated_;
+  void* highest_ever_allocated_;
+
+  struct MemoryAllocationCallbackRegistration {
+    MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,
+                                         ObjectSpace space,
+                                         AllocationAction action)
+        : callback(callback), space(space), action(action) {}
+    MemoryAllocationCallback callback;
+    ObjectSpace space;
+    AllocationAction action;
+  };
+
+  // A list of callbacks that are triggered when memory is allocated or freed.
+  List<MemoryAllocationCallbackRegistration> memory_allocation_callbacks_;
+
+  // Initializes pages in a chunk. Returns the first page address.
+  // This function and GetChunkId() are provided for the mark-compact
+  // collector to rebuild page headers in the from space, which is
+  // used as a marking stack and its page headers are destroyed.
+  Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
+                               PagedSpace* owner);
+
+  void UpdateAllocatedSpaceLimits(void* low, void* high) {
+    lowest_ever_allocated_ = Min(lowest_ever_allocated_, low);
+    highest_ever_allocated_ = Max(highest_ever_allocated_, high);
+  }
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
+};
+
+
+// -----------------------------------------------------------------------------
+// Interface for heap object iterator to be implemented by all object space
+// object iterators.
+//
+// NOTE: The space-specific object iterators also implement their own next()
+//       method, which avoids virtual function calls when iterating over a
+//       specific space.
+
+class ObjectIterator : public Malloced {
+ public:
+  virtual ~ObjectIterator() {}
+
+  virtual HeapObject* next_object() = 0;
+};
+
+
+// -----------------------------------------------------------------------------
+// Heap object iterator in new/old/map spaces.
+//
+// A HeapObjectIterator iterates objects from the bottom of the given space
+// to its top or from the bottom of the given page to its top.
+//
+// If objects are allocated in the page during iteration the iterator may
+// or may not iterate over those objects.  The caller must create a new
+// iterator in order to be sure to visit these new objects.
+class HeapObjectIterator : public ObjectIterator {
+ public:
+  // Creates a new object iterator in a given space.
+  // If the size function is not given, the iterator calls the default
+  // Object::Size().
+  explicit HeapObjectIterator(PagedSpace* space);
+  HeapObjectIterator(PagedSpace* space, HeapObjectCallback size_func);
+  HeapObjectIterator(Page* page, HeapObjectCallback size_func);
+
+  // Advance to the next object, skipping free spaces and other fillers and
+  // skipping the special garbage section of which there is one per space.
+  // Returns NULL when the iteration has ended.
+  inline HeapObject* Next() {
+    do {
+      HeapObject* next_obj = FromCurrentPage();
+      if (next_obj != NULL) return next_obj;
+    } while (AdvanceToNextPage());
+    return NULL;
+  }
+
+  virtual HeapObject* next_object() { return Next(); }
+
+ private:
+  enum PageMode { kOnePageOnly, kAllPagesInSpace };
+
+  Address cur_addr_;              // Current iteration point.
+  Address cur_end_;               // End iteration point.
+  HeapObjectCallback size_func_;  // Size function or NULL.
+  PagedSpace* space_;
+  PageMode page_mode_;
+
+  // Fast (inlined) path of next().
+  inline HeapObject* FromCurrentPage();
+
+  // Slow path of next(), goes into the next page.  Returns false if the
+  // iteration has ended.
+  bool AdvanceToNextPage();
+
+  // Initializes fields.
+  inline void Initialize(PagedSpace* owner, Address start, Address end,
+                         PageMode mode, HeapObjectCallback size_func);
+};
+
+
+// -----------------------------------------------------------------------------
+// A PageIterator iterates the pages in a paged space.
+
+class PageIterator BASE_EMBEDDED {
+ public:
+  explicit inline PageIterator(PagedSpace* space);
+
+  inline bool has_next();
+  inline Page* next();
+
+ private:
+  PagedSpace* space_;
+  Page* prev_page_;  // Previous page returned.
+  // Next page that will be returned.  Cached here so that we can use this
+  // iterator for operations that deallocate pages.
+  Page* next_page_;
+};
+
+
+// -----------------------------------------------------------------------------
+// A space has a circular list of pages. The next page can be accessed via
+// Page::next_page() call.
+
+// An abstraction of allocation and relocation pointers in a page-structured
+// space.
+class AllocationInfo {
+ public:
+  AllocationInfo() : top_(NULL), limit_(NULL) {}
+
+  INLINE(void set_top(Address top)) {
+    SLOW_DCHECK(top == NULL ||
+                (reinterpret_cast<intptr_t>(top) & HeapObjectTagMask()) == 0);
+    top_ = top;
+  }
+
+  INLINE(Address top()) const {
+    SLOW_DCHECK(top_ == NULL ||
+                (reinterpret_cast<intptr_t>(top_) & HeapObjectTagMask()) == 0);
+    return top_;
+  }
+
+  Address* top_address() { return &top_; }
+
+  INLINE(void set_limit(Address limit)) {
+    SLOW_DCHECK(limit == NULL ||
+                (reinterpret_cast<intptr_t>(limit) & HeapObjectTagMask()) == 0);
+    limit_ = limit;
+  }
+
+  INLINE(Address limit()) const {
+    SLOW_DCHECK(limit_ == NULL ||
+                (reinterpret_cast<intptr_t>(limit_) & HeapObjectTagMask()) ==
+                    0);
+    return limit_;
+  }
+
+  Address* limit_address() { return &limit_; }
+
+#ifdef DEBUG
+  bool VerifyPagedAllocation() {
+    return (Page::FromAllocationTop(top_) == Page::FromAllocationTop(limit_)) &&
+           (top_ <= limit_);
+  }
+#endif
+
+ private:
+  // Current allocation top.
+  Address top_;
+  // Current allocation limit.
+  Address limit_;
+};
+
+
+// An abstraction of the accounting statistics of a page-structured space.
+// The 'capacity' of a space is the number of object-area bytes (i.e., not
+// including page bookkeeping structures) currently in the space. The 'size'
+// of a space is the number of allocated bytes, the 'waste' in the space is
+// the number of bytes that are not allocated and not available to
+// allocation without reorganizing the space via a GC (e.g. small blocks due
+// to internal fragmentation, top of page areas in map space), and the bytes
+// 'available' is the number of unallocated bytes that are not waste.  The
+// capacity is the sum of size, waste, and available.
+//
+// The stats are only set by functions that ensure they stay balanced. These
+// functions increase or decrease one of the non-capacity stats in
+// conjunction with capacity, or else they always balance increases and
+// decreases to the non-capacity stats.
+class AllocationStats BASE_EMBEDDED {
+ public:
+  AllocationStats() { Clear(); }
+
+  // Zero out all the allocation statistics (i.e., no capacity).
+  void Clear() {
+    capacity_ = 0;
+    max_capacity_ = 0;
+    size_ = 0;
+    waste_ = 0;
+  }
+
+  void ClearSizeWaste() {
+    size_ = capacity_;
+    waste_ = 0;
+  }
+
+  // Reset the allocation statistics (i.e., available = capacity with no
+  // wasted or allocated bytes).
+  void Reset() {
+    size_ = 0;
+    waste_ = 0;
+  }
+
+  // Accessors for the allocation statistics.
+  intptr_t Capacity() { return capacity_; }
+  intptr_t MaxCapacity() { return max_capacity_; }
+  intptr_t Size() { return size_; }
+  intptr_t Waste() { return waste_; }
+
+  // Grow the space by adding available bytes.  They are initially marked as
+  // being in use (part of the size), but will normally be immediately freed,
+  // putting them on the free list and removing them from size_.
+  void ExpandSpace(int size_in_bytes) {
+    capacity_ += size_in_bytes;
+    size_ += size_in_bytes;
+    if (capacity_ > max_capacity_) {
+      max_capacity_ = capacity_;
+    }
+    DCHECK(size_ >= 0);
+  }
+
+  // Shrink the space by removing available bytes.  Since shrinking is done
+  // during sweeping, bytes have been marked as being in use (part of the size)
+  // and are hereby freed.
+  void ShrinkSpace(int size_in_bytes) {
+    capacity_ -= size_in_bytes;
+    size_ -= size_in_bytes;
+    DCHECK(size_ >= 0);
+  }
+
+  // Allocate from available bytes (available -> size).
+  void AllocateBytes(intptr_t size_in_bytes) {
+    size_ += size_in_bytes;
+    DCHECK(size_ >= 0);
+  }
+
+  // Free allocated bytes, making them available (size -> available).
+  void DeallocateBytes(intptr_t size_in_bytes) {
+    size_ -= size_in_bytes;
+    DCHECK(size_ >= 0);
+  }
+
+  // Waste free bytes (available -> waste).
+  void WasteBytes(int size_in_bytes) {
+    DCHECK(size_in_bytes >= 0);
+    waste_ += size_in_bytes;
+  }
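+  // Illustrative sequence showing how the stats stay balanced
+  // (capacity == size + waste + available):
+  //   AllocationStats s;
+  //   s.ExpandSpace(4096);      // capacity 4096, size 4096
+  //   s.DeallocateBytes(4032);  // 4032 bytes become available
+  //   s.AllocateBytes(32);      // size is now 96
+  //   s.WasteBytes(16);         // waste 16, leaving 3984 bytes available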
+
+ private:
+  intptr_t capacity_;
+  intptr_t max_capacity_;
+  intptr_t size_;
+  intptr_t waste_;
+};
+
+
+// -----------------------------------------------------------------------------
+// Free lists for old object spaces
+//
+// Free-list nodes are free blocks in the heap.  They look like heap objects
+// (free-list node pointers have the heap object tag, and they have a map like
+// a heap object).  They have a size and a next pointer.  The next pointer is
+// the raw address of the next free list node (or NULL).
+class FreeListNode : public HeapObject {
+ public:
+  // Obtain a free-list node from a raw address.  This is not a cast because
+  // it neither checks nor requires that the first word at the address is a map
+  // pointer.
+  static FreeListNode* FromAddress(Address address) {
+    return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address));
+  }
+
+  static inline bool IsFreeListNode(HeapObject* object);
+
+  // Set the size in bytes, which can be read with HeapObject::Size().  This
+  // function also writes a map to the first word of the block so that it
+  // looks like a heap object to the garbage collector and heap iteration
+  // functions.
+  void set_size(Heap* heap, int size_in_bytes);
+
+  // Accessors for the next field.
+  inline FreeListNode* next();
+  inline FreeListNode** next_address();
+  inline void set_next(FreeListNode* next);
+
+  inline void Zap();
+
+  static inline FreeListNode* cast(Object* object) {
+    return reinterpret_cast<FreeListNode*>(object);
+  }
+
+ private:
+  static const int kNextOffset = POINTER_SIZE_ALIGN(FreeSpace::kHeaderSize);
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode);
+};
+
+
+// The free list category holds a pointer to the top element and a pointer to
+// the end element of the linked list of free memory blocks.
+class FreeListCategory {
+ public:
+  FreeListCategory() : top_(0), end_(NULL), available_(0) {}
+
+  intptr_t Concatenate(FreeListCategory* category);
+
+  void Reset();
+
+  void Free(FreeListNode* node, int size_in_bytes);
+
+  FreeListNode* PickNodeFromList(int* node_size);
+  FreeListNode* PickNodeFromList(int size_in_bytes, int* node_size);
+
+  intptr_t EvictFreeListItemsInList(Page* p);
+  bool ContainsPageFreeListItemsInList(Page* p);
+
+  void RepairFreeList(Heap* heap);
+
+  FreeListNode* top() const {
+    return reinterpret_cast<FreeListNode*>(base::NoBarrier_Load(&top_));
+  }
+
+  void set_top(FreeListNode* top) {
+    base::NoBarrier_Store(&top_, reinterpret_cast<base::AtomicWord>(top));
+  }
+
+  FreeListNode** GetEndAddress() { return &end_; }
+  FreeListNode* end() const { return end_; }
+  void set_end(FreeListNode* end) { end_ = end; }
+
+  int* GetAvailableAddress() { return &available_; }
+  int available() const { return available_; }
+  void set_available(int available) { available_ = available; }
+
+  base::Mutex* mutex() { return &mutex_; }
+
+  bool IsEmpty() { return top() == 0; }
+
+#ifdef DEBUG
+  intptr_t SumFreeList();
+  int FreeListLength();
+#endif
+
+ private:
+  // top_ points to the top FreeListNode* in the free list category.
+  base::AtomicWord top_;
+  FreeListNode* end_;
+  base::Mutex mutex_;
+
+  // Total available bytes in all blocks of this free list category.
+  int available_;
+};
+
+
+// The free list for the old space.  The free list is organized in such a way
+// as to encourage objects allocated around the same time to be near each
+// other.  The normal way to allocate is intended to be by bumping a 'top'
+// pointer until it hits a 'limit' pointer.  When the limit is hit we need to
+// find a new space to allocate from.  This is done with the free list, which
+// is divided up into rough categories to cut down on waste.  Having finer
+// categories would scatter allocation more.
+
+// The old space free list is organized in categories.
+// 1-31 words:  Such small free areas are discarded for efficiency reasons.
+//     They can be reclaimed by the compactor.  However the distance between top
+//     and limit may be this small.
+// 32-255 words: There is a list of spaces this large.  It is used for top and
+//     limit when the object we need to allocate is 1-31 words in size.  These
+//     spaces are called small.
+// 256-2047 words: There is a list of spaces this large.  It is used for top and
+//     limit when the object we need to allocate is 32-255 words in size.  These
+//     spaces are called medium.
+// 2048-16383 words: There is a list of spaces this large.  It is used for top
+//     and limit when the object we need to allocate is 256-2047 words in size.
+//     These spaces are called large.
+// At least 16384 words.  This list is for objects of 2048 words or larger.
+//     Empty pages are added to this list.  These spaces are called huge.
+class FreeList {
+ public:
+  explicit FreeList(PagedSpace* owner);
+
+  intptr_t Concatenate(FreeList* free_list);
+
+  // Clear the free list.
+  void Reset();
+
+  // Return the number of bytes available on the free list.
+  intptr_t available() {
+    return small_list_.available() + medium_list_.available() +
+           large_list_.available() + huge_list_.available();
+  }
+
+  // Place a node on the free list.  The block of size 'size_in_bytes'
+  // starting at 'start' is placed on the free list.  The return value is the
+  // number of bytes that have been lost due to internal fragmentation by
+  // freeing the block.  Bookkeeping information will be written to the block,
+  // i.e., its contents will be destroyed.  The start address should be word
+  // aligned, and the size should be a non-zero multiple of the word size.
+  int Free(Address start, int size_in_bytes);
+
+  // This method returns how much memory can be allocated after freeing
+  // maximum_freed memory.
+  static inline int GuaranteedAllocatable(int maximum_freed) {
+    if (maximum_freed < kSmallListMin) {
+      return 0;
+    } else if (maximum_freed <= kSmallListMax) {
+      return kSmallAllocationMax;
+    } else if (maximum_freed <= kMediumListMax) {
+      return kMediumAllocationMax;
+    } else if (maximum_freed <= kLargeListMax) {
+      return kLargeAllocationMax;
+    }
+    return maximum_freed;
+  }
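+  // For example, assuming kPointerSize == 8: kSmallListMin is 256 bytes, so
+  // GuaranteedAllocatable(100) is 0 (such small blocks are dropped), while
+  // GuaranteedAllocatable(300) is kSmallAllocationMax, i.e. 248 bytes.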
+
+  // Allocate a block of size 'size_in_bytes' from the free list.  The block
+  // is uninitialized.  A failure is returned if no block is available.  The
+  // number of bytes lost to fragmentation is returned in the output parameter
+  // 'wasted_bytes'.  The size should be a non-zero multiple of the word size.
+  MUST_USE_RESULT HeapObject* Allocate(int size_in_bytes);
+
+  bool IsEmpty() {
+    return small_list_.IsEmpty() && medium_list_.IsEmpty() &&
+           large_list_.IsEmpty() && huge_list_.IsEmpty();
+  }
+
+#ifdef DEBUG
+  void Zap();
+  intptr_t SumFreeLists();
+  bool IsVeryLong();
+#endif
+
+  // Used after booting the VM.
+  void RepairLists(Heap* heap);
+
+  intptr_t EvictFreeListItems(Page* p);
+  bool ContainsPageFreeListItems(Page* p);
+
+  FreeListCategory* small_list() { return &small_list_; }
+  FreeListCategory* medium_list() { return &medium_list_; }
+  FreeListCategory* large_list() { return &large_list_; }
+  FreeListCategory* huge_list() { return &huge_list_; }
+
+ private:
+  // The size range of blocks, in bytes.
+  static const int kMinBlockSize = 3 * kPointerSize;
+  static const int kMaxBlockSize = Page::kMaxRegularHeapObjectSize;
+
+  FreeListNode* FindNodeFor(int size_in_bytes, int* node_size);
+
+  PagedSpace* owner_;
+  Heap* heap_;
+
+  static const int kSmallListMin = 0x20 * kPointerSize;
+  static const int kSmallListMax = 0xff * kPointerSize;
+  static const int kMediumListMax = 0x7ff * kPointerSize;
+  static const int kLargeListMax = 0x3fff * kPointerSize;
+  static const int kSmallAllocationMax = kSmallListMin - kPointerSize;
+  static const int kMediumAllocationMax = kSmallListMax;
+  static const int kLargeAllocationMax = kMediumListMax;
+  FreeListCategory small_list_;
+  FreeListCategory medium_list_;
+  FreeListCategory large_list_;
+  FreeListCategory huge_list_;
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList);
+};
+
+
+class AllocationResult {
+ public:
+  // Implicit constructor from Object*.
+  AllocationResult(Object* object)  // NOLINT
+      : object_(object),
+        retry_space_(INVALID_SPACE) {}
+
+  AllocationResult() : object_(NULL), retry_space_(INVALID_SPACE) {}
+
+  static inline AllocationResult Retry(AllocationSpace space = NEW_SPACE) {
+    return AllocationResult(space);
+  }
+
+  inline bool IsRetry() { return retry_space_ != INVALID_SPACE; }
+
+  template <typename T>
+  bool To(T** obj) {
+    if (IsRetry()) return false;
+    *obj = T::cast(object_);
+    return true;
+  }
+
+  Object* ToObjectChecked() {
+    CHECK(!IsRetry());
+    return object_;
+  }
+
+  AllocationSpace RetrySpace() {
+    DCHECK(IsRetry());
+    return retry_space_;
+  }
+
+ private:
+  explicit AllocationResult(AllocationSpace space)
+      : object_(NULL), retry_space_(space) {}
+
+  Object* object_;
+  AllocationSpace retry_space_;
+};
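+
+// Typical (illustrative) use of AllocationResult by callers; this sketch is
+// not part of the header:
+//
+//   HeapObject* obj;
+//   AllocationResult allocation = space->AllocateRaw(size_in_bytes);
+//   if (!allocation.To(&obj)) {
+//     // Failed; allocation.RetrySpace() names the space that needs a GC.
+//   }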
+
+
+class PagedSpace : public Space {
+ public:
+  // Creates a space with a maximum capacity, and an id.
+  PagedSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id,
+             Executability executable);
+
+  virtual ~PagedSpace() {}
+
+  // Set up the space using the given address range of virtual memory (from
+  // the memory allocator's initial chunk) if possible.  If the block of
+  // addresses is not big enough to contain a single page-aligned page, a
+  // fresh chunk will be allocated.
+  bool SetUp();
+
+  // Returns true if the space has been successfully set up and not
+  // subsequently torn down.
+  bool HasBeenSetUp();
+
+  // Cleans up the space, frees all pages in this space except those belonging
+  // to the initial chunk, uncommits addresses in the initial chunk.
+  void TearDown();
+
+  // Checks whether an object/address is in this space.
+  inline bool Contains(Address a);
+  bool Contains(HeapObject* o) { return Contains(o->address()); }
+
+  // Given an address occupied by a live object, return that object if it is
+  // in this space, or a Smi if it is not.  The implementation iterates over
+  // objects in the page containing the address, so the cost is linear in the
+  // number of objects in the page.  It may be slow.
+  Object* FindObject(Address addr);
+
+  // During boot the free_space_map is created, and afterwards we may need
+  // to write it into the free list nodes that were already created.
+  void RepairFreeListsAfterBoot();
+
+  // Prepares for a mark-compact GC.
+  void PrepareForMarkCompact();
+
+  // Current capacity without growing (Size() + Available()).
+  intptr_t Capacity() { return accounting_stats_.Capacity(); }
+
+  // Total amount of memory committed for this space.  For paged
+  // spaces this equals the capacity.
+  intptr_t CommittedMemory() { return Capacity(); }
+
+  // The maximum amount of memory ever committed for this space.
+  intptr_t MaximumCommittedMemory() { return accounting_stats_.MaxCapacity(); }
+
+  // Approximate amount of physical memory committed for this space.
+  size_t CommittedPhysicalMemory();
+
+  struct SizeStats {
+    intptr_t Total() {
+      return small_size_ + medium_size_ + large_size_ + huge_size_;
+    }
+
+    intptr_t small_size_;
+    intptr_t medium_size_;
+    intptr_t large_size_;
+    intptr_t huge_size_;
+  };
+
+  void ObtainFreeListStatistics(Page* p, SizeStats* sizes);
+  void ResetFreeListStatistics();
+
+  // Sets the available space and the wasted space to zero.
+  // The stats are rebuilt during sweeping by adding each page to the
+  // capacity and the size when it is encountered.  As free spaces are
+  // discovered during the sweeping they are subtracted from the size and added
+  // to the available and wasted totals.
+  void ClearStats() {
+    accounting_stats_.ClearSizeWaste();
+    ResetFreeListStatistics();
+  }
+
+  // Increases the number of available bytes of that space.
+  void AddToAccountingStats(intptr_t bytes) {
+    accounting_stats_.DeallocateBytes(bytes);
+  }
+
+  // Available bytes without growing.  These are the bytes on the free list.
+  // The bytes in the linear allocation area are not included in this total
+  // because updating the stats would slow down allocation.  New pages are
+  // immediately added to the free list so they show up here.
+  intptr_t Available() { return free_list_.available(); }
+
+  // Allocated bytes in this space.  Garbage bytes that were not found due to
+  // concurrent sweeping are counted as being allocated!  The bytes in the
+  // current linear allocation area (between top and limit) are also counted
+  // here.
+  virtual intptr_t Size() { return accounting_stats_.Size(); }
+
+  // As Size(), but the bytes in lazily swept pages are estimated and the bytes
+  // in the current linear allocation area are not included.
+  virtual intptr_t SizeOfObjects();
+
+  // Wasted bytes in this space.  These are just the bytes that were thrown away
+  // due to being too small to use for allocation.  They do not include the
+  // free bytes that were not found at all due to lazy sweeping.
+  virtual intptr_t Waste() { return accounting_stats_.Waste(); }
+
+  // Returns the allocation pointer in this space.
+  Address top() { return allocation_info_.top(); }
+  Address limit() { return allocation_info_.limit(); }
+
+  // The allocation top address.
+  Address* allocation_top_address() { return allocation_info_.top_address(); }
+
+  // The allocation limit address.
+  Address* allocation_limit_address() {
+    return allocation_info_.limit_address();
+  }
+
+  // Allocate the requested number of bytes in the space if possible, return a
+  // failure object if not.
+  MUST_USE_RESULT inline AllocationResult AllocateRaw(int size_in_bytes);
+
+  // Give a block of memory to the space's free list.  It might be added to
+  // the free list or accounted as waste.  Returns the number of bytes that
+  // were actually added to the free list, i.e. size_in_bytes minus the bytes
+  // lost to internal fragmentation.
+  int Free(Address start, int size_in_bytes) {
+    int wasted = free_list_.Free(start, size_in_bytes);
+    accounting_stats_.DeallocateBytes(size_in_bytes);
+    accounting_stats_.WasteBytes(wasted);
+    return size_in_bytes - wasted;
+  }
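+  // For example, assuming kPointerSize == 8, a freed 128-byte block falls
+  // below the smallest free-list category, so it is accounted entirely as
+  // waste and Free() returns 0, whereas freeing a 1024-byte block returns
+  // 1024.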
+
+  void ResetFreeList() { free_list_.Reset(); }
+
+  // Set space allocation info.
+  void SetTopAndLimit(Address top, Address limit) {
+    DCHECK(top == limit ||
+           Page::FromAddress(top) == Page::FromAddress(limit - 1));
+    MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
+    allocation_info_.set_top(top);
+    allocation_info_.set_limit(limit);
+  }
+
+  // Empty space allocation info, returning unused area to free list.
+  void EmptyAllocationInfo() {
+    // Mark the old linear allocation area with a free space map so it can be
+    // skipped when scanning the heap.
+    int old_linear_size = static_cast<int>(limit() - top());
+    Free(top(), old_linear_size);
+    SetTopAndLimit(NULL, NULL);
+  }
+
+  void Allocate(int bytes) { accounting_stats_.AllocateBytes(bytes); }
+
+  void IncreaseCapacity(int size);
+
+  // Releases an unused page and shrinks the space.
+  void ReleasePage(Page* page);
+
+  // The dummy page that anchors the linked list of pages.
+  Page* anchor() { return &anchor_; }
+
+#ifdef VERIFY_HEAP
+  // Verify integrity of this space.
+  virtual void Verify(ObjectVisitor* visitor);
+
+  // Overridden by subclasses to verify space-specific object
+  // properties (e.g., only maps or free-list nodes are in map space).
+  virtual void VerifyObject(HeapObject* obj) {}
+#endif
+
+#ifdef DEBUG
+  // Print meta info and objects in this space.
+  virtual void Print();
+
+  // Reports statistics for the space
+  void ReportStatistics();
+
+  // Report code object related statistics
+  void CollectCodeStatistics();
+  static void ReportCodeStatistics(Isolate* isolate);
+  static void ResetCodeStatistics(Isolate* isolate);
+#endif
+
+  // Evacuation candidates are swept by evacuator.  Needs to return a valid
+  // result before _and_ after evacuation has finished.
+  static bool ShouldBeSweptBySweeperThreads(Page* p) {
+    return !p->IsEvacuationCandidate() &&
+           !p->IsFlagSet(Page::RESCAN_ON_EVACUATION) && !p->WasSwept();
+  }
+
+  void IncrementUnsweptFreeBytes(intptr_t by) { unswept_free_bytes_ += by; }
+
+  void IncreaseUnsweptFreeBytes(Page* p) {
+    DCHECK(ShouldBeSweptBySweeperThreads(p));
+    unswept_free_bytes_ += (p->area_size() - p->LiveBytes());
+  }
+
+  void DecrementUnsweptFreeBytes(intptr_t by) { unswept_free_bytes_ -= by; }
+
+  void DecreaseUnsweptFreeBytes(Page* p) {
+    DCHECK(ShouldBeSweptBySweeperThreads(p));
+    unswept_free_bytes_ -= (p->area_size() - p->LiveBytes());
+  }
+
+  void ResetUnsweptFreeBytes() { unswept_free_bytes_ = 0; }
+
+  // This function tries to steal size_in_bytes memory from the sweeper threads
+  // free-lists. If it does not succeed stealing enough memory, it will wait
+  // for the sweeper threads to finish sweeping.
+  // It returns true when sweeping is completed and false otherwise.
+  bool EnsureSweeperProgress(intptr_t size_in_bytes);
+
+  void set_end_of_unswept_pages(Page* page) { end_of_unswept_pages_ = page; }
+
+  Page* end_of_unswept_pages() { return end_of_unswept_pages_; }
+
+  Page* FirstPage() { return anchor_.next_page(); }
+  Page* LastPage() { return anchor_.prev_page(); }
+
+  void EvictEvacuationCandidatesFromFreeLists();
+
+  bool CanExpand();
+
+  // Returns the number of total pages in this space.
+  int CountTotalPages();
+
+  // Return size of allocatable area on a page in this space.
+  inline int AreaSize() { return area_size_; }
+
+  void CreateEmergencyMemory();
+  void FreeEmergencyMemory();
+  void UseEmergencyMemory();
+
+  bool HasEmergencyMemory() { return emergency_memory_ != NULL; }
+
+ protected:
+  FreeList* free_list() { return &free_list_; }
+
+  int area_size_;
+
+  // Maximum capacity of this space.
+  intptr_t max_capacity_;
+
+  intptr_t SizeOfFirstPage();
+
+  // Accounting information for this space.
+  AllocationStats accounting_stats_;
+
+  // The dummy page that anchors the double linked list of pages.
+  Page anchor_;
+
+  // The space's free list.
+  FreeList free_list_;
+
+  // Normal allocation information.
+  AllocationInfo allocation_info_;
+
+  // The number of free bytes which could be reclaimed by advancing the
+  // concurrent sweeper threads.
+  intptr_t unswept_free_bytes_;
+
+  // The sweeper threads iterate over the list of pointer and data space pages
+  // and sweep these pages concurrently. They will stop sweeping after the
+  // end_of_unswept_pages_ page.
+  Page* end_of_unswept_pages_;
+
+  // Emergency memory is the memory of a full page for a given space, allocated
+  // conservatively before evacuating a page. If compaction fails due to an
+  // out-of-memory error, the emergency memory can be used to complete
+  // compaction.
+  // If not used, the emergency memory is released after compaction.
+  MemoryChunk* emergency_memory_;
+
+  // Expands the space by allocating a fixed number of pages. Returns false if
+  // it cannot allocate the requested number of pages from the OS, or if the
+  // hard heap size limit has been hit.
+  bool Expand();
+
+  // Generic fast case allocation function that tries linear allocation at the
+  // address denoted by top in allocation_info_.
+  inline HeapObject* AllocateLinearly(int size_in_bytes);
+
+  // If sweeping is still in progress try to sweep unswept pages. If that is
+  // not successful, wait for the sweeper threads and re-try free-list
+  // allocation.
+  MUST_USE_RESULT HeapObject* WaitForSweeperThreadsAndRetryAllocation(
+      int size_in_bytes);
+
+  // Slow path of AllocateRaw.  This function is space-dependent.
+  MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);
+
+  friend class PageIterator;
+  friend class MarkCompactCollector;
+};
+
+
+class NumberAndSizeInfo BASE_EMBEDDED {
+ public:
+  NumberAndSizeInfo() : number_(0), bytes_(0) {}
+
+  int number() const { return number_; }
+  void increment_number(int num) { number_ += num; }
+
+  int bytes() const { return bytes_; }
+  void increment_bytes(int size) { bytes_ += size; }
+
+  void clear() {
+    number_ = 0;
+    bytes_ = 0;
+  }
+
+ private:
+  int number_;
+  int bytes_;
+};
+
+
+// HistogramInfo class for recording a single "bar" of a histogram.  This
+// class is used for collecting statistics to print to the log file.
+class HistogramInfo : public NumberAndSizeInfo {
+ public:
+  HistogramInfo() : NumberAndSizeInfo() {}
+
+  const char* name() { return name_; }
+  void set_name(const char* name) { name_ = name; }
+
+ private:
+  const char* name_;
+};
+
+
+enum SemiSpaceId { kFromSpace = 0, kToSpace = 1 };
+
+
+class SemiSpace;
+
+
+class NewSpacePage : public MemoryChunk {
+ public:
+  // GC related flags copied from from-space to to-space when
+  // flipping semispaces.
+  static const intptr_t kCopyOnFlipFlagsMask =
+      (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
+      (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
+      (1 << MemoryChunk::SCAN_ON_SCAVENGE);
+
+  static const int kAreaSize = Page::kMaxRegularHeapObjectSize;
+
+  inline NewSpacePage* next_page() const {
+    return static_cast<NewSpacePage*>(next_chunk());
+  }
+
+  inline void set_next_page(NewSpacePage* page) { set_next_chunk(page); }
+
+  inline NewSpacePage* prev_page() const {
+    return static_cast<NewSpacePage*>(prev_chunk());
+  }
+
+  inline void set_prev_page(NewSpacePage* page) { set_prev_chunk(page); }
+
+  SemiSpace* semi_space() { return reinterpret_cast<SemiSpace*>(owner()); }
+
+  bool is_anchor() { return !this->InNewSpace(); }
+
+  static bool IsAtStart(Address addr) {
+    return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) ==
+           kObjectStartOffset;
+  }
+
+  static bool IsAtEnd(Address addr) {
+    return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) == 0;
+  }
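+  // For a page spanning [P, P + Page::kPageSize), IsAtStart() holds for
+  // P + kObjectStartOffset (the first object slot) and IsAtEnd() holds for
+  // P + Page::kPageSize, which is page aligned.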
+
+  Address address() { return reinterpret_cast<Address>(this); }
+
+  // Finds the NewSpacePage containing the given address.
+  static inline NewSpacePage* FromAddress(Address address_in_page) {
+    Address page_start =
+        reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address_in_page) &
+                                  ~Page::kPageAlignmentMask);
+    NewSpacePage* page = reinterpret_cast<NewSpacePage*>(page_start);
+    return page;
+  }
+
+  // Find the page for a limit address. A limit address is either an address
+  // inside a page, or the address right after the last byte of a page.
+  static inline NewSpacePage* FromLimit(Address address_limit) {
+    return NewSpacePage::FromAddress(address_limit - 1);
+  }
+
+  // Checks if address1 and address2 are on the same new space page.
+  static inline bool OnSamePage(Address address1, Address address2) {
+    return NewSpacePage::FromAddress(address1) ==
+           NewSpacePage::FromAddress(address2);
+  }
+
+ private:
+  // Create a NewSpacePage object that is only used as anchor
+  // for the doubly-linked list of real pages.
+  explicit NewSpacePage(SemiSpace* owner) { InitializeAsAnchor(owner); }
+
+  static NewSpacePage* Initialize(Heap* heap, Address start,
+                                  SemiSpace* semi_space);
+
+  // Initializes a fake NewSpacePage used as a sentinel at the ends
+  // of a doubly-linked list of real NewSpacePages.
+  // Only uses the prev/next links, and sets flags to not be in new-space.
+  void InitializeAsAnchor(SemiSpace* owner);
+
+  friend class SemiSpace;
+  friend class SemiSpaceIterator;
+};
+
+
+// -----------------------------------------------------------------------------
+// SemiSpace in young generation
+//
+// A semispace is a contiguous chunk of memory holding page-like memory
+// chunks. The mark-compact collector uses the memory of the first page in
+// the from space as a marking stack when tracing live objects.
+
+class SemiSpace : public Space {
+ public:
+  // Constructor.
+  SemiSpace(Heap* heap, SemiSpaceId semispace)
+      : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
+        start_(NULL),
+        age_mark_(NULL),
+        id_(semispace),
+        anchor_(this),
+        current_page_(NULL) {}
+
+  // Sets up the semispace using the given chunk.
+  void SetUp(Address start, int initial_capacity, int maximum_capacity);
+
+  // Tear down the space.  Heap memory was not allocated by the space, so it
+  // is not deallocated here.
+  void TearDown();
+
+  // True if the space has been set up but not torn down.
+  bool HasBeenSetUp() { return start_ != NULL; }
+
+  // Grow the semispace to the new capacity.  The new capacity
+  // requested must be larger than the current capacity and less than
+  // the maximum capacity.
+  bool GrowTo(int new_capacity);
+
+  // Shrinks the semispace to the new capacity.  The new capacity
+  // requested must be more than the amount of used memory in the
+  // semispace and less than the current capacity.
+  bool ShrinkTo(int new_capacity);
+
+  // Returns the start address of the first page of the space.
+  Address space_start() {
+    DCHECK(anchor_.next_page() != &anchor_);
+    return anchor_.next_page()->area_start();
+  }
+
+  // Returns the start address of the current page of the space.
+  Address page_low() { return current_page_->area_start(); }
+
+  // Returns one past the end address of the space.
+  Address space_end() { return anchor_.prev_page()->area_end(); }
+
+  // Returns one past the end address of the current page of the space.
+  Address page_high() { return current_page_->area_end(); }
+
+  bool AdvancePage() {
+    NewSpacePage* next_page = current_page_->next_page();
+    if (next_page == anchor()) return false;
+    current_page_ = next_page;
+    return true;
+  }
+
+  // Resets the space to using the first page.
+  void Reset();
+
+  // Age mark accessors.
+  Address age_mark() { return age_mark_; }
+  void set_age_mark(Address mark);
+
+  // True if the address is in the address range of this semispace (not
+  // necessarily below the allocation pointer).
+  bool Contains(Address a) {
+    return (reinterpret_cast<uintptr_t>(a) & address_mask_) ==
+           reinterpret_cast<uintptr_t>(start_);
+  }
+
+  // True if the object is a heap object in the address range of this
+  // semispace (not necessarily below the allocation pointer).
+  bool Contains(Object* o) {
+    return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
+  }
+
+  // If we don't have these here then SemiSpace will be abstract.  However
+  // they should never be called.
+  virtual intptr_t Size() {
+    UNREACHABLE();
+    return 0;
+  }
+
+  bool is_committed() { return committed_; }
+  bool Commit();
+  bool Uncommit();
+
+  NewSpacePage* first_page() { return anchor_.next_page(); }
+  NewSpacePage* current_page() { return current_page_; }
+
+#ifdef VERIFY_HEAP
+  virtual void Verify();
+#endif
+
+#ifdef DEBUG
+  virtual void Print();
+  // Validate a range of addresses in a SemiSpace.
+  // The "from" address must be on a page prior to the "to" address,
+  // in the linked page order, or it must be earlier on the same page.
+  static void AssertValidRange(Address from, Address to);
+#else
+  // Do nothing.
+  inline static void AssertValidRange(Address from, Address to) {}
+#endif
+
+  // Returns the current total capacity of the semispace.
+  int TotalCapacity() { return total_capacity_; }
+
+  // Returns the maximum total capacity of the semispace.
+  int MaximumTotalCapacity() { return maximum_total_capacity_; }
+
+  // Returns the initial capacity of the semispace.
+  int InitialTotalCapacity() { return initial_total_capacity_; }
+
+  SemiSpaceId id() { return id_; }
+
+  static void Swap(SemiSpace* from, SemiSpace* to);
+
+  // Returns the maximum amount of memory ever committed by the semi space.
+  size_t MaximumCommittedMemory() { return maximum_committed_; }
+
+  // Approximate amount of physical memory committed for this space.
+  size_t CommittedPhysicalMemory();
+
+ private:
+  // Flips the semispace between being from-space and to-space.
+  // Copies the flags into the masked positions on all pages in the space.
+  void FlipPages(intptr_t flags, intptr_t flag_mask);
+
+  // Updates Capacity and MaximumCommitted based on new capacity.
+  void SetCapacity(int new_capacity);
+
+  NewSpacePage* anchor() { return &anchor_; }
+
+  // The current and maximum total capacity of the space.
+  int total_capacity_;
+  int maximum_total_capacity_;
+  int initial_total_capacity_;
+
+  intptr_t maximum_committed_;
+
+  // The start address of the space.
+  Address start_;
+  // Used to govern object promotion during mark-compact collection.
+  Address age_mark_;
+
+  // Masks and comparison values to test for containment in this semispace.
+  uintptr_t address_mask_;
+  uintptr_t object_mask_;
+  uintptr_t object_expected_;
+
+  bool committed_;
+  SemiSpaceId id_;
+
+  NewSpacePage anchor_;
+  NewSpacePage* current_page_;
+
+  friend class SemiSpaceIterator;
+  friend class NewSpacePageIterator;
+
+ public:
+  TRACK_MEMORY("SemiSpace")
+};
+
+
+// A SemiSpaceIterator is an ObjectIterator that iterates over the active
+// semispace of the heap's new space.  It iterates over the objects in the
+// semispace from a given start address (defaulting to the bottom of the
+// semispace) to the top of the semispace.  New objects allocated after the
+// iterator is created are not iterated.
+class SemiSpaceIterator : public ObjectIterator {
+ public:
+  // Create an iterator over the objects in the given space.  If no start
+  // address is given, the iterator starts from the bottom of the space.  If
+  // no size function is given, the iterator calls Object::Size().
+
+  // Iterate over all of allocated to-space.
+  explicit SemiSpaceIterator(NewSpace* space);
+  // Iterate over all of allocated to-space, with a custom size function.
+  SemiSpaceIterator(NewSpace* space, HeapObjectCallback size_func);
+  // Iterate over part of allocated to-space, from start to the end
+  // of allocation.
+  SemiSpaceIterator(NewSpace* space, Address start);
+  // Iterate from one address to another in the same semi-space.
+  SemiSpaceIterator(Address from, Address to);
+
+  HeapObject* Next() {
+    if (current_ == limit_) return NULL;
+    if (NewSpacePage::IsAtEnd(current_)) {
+      NewSpacePage* page = NewSpacePage::FromLimit(current_);
+      page = page->next_page();
+      DCHECK(!page->is_anchor());
+      current_ = page->area_start();
+      if (current_ == limit_) return NULL;
+    }
+
+    HeapObject* object = HeapObject::FromAddress(current_);
+    int size = (size_func_ == NULL) ? object->Size() : size_func_(object);
+
+    current_ += size;
+    return object;
+  }
+
+  // Implementation of the ObjectIterator functions.
+  virtual HeapObject* next_object() { return Next(); }
+
+ private:
+  void Initialize(Address start, Address end, HeapObjectCallback size_func);
+
+  // The current iteration point.
+  Address current_;
+  // The end of iteration.
+  Address limit_;
+  // The callback function.
+  HeapObjectCallback size_func_;
+};
+
+
+// -----------------------------------------------------------------------------
+// A PageIterator iterates the pages in a semi-space.
+class NewSpacePageIterator BASE_EMBEDDED {
+ public:
+  // Make an iterator that runs over all pages in to-space.
+  explicit inline NewSpacePageIterator(NewSpace* space);
+
+  // Make an iterator that runs over all pages in the given semispace,
+  // even those not used in allocation.
+  explicit inline NewSpacePageIterator(SemiSpace* space);
+
+  // Make iterator that iterates from the page containing start
+  // to the page that contains limit in the same semispace.
+  inline NewSpacePageIterator(Address start, Address limit);
+
+  inline bool has_next();
+  inline NewSpacePage* next();
+
+ private:
+  NewSpacePage* prev_page_;  // Previous page returned.
+  // Next page that will be returned.  Cached here so that we can use this
+  // iterator for operations that deallocate pages.
+  NewSpacePage* next_page_;
+  // Last page returned.
+  NewSpacePage* last_page_;
+};
+
+
+// -----------------------------------------------------------------------------
+// The young generation space.
+//
+// The new space consists of a contiguous pair of semispaces.  It simply
+// forwards most functions to the appropriate semispace.
+
+class NewSpace : public Space {
+ public:
+  // Constructor.
+  explicit NewSpace(Heap* heap)
+      : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
+        to_space_(heap, kToSpace),
+        from_space_(heap, kFromSpace),
+        reservation_(),
+        inline_allocation_limit_step_(0) {}
+
+  // Sets up the new space using the given chunk.
+  bool SetUp(int reserved_semispace_size_, int max_semi_space_size);
+
+  // Tears down the space.  Heap memory was not allocated by the space, so it
+  // is not deallocated here.
+  void TearDown();
+
+  // True if the space has been set up but not torn down.
+  bool HasBeenSetUp() {
+    return to_space_.HasBeenSetUp() && from_space_.HasBeenSetUp();
+  }
+
+  // Flip the pair of spaces.
+  void Flip();
+
+  // Grow the capacity of the semispaces.  Assumes that they are not at
+  // their maximum capacity.
+  void Grow();
+
+  // Shrink the capacity of the semispaces.
+  void Shrink();
+
+  // True if the address or object lies in the address range of either
+  // semispace (not necessarily below the allocation pointer).
+  bool Contains(Address a) {
+    return (reinterpret_cast<uintptr_t>(a) & address_mask_) ==
+           reinterpret_cast<uintptr_t>(start_);
+  }
+
+  bool Contains(Object* o) {
+    Address a = reinterpret_cast<Address>(o);
+    return (reinterpret_cast<uintptr_t>(a) & object_mask_) == object_expected_;
+  }
+
+  // Return the allocated bytes in the active semispace.
+  virtual intptr_t Size() {
+    return pages_used_ * NewSpacePage::kAreaSize +
+           static_cast<int>(top() - to_space_.page_low());
+  }
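+  // For example, with three full pages in use (pages_used_ == 3) and 4 KB
+  // allocated on the current page, Size() is
+  // 3 * NewSpacePage::kAreaSize + 4096.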
+
+  // The same, but returning an int.  We have to have the intptr_t version
+  // because it is inherited, but when we know we are dealing with the new
+  // space, which cannot get as big as the other spaces, this is useful.
+  int SizeAsInt() { return static_cast<int>(Size()); }
+
+  // Return the allocatable capacity of a semispace.
+  intptr_t Capacity() {
+    SLOW_DCHECK(to_space_.TotalCapacity() == from_space_.TotalCapacity());
+    return (to_space_.TotalCapacity() / Page::kPageSize) *
+           NewSpacePage::kAreaSize;
+  }
+
+  // Return the current size of a semispace, allocatable and non-allocatable
+  // memory.
+  intptr_t TotalCapacity() {
+    DCHECK(to_space_.TotalCapacity() == from_space_.TotalCapacity());
+    return to_space_.TotalCapacity();
+  }
+
+  // Return the total amount of memory committed for new space.
+  intptr_t CommittedMemory() {
+    if (from_space_.is_committed()) return 2 * Capacity();
+    return TotalCapacity();
+  }
+
+  // Return the total amount of memory committed for new space.
+  intptr_t MaximumCommittedMemory() {
+    return to_space_.MaximumCommittedMemory() +
+           from_space_.MaximumCommittedMemory();
+  }
+
+  // Approximate amount of physical memory committed for this space.
+  size_t CommittedPhysicalMemory();
+
+  // Return the available bytes without growing.
+  intptr_t Available() { return Capacity() - Size(); }
+
+  // Return the maximum capacity of a semispace.
+  int MaximumCapacity() {
+    DCHECK(to_space_.MaximumTotalCapacity() ==
+           from_space_.MaximumTotalCapacity());
+    return to_space_.MaximumTotalCapacity();
+  }
+
+  bool IsAtMaximumCapacity() { return TotalCapacity() == MaximumCapacity(); }
+
+  // Returns the initial capacity of a semispace.
+  int InitialTotalCapacity() {
+    DCHECK(to_space_.InitialTotalCapacity() ==
+           from_space_.InitialTotalCapacity());
+    return to_space_.InitialTotalCapacity();
+  }
+
+  // Return the address of the allocation pointer in the active semispace.
+  Address top() {
+    DCHECK(to_space_.current_page()->ContainsLimit(allocation_info_.top()));
+    return allocation_info_.top();
+  }
+
+  void set_top(Address top) {
+    DCHECK(to_space_.current_page()->ContainsLimit(top));
+    allocation_info_.set_top(top);
+  }
+
+  // Return the address of the allocation pointer limit in the active semispace.
+  Address limit() {
+    DCHECK(to_space_.current_page()->ContainsLimit(allocation_info_.limit()));
+    return allocation_info_.limit();
+  }
+
+  // Return the address of the first object in the active semispace.
+  Address bottom() { return to_space_.space_start(); }
+
+  // Get the age mark of the inactive semispace.
+  Address age_mark() { return from_space_.age_mark(); }
+  // Set the age mark in the active semispace.
+  void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
+
+  // The start address of the space and a bit mask. ANDing an address in the
+  // new space with the mask yields the start address.
+  Address start() { return start_; }
+  uintptr_t mask() { return address_mask_; }
+
+  INLINE(uint32_t AddressToMarkbitIndex(Address addr)) {
+    DCHECK(Contains(addr));
+    DCHECK(IsAligned(OffsetFrom(addr), kPointerSize) ||
+           IsAligned(OffsetFrom(addr) - 1, kPointerSize));
+    return static_cast<uint32_t>(addr - start_) >> kPointerSizeLog2;
+  }
+
+  INLINE(Address MarkbitIndexToAddress(uint32_t index)) {
+    return reinterpret_cast<Address>(index << kPointerSizeLog2);
+  }
+
+  // The allocation top and limit address.
+  Address* allocation_top_address() { return allocation_info_.top_address(); }
+
+  // The allocation limit address.
+  Address* allocation_limit_address() {
+    return allocation_info_.limit_address();
+  }
+
+  MUST_USE_RESULT INLINE(AllocationResult AllocateRaw(int size_in_bytes));
+
+  // Reset the allocation pointer to the beginning of the active semispace.
+  void ResetAllocationInfo();
+
+  void UpdateInlineAllocationLimit(int size_in_bytes);
+  void LowerInlineAllocationLimit(intptr_t step) {
+    inline_allocation_limit_step_ = step;
+    UpdateInlineAllocationLimit(0);
+    top_on_previous_step_ = allocation_info_.top();
+  }
+
+  // Get the extent of the inactive semispace (for use as a marking stack,
+  // or to zap it). Note that the space addresses are not necessarily on the
+  // same page, so FromSpaceStart() might be above FromSpaceEnd().
+  Address FromSpacePageLow() { return from_space_.page_low(); }
+  Address FromSpacePageHigh() { return from_space_.page_high(); }
+  Address FromSpaceStart() { return from_space_.space_start(); }
+  Address FromSpaceEnd() { return from_space_.space_end(); }
+
+  // Get the extent of the active semispace's pages' memory.
+  Address ToSpaceStart() { return to_space_.space_start(); }
+  Address ToSpaceEnd() { return to_space_.space_end(); }
+
+  inline bool ToSpaceContains(Address address) {
+    return to_space_.Contains(address);
+  }
+  inline bool FromSpaceContains(Address address) {
+    return from_space_.Contains(address);
+  }
+
+  // True if the object is a heap object in the address range of the
+  // respective semispace (not necessarily below the allocation pointer of the
+  // semispace).
+  inline bool ToSpaceContains(Object* o) { return to_space_.Contains(o); }
+  inline bool FromSpaceContains(Object* o) { return from_space_.Contains(o); }
+
+  // Try to switch the active semispace to a new, empty, page.
+  // Returns false if this isn't possible or reasonable (i.e., there
+  // are no pages, or the current page is already empty), or true
+  // if successful.
+  bool AddFreshPage();
+
+#ifdef VERIFY_HEAP
+  // Verify the active semispace.
+  virtual void Verify();
+#endif
+
+#ifdef DEBUG
+  // Print the active semispace.
+  virtual void Print() { to_space_.Print(); }
+#endif
+
+  // Iterates the active semispace to collect statistics.
+  void CollectStatistics();
+  // Reports previously collected statistics of the active semispace.
+  void ReportStatistics();
+  // Clears previously collected statistics.
+  void ClearHistograms();
+
+  // Record the allocation or promotion of a heap object.  Note that we don't
+  // record every single allocation, but only those that happen in the
+  // to space during a scavenge GC.
+  void RecordAllocation(HeapObject* obj);
+  void RecordPromotion(HeapObject* obj);
+
+  // Return whether the operation succeeded.
+  bool CommitFromSpaceIfNeeded() {
+    if (from_space_.is_committed()) return true;
+    return from_space_.Commit();
+  }
+
+  bool UncommitFromSpace() {
+    if (!from_space_.is_committed()) return true;
+    return from_space_.Uncommit();
+  }
+
+  inline intptr_t inline_allocation_limit_step() {
+    return inline_allocation_limit_step_;
+  }
+
+  SemiSpace* active_space() { return &to_space_; }
+
+ private:
+  // Update allocation info to match the current to-space page.
+  void UpdateAllocationInfo();
+
+  Address chunk_base_;
+  uintptr_t chunk_size_;
+
+  // The semispaces.
+  SemiSpace to_space_;
+  SemiSpace from_space_;
+  base::VirtualMemory reservation_;
+  int pages_used_;
+
+  // Start address and bit mask for containment testing.
+  Address start_;
+  uintptr_t address_mask_;
+  uintptr_t object_mask_;
+  uintptr_t object_expected_;
+
+  // Allocation pointer and limit for normal allocation and allocation during
+  // mark-compact collection.
+  AllocationInfo allocation_info_;
+
+  // When incremental marking is active, we lower allocation_info_.limit below
+  // the actual limit and then gradually increase it in steps to guarantee that
+  // we do incremental marking steps even when all allocation is performed from
+  // inlined generated code.
+  intptr_t inline_allocation_limit_step_;
+
+  Address top_on_previous_step_;
+
+  HistogramInfo* allocated_histogram_;
+  HistogramInfo* promoted_histogram_;
+
+  MUST_USE_RESULT AllocationResult SlowAllocateRaw(int size_in_bytes);
+
+  friend class SemiSpaceIterator;
+
+ public:
+  TRACK_MEMORY("NewSpace")
+};
+
+
+// -----------------------------------------------------------------------------
+// Old object space (excluding map objects)
+
+class OldSpace : public PagedSpace {
+ public:
+  // Creates an old space object with a given maximum capacity.
+  // The constructor does not allocate pages from OS.
+  OldSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id,
+           Executability executable)
+      : PagedSpace(heap, max_capacity, id, executable) {}
+
+ public:
+  TRACK_MEMORY("OldSpace")
+};
+
+
+// For contiguous spaces, top should be in the space (or at the end) and limit
+// should be the end of the space.
+#define DCHECK_SEMISPACE_ALLOCATION_INFO(info, space) \
+  SLOW_DCHECK((space).page_low() <= (info).top() &&   \
+              (info).top() <= (space).page_high() &&  \
+              (info).limit() <= (space).page_high())
+
+
+// -----------------------------------------------------------------------------
+// Old space for all map objects
+
+class MapSpace : public PagedSpace {
+ public:
+  // Creates a map space object with a maximum capacity.
+  MapSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
+      : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE),
+        max_map_space_pages_(kMaxMapPageIndex - 1) {}
+
+  // Given an index, returns the page address.
+  // TODO(1600): this limit is artificial just to keep the code compilable.
+  static const int kMaxMapPageIndex = 1 << 16;
+
+  virtual int RoundSizeDownToObjectAlignment(int size) {
+    if (base::bits::IsPowerOfTwo32(Map::kSize)) {
+      return RoundDown(size, Map::kSize);
+    } else {
+      return (size / Map::kSize) * Map::kSize;
+    }
+  }
+
+ protected:
+  virtual void VerifyObject(HeapObject* obj);
+
+ private:
+  static const int kMapsPerPage = Page::kMaxRegularHeapObjectSize / Map::kSize;
+
+  // Do map space compaction if there is a page gap.
+  int CompactionThreshold() {
+    return kMapsPerPage * (max_map_space_pages_ - 1);
+  }
+
+  const int max_map_space_pages_;
+
+ public:
+  TRACK_MEMORY("MapSpace")
+};
+
+
+// -----------------------------------------------------------------------------
+// Old space for simple property cell objects
+
+class CellSpace : public PagedSpace {
+ public:
+  // Creates a property cell space object with a maximum capacity.
+  CellSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
+      : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE) {}
+
+  virtual int RoundSizeDownToObjectAlignment(int size) {
+    if (base::bits::IsPowerOfTwo32(Cell::kSize)) {
+      return RoundDown(size, Cell::kSize);
+    } else {
+      return (size / Cell::kSize) * Cell::kSize;
+    }
+  }
+
+ protected:
+  virtual void VerifyObject(HeapObject* obj);
+
+ public:
+  TRACK_MEMORY("CellSpace")
+};
+
+
+// -----------------------------------------------------------------------------
+// Old space for all global object property cell objects
+
+class PropertyCellSpace : public PagedSpace {
+ public:
+  // Creates a property cell space object with a maximum capacity.
+  PropertyCellSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
+      : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE) {}
+
+  virtual int RoundSizeDownToObjectAlignment(int size) {
+    if (base::bits::IsPowerOfTwo32(PropertyCell::kSize)) {
+      return RoundDown(size, PropertyCell::kSize);
+    } else {
+      return (size / PropertyCell::kSize) * PropertyCell::kSize;
+    }
+  }
+
+ protected:
+  virtual void VerifyObject(HeapObject* obj);
+
+ public:
+  TRACK_MEMORY("PropertyCellSpace")
+};
+
+
+// -----------------------------------------------------------------------------
+// Large objects ( > Page::kMaxHeapObjectSize ) are allocated and managed by
+// the large object space. A large object is allocated from the OS heap with
+// extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
+// A large object always starts at offset Page::kObjectStartOffset into a page.
+// Large objects do not move during garbage collections.
+
+class LargeObjectSpace : public Space {
+ public:
+  LargeObjectSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id);
+  virtual ~LargeObjectSpace() {}
+
+  // Initializes internal data structures.
+  bool SetUp();
+
+  // Releases internal resources, frees objects in this space.
+  void TearDown();
+
+  static intptr_t ObjectSizeFor(intptr_t chunk_size) {
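+    // A chunk smaller than the fixed overhead (one page plus the object start
+    // offset) cannot hold any object payload.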
+    if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
+    return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
+  }
+
+  // Shared implementation of AllocateRaw, AllocateRawCode and
+  // AllocateRawFixedArray.
+  MUST_USE_RESULT AllocationResult
+      AllocateRaw(int object_size, Executability executable);
+
+  // Available bytes for objects in this space.
+  inline intptr_t Available();
+
+  virtual intptr_t Size() { return size_; }
+
+  virtual intptr_t SizeOfObjects() { return objects_size_; }
+
+  intptr_t MaximumCommittedMemory() { return maximum_committed_; }
+
+  intptr_t CommittedMemory() { return Size(); }
+
+  // Approximate amount of physical memory committed for this space.
+  size_t CommittedPhysicalMemory();
+
+  int PageCount() { return page_count_; }
+
+  // Finds an object for a given address; returns a Smi if it is not found.
+  // The function iterates through all objects in this space and may be slow.
+  Object* FindObject(Address a);
+
+  // Finds a large object page containing the given address, returns NULL
+  // if such a page doesn't exist.
+  LargePage* FindPage(Address a);
+
+  // Frees unmarked objects.
+  void FreeUnmarkedObjects();
+
+  // Checks whether a heap object is in this space; O(1).
+  bool Contains(HeapObject* obj);
+
+  // Checks whether the space is empty.
+  bool IsEmpty() { return first_page_ == NULL; }
+
+  LargePage* first_page() { return first_page_; }
+
+#ifdef VERIFY_HEAP
+  virtual void Verify();
+#endif
+
+#ifdef DEBUG
+  virtual void Print();
+  void ReportStatistics();
+  void CollectCodeStatistics();
+#endif
+  // Checks whether an address is in the object area of this space.  It
+  // iterates over all objects in the space and may be slow.
+  bool SlowContains(Address addr) { return FindObject(addr)->IsHeapObject(); }
+
+ private:
+  intptr_t max_capacity_;
+  intptr_t maximum_committed_;
+  // The head of the linked list of large object chunks.
+  LargePage* first_page_;
+  intptr_t size_;          // allocated bytes
+  int page_count_;         // number of chunks
+  intptr_t objects_size_;  // size of objects
+  // Maps MemoryChunk::kAlignment-aligned chunks to the large pages covering
+  // them.
+  HashMap chunk_map_;
+
+  friend class LargeObjectIterator;
+
+ public:
+  TRACK_MEMORY("LargeObjectSpace")
+};
+
+
+class LargeObjectIterator : public ObjectIterator {
+ public:
+  explicit LargeObjectIterator(LargeObjectSpace* space);
+  LargeObjectIterator(LargeObjectSpace* space, HeapObjectCallback size_func);
+
+  HeapObject* Next();
+
+  // Implementation of ObjectIterator.
+  virtual HeapObject* next_object() { return Next(); }
+
+ private:
+  LargePage* current_;
+  HeapObjectCallback size_func_;
+};
+
+
+// Iterates over the chunks (pages and large object pages) that can contain
+// pointers to new space.
+class PointerChunkIterator BASE_EMBEDDED {
+ public:
+  inline explicit PointerChunkIterator(Heap* heap);
+
+  // Return NULL when the iterator is done.
+  MemoryChunk* next() {
+    switch (state_) {
+      case kOldPointerState: {
+        if (old_pointer_iterator_.has_next()) {
+          return old_pointer_iterator_.next();
+        }
+        state_ = kMapState;
+        // Fall through.
+      }
+      case kMapState: {
+        if (map_iterator_.has_next()) {
+          return map_iterator_.next();
+        }
+        state_ = kLargeObjectState;
+        // Fall through.
+      }
+      case kLargeObjectState: {
+        HeapObject* heap_object;
+        do {
+          heap_object = lo_iterator_.Next();
+          if (heap_object == NULL) {
+            state_ = kFinishedState;
+            return NULL;
+          }
+          // Fixed arrays are the only pointer-containing objects in large
+          // object space.
+        } while (!heap_object->IsFixedArray());
+        MemoryChunk* answer = MemoryChunk::FromAddress(heap_object->address());
+        return answer;
+      }
+      case kFinishedState:
+        return NULL;
+      default:
+        break;
+    }
+    UNREACHABLE();
+    return NULL;
+  }
+
+
+ private:
+  enum State { kOldPointerState, kMapState, kLargeObjectState, kFinishedState };
+  State state_;
+  PageIterator old_pointer_iterator_;
+  PageIterator map_iterator_;
+  LargeObjectIterator lo_iterator_;
+};
+
+
+#ifdef DEBUG
+struct CommentStatistic {
+  const char* comment;
+  int size;
+  int count;
+  void Clear() {
+    comment = NULL;
+    size = 0;
+    count = 0;
+  }
+  // Must be small, since an iteration is used for lookup.
+  static const int kMaxComments = 64;
+};
+#endif
+}
+}  // namespace v8::internal
+
+#endif  // V8_HEAP_SPACES_H_
diff --git a/src/heap/store-buffer-inl.h b/src/heap/store-buffer-inl.h
new file mode 100644
index 0000000..1606465
--- /dev/null
+++ b/src/heap/store-buffer-inl.h
@@ -0,0 +1,63 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_STORE_BUFFER_INL_H_
+#define V8_STORE_BUFFER_INL_H_
+
+#include "src/heap/store-buffer.h"
+
+namespace v8 {
+namespace internal {
+
+Address StoreBuffer::TopAddress() {
+  return reinterpret_cast<Address>(heap_->store_buffer_top_address());
+}
+
+
+void StoreBuffer::Mark(Address addr) {
+  DCHECK(!heap_->cell_space()->Contains(addr));
+  DCHECK(!heap_->code_space()->Contains(addr));
+  DCHECK(!heap_->old_data_space()->Contains(addr));
+  Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
+  *top++ = addr;
+  heap_->public_set_store_buffer_top(top);
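+  // The buffer start is aligned to twice kStoreBufferSize, so the overflow
+  // bit becomes set in the top pointer exactly when top reaches limit_.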
+  if ((reinterpret_cast<uintptr_t>(top) & kStoreBufferOverflowBit) != 0) {
+    DCHECK(top == limit_);
+    Compact();
+  } else {
+    DCHECK(top < limit_);
+  }
+}
+
+
+void StoreBuffer::EnterDirectlyIntoStoreBuffer(Address addr) {
+  if (store_buffer_rebuilding_enabled_) {
+    SLOW_DCHECK(!heap_->cell_space()->Contains(addr) &&
+                !heap_->code_space()->Contains(addr) &&
+                !heap_->old_data_space()->Contains(addr) &&
+                !heap_->new_space()->Contains(addr));
+    Address* top = old_top_;
+    *top++ = addr;
+    old_top_ = top;
+    old_buffer_is_sorted_ = false;
+    old_buffer_is_filtered_ = false;
+    if (top >= old_limit_) {
+      DCHECK(callback_ != NULL);
+      (*callback_)(heap_, MemoryChunk::FromAnyPointerAddress(heap_, addr),
+                   kStoreBufferFullEvent);
+    }
+  }
+}
+
+
+void StoreBuffer::ClearDeadObject(HeapObject* object) {
+  Address& map_field = Memory::Address_at(object->address());
+  if (heap_->map_space()->Contains(map_field)) {
+    map_field = NULL;
+  }
+}
+}
+}  // namespace v8::internal
+
+#endif  // V8_STORE_BUFFER_INL_H_
diff --git a/src/heap/store-buffer.cc b/src/heap/store-buffer.cc
new file mode 100644
index 0000000..278e9f2
--- /dev/null
+++ b/src/heap/store-buffer.cc
@@ -0,0 +1,581 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <algorithm>
+
+#include "src/v8.h"
+
+#include "src/base/atomicops.h"
+#include "src/counters.h"
+#include "src/heap/store-buffer-inl.h"
+
+namespace v8 {
+namespace internal {
+
+StoreBuffer::StoreBuffer(Heap* heap)
+    : heap_(heap),
+      start_(NULL),
+      limit_(NULL),
+      old_start_(NULL),
+      old_limit_(NULL),
+      old_top_(NULL),
+      old_reserved_limit_(NULL),
+      old_buffer_is_sorted_(false),
+      old_buffer_is_filtered_(false),
+      during_gc_(false),
+      store_buffer_rebuilding_enabled_(false),
+      callback_(NULL),
+      may_move_store_buffer_entries_(true),
+      virtual_memory_(NULL),
+      hash_set_1_(NULL),
+      hash_set_2_(NULL),
+      hash_sets_are_empty_(true) {}
+
+
+void StoreBuffer::SetUp() {
+  virtual_memory_ = new base::VirtualMemory(kStoreBufferSize * 3);
+  uintptr_t start_as_int =
+      reinterpret_cast<uintptr_t>(virtual_memory_->address());
+  start_ =
+      reinterpret_cast<Address*>(RoundUp(start_as_int, kStoreBufferSize * 2));
+  limit_ = start_ + (kStoreBufferSize / kPointerSize);
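+  // The reservation is three times the buffer size so that a start rounded up
+  // to twice the buffer size, plus the buffer itself, always fits inside it.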
+
+  old_virtual_memory_ =
+      new base::VirtualMemory(kOldStoreBufferLength * kPointerSize);
+  old_top_ = old_start_ =
+      reinterpret_cast<Address*>(old_virtual_memory_->address());
+  // We don't know the exact alignment guarantees of the OS allocator, but it
+  // is certainly at least 4K, so the low 12 bits of old_start_ are zero.
+  DCHECK((reinterpret_cast<uintptr_t>(old_start_) & 0xfff) == 0);
+  int initial_length =
+      static_cast<int>(base::OS::CommitPageSize() / kPointerSize);
+  DCHECK(initial_length > 0);
+  DCHECK(initial_length <= kOldStoreBufferLength);
+  old_limit_ = old_start_ + initial_length;
+  old_reserved_limit_ = old_start_ + kOldStoreBufferLength;
+
+  CHECK(old_virtual_memory_->Commit(reinterpret_cast<void*>(old_start_),
+                                    (old_limit_ - old_start_) * kPointerSize,
+                                    false));
+
+  DCHECK(reinterpret_cast<Address>(start_) >= virtual_memory_->address());
+  DCHECK(reinterpret_cast<Address>(limit_) >= virtual_memory_->address());
+  Address* vm_limit = reinterpret_cast<Address*>(
+      reinterpret_cast<char*>(virtual_memory_->address()) +
+      virtual_memory_->size());
+  DCHECK(start_ <= vm_limit);
+  DCHECK(limit_ <= vm_limit);
+  USE(vm_limit);
+  DCHECK((reinterpret_cast<uintptr_t>(limit_) & kStoreBufferOverflowBit) != 0);
+  DCHECK((reinterpret_cast<uintptr_t>(limit_ - 1) & kStoreBufferOverflowBit) ==
+         0);
+
+  CHECK(virtual_memory_->Commit(reinterpret_cast<Address>(start_),
+                                kStoreBufferSize,
+                                false));  // Not executable.
+  heap_->public_set_store_buffer_top(start_);
+
+  hash_set_1_ = new uintptr_t[kHashSetLength];
+  hash_set_2_ = new uintptr_t[kHashSetLength];
+  hash_sets_are_empty_ = false;
+
+  ClearFilteringHashSets();
+}
+
+
+void StoreBuffer::TearDown() {
+  delete virtual_memory_;
+  delete old_virtual_memory_;
+  delete[] hash_set_1_;
+  delete[] hash_set_2_;
+  old_start_ = old_top_ = old_limit_ = old_reserved_limit_ = NULL;
+  start_ = limit_ = NULL;
+  heap_->public_set_store_buffer_top(start_);
+}
+
+
+void StoreBuffer::StoreBufferOverflow(Isolate* isolate) {
+  isolate->heap()->store_buffer()->Compact();
+  isolate->counters()->store_buffer_overflows()->Increment();
+}
+
+
+void StoreBuffer::Uniq() {
+  // Remove adjacent duplicates and cells that do not point at new space.
+  Address previous = NULL;
+  Address* write = old_start_;
+  DCHECK(may_move_store_buffer_entries_);
+  for (Address* read = old_start_; read < old_top_; read++) {
+    Address current = *read;
+    if (current != previous) {
+      if (heap_->InNewSpace(*reinterpret_cast<Object**>(current))) {
+        *write++ = current;
+      }
+    }
+    previous = current;
+  }
+  old_top_ = write;
+}
+
+
+bool StoreBuffer::SpaceAvailable(intptr_t space_needed) {
+  return old_limit_ - old_top_ >= space_needed;
+}
+
+
+void StoreBuffer::EnsureSpace(intptr_t space_needed) {
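+  // Grow the committed part of the old buffer (doubling each time) until the
+  // request fits or the reservation is exhausted; after that fall back to
+  // compaction, filtering and page exemption below.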
+  while (old_limit_ - old_top_ < space_needed &&
+         old_limit_ < old_reserved_limit_) {
+    size_t grow = old_limit_ - old_start_;  // Double size.
+    CHECK(old_virtual_memory_->Commit(reinterpret_cast<void*>(old_limit_),
+                                      grow * kPointerSize, false));
+    old_limit_ += grow;
+  }
+
+  if (SpaceAvailable(space_needed)) return;
+
+  if (old_buffer_is_filtered_) return;
+  DCHECK(may_move_store_buffer_entries_);
+  Compact();
+
+  old_buffer_is_filtered_ = true;
+  bool page_has_scan_on_scavenge_flag = false;
+
+  PointerChunkIterator it(heap_);
+  MemoryChunk* chunk;
+  while ((chunk = it.next()) != NULL) {
+    if (chunk->scan_on_scavenge()) {
+      page_has_scan_on_scavenge_flag = true;
+      break;
+    }
+  }
+
+  if (page_has_scan_on_scavenge_flag) {
+    Filter(MemoryChunk::SCAN_ON_SCAVENGE);
+  }
+
+  if (SpaceAvailable(space_needed)) return;
+
+  // Sample 1 entry in 97 and filter out the pages where we estimate that more
+  // than 1 in 8 pointers are to new space.
+  static const int kSampleFinenesses = 5;
+  static const struct Samples {
+    int prime_sample_step;
+    int threshold;
+  } samples[kSampleFinenesses] = {
+        {97, ((Page::kPageSize / kPointerSize) / 97) / 8},
+        {23, ((Page::kPageSize / kPointerSize) / 23) / 16},
+        {7, ((Page::kPageSize / kPointerSize) / 7) / 32},
+        {3, ((Page::kPageSize / kPointerSize) / 3) / 256},
+        {1, 0}};
+  for (int i = 0; i < kSampleFinenesses; i++) {
+    ExemptPopularPages(samples[i].prime_sample_step, samples[i].threshold);
+    // As a last resort we mark all pages as being exempt from the store buffer.
+    DCHECK(i != (kSampleFinenesses - 1) || old_top_ == old_start_);
+    if (SpaceAvailable(space_needed)) return;
+  }
+  UNREACHABLE();
+}
+
+
+// Sample the store buffer to see whether some pages are taking up a lot of
+// space in it.
+void StoreBuffer::ExemptPopularPages(int prime_sample_step, int threshold) {
+  PointerChunkIterator it(heap_);
+  MemoryChunk* chunk;
+  while ((chunk = it.next()) != NULL) {
+    chunk->set_store_buffer_counter(0);
+  }
+  bool created_new_scan_on_scavenge_pages = false;
+  MemoryChunk* previous_chunk = NULL;
+  for (Address* p = old_start_; p < old_top_; p += prime_sample_step) {
+    Address addr = *p;
+    MemoryChunk* containing_chunk = NULL;
+    if (previous_chunk != NULL && previous_chunk->Contains(addr)) {
+      containing_chunk = previous_chunk;
+    } else {
+      containing_chunk = MemoryChunk::FromAnyPointerAddress(heap_, addr);
+    }
+    int old_counter = containing_chunk->store_buffer_counter();
+    if (old_counter >= threshold) {
+      containing_chunk->set_scan_on_scavenge(true);
+      created_new_scan_on_scavenge_pages = true;
+    }
+    containing_chunk->set_store_buffer_counter(old_counter + 1);
+    previous_chunk = containing_chunk;
+  }
+  if (created_new_scan_on_scavenge_pages) {
+    Filter(MemoryChunk::SCAN_ON_SCAVENGE);
+  }
+  old_buffer_is_filtered_ = true;
+}
+
+
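+// Compacts the old buffer in place, dropping entries whose page has the given
+// flag set; such pages are rescanned wholesale during the next scavenge
+// instead of relying on their store buffer entries.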
+void StoreBuffer::Filter(int flag) {
+  Address* new_top = old_start_;
+  MemoryChunk* previous_chunk = NULL;
+  for (Address* p = old_start_; p < old_top_; p++) {
+    Address addr = *p;
+    MemoryChunk* containing_chunk = NULL;
+    if (previous_chunk != NULL && previous_chunk->Contains(addr)) {
+      containing_chunk = previous_chunk;
+    } else {
+      containing_chunk = MemoryChunk::FromAnyPointerAddress(heap_, addr);
+      previous_chunk = containing_chunk;
+    }
+    if (!containing_chunk->IsFlagSet(flag)) {
+      *new_top++ = addr;
+    }
+  }
+  old_top_ = new_top;
+
+  // Filtering hash sets are inconsistent with the store buffer after this
+  // operation.
+  ClearFilteringHashSets();
+}
+
+
+void StoreBuffer::SortUniq() {
+  Compact();
+  if (old_buffer_is_sorted_) return;
+  std::sort(old_start_, old_top_);
+  Uniq();
+
+  old_buffer_is_sorted_ = true;
+
+  // Filtering hash sets are inconsistent with the store buffer after this
+  // operation.
+  ClearFilteringHashSets();
+}
+
+
+bool StoreBuffer::PrepareForIteration() {
+  Compact();
+  PointerChunkIterator it(heap_);
+  MemoryChunk* chunk;
+  bool page_has_scan_on_scavenge_flag = false;
+  while ((chunk = it.next()) != NULL) {
+    if (chunk->scan_on_scavenge()) {
+      page_has_scan_on_scavenge_flag = true;
+      break;
+    }
+  }
+
+  if (page_has_scan_on_scavenge_flag) {
+    Filter(MemoryChunk::SCAN_ON_SCAVENGE);
+  }
+
+  // Filtering hash sets are inconsistent with the store buffer after
+  // iteration.
+  ClearFilteringHashSets();
+
+  return page_has_scan_on_scavenge_flag;
+}
+
+
+#ifdef DEBUG
+void StoreBuffer::Clean() {
+  ClearFilteringHashSets();
+  Uniq();  // Also removes things that no longer point to new space.
+  EnsureSpace(kStoreBufferSize / 2);
+}
+
+
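+// Caches the most recently found slot so that repeated assertions on the same
+// cell avoid rescanning both buffers from scratch.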
+static Address* in_store_buffer_1_element_cache = NULL;
+
+
+bool StoreBuffer::CellIsInStoreBuffer(Address cell_address) {
+  if (!FLAG_enable_slow_asserts) return true;
+  if (in_store_buffer_1_element_cache != NULL &&
+      *in_store_buffer_1_element_cache == cell_address) {
+    return true;
+  }
+  Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
+  for (Address* current = top - 1; current >= start_; current--) {
+    if (*current == cell_address) {
+      in_store_buffer_1_element_cache = current;
+      return true;
+    }
+  }
+  for (Address* current = old_top_ - 1; current >= old_start_; current--) {
+    if (*current == cell_address) {
+      in_store_buffer_1_element_cache = current;
+      return true;
+    }
+  }
+  return false;
+}
+#endif
+
+
+void StoreBuffer::ClearFilteringHashSets() {
+  if (!hash_sets_are_empty_) {
+    memset(reinterpret_cast<void*>(hash_set_1_), 0,
+           sizeof(uintptr_t) * kHashSetLength);
+    memset(reinterpret_cast<void*>(hash_set_2_), 0,
+           sizeof(uintptr_t) * kHashSetLength);
+    hash_sets_are_empty_ = true;
+  }
+}
+
+
+void StoreBuffer::GCPrologue() {
+  ClearFilteringHashSets();
+  during_gc_ = true;
+}
+
+
+#ifdef VERIFY_HEAP
+void StoreBuffer::VerifyPointers(LargeObjectSpace* space) {
+  LargeObjectIterator it(space);
+  for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
+    if (object->IsFixedArray()) {
+      Address slot_address = object->address();
+      Address end = object->address() + object->Size();
+
+      while (slot_address < end) {
+        HeapObject** slot = reinterpret_cast<HeapObject**>(slot_address);
+        // When we are not in GC the Heap::InNewSpace() predicate
+        // checks that pointers which satisfy predicate point into
+        // the active semispace.
+        Object* object = reinterpret_cast<Object*>(
+            base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
+        heap_->InNewSpace(object);
+        slot_address += kPointerSize;
+      }
+    }
+  }
+}
+#endif
+
+
+void StoreBuffer::Verify() {
+#ifdef VERIFY_HEAP
+  VerifyPointers(heap_->lo_space());
+#endif
+}
+
+
+void StoreBuffer::GCEpilogue() {
+  during_gc_ = false;
+#ifdef VERIFY_HEAP
+  if (FLAG_verify_heap) {
+    Verify();
+  }
+#endif
+}
+
+
+void StoreBuffer::FindPointersToNewSpaceInRegion(
+    Address start, Address end, ObjectSlotCallback slot_callback,
+    bool clear_maps) {
+  for (Address slot_address = start; slot_address < end;
+       slot_address += kPointerSize) {
+    Object** slot = reinterpret_cast<Object**>(slot_address);
+    Object* object = reinterpret_cast<Object*>(
+        base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
+    if (heap_->InNewSpace(object)) {
+      HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
+      DCHECK(heap_object->IsHeapObject());
+      // The new space object was not promoted if it still contains a map
+      // pointer. Clear the map field now lazily.
+      if (clear_maps) ClearDeadObject(heap_object);
+      slot_callback(reinterpret_cast<HeapObject**>(slot), heap_object);
+      object = reinterpret_cast<Object*>(
+          base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
+      if (heap_->InNewSpace(object)) {
+        EnterDirectlyIntoStoreBuffer(slot_address);
+      }
+    }
+  }
+}
+
+
+void StoreBuffer::IteratePointersInStoreBuffer(ObjectSlotCallback slot_callback,
+                                               bool clear_maps) {
+  Address* limit = old_top_;
+  old_top_ = old_start_;
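+  // The old buffer is rebuilt in place while we iterate: each processed slot
+  // can re-enter at most one address, so old_top_ never overtakes the entries
+  // still to be visited (checked by the DCHECK below).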
+  {
+    DontMoveStoreBufferEntriesScope scope(this);
+    for (Address* current = old_start_; current < limit; current++) {
+#ifdef DEBUG
+      Address* saved_top = old_top_;
+#endif
+      Object** slot = reinterpret_cast<Object**>(*current);
+      Object* object = reinterpret_cast<Object*>(
+          base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
+      if (heap_->InFromSpace(object)) {
+        HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
+        // The new space object was not promoted if it still contains a map
+        // pointer. Clear the map field now lazily.
+        if (clear_maps) ClearDeadObject(heap_object);
+        slot_callback(reinterpret_cast<HeapObject**>(slot), heap_object);
+        object = reinterpret_cast<Object*>(
+            base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
+        if (heap_->InNewSpace(object)) {
+          EnterDirectlyIntoStoreBuffer(reinterpret_cast<Address>(slot));
+        }
+      }
+      DCHECK(old_top_ == saved_top + 1 || old_top_ == saved_top);
+    }
+  }
+}
+
+
+void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback) {
+  IteratePointersToNewSpace(slot_callback, false);
+}
+
+
+void StoreBuffer::IteratePointersToNewSpaceAndClearMaps(
+    ObjectSlotCallback slot_callback) {
+  IteratePointersToNewSpace(slot_callback, true);
+}
+
+
+void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback,
+                                            bool clear_maps) {
+  // We do not sort or remove duplicated entries from the store buffer because
+  // we expect that the callback will rebuild the store buffer, thus removing
+  // all duplicates and pointers to old space.
+  bool some_pages_to_scan = PrepareForIteration();
+
+  // TODO(gc): we want to skip slots on evacuation candidates
+  // but we can't simply figure that out from slot address
+  // because slot can belong to a large object.
+  IteratePointersInStoreBuffer(slot_callback, clear_maps);
+
+  // We are done scanning all the pointers that were in the store buffer, but
+  // there may be some pages marked scan_on_scavenge that have pointers to new
+  // space that are not in the store buffer.  We must scan them now.  As we
+  // scan, the surviving pointers to new space will be added to the store
+  // buffer.  If there are still a lot of pointers to new space then we will
+  // keep the scan_on_scavenge flag on the page and discard the pointers that
+  // were added to the store buffer.  If there are not many pointers to new
+  // space left on the page we will keep the pointers in the store buffer and
+  // remove the flag from the page.
+  if (some_pages_to_scan) {
+    if (callback_ != NULL) {
+      (*callback_)(heap_, NULL, kStoreBufferStartScanningPagesEvent);
+    }
+    PointerChunkIterator it(heap_);
+    MemoryChunk* chunk;
+    while ((chunk = it.next()) != NULL) {
+      if (chunk->scan_on_scavenge()) {
+        chunk->set_scan_on_scavenge(false);
+        if (callback_ != NULL) {
+          (*callback_)(heap_, chunk, kStoreBufferScanningPageEvent);
+        }
+        if (chunk->owner() == heap_->lo_space()) {
+          LargePage* large_page = reinterpret_cast<LargePage*>(chunk);
+          HeapObject* array = large_page->GetObject();
+          DCHECK(array->IsFixedArray());
+          Address start = array->address();
+          Address end = start + array->Size();
+          FindPointersToNewSpaceInRegion(start, end, slot_callback, clear_maps);
+        } else {
+          Page* page = reinterpret_cast<Page*>(chunk);
+          PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner());
+          if (owner == heap_->map_space()) {
+            DCHECK(page->WasSwept());
+            HeapObjectIterator iterator(page, NULL);
+            for (HeapObject* heap_object = iterator.Next(); heap_object != NULL;
+                 heap_object = iterator.Next()) {
+              // We skip free space objects.
+              if (!heap_object->IsFiller()) {
+                DCHECK(heap_object->IsMap());
+                FindPointersToNewSpaceInRegion(
+                    heap_object->address() + Map::kPointerFieldsBeginOffset,
+                    heap_object->address() + Map::kPointerFieldsEndOffset,
+                    slot_callback, clear_maps);
+              }
+            }
+          } else {
+            if (!page->SweepingCompleted()) {
+              heap_->mark_compact_collector()->SweepInParallel(page, owner);
+              if (!page->SweepingCompleted()) {
+                // We were not able to sweep that page, i.e., a concurrent
+                // sweeper thread currently owns this page.
+                // TODO(hpayer): This may introduce a huge pause here. We just
+                // care about finishing the sweeping of the scan-on-scavenge
+                // page.
+                heap_->mark_compact_collector()->EnsureSweepingCompleted();
+              }
+            }
+            CHECK(page->owner() == heap_->old_pointer_space());
+            HeapObjectIterator iterator(page, NULL);
+            for (HeapObject* heap_object = iterator.Next(); heap_object != NULL;
+                 heap_object = iterator.Next()) {
+              // We iterate over objects that contain new space pointers only.
+              if (!heap_object->MayContainRawValues()) {
+                FindPointersToNewSpaceInRegion(
+                    heap_object->address() + HeapObject::kHeaderSize,
+                    heap_object->address() + heap_object->Size(), slot_callback,
+                    clear_maps);
+              }
+            }
+          }
+        }
+      }
+    }
+    if (callback_ != NULL) {
+      (*callback_)(heap_, NULL, kStoreBufferScanningPageEvent);
+    }
+  }
+}
+
+
+void StoreBuffer::Compact() {
+  Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
+
+  if (top == start_) return;
+
+  // There's no check of the limit in the loop below, so we check here for
+  // the worst case (compaction doesn't eliminate any pointers).
+  DCHECK(top <= limit_);
+  heap_->public_set_store_buffer_top(start_);
+  EnsureSpace(top - start_);
+  DCHECK(may_move_store_buffer_entries_);
+  // Goes through the addresses in the store buffer attempting to remove
+  // duplicates.  In the interest of speed this is a lossy operation.  Some
+  // duplicates will remain.  We have two hash sets with different hash
+  // functions to reduce the number of unnecessary clashes.
+  hash_sets_are_empty_ = false;  // Hash sets are in use.
+  for (Address* current = start_; current < top; current++) {
+    DCHECK(!heap_->cell_space()->Contains(*current));
+    DCHECK(!heap_->code_space()->Contains(*current));
+    DCHECK(!heap_->old_data_space()->Contains(*current));
+    uintptr_t int_addr = reinterpret_cast<uintptr_t>(*current);
+    // Shift out the last bits including any tags.
+    int_addr >>= kPointerSizeLog2;
+    // The upper part of an address is basically random because of ASLR and OS
+    // non-determinism, so we use only the bits within a page for hashing to
+    // make v8's behavior (more) deterministic.
+    uintptr_t hash_addr =
+        int_addr & (Page::kPageAlignmentMask >> kPointerSizeLog2);
+    int hash1 = ((hash_addr ^ (hash_addr >> kHashSetLengthLog2)) &
+                 (kHashSetLength - 1));
+    if (hash_set_1_[hash1] == int_addr) continue;
+    uintptr_t hash2 = (hash_addr - (hash_addr >> kHashSetLengthLog2));
+    hash2 ^= hash2 >> (kHashSetLengthLog2 * 2);
+    hash2 &= (kHashSetLength - 1);
+    if (hash_set_2_[hash2] == int_addr) continue;
+    if (hash_set_1_[hash1] == 0) {
+      hash_set_1_[hash1] = int_addr;
+    } else if (hash_set_2_[hash2] == 0) {
+      hash_set_2_[hash2] = int_addr;
+    } else {
+      // Rather than slowing down, we just throw away some entries.  This will
+      // cause some duplicates to remain undetected.
+      hash_set_1_[hash1] = int_addr;
+      hash_set_2_[hash2] = 0;
+    }
+    old_buffer_is_sorted_ = false;
+    old_buffer_is_filtered_ = false;
+    *old_top_++ = reinterpret_cast<Address>(int_addr << kPointerSizeLog2);
+    DCHECK(old_top_ <= old_limit_);
+  }
+  heap_->isolate()->counters()->store_buffer_compactions()->Increment();
+}
+}
+}  // namespace v8::internal
diff --git a/src/heap/store-buffer.h b/src/heap/store-buffer.h
new file mode 100644
index 0000000..5efd692
--- /dev/null
+++ b/src/heap/store-buffer.h
@@ -0,0 +1,221 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_STORE_BUFFER_H_
+#define V8_STORE_BUFFER_H_
+
+#include "src/allocation.h"
+#include "src/base/logging.h"
+#include "src/base/platform/platform.h"
+#include "src/globals.h"
+
+namespace v8 {
+namespace internal {
+
+class Page;
+class PagedSpace;
+class StoreBuffer;
+
+typedef void (*ObjectSlotCallback)(HeapObject** from, HeapObject* to);
+
+typedef void (StoreBuffer::*RegionCallback)(Address start, Address end,
+                                            ObjectSlotCallback slot_callback,
+                                            bool clear_maps);
+
+// Used to implement the write barrier by collecting addresses of pointers
+// between spaces.
+class StoreBuffer {
+ public:
+  explicit StoreBuffer(Heap* heap);
+
+  static void StoreBufferOverflow(Isolate* isolate);
+
+  inline Address TopAddress();
+
+  void SetUp();
+  void TearDown();
+
+  // This is used by the mutator to enter addresses into the store buffer.
+  inline void Mark(Address addr);
+
+  // This is used by the heap traversal to enter the addresses into the store
+  // buffer that should still be in the store buffer after GC.  It enters
+  // addresses directly into the old buffer because the GC starts by wiping the
+  // old buffer and thereafter only visits each cell once so there is no need
+  // to attempt to remove any dupes.  During the first part of a GC we
+  // are using the store buffer to access the old spaces and at the same time
+  // we are rebuilding the store buffer using this function.  There is,
+  // however, no issue of overwriting the buffer we are iterating over: this
+  // stage of the scavenge can only reduce the number of addresses in the store
+  // buffer (some objects are promoted so pointers to them do not need to be in
+  // the store buffer).  The later parts of the GC scan the pages that are
+  // exempt from the store buffer and process the promotion queue.  These steps
+  // can overflow this buffer.  We check for this and on overflow we call the
+  // callback set up with the StoreBufferRebuildScope object.
+  inline void EnterDirectlyIntoStoreBuffer(Address addr);
+
+  // Iterates over all pointers that go from old space to new space.  It will
+  // delete the store buffer as it starts so the callback should reenter
+  // surviving old-to-new pointers into the store buffer to rebuild it.
+  void IteratePointersToNewSpace(ObjectSlotCallback callback);
+
+  // Same as IteratePointersToNewSpace but additionally clears maps in objects
+  // referenced from the store buffer that do not contain a forwarding pointer.
+  void IteratePointersToNewSpaceAndClearMaps(ObjectSlotCallback callback);
+
+  static const int kStoreBufferOverflowBit = 1 << (14 + kPointerSizeLog2);
+  static const int kStoreBufferSize = kStoreBufferOverflowBit;
+  static const int kStoreBufferLength = kStoreBufferSize / sizeof(Address);
+  static const int kOldStoreBufferLength = kStoreBufferLength * 16;
+  static const int kHashSetLengthLog2 = 12;
+  static const int kHashSetLength = 1 << kHashSetLengthLog2;
+
+  void Compact();
+
+  void GCPrologue();
+  void GCEpilogue();
+
+  Object*** Limit() { return reinterpret_cast<Object***>(old_limit_); }
+  Object*** Start() { return reinterpret_cast<Object***>(old_start_); }
+  Object*** Top() { return reinterpret_cast<Object***>(old_top_); }
+  void SetTop(Object*** top) {
+    DCHECK(top >= Start());
+    DCHECK(top <= Limit());
+    old_top_ = reinterpret_cast<Address*>(top);
+  }
+
+  bool old_buffer_is_sorted() { return old_buffer_is_sorted_; }
+  bool old_buffer_is_filtered() { return old_buffer_is_filtered_; }
+
+  // Goes through the store buffer removing pointers to things that have
+  // been promoted.  Rebuilds the store buffer completely if it overflowed.
+  void SortUniq();
+
+  void EnsureSpace(intptr_t space_needed);
+  void Verify();
+
+  bool PrepareForIteration();
+
+#ifdef DEBUG
+  void Clean();
+  // Slow, for asserts only.
+  bool CellIsInStoreBuffer(Address cell);
+#endif
+
+  void Filter(int flag);
+
+ private:
+  Heap* heap_;
+
+  // The store buffer is divided up into a new buffer that is constantly being
+  // filled by mutator activity and an old buffer that is filled with the data
+  // from the new buffer after compression.
+  Address* start_;
+  Address* limit_;
+
+  Address* old_start_;
+  Address* old_limit_;
+  Address* old_top_;
+  Address* old_reserved_limit_;
+  base::VirtualMemory* old_virtual_memory_;
+
+  bool old_buffer_is_sorted_;
+  bool old_buffer_is_filtered_;
+  bool during_gc_;
+  // The garbage collector iterates over many pointers to new space that are not
+  // handled by the store buffer.  This flag indicates whether the pointers
+  // found by the callbacks should be added to the store buffer or not.
+  bool store_buffer_rebuilding_enabled_;
+  StoreBufferCallback callback_;
+  bool may_move_store_buffer_entries_;
+
+  base::VirtualMemory* virtual_memory_;
+
+  // Two hash sets used for filtering.
+  // If address is in the hash set then it is guaranteed to be in the
+  // old part of the store buffer.
+  uintptr_t* hash_set_1_;
+  uintptr_t* hash_set_2_;
+  bool hash_sets_are_empty_;
+
+  void ClearFilteringHashSets();
+
+  bool SpaceAvailable(intptr_t space_needed);
+  void Uniq();
+  void ExemptPopularPages(int prime_sample_step, int threshold);
+
+  // Set the map field of the object to NULL if it contains a map.
+  inline void ClearDeadObject(HeapObject* object);
+
+  void IteratePointersToNewSpace(ObjectSlotCallback callback, bool clear_maps);
+
+  void FindPointersToNewSpaceInRegion(Address start, Address end,
+                                      ObjectSlotCallback slot_callback,
+                                      bool clear_maps);
+
+  // For each region of pointers on a page in use from an old space, call the
+  // visit_pointer_region callback.
+  // If either visit_pointer_region or the callback can cause an allocation in
+  // old space and a change in the allocation watermark, then
+  // can_preallocate_during_iteration should be set to true.
+  void IteratePointersOnPage(PagedSpace* space, Page* page,
+                             RegionCallback region_callback,
+                             ObjectSlotCallback slot_callback);
+
+  void IteratePointersInStoreBuffer(ObjectSlotCallback slot_callback,
+                                    bool clear_maps);
+
+#ifdef VERIFY_HEAP
+  void VerifyPointers(LargeObjectSpace* space);
+#endif
+
+  friend class StoreBufferRebuildScope;
+  friend class DontMoveStoreBufferEntriesScope;
+};
+
+
+class StoreBufferRebuildScope {
+ public:
+  explicit StoreBufferRebuildScope(Heap* heap, StoreBuffer* store_buffer,
+                                   StoreBufferCallback callback)
+      : store_buffer_(store_buffer),
+        stored_state_(store_buffer->store_buffer_rebuilding_enabled_),
+        stored_callback_(store_buffer->callback_) {
+    store_buffer_->store_buffer_rebuilding_enabled_ = true;
+    store_buffer_->callback_ = callback;
+    (*callback)(heap, NULL, kStoreBufferStartScanningPagesEvent);
+  }
+
+  ~StoreBufferRebuildScope() {
+    store_buffer_->callback_ = stored_callback_;
+    store_buffer_->store_buffer_rebuilding_enabled_ = stored_state_;
+  }
+
+ private:
+  StoreBuffer* store_buffer_;
+  bool stored_state_;
+  StoreBufferCallback stored_callback_;
+};
+
+
+class DontMoveStoreBufferEntriesScope {
+ public:
+  explicit DontMoveStoreBufferEntriesScope(StoreBuffer* store_buffer)
+      : store_buffer_(store_buffer),
+        stored_state_(store_buffer->may_move_store_buffer_entries_) {
+    store_buffer_->may_move_store_buffer_entries_ = false;
+  }
+
+  ~DontMoveStoreBufferEntriesScope() {
+    store_buffer_->may_move_store_buffer_entries_ = stored_state_;
+  }
+
+ private:
+  StoreBuffer* store_buffer_;
+  bool stored_state_;
+};
+}
+}  // namespace v8::internal
+
+#endif  // V8_STORE_BUFFER_H_
diff --git a/src/heap/sweeper-thread.cc b/src/heap/sweeper-thread.cc
new file mode 100644
index 0000000..b0e8cea
--- /dev/null
+++ b/src/heap/sweeper-thread.cc
@@ -0,0 +1,82 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/sweeper-thread.h"
+
+#include "src/v8.h"
+
+#include "src/isolate.h"
+#include "src/v8threads.h"
+
+namespace v8 {
+namespace internal {
+
+static const int kSweeperThreadStackSize = 64 * KB;
+
+SweeperThread::SweeperThread(Isolate* isolate)
+    : Thread(Thread::Options("v8:SweeperThread", kSweeperThreadStackSize)),
+      isolate_(isolate),
+      heap_(isolate->heap()),
+      collector_(heap_->mark_compact_collector()),
+      start_sweeping_semaphore_(0),
+      end_sweeping_semaphore_(0),
+      stop_semaphore_(0) {
+  DCHECK(!FLAG_job_based_sweeping);
+  base::NoBarrier_Store(&stop_thread_, static_cast<base::AtomicWord>(false));
+}
+
+
+void SweeperThread::Run() {
+  Isolate::SetIsolateThreadLocals(isolate_, NULL);
+  DisallowHeapAllocation no_allocation;
+  DisallowHandleAllocation no_handles;
+  DisallowHandleDereference no_deref;
+
+  while (true) {
+    start_sweeping_semaphore_.Wait();
+
+    if (base::Acquire_Load(&stop_thread_)) {
+      stop_semaphore_.Signal();
+      return;
+    }
+
+    collector_->SweepInParallel(heap_->old_data_space(), 0);
+    collector_->SweepInParallel(heap_->old_pointer_space(), 0);
+    end_sweeping_semaphore_.Signal();
+  }
+}
+
+
+void SweeperThread::Stop() {
+  base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(true));
+  start_sweeping_semaphore_.Signal();
+  stop_semaphore_.Wait();
+  Join();
+}
+
+
+void SweeperThread::StartSweeping() { start_sweeping_semaphore_.Signal(); }
+
+
+void SweeperThread::WaitForSweeperThread() { end_sweeping_semaphore_.Wait(); }
+
+
+bool SweeperThread::SweepingCompleted() {
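+  // A wait with a zero timeout is a non-blocking poll; if sweeping has
+  // finished, re-signal so a later WaitForSweeperThread() still succeeds.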
+  bool value = end_sweeping_semaphore_.WaitFor(base::TimeDelta::FromSeconds(0));
+  if (value) {
+    end_sweeping_semaphore_.Signal();
+  }
+  return value;
+}
+
+
+int SweeperThread::NumberOfThreads(int max_available) {
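+  // An explicit --sweeper-threads value wins; otherwise concurrent sweeping
+  // leaves one core for the main thread, while parallel sweeping uses all of
+  // them.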
+  if (!FLAG_concurrent_sweeping && !FLAG_parallel_sweeping) return 0;
+  if (FLAG_sweeper_threads > 0) return FLAG_sweeper_threads;
+  if (FLAG_concurrent_sweeping) return max_available - 1;
+  DCHECK(FLAG_parallel_sweeping);
+  return max_available;
+}
+}
+}  // namespace v8::internal
diff --git a/src/heap/sweeper-thread.h b/src/heap/sweeper-thread.h
new file mode 100644
index 0000000..fc6bdda
--- /dev/null
+++ b/src/heap/sweeper-thread.h
@@ -0,0 +1,45 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_SWEEPER_THREAD_H_
+#define V8_HEAP_SWEEPER_THREAD_H_
+
+#include "src/base/atomicops.h"
+#include "src/base/platform/platform.h"
+#include "src/flags.h"
+#include "src/utils.h"
+
+#include "src/heap/spaces.h"
+
+#include "src/heap/heap.h"
+
+namespace v8 {
+namespace internal {
+
+class SweeperThread : public base::Thread {
+ public:
+  explicit SweeperThread(Isolate* isolate);
+  ~SweeperThread() {}
+
+  void Run();
+  void Stop();
+  void StartSweeping();
+  void WaitForSweeperThread();
+  bool SweepingCompleted();
+
+  static int NumberOfThreads(int max_available);
+
+ private:
+  Isolate* isolate_;
+  Heap* heap_;
+  MarkCompactCollector* collector_;
+  base::Semaphore start_sweeping_semaphore_;
+  base::Semaphore end_sweeping_semaphore_;
+  base::Semaphore stop_semaphore_;
+  volatile base::AtomicWord stop_thread_;
+};
+}
+}  // namespace v8::internal
+
+#endif  // V8_HEAP_SWEEPER_THREAD_H_