Version 3.6.5

New incremental garbage collector.
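
An incremental collector interleaves short marking steps with the
running program instead of marking the whole heap in one
stop-the-world pause; a write barrier preserves the tri-color
invariant when the mutator stores into already-marked objects.  A
minimal illustrative sketch of that idea follows (a toy model, not
V8's implementation; every name in it is made up):

  #include <cstddef>
  #include <deque>
  #include <vector>

  // Toy tri-color incremental marker (illustration only).
  enum Color { WHITE, GREY, BLACK };

  struct Obj {
    Color color;
    std::vector<Obj*> fields;
    Obj() : color(WHITE) {}
  };

  class IncrementalMarker {
   public:
    void Start(const std::vector<Obj*>& roots) {
      for (size_t i = 0; i < roots.size(); i++) Grey(roots[i]);
    }

    // Process at most `budget` objects, then yield to the mutator.
    // Returns true once marking is complete.
    bool Step(size_t budget) {
      while (budget-- > 0 && !deque_.empty()) {
        Obj* obj = deque_.front();
        deque_.pop_front();
        obj->color = BLACK;
        for (size_t i = 0; i < obj->fields.size(); i++) Grey(obj->fields[i]);
      }
      return deque_.empty();
    }

    // Write barrier: a white value stored into a black host is
    // re-greyed so the marker cannot miss it.
    void RecordWrite(Obj* host, Obj* value) {
      if (host->color == BLACK && value->color == WHITE) Grey(value);
    }

   private:
    void Grey(Obj* obj) {
      if (obj->color == WHITE) {
        obj->color = GREY;
        deque_.push_back(obj);
      }
    }
    std::deque<Obj*> deque_;
  };

  int main() {
    Obj a, b;
    a.fields.push_back(&b);
    IncrementalMarker marker;
    std::vector<Obj*> roots;
    roots.push_back(&a);
    marker.Start(roots);
    while (!marker.Step(1)) { /* mutator runs between steps */ }
    return b.color == BLACK ? 0 : 1;
  }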

Removed the hard heap size limit (the soft heap size limit is still
700/1400 MB by default on 32-/64-bit platforms).
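
The defaults now fall out of a per-platform LUMP_OF_MEMORY constant
(see the heap.cc hunk below, where max_old_generation_size_ becomes
700ul * LUMP_OF_MEMORY).  A quick sketch of the arithmetic; the lump
values mirror the constants in the hunk, the program itself is only
illustrative:

  #include <cstdio>

  int main() {
    const unsigned long KB = 1024;
    const unsigned long MB = KB * KB;
    const unsigned long lump_default = MB;        // ia32 and others
    const unsigned long lump_x64 = 2 * MB;        // V8_TARGET_ARCH_X64
    const unsigned long lump_android = 128 * KB;  // ANDROID
    std::printf("default: %lu MB\n", 700ul * lump_default / MB);  // 700
    std::printf("x64:     %lu MB\n", 700ul * lump_x64 / MB);      // 1400
    std::printf("android: %lu MB\n", 700ul * lump_android / MB);  // 87
    return 0;
  }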

Implemented ES5 generic Array.prototype.toString (Issue 1361).

V8 now allows surrogate pair codes in decodeURIComponent (Issue 1415).

Fixed x64 RegExp start-of-string bug (Issues 1746, 1748).

Fixed propertyIsEnumerable for numeric properties (Issue 1692).

Fixed the MinGW and Windows 2000 builds.

Fixed "Prototype chain is not searched if named property handler does
not set a property" (Issue 1636).
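
In embedder terms, a named property interceptor reports "not handled"
by returning an empty handle, and with this fix the lookup then
continues into the prototype chain.  A hedged sketch against the C++
API of this line (the interceptor and install function are invented
names; the getter signature is the one from v8.h):

  #include <v8.h>
  #include <string.h>

  // Interceptor that only handles names starting with "x_".  Returning
  // an empty handle means "not intercepted"; with Issue 1636 fixed the
  // lookup then proceeds to search the prototype chain.
  static v8::Handle<v8::Value> FallthroughGetter(
      v8::Local<v8::String> name, const v8::AccessorInfo& info) {
    v8::String::Utf8Value utf8(name);
    if (*utf8 != NULL && strncmp(*utf8, "x_", 2) == 0) {
      return v8::Integer::New(42);  // handled by the interceptor
    }
    return v8::Handle<v8::Value>();  // fall through to the prototypes
  }

  void InstallInterceptor(v8::Handle<v8::ObjectTemplate> templ) {
    templ->SetNamedPropertyHandler(FallthroughGetter);
  }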

Made the RegExp.prototype object be a RegExp object (Issue 1217).

Disallowed future reserved words as labels in strict mode.

Fixed string split to correctly coerce the separator to a string
(Issue 1711).
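
The separator argument is now coerced with ToString, so an object with
a toString method can act as a separator.  A hedged embedding sketch
using the C++ API of this line (function name invented):

  #include <v8.h>

  // After the Issue 1711 fix the object separator below is coerced to
  // the string ",", so the split yields two parts.
  int SplitParts() {
    v8::HandleScope scope;
    v8::Persistent<v8::Context> context = v8::Context::New();
    v8::Context::Scope context_scope(context);
    v8::Handle<v8::Value> result = v8::Script::Compile(v8::String::New(
        "'a,b'.split({ toString: function() { return ','; } }).length"))
        ->Run();
    int parts = result->Int32Value();  // 2 with the fix applied
    context.Dispose();
    return parts;
  }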

API: Added an optional source length field to the Extension
constructor.
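
This lets an embedder register extension source whose length is known
up front, e.g. a buffer that is not NUL-terminated.  A minimal sketch;
the constructor signature is the one from v8.h, the extension itself
is invented:

  #include <v8.h>

  static const char kSource[] = "function answer() { return 42; }";

  class MyExtension : public v8::Extension {
   public:
    MyExtension()
        : v8::Extension("my/extension",
                        kSource,
                        0,                       // number of dependencies
                        NULL,                    // dependency names
                        sizeof(kSource) - 1) {}  // explicit source length
  };

  // Contexts that list "my/extension" in their
  // v8::ExtensionConfiguration get the source installed.
  static MyExtension my_extension;
  static v8::DeclareExtension my_extension_declaration(&my_extension);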

API: Added Debug::DisableAgent to match existing Debug::EnableAgent
(Issue 1573).
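
A sketch of the paired calls, assuming the v8-debug.h agent API of
this line (agent name and port are arbitrary):

  #include <v8-debug.h>

  // Start the TCP debugger agent, run for a while, then shut it down
  // again with the newly added counterpart call.
  void RunWithDebuggerAgent() {
    v8::Debug::EnableAgent("my-app", 5858, false /* wait_for_connection */);
    // ... execute scripts while remote debuggers may attach ...
    v8::Debug::DisableAgent();
  }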

Added "native" target to Makefile for the benefit of Linux distros.

Fixed "debugger stops stepping outside evaluate" (Issue 1639).

More work on ES-Harmony proxies.  Still hidden behind a flag.

Bug fixes and performance improvements on all platforms.

Review URL: http://codereview.chromium.org/8139027

git-svn-id: http://v8.googlecode.com/svn/trunk@9534 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/src/heap.cc b/src/heap.cc
index d018593..d1f48ac 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -36,13 +36,16 @@
 #include "deoptimizer.h"
 #include "global-handles.h"
 #include "heap-profiler.h"
+#include "incremental-marking.h"
 #include "liveobjectlist-inl.h"
 #include "mark-compact.h"
 #include "natives.h"
 #include "objects-visiting.h"
+#include "objects-visiting-inl.h"
 #include "runtime-profiler.h"
 #include "scopeinfo.h"
 #include "snapshot.h"
+#include "store-buffer.h"
 #include "v8threads.h"
 #include "vm-state-inl.h"
 #if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
@@ -58,10 +61,6 @@
 namespace internal {
 
 
-static const intptr_t kMinimumPromotionLimit = 2 * MB;
-static const intptr_t kMinimumAllocationLimit = 8 * MB;
-
-
 static Mutex* gc_initializer_mutex = OS::CreateMutex();
 
 
@@ -70,27 +69,21 @@
 // semispace_size_ should be a power of 2 and old_generation_size_ should be
 // a multiple of Page::kPageSize.
 #if defined(ANDROID)
-      reserved_semispace_size_(2*MB),
-      max_semispace_size_(2*MB),
-      initial_semispace_size_(128*KB),
-      max_old_generation_size_(192*MB),
-      max_executable_size_(max_old_generation_size_),
+#define LUMP_OF_MEMORY (128 * KB)
       code_range_size_(0),
 #elif defined(V8_TARGET_ARCH_X64)
-      reserved_semispace_size_(16*MB),
-      max_semispace_size_(16*MB),
-      initial_semispace_size_(1*MB),
-      max_old_generation_size_(1400*MB),
-      max_executable_size_(256*MB),
+#define LUMP_OF_MEMORY (2 * MB)
       code_range_size_(512*MB),
 #else
-      reserved_semispace_size_(8*MB),
-      max_semispace_size_(8*MB),
-      initial_semispace_size_(512*KB),
-      max_old_generation_size_(700*MB),
-      max_executable_size_(128*MB),
+#define LUMP_OF_MEMORY MB
       code_range_size_(0),
 #endif
+      reserved_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
+      max_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
+      initial_semispace_size_(Max(LUMP_OF_MEMORY, Page::kPageSize)),
+      max_old_generation_size_(700ul * LUMP_OF_MEMORY),
+      max_executable_size_(128l * LUMP_OF_MEMORY),
+
 // Variables set based on semispace_size_ and old_generation_size_ in
 // ConfigureHeap (survived_since_last_expansion_, external_allocation_limit_)
 // Will be 4 * reserved_semispace_size_ to ensure that young
@@ -100,6 +93,7 @@
       always_allocate_scope_depth_(0),
       linear_allocation_scope_depth_(0),
       contexts_disposed_(0),
+      scan_on_scavenge_pages_(0),
       new_space_(this),
       old_pointer_space_(NULL),
       old_data_space_(NULL),
@@ -109,7 +103,6 @@
       lo_space_(NULL),
       gc_state_(NOT_IN_GC),
       gc_post_processing_depth_(0),
-      mc_count_(0),
       ms_count_(0),
       gc_count_(0),
       unflattened_strings_length_(0),
@@ -121,10 +114,13 @@
 #endif  // DEBUG
       old_gen_promotion_limit_(kMinimumPromotionLimit),
       old_gen_allocation_limit_(kMinimumAllocationLimit),
+      old_gen_limit_factor_(1),
+      size_of_old_gen_at_last_old_space_gc_(0),
       external_allocation_limit_(0),
       amount_of_external_allocated_memory_(0),
       amount_of_external_allocated_memory_at_last_global_gc_(0),
       old_gen_exhausted_(false),
+      store_buffer_rebuilder_(store_buffer()),
       hidden_symbol_(NULL),
       global_gc_prologue_callback_(NULL),
       global_gc_epilogue_callback_(NULL),
@@ -141,12 +137,14 @@
       min_in_mutator_(kMaxInt),
       alive_after_last_gc_(0),
       last_gc_end_timestamp_(0.0),
-      page_watermark_invalidated_mark_(1 << Page::WATERMARK_INVALIDATED),
+      store_buffer_(this),
+      marking_(this),
+      incremental_marking_(this),
       number_idle_notifications_(0),
       last_idle_notification_gc_count_(0),
       last_idle_notification_gc_count_init_(false),
       configured_(false),
-      is_safe_to_read_maps_(true) {
+      chunks_queued_for_free_(NULL) {
   // Allow build-time customization of the max semispace size. Building
   // V8 with snapshots and a non-default max semispace size is much
   // easier if you can define it as part of the build environment.
@@ -224,29 +222,10 @@
 
 
 int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
-  ASSERT(!HEAP->InNewSpace(object));  // Code only works for old objects.
-  ASSERT(!HEAP->mark_compact_collector()->are_map_pointers_encoded());
-  MapWord map_word = object->map_word();
-  map_word.ClearMark();
-  map_word.ClearOverflow();
-  return object->SizeFromMap(map_word.ToMap());
-}
-
-
-int Heap::GcSafeSizeOfOldObjectWithEncodedMap(HeapObject* object) {
-  ASSERT(!HEAP->InNewSpace(object));  // Code only works for old objects.
-  ASSERT(HEAP->mark_compact_collector()->are_map_pointers_encoded());
-  uint32_t marker = Memory::uint32_at(object->address());
-  if (marker == MarkCompactCollector::kSingleFreeEncoding) {
-    return kIntSize;
-  } else if (marker == MarkCompactCollector::kMultiFreeEncoding) {
-    return Memory::int_at(object->address() + kIntSize);
-  } else {
-    MapWord map_word = object->map_word();
-    Address map_address = map_word.DecodeMapAddress(HEAP->map_space());
-    Map* map = reinterpret_cast<Map*>(HeapObject::FromAddress(map_address));
-    return object->SizeFromMap(map);
+  if (IntrusiveMarking::IsMarked(object)) {
+    return IntrusiveMarking::SizeOfMarkedObject(object);
   }
+  return object->SizeFromMap(object->map());
 }
 
 
@@ -400,6 +379,7 @@
 #endif  // DEBUG
 
   LiveObjectList::GCPrologue();
+  store_buffer()->GCPrologue();
 }
 
 intptr_t Heap::SizeOfObjects() {
@@ -412,6 +392,7 @@
 }
 
 void Heap::GarbageCollectionEpilogue() {
+  store_buffer()->GCEpilogue();
   LiveObjectList::GCEpilogue();
 #ifdef DEBUG
   allow_allocation(true);
@@ -443,13 +424,13 @@
 }
 
 
-void Heap::CollectAllGarbage(bool force_compaction) {
+void Heap::CollectAllGarbage(int flags) {
   // Since we are ignoring the return value, the exact choice of space does
   // not matter, so long as we do not specify NEW_SPACE, which would not
   // cause a full GC.
-  mark_compact_collector_.SetForceCompaction(force_compaction);
+  mark_compact_collector_.SetFlags(flags);
   CollectGarbage(OLD_POINTER_SPACE);
-  mark_compact_collector_.SetForceCompaction(false);
+  mark_compact_collector_.SetFlags(kNoGCFlags);
 }
 
 
@@ -457,8 +438,6 @@
   // Since we are ignoring the return value, the exact choice of space does
   // not matter, so long as we do not specify NEW_SPACE, which would not
   // cause a full GC.
-  mark_compact_collector()->SetForceCompaction(true);
-
   // Major GC would invoke weak handle callbacks on weakly reachable
   // handles, but won't collect weakly reachable objects until next
   // major GC.  Therefore if we collect aggressively and weak handle callback
@@ -467,13 +446,14 @@
   // Note: as weak callbacks can execute arbitrary code, we cannot
   // hope that eventually there will be no weak callbacks invocations.
   // Therefore stop recollecting after several attempts.
+  mark_compact_collector()->SetFlags(kMakeHeapIterableMask);
   const int kMaxNumberOfAttempts = 7;
   for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
     if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR)) {
       break;
     }
   }
-  mark_compact_collector()->SetForceCompaction(false);
+  mark_compact_collector()->SetFlags(kNoGCFlags);
 }
 
 
@@ -490,6 +470,23 @@
   allocation_timeout_ = Max(6, FLAG_gc_interval);
 #endif
 
+  if (collector == SCAVENGER && !incremental_marking()->IsStopped()) {
+    if (FLAG_trace_incremental_marking) {
+      PrintF("[IncrementalMarking] Scavenge during marking.\n");
+    }
+  }
+
+  if (collector == MARK_COMPACTOR &&
+      !mark_compact_collector()->PreciseSweepingRequired() &&
+      !incremental_marking()->IsStopped() &&
+      !incremental_marking()->should_hurry() &&
+      FLAG_incremental_marking_steps) {
+    if (FLAG_trace_incremental_marking) {
+      PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
+    }
+    collector = SCAVENGER;
+  }
+
   bool next_gc_likely_to_collect_more = false;
 
   { GCTracer tracer(this);
@@ -512,13 +509,24 @@
     GarbageCollectionEpilogue();
   }
 
+  ASSERT(collector == SCAVENGER || incremental_marking()->IsStopped());
+  if (incremental_marking()->IsStopped()) {
+    if (incremental_marking()->WorthActivating() && NextGCIsLikelyToBeFull()) {
+      incremental_marking()->Start();
+    }
+  }
+
   return next_gc_likely_to_collect_more;
 }
 
 
 void Heap::PerformScavenge() {
   GCTracer tracer(this);
-  PerformGarbageCollection(SCAVENGER, &tracer);
+  if (incremental_marking()->IsStopped()) {
+    PerformGarbageCollection(SCAVENGER, &tracer);
+  } else {
+    PerformGarbageCollection(MARK_COMPACTOR, &tracer);
+  }
 }
 
 
@@ -610,13 +618,6 @@
 
   // Committing memory to from space failed.
   // Try shrinking and try again.
-  PagedSpaces spaces;
-  for (PagedSpace* space = spaces.next();
-       space != NULL;
-       space = spaces.next()) {
-    space->RelinkPageListInChunkOrder(true);
-  }
-
   Shrink();
   if (new_space_.CommitFromSpaceIfNeeded()) return;
 
@@ -647,7 +648,10 @@
 
 
 void Heap::ClearNormalizedMapCaches() {
-  if (isolate_->bootstrapper()->IsActive()) return;
+  if (isolate_->bootstrapper()->IsActive() &&
+      !incremental_marking()->IsMarking()) {
+    return;
+  }
 
   Object* context = global_contexts_list_;
   while (!context->IsUndefined()) {
@@ -657,24 +661,6 @@
 }
 
 
-#ifdef DEBUG
-
-enum PageWatermarkValidity {
-  ALL_VALID,
-  ALL_INVALID
-};
-
-static void VerifyPageWatermarkValidity(PagedSpace* space,
-                                        PageWatermarkValidity validity) {
-  PageIterator it(space, PageIterator::PAGES_IN_USE);
-  bool expected_value = (validity == ALL_VALID);
-  while (it.has_next()) {
-    Page* page = it.next();
-    ASSERT(page->IsWatermarkValid() == expected_value);
-  }
-}
-#endif
-
 void Heap::UpdateSurvivalRateTrend(int start_new_space_size) {
   double survival_rate =
       (static_cast<double>(young_survivors_after_last_gc_) * 100) /
@@ -727,6 +713,13 @@
 
   int start_new_space_size = Heap::new_space()->SizeAsInt();
 
+  if (IsHighSurvivalRate()) {
+    // We speed up the incremental marker if it is running so that it
+    // does not fall behind the rate of promotion, which would cause a
+    // constantly growing old space.
+    incremental_marking()->NotifyOfHighPromotionRate();
+  }
+
   if (collector == MARK_COMPACTOR) {
     // Perform mark-sweep with optional compaction.
     MarkCompact(tracer);
@@ -736,11 +729,7 @@
 
     UpdateSurvivalRateTrend(start_new_space_size);
 
-    intptr_t old_gen_size = PromotedSpaceSize();
-    old_gen_promotion_limit_ =
-        old_gen_size + Max(kMinimumPromotionLimit, old_gen_size / 3);
-    old_gen_allocation_limit_ =
-        old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 2);
+    size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSize();
 
     if (high_survival_rate_during_scavenges &&
         IsStableOrIncreasingSurvivalTrend()) {
@@ -750,10 +739,16 @@
       // In this case we aggressively raise old generation memory limits to
       // postpone subsequent mark-sweep collection and thus trade memory
       // space for the mutation speed.
-      old_gen_promotion_limit_ *= 2;
-      old_gen_allocation_limit_ *= 2;
+      old_gen_limit_factor_ = 2;
+    } else {
+      old_gen_limit_factor_ = 1;
     }
 
+    old_gen_promotion_limit_ =
+        OldGenPromotionLimit(size_of_old_gen_at_last_old_space_gc_);
+    old_gen_allocation_limit_ =
+        OldGenAllocationLimit(size_of_old_gen_at_last_old_space_gc_);
+
     old_gen_exhausted_ = false;
   } else {
     tracer_ = tracer;
@@ -782,9 +777,7 @@
         amount_of_external_allocated_memory_;
   }
 
-  GCCallbackFlags callback_flags = tracer->is_compacting()
-      ? kGCCallbackFlagCompacted
-      : kNoGCCallbackFlags;
+  GCCallbackFlags callback_flags = kNoGCCallbackFlags;
   for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
     if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
       gc_epilogue_callbacks_[i].callback(gc_type, callback_flags);
@@ -808,34 +801,24 @@
 
   mark_compact_collector_.Prepare(tracer);
 
-  bool is_compacting = mark_compact_collector_.IsCompacting();
+  ms_count_++;
+  tracer->set_full_gc_count(ms_count_);
 
-  if (is_compacting) {
-    mc_count_++;
-  } else {
-    ms_count_++;
-  }
-  tracer->set_full_gc_count(mc_count_ + ms_count_);
+  MarkCompactPrologue();
 
-  MarkCompactPrologue(is_compacting);
-
-  is_safe_to_read_maps_ = false;
   mark_compact_collector_.CollectGarbage();
-  is_safe_to_read_maps_ = true;
 
   LOG(isolate_, ResourceEvent("markcompact", "end"));
 
   gc_state_ = NOT_IN_GC;
 
-  Shrink();
-
   isolate_->counters()->objs_since_last_full()->Set(0);
 
   contexts_disposed_ = 0;
 }
 
 
-void Heap::MarkCompactPrologue(bool is_compacting) {
+void Heap::MarkCompactPrologue() {
   // At any old GC clear the keyed lookup cache to enable collection of unused
   // maps.
   isolate_->keyed_lookup_cache()->Clear();
@@ -847,7 +830,8 @@
 
   CompletelyClearInstanceofCache();
 
-  if (is_compacting) FlushNumberStringCache();
+  // TODO(1605): select a heuristic for flushing the NumberString cache with
+  // FlushNumberStringCache.
   if (FLAG_cleanup_code_caches_at_gc) {
     polymorphic_code_cache()->set_cache(undefined_value());
   }
@@ -857,13 +841,8 @@
 
 
 Object* Heap::FindCodeObject(Address a) {
-  Object* obj = NULL;  // Initialization to please compiler.
-  { MaybeObject* maybe_obj = code_space_->FindObject(a);
-    if (!maybe_obj->ToObject(&obj)) {
-      obj = lo_space_->FindObject(a)->ToObjectUnchecked();
-    }
-  }
-  return obj;
+  return isolate()->inner_pointer_to_code_cache()->
+      GcSafeFindCodeForInnerPointer(a);
 }
 
 
@@ -911,14 +890,18 @@
   // do not expect them.
   VerifyNonPointerSpacePointersVisitor v;
   HeapObjectIterator code_it(HEAP->code_space());
-  for (HeapObject* object = code_it.next();
-       object != NULL; object = code_it.next())
+  for (HeapObject* object = code_it.Next();
+       object != NULL; object = code_it.Next())
     object->Iterate(&v);
 
-  HeapObjectIterator data_it(HEAP->old_data_space());
-  for (HeapObject* object = data_it.next();
-       object != NULL; object = data_it.next())
-    object->Iterate(&v);
+  // The old data space is normally swept conservatively, in which case the
+  // iterator does not work, so we normally skip the next bit.
+  if (!HEAP->old_data_space()->was_swept_conservatively()) {
+    HeapObjectIterator data_it(HEAP->old_data_space());
+    for (HeapObject* object = data_it.Next();
+         object != NULL; object = data_it.Next())
+      object->Iterate(&v);
+  }
 }
 #endif
 
@@ -940,6 +923,64 @@
 }
 
 
+void Heap::ScavengeStoreBufferCallback(
+    Heap* heap,
+    MemoryChunk* page,
+    StoreBufferEvent event) {
+  heap->store_buffer_rebuilder_.Callback(page, event);
+}
+
+
+void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
+  if (event == kStoreBufferStartScanningPagesEvent) {
+    start_of_current_page_ = NULL;
+    current_page_ = NULL;
+  } else if (event == kStoreBufferScanningPageEvent) {
+    if (current_page_ != NULL) {
+      // If this page already overflowed the store buffer during this iteration.
+      if (current_page_->scan_on_scavenge()) {
+        // Then we should wipe out the entries that have been added for it.
+        store_buffer_->SetTop(start_of_current_page_);
+      } else if (store_buffer_->Top() - start_of_current_page_ >=
+                 (store_buffer_->Limit() - store_buffer_->Top()) >> 2) {
+        // Did we find too many pointers in the previous page?  The heuristic
+        // is that no page can take more than 1/5 of the remaining slots in
+        // the store buffer.
+        current_page_->set_scan_on_scavenge(true);
+        store_buffer_->SetTop(start_of_current_page_);
+      } else {
+        // In this case the page we scanned took a reasonable number of slots in
+        // the store buffer.  It has now been rehabilitated and is no longer
+        // marked scan_on_scavenge.
+        ASSERT(!current_page_->scan_on_scavenge());
+      }
+    }
+    start_of_current_page_ = store_buffer_->Top();
+    current_page_ = page;
+  } else if (event == kStoreBufferFullEvent) {
+    // The current page overflowed the store buffer again.  Wipe out its entries
+    // in the store buffer and mark it scan-on-scavenge again.  This may happen
+    // several times while scanning.
+    if (current_page_ == NULL) {
+      // Store buffer overflowed while scanning promoted objects.  These are not
+      // in any particular page, though they are likely to be clustered by the
+      // allocation routines.
+      store_buffer_->HandleFullness();
+    } else {
+      // Store buffer overflowed while scanning a particular old space page for
+      // pointers to new space.
+      ASSERT(current_page_ == page);
+      ASSERT(page != NULL);
+      current_page_->set_scan_on_scavenge(true);
+      ASSERT(start_of_current_page_ != store_buffer_->Top());
+      store_buffer_->SetTop(start_of_current_page_);
+    }
+  } else {
+    UNREACHABLE();
+  }
+}
+
+
 void Heap::Scavenge() {
 #ifdef DEBUG
   if (FLAG_enable_slow_asserts) VerifyNonPointerSpacePointers();
@@ -947,22 +988,6 @@
 
   gc_state_ = SCAVENGE;
 
-  SwitchScavengingVisitorsTableIfProfilingWasEnabled();
-
-  Page::FlipMeaningOfInvalidatedWatermarkFlag(this);
-#ifdef DEBUG
-  VerifyPageWatermarkValidity(old_pointer_space_, ALL_VALID);
-  VerifyPageWatermarkValidity(map_space_, ALL_VALID);
-#endif
-
-  // We do not update an allocation watermark of the top page during linear
-  // allocation to avoid overhead. So to maintain the watermark invariant
-  // we have to manually cache the watermark and mark the top page as having an
-  // invalid watermark. This guarantees that dirty regions iteration will use a
-  // correct watermark even if a linear allocation happens.
-  old_pointer_space_->FlushTopPageWatermark();
-  map_space_->FlushTopPageWatermark();
-
   // Implements Cheney's copying algorithm
   LOG(isolate_, ResourceEvent("scavenge", "begin"));
 
@@ -974,6 +999,13 @@
 
   CheckNewSpaceExpansionCriteria();
 
+  SelectScavengingVisitorsTable();
+
+  incremental_marking()->PrepareForScavenge();
+
+  old_pointer_space()->AdvanceSweeper(new_space_.Size());
+  old_data_space()->AdvanceSweeper(new_space_.Size());
+
   // Flip the semispaces.  After flipping, to space is empty, from space has
   // live objects.
   new_space_.Flip();
@@ -996,32 +1028,29 @@
   // for the addresses of promoted objects: every object promoted
   // frees up its size in bytes from the top of the new space, and
   // objects are at least one pointer in size.
-  Address new_space_front = new_space_.ToSpaceLow();
-  promotion_queue_.Initialize(new_space_.ToSpaceHigh());
+  Address new_space_front = new_space_.ToSpaceStart();
+  promotion_queue_.Initialize(new_space_.ToSpaceEnd());
 
-  is_safe_to_read_maps_ = false;
+#ifdef DEBUG
+  store_buffer()->Clean();
+#endif
+
   ScavengeVisitor scavenge_visitor(this);
   // Copy roots.
   IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
 
-  // Copy objects reachable from the old generation.  By definition,
-  // there are no intergenerational pointers in code or data spaces.
-  IterateDirtyRegions(old_pointer_space_,
-                      &Heap::IteratePointersInDirtyRegion,
-                      &ScavengePointer,
-                      WATERMARK_CAN_BE_INVALID);
-
-  IterateDirtyRegions(map_space_,
-                      &IteratePointersInDirtyMapsRegion,
-                      &ScavengePointer,
-                      WATERMARK_CAN_BE_INVALID);
-
-  lo_space_->IterateDirtyRegions(&ScavengePointer);
+  // Copy objects reachable from the old generation.
+  {
+    StoreBufferRebuildScope scope(this,
+                                  store_buffer(),
+                                  &ScavengeStoreBufferCallback);
+    store_buffer()->IteratePointersToNewSpace(&ScavengeObject);
+  }
 
   // Copy objects reachable from cells by scavenging cell values directly.
   HeapObjectIterator cell_iterator(cell_space_);
-  for (HeapObject* cell = cell_iterator.next();
-       cell != NULL; cell = cell_iterator.next()) {
+  for (HeapObject* cell = cell_iterator.Next();
+       cell != NULL; cell = cell_iterator.Next()) {
     if (cell->IsJSGlobalPropertyCell()) {
       Address value_address =
           reinterpret_cast<Address>(cell) +
@@ -1046,14 +1075,16 @@
 
   LiveObjectList::UpdateReferencesForScavengeGC();
   isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();
+  incremental_marking()->UpdateMarkingDequeAfterScavenge();
 
   ASSERT(new_space_front == new_space_.top());
 
-  is_safe_to_read_maps_ = true;
-
   // Set age mark.
   new_space_.set_age_mark(new_space_.top());
 
+  new_space_.LowerInlineAllocationLimit(
+      new_space_.inline_allocation_limit_step());
+
   // Update how much has survived scavenge.
   IncrementYoungSurvivorsCounter(static_cast<int>(
       (PromotedSpaceSize() - survived_watermark) + new_space_.Size()));
@@ -1112,35 +1143,56 @@
 }
 
 
+void Heap::UpdateReferencesInExternalStringTable(
+    ExternalStringTableUpdaterCallback updater_func) {
+
+  // Update old space string references.
+  if (external_string_table_.old_space_strings_.length() > 0) {
+    Object** start = &external_string_table_.old_space_strings_[0];
+    Object** end = start + external_string_table_.old_space_strings_.length();
+    for (Object** p = start; p < end; ++p) *p = updater_func(this, p);
+  }
+
+  UpdateNewSpaceReferencesInExternalStringTable(updater_func);
+}
+
+
 static Object* ProcessFunctionWeakReferences(Heap* heap,
                                              Object* function,
                                              WeakObjectRetainer* retainer) {
-  Object* head = heap->undefined_value();
+  Object* undefined = heap->undefined_value();
+  Object* head = undefined;
   JSFunction* tail = NULL;
   Object* candidate = function;
-  while (candidate != heap->undefined_value()) {
+  while (candidate != undefined) {
     // Check whether to keep the candidate in the list.
     JSFunction* candidate_function = reinterpret_cast<JSFunction*>(candidate);
     Object* retain = retainer->RetainAs(candidate);
     if (retain != NULL) {
-      if (head == heap->undefined_value()) {
+      if (head == undefined) {
         // First element in the list.
-        head = candidate_function;
+        head = retain;
       } else {
         // Subsequent elements in the list.
         ASSERT(tail != NULL);
-        tail->set_next_function_link(candidate_function);
+        tail->set_next_function_link(retain);
       }
       // Retained function is new tail.
+      candidate_function = reinterpret_cast<JSFunction*>(retain);
       tail = candidate_function;
+
+      ASSERT(retain->IsUndefined() || retain->IsJSFunction());
+
+      if (retain == undefined) break;
     }
+
     // Move to next element in the list.
     candidate = candidate_function->next_function_link();
   }
 
   // Terminate the list if there is one or more elements.
   if (tail != NULL) {
-    tail->set_next_function_link(heap->undefined_value());
+    tail->set_next_function_link(undefined);
   }
 
   return head;
@@ -1148,28 +1200,32 @@
 
 
 void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
-  Object* head = undefined_value();
+  Object* undefined = undefined_value();
+  Object* head = undefined;
   Context* tail = NULL;
   Object* candidate = global_contexts_list_;
-  while (candidate != undefined_value()) {
+  while (candidate != undefined) {
     // Check whether to keep the candidate in the list.
     Context* candidate_context = reinterpret_cast<Context*>(candidate);
     Object* retain = retainer->RetainAs(candidate);
     if (retain != NULL) {
-      if (head == undefined_value()) {
+      if (head == undefined) {
         // First element in the list.
-        head = candidate_context;
+        head = retain;
       } else {
         // Subsequent elements in the list.
         ASSERT(tail != NULL);
         tail->set_unchecked(this,
                             Context::NEXT_CONTEXT_LINK,
-                            candidate_context,
+                            retain,
                             UPDATE_WRITE_BARRIER);
       }
       // Retained context is new tail.
+      candidate_context = reinterpret_cast<Context*>(retain);
       tail = candidate_context;
 
+      if (retain == undefined) break;
+
       // Process the weak list of optimized functions for the context.
       Object* function_list_head =
           ProcessFunctionWeakReferences(
@@ -1181,6 +1237,7 @@
                                        function_list_head,
                                        UPDATE_WRITE_BARRIER);
     }
+
     // Move to next element in the list.
     candidate = candidate_context->get(Context::NEXT_CONTEXT_LINK);
   }
@@ -1212,35 +1269,45 @@
 Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
                          Address new_space_front) {
   do {
-    ASSERT(new_space_front <= new_space_.top());
-
+    SemiSpace::AssertValidRange(new_space_front, new_space_.top());
     // The addresses new_space_front and new_space_.top() define a
     // queue of unprocessed copied objects.  Process them until the
     // queue is empty.
-    while (new_space_front < new_space_.top()) {
-      HeapObject* object = HeapObject::FromAddress(new_space_front);
-      new_space_front += NewSpaceScavenger::IterateBody(object->map(), object);
+    while (new_space_front != new_space_.top()) {
+      if (!NewSpacePage::IsAtEnd(new_space_front)) {
+        HeapObject* object = HeapObject::FromAddress(new_space_front);
+        new_space_front +=
+          NewSpaceScavenger::IterateBody(object->map(), object);
+      } else {
+        new_space_front =
+            NewSpacePage::FromLimit(new_space_front)->next_page()->body();
+      }
     }
 
     // Promote and process all the to-be-promoted objects.
-    while (!promotion_queue_.is_empty()) {
-      HeapObject* target;
-      int size;
-      promotion_queue_.remove(&target, &size);
+    {
+      StoreBufferRebuildScope scope(this,
+                                    store_buffer(),
+                                    &ScavengeStoreBufferCallback);
+      while (!promotion_queue()->is_empty()) {
+        HeapObject* target;
+        int size;
+        promotion_queue()->remove(&target, &size);
 
-      // Promoted object might be already partially visited
-      // during dirty regions iteration. Thus we search specificly
-      // for pointers to from semispace instead of looking for pointers
-      // to new space.
-      ASSERT(!target->IsMap());
-      IterateAndMarkPointersToFromSpace(target->address(),
-                                        target->address() + size,
-                                        &ScavengePointer);
+        // Promoted object might be already partially visited
+        // during old space pointer iteration. Thus we search specifically
+        // for pointers to from semispace instead of looking for pointers
+        // to new space.
+        ASSERT(!target->IsMap());
+        IterateAndMarkPointersToFromSpace(target->address(),
+                                          target->address() + size,
+                                          &ScavengeObject);
+      }
     }
 
     // Take another spin if there are now unswept objects in new space
     // (there are currently no more unswept promoted objects).
-  } while (new_space_front < new_space_.top());
+  } while (new_space_front != new_space_.top());
 
   return new_space_front;
 }
@@ -1252,26 +1319,11 @@
 };
 
 
-typedef void (*ScavengingCallback)(Map* map,
-                                   HeapObject** slot,
-                                   HeapObject* object);
+enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS };
 
 
-static Atomic32 scavenging_visitors_table_mode_;
-static VisitorDispatchTable<ScavengingCallback> scavenging_visitors_table_;
-
-
-INLINE(static void DoScavengeObject(Map* map,
-                                    HeapObject** slot,
-                                    HeapObject* obj));
-
-
-void DoScavengeObject(Map* map, HeapObject** slot, HeapObject* obj) {
-  scavenging_visitors_table_.GetVisitor(map)(map, slot, obj);
-}
-
-
-template<LoggingAndProfiling logging_and_profiling_mode>
+template<MarksHandling marks_handling,
+         LoggingAndProfiling logging_and_profiling_mode>
 class ScavengingVisitor : public StaticVisitorBase {
  public:
   static void Initialize() {
@@ -1306,9 +1358,13 @@
                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
                     Visit);
 
-    table_.Register(kVisitJSFunction,
-                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
-                        template VisitSpecialized<JSFunction::kSize>);
+    if (marks_handling == IGNORE_MARKS) {
+      table_.Register(kVisitJSFunction,
+                      &ObjectEvacuationStrategy<POINTER_OBJECT>::
+                          template VisitSpecialized<JSFunction::kSize>);
+    } else {
+      table_.Register(kVisitJSFunction, &EvacuateJSFunction);
+    }
 
     table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
                                    kVisitDataObject,
@@ -1373,10 +1429,15 @@
       }
     }
 
+    if (marks_handling == TRANSFER_MARKS) {
+      if (Marking::TransferColor(source, target)) {
+        MemoryChunk::IncrementLiveBytes(target->address(), size);
+      }
+    }
+
     return target;
   }
 
-
   template<ObjectContents object_contents, SizeRestriction size_restriction>
   static inline void EvacuateObject(Map* map,
                                     HeapObject** slot,
@@ -1386,13 +1447,14 @@
            (object_size <= Page::kMaxHeapObjectSize));
     ASSERT(object->Size() == object_size);
 
-    Heap* heap = map->heap();
+    Heap* heap = map->GetHeap();
     if (heap->ShouldBePromoted(object->address(), object_size)) {
       MaybeObject* maybe_result;
 
       if ((size_restriction != SMALL) &&
           (object_size > Page::kMaxHeapObjectSize)) {
-        maybe_result = heap->lo_space()->AllocateRawFixedArray(object_size);
+        maybe_result = heap->lo_space()->AllocateRaw(object_size,
+                                                     NOT_EXECUTABLE);
       } else {
         if (object_contents == DATA_OBJECT) {
           maybe_result = heap->old_data_space()->AllocateRaw(object_size);
@@ -1414,13 +1476,36 @@
         return;
       }
     }
-    Object* result =
-        heap->new_space()->AllocateRaw(object_size)->ToObjectUnchecked();
+    MaybeObject* allocation = heap->new_space()->AllocateRaw(object_size);
+    Object* result = allocation->ToObjectUnchecked();
+
     *slot = MigrateObject(heap, object, HeapObject::cast(result), object_size);
     return;
   }
 
 
+  static inline void EvacuateJSFunction(Map* map,
+                                        HeapObject** slot,
+                                        HeapObject* object) {
+    ObjectEvacuationStrategy<POINTER_OBJECT>::
+        template VisitSpecialized<JSFunction::kSize>(map, slot, object);
+
+    HeapObject* target = *slot;
+    MarkBit mark_bit = Marking::MarkBitFrom(target);
+    if (Marking::IsBlack(mark_bit)) {
+      // This object is black and it might not be rescanned by marker.
+      // We should explicitly record code entry slot for compaction because
+      // promotion queue processing (IterateAndMarkPointersToFromSpace) will
+      // miss it as it is not HeapObject-tagged.
+      Address code_entry_slot =
+          target->address() + JSFunction::kCodeEntryOffset;
+      Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
+      map->GetHeap()->mark_compact_collector()->
+          RecordCodeEntrySlot(code_entry_slot, code);
+    }
+  }
+
+
   static inline void EvacuateFixedArray(Map* map,
                                         HeapObject** slot,
                                         HeapObject* object) {
@@ -1479,14 +1564,17 @@
                                                HeapObject* object) {
     ASSERT(IsShortcutCandidate(map->instance_type()));
 
-    if (ConsString::cast(object)->unchecked_second() ==
-        map->heap()->empty_string()) {
+    Heap* heap = map->GetHeap();
+
+    if (marks_handling == IGNORE_MARKS &&
+        ConsString::cast(object)->unchecked_second() ==
+        heap->empty_string()) {
       HeapObject* first =
           HeapObject::cast(ConsString::cast(object)->unchecked_first());
 
       *slot = first;
 
-      if (!map->heap()->InNewSpace(first)) {
+      if (!heap->InNewSpace(first)) {
         object->set_map_word(MapWord::FromForwardingAddress(first));
         return;
       }
@@ -1500,7 +1588,7 @@
         return;
       }
 
-      DoScavengeObject(first->map(), slot, first);
+      heap->DoScavengeObject(first->map(), slot, first);
       object->set_map_word(MapWord::FromForwardingAddress(*slot));
       return;
     }
@@ -1531,45 +1619,49 @@
 };
 
 
-template<LoggingAndProfiling logging_and_profiling_mode>
+template<MarksHandling marks_handling,
+         LoggingAndProfiling logging_and_profiling_mode>
 VisitorDispatchTable<ScavengingCallback>
-    ScavengingVisitor<logging_and_profiling_mode>::table_;
+    ScavengingVisitor<marks_handling, logging_and_profiling_mode>::table_;
 
 
 static void InitializeScavengingVisitorsTables() {
-  ScavengingVisitor<LOGGING_AND_PROFILING_DISABLED>::Initialize();
-  ScavengingVisitor<LOGGING_AND_PROFILING_ENABLED>::Initialize();
-  scavenging_visitors_table_.CopyFrom(
-      ScavengingVisitor<LOGGING_AND_PROFILING_DISABLED>::GetTable());
-  scavenging_visitors_table_mode_ = LOGGING_AND_PROFILING_DISABLED;
+  ScavengingVisitor<TRANSFER_MARKS,
+                    LOGGING_AND_PROFILING_DISABLED>::Initialize();
+  ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::Initialize();
+  ScavengingVisitor<TRANSFER_MARKS,
+                    LOGGING_AND_PROFILING_ENABLED>::Initialize();
+  ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::Initialize();
 }
 
 
-void Heap::SwitchScavengingVisitorsTableIfProfilingWasEnabled() {
-  if (scavenging_visitors_table_mode_ == LOGGING_AND_PROFILING_ENABLED) {
-    // Table was already updated by some isolate.
-    return;
-  }
-
-  if (isolate()->logger()->is_logging() |
+void Heap::SelectScavengingVisitorsTable() {
+  bool logging_and_profiling =
+      isolate()->logger()->is_logging() ||
       CpuProfiler::is_profiling(isolate()) ||
       (isolate()->heap_profiler() != NULL &&
-       isolate()->heap_profiler()->is_profiling())) {
-    // If one of the isolates is doing scavenge at this moment of time
-    // it might see this table in an inconsitent state when
-    // some of the callbacks point to
-    // ScavengingVisitor<LOGGING_AND_PROFILING_ENABLED> and others
-    // to ScavengingVisitor<LOGGING_AND_PROFILING_DISABLED>.
-    // However this does not lead to any bugs as such isolate does not have
-    // profiling enabled and any isolate with enabled profiling is guaranteed
-    // to see the table in the consistent state.
-    scavenging_visitors_table_.CopyFrom(
-        ScavengingVisitor<LOGGING_AND_PROFILING_ENABLED>::GetTable());
+       isolate()->heap_profiler()->is_profiling());
 
-    // We use Release_Store to prevent reordering of this write before writes
-    // to the table.
-    Release_Store(&scavenging_visitors_table_mode_,
-                  LOGGING_AND_PROFILING_ENABLED);
+  if (!incremental_marking()->IsMarking()) {
+    if (!logging_and_profiling) {
+      scavenging_visitors_table_.CopyFrom(
+          ScavengingVisitor<IGNORE_MARKS,
+                            LOGGING_AND_PROFILING_DISABLED>::GetTable());
+    } else {
+      scavenging_visitors_table_.CopyFrom(
+          ScavengingVisitor<IGNORE_MARKS,
+                            LOGGING_AND_PROFILING_ENABLED>::GetTable());
+    }
+  } else {
+    if (!logging_and_profiling) {
+      scavenging_visitors_table_.CopyFrom(
+          ScavengingVisitor<TRANSFER_MARKS,
+                            LOGGING_AND_PROFILING_DISABLED>::GetTable());
+    } else {
+      scavenging_visitors_table_.CopyFrom(
+          ScavengingVisitor<TRANSFER_MARKS,
+                            LOGGING_AND_PROFILING_ENABLED>::GetTable());
+    }
   }
 }
 
@@ -1579,7 +1671,7 @@
   MapWord first_word = object->map_word();
   ASSERT(!first_word.IsForwardingAddress());
   Map* map = first_word.ToMap();
-  DoScavengeObject(map, p, object);
+  map->GetHeap()->DoScavengeObject(map, p, object);
 }
 
 
@@ -1605,7 +1697,9 @@
 }
 
 
-MaybeObject* Heap::AllocateMap(InstanceType instance_type, int instance_size) {
+MaybeObject* Heap::AllocateMap(InstanceType instance_type,
+                               int instance_size,
+                               ElementsKind elements_kind) {
   Object* result;
   { MaybeObject* maybe_result = AllocateRawMap();
     if (!maybe_result->ToObject(&result)) return maybe_result;
@@ -1627,7 +1721,7 @@
   map->set_unused_property_fields(0);
   map->set_bit_field(0);
   map->set_bit_field2(1 << Map::kIsExtensible);
-  map->set_elements_kind(FAST_ELEMENTS);
+  map->set_elements_kind(elements_kind);
 
   // If the map object is aligned fill the padding area with Smi 0 objects.
   if (Map::kPadStart < Map::kSize) {
@@ -1707,7 +1801,7 @@
   }
   set_empty_fixed_array(FixedArray::cast(obj));
 
-  { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_DATA_SPACE);
+  { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
     if (!maybe_obj->ToObject(&obj)) return false;
   }
   set_null_value(obj);
@@ -1798,6 +1892,12 @@
   }
   set_byte_array_map(Map::cast(obj));
 
+  { MaybeObject* maybe_obj =
+        AllocateMap(FREE_SPACE_TYPE, kVariableSizeSentinel);
+    if (!maybe_obj->ToObject(&obj)) return false;
+  }
+  set_free_space_map(Map::cast(obj));
+
   { MaybeObject* maybe_obj = AllocateByteArray(0, TENURED);
     if (!maybe_obj->ToObject(&obj)) return false;
   }
@@ -1998,7 +2098,7 @@
                                  Object* to_number,
                                  byte kind) {
   Object* result;
-  { MaybeObject* maybe_result = Allocate(oddball_map(), OLD_DATA_SPACE);
+  { MaybeObject* maybe_result = Allocate(oddball_map(), OLD_POINTER_SPACE);
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
   return Oddball::cast(result)->Initialize(to_string, to_number, kind);
@@ -2011,7 +2111,13 @@
   { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
     if (!maybe_obj->ToObject(&obj)) return false;
   }
-  set_neander_map(Map::cast(obj));
+  // Don't use Smi-only elements optimizations for objects with the neander
+  // map. There are too many cases where element values are set directly,
+  // with no bottleneck that could trap the Smi-only -> fast elements
+  // transition, and there appears to be no benefit in optimizing this case.
+  Map* new_neander_map = Map::cast(obj);
+  new_neander_map->set_elements_kind(FAST_ELEMENTS);
+  set_neander_map(new_neander_map);
 
   { MaybeObject* maybe_obj = AllocateJSObjectFromMap(neander_map());
     if (!maybe_obj->ToObject(&obj)) return false;
@@ -2056,6 +2162,12 @@
   // To workaround the problem, make separate functions without inlining.
   Heap::CreateJSEntryStub();
   Heap::CreateJSConstructEntryStub();
+
+  // Create stubs that should be there, so we don't unexpectedly have to
+  // create them if we need them during the creation of another stub.
+  // Stub creation mixes raw pointers and handles in an unsafe manner so
+  // we cannot create stubs while we are creating stubs.
+  CodeStub::GenerateStubsAheadOfTime();
 }
 
 
@@ -2074,7 +2186,12 @@
   }
   set_nan_value(obj);
 
-  { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_DATA_SPACE);
+  { MaybeObject* maybe_obj = AllocateHeapNumber(V8_INFINITY, TENURED);
+    if (!maybe_obj->ToObject(&obj)) return false;
+  }
+  set_infinity_value(obj);
+
+  { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
     if (!maybe_obj->ToObject(&obj)) return false;
   }
   set_undefined_value(obj);
@@ -2126,26 +2243,34 @@
   set_the_hole_value(obj);
 
   { MaybeObject* maybe_obj = CreateOddball("arguments_marker",
-                                           Smi::FromInt(-4),
+                                           Smi::FromInt(-2),
                                            Oddball::kArgumentMarker);
     if (!maybe_obj->ToObject(&obj)) return false;
   }
   set_arguments_marker(obj);
 
   { MaybeObject* maybe_obj = CreateOddball("no_interceptor_result_sentinel",
-                                           Smi::FromInt(-2),
+                                           Smi::FromInt(-3),
                                            Oddball::kOther);
     if (!maybe_obj->ToObject(&obj)) return false;
   }
   set_no_interceptor_result_sentinel(obj);
 
   { MaybeObject* maybe_obj = CreateOddball("termination_exception",
-                                           Smi::FromInt(-3),
+                                           Smi::FromInt(-4),
                                            Oddball::kOther);
     if (!maybe_obj->ToObject(&obj)) return false;
   }
   set_termination_exception(obj);
 
+  { MaybeObject* maybe_obj = CreateOddball("frame_alignment_marker",
+                                           Smi::FromInt(-5),
+                                           Oddball::kOther);
+    if (!maybe_obj->ToObject(&obj)) return false;
+  }
+  set_frame_alignment_marker(obj);
+  STATIC_ASSERT(Oddball::kLeastHiddenOddballNumber == -5);
+
   // Allocate the empty string.
   { MaybeObject* maybe_obj = AllocateRawAsciiString(0, TENURED);
     if (!maybe_obj->ToObject(&obj)) return false;
@@ -2422,6 +2547,15 @@
 }
 
 
+MaybeObject* Heap::Uint32ToString(uint32_t value,
+                                  bool check_number_string_cache) {
+  Object* number;
+  MaybeObject* maybe = NumberFromUint32(value);
+  if (!maybe->To<Object>(&number)) return maybe;
+  return NumberToString(number, check_number_string_cache);
+}
+
+
 Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
   return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
 }
@@ -2737,25 +2871,23 @@
   // Make an attempt to flatten the buffer to reduce access time.
   buffer = buffer->TryFlattenGetString();
 
-  // TODO(1626): For now slicing external strings is not supported.  However,
-  // a flat cons string can have an external string as first part in some cases.
-  // Therefore we have to single out this case as well.
   if (!FLAG_string_slices ||
-      (buffer->IsConsString() &&
-        (!buffer->IsFlat() ||
-         !ConsString::cast(buffer)->first()->IsSeqString())) ||
-      buffer->IsExternalString() ||
+      !buffer->IsFlat() ||
       length < SlicedString::kMinLength ||
       pretenure == TENURED) {
     Object* result;
-    { MaybeObject* maybe_result = buffer->IsAsciiRepresentation()
-                     ? AllocateRawAsciiString(length, pretenure)
-                     : AllocateRawTwoByteString(length, pretenure);
+    // WriteToFlat takes care of the case when an indirect string has a
+    // different encoding from its underlying string.  These encodings may
+    // differ because of externalization.
+    bool is_ascii = buffer->IsAsciiRepresentation();
+    { MaybeObject* maybe_result = is_ascii
+                                  ? AllocateRawAsciiString(length, pretenure)
+                                  : AllocateRawTwoByteString(length, pretenure);
       if (!maybe_result->ToObject(&result)) return maybe_result;
     }
     String* string_result = String::cast(result);
     // Copy the characters into the new object.
-    if (buffer->IsAsciiRepresentation()) {
+    if (is_ascii) {
       ASSERT(string_result->IsAsciiRepresentation());
       char* dest = SeqAsciiString::cast(string_result)->GetChars();
       String::WriteToFlat(buffer, dest, start, end);
@@ -2768,12 +2900,17 @@
   }
 
   ASSERT(buffer->IsFlat());
-  ASSERT(!buffer->IsExternalString());
 #if DEBUG
   buffer->StringVerify();
 #endif
 
   Object* result;
+  // When slicing an indirect string we use its encoding for a newly created
+  // slice and don't check the encoding of the underlying string.  This is safe
+  // even if the encodings are different because of externalization.  If an
+  // indirect ASCII string is pointing to a two-byte string, the two-byte char
+  // codes of the underlying string must still fit into ASCII (because
+  // externalization must not change char codes).
   { Map* map = buffer->IsAsciiRepresentation()
                  ? sliced_ascii_string_map()
                  : sliced_string_map();
@@ -2799,13 +2936,14 @@
     sliced_string->set_parent(buffer);
     sliced_string->set_offset(start);
   }
-  ASSERT(sliced_string->parent()->IsSeqString());
+  ASSERT(sliced_string->parent()->IsSeqString() ||
+         sliced_string->parent()->IsExternalString());
   return result;
 }
 
 
 MaybeObject* Heap::AllocateExternalStringFromAscii(
-    ExternalAsciiString::Resource* resource) {
+    const ExternalAsciiString::Resource* resource) {
   size_t length = resource->length();
   if (length > static_cast<size_t>(String::kMaxLength)) {
     isolate()->context()->mark_out_of_memory();
@@ -2828,7 +2966,7 @@
 
 
 MaybeObject* Heap::AllocateExternalStringFromTwoByte(
-    ExternalTwoByteString::Resource* resource) {
+    const ExternalTwoByteString::Resource* resource) {
   size_t length = resource->length();
   if (length > static_cast<size_t>(String::kMaxLength)) {
     isolate()->context()->mark_out_of_memory();
@@ -2892,7 +3030,7 @@
   Object* result;
   { MaybeObject* maybe_result = (size <= MaxObjectSizeInPagedSpace())
                    ? old_data_space_->AllocateRaw(size)
-                   : lo_space_->AllocateRaw(size);
+                   : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
 
@@ -2928,8 +3066,8 @@
   } else if (size == 2 * kPointerSize) {
     filler->set_map(two_pointer_filler_map());
   } else {
-    filler->set_map(byte_array_map());
-    ByteArray::cast(filler)->set_length(ByteArray::LengthFor(size));
+    filler->set_map(free_space_map());
+    FreeSpace::cast(filler)->set_size(size);
   }
 }
 
@@ -2975,7 +3113,7 @@
   // Large code objects and code objects which should stay at a fixed address
   // are allocated in large object space.
   if (obj_size > MaxObjectSizeInPagedSpace() || immovable) {
-    maybe_result = lo_space_->AllocateRawCode(obj_size);
+    maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
   } else {
     maybe_result = code_space_->AllocateRaw(obj_size);
   }
@@ -3020,7 +3158,7 @@
   int obj_size = code->Size();
   MaybeObject* maybe_result;
   if (obj_size > MaxObjectSizeInPagedSpace()) {
-    maybe_result = lo_space_->AllocateRawCode(obj_size);
+    maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
   } else {
     maybe_result = code_space_->AllocateRaw(obj_size);
   }
@@ -3063,7 +3201,7 @@
 
   MaybeObject* maybe_result;
   if (new_obj_size > MaxObjectSizeInPagedSpace()) {
-    maybe_result = lo_space_->AllocateRawCode(new_obj_size);
+    maybe_result = lo_space_->AllocateRaw(new_obj_size, EXECUTABLE);
   } else {
     maybe_result = code_space_->AllocateRaw(new_obj_size);
   }
@@ -3112,9 +3250,9 @@
 }
 
 
-MaybeObject* Heap::InitializeFunction(JSFunction* function,
-                                      SharedFunctionInfo* shared,
-                                      Object* prototype) {
+void Heap::InitializeFunction(JSFunction* function,
+                              SharedFunctionInfo* shared,
+                              Object* prototype) {
   ASSERT(!prototype->IsMap());
   function->initialize_properties();
   function->initialize_elements();
@@ -3124,7 +3262,6 @@
   function->set_context(undefined_value());
   function->set_literals(empty_fixed_array());
   function->set_next_function_link(undefined_value());
-  return function;
 }
 
 
@@ -3134,8 +3271,18 @@
   // different context.
   JSFunction* object_function =
       function->context()->global_context()->object_function();
+
+  // Each function prototype gets a copy of the object function map.
+  // This avoids unwanted sharing of maps between prototypes of different
+  // constructors.
+  Map* new_map;
+  ASSERT(object_function->has_initial_map());
+  { MaybeObject* maybe_map =
+        object_function->initial_map()->CopyDropTransitions();
+    if (!maybe_map->To<Map>(&new_map)) return maybe_map;
+  }
   Object* prototype;
-  { MaybeObject* maybe_prototype = AllocateJSObject(object_function);
+  { MaybeObject* maybe_prototype = AllocateJSObjectFromMap(new_map);
     if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
   }
   // When creating the prototype for the function we must set its
@@ -3160,7 +3307,8 @@
   { MaybeObject* maybe_result = Allocate(function_map, space);
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
-  return InitializeFunction(JSFunction::cast(result), shared, prototype);
+  InitializeFunction(JSFunction::cast(result), shared, prototype);
+  return result;
 }
 
 
@@ -3330,6 +3478,9 @@
   // We cannot always fill with one_pointer_filler_map because objects
   // created from API functions expect their internal fields to be initialized
   // with undefined_value.
+  // Pre-allocated fields need to be initialized with undefined_value as well
+  // so that object accesses before the constructor completes (e.g. in the
+  // debugger) will not cause a crash.
   if (map->constructor()->IsJSFunction() &&
       JSFunction::cast(map->constructor())->shared()->
           IsInobjectSlackTrackingInProgress()) {
@@ -3339,7 +3490,7 @@
   } else {
     filler = Heap::undefined_value();
   }
-  obj->InitializeBody(map->instance_size(), filler);
+  obj->InitializeBody(map, Heap::undefined_value(), filler);
 }
 
 
@@ -3377,7 +3528,8 @@
   InitializeJSObjectFromMap(JSObject::cast(obj),
                             FixedArray::cast(properties),
                             map);
-  ASSERT(JSObject::cast(obj)->HasFastElements());
+  ASSERT(JSObject::cast(obj)->HasFastSmiOnlyElements() ||
+         JSObject::cast(obj)->HasFastElements());
   return obj;
 }
 
@@ -3420,6 +3572,7 @@
   if (!maybe_result->To<JSProxy>(&result)) return maybe_result;
   result->InitializeBody(map->instance_size(), Smi::FromInt(0));
   result->set_handler(handler);
+  result->set_hash(undefined_value());
   return result;
 }
 
@@ -3443,6 +3596,7 @@
   if (!maybe_result->To<JSFunctionProxy>(&result)) return maybe_result;
   result->InitializeBody(map->instance_size(), Smi::FromInt(0));
   result->set_handler(handler);
+  result->set_hash(undefined_value());
   result->set_call_trap(call_trap);
   result->set_construct_trap(construct_trap);
   return result;
@@ -3559,6 +3713,7 @@
               object_size);
   }
 
+  ASSERT(JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
   FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
   FixedArray* properties = FixedArray::cast(source->properties());
   // Update elements if necessary.
@@ -3591,13 +3746,13 @@
 
 MaybeObject* Heap::ReinitializeJSReceiver(
     JSReceiver* object, InstanceType type, int size) {
-  ASSERT(type >= FIRST_JS_RECEIVER_TYPE);
+  ASSERT(type >= FIRST_JS_OBJECT_TYPE);
 
   // Allocate fresh map.
   // TODO(rossberg): Once we optimize proxies, cache these maps.
   Map* map;
-  MaybeObject* maybe_map_obj = AllocateMap(type, size);
-  if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
+  MaybeObject* maybe = AllocateMap(type, size);
+  if (!maybe->To<Map>(&map)) return maybe;
 
   // Check that the receiver has at least the size of the fresh object.
   int size_difference = object->map()->instance_size() - map->instance_size();
@@ -3608,30 +3763,35 @@
   // Allocate the backing storage for the properties.
   int prop_size = map->unused_property_fields() - map->inobject_properties();
   Object* properties;
-  { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, TENURED);
-    if (!maybe_properties->ToObject(&properties)) return maybe_properties;
+  maybe = AllocateFixedArray(prop_size, TENURED);
+  if (!maybe->ToObject(&properties)) return maybe;
+
+  // Functions require some allocation, which might fail here.
+  SharedFunctionInfo* shared = NULL;
+  if (type == JS_FUNCTION_TYPE) {
+    String* name;
+    maybe = LookupAsciiSymbol("<freezing call trap>");
+    if (!maybe->To<String>(&name)) return maybe;
+    maybe = AllocateSharedFunctionInfo(name);
+    if (!maybe->To<SharedFunctionInfo>(&shared)) return maybe;
   }
 
+  // Because of possible retries of this function after failure,
+  // we must NOT fail after this point, where we have changed the type!
+
   // Reset the map for the object.
   object->set_map(map);
+  JSObject* jsobj = JSObject::cast(object);
 
   // Reinitialize the object from the constructor map.
-  InitializeJSObjectFromMap(JSObject::cast(object),
-                            FixedArray::cast(properties), map);
+  InitializeJSObjectFromMap(jsobj, FixedArray::cast(properties), map);
 
   // Functions require some minimal initialization.
   if (type == JS_FUNCTION_TYPE) {
-    String* name;
-    MaybeObject* maybe_name = LookupAsciiSymbol("<freezing call trap>");
-    if (!maybe_name->To<String>(&name)) return maybe_name;
-    SharedFunctionInfo* shared;
-    MaybeObject* maybe_shared = AllocateSharedFunctionInfo(name);
-    if (!maybe_shared->To<SharedFunctionInfo>(&shared)) return maybe_shared;
-    JSFunction* func;
-    MaybeObject* maybe_func =
-        InitializeFunction(JSFunction::cast(object), shared, the_hole_value());
-    if (!maybe_func->To<JSFunction>(&func)) return maybe_func;
-    func->set_context(isolate()->context()->global_context());
+    map->set_function_with_prototype(true);
+    InitializeFunction(JSFunction::cast(object), shared, the_hole_value());
+    JSFunction::cast(object)->set_context(
+        isolate()->context()->global_context());
   }
 
   // Put in filler if the new object is smaller than the old.
@@ -3814,7 +3974,7 @@
   // Allocate string.
   Object* result;
   { MaybeObject* maybe_result = (size > MaxObjectSizeInPagedSpace())
-                   ? lo_space_->AllocateRaw(size)
+                   ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
                    : old_data_space_->AllocateRaw(size);
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
@@ -3931,7 +4091,7 @@
   int size = FixedArray::SizeFor(length);
   return size <= kMaxObjectSizeInNewSpace
       ? new_space_.AllocateRaw(size)
-      : lo_space_->AllocateRawFixedArray(size);
+      : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
 }
 
 
@@ -4262,6 +4422,21 @@
 }
 
 
+bool Heap::IsHeapIterable() {
+  return (!old_pointer_space()->was_swept_conservatively() &&
+          !old_data_space()->was_swept_conservatively());
+}
+
+
+void Heap::EnsureHeapIsIterable() {
+  ASSERT(IsAllocationAllowed());
+  if (!IsHeapIterable()) {
+    CollectAllGarbage(kMakeHeapIterableMask);
+  }
+  ASSERT(IsHeapIterable());
+}
+
+
 bool Heap::IdleNotification() {
   static const int kIdlesBeforeScavenge = 4;
   static const int kIdlesBeforeMarkSweep = 7;
@@ -4292,7 +4467,7 @@
   if (number_idle_notifications_ == kIdlesBeforeScavenge) {
     if (contexts_disposed_ > 0) {
       HistogramTimerScope scope(isolate_->counters()->gc_context());
-      CollectAllGarbage(false);
+      CollectAllGarbage(kNoGCFlags);
     } else {
       CollectGarbage(NEW_SPACE);
     }
@@ -4304,12 +4479,12 @@
     // generated code for cached functions.
     isolate_->compilation_cache()->Clear();
 
-    CollectAllGarbage(false);
+    CollectAllGarbage(kNoGCFlags);
     new_space_.Shrink();
     last_idle_notification_gc_count_ = gc_count_;
 
   } else if (number_idle_notifications_ == kIdlesBeforeMarkCompact) {
-    CollectAllGarbage(true);
+    CollectAllGarbage(kNoGCFlags);
     new_space_.Shrink();
     last_idle_notification_gc_count_ = gc_count_;
     number_idle_notifications_ = 0;
@@ -4319,7 +4494,7 @@
       contexts_disposed_ = 0;
     } else {
       HistogramTimerScope scope(isolate_->counters()->gc_context());
-      CollectAllGarbage(false);
+      CollectAllGarbage(kNoGCFlags);
       last_idle_notification_gc_count_ = gc_count_;
     }
     // If this is the first idle notification, we reset the
@@ -4339,8 +4514,11 @@
 
   // Make sure that we have no pending context disposals and
   // conditionally uncommit from space.
-  ASSERT(contexts_disposed_ == 0);
+  // Take into account that we might have decided to delay full collection
+  // because incremental marking is in progress.
+  ASSERT((contexts_disposed_ == 0) || !incremental_marking()->IsStopped());
   if (uncommit) UncommitFromSpace();
+
   return finished;
 }
 
@@ -4374,11 +4552,11 @@
   USE(title);
   PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
          title, gc_count_);
-  PrintF("mark-compact GC : %d\n", mc_count_);
   PrintF("old_gen_promotion_limit_ %" V8_PTR_PREFIX "d\n",
          old_gen_promotion_limit_);
   PrintF("old_gen_allocation_limit_ %" V8_PTR_PREFIX "d\n",
          old_gen_allocation_limit_);
+  PrintF("old_gen_limit_factor_ %d\n", old_gen_limit_factor_);
 
   PrintF("\n");
   PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles());
@@ -4455,69 +4633,18 @@
 
 
 #ifdef DEBUG
-static void DummyScavengePointer(HeapObject** p) {
-}
-
-
-static void VerifyPointersUnderWatermark(
-    PagedSpace* space,
-    DirtyRegionCallback visit_dirty_region) {
-  PageIterator it(space, PageIterator::PAGES_IN_USE);
-
-  while (it.has_next()) {
-    Page* page = it.next();
-    Address start = page->ObjectAreaStart();
-    Address end = page->AllocationWatermark();
-
-    HEAP->IterateDirtyRegions(Page::kAllRegionsDirtyMarks,
-                              start,
-                              end,
-                              visit_dirty_region,
-                              &DummyScavengePointer);
-  }
-}
-
-
-static void VerifyPointersUnderWatermark(LargeObjectSpace* space) {
-  LargeObjectIterator it(space);
-  for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
-    if (object->IsFixedArray()) {
-      Address slot_address = object->address();
-      Address end = object->address() + object->Size();
-
-      while (slot_address < end) {
-        HeapObject** slot = reinterpret_cast<HeapObject**>(slot_address);
-        // When we are not in GC the Heap::InNewSpace() predicate
-        // checks that pointers which satisfy predicate point into
-        // the active semispace.
-        HEAP->InNewSpace(*slot);
-        slot_address += kPointerSize;
-      }
-    }
-  }
-}
-
-
 void Heap::Verify() {
   ASSERT(HasBeenSetup());
 
+  store_buffer()->Verify();
+
   VerifyPointersVisitor visitor;
   IterateRoots(&visitor, VISIT_ONLY_STRONG);
 
   new_space_.Verify();
 
-  VerifyPointersAndDirtyRegionsVisitor dirty_regions_visitor;
-  old_pointer_space_->Verify(&dirty_regions_visitor);
-  map_space_->Verify(&dirty_regions_visitor);
-
-  VerifyPointersUnderWatermark(old_pointer_space_,
-                               &IteratePointersInDirtyRegion);
-  VerifyPointersUnderWatermark(map_space_,
-                               &IteratePointersInDirtyMapsRegion);
-  VerifyPointersUnderWatermark(lo_space_);
-
-  VerifyPageWatermarkValidity(old_pointer_space_, ALL_INVALID);
-  VerifyPageWatermarkValidity(map_space_, ALL_INVALID);
+  old_pointer_space_->Verify(&visitor);
+  map_space_->Verify(&visitor);
 
   VerifyPointersVisitor no_dirty_regions_visitor;
   old_data_space_->Verify(&no_dirty_regions_visitor);
@@ -4526,6 +4653,7 @@
 
   lo_space_->Verify();
 }
+
 #endif  // DEBUG
 
 
@@ -4621,277 +4749,223 @@
 
 #ifdef DEBUG
 void Heap::ZapFromSpace() {
-  ASSERT(reinterpret_cast<Object*>(kFromSpaceZapValue)->IsFailure());
-  for (Address a = new_space_.FromSpaceLow();
-       a < new_space_.FromSpaceHigh();
-       a += kPointerSize) {
-    Memory::Address_at(a) = kFromSpaceZapValue;
+  NewSpacePageIterator it(new_space_.FromSpaceStart(),
+                          new_space_.FromSpaceEnd());
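+  // Overwrite every word of from-space with kFromSpaceZapValue so that stale
+  // pointers into it are easy to spot in debug builds.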
+  while (it.has_next()) {
+    NewSpacePage* page = it.next();
+    for (Address cursor = page->body(), limit = page->body_limit();
+         cursor < limit;
+         cursor += kPointerSize) {
+      Memory::Address_at(cursor) = kFromSpaceZapValue;
+    }
   }
 }
 #endif  // DEBUG
 
 
-bool Heap::IteratePointersInDirtyRegion(Heap* heap,
-                                        Address start,
-                                        Address end,
-                                        ObjectSlotCallback copy_object_func) {
-  Address slot_address = start;
-  bool pointers_to_new_space_found = false;
-
-  while (slot_address < end) {
-    Object** slot = reinterpret_cast<Object**>(slot_address);
-    if (heap->InNewSpace(*slot)) {
-      ASSERT((*slot)->IsHeapObject());
-      copy_object_func(reinterpret_cast<HeapObject**>(slot));
-      if (heap->InNewSpace(*slot)) {
-        ASSERT((*slot)->IsHeapObject());
-        pointers_to_new_space_found = true;
-      }
-    }
-    slot_address += kPointerSize;
-  }
-  return pointers_to_new_space_found;
-}
-
-
-// Compute start address of the first map following given addr.
-static inline Address MapStartAlign(Address addr) {
-  Address page = Page::FromAddress(addr)->ObjectAreaStart();
-  return page + (((addr - page) + (Map::kSize - 1)) / Map::kSize * Map::kSize);
-}
-
-
-// Compute end address of the first map preceding given addr.
-static inline Address MapEndAlign(Address addr) {
-  Address page = Page::FromAllocationTop(addr)->ObjectAreaStart();
-  return page + ((addr - page) / Map::kSize * Map::kSize);
-}
-
-
-static bool IteratePointersInDirtyMaps(Address start,
-                                       Address end,
-                                       ObjectSlotCallback copy_object_func) {
-  ASSERT(MapStartAlign(start) == start);
-  ASSERT(MapEndAlign(end) == end);
-
-  Address map_address = start;
-  bool pointers_to_new_space_found = false;
-
-  Heap* heap = HEAP;
-  while (map_address < end) {
-    ASSERT(!heap->InNewSpace(Memory::Object_at(map_address)));
-    ASSERT(Memory::Object_at(map_address)->IsMap());
-
-    Address pointer_fields_start = map_address + Map::kPointerFieldsBeginOffset;
-    Address pointer_fields_end = map_address + Map::kPointerFieldsEndOffset;
-
-    if (Heap::IteratePointersInDirtyRegion(heap,
-                                           pointer_fields_start,
-                                           pointer_fields_end,
-                                           copy_object_func)) {
-      pointers_to_new_space_found = true;
-    }
-
-    map_address += Map::kSize;
-  }
-
-  return pointers_to_new_space_found;
-}
-
-
-bool Heap::IteratePointersInDirtyMapsRegion(
-    Heap* heap,
-    Address start,
-    Address end,
-    ObjectSlotCallback copy_object_func) {
-  Address map_aligned_start = MapStartAlign(start);
-  Address map_aligned_end   = MapEndAlign(end);
-
-  bool contains_pointers_to_new_space = false;
-
-  if (map_aligned_start != start) {
-    Address prev_map = map_aligned_start - Map::kSize;
-    ASSERT(Memory::Object_at(prev_map)->IsMap());
-
-    Address pointer_fields_start =
-        Max(start, prev_map + Map::kPointerFieldsBeginOffset);
-
-    Address pointer_fields_end =
-        Min(prev_map + Map::kPointerFieldsEndOffset, end);
-
-    contains_pointers_to_new_space =
-      IteratePointersInDirtyRegion(heap,
-                                   pointer_fields_start,
-                                   pointer_fields_end,
-                                   copy_object_func)
-        || contains_pointers_to_new_space;
-  }
-
-  contains_pointers_to_new_space =
-    IteratePointersInDirtyMaps(map_aligned_start,
-                               map_aligned_end,
-                               copy_object_func)
-      || contains_pointers_to_new_space;
-
-  if (map_aligned_end != end) {
-    ASSERT(Memory::Object_at(map_aligned_end)->IsMap());
-
-    Address pointer_fields_start =
-        map_aligned_end + Map::kPointerFieldsBeginOffset;
-
-    Address pointer_fields_end =
-        Min(end, map_aligned_end + Map::kPointerFieldsEndOffset);
-
-    contains_pointers_to_new_space =
-      IteratePointersInDirtyRegion(heap,
-                                   pointer_fields_start,
-                                   pointer_fields_end,
-                                   copy_object_func)
-        || contains_pointers_to_new_space;
-  }
-
-  return contains_pointers_to_new_space;
-}
-
-
 void Heap::IterateAndMarkPointersToFromSpace(Address start,
                                              Address end,
                                              ObjectSlotCallback callback) {
   Address slot_address = start;
-  Page* page = Page::FromAddress(start);
 
-  uint32_t marks = page->GetRegionMarks();
+  // We are not collecting slots on new space objects during mutation,
+  // thus we have to scan for pointers to evacuation candidates when we
+  // promote objects. But we should not record any slots in non-black
+  // objects. Grey objects' slots would be rescanned anyway. A white
+  // object might not survive until the end of the collection, so
+  // recording its slots would violate the invariant.
+  bool record_slots = false;
+  if (incremental_marking()->IsCompacting()) {
+    MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::FromAddress(start));
+    record_slots = Marking::IsBlack(mark_bit);
+  }
 
   while (slot_address < end) {
     Object** slot = reinterpret_cast<Object**>(slot_address);
-    if (InFromSpace(*slot)) {
-      ASSERT((*slot)->IsHeapObject());
-      callback(reinterpret_cast<HeapObject**>(slot));
-      if (InNewSpace(*slot)) {
-        ASSERT((*slot)->IsHeapObject());
-        marks |= page->GetRegionMaskForAddress(slot_address);
+    Object* object = *slot;
+    // If the store buffer becomes overfull we mark pages as being exempt from
+    // the store buffer.  These pages are scanned to find pointers that point
+    // to the new space.  In that case we may hit newly promoted objects and
+    // fix the pointers before the promotion queue gets to them.  Thus the 'if'.
+    if (object->IsHeapObject()) {
+      if (Heap::InFromSpace(object)) {
+        callback(reinterpret_cast<HeapObject**>(slot),
+                 HeapObject::cast(object));
+        Object* new_object = *slot;
+        if (InNewSpace(new_object)) {
+          ASSERT(Heap::InToSpace(new_object));
+          ASSERT(new_object->IsHeapObject());
+          store_buffer_.EnterDirectlyIntoStoreBuffer(
+              reinterpret_cast<Address>(slot));
+        }
+        ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_object));
+      } else if (record_slots &&
+                 MarkCompactCollector::IsOnEvacuationCandidate(object)) {
+        mark_compact_collector()->RecordSlot(slot, slot, object);
       }
     }
     slot_address += kPointerSize;
   }
-
-  page->SetRegionMarks(marks);
 }
 
 
-uint32_t Heap::IterateDirtyRegions(
-    uint32_t marks,
-    Address area_start,
-    Address area_end,
-    DirtyRegionCallback visit_dirty_region,
-    ObjectSlotCallback copy_object_func) {
-  uint32_t newmarks = 0;
-  uint32_t mask = 1;
+#ifdef DEBUG
+typedef bool (*CheckStoreBufferFilter)(Object** addr);
 
-  if (area_start >= area_end) {
-    return newmarks;
-  }
 
-  Address region_start = area_start;
-
-  // area_start does not necessarily coincide with start of the first region.
-  // Thus to calculate the beginning of the next region we have to align
-  // area_start by Page::kRegionSize.
-  Address second_region =
-      reinterpret_cast<Address>(
-          reinterpret_cast<intptr_t>(area_start + Page::kRegionSize) &
-          ~Page::kRegionAlignmentMask);
-
-  // Next region might be beyond area_end.
-  Address region_end = Min(second_region, area_end);
-
-  if (marks & mask) {
-    if (visit_dirty_region(this, region_start, region_end, copy_object_func)) {
-      newmarks |= mask;
-    }
-  }
-  mask <<= 1;
-
-  // Iterate subsequent regions which fully lay inside [area_start, area_end[.
-  region_start = region_end;
-  region_end = region_start + Page::kRegionSize;
-
-  while (region_end <= area_end) {
-    if (marks & mask) {
-      if (visit_dirty_region(this,
-                             region_start,
-                             region_end,
-                             copy_object_func)) {
-        newmarks |= mask;
-      }
-    }
-
-    region_start = region_end;
-    region_end = region_start + Page::kRegionSize;
-
-    mask <<= 1;
-  }
-
-  if (region_start != area_end) {
-    // A small piece of area left uniterated because area_end does not coincide
-    // with region end. Check whether region covering last part of area is
-    // dirty.
-    if (marks & mask) {
-      if (visit_dirty_region(this, region_start, area_end, copy_object_func)) {
-        newmarks |= mask;
-      }
-    }
-  }
-
-  return newmarks;
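+// A map page consists of densely packed fixed-size Map objects, so an
+// address in the map space can hold a heap pointer only if its offset
+// within Map::kSize falls into the pointer-fields range.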
+bool IsAMapPointerAddress(Object** addr) {
+  uintptr_t a = reinterpret_cast<uintptr_t>(addr);
+  int mod = a % Map::kSize;
+  return mod >= Map::kPointerFieldsBeginOffset &&
+         mod < Map::kPointerFieldsEndOffset;
 }
 
 
+bool EverythingsAPointer(Object** addr) {
+  return true;
+}
 
-void Heap::IterateDirtyRegions(
-    PagedSpace* space,
-    DirtyRegionCallback visit_dirty_region,
-    ObjectSlotCallback copy_object_func,
-    ExpectedPageWatermarkState expected_page_watermark_state) {
 
-  PageIterator it(space, PageIterator::PAGES_IN_USE);
-
-  while (it.has_next()) {
-    Page* page = it.next();
-    uint32_t marks = page->GetRegionMarks();
-
-    if (marks != Page::kAllRegionsCleanMarks) {
-      Address start = page->ObjectAreaStart();
-
-      // Do not try to visit pointers beyond page allocation watermark.
-      // Page can contain garbage pointers there.
-      Address end;
-
-      if ((expected_page_watermark_state == WATERMARK_SHOULD_BE_VALID) ||
-          page->IsWatermarkValid()) {
-        end = page->AllocationWatermark();
-      } else {
-        end = page->CachedAllocationWatermark();
-      }
-
-      ASSERT(space == old_pointer_space_ ||
-             (space == map_space_ &&
-              ((page->ObjectAreaStart() - end) % Map::kSize == 0)));
-
-      page->SetRegionMarks(IterateDirtyRegions(marks,
-                                               start,
-                                               end,
-                                               visit_dirty_region,
-                                               copy_object_func));
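+// Walks the words in [current, limit), skipping free-space blocks and the
+// current linear allocation area, and verifies that every word pointing
+// into new space has a matching entry in the sorted store buffer.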
+static void CheckStoreBuffer(Heap* heap,
+                             Object** current,
+                             Object** limit,
+                             Object**** store_buffer_position,
+                             Object*** store_buffer_top,
+                             CheckStoreBufferFilter filter,
+                             Address special_garbage_start,
+                             Address special_garbage_end) {
+  Map* free_space_map = heap->free_space_map();
+  for ( ; current < limit; current++) {
+    Object* o = *current;
+    Address current_address = reinterpret_cast<Address>(current);
+    // Skip free space.
+    if (o == free_space_map) {
+      FreeSpace* free_space =
+          FreeSpace::cast(HeapObject::FromAddress(current_address));
+      int skip = free_space->Size();
+      ASSERT(current_address + skip <= reinterpret_cast<Address>(limit));
+      ASSERT(skip > 0);
+      current_address += skip - kPointerSize;
+      current = reinterpret_cast<Object**>(current_address);
+      continue;
     }
-
-    // Mark page watermark as invalid to maintain watermark validity invariant.
-    // See Page::FlipMeaningOfInvalidatedWatermarkFlag() for details.
-    page->InvalidateWatermark(true);
+    // Skip the current linear allocation space between top and limit, which
+    // is not marked with the free space map but can contain junk.
+    if (current_address == special_garbage_start &&
+        special_garbage_end != special_garbage_start) {
+      current_address = special_garbage_end - kPointerSize;
+      current = reinterpret_cast<Object**>(current_address);
+      continue;
+    }
+    if (!(*filter)(current)) continue;
+    ASSERT(current_address < special_garbage_start ||
+           current_address >= special_garbage_end);
+    ASSERT(reinterpret_cast<uintptr_t>(o) != kFreeListZapValue);
+    // We have to check that the pointer does not point into new space
+    // without trying to cast it to a heap object since the hash field of
+    // a string can contain values like 1 and 3 which are tagged null
+    // pointers.
+    if (!heap->InNewSpace(o)) continue;
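+    // Advance through the sorted store buffer to the first entry at or past
+    // this slot; no exact match means an unrecorded old-to-new pointer.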
+    while (**store_buffer_position < current &&
+           *store_buffer_position < store_buffer_top) {
+      (*store_buffer_position)++;
+    }
+    if (**store_buffer_position != current ||
+        *store_buffer_position == store_buffer_top) {
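+      // For debugging convenience, back up to the start of the containing
+      // object before flagging the missing entry.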
+      Object** obj_start = current;
+      while (!(*obj_start)->IsMap()) obj_start--;
+      UNREACHABLE();
+    }
   }
 }
 
 
+// Check that the store buffer contains all intergenerational pointers by
+// scanning a page and ensuring that all pointers to young space are in the
+// store buffer.
+void Heap::OldPointerSpaceCheckStoreBuffer() {
+  OldSpace* space = old_pointer_space();
+  PageIterator pages(space);
+
+  store_buffer()->SortUniq();
+
+  while (pages.has_next()) {
+    Page* page = pages.next();
+    Object** current = reinterpret_cast<Object**>(page->ObjectAreaStart());
+
+    Address end = page->ObjectAreaEnd();
+
+    Object*** store_buffer_position = store_buffer()->Start();
+    Object*** store_buffer_top = store_buffer()->Top();
+
+    Object** limit = reinterpret_cast<Object**>(end);
+    CheckStoreBuffer(this,
+                     current,
+                     limit,
+                     &store_buffer_position,
+                     store_buffer_top,
+                     &EverythingsAPointer,
+                     space->top(),
+                     space->limit());
+  }
+}
+
+
+void Heap::MapSpaceCheckStoreBuffer() {
+  MapSpace* space = map_space();
+  PageIterator pages(space);
+
+  store_buffer()->SortUniq();
+
+  while (pages.has_next()) {
+    Page* page = pages.next();
+    Object** current = reinterpret_cast<Object**>(page->ObjectAreaStart());
+
+    Address end = page->ObjectAreaEnd();
+
+    Object*** store_buffer_position = store_buffer()->Start();
+    Object*** store_buffer_top = store_buffer()->Top();
+
+    Object** limit = reinterpret_cast<Object**>(end);
+    CheckStoreBuffer(this,
+                     current,
+                     limit,
+                     &store_buffer_position,
+                     store_buffer_top,
+                     &IsAMapPointerAddress,
+                     space->top(),
+                     space->limit());
+  }
+}
+
+
+void Heap::LargeObjectSpaceCheckStoreBuffer() {
+  LargeObjectIterator it(lo_space());
+  for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
+    // We only have code, sequential strings, or fixed arrays in large
+    // object space, and only fixed arrays can possibly contain pointers to
+    // the young generation.
+    if (object->IsFixedArray()) {
+      Object*** store_buffer_position = store_buffer()->Start();
+      Object*** store_buffer_top = store_buffer()->Top();
+      Object** current = reinterpret_cast<Object**>(object->address());
+      Object** limit =
+          reinterpret_cast<Object**>(object->address() + object->Size());
+      CheckStoreBuffer(this,
+                       current,
+                       limit,
+                       &store_buffer_position,
+                       store_buffer_top,
+                       &EverythingsAPointer,
+                       NULL,
+                       NULL);
+    }
+  }
+}
+#endif
+
+
 void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
   IterateStrongRoots(v, mode);
   IterateWeakRoots(v, mode);
@@ -4941,8 +5015,7 @@
   // Iterate over the builtin code objects and code stubs in the
   // heap. Note that it is not necessary to iterate over code objects
   // on scavenge collections.
-  if (mode != VISIT_ALL_IN_SCAVENGE &&
-      mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
+  if (mode != VISIT_ALL_IN_SCAVENGE) {
     isolate_->builtins()->IterateBuiltins(v);
   }
   v->Synchronize("builtins");
@@ -4986,11 +5059,20 @@
 // and through the API, we should gracefully handle the case that the heap
 // size is not big enough to fit all the initial objects.
 bool Heap::ConfigureHeap(int max_semispace_size,
-                         int max_old_gen_size,
-                         int max_executable_size) {
+                         intptr_t max_old_gen_size,
+                         intptr_t max_executable_size) {
   if (HasBeenSetup()) return false;
 
-  if (max_semispace_size > 0) max_semispace_size_ = max_semispace_size;
+  if (max_semispace_size > 0) {
+    if (max_semispace_size < Page::kPageSize) {
+      max_semispace_size = Page::kPageSize;
+      if (FLAG_trace_gc) {
+        PrintF("Max semispace size cannot be less than %dkbytes",
+               Page::kPageSize >> 10);
+      }
+    }
+    max_semispace_size_ = max_semispace_size;
+  }
 
   if (Snapshot::IsEnabled()) {
     // If we are using a snapshot we always reserve the default amount
@@ -5000,6 +5082,10 @@
     // than the default reserved semispace size.
     if (max_semispace_size_ > reserved_semispace_size_) {
       max_semispace_size_ = reserved_semispace_size_;
+      if (FLAG_trace_gc) {
+        PrintF("Max semispace size cannot be more than %dkbytes",
+               reserved_semispace_size_ >> 10);
+      }
     }
   } else {
     // If we are not using snapshots we reserve space for the actual
@@ -5025,8 +5111,12 @@
   initial_semispace_size_ = Min(initial_semispace_size_, max_semispace_size_);
   external_allocation_limit_ = 10 * max_semispace_size_;
 
-  // The old generation is paged.
-  max_old_generation_size_ = RoundUp(max_old_generation_size_, Page::kPageSize);
+  // The old generation is paged and needs at least one page for each space.
+  int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
+  max_old_generation_size_ = Max(static_cast<intptr_t>(paged_space_count *
+                                                       Page::kPageSize),
+                                 RoundUp(max_old_generation_size_,
+                                         Page::kPageSize));
 
   configured_ = true;
   return true;
@@ -5034,9 +5124,9 @@
 
 
 bool Heap::ConfigureHeapDefault() {
-  return ConfigureHeap(FLAG_max_new_space_size / 2 * KB,
-                       FLAG_max_old_space_size * MB,
-                       FLAG_max_executable_size * MB);
+  return ConfigureHeap(static_cast<intptr_t>(FLAG_max_new_space_size / 2) * KB,
+                       static_cast<intptr_t>(FLAG_max_old_space_size) * MB,
+                       static_cast<intptr_t>(FLAG_max_executable_size) * MB);
 }
 
 
@@ -5064,7 +5154,7 @@
   *stats->os_error = OS::GetLastError();
       isolate()->memory_allocator()->Available();
   if (take_snapshot) {
-    HeapIterator iterator(HeapIterator::kFilterFreeListNodes);
+    HeapIterator iterator;
     for (HeapObject* obj = iterator.next();
          obj != NULL;
          obj = iterator.next()) {
@@ -5280,31 +5370,21 @@
   gc_initializer_mutex->Lock();
   static bool initialized_gc = false;
   if (!initialized_gc) {
     initialized_gc = true;
     InitializeScavengingVisitorsTables();
     NewSpaceScavenger::Initialize();
     MarkCompactCollector::Initialize();
   }
   gc_initializer_mutex->Unlock();
 
   MarkMapPointersAsEncoded(false);
 
-  // Setup memory allocator and reserve a chunk of memory for new
-  // space.  The chunk is double the size of the requested reserved
-  // new space size to ensure that we can find a pair of semispaces that
-  // are contiguous and aligned to their size.
+  // Setup memory allocator.
   if (!isolate_->memory_allocator()->Setup(MaxReserved(), MaxExecutableSize()))
       return false;
-  void* chunk =
-      isolate_->memory_allocator()->ReserveInitialChunk(
-          4 * reserved_semispace_size_);
-  if (chunk == NULL) return false;
 
-  // Align the pair of semispaces to their size, which must be a power
-  // of 2.
-  Address new_space_start =
-      RoundUp(reinterpret_cast<byte*>(chunk), 2 * reserved_semispace_size_);
-  if (!new_space_.Setup(new_space_start, 2 * reserved_semispace_size_)) {
+  // Setup new space.
+  if (!new_space_.Setup(reserved_semispace_size_, max_semispace_size_)) {
     return false;
   }
 
@@ -5315,7 +5395,7 @@
                    OLD_POINTER_SPACE,
                    NOT_EXECUTABLE);
   if (old_pointer_space_ == NULL) return false;
-  if (!old_pointer_space_->Setup(NULL, 0)) return false;
+  if (!old_pointer_space_->Setup()) return false;
 
   // Initialize old data space.
   old_data_space_ =
@@ -5324,7 +5404,7 @@
                    OLD_DATA_SPACE,
                    NOT_EXECUTABLE);
   if (old_data_space_ == NULL) return false;
-  if (!old_data_space_->Setup(NULL, 0)) return false;
+  if (!old_data_space_->Setup()) return false;
 
   // Initialize the code space, set its maximum capacity to the old
   // generation size. It needs executable memory.
@@ -5339,21 +5419,20 @@
   code_space_ =
       new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
   if (code_space_ == NULL) return false;
-  if (!code_space_->Setup(NULL, 0)) return false;
+  if (!code_space_->Setup()) return false;
 
   // Initialize map space.
-  map_space_ = new MapSpace(this, FLAG_use_big_map_space
-      ? max_old_generation_size_
-      : MapSpace::kMaxMapPageIndex * Page::kPageSize,
-      FLAG_max_map_space_pages,
-      MAP_SPACE);
+  map_space_ = new MapSpace(this,
+                            max_old_generation_size_,
+                            FLAG_max_map_space_pages,
+                            MAP_SPACE);
   if (map_space_ == NULL) return false;
-  if (!map_space_->Setup(NULL, 0)) return false;
+  if (!map_space_->Setup()) return false;
 
   // Initialize global property cell space.
   cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
   if (cell_space_ == NULL) return false;
-  if (!cell_space_->Setup(NULL, 0)) return false;
+  if (!cell_space_->Setup()) return false;
 
   // The large object code space may contain code or data.  We set the memory
   // to be non-executable here for safety, but this means we need to enable it
@@ -5361,7 +5440,6 @@
   lo_space_ = new LargeObjectSpace(this, LO_SPACE);
   if (lo_space_ == NULL) return false;
   if (!lo_space_->Setup()) return false;
-
   if (create_heap_objects) {
     // Create initial maps.
     if (!CreateInitialMaps()) return false;
@@ -5376,6 +5454,8 @@
   LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
   LOG(isolate_, IntPtrTEvent("heap-available", Available()));
 
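+  // The store buffer records old-to-new pointers and must be set up before
+  // the first scavenge can run.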
+  store_buffer()->Setup();
+
   return true;
 }
 
@@ -5402,7 +5482,6 @@
     PrintF("\n\n");
     PrintF("gc_count=%d ", gc_count_);
     PrintF("mark_sweep_count=%d ", ms_count_);
-    PrintF("mark_compact_count=%d ", mc_count_);
     PrintF("max_gc_pause=%d ", get_max_gc_pause());
     PrintF("min_in_mutator=%d ", get_min_in_mutator());
     PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ",
@@ -5452,6 +5531,9 @@
     lo_space_ = NULL;
   }
 
+  store_buffer()->TearDown();
+  incremental_marking()->TearDown();
+
   isolate_->memory_allocator()->TearDown();
 
 #ifdef DEBUG
@@ -5465,7 +5547,7 @@
   // Try to shrink all paged spaces.
   PagedSpaces spaces;
   for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
-    space->Shrink();
+    space->ReleaseAllUnusedPages();
 }
 
 
@@ -5668,45 +5750,6 @@
 };
 
 
-class FreeListNodesFilter : public HeapObjectsFilter {
- public:
-  FreeListNodesFilter() {
-    MarkFreeListNodes();
-  }
-
-  bool SkipObject(HeapObject* object) {
-    if (object->IsMarked()) {
-      object->ClearMark();
-      return true;
-    } else {
-      return false;
-    }
-  }
-
- private:
-  void MarkFreeListNodes() {
-    Heap* heap = HEAP;
-    heap->old_pointer_space()->MarkFreeListNodes();
-    heap->old_data_space()->MarkFreeListNodes();
-    MarkCodeSpaceFreeListNodes(heap);
-    heap->map_space()->MarkFreeListNodes();
-    heap->cell_space()->MarkFreeListNodes();
-  }
-
-  void MarkCodeSpaceFreeListNodes(Heap* heap) {
-    // For code space, using FreeListNode::IsFreeListNode is OK.
-    HeapObjectIterator iter(heap->code_space());
-    for (HeapObject* obj = iter.next_object();
-         obj != NULL;
-         obj = iter.next_object()) {
-      if (FreeListNode::IsFreeListNode(obj)) obj->SetMark();
-    }
-  }
-
-  AssertNoAllocation no_alloc;
-};
-
-
 class UnreachableObjectsFilter : public HeapObjectsFilter {
  public:
   UnreachableObjectsFilter() {
@@ -5714,8 +5757,8 @@
   }
 
   bool SkipObject(HeapObject* object) {
-    if (object->IsMarked()) {
-      object->ClearMark();
+    if (IntrusiveMarking::IsMarked(object)) {
+      IntrusiveMarking::ClearMark(object);
       return true;
     } else {
       return false;
@@ -5731,8 +5774,8 @@
       for (Object** p = start; p < end; p++) {
         if (!(*p)->IsHeapObject()) continue;
         HeapObject* obj = HeapObject::cast(*p);
-        if (obj->IsMarked()) {
-          obj->ClearMark();
+        if (IntrusiveMarking::IsMarked(obj)) {
+          IntrusiveMarking::ClearMark(obj);
           list_.Add(obj);
         }
       }
@@ -5754,7 +5797,7 @@
     for (HeapObject* obj = iterator.next();
          obj != NULL;
          obj = iterator.next()) {
-      obj->SetMark();
+      IntrusiveMarking::SetMark(obj);
     }
     UnmarkingVisitor visitor;
     HEAP->IterateRoots(&visitor, VISIT_ALL);
@@ -5788,10 +5831,11 @@
 void HeapIterator::Init() {
   // Start the iteration.
   space_iterator_ = filtering_ == kNoFiltering ? new SpaceIterator :
-      new SpaceIterator(MarkCompactCollector::SizeOfMarkedObject);
+      new SpaceIterator(Isolate::Current()->heap()->
+                        GcSafeSizeOfOldObjectFunction());
   switch (filtering_) {
     case kFilterFreeListNodes:
-      filter_ = new FreeListNodesFilter;
+      // TODO(gc): Not handled.
       break;
     case kFilterUnreachable:
       filter_ = new UnreachableObjectsFilter;
@@ -5928,6 +5972,11 @@
 }
 
 
+static bool SafeIsGlobalContext(HeapObject* obj) {
+  return obj->map() == obj->GetHeap()->raw_unchecked_global_context_map();
+}
+
+
 void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
   if (!(*p)->IsHeapObject()) return;
 
@@ -5946,7 +5995,7 @@
     return;
   }
 
-  bool is_global_context = obj->IsGlobalContext();
+  bool is_global_context = SafeIsGlobalContext(obj);
 
   // not visited yet
   Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));
@@ -6054,7 +6103,7 @@
   for (OldSpace* space = spaces.next();
        space != NULL;
        space = spaces.next()) {
-    holes_size += space->Waste() + space->AvailableFree();
+    holes_size += space->Waste() + space->Available();
   }
   return holes_size;
 }
@@ -6065,17 +6114,10 @@
       start_size_(0),
       gc_count_(0),
       full_gc_count_(0),
-      is_compacting_(false),
-      marked_count_(0),
       allocated_since_last_gc_(0),
       spent_in_mutator_(0),
       promoted_objects_size_(0),
       heap_(heap) {
-  // These two fields reflect the state of the previous full collection.
-  // Set them before they are changed by the collector.
-  previous_has_compacted_ = heap_->mark_compact_collector_.HasCompacted();
-  previous_marked_count_ =
-      heap_->mark_compact_collector_.previous_marked_count();
   if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
   start_time_ = OS::TimeCurrentMillis();
   start_size_ = heap_->SizeOfObjects();
@@ -6092,6 +6134,14 @@
   if (heap_->last_gc_end_timestamp_ > 0) {
     spent_in_mutator_ = Max(start_time_ - heap_->last_gc_end_timestamp_, 0.0);
   }
+
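+  // Snapshot the incremental marking step counters so the tracer can report
+  // how much marking work was interleaved with this collection.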
+  steps_count_ = heap_->incremental_marking()->steps_count();
+  steps_took_ = heap_->incremental_marking()->steps_took();
+  longest_step_ = heap_->incremental_marking()->longest_step();
+  steps_count_since_last_gc_ =
+      heap_->incremental_marking()->steps_count_since_last_gc();
+  steps_took_since_last_gc_ =
+      heap_->incremental_marking()->steps_took_since_last_gc();
 }
 
 
@@ -6126,7 +6176,21 @@
            SizeOfHeapObjects());
 
     if (external_time > 0) PrintF("%d / ", external_time);
-    PrintF("%d ms.\n", time);
+    PrintF("%d ms", time);
+    if (steps_count_ > 0) {
+      if (collector_ == SCAVENGER) {
+        PrintF(" (+ %d ms in %d steps since last GC)",
+               static_cast<int>(steps_took_since_last_gc_),
+               steps_count_since_last_gc_);
+      } else {
+        PrintF(" (+ %d ms in %d steps since start of marking, "
+                   "biggest step %f ms)",
+               static_cast<int>(steps_took_),
+               steps_count_,
+               longest_step_);
+      }
+    }
+    PrintF(".\n");
   } else {
     PrintF("pause=%d ", time);
     PrintF("mutator=%d ",
@@ -6138,8 +6202,7 @@
         PrintF("s");
         break;
       case MARK_COMPACTOR:
-        PrintF("%s",
-               heap_->mark_compact_collector_.HasCompacted() ? "mc" : "ms");
+        PrintF("ms");
         break;
       default:
         UNREACHABLE();
@@ -6161,6 +6224,14 @@
     PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_);
     PrintF("promoted=%" V8_PTR_PREFIX "d ", promoted_objects_size_);
 
+    if (collector_ == SCAVENGER) {
+      PrintF("stepscount=%d ", steps_count_since_last_gc_);
+      PrintF("stepstook=%d ", static_cast<int>(steps_took_since_last_gc_));
+    } else {
+      PrintF("stepscount=%d ", steps_count_);
+      PrintF("stepstook=%d ", static_cast<int>(steps_took_));
+    }
+
     PrintF("\n");
   }
 
@@ -6173,8 +6244,7 @@
     case SCAVENGER:
       return "Scavenge";
     case MARK_COMPACTOR:
-      return heap_->mark_compact_collector_.HasCompacted() ? "Mark-compact"
-                                                           : "Mark-sweep";
+      return "Mark-sweep";
   }
   return "Unknown GC";
 }
@@ -6281,4 +6351,52 @@
 }
 
 
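+// Chunks are not freed immediately; they are queued so that stale store
+// buffer entries pointing into them can be filtered out first (see
+// FreeQueuedChunks below).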
+void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
+  chunk->set_next_chunk(chunks_queued_for_free_);
+  chunks_queued_for_free_ = chunk;
+}
+
+
+void Heap::FreeQueuedChunks() {
+  if (chunks_queued_for_free_ == NULL) return;
+  MemoryChunk* next;
+  MemoryChunk* chunk;
+  for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
+    next = chunk->next_chunk();
+    chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
+
+    if (chunk->owner()->identity() == LO_SPACE) {
+      // StoreBuffer::Filter relies on MemoryChunk::FromAnyPointerAddress.
+      // If FromAnyPointerAddress encounters a slot that belongs to a large
+      // chunk queued for deletion it will fail to find the chunk because
+      // it tries to perform a search in the list of pages owned by the large
+      // object space, and queued chunks were detached from that list.
+      // To work around this we split the large chunk into normal, kPageSize
+      // aligned pieces and initialize the owner field and flags of every
+      // piece.
+      // If FromAnyPointerAddress encounters a slot that belongs to one of
+      // these smaller pieces it will treat it as a slot on a normal Page.
+      MemoryChunk* inner = MemoryChunk::FromAddress(
+          chunk->address() + Page::kPageSize);
+      MemoryChunk* inner_last = MemoryChunk::FromAddress(
+          chunk->address() + chunk->size() - 1);
+      while (inner <= inner_last) {
+        // Size of a large chunk is always a multiple of
+        // OS::AllocationAlignment() so there is always
+        // enough space for a fake MemoryChunk header.
+        inner->set_owner(lo_space());
+        inner->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
+        inner = MemoryChunk::FromAddress(
+            inner->address() + Page::kPageSize);
+      }
+    }
+  }
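+  // Purge store buffer entries that point into the chunks being freed
+  // before their memory is returned to the allocator.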
+  isolate_->heap()->store_buffer()->Compact();
+  isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
+  for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
+    next = chunk->next_chunk();
+    isolate_->memory_allocator()->Free(chunk);
+  }
+  chunks_queued_for_free_ = NULL;
+}
+
 } }  // namespace v8::internal