Version 3.11.0

Fixed compose-discard crasher from r11524 (issue 2123).

Activated new global semantics by default. Global variables can now shadow properties of the global object (ES5.1 erratum).
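
A rough sketch of the user-visible effect, for illustration only (run in V8's
d8 shell, where print() is available; the property name "foo" is arbitrary):

    // Under the new semantics a global var declaration defines an own
    // property on the global object, shadowing an inherited one.
    Object.prototype.foo = "inherited";
    var foo = "own";
    print(this.hasOwnProperty("foo"));  // true
    print(foo);                         // "own"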

Properly set ElementsKind of empty FAST_DOUBLE_ELEMENTS arrays when transitioning (Chromium issue 117409).

Made Error.prototype.name writable again, as required by the spec and the web (Chromium issue 69187).
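
A minimal sketch of what this allows again (run in V8's d8 shell, where
print() is available; the name "CustomError" is arbitrary):

    // Error.prototype.name is an ordinary writable data property, so code
    // that renames built-in error types keeps working.
    Error.prototype.name = "CustomError";
    print(new Error("boom").name);      // "CustomError"
    print(String(new Error("boom")));   // "CustomError: boom"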

Implemented map collection with incremental marking (issue 1465).

Regexp: Fixed overflow in min-match-length calculation (Chromium issue 126412).

MIPS: Fixed illegal instruction use on Loongson in code for Math.random() (issue 2115).

Fixed crash bug in VisitChoice (Chromium issue 126272).

Fixed unsigned-Smi check in MappedArgumentsLookup (Chromium issue 126414).

Fixed LiveEdit for functions with no locals (issue 825).

Fixed register clobbering in LoadIC for interceptors (Chromium issue 125988).

Implemented clearing of CompareICs (issue 2102).

Performance and stability improvements on all platforms.

git-svn-id: http://v8.googlecode.com/svn/trunk@11551 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/ChangeLog b/ChangeLog
index 09c0237..e15786e 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,39 @@
+2012-05-11: Version 3.11.0
+
+        Fixed compose-discard crasher from r11524 (issue 2123).
+
+        Activated new global semantics by default. Global variables can
+        now shadow properties of the global object (ES5.1 erratum).
+
+        Properly set ElementsKind of empty FAST_DOUBLE_ELEMENTS arrays when
+        transitioning (Chromium issue 117409).
+
+        Made Error.prototype.name writable again, as required by the spec and
+        the web (Chromium issue 69187).
+
+        Implemented map collection with incremental marking (issue 1465).
+
+        Regexp: Fixed overflow in min-match-length calculation
+        (Chromium issue 126412).
+
+        MIPS: Fixed illegal instruction use on Loongson in code for
+        Math.random() (issue 2115).
+
+        Fixed crash bug in VisitChoice (Chromium issue 126272).
+
+        Fixed unsigned-Smi check in MappedArgumentsLookup
+        (Chromium issue 126414).
+
+        Fixed LiveEdit for functions with no locals (issue 825).
+
+        Fixed register clobbering in LoadIC for interceptors
+        (Chromium issue 125988).
+
+        Implemented clearing of CompareICs (issue 2102).
+
+        Performance and stability improvements on all platforms.
+
+
 2012-05-03: Version 3.10.8
 
         Enabled MIPS cross-compilation.
diff --git a/DEPS b/DEPS
new file mode 100644
index 0000000..e50d1d2
--- /dev/null
+++ b/DEPS
@@ -0,0 +1,27 @@
+# Note: The buildbots evaluate this file with CWD set to the parent
+# directory and assume that the root of the checkout is in ./v8/, so
+# all paths in here must match this assumption.
+
+deps = {
+  # Remember to keep the revision in sync with the Makefile.
+  "v8/build/gyp":
+    "http://gyp.googlecode.com/svn/trunk@1282",
+}
+
+deps_os = {
+  "win": {
+    "v8/third_party/cygwin":
+      "http://src.chromium.org/svn/trunk/deps/third_party/cygwin@66844",
+
+    "v8/third_party/python_26":
+      "http://src.chromium.org/svn/trunk/tools/third_party/python_26@89111",
+  }
+}
+
+hooks = [
+  {
+    # A change to a .gyp, .gypi, or to GYP itself should run the generator.
+    "pattern": ".",
+    "action": ["python", "v8/build/gyp_v8"],
+  },
+]
diff --git a/Makefile b/Makefile
index 277c1f7..fbca566 100644
--- a/Makefile
+++ b/Makefile
@@ -137,6 +137,12 @@
 # Target definitions. "all" is the default.
 all: $(MODES)
 
+# Special target for the buildbots to use. Depends on $(OUTDIR)/Makefile
+# having been created before.
+buildbot:
+	$(MAKE) -C "$(OUTDIR)" BUILDTYPE=$(BUILDTYPE) \
+	        builddir="$(abspath $(OUTDIR))/$(BUILDTYPE)"
+
 # Compile targets. MODES and ARCHES are convenience targets.
 .SECONDEXPANSION:
 $(MODES): $(addsuffix .$$@,$(DEFAULT_ARCHES))
diff --git a/SConstruct b/SConstruct
index 34d0efc..b0d1344 100644
--- a/SConstruct
+++ b/SConstruct
@@ -1601,4 +1601,17 @@
   pass
 
 
+def WarnAboutDeprecation():
+  print """
+#######################################################
+#  WARNING: Building V8 with SCons is deprecated and  #
+#  will not work much longer. Please switch to using  #
+#  the GYP-based build now. Instructions are at       #
+#  http://code.google.com/p/v8/wiki/BuildingWithGYP.  #
+#######################################################
+  """
+
+WarnAboutDeprecation()
+import atexit
+atexit.register(WarnAboutDeprecation)
 Build()
diff --git a/build/common.gypi b/build/common.gypi
index 45195f1..4a9d45d 100644
--- a/build/common.gypi
+++ b/build/common.gypi
@@ -280,6 +280,13 @@
           },
         },
       }],
+      ['OS=="win" and v8_target_arch=="x64"', {
+        'msvs_settings': {
+          'VCLinkerTool': {
+            'StackReserveSize': '2097152',
+          },
+        },
+      }],
       ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \
          or OS=="netbsd"', {
         'conditions': [
@@ -322,10 +329,6 @@
           },
           'VCLinkerTool': {
             'LinkIncremental': '2',
-            # For future reference, the stack size needs to be increased
-            # when building for Windows 64-bit, otherwise some test cases
-            # can cause stack overflow.
-            # 'StackReserveSize': '297152',
           },
         },
         'conditions': [
@@ -407,12 +410,7 @@
               'VCLinkerTool': {
                 'LinkIncremental': '1',
                 'OptimizeReferences': '2',
-                'OptimizeForWindows98': '1',
                 'EnableCOMDATFolding': '2',
-                # For future reference, the stack size needs to be
-                # increased when building for Windows 64-bit, otherwise
-                # some test cases can cause stack overflow.
-                # 'StackReserveSize': '297152',
               },
             },
           }],  # OS=="win"
diff --git a/build/gyp_v8 b/build/gyp_v8
index a926fe8..345f777 100755
--- a/build/gyp_v8
+++ b/build/gyp_v8
@@ -38,6 +38,11 @@
 script_dir = os.path.dirname(__file__)
 v8_root = os.path.normpath(os.path.join(script_dir, os.pardir))
 
+if __name__ == '__main__':
+  os.chdir(v8_root)
+  script_dir = os.path.dirname(__file__)
+  v8_root = '.'
+
 sys.path.insert(0, os.path.join(v8_root, 'tools'))
 import utils
 
@@ -93,7 +98,7 @@
       result.append(path)
 
   # Always include standalone.gypi
-  AddInclude(os.path.join(script_dir, 'standalone.gypi'))
+  AddInclude(os.path.join(v8_root, 'build', 'standalone.gypi'))
 
   # Optionally add supplemental .gypi files if present.
   supplements = glob.glob(os.path.join(v8_root, '*', 'supplement.gypi'))
@@ -135,7 +140,10 @@
       # path separators even on Windows due to the use of shlex.split().
       args.extend(shlex.split(gyp_file))
     else:
-      args.append(os.path.join(script_dir, 'all.gyp'))
+      # Note that this must not start with "./" or things break.
+      # So we rely on having done os.chdir(v8_root) above and use the
+      # relative path.
+      args.append(os.path.join('build', 'all.gyp'))
 
   args.extend(['-I' + i for i in additional_include_files(args)])
 
@@ -156,28 +164,6 @@
 
   # Generate for the architectures supported on the given platform.
   gyp_args = list(args)
-  target_arch = None
-  for p in gyp_args:
-    if p.find('-Dtarget_arch=') == 0:
-      target_arch = p
-  if target_arch is None:
-    gyp_args.append('-Dtarget_arch=ia32')
   if utils.GuessOS() == 'linux':
-    gyp_args.append('-S.ia32')
+    gyp_args.append('--generator-output=out')
   run_gyp(gyp_args)
-
-  if utils.GuessOS() == 'linux':
-    gyp_args = list(args)
-    gyp_args.append('-Dtarget_arch=x64')
-    gyp_args.append('-S.x64')
-    run_gyp(gyp_args)
-
-    gyp_args = list(args)
-    gyp_args.append('-Dv8_target_arch=arm')
-    gyp_args.append('-S.arm')
-    run_gyp(gyp_args)
-
-    gyp_args = list(args)
-    gyp_args.append('-Dv8_target_arch=mips')
-    gyp_args.append('-S.mips')
-    run_gyp(gyp_args)
diff --git a/build/standalone.gypi b/build/standalone.gypi
index dad05ae..ebdf557 100644
--- a/build/standalone.gypi
+++ b/build/standalone.gypi
@@ -37,8 +37,9 @@
       'variables': {
         'variables': {
           'conditions': [
-            ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd"', {
-              # This handles the Linux platforms we generally deal with.
+            ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or \
+               OS=="netbsd" or OS=="mac"', {
+              # This handles the Unix platforms we generally deal with.
               # Anything else gets passed through, which probably won't work
               # very well; such hosts should pass an explicit target_arch
               # to gyp.
@@ -46,7 +47,8 @@
                 '<!(uname -m | sed -e "s/i.86/ia32/;\
                   s/x86_64/x64/;s/amd64/x64/;s/arm.*/arm/;s/mips.*/mips/")',
             }, {
-              # OS!="linux" and OS!="freebsd" and OS!="openbsd" and OS!="netbsd"
+              # OS!="linux" and OS!="freebsd" and OS!="openbsd" and
+              # OS!="netbsd" and OS!="mac"
               'host_arch%': 'ia32',
             }],
           ],
@@ -169,6 +171,9 @@
       },
     }],  # OS=="win"
     ['OS=="mac"', {
+      'xcode_settings': {
+        'SYMROOT': '<(DEPTH)/xcodebuild',
+      },
       'target_defaults': {
         'xcode_settings': {
           'ALWAYS_SEARCH_USER_PATHS': 'NO',
@@ -188,6 +193,7 @@
           'GCC_WARN_ABOUT_MISSING_NEWLINE': 'YES',  # -Wnewline-eof
           'MACOSX_DEPLOYMENT_TARGET': '10.4',       # -mmacosx-version-min=10.4
           'PREBINDING': 'NO',                       # No -Wl,-prebind
+          'SYMROOT': '<(DEPTH)/xcodebuild',
           'USE_HEADERMAP': 'NO',
           'OTHER_CFLAGS': [
             '-fno-strict-aliasing',
diff --git a/src/api.cc b/src/api.cc
index 0bc93c2..52a84ed 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -6063,7 +6063,7 @@
   i::Isolate* isolate = i::Isolate::Current();
   IsDeadCheck(isolate, "v8::HeapSnapshot::GetChild");
   return reinterpret_cast<const HeapGraphEdge*>(
-      &ToInternal(this)->children()[index]);
+      ToInternal(this)->children()[index]);
 }
 
 
@@ -6157,7 +6157,7 @@
 int HeapSnapshot::GetNodesCount() const {
   i::Isolate* isolate = i::Isolate::Current();
   IsDeadCheck(isolate, "v8::HeapSnapshot::GetNodesCount");
-  return ToInternal(this)->entries()->length();
+  return ToInternal(this)->entries().length();
 }
 
 
@@ -6165,7 +6165,7 @@
   i::Isolate* isolate = i::Isolate::Current();
   IsDeadCheck(isolate, "v8::HeapSnapshot::GetNode");
   return reinterpret_cast<const HeapGraphNode*>(
-      ToInternal(this)->entries()->at(index));
+      &ToInternal(this)->entries().at(index));
 }
 
 
diff --git a/src/arm/debug-arm.cc b/src/arm/debug-arm.cc
index 96139a2..3e7a1e9 100644
--- a/src/arm/debug-arm.cc
+++ b/src/arm/debug-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -125,6 +125,8 @@
                      Assembler::kDebugBreakSlotInstructions);
 }
 
+const bool Debug::FramePaddingLayout::kIsSupported = false;
+
 
 #define __ ACCESS_MASM(masm)
 
diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc
index c88c257..c12c167 100644
--- a/src/arm/ic-arm.cc
+++ b/src/arm/ic-arm.cc
@@ -1690,12 +1690,12 @@
 
   // Activate inlined smi code.
   if (previous_state == UNINITIALIZED) {
-    PatchInlinedSmiCode(address());
+    PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
   }
 }
 
 
-void PatchInlinedSmiCode(Address address) {
+void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
   Address cmp_instruction_address =
       address + Assembler::kCallTargetAddressOffset;
 
@@ -1729,34 +1729,31 @@
   Instr instr_at_patch = Assembler::instr_at(patch_address);
   Instr branch_instr =
       Assembler::instr_at(patch_address + Instruction::kInstrSize);
-  ASSERT(Assembler::IsCmpRegister(instr_at_patch));
-  ASSERT_EQ(Assembler::GetRn(instr_at_patch).code(),
-            Assembler::GetRm(instr_at_patch).code());
+  // This is patching a conditional "jump if not smi/jump if smi" site.
+  // Enabling by changing from
+  //   cmp rx, rx
+  //   b eq/ne, <target>
+  // to
+  //   tst rx, #kSmiTagMask
+  //   b ne/eq, <target>
+  // and vice-versa to be disabled again.
+  CodePatcher patcher(patch_address, 2);
+  Register reg = Assembler::GetRn(instr_at_patch);
+  if (check == ENABLE_INLINED_SMI_CHECK) {
+    ASSERT(Assembler::IsCmpRegister(instr_at_patch));
+    ASSERT_EQ(Assembler::GetRn(instr_at_patch).code(),
+              Assembler::GetRm(instr_at_patch).code());
+    patcher.masm()->tst(reg, Operand(kSmiTagMask));
+  } else {
+    ASSERT(check == DISABLE_INLINED_SMI_CHECK);
+    ASSERT(Assembler::IsTstImmediate(instr_at_patch));
+    patcher.masm()->cmp(reg, reg);
+  }
   ASSERT(Assembler::IsBranch(branch_instr));
   if (Assembler::GetCondition(branch_instr) == eq) {
-    // This is patching a "jump if not smi" site to be active.
-    // Changing
-    //   cmp rx, rx
-    //   b eq, <target>
-    // to
-    //   tst rx, #kSmiTagMask
-    //   b ne, <target>
-    CodePatcher patcher(patch_address, 2);
-    Register reg = Assembler::GetRn(instr_at_patch);
-    patcher.masm()->tst(reg, Operand(kSmiTagMask));
     patcher.EmitCondition(ne);
   } else {
     ASSERT(Assembler::GetCondition(branch_instr) == ne);
-    // This is patching a "jump if smi" site to be active.
-    // Changing
-    //   cmp rx, rx
-    //   b ne, <target>
-    // to
-    //   tst rx, #kSmiTagMask
-    //   b eq, <target>
-    CodePatcher patcher(patch_address, 2);
-    Register reg = Assembler::GetRn(instr_at_patch);
-    patcher.masm()->tst(reg, Operand(kSmiTagMask));
     patcher.EmitCondition(eq);
   }
 }
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index 79b56fc..bf11ab9 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -2587,42 +2587,38 @@
   Register object = ToRegister(instr->object());
   Register result = ToRegister(instr->result());
   Register scratch = scratch0();
+
   int map_count = instr->hydrogen()->types()->length();
+  bool need_generic = instr->hydrogen()->need_generic();
+
+  if (map_count == 0 && !need_generic) {
+    DeoptimizeIf(al, instr->environment());
+    return;
+  }
   Handle<String> name = instr->hydrogen()->name();
-  if (map_count == 0) {
-    ASSERT(instr->hydrogen()->need_generic());
-    __ mov(r2, Operand(name));
-    Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-    CallCode(ic, RelocInfo::CODE_TARGET, instr);
-  } else {
-    Label done;
-    __ ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
-    for (int i = 0; i < map_count - 1; ++i) {
-      Handle<Map> map = instr->hydrogen()->types()->at(i);
+  Label done;
+  __ ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+  for (int i = 0; i < map_count; ++i) {
+    bool last = (i == map_count - 1);
+    Handle<Map> map = instr->hydrogen()->types()->at(i);
+    __ cmp(scratch, Operand(map));
+    if (last && !need_generic) {
+      DeoptimizeIf(ne, instr->environment());
+      EmitLoadFieldOrConstantFunction(result, object, map, name);
+    } else {
       Label next;
-      __ cmp(scratch, Operand(map));
       __ b(ne, &next);
       EmitLoadFieldOrConstantFunction(result, object, map, name);
       __ b(&done);
       __ bind(&next);
     }
-    Handle<Map> map = instr->hydrogen()->types()->last();
-    __ cmp(scratch, Operand(map));
-    if (instr->hydrogen()->need_generic()) {
-      Label generic;
-      __ b(ne, &generic);
-      EmitLoadFieldOrConstantFunction(result, object, map, name);
-      __ b(&done);
-      __ bind(&generic);
-      __ mov(r2, Operand(name));
-      Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-      CallCode(ic, RelocInfo::CODE_TARGET, instr);
-    } else {
-      DeoptimizeIf(ne, instr->environment());
-      EmitLoadFieldOrConstantFunction(result, object, map, name);
-    }
-    __ bind(&done);
   }
+  if (need_generic) {
+    __ mov(r2, Operand(name));
+    Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+    CallCode(ic, RelocInfo::CODE_TARGET, instr);
+  }
+  __ bind(&done);
 }
 
 
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 42c9961..4da2fec 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -3738,7 +3738,7 @@
     : address_(address),
       instructions_(instructions),
       size_(instructions * Assembler::kInstrSize),
-      masm_(Isolate::Current(), address, size_ + Assembler::kGap) {
+      masm_(NULL, address, size_ + Assembler::kGap) {
   // Create a new macro assembler pointing to the address of the code to patch.
   // The size is adjusted with kGap in order for the assembler to generate size
   // bytes of instructions without failing with buffer size constraints.
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index 40ee585..49c0982 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -1273,12 +1273,19 @@
                                           name, miss);
     ASSERT(holder_reg.is(receiver) || holder_reg.is(scratch1));
 
+    // Preserve the receiver register explicitly whenever it is different from
+    // the holder and it is needed should the interceptor return without any
+    // result. The CALLBACKS case needs the receiver to be passed into C++ code,
+    // the FIELD case might cause a miss during the prototype check.
+    bool must_perform_prototype_check = *interceptor_holder != lookup->holder();
+    bool must_preserve_receiver_reg = !receiver.is(holder_reg) &&
+        (lookup->type() == CALLBACKS || must_perform_prototype_check);
+
     // Save necessary data before invoking an interceptor.
     // Requires a frame to make GC aware of pushed pointers.
     {
       FrameScope frame_scope(masm(), StackFrame::INTERNAL);
-      if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
-        // CALLBACKS case needs a receiver to be passed into C++ callback.
+      if (must_preserve_receiver_reg) {
         __ Push(receiver, holder_reg, name_reg);
       } else {
         __ Push(holder_reg, name_reg);
@@ -1303,14 +1310,14 @@
       __ bind(&interceptor_failed);
       __ pop(name_reg);
       __ pop(holder_reg);
-      if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+      if (must_preserve_receiver_reg) {
         __ pop(receiver);
       }
       // Leave the internal frame.
     }
     // Check that the maps from interceptor's holder to lookup's holder
     // haven't changed.  And load lookup's holder into |holder| register.
-    if (*interceptor_holder != lookup->holder()) {
+    if (must_perform_prototype_check) {
       holder_reg = CheckPrototypes(interceptor_holder,
                                    holder_reg,
                                    Handle<JSObject>(lookup->holder()),
diff --git a/src/ast.cc b/src/ast.cc
index 6ee48d5..6f9fd7a 100644
--- a/src/ast.cc
+++ b/src/ast.cc
@@ -962,6 +962,14 @@
 }
 
 
+static int IncreaseBy(int previous, int increase) {
+  if (RegExpTree::kInfinity - previous < increase) {
+    return RegExpTree::kInfinity;
+  } else {
+    return previous + increase;
+  }
+}
+
 RegExpAlternative::RegExpAlternative(ZoneList<RegExpTree*>* nodes)
     : nodes_(nodes) {
   ASSERT(nodes->length() > 1);
@@ -969,13 +977,10 @@
   max_match_ = 0;
   for (int i = 0; i < nodes->length(); i++) {
     RegExpTree* node = nodes->at(i);
-    min_match_ += node->min_match();
+    int node_min_match = node->min_match();
+    min_match_ = IncreaseBy(min_match_, node_min_match);
     int node_max_match = node->max_match();
-    if (kInfinity - max_match_ < node_max_match) {
-      max_match_ = kInfinity;
-    } else {
-      max_match_ += node->max_match();
-    }
+    max_match_ = IncreaseBy(max_match_, node_max_match);
   }
 }
 
diff --git a/src/builtins.cc b/src/builtins.cc
index 0f79510..6d1c6a9 100644
--- a/src/builtins.cc
+++ b/src/builtins.cc
@@ -412,12 +412,19 @@
   HeapObject* elms = array->elements();
   Map* map = elms->map();
   if (map == heap->fixed_array_map()) {
-    if (args == NULL || !array->HasFastSmiOnlyElements()) {
+    if (array->HasFastElements()) return elms;
+    if (args == NULL) {
+      if (array->HasFastDoubleElements()) {
+        ASSERT(elms == heap->empty_fixed_array());
+        MaybeObject* maybe_transition =
+            array->TransitionElementsKind(FAST_ELEMENTS);
+        if (maybe_transition->IsFailure()) return maybe_transition;
+      }
       return elms;
     }
   } else if (map == heap->fixed_cow_array_map()) {
     MaybeObject* maybe_writable_result = array->EnsureWritableFastElements();
-    if (args == NULL || !array->HasFastSmiOnlyElements() ||
+    if (args == NULL || array->HasFastElements() ||
         maybe_writable_result->IsFailure()) {
       return maybe_writable_result;
     }
diff --git a/src/code-stubs.h b/src/code-stubs.h
index b67e961..5c87178 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -498,6 +498,7 @@
 
   virtual void FinishCode(Handle<Code> code) {
     code->set_compare_state(state_);
+    code->set_compare_operation(op_);
   }
 
   virtual CodeStub::Major MajorKey() { return CompareIC; }
diff --git a/src/debug.cc b/src/debug.cc
index 99256ba..9efb5c3 100644
--- a/src/debug.cc
+++ b/src/debug.cc
@@ -892,6 +892,16 @@
 }
 
 
+void Debug::PutValuesOnStackAndDie(int start,
+                                   Address c_entry_fp,
+                                   Address last_fp,
+                                   Address larger_fp,
+                                   int count,
+                                   int end) {
+  OS::Abort();
+}
+
+
 Object* Debug::Break(Arguments args) {
   Heap* heap = isolate_->heap();
   HandleScope scope(isolate_);
@@ -984,11 +994,34 @@
       // Count frames until target frame
       int count = 0;
       JavaScriptFrameIterator it(isolate_);
-      while (!it.done() && it.frame()->fp() != thread_local_.last_fp_) {
+      while (!it.done() && it.frame()->fp() < thread_local_.last_fp_) {
         count++;
         it.Advance();
       }
 
+      // Catch the cases that would lead to crashes and capture
+      // - C entry FP at which to start stack crawl.
+      // - FP of the frame at which we plan to stop stepping out (last FP).
+      // - current FP that's larger than last FP.
+      // - Counter for the number of steps to step out.
+      if (it.done()) {
+        // We crawled the entire stack, never reaching last_fp_.
+        PutValuesOnStackAndDie(0xBEEEEEEE,
+                               frame->fp(),
+                               thread_local_.last_fp_,
+                               NULL,
+                               count,
+                               0xFEEEEEEE);
+      } else if (it.frame()->fp() != thread_local_.last_fp_) {
+        // We crawled over last_fp_, without getting a match.
+        PutValuesOnStackAndDie(0xBEEEEEEE,
+                               frame->fp(),
+                               thread_local_.last_fp_,
+                               it.frame()->fp(),
+                               count,
+                               0xFEEEEEEE);
+      }
+
       // If we found original frame
       if (it.frame()->fp() == thread_local_.last_fp_) {
         if (step_count > 1) {
@@ -2227,6 +2260,13 @@
 }
 
 
+const int Debug::FramePaddingLayout::kInitialSize = 1;
+
+
+// Any even value bigger than kInitialSize as needed for stack scanning.
+const int Debug::FramePaddingLayout::kPaddingValue = kInitialSize + 1;
+
+
 bool Debug::IsDebugGlobal(GlobalObject* global) {
   return IsLoaded() && global == debug_context()->global();
 }
diff --git a/src/debug.h b/src/debug.h
index 7ec7801..d9c966c 100644
--- a/src/debug.h
+++ b/src/debug.h
@@ -232,6 +232,12 @@
   void PreemptionWhileInDebugger();
   void Iterate(ObjectVisitor* v);
 
+  NO_INLINE(void PutValuesOnStackAndDie(int start,
+                                        Address c_entry_fp,
+                                        Address last_fp,
+                                        Address larger_fp,
+                                        int count,
+                                        int end));
   Object* Break(Arguments args);
   void SetBreakPoint(Handle<SharedFunctionInfo> shared,
                      Handle<Object> break_point_object,
@@ -457,6 +463,50 @@
   // Architecture-specific constant.
   static const bool kFrameDropperSupported;
 
+  /**
+   * Defines the layout of a stack frame that supports padding. This is a
+   * regular internal frame that has a flexible stack structure. LiveEdit can
+   * shift its lower part up the stack, taking up the 'padding' space when
+   * additional stack memory is required.
+   * Such a frame is expected immediately above the topmost JavaScript frame.
+   *
+   * Stack Layout:
+   *   --- Top
+   *   LiveEdit routine frames
+   *   ---
+   *   C frames of debug handler
+   *   ---
+   *   ...
+   *   ---
+   *      An internal frame that has n padding words:
+   *      - any number of words as needed by code -- upper part of frame
+   *      - padding size: a Smi storing n -- current size of padding
+   *      - padding: n words filled with kPaddingValue in form of Smi
+   *      - 3 context/type words of a regular InternalFrame
+   *      - fp
+   *   ---
+   *      Topmost JavaScript frame
+   *   ---
+   *   ...
+   *   --- Bottom
+   */
+  class FramePaddingLayout : public AllStatic {
+   public:
+    // Architecture-specific constant.
+    static const bool kIsSupported;
+
+    // The size of the frame base, including fp. Padding words start right
+    // above the base.
+    static const int kFrameBaseSize = 4;
+
+    // A number of words that should be reserved on stack for the LiveEdit use.
+    // Normally equals 1. Stored on stack in form of Smi.
+    static const int kInitialSize;
+    // A value that padding words are filled with (in form of Smi). Going
+    // bottom-top, the first word not having this value is a counter word.
+    static const int kPaddingValue;
+  };
+
  private:
   explicit Debug(Isolate* isolate);
   ~Debug();
diff --git a/src/elements.cc b/src/elements.cc
index 26d3dc1..d367af8 100644
--- a/src/elements.cc
+++ b/src/elements.cc
@@ -424,10 +424,10 @@
         receiver, holder, key, BackingStore::cast(backing_store));
   }
 
-  virtual MaybeObject* Get(Object* receiver,
-                           JSObject* holder,
-                           uint32_t key,
-                           FixedArrayBase* backing_store) {
+  MUST_USE_RESULT virtual MaybeObject* Get(Object* receiver,
+                                           JSObject* holder,
+                                           uint32_t key,
+                                           FixedArrayBase* backing_store) {
     if (backing_store == NULL) {
       backing_store = holder->elements();
     }
@@ -435,62 +435,64 @@
         receiver, holder, key, BackingStore::cast(backing_store));
   }
 
-  static MaybeObject* GetImpl(Object* receiver,
-                              JSObject* obj,
-                              uint32_t key,
-                              BackingStore* backing_store) {
+  MUST_USE_RESULT static MaybeObject* GetImpl(Object* receiver,
+                                              JSObject* obj,
+                                              uint32_t key,
+                                              BackingStore* backing_store) {
     return (key < ElementsAccessorSubclass::GetCapacityImpl(backing_store))
            ? backing_store->get(key)
            : backing_store->GetHeap()->the_hole_value();
   }
 
-  virtual MaybeObject* SetLength(JSArray* array,
-                                 Object* length) {
+  MUST_USE_RESULT virtual MaybeObject* SetLength(JSArray* array,
+                                                 Object* length) {
     return ElementsAccessorSubclass::SetLengthImpl(
         array, length, BackingStore::cast(array->elements()));
   }
 
-  static MaybeObject* SetLengthImpl(JSObject* obj,
-                                    Object* length,
-                                    BackingStore* backing_store);
+  MUST_USE_RESULT static MaybeObject* SetLengthImpl(
+      JSObject* obj,
+      Object* length,
+      BackingStore* backing_store);
 
-  virtual MaybeObject* SetCapacityAndLength(JSArray* array,
-                                            int capacity,
-                                            int length) {
+  MUST_USE_RESULT virtual MaybeObject* SetCapacityAndLength(JSArray* array,
+                                                            int capacity,
+                                                            int length) {
     return ElementsAccessorSubclass::SetFastElementsCapacityAndLength(
         array,
         capacity,
         length);
   }
 
-  static MaybeObject* SetFastElementsCapacityAndLength(JSObject* obj,
-                                                       int capacity,
-                                                       int length) {
+  MUST_USE_RESULT static MaybeObject* SetFastElementsCapacityAndLength(
+      JSObject* obj,
+      int capacity,
+      int length) {
     UNIMPLEMENTED();
     return obj;
   }
 
-  virtual MaybeObject* Delete(JSObject* obj,
-                              uint32_t key,
-                              JSReceiver::DeleteMode mode) = 0;
+  MUST_USE_RESULT virtual MaybeObject* Delete(JSObject* obj,
+                                              uint32_t key,
+                                              JSReceiver::DeleteMode mode) = 0;
 
-  static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
-                                       uint32_t from_start,
-                                       FixedArrayBase* to,
-                                       ElementsKind to_kind,
-                                       uint32_t to_start,
-                                       int copy_size) {
+  MUST_USE_RESULT static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
+                                                       uint32_t from_start,
+                                                       FixedArrayBase* to,
+                                                       ElementsKind to_kind,
+                                                       uint32_t to_start,
+                                                       int copy_size) {
     UNREACHABLE();
     return NULL;
   }
 
-  virtual MaybeObject* CopyElements(JSObject* from_holder,
-                                    uint32_t from_start,
-                                    FixedArrayBase* to,
-                                    ElementsKind to_kind,
-                                    uint32_t to_start,
-                                    int copy_size,
-                                    FixedArrayBase* from) {
+  MUST_USE_RESULT virtual MaybeObject* CopyElements(JSObject* from_holder,
+                                                    uint32_t from_start,
+                                                    FixedArrayBase* to,
+                                                    ElementsKind to_kind,
+                                                    uint32_t to_start,
+                                                    int copy_size,
+                                                    FixedArrayBase* from) {
     if (from == NULL) {
       from = from_holder->elements();
     }
@@ -501,10 +503,11 @@
         from, from_start, to, to_kind, to_start, copy_size);
   }
 
-  virtual MaybeObject* AddElementsToFixedArray(Object* receiver,
-                                               JSObject* holder,
-                                               FixedArray* to,
-                                               FixedArrayBase* from) {
+  MUST_USE_RESULT virtual MaybeObject* AddElementsToFixedArray(
+      Object* receiver,
+      JSObject* holder,
+      FixedArray* to,
+      FixedArrayBase* from) {
     int len0 = to->length();
 #ifdef DEBUG
     if (FLAG_enable_slow_asserts) {
@@ -866,27 +869,28 @@
   friend class ElementsAccessorBase<ExternalElementsAccessorSubclass,
                                     ElementsKindTraits<Kind> >;
 
-  static MaybeObject* GetImpl(Object* receiver,
-                              JSObject* obj,
-                              uint32_t key,
-                              BackingStore* backing_store) {
+  MUST_USE_RESULT static MaybeObject* GetImpl(Object* receiver,
+                                              JSObject* obj,
+                                              uint32_t key,
+                                              BackingStore* backing_store) {
     return
         key < ExternalElementsAccessorSubclass::GetCapacityImpl(backing_store)
         ? backing_store->get(key)
         : backing_store->GetHeap()->undefined_value();
   }
 
-  static MaybeObject* SetLengthImpl(JSObject* obj,
-                                    Object* length,
-                                    BackingStore* backing_store) {
+  MUST_USE_RESULT static MaybeObject* SetLengthImpl(
+      JSObject* obj,
+      Object* length,
+      BackingStore* backing_store) {
     // External arrays do not support changing their length.
     UNREACHABLE();
     return obj;
   }
 
-  virtual MaybeObject* Delete(JSObject* obj,
-                              uint32_t key,
-                              JSReceiver::DeleteMode mode) {
+  MUST_USE_RESULT virtual MaybeObject* Delete(JSObject* obj,
+                                              uint32_t key,
+                                              JSReceiver::DeleteMode mode) {
     // External arrays always ignore deletes.
     return obj->GetHeap()->true_value();
   }
@@ -1002,10 +1006,11 @@
 
   // Adjusts the length of the dictionary backing store and returns the new
   // length according to ES5 section 15.4.5.2 behavior.
-  static MaybeObject* SetLengthWithoutNormalize(SeededNumberDictionary* dict,
-                                                JSArray* array,
-                                                Object* length_object,
-                                                uint32_t length) {
+  MUST_USE_RESULT static MaybeObject* SetLengthWithoutNormalize(
+      SeededNumberDictionary* dict,
+      JSArray* array,
+      Object* length_object,
+      uint32_t length) {
     if (length == 0) {
       // If the length of a slow array is reset to zero, we clear
       // the array and flush backing storage. This has the added
@@ -1057,9 +1062,10 @@
     return length_object;
   }
 
-  static MaybeObject* DeleteCommon(JSObject* obj,
-                                   uint32_t key,
-                                   JSReceiver::DeleteMode mode) {
+  MUST_USE_RESULT static MaybeObject* DeleteCommon(
+      JSObject* obj,
+      uint32_t key,
+      JSReceiver::DeleteMode mode) {
     Isolate* isolate = obj->GetIsolate();
     Heap* heap = isolate->heap();
     FixedArray* backing_store = FixedArray::cast(obj->elements());
@@ -1102,12 +1108,12 @@
     return heap->true_value();
   }
 
-  static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
-                                       uint32_t from_start,
-                                       FixedArrayBase* to,
-                                       ElementsKind to_kind,
-                                       uint32_t to_start,
-                                       int copy_size) {
+  MUST_USE_RESULT static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
+                                                       uint32_t from_start,
+                                                       FixedArrayBase* to,
+                                                       ElementsKind to_kind,
+                                                       uint32_t to_start,
+                                                       int copy_size) {
     switch (to_kind) {
       case FAST_SMI_ONLY_ELEMENTS:
       case FAST_ELEMENTS:
@@ -1131,16 +1137,17 @@
   friend class ElementsAccessorBase<DictionaryElementsAccessor,
                                     ElementsKindTraits<DICTIONARY_ELEMENTS> >;
 
-  virtual MaybeObject* Delete(JSObject* obj,
-                              uint32_t key,
-                              JSReceiver::DeleteMode mode) {
+  MUST_USE_RESULT virtual MaybeObject* Delete(JSObject* obj,
+                                              uint32_t key,
+                                              JSReceiver::DeleteMode mode) {
     return DeleteCommon(obj, key, mode);
   }
 
-  static MaybeObject* GetImpl(Object* receiver,
-                              JSObject* obj,
-                              uint32_t key,
-                              SeededNumberDictionary* backing_store) {
+  MUST_USE_RESULT static MaybeObject* GetImpl(
+      Object* receiver,
+      JSObject* obj,
+      uint32_t key,
+      SeededNumberDictionary* backing_store) {
     int entry = backing_store->FindEntry(key);
     if (entry != SeededNumberDictionary::kNotFound) {
       Object* element = backing_store->ValueAt(entry);
@@ -1186,10 +1193,10 @@
       NonStrictArgumentsElementsAccessor,
       ElementsKindTraits<NON_STRICT_ARGUMENTS_ELEMENTS> >;
 
-  static MaybeObject* GetImpl(Object* receiver,
-                              JSObject* obj,
-                              uint32_t key,
-                              FixedArray* parameter_map) {
+  MUST_USE_RESULT static MaybeObject* GetImpl(Object* receiver,
+                                              JSObject* obj,
+                                              uint32_t key,
+                                              FixedArray* parameter_map) {
     Object* probe = GetParameterMapArg(obj, parameter_map, key);
     if (!probe->IsTheHole()) {
       Context* context = Context::cast(parameter_map->get(0));
@@ -1216,18 +1223,19 @@
     }
   }
 
-  static MaybeObject* SetLengthImpl(JSObject* obj,
-                                    Object* length,
-                                    FixedArray* parameter_map) {
+  MUST_USE_RESULT static MaybeObject* SetLengthImpl(
+      JSObject* obj,
+      Object* length,
+      FixedArray* parameter_map) {
     // TODO(mstarzinger): This was never implemented but will be used once we
     // correctly implement [[DefineOwnProperty]] on arrays.
     UNIMPLEMENTED();
     return obj;
   }
 
-  virtual MaybeObject* Delete(JSObject* obj,
-                              uint32_t key,
-                              JSReceiver::DeleteMode mode) {
+  MUST_USE_RESULT virtual MaybeObject* Delete(JSObject* obj,
+                                              uint32_t key,
+                                              JSReceiver::DeleteMode mode) {
     FixedArray* parameter_map = FixedArray::cast(obj->elements());
     Object* probe = GetParameterMapArg(obj, parameter_map, key);
     if (!probe->IsTheHole()) {
@@ -1246,12 +1254,12 @@
     return obj->GetHeap()->true_value();
   }
 
-  static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
-                                       uint32_t from_start,
-                                       FixedArrayBase* to,
-                                       ElementsKind to_kind,
-                                       uint32_t to_start,
-                                       int copy_size) {
+  MUST_USE_RESULT static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
+                                                       uint32_t from_start,
+                                                       FixedArrayBase* to,
+                                                       ElementsKind to_kind,
+                                                       uint32_t to_start,
+                                                       int copy_size) {
     FixedArray* parameter_map = FixedArray::cast(from);
     FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
     ElementsAccessor* accessor = ElementsAccessor::ForArray(arguments);
@@ -1354,8 +1362,8 @@
 
 
 template <typename ElementsAccessorSubclass, typename ElementsKindTraits>
-MaybeObject* ElementsAccessorBase<ElementsAccessorSubclass,
-                                  ElementsKindTraits>::
+MUST_USE_RESULT MaybeObject* ElementsAccessorBase<ElementsAccessorSubclass,
+                                                  ElementsKindTraits>::
     SetLengthImpl(JSObject* obj,
                   Object* length,
                   typename ElementsKindTraits::BackingStore* backing_store) {
diff --git a/src/elements.h b/src/elements.h
index 51d402d..55d6fa5 100644
--- a/src/elements.h
+++ b/src/elements.h
@@ -60,18 +60,19 @@
   // can optionally pass in the backing store to use for the check, which must
   // be compatible with the ElementsKind of the ElementsAccessor. If
   // backing_store is NULL, the holder->elements() is used as the backing store.
-  virtual MaybeObject* Get(Object* receiver,
-                           JSObject* holder,
-                           uint32_t key,
-                           FixedArrayBase* backing_store = NULL) = 0;
+  MUST_USE_RESULT virtual MaybeObject* Get(
+      Object* receiver,
+      JSObject* holder,
+      uint32_t key,
+      FixedArrayBase* backing_store = NULL) = 0;
 
   // Modifies the length data property as specified for JSArrays and resizes the
   // underlying backing store accordingly. The method honors the semantics of
   // changing array sizes as defined in EcmaScript 5.1 15.4.5.2, i.e. arrays that
   // have non-deletable elements can only be shrunk to the size of the highest
   // element that is non-deletable.
-  virtual MaybeObject* SetLength(JSArray* holder,
-                                 Object* new_length) = 0;
+  MUST_USE_RESULT virtual MaybeObject* SetLength(JSArray* holder,
+                                                 Object* new_length) = 0;
 
   // Modifies both the length and capacity of a JSArray, resizing the underlying
   // backing store as necessary. This method does NOT honor the semantics of
@@ -79,14 +80,14 @@
   // elements. This method should only be called for array expansion OR by
   // runtime JavaScript code that uses InternalArrays and doesn't care about
   // EcmaScript 5.1 semantics.
-  virtual MaybeObject* SetCapacityAndLength(JSArray* array,
-                                            int capacity,
-                                            int length) = 0;
+  MUST_USE_RESULT virtual MaybeObject* SetCapacityAndLength(JSArray* array,
+                                                            int capacity,
+                                                            int length) = 0;
 
   // Deletes an element in an object, returning a new elements backing store.
-  virtual MaybeObject* Delete(JSObject* holder,
-                              uint32_t key,
-                              JSReceiver::DeleteMode mode) = 0;
+  MUST_USE_RESULT virtual MaybeObject* Delete(JSObject* holder,
+                                              uint32_t key,
+                                              JSReceiver::DeleteMode mode) = 0;
 
   // If kCopyToEnd is specified as the copy_size to CopyElements, it copies all
   // of elements from source after source_start to the destination array.
@@ -101,26 +102,28 @@
   // the source JSObject or JSArray in source_holder. If the holder's backing
   // store is available, it can be passed in source and source_holder is
   // ignored.
-  virtual MaybeObject* CopyElements(JSObject* source_holder,
-                                    uint32_t source_start,
-                                    FixedArrayBase* destination,
-                                    ElementsKind destination_kind,
-                                    uint32_t destination_start,
-                                    int copy_size,
-                                    FixedArrayBase* source = NULL) = 0;
+  MUST_USE_RESULT virtual MaybeObject* CopyElements(
+      JSObject* source_holder,
+      uint32_t source_start,
+      FixedArrayBase* destination,
+      ElementsKind destination_kind,
+      uint32_t destination_start,
+      int copy_size,
+      FixedArrayBase* source = NULL) = 0;
 
-  MaybeObject* CopyElements(JSObject* from_holder,
-                            FixedArrayBase* to,
-                            ElementsKind to_kind,
-                            FixedArrayBase* from = NULL) {
+  MUST_USE_RESULT MaybeObject* CopyElements(JSObject* from_holder,
+                                            FixedArrayBase* to,
+                                            ElementsKind to_kind,
+                                            FixedArrayBase* from = NULL) {
     return CopyElements(from_holder, 0, to, to_kind, 0,
                         kCopyToEndAndInitializeToHole, from);
   }
 
-  virtual MaybeObject* AddElementsToFixedArray(Object* receiver,
-                                               JSObject* holder,
-                                               FixedArray* to,
-                                               FixedArrayBase* from = NULL) = 0;
+  MUST_USE_RESULT virtual MaybeObject* AddElementsToFixedArray(
+      Object* receiver,
+      JSObject* holder,
+      FixedArray* to,
+      FixedArrayBase* from = NULL) = 0;
 
   // Returns a shared ElementsAccessor for the specified ElementsKind.
   static ElementsAccessor* ForKind(ElementsKind elements_kind) {
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index 62a9782..7000fbd 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -132,7 +132,7 @@
 
 // Flags for language modes and experimental language features.
 DEFINE_bool(use_strict, false, "enforce strict mode")
-DEFINE_bool(es52_globals, false,
+DEFINE_bool(es52_globals, true,
             "activate new semantics for global var declarations")
 
 DEFINE_bool(harmony_typeof, false, "enable harmony semantics for typeof")
diff --git a/src/frames.h b/src/frames.h
index 7178bd4..78cdd0c 100644
--- a/src/frames.h
+++ b/src/frames.h
@@ -211,6 +211,9 @@
 
   virtual void SetCallerFp(Address caller_fp) = 0;
 
+  // Manually changes value of fp in this object.
+  void UpdateFp(Address fp) { state_.fp = fp; }
+
   Address* pc_address() const { return state_.pc_address; }
 
   // Get the id of this stack frame.
diff --git a/src/globals.h b/src/globals.h
index 25d4ffe..97b033f 100644
--- a/src/globals.h
+++ b/src/globals.h
@@ -345,6 +345,9 @@
 #define INLINE(header) inline __attribute__((always_inline)) header
 #define NO_INLINE(header) __attribute__((noinline)) header
 #endif
+#elif defined(_MSC_VER) && !defined(DEBUG)
+#define INLINE(header) __forceinline header
+#define NO_INLINE(header) header
 #else
 #define INLINE(header) inline header
 #define NO_INLINE(header) header
diff --git a/src/heap.cc b/src/heap.cc
index e2e0e9e..0035fd5 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -805,7 +805,7 @@
 
     UpdateSurvivalRateTrend(start_new_space_size);
 
-    size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSize();
+    size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSizeOfObjects();
 
     if (high_survival_rate_during_scavenges &&
         IsStableOrIncreasingSurvivalTrend()) {
@@ -2020,7 +2020,7 @@
   map->set_pre_allocated_property_fields(0);
   map->init_instance_descriptors();
   map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
-  map->set_prototype_transitions(empty_fixed_array(), SKIP_WRITE_BARRIER);
+  map->init_prototype_transitions(undefined_value());
   map->set_unused_property_fields(0);
   map->set_bit_field(0);
   map->set_bit_field2(1 << Map::kIsExtensible);
@@ -2159,15 +2159,15 @@
   // Fix the instance_descriptors for the existing maps.
   meta_map()->init_instance_descriptors();
   meta_map()->set_code_cache(empty_fixed_array());
-  meta_map()->set_prototype_transitions(empty_fixed_array());
+  meta_map()->init_prototype_transitions(undefined_value());
 
   fixed_array_map()->init_instance_descriptors();
   fixed_array_map()->set_code_cache(empty_fixed_array());
-  fixed_array_map()->set_prototype_transitions(empty_fixed_array());
+  fixed_array_map()->init_prototype_transitions(undefined_value());
 
   oddball_map()->init_instance_descriptors();
   oddball_map()->set_code_cache(empty_fixed_array());
-  oddball_map()->set_prototype_transitions(empty_fixed_array());
+  oddball_map()->init_prototype_transitions(undefined_value());
 
   // Fix prototype object for existing maps.
   meta_map()->set_prototype(null_value());
@@ -5810,16 +5810,6 @@
 }
 
 
-intptr_t Heap::PromotedSpaceSize() {
-  return old_pointer_space_->Size()
-      + old_data_space_->Size()
-      + code_space_->Size()
-      + map_space_->Size()
-      + cell_space_->Size()
-      + lo_space_->Size();
-}
-
-
 intptr_t Heap::PromotedSpaceSizeOfObjects() {
   return old_pointer_space_->SizeOfObjects()
       + old_data_space_->SizeOfObjects()
diff --git a/src/heap.h b/src/heap.h
index b91416f..beb1bc5 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -1342,7 +1342,7 @@
                                                      PretenureFlag pretenure);
 
   inline intptr_t PromotedTotalSize() {
-    return PromotedSpaceSize() + PromotedExternalMemorySize();
+    return PromotedSpaceSizeOfObjects() + PromotedExternalMemorySize();
   }
 
   // True if we have reached the allocation limit in the old generation that
@@ -1363,19 +1363,6 @@
   static const intptr_t kMinimumAllocationLimit =
       8 * (Page::kPageSize > MB ? Page::kPageSize : MB);
 
-  // When we sweep lazily we initially guess that there is no garbage on the
-  // heap and set the limits for the next GC accordingly.  As we sweep we find
-  // out that some of the pages contained garbage and we have to adjust
-  // downwards the size of the heap.  This means the limits that control the
-  // timing of the next GC also need to be adjusted downwards.
-  void LowerOldGenLimits(intptr_t adjustment) {
-    size_of_old_gen_at_last_old_space_gc_ -= adjustment;
-    old_gen_promotion_limit_ =
-        OldGenPromotionLimit(size_of_old_gen_at_last_old_space_gc_);
-    old_gen_allocation_limit_ =
-        OldGenAllocationLimit(size_of_old_gen_at_last_old_space_gc_);
-  }
-
   intptr_t OldGenPromotionLimit(intptr_t old_gen_size) {
     const int divisor = FLAG_stress_compaction ? 10 : 3;
     intptr_t limit =
@@ -1468,7 +1455,7 @@
     intptr_t adjusted_allocation_limit =
         old_gen_allocation_limit_ - new_space_.Capacity() / 5;
 
-    if (PromotedSpaceSize() >= adjusted_allocation_limit) return true;
+    if (PromotedSpaceSizeOfObjects() >= adjusted_allocation_limit) return true;
 
     return false;
   }
@@ -1506,7 +1493,6 @@
   GCTracer* tracer() { return tracer_; }
 
   // Returns the size of objects residing in non new spaces.
-  intptr_t PromotedSpaceSize();
   intptr_t PromotedSpaceSizeOfObjects();
 
   double total_regexp_code_generated() { return total_regexp_code_generated_; }
diff --git a/src/hydrogen-instructions.cc b/src/hydrogen-instructions.cc
index f8c021c..c66a7a1 100644
--- a/src/hydrogen-instructions.cc
+++ b/src/hydrogen-instructions.cc
@@ -1603,6 +1603,7 @@
   SetOperandAt(1, object);
   set_representation(Representation::Tagged());
   SetGVNFlag(kDependsOnMaps);
+  int map_transitions = 0;
   for (int i = 0;
        i < types->length() && types_.length() < kMaxLoadPolymorphism;
        ++i) {
@@ -1624,13 +1625,20 @@
         case CONSTANT_FUNCTION:
           types_.Add(types->at(i));
           break;
+        case MAP_TRANSITION:
+          // We should just ignore these since they are not relevant to a load
+          // operation.  This means we will deopt if we actually see this map
+          // from optimized code.
+          map_transitions++;
+          break;
         default:
           break;
       }
     }
   }
 
-  if (types_.length() == types->length() && FLAG_deoptimize_uncommon_cases) {
+  if (types_.length() + map_transitions == types->length() &&
+      FLAG_deoptimize_uncommon_cases) {
     SetFlag(kUseGVN);
   } else {
     SetAllSideEffects();
diff --git a/src/hydrogen.cc b/src/hydrogen.cc
index ab61ef7..3be001e 100644
--- a/src/hydrogen.cc
+++ b/src/hydrogen.cc
@@ -5662,6 +5662,39 @@
 }
 
 
+class FunctionSorter {
+ public:
+  FunctionSorter() : index_(0), ticks_(0), ast_length_(0), src_length_(0) { }
+  FunctionSorter(int index, int ticks, int ast_length, int src_length)
+      : index_(index),
+        ticks_(ticks),
+        ast_length_(ast_length),
+        src_length_(src_length) { }
+
+  int index() const { return index_; }
+  int ticks() const { return ticks_; }
+  int ast_length() const { return ast_length_; }
+  int src_length() const { return src_length_; }
+
+ private:
+  int index_;
+  int ticks_;
+  int ast_length_;
+  int src_length_;
+};
+
+
+static int CompareHotness(void const* a, void const* b) {
+  FunctionSorter const* function1 = reinterpret_cast<FunctionSorter const*>(a);
+  FunctionSorter const* function2 = reinterpret_cast<FunctionSorter const*>(b);
+  int diff = function1->ticks() - function2->ticks();
+  if (diff != 0) return -diff;
+  diff = function1->ast_length() - function2->ast_length();
+  if (diff != 0) return diff;
+  return function1->src_length() - function2->src_length();
+}
+
+
 void HGraphBuilder::HandlePolymorphicCallNamed(Call* expr,
                                                HValue* receiver,
                                                SmallMapList* types,
@@ -5670,51 +5703,73 @@
   // maps are identical. In that case we can avoid repeatedly generating the
   // same prototype map checks.
   int argument_count = expr->arguments()->length() + 1;  // Includes receiver.
-  int count = 0;
   HBasicBlock* join = NULL;
-  for (int i = 0; i < types->length() && count < kMaxCallPolymorphism; ++i) {
+  FunctionSorter order[kMaxCallPolymorphism];
+  int ordered_functions = 0;
+  for (int i = 0;
+       i < types->length() && ordered_functions < kMaxCallPolymorphism;
+       ++i) {
     Handle<Map> map = types->at(i);
     if (expr->ComputeTarget(map, name)) {
-      if (count == 0) {
-        // Only needed once.
-        AddInstruction(new(zone()) HCheckNonSmi(receiver));
-        join = graph()->CreateBasicBlock();
-      }
-      ++count;
-      HBasicBlock* if_true = graph()->CreateBasicBlock();
-      HBasicBlock* if_false = graph()->CreateBasicBlock();
-      HCompareMap* compare =
-          new(zone()) HCompareMap(receiver, map, if_true, if_false);
-      current_block()->Finish(compare);
-
-      set_current_block(if_true);
-      AddCheckConstantFunction(expr, receiver, map, false);
-      if (FLAG_trace_inlining && FLAG_polymorphic_inlining) {
-        PrintF("Trying to inline the polymorphic call to %s\n",
-               *name->ToCString());
-      }
-      if (FLAG_polymorphic_inlining && TryInlineCall(expr)) {
-        // Trying to inline will signal that we should bailout from the
-        // entire compilation by setting stack overflow on the visitor.
-        if (HasStackOverflow()) return;
-      } else {
-        HCallConstantFunction* call =
-            new(zone()) HCallConstantFunction(expr->target(), argument_count);
-        call->set_position(expr->position());
-        PreProcessCall(call);
-        AddInstruction(call);
-        if (!ast_context()->IsEffect()) Push(call);
-      }
-
-      if (current_block() != NULL) current_block()->Goto(join);
-      set_current_block(if_false);
+      order[ordered_functions++] =
+          FunctionSorter(i,
+                         expr->target()->shared()->profiler_ticks(),
+                         InliningAstSize(expr->target()),
+                         expr->target()->shared()->SourceSize());
     }
   }
 
+  qsort(reinterpret_cast<void*>(&order[0]),
+        ordered_functions,
+        sizeof(order[0]),
+        &CompareHotness);
+
+  for (int fn = 0; fn < ordered_functions; ++fn) {
+    int i = order[fn].index();
+    Handle<Map> map = types->at(i);
+    if (fn == 0) {
+      // Only needed once.
+      AddInstruction(new(zone()) HCheckNonSmi(receiver));
+      join = graph()->CreateBasicBlock();
+    }
+    HBasicBlock* if_true = graph()->CreateBasicBlock();
+    HBasicBlock* if_false = graph()->CreateBasicBlock();
+    HCompareMap* compare =
+        new(zone()) HCompareMap(receiver, map, if_true, if_false);
+    current_block()->Finish(compare);
+
+    set_current_block(if_true);
+    expr->ComputeTarget(map, name);
+    AddCheckConstantFunction(expr, receiver, map, false);
+    if (FLAG_trace_inlining && FLAG_polymorphic_inlining) {
+      Handle<JSFunction> caller = info()->closure();
+      SmartArrayPointer<char> caller_name =
+          caller->shared()->DebugName()->ToCString();
+      PrintF("Trying to inline the polymorphic call to %s from %s\n",
+             *name->ToCString(),
+             *caller_name);
+    }
+    if (FLAG_polymorphic_inlining && TryInlineCall(expr)) {
+      // Trying to inline will signal that we should bailout from the
+      // entire compilation by setting stack overflow on the visitor.
+      if (HasStackOverflow()) return;
+    } else {
+      HCallConstantFunction* call =
+          new(zone()) HCallConstantFunction(expr->target(), argument_count);
+      call->set_position(expr->position());
+      PreProcessCall(call);
+      AddInstruction(call);
+      if (!ast_context()->IsEffect()) Push(call);
+    }
+
+    if (current_block() != NULL) current_block()->Goto(join);
+    set_current_block(if_false);
+  }
+
   // Finish up.  Unconditionally deoptimize if we've handled all the maps we
   // know about and do not want to handle ones we've never seen.  Otherwise
   // use a generic IC.
-  if (count == types->length() && FLAG_deoptimize_uncommon_cases) {
+  if (ordered_functions == types->length() && FLAG_deoptimize_uncommon_cases) {
     current_block()->FinishExitWithDeoptimization(HDeoptimize::kNoUses);
   } else {
     HValue* context = environment()->LookupContext();
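
For reference, a minimal standalone sketch of the ordering that the qsort call above applies through CompareHotness: candidates sort by profiler ticks in descending order, then by AST size and source size in ascending order. CandidateFn and the sample data below are placeholders standing in for FunctionSorter; this is not V8 code.

    #include <cstdio>
    #include <cstdlib>

    struct CandidateFn {
      int index;       // position in the type list
      int ticks;       // profiler ticks: hotter functions sort first
      int ast_length;  // tie-breaker: smaller AST first
      int src_length;  // final tie-breaker: smaller source first
    };

    static int CompareHotness(const void* a, const void* b) {
      const CandidateFn* f1 = static_cast<const CandidateFn*>(a);
      const CandidateFn* f2 = static_cast<const CandidateFn*>(b);
      int diff = f1->ticks - f2->ticks;
      if (diff != 0) return -diff;             // more ticks sorts earlier
      diff = f1->ast_length - f2->ast_length;
      if (diff != 0) return diff;              // smaller AST sorts earlier
      return f1->src_length - f2->src_length;  // smaller source sorts earlier
    }

    int main() {
      CandidateFn order[] = {{0, 3, 40, 200}, {1, 7, 90, 500}, {2, 7, 30, 100}};
      qsort(order, 3, sizeof(order[0]), &CompareHotness);
      for (int i = 0; i < 3; ++i) printf("%d ", order[i].index);  // prints: 2 1 0
      printf("\n");
      return 0;
    }
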
@@ -5763,14 +5818,11 @@
 }
 
 
-bool HGraphBuilder::TryInline(CallKind call_kind,
-                              Handle<JSFunction> target,
-                              ZoneList<Expression*>* arguments,
-                              HValue* receiver,
-                              int ast_id,
-                              int return_id,
-                              ReturnHandlingFlag return_handling) {
-  if (!FLAG_use_inlining) return false;
+static const int kNotInlinable = 1000000000;
+
+
+int HGraphBuilder::InliningAstSize(Handle<JSFunction> target) {
+  if (!FLAG_use_inlining) return kNotInlinable;
 
   // Precondition: call is monomorphic and we have found a target with the
   // appropriate arity.
@@ -5782,25 +5834,43 @@
   if (target_shared->SourceSize() >
       Min(FLAG_max_inlined_source_size, kUnlimitedMaxInlinedSourceSize)) {
     TraceInline(target, caller, "target text too big");
-    return false;
+    return kNotInlinable;
   }
 
   // Target must be inlineable.
   if (!target->IsInlineable()) {
     TraceInline(target, caller, "target not inlineable");
-    return false;
+    return kNotInlinable;
   }
   if (target_shared->dont_inline() || target_shared->dont_optimize()) {
     TraceInline(target, caller, "target contains unsupported syntax [early]");
-    return false;
+    return kNotInlinable;
   }
 
   int nodes_added = target_shared->ast_node_count();
+  return nodes_added;
+}
+
+
+bool HGraphBuilder::TryInline(CallKind call_kind,
+                              Handle<JSFunction> target,
+                              ZoneList<Expression*>* arguments,
+                              HValue* receiver,
+                              int ast_id,
+                              int return_id,
+                              ReturnHandlingFlag return_handling) {
+  int nodes_added = InliningAstSize(target);
+  if (nodes_added == kNotInlinable) return false;
+
+  Handle<JSFunction> caller = info()->closure();
+
   if (nodes_added > Min(FLAG_max_inlined_nodes, kUnlimitedMaxInlinedNodes)) {
     TraceInline(target, caller, "target AST is too large [early]");
     return false;
   }
 
+  Handle<SharedFunctionInfo> target_shared(target->shared());
+
 #if !defined(V8_TARGET_ARCH_IA32)
   // Target must be able to use caller's context.
   CompilationInfo* outer_info = info();
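
The refactoring above splits the early checks out into InliningAstSize, which returns either the target's AST node count or the kNotInlinable sentinel, so TryInline and the polymorphic-call sorter can share them. A toy sketch of the sentinel pattern, with invented names and thresholds:

    #include <cstdio>

    static const int kNotInlinable = 1000000000;

    // Returns a candidate's AST size, or kNotInlinable if an early check fails.
    static int InlineSizeOrSentinel(int ast_nodes, bool inlineable) {
      if (!inlineable) return kNotInlinable;
      return ast_nodes;
    }

    static bool ShouldInline(int ast_nodes, bool inlineable, int max_nodes) {
      int nodes_added = InlineSizeOrSentinel(ast_nodes, inlineable);
      if (nodes_added == kNotInlinable) return false;  // early bail-out
      return nodes_added <= max_nodes;                 // size-based decision
    }

    int main() {
      printf("%d %d\n", ShouldInline(120, true, 196), ShouldInline(120, false, 196));
      return 0;
    }
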
diff --git a/src/hydrogen.h b/src/hydrogen.h
index a52bf3b..909d07b 100644
--- a/src/hydrogen.h
+++ b/src/hydrogen.h
@@ -1010,6 +1010,7 @@
   // Try to optimize fun.apply(receiver, arguments) pattern.
   bool TryCallApply(Call* expr);
 
+  int InliningAstSize(Handle<JSFunction> target);
   bool TryInline(CallKind call_kind,
                  Handle<JSFunction> target,
                  ZoneList<Expression*>* arguments,
diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
index 929b485..4ead80b 100644
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -640,6 +640,9 @@
   static const byte kJccShortPrefix = 0x70;
   static const byte kJncShortOpcode = kJccShortPrefix | not_carry;
   static const byte kJcShortOpcode = kJccShortPrefix | carry;
+  static const byte kJnzShortOpcode = kJccShortPrefix | not_zero;
+  static const byte kJzShortOpcode = kJccShortPrefix | zero;
+
 
   // ---------------------------------------------------------------------------
   // Code generation
diff --git a/src/ia32/debug-ia32.cc b/src/ia32/debug-ia32.cc
index 710cbaf..901e38b 100644
--- a/src/ia32/debug-ia32.cc
+++ b/src/ia32/debug-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -91,10 +91,12 @@
   rinfo()->PatchCode(original_rinfo()->pc(), Assembler::kDebugBreakSlotLength);
 }
 
+// All debug break stubs support padding for LiveEdit.
+const bool Debug::FramePaddingLayout::kIsSupported = true;
+
 
 #define __ ACCESS_MASM(masm)
 
-
 static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
                                           RegList object_regs,
                                           RegList non_object_regs,
@@ -103,6 +105,13 @@
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
 
+    // Load padding words on stack.
+    for (int i = 0; i < Debug::FramePaddingLayout::kInitialSize; i++) {
+      __ push(Immediate(Smi::FromInt(
+          Debug::FramePaddingLayout::kPaddingValue)));
+    }
+    __ push(Immediate(Smi::FromInt(Debug::FramePaddingLayout::kInitialSize)));
+
     // Store the registers containing live values on the expression stack to
     // make sure that these are correctly updated during GC. Non object values
     // are stored as a smi causing it to be untouched by GC.
@@ -134,6 +143,10 @@
     CEntryStub ceb(1);
     __ CallStub(&ceb);
 
+    // Automatically find a register that can be used after the registers are
+    // restored. We need one register for the padding skip instructions.
+    Register unused_reg = { -1 };
+
     // Restore the register values containing object pointers from the
     // expression stack.
     for (int i = kNumJSCallerSaved; --i >= 0;) {
@@ -142,15 +155,29 @@
       if (FLAG_debug_code) {
         __ Set(reg, Immediate(kDebugZapValue));
       }
+      bool taken = reg.code() == esi.code();
       if ((object_regs & (1 << r)) != 0) {
         __ pop(reg);
+        taken = true;
       }
       if ((non_object_regs & (1 << r)) != 0) {
         __ pop(reg);
         __ SmiUntag(reg);
+        taken = true;
+      }
+      if (!taken) {
+        unused_reg = reg;
       }
     }
 
+    ASSERT(unused_reg.code() != -1);
+
+    // Read current padding counter and skip corresponding number of words.
+    __ pop(unused_reg);
+    // Divide the stored value by 2 (untag) and multiply it by the word size.
+    STATIC_ASSERT(kSmiTagSize == 1);
+    __ lea(esp, Operand(esp, unused_reg, times_half_pointer_size, 0));
+
     // Get rid of the internal frame.
   }
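
The epilogue added above pops the padding counter, which is stored as a Smi, and then skips that many words with a single lea. Because a Smi holds the integer shifted left by one (kSmiTagSize == 1), scaling the tagged value by half a pointer size yields exactly value * kPointerSize bytes. A small check of that arithmetic, assuming the 32-bit ia32 word size:

    #include <cassert>

    const int kPointerSize = 4;  // ia32 word size, assumed for this sketch

    int SmiFromInt(int value) { return value << 1; }  // kSmiTagSize == 1, kSmiTag == 0

    // Bytes skipped by: lea esp, [esp + tagged_counter * times_half_pointer_size]
    int PaddingBytesSkipped(int tagged_counter) {
      return tagged_counter * (kPointerSize / 2);
    }

    int main() {
      for (int words = 0; words < 8; ++words) {
        assert(PaddingBytesSkipped(SmiFromInt(words)) == words * kPointerSize);
      }
      return 0;
    }
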
 
diff --git a/src/ia32/ic-ia32.cc b/src/ia32/ic-ia32.cc
index a591af1..dc64a09 100644
--- a/src/ia32/ic-ia32.cc
+++ b/src/ia32/ic-ia32.cc
@@ -1727,12 +1727,12 @@
 
   // Activate inlined smi code.
   if (previous_state == UNINITIALIZED) {
-    PatchInlinedSmiCode(address());
+    PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
   }
 }
 
 
-void PatchInlinedSmiCode(Address address) {
+void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
   // The address of the instruction following the call.
   Address test_instruction_address =
       address + Assembler::kCallTargetAddressOffset;
@@ -1753,14 +1753,18 @@
            address, test_instruction_address, delta);
   }
 
-  // Patch with a short conditional jump. There must be a
-  // short jump-if-carry/not-carry at this position.
+  // Patch with a short conditional jump. Enabling means switching from a short
+  // jump-if-carry/not-carry to jump-if-zero/not-zero, whereas disabling is the
+  // reverse operation of that.
   Address jmp_address = test_instruction_address - delta;
-  ASSERT(*jmp_address == Assembler::kJncShortOpcode ||
-         *jmp_address == Assembler::kJcShortOpcode);
-  Condition cc = *jmp_address == Assembler::kJncShortOpcode
-      ? not_zero
-      : zero;
+  ASSERT((check == ENABLE_INLINED_SMI_CHECK)
+         ? (*jmp_address == Assembler::kJncShortOpcode ||
+            *jmp_address == Assembler::kJcShortOpcode)
+         : (*jmp_address == Assembler::kJnzShortOpcode ||
+            *jmp_address == Assembler::kJzShortOpcode));
+  Condition cc = (check == ENABLE_INLINED_SMI_CHECK)
+      ? (*jmp_address == Assembler::kJncShortOpcode ? not_zero : zero)
+      : (*jmp_address == Assembler::kJnzShortOpcode ? not_carry : carry);
   *jmp_address = static_cast<byte>(Assembler::kJccShortPrefix | cc);
 }
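
PatchInlinedSmiCode on ia32 now toggles between two pairs of short conditional jumps: enabling rewrites jnc/jc into jnz/jz and disabling reverses it, which is why assembler-ia32.h grows kJnzShortOpcode and kJzShortOpcode. All four opcodes are kJccShortPrefix (0x70) ORed with the condition code. A standalone sketch of the byte rewrite, using the standard ia32 condition codes (carry = 2, not_carry = 3, zero = 4, not_zero = 5):

    #include <cassert>
    #include <cstdint>

    enum InlinedSmiCheck { ENABLE_INLINED_SMI_CHECK, DISABLE_INLINED_SMI_CHECK };

    const uint8_t kJccShortPrefix = 0x70;
    const uint8_t kJcShortOpcode  = kJccShortPrefix | 2;  // carry
    const uint8_t kJncShortOpcode = kJccShortPrefix | 3;  // not_carry
    const uint8_t kJzShortOpcode  = kJccShortPrefix | 4;  // zero
    const uint8_t kJnzShortOpcode = kJccShortPrefix | 5;  // not_zero

    // Rewrites the short-jump opcode byte at a smi-check site.
    uint8_t PatchShortJcc(uint8_t opcode, InlinedSmiCheck check) {
      if (check == ENABLE_INLINED_SMI_CHECK) {
        return opcode == kJncShortOpcode ? kJnzShortOpcode : kJzShortOpcode;
      }
      return opcode == kJnzShortOpcode ? kJncShortOpcode : kJcShortOpcode;
    }

    int main() {
      assert(PatchShortJcc(kJncShortOpcode, ENABLE_INLINED_SMI_CHECK) == kJnzShortOpcode);
      assert(PatchShortJcc(kJzShortOpcode, DISABLE_INLINED_SMI_CHECK) == kJcShortOpcode);
      return 0;
    }
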
 
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
index d416662..455c502 100644
--- a/src/ia32/lithium-codegen-ia32.cc
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -2274,40 +2274,35 @@
   Register result = ToRegister(instr->result());
 
   int map_count = instr->hydrogen()->types()->length();
+  bool need_generic = instr->hydrogen()->need_generic();
+
+  if (map_count == 0 && !need_generic) {
+    DeoptimizeIf(no_condition, instr->environment());
+    return;
+  }
   Handle<String> name = instr->hydrogen()->name();
-  if (map_count == 0) {
-    ASSERT(instr->hydrogen()->need_generic());
-    __ mov(ecx, name);
-    Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-    CallCode(ic, RelocInfo::CODE_TARGET, instr);
-  } else {
-    Label done;
-    for (int i = 0; i < map_count - 1; ++i) {
-      Handle<Map> map = instr->hydrogen()->types()->at(i);
+  Label done;
+  for (int i = 0; i < map_count; ++i) {
+    bool last = (i == map_count - 1);
+    Handle<Map> map = instr->hydrogen()->types()->at(i);
+    __ cmp(FieldOperand(object, HeapObject::kMapOffset), map);
+    if (last && !need_generic) {
+      DeoptimizeIf(not_equal, instr->environment());
+      EmitLoadFieldOrConstantFunction(result, object, map, name);
+    } else {
       Label next;
-      __ cmp(FieldOperand(object, HeapObject::kMapOffset), map);
       __ j(not_equal, &next, Label::kNear);
       EmitLoadFieldOrConstantFunction(result, object, map, name);
       __ jmp(&done, Label::kNear);
       __ bind(&next);
     }
-    Handle<Map> map = instr->hydrogen()->types()->last();
-    __ cmp(FieldOperand(object, HeapObject::kMapOffset), map);
-    if (instr->hydrogen()->need_generic()) {
-      Label generic;
-      __ j(not_equal, &generic, Label::kNear);
-      EmitLoadFieldOrConstantFunction(result, object, map, name);
-      __ jmp(&done, Label::kNear);
-      __ bind(&generic);
-      __ mov(ecx, name);
-      Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-      CallCode(ic, RelocInfo::CODE_TARGET, instr);
-    } else {
-      DeoptimizeIf(not_equal, instr->environment());
-      EmitLoadFieldOrConstantFunction(result, object, map, name);
-    }
-    __ bind(&done);
   }
+  if (need_generic) {
+    __ mov(ecx, name);
+    Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+    CallCode(ic, RelocInfo::CODE_TARGET, instr);
+  }
+  __ bind(&done);
 }
 
 
@@ -2925,11 +2920,13 @@
     __ cmp(output_reg, 0x80000000u);
     DeoptimizeIf(equal, instr->environment());
   } else {
+    Label negative_sign;
     Label done;
-    // Deoptimize on negative numbers.
+    // Deoptimize on unordered.
     __ xorps(xmm_scratch, xmm_scratch);  // Zero the register.
     __ ucomisd(input_reg, xmm_scratch);
-    DeoptimizeIf(below, instr->environment());
+    DeoptimizeIf(parity_even, instr->environment());
+    __ j(below, &negative_sign, Label::kNear);
 
     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
       // Check for negative zero.
@@ -2945,10 +2942,21 @@
 
     // Use truncating instruction (OK because input is positive).
     __ cvttsd2si(output_reg, Operand(input_reg));
-
     // Overflow is signalled with minint.
     __ cmp(output_reg, 0x80000000u);
     DeoptimizeIf(equal, instr->environment());
+    __ jmp(&done, Label::kNear);
+
+    // Non-zero negative numbers reach here.
+    __ bind(&negative_sign);
+    // Truncate, then compare and compensate.
+    __ cvttsd2si(output_reg, Operand(input_reg));
+    __ cvtsi2sd(xmm_scratch, output_reg);
+    __ ucomisd(input_reg, xmm_scratch);
+    __ j(equal, &done, Label::kNear);
+    __ sub(output_reg, Immediate(1));
+    DeoptimizeIf(overflow, instr->environment());
+
     __ bind(&done);
   }
 }
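
The new negative_sign path in DoMathFloor computes floor for negative, non-NaN inputs by truncating toward zero, converting back, and subtracting one when truncation changed the value; the overflow deopt covers the minint edge case. A scalar sketch of the same idea, leaving out the NaN, minus-zero, and overflow handling that the generated code performs separately:

    #include <cassert>

    // floor for a finite, non-NaN double already known to be negative.
    int FloorOfNegative(double input) {
      int result = static_cast<int>(input);        // cvttsd2si: truncate toward zero
      if (static_cast<double>(result) != input) {  // ucomisd: truncation was inexact
        result -= 1;                               // compensate to round toward -inf
      }
      return result;  // the generated code additionally deopts on overflow
    }

    int main() {
      assert(FloorOfNegative(-2.5) == -3);
      assert(FloorOfNegative(-3.0) == -3);
      assert(FloorOfNegative(-0.25) == -1);
      return 0;
    }
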
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index 60e38a6..c31b0c2 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -2566,7 +2566,7 @@
 CodePatcher::CodePatcher(byte* address, int size)
     : address_(address),
       size_(size),
-      masm_(Isolate::Current(), address, size + Assembler::kGap) {
+      masm_(NULL, address, size + Assembler::kGap) {
   // Create a new macro assembler pointing to the address of the code to patch.
   // The size is adjusted with kGap on order for the assembler to generate size
   // bytes of instructions without failing with buffer size constraints.
diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc
index 734b89b..e148e2f 100644
--- a/src/ia32/stub-cache-ia32.cc
+++ b/src/ia32/stub-cache-ia32.cc
@@ -1129,13 +1129,20 @@
                                           name, miss);
     ASSERT(holder_reg.is(receiver) || holder_reg.is(scratch1));
 
+    // Preserve the receiver register explicitly whenever it is different from
+    // the holder and it is needed should the interceptor return without any
+    // result. The CALLBACKS case needs the receiver to be passed into C++ code;
+    // the FIELD case might cause a miss during the prototype check.
+    bool must_perform_prototype_check = *interceptor_holder != lookup->holder();
+    bool must_preserve_receiver_reg = !receiver.is(holder_reg) &&
+        (lookup->type() == CALLBACKS || must_perform_prototype_check);
+
     // Save necessary data before invoking an interceptor.
     // Requires a frame to make GC aware of pushed pointers.
     {
       FrameScope frame_scope(masm(), StackFrame::INTERNAL);
 
-      if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
-        // CALLBACKS case needs a receiver to be passed into C++ callback.
+      if (must_preserve_receiver_reg) {
         __ push(receiver);
       }
       __ push(holder_reg);
@@ -1158,10 +1165,17 @@
       frame_scope.GenerateLeaveFrame();
       __ ret(0);
 
+      // Clobber registers when generating debug-code to provoke errors.
       __ bind(&interceptor_failed);
+      if (FLAG_debug_code) {
+        __ mov(receiver, Immediate(BitCast<int32_t>(kZapValue)));
+        __ mov(holder_reg, Immediate(BitCast<int32_t>(kZapValue)));
+        __ mov(name_reg, Immediate(BitCast<int32_t>(kZapValue)));
+      }
+
       __ pop(name_reg);
       __ pop(holder_reg);
-      if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+      if (must_preserve_receiver_reg) {
         __ pop(receiver);
       }
 
@@ -1170,7 +1184,7 @@
 
     // Check that the maps from interceptor's holder to lookup's holder
     // haven't changed.  And load lookup's holder into holder_reg.
-    if (*interceptor_holder != lookup->holder()) {
+    if (must_perform_prototype_check) {
       holder_reg = CheckPrototypes(interceptor_holder,
                                    holder_reg,
                                    Handle<JSObject>(lookup->holder()),
diff --git a/src/ic.cc b/src/ic.cc
index 643fa88..9772b94 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -352,9 +352,9 @@
       return KeyedStoreIC::Clear(address, target);
     case Code::CALL_IC: return CallIC::Clear(address, target);
     case Code::KEYED_CALL_IC:  return KeyedCallIC::Clear(address, target);
+    case Code::COMPARE_IC: return CompareIC::Clear(address, target);
     case Code::UNARY_OP_IC:
     case Code::BINARY_OP_IC:
-    case Code::COMPARE_IC:
     case Code::TO_BOOLEAN_IC:
       // Clearing these is tricky and does not
       // make any performance difference.
@@ -365,9 +365,8 @@
 
 
 void CallICBase::Clear(Address address, Code* target) {
+  if (target->ic_state() == UNINITIALIZED) return;
   bool contextual = CallICBase::Contextual::decode(target->extra_ic_state());
-  State state = target->ic_state();
-  if (state == UNINITIALIZED) return;
   Code* code =
       Isolate::Current()->stub_cache()->FindCallInitialize(
           target->arguments_count(),
@@ -410,6 +409,17 @@
 }
 
 
+void CompareIC::Clear(Address address, Code* target) {
+  // Only clear ICCompareStubs; we currently cannot clear generic CompareStubs.
+  if (target->major_key() != CodeStub::CompareIC) return;
+  // Only clear CompareICs that can retain objects.
+  if (target->compare_state() != KNOWN_OBJECTS) return;
+  Token::Value op = CompareIC::ComputeOperation(target);
+  SetTargetAtAddress(address, GetRawUninitialized(op));
+  PatchInlinedSmiCode(address, DISABLE_INLINED_SMI_CHECK);
+}
+
+
 static bool HasInterceptorGetter(JSObject* object) {
   return !object->GetNamedInterceptor()->getter()->IsUndefined();
 }
@@ -2396,7 +2406,7 @@
 
     // Activate inlined smi code.
     if (previous_type == BinaryOpIC::UNINITIALIZED) {
-      PatchInlinedSmiCode(ic.address());
+      PatchInlinedSmiCode(ic.address(), ENABLE_INLINED_SMI_CHECK);
     }
   }
 
@@ -2457,6 +2467,14 @@
 }
 
 
+Code* CompareIC::GetRawUninitialized(Token::Value op) {
+  ICCompareStub stub(op, UNINITIALIZED);
+  Code* code = NULL;
+  CHECK(stub.FindCodeInCache(&code));
+  return code;
+}
+
+
 Handle<Code> CompareIC::GetUninitialized(Token::Value op) {
   ICCompareStub stub(op, UNINITIALIZED);
   return stub.GetCode();
@@ -2471,6 +2489,12 @@
 }
 
 
+Token::Value CompareIC::ComputeOperation(Code* target) {
+  ASSERT(target->major_key() == CodeStub::CompareIC);
+  return static_cast<Token::Value>(target->compare_operation());
+}
+
+
 const char* CompareIC::GetStateName(State state) {
   switch (state) {
     case UNINITIALIZED: return "UNINITIALIZED";
diff --git a/src/ic.h b/src/ic.h
index 5662552..3b44abf 100644
--- a/src/ic.h
+++ b/src/ic.h
@@ -794,6 +794,9 @@
   // Helper function for determining the state of a compare IC.
   static State ComputeState(Code* target);
 
+  // Helper function for determining the operation a compare IC is for.
+  static Token::Value ComputeOperation(Code* target);
+
   static const char* GetStateName(State state);
 
  private:
@@ -804,7 +807,13 @@
   Condition GetCondition() const { return ComputeCondition(op_); }
   State GetState() { return ComputeState(target()); }
 
+  static Code* GetRawUninitialized(Token::Value op);
+
+  static void Clear(Address address, Code* target);
+
   Token::Value op_;
+
+  friend class IC;
 };
 
 
@@ -817,7 +826,8 @@
 
 
 // Helper for BinaryOpIC and CompareIC.
-void PatchInlinedSmiCode(Address address);
+enum InlinedSmiCheck { ENABLE_INLINED_SMI_CHECK, DISABLE_INLINED_SMI_CHECK };
+void PatchInlinedSmiCode(Address address, InlinedSmiCheck check);
 
 } }  // namespace v8::internal
 
diff --git a/src/incremental-marking-inl.h b/src/incremental-marking-inl.h
index 3e3d6c4..5ce003f 100644
--- a/src/incremental-marking-inl.h
+++ b/src/incremental-marking-inl.h
@@ -100,7 +100,7 @@
   int64_t old_bytes_rescanned = bytes_rescanned_;
   bytes_rescanned_ = old_bytes_rescanned + obj_size;
   if ((bytes_rescanned_ >> 20) != (old_bytes_rescanned >> 20)) {
-    if (bytes_rescanned_ > 2 * heap_->PromotedSpaceSize()) {
+    if (bytes_rescanned_ > 2 * heap_->PromotedSpaceSizeOfObjects()) {
       // If we have queued twice the heap size for rescanning then we are
       // going around in circles, scanning the same objects again and again
       // as the program mutates the heap faster than we can incrementally
diff --git a/src/incremental-marking.cc b/src/incremental-marking.cc
index 2413b67..5b58c9d 100644
--- a/src/incremental-marking.cc
+++ b/src/incremental-marking.cc
@@ -951,7 +951,7 @@
 
 
 int64_t IncrementalMarking::SpaceLeftInOldSpace() {
-  return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSize();
+  return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSizeOfObjects();
 }
 
 } }  // namespace v8::internal
diff --git a/src/list-inl.h b/src/list-inl.h
index 35ee3f5..6cf3bad 100644
--- a/src/list-inl.h
+++ b/src/list-inl.h
@@ -137,6 +137,14 @@
 
 
 template<typename T, class P>
+void List<T, P>::Allocate(int length) {
+  DeleteData(data_);
+  Initialize(length);
+  length_ = length;
+}
+
+
+template<typename T, class P>
 void List<T, P>::Clear() {
   DeleteData(data_);
   Initialize(0);
diff --git a/src/list.h b/src/list.h
index a210dfb..7350c0d 100644
--- a/src/list.h
+++ b/src/list.h
@@ -117,6 +117,9 @@
   // pointer type. Returns the removed element.
   INLINE(T RemoveLast()) { return Remove(length_ - 1); }
 
+  // Deletes current list contents and allocates space for 'length' elements.
+  INLINE(void Allocate(int length));
+
   // Clears the list by setting the length to zero. Even if T is a
   // pointer type, clearing the list doesn't delete the entries.
   INLINE(void Clear());
diff --git a/src/liveedit.cc b/src/liveedit.cc
index 9c5294a..22b8250 100644
--- a/src/liveedit.cc
+++ b/src/liveedit.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -30,6 +30,7 @@
 
 #include "liveedit.h"
 
+#include "code-stubs.h"
 #include "compilation-cache.h"
 #include "compiler.h"
 #include "debug.h"
@@ -1475,26 +1476,36 @@
   // Check the nature of the top frame.
   Isolate* isolate = Isolate::Current();
   Code* pre_top_frame_code = pre_top_frame->LookupCode();
+  bool frame_has_padding;
   if (pre_top_frame_code->is_inline_cache_stub() &&
       pre_top_frame_code->ic_state() == DEBUG_BREAK) {
     // OK, we can drop inline cache calls.
     *mode = Debug::FRAME_DROPPED_IN_IC_CALL;
+    frame_has_padding = Debug::FramePaddingLayout::kIsSupported;
   } else if (pre_top_frame_code ==
              isolate->debug()->debug_break_slot()) {
     // OK, we can drop debug break slot.
     *mode = Debug::FRAME_DROPPED_IN_DEBUG_SLOT_CALL;
+    frame_has_padding = Debug::FramePaddingLayout::kIsSupported;
   } else if (pre_top_frame_code ==
       isolate->builtins()->builtin(
           Builtins::kFrameDropper_LiveEdit)) {
     // OK, we can drop our own code.
     *mode = Debug::FRAME_DROPPED_IN_DIRECT_CALL;
+    frame_has_padding = false;
   } else if (pre_top_frame_code ==
       isolate->builtins()->builtin(Builtins::kReturn_DebugBreak)) {
     *mode = Debug::FRAME_DROPPED_IN_RETURN_CALL;
+    frame_has_padding = Debug::FramePaddingLayout::kIsSupported;
   } else if (pre_top_frame_code->kind() == Code::STUB &&
-      pre_top_frame_code->major_key()) {
-    // Entry from our unit tests, it's fine, we support this case.
+      pre_top_frame_code->major_key() == CodeStub::CEntry) {
+    // Entry from our unit tests on 'debugger' statement.
+    // It's fine, we support this case.
     *mode = Debug::FRAME_DROPPED_IN_DIRECT_CALL;
+    // We don't have a padding from 'debugger' statement call.
+    // Here the stub is CEntry; it's not debug-only and can't be padded.
+    // If anyone would complain, a proxy padded stub could be added.
+    frame_has_padding = false;
   } else {
     return "Unknown structure of stack above changing function";
   }
@@ -1504,8 +1515,49 @@
       - Debug::kFrameDropperFrameSize * kPointerSize  // Size of the new frame.
       + kPointerSize;  // Bigger address end is exclusive.
 
+  Address* top_frame_pc_address = top_frame->pc_address();
+
+  // top_frame may be damaged below this point. Do not use it.
+  ASSERT(!(top_frame = NULL));
+
   if (unused_stack_top > unused_stack_bottom) {
-    return "Not enough space for frame dropper frame";
+    if (frame_has_padding) {
+      int shortage_bytes =
+          static_cast<int>(unused_stack_top - unused_stack_bottom);
+
+      Address padding_start = pre_top_frame->fp() -
+          Debug::FramePaddingLayout::kFrameBaseSize * kPointerSize;
+
+      Address padding_pointer = padding_start;
+      Smi* padding_object =
+          Smi::FromInt(Debug::FramePaddingLayout::kPaddingValue);
+      while (Memory::Object_at(padding_pointer) == padding_object) {
+        padding_pointer -= kPointerSize;
+      }
+      int padding_counter =
+          Smi::cast(Memory::Object_at(padding_pointer))->value();
+      if (padding_counter * kPointerSize < shortage_bytes) {
+        return "Not enough space for frame dropper frame "
+            "(even with padding frame)";
+      }
+      Memory::Object_at(padding_pointer) =
+          Smi::FromInt(padding_counter - shortage_bytes / kPointerSize);
+
+      StackFrame* pre_pre_frame = frames[top_frame_index - 2];
+
+      memmove(padding_start + kPointerSize - shortage_bytes,
+          padding_start + kPointerSize,
+          Debug::FramePaddingLayout::kFrameBaseSize * kPointerSize);
+
+      pre_top_frame->UpdateFp(pre_top_frame->fp() - shortage_bytes);
+      pre_pre_frame->SetCallerFp(pre_top_frame->fp());
+      unused_stack_top -= shortage_bytes;
+
+      STATIC_ASSERT(sizeof(Address) == kPointerSize);
+      top_frame_pc_address -= shortage_bytes / kPointerSize;
+    } else {
+      return "Not enough space for frame dropper frame";
+    }
   }
 
   // Committing now. After this point we should return only NULL value.
@@ -1515,7 +1567,7 @@
   ASSERT(!FixTryCatchHandler(pre_top_frame, bottom_js_frame));
 
   Handle<Code> code = Isolate::Current()->builtins()->FrameDropper_LiveEdit();
-  top_frame->set_pc(code->entry());
+  *top_frame_pc_address = code->entry();
   pre_top_frame->SetCallerFp(bottom_js_frame->fp());
 
   *restarter_frame_function_pointer =
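
When the frame-dropper frame does not fit, the new code walks down past the padding words pushed by the debug-break stub, reads the padding counter, and checks whether counter * kPointerSize covers the shortage; if so, it decrements the counter by the consumed words and shifts the frame base down. A small sketch of just that bookkeeping (the word size is an assumption of the sketch):

    #include <cstdio>

    const int kPointerSize = 4;  // word size, an assumption of this sketch

    // Returns the padding counter left after absorbing the shortage, or -1 when
    // the padding cannot absorb it (the "not enough space" bail-out above).
    int ConsumePadding(int padding_counter, int shortage_bytes) {
      if (padding_counter * kPointerSize < shortage_bytes) return -1;
      return padding_counter - shortage_bytes / kPointerSize;
    }

    int main() {
      printf("%d\n", ConsumePadding(10, 24));  // 4: six words consumed, four left
      printf("%d\n", ConsumePadding(2, 24));   // -1: padding too small, bail out
      return 0;
    }
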
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index 507ad84..0aa1192 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -680,7 +680,6 @@
 
   ASSERT(!FLAG_never_compact || !FLAG_always_compact);
 
-  if (collect_maps_) CreateBackPointers();
 #ifdef ENABLE_GDB_JIT_INTERFACE
   if (FLAG_gdbjit) {
     // If GDBJIT interface is active disable compaction.
@@ -1186,16 +1185,7 @@
     Heap* heap = map->GetHeap();
     Code* code = reinterpret_cast<Code*>(object);
     if (FLAG_cleanup_code_caches_at_gc) {
-      Object* raw_info = code->type_feedback_info();
-      if (raw_info->IsTypeFeedbackInfo()) {
-        TypeFeedbackCells* type_feedback_cells =
-            TypeFeedbackInfo::cast(raw_info)->type_feedback_cells();
-        for (int i = 0; i < type_feedback_cells->CellCount(); i++) {
-          ASSERT(type_feedback_cells->AstId(i)->IsSmi());
-          JSGlobalPropertyCell* cell = type_feedback_cells->Cell(i);
-          cell->set_value(TypeFeedbackCells::RawUninitializedSentinel(heap));
-        }
-      }
+      code->ClearTypeFeedbackCells(heap);
     }
     code->CodeIterateBody<StaticMarkingVisitor>(heap);
   }
@@ -1825,13 +1815,19 @@
 void MarkCompactCollector::MarkMapContents(Map* map) {
   // Mark prototype transitions array but don't push it into marking stack.
   // This will make references from it weak. We will clean dead prototype
-  // transitions in ClearNonLiveTransitions.
-  FixedArray* prototype_transitions = map->prototype_transitions();
-  MarkBit mark = Marking::MarkBitFrom(prototype_transitions);
-  if (!mark.Get()) {
-    mark.Set();
-    MemoryChunk::IncrementLiveBytesFromGC(prototype_transitions->address(),
-                                          prototype_transitions->Size());
+  // transitions in ClearNonLiveTransitions. But make sure that back pointers
+  // stored inside prototype transitions arrays are marked.
+  Object* raw_proto_transitions = map->unchecked_prototype_transitions();
+  if (raw_proto_transitions->IsFixedArray()) {
+    FixedArray* prototype_transitions = FixedArray::cast(raw_proto_transitions);
+    MarkBit mark = Marking::MarkBitFrom(prototype_transitions);
+    if (!mark.Get()) {
+      mark.Set();
+      MemoryChunk::IncrementLiveBytesFromGC(prototype_transitions->address(),
+                                            prototype_transitions->Size());
+      MarkObjectAndPush(HeapObject::cast(
+          prototype_transitions->get(Map::kProtoTransitionBackPointerOffset)));
+    }
   }
 
   Object** raw_descriptor_array_slot =
@@ -1930,23 +1926,6 @@
 }
 
 
-void MarkCompactCollector::CreateBackPointers() {
-  HeapObjectIterator iterator(heap()->map_space());
-  for (HeapObject* next_object = iterator.Next();
-       next_object != NULL; next_object = iterator.Next()) {
-    if (next_object->IsMap()) {  // Could also be FreeSpace object on free list.
-      Map* map = Map::cast(next_object);
-      STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
-      if (map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
-        map->CreateBackPointers();
-      } else {
-        ASSERT(map->instance_descriptors() == heap()->empty_descriptor_array());
-      }
-    }
-  }
-}
-
-
 // Fill the marking stack with overflowed objects returned by the given
 // iterator.  Stop when the marking stack is filled or the end of the space
 // is reached, whichever comes first.
@@ -2470,15 +2449,8 @@
 void MarkCompactCollector::ClearNonLiveTransitions() {
   HeapObjectIterator map_iterator(heap()->map_space());
   // Iterate over the map space, setting map transitions that go from
-  // a marked map to an unmarked map to null transitions.  At the same time,
-  // set all the prototype fields of maps back to their original value,
-  // dropping the back pointers temporarily stored in the prototype field.
-  // Setting the prototype field requires following the linked list of
-  // back pointers, reversing them all at once.  This allows us to find
-  // those maps with map transitions that need to be nulled, and only
-  // scan the descriptor arrays of those maps, not all maps.
-  // All of these actions are carried out only on maps of JSObjects
-  // and related subtypes.
+  // a marked map to an unmarked map to null transitions.  This action
+  // is carried out only on maps of JSObjects and related subtypes.
   for (HeapObject* obj = map_iterator.Next();
        obj != NULL; obj = map_iterator.Next()) {
     Map* map = reinterpret_cast<Map*>(obj);
@@ -2554,36 +2526,16 @@
 
 void MarkCompactCollector::ClearNonLiveMapTransitions(Map* map,
                                                       MarkBit map_mark) {
-  // Follow the chain of back pointers to find the prototype.
-  Object* real_prototype = map;
-  while (real_prototype->IsMap()) {
-    real_prototype = Map::cast(real_prototype)->prototype();
-    ASSERT(real_prototype->IsHeapObject());
-  }
+  Object* potential_parent = map->GetBackPointer();
+  if (!potential_parent->IsMap()) return;
+  Map* parent = Map::cast(potential_parent);
 
-  // Follow back pointers, setting them to prototype, clearing map transitions
-  // when necessary.
-  Map* current = map;
+  // Follow the back pointer to check whether this is a transition from a live
+  // map onto a dead path; if so, clear the parent's transitions.
   bool current_is_alive = map_mark.Get();
-  bool on_dead_path = !current_is_alive;
-  while (current->IsMap()) {
-    Object* next = current->prototype();
-    // There should never be a dead map above a live map.
-    ASSERT(on_dead_path || current_is_alive);
-
-    // A live map above a dead map indicates a dead transition. This test will
-    // always be false on the first iteration.
-    if (on_dead_path && current_is_alive) {
-      on_dead_path = false;
-      current->ClearNonLiveTransitions(heap(), real_prototype);
-    }
-
-    Object** slot = HeapObject::RawField(current, Map::kPrototypeOffset);
-    *slot = real_prototype;
-    if (current_is_alive) RecordSlot(slot, slot, real_prototype);
-
-    current = reinterpret_cast<Map*>(next);
-    current_is_alive = Marking::MarkBitFrom(current).Get();
+  bool parent_is_alive = Marking::MarkBitFrom(parent).Get();
+  if (!current_is_alive && parent_is_alive) {
+    parent->ClearNonLiveTransitions(heap());
   }
 }
 
@@ -3838,7 +3790,7 @@
   bool lazy_sweeping_active = false;
   bool unused_page_present = false;
 
-  intptr_t old_space_size = heap()->PromotedSpaceSize();
+  intptr_t old_space_size = heap()->PromotedSpaceSizeOfObjects();
   intptr_t space_left =
       Min(heap()->OldGenPromotionLimit(old_space_size),
           heap()->OldGenAllocationLimit(old_space_size)) - old_space_size;
diff --git a/src/mark-compact.h b/src/mark-compact.h
index f8488bb..6420a21 100644
--- a/src/mark-compact.h
+++ b/src/mark-compact.h
@@ -642,13 +642,6 @@
 
   void ProcessNewlyMarkedObject(HeapObject* obj);
 
-  // Creates back pointers for all map transitions, stores them in
-  // the prototype field.  The original prototype pointers are restored
-  // in ClearNonLiveTransitions().  All JSObject maps
-  // connected by map transitions have the same prototype object, which
-  // is why we can use this field temporarily for back pointers.
-  void CreateBackPointers();
-
   // Mark a Map and its DescriptorArray together, skipping transitions.
   void MarkMapContents(Map* map);
   void MarkAccessorPairSlot(HeapObject* accessors, int offset);
diff --git a/src/messages.js b/src/messages.js
index a3adcf8..f8b5766 100644
--- a/src/messages.js
+++ b/src/messages.js
@@ -1125,13 +1125,7 @@
     }
     %FunctionSetInstanceClassName(f, 'Error');
     %SetProperty(f.prototype, 'constructor', f, DONT_ENUM);
-    // The name property on the prototype of error objects is not
-    // specified as being read-one and dont-delete. However, allowing
-    // overwriting allows leaks of error objects between script blocks
-    // in the same context in a browser setting. Therefore we fix the
-    // name.
-    %SetProperty(f.prototype, "name", name,
-                 DONT_ENUM | DONT_DELETE | READ_ONLY)  ;
+    %SetProperty(f.prototype, "name", name, DONT_ENUM);
     %SetCode(f, function(m) {
       if (%_IsConstructCall()) {
         // Define all the expected properties directly on the error
@@ -1147,10 +1141,8 @@
               return FormatMessage(%NewMessageObject(obj.type, obj.arguments));
           });
         } else if (!IS_UNDEFINED(m)) {
-          %IgnoreAttributesAndSetProperty(this,
-                                          'message',
-                                          ToString(m),
-                                          DONT_ENUM);
+          %IgnoreAttributesAndSetProperty(
+            this, 'message', ToString(m), DONT_ENUM);
         }
         captureStackTrace(this, f);
       } else {
@@ -1180,16 +1172,41 @@
 var visited_errors = new InternalArray();
 var cyclic_error_marker = new $Object();
 
+function GetPropertyWithoutInvokingMonkeyGetters(error, name) {
+  // Climb the prototype chain until we find the holder.
+  while (error && !%HasLocalProperty(error, name)) {
+    error = error.__proto__;
+  }
+  if (error === null) return void 0;
+  if (!IS_OBJECT(error)) return error[name];
+  // If the property is an accessor on one of the predefined errors that can be
+  // generated statically by the compiler, don't touch it. This is to address
+  // http://code.google.com/p/chromium/issues/detail?id=69187
+  var desc = %GetOwnProperty(error, name);
+  if (desc && desc[IS_ACCESSOR_INDEX]) {
+    var isName = name === "name";
+    if (error === $ReferenceError.prototype)
+      return isName ? "ReferenceError" : void 0;
+    if (error === $SyntaxError.prototype)
+      return isName ? "SyntaxError" : void 0;
+    if (error === $TypeError.prototype)
+      return isName ? "TypeError" : void 0;
+  }
+  // Otherwise, read normally.
+  return error[name];
+}
+
 function ErrorToStringDetectCycle(error) {
   if (!%PushIfAbsent(visited_errors, error)) throw cyclic_error_marker;
   try {
-    var type = error.type;
-    var name = error.name;
+    var type = GetPropertyWithoutInvokingMonkeyGetters(error, "type");
+    var name = GetPropertyWithoutInvokingMonkeyGetters(error, "name");
     name = IS_UNDEFINED(name) ? "Error" : TO_STRING_INLINE(name);
-    var message = error.message;
+    var message = GetPropertyWithoutInvokingMonkeyGetters(error, "message");
     var hasMessage = %_CallFunction(error, "message", ObjectHasOwnProperty);
     if (type && !hasMessage) {
-      message = FormatMessage(%NewMessageObject(type, error.arguments));
+      var args = GetPropertyWithoutInvokingMonkeyGetters(error, "arguments");
+      message = FormatMessage(%NewMessageObject(type, args));
     }
     message = IS_UNDEFINED(message) ? "" : TO_STRING_INLINE(message);
     if (name === "") return message;
diff --git a/src/mips/debug-mips.cc b/src/mips/debug-mips.cc
index 83f5f50..3be1e4d 100644
--- a/src/mips/debug-mips.cc
+++ b/src/mips/debug-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -116,6 +116,8 @@
                      Assembler::kDebugBreakSlotInstructions);
 }
 
+const bool Debug::FramePaddingLayout::kIsSupported = false;
+
 
 #define __ ACCESS_MASM(masm)
 
diff --git a/src/mips/ic-mips.cc b/src/mips/ic-mips.cc
index 32da2df..964a7e2 100644
--- a/src/mips/ic-mips.cc
+++ b/src/mips/ic-mips.cc
@@ -1688,12 +1688,12 @@
 
   // Activate inlined smi code.
   if (previous_state == UNINITIALIZED) {
-    PatchInlinedSmiCode(address());
+    PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
   }
 }
 
 
-void PatchInlinedSmiCode(Address address) {
+void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
   Address andi_instruction_address =
       address + Assembler::kCallTargetAddressOffset;
 
@@ -1727,33 +1727,30 @@
   Instr instr_at_patch = Assembler::instr_at(patch_address);
   Instr branch_instr =
       Assembler::instr_at(patch_address + Instruction::kInstrSize);
-  ASSERT(Assembler::IsAndImmediate(instr_at_patch));
-  ASSERT_EQ(0, Assembler::GetImmediate16(instr_at_patch));
+  // This is patching a conditional "jump if not smi/jump if smi" site.
+  // Enabling by changing from
+  //   andi at, rx, 0
+  //   Branch <target>, eq, at, Operand(zero_reg)
+  // to:
+  //   andi at, rx, #kSmiTagMask
+  //   Branch <target>, ne, at, Operand(zero_reg)
+  // and vice-versa to be disabled again.
+  CodePatcher patcher(patch_address, 2);
+  Register reg = Register::from_code(Assembler::GetRs(instr_at_patch));
+  if (check == ENABLE_INLINED_SMI_CHECK) {
+    ASSERT(Assembler::IsAndImmediate(instr_at_patch));
+    ASSERT_EQ(0, Assembler::GetImmediate16(instr_at_patch));
+    patcher.masm()->andi(at, reg, kSmiTagMask);
+  } else {
+    ASSERT(check == DISABLE_INLINED_SMI_CHECK);
+    ASSERT(Assembler::IsAndImmediate(instr_at_patch));
+    patcher.masm()->andi(at, reg, 0);
+  }
   ASSERT(Assembler::IsBranch(branch_instr));
   if (Assembler::IsBeq(branch_instr)) {
-    // This is patching a "jump if not smi" site to be active.
-    // Changing:
-    //   andi at, rx, 0
-    //   Branch <target>, eq, at, Operand(zero_reg)
-    // to:
-    //   andi at, rx, #kSmiTagMask
-    //   Branch <target>, ne, at, Operand(zero_reg)
-    CodePatcher patcher(patch_address, 2);
-    Register reg = Register::from_code(Assembler::GetRs(instr_at_patch));
-    patcher.masm()->andi(at, reg, kSmiTagMask);
     patcher.ChangeBranchCondition(ne);
   } else {
     ASSERT(Assembler::IsBne(branch_instr));
-    // This is patching a "jump if smi" site to be active.
-    // Changing:
-    //   andi at, rx, 0
-    //   Branch <target>, ne, at, Operand(zero_reg)
-    // to:
-    //   andi at, rx, #kSmiTagMask
-    //   Branch <target>, eq, at, Operand(zero_reg)
-    CodePatcher patcher(patch_address, 2);
-    Register reg = Register::from_code(Assembler::GetRs(instr_at_patch));
-    patcher.masm()->andi(at, reg, kSmiTagMask);
     patcher.ChangeBranchCondition(eq);
   }
 }
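
On MIPS the same enable/disable idea is expressed by repatching the andi immediate and flipping the branch condition, as the new comment above describes. A tiny model of the repatching rule; PatchSite is a stand-in struct, not a V8 type:

    #include <cassert>

    enum InlinedSmiCheck { ENABLE_INLINED_SMI_CHECK, DISABLE_INLINED_SMI_CHECK };
    enum Cond { eq, ne };
    const int kSmiTagMask = 1;

    struct PatchSite {
      int andi_immediate;  // 0 when the check is disabled, kSmiTagMask when enabled
      Cond branch_cond;    // flipped between eq and ne on every repatch
    };

    // Mirrors the rule above: the immediate follows the requested state, and the
    // branch condition is simply inverted from whatever it currently is.
    PatchSite Repatch(PatchSite site, InlinedSmiCheck check) {
      site.andi_immediate = (check == ENABLE_INLINED_SMI_CHECK) ? kSmiTagMask : 0;
      site.branch_cond = (site.branch_cond == eq) ? ne : eq;
      return site;
    }

    int main() {
      PatchSite site = { 0, eq };  // disabled "jump if not smi" site
      site = Repatch(site, ENABLE_INLINED_SMI_CHECK);
      assert(site.andi_immediate == kSmiTagMask && site.branch_cond == ne);
      site = Repatch(site, DISABLE_INLINED_SMI_CHECK);
      assert(site.andi_immediate == 0 && site.branch_cond == eq);
      return 0;
    }
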
diff --git a/src/mips/lithium-codegen-mips.cc b/src/mips/lithium-codegen-mips.cc
index 762e15c..986921f 100644
--- a/src/mips/lithium-codegen-mips.cc
+++ b/src/mips/lithium-codegen-mips.cc
@@ -2344,39 +2344,35 @@
   Register result = ToRegister(instr->result());
   Register scratch = scratch0();
   int map_count = instr->hydrogen()->types()->length();
+  bool need_generic = instr->hydrogen()->need_generic();
+
+  if (map_count == 0 && !need_generic) {
+    DeoptimizeIf(al, instr->environment());
+    return;
+  }
   Handle<String> name = instr->hydrogen()->name();
-  if (map_count == 0) {
-    ASSERT(instr->hydrogen()->need_generic());
-    __ li(a2, Operand(name));
-    Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-    CallCode(ic, RelocInfo::CODE_TARGET, instr);
-  } else {
-    Label done;
-    __ lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
-    for (int i = 0; i < map_count - 1; ++i) {
-      Handle<Map> map = instr->hydrogen()->types()->at(i);
+  Label done;
+  __ lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+  for (int i = 0; i < map_count; ++i) {
+    bool last = (i == map_count - 1);
+    Handle<Map> map = instr->hydrogen()->types()->at(i);
+    if (last && !need_generic) {
+      Handle<Map> map = instr->hydrogen()->types()->last();
+      DeoptimizeIf(ne, instr->environment(), scratch, Operand(map));
+    } else {
       Label next;
       __ Branch(&next, ne, scratch, Operand(map));
       EmitLoadFieldOrConstantFunction(result, object, map, name);
       __ Branch(&done);
       __ bind(&next);
     }
-    Handle<Map> map = instr->hydrogen()->types()->last();
-    if (instr->hydrogen()->need_generic()) {
-      Label generic;
-      __ Branch(&generic, ne, scratch, Operand(map));
-      EmitLoadFieldOrConstantFunction(result, object, map, name);
-      __ Branch(&done);
-      __ bind(&generic);
-      __ li(a2, Operand(name));
-      Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-      CallCode(ic, RelocInfo::CODE_TARGET, instr);
-    } else {
-      DeoptimizeIf(ne, instr->environment(), scratch, Operand(map));
-      EmitLoadFieldOrConstantFunction(result, object, map, name);
-    }
-    __ bind(&done);
   }
+  if (need_generic) {
+    __ li(a2, Operand(name));
+    Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+    CallCode(ic, RelocInfo::CODE_TARGET, instr);
+  }
+  __ bind(&done);
 }
 
 
@@ -3225,7 +3221,7 @@
   // state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
   __ And(a3, a1, Operand(0xFFFF));
   __ li(t0, Operand(18273));
-  __ mul(a3, a3, t0);
+  __ Mul(a3, a3, t0);
   __ srl(a1, a1, 16);
   __ Addu(a1, a3, a1);
   // Save state[0].
@@ -3234,7 +3230,7 @@
   // state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
   __ And(a3, a0, Operand(0xFFFF));
   __ li(t0, Operand(36969));
-  __ mul(a3, a3, t0);
+  __ Mul(a3, a3, t0);
   __ srl(a0, a0, 16),
   __ Addu(a0, a3, a0);
   // Save state[1].
diff --git a/src/mips/macro-assembler-mips.cc b/src/mips/macro-assembler-mips.cc
index e93a417..2c2445b 100644
--- a/src/mips/macro-assembler-mips.cc
+++ b/src/mips/macro-assembler-mips.cc
@@ -5378,7 +5378,7 @@
     : address_(address),
       instructions_(instructions),
       size_(instructions * Assembler::kInstrSize),
-      masm_(Isolate::Current(), address, size_ + Assembler::kGap) {
+      masm_(NULL, address, size_ + Assembler::kGap) {
   // Create a new macro assembler pointing to the address of the code to patch.
   // The size is adjusted with kGap on order for the assembler to generate size
   // bytes of instructions without failing with buffer size constraints.
diff --git a/src/mips/stub-cache-mips.cc b/src/mips/stub-cache-mips.cc
index 676b523..18a5f5f 100644
--- a/src/mips/stub-cache-mips.cc
+++ b/src/mips/stub-cache-mips.cc
@@ -1287,12 +1287,19 @@
                                           name, miss);
     ASSERT(holder_reg.is(receiver) || holder_reg.is(scratch1));
 
+    // Preserve the receiver register explicitly whenever it is different from
+    // the holder and it is needed should the interceptor return without any
+    // result. The CALLBACKS case needs the receiver to be passed into C++ code;
+    // the FIELD case might cause a miss during the prototype check.
+    bool must_perform_prototype_check = *interceptor_holder != lookup->holder();
+    bool must_preserve_receiver_reg = !receiver.is(holder_reg) &&
+        (lookup->type() == CALLBACKS || must_perform_prototype_check);
+
     // Save necessary data before invoking an interceptor.
     // Requires a frame to make GC aware of pushed pointers.
     {
       FrameScope frame_scope(masm(), StackFrame::INTERNAL);
-      if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
-        // CALLBACKS case needs a receiver to be passed into C++ callback.
+      if (must_preserve_receiver_reg) {
         __ Push(receiver, holder_reg, name_reg);
       } else {
         __ Push(holder_reg, name_reg);
@@ -1316,14 +1323,14 @@
       __ bind(&interceptor_failed);
       __ pop(name_reg);
       __ pop(holder_reg);
-      if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+      if (must_preserve_receiver_reg) {
         __ pop(receiver);
       }
       // Leave the internal frame.
     }
     // Check that the maps from interceptor's holder to lookup's holder
     // haven't changed.  And load lookup's holder into |holder| register.
-    if (*interceptor_holder != lookup->holder()) {
+    if (must_perform_prototype_check) {
       holder_reg = CheckPrototypes(interceptor_holder,
                                    holder_reg,
                                    Handle<JSObject>(lookup->holder()),
diff --git a/src/objects-debug.cc b/src/objects-debug.cc
index cd2ccf8..3bfb74d 100644
--- a/src/objects-debug.cc
+++ b/src/objects-debug.cc
@@ -303,6 +303,8 @@
           instance_size() < HEAP->Capacity()));
   VerifyHeapPointer(prototype());
   VerifyHeapPointer(instance_descriptors());
+  SLOW_ASSERT(instance_descriptors()->IsSortedNoDuplicates());
+  SLOW_ASSERT(instance_descriptors()->IsConsistentWithBackPointers(this));
 }
 
 
@@ -894,6 +896,61 @@
 }
 
 
+static bool CheckOneBackPointer(Map* current_map, Object* target) {
+  return !target->IsMap() || Map::cast(target)->GetBackPointer() == current_map;
+}
+
+
+bool DescriptorArray::IsConsistentWithBackPointers(Map* current_map) {
+  for (int i = 0; i < number_of_descriptors(); ++i) {
+    switch (GetType(i)) {
+      case MAP_TRANSITION:
+      case CONSTANT_TRANSITION:
+        if (!CheckOneBackPointer(current_map, GetValue(i))) {
+          return false;
+        }
+        break;
+      case ELEMENTS_TRANSITION: {
+        Object* object = GetValue(i);
+        if (!CheckOneBackPointer(current_map, object)) {
+          return false;
+        }
+        if (object->IsFixedArray()) {
+          FixedArray* array = FixedArray::cast(object);
+          for (int i = 0; i < array->length(); ++i) {
+            if (!CheckOneBackPointer(current_map, array->get(i))) {
+              return false;
+            }
+          }
+        }
+        break;
+      }
+      case CALLBACKS: {
+        Object* object = GetValue(i);
+        if (object->IsAccessorPair()) {
+          AccessorPair* accessors = AccessorPair::cast(object);
+          if (!CheckOneBackPointer(current_map, accessors->getter())) {
+            return false;
+          }
+          if (!CheckOneBackPointer(current_map, accessors->setter())) {
+            return false;
+          }
+        }
+        break;
+      }
+      case NORMAL:
+      case FIELD:
+      case CONSTANT_FUNCTION:
+      case HANDLER:
+      case INTERCEPTOR:
+      case NULL_DESCRIPTOR:
+        break;
+    }
+  }
+  return true;
+}
+
+
 void JSFunctionResultCache::JSFunctionResultCacheVerify() {
   JSFunction::cast(get(kFactoryIndex))->Verify();
 
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 459420c..eb1586a 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -3193,6 +3193,18 @@
 }
 
 
+byte Code::compare_operation() {
+  ASSERT(is_compare_ic_stub());
+  return READ_BYTE_FIELD(this, kCompareOperationOffset);
+}
+
+
+void Code::set_compare_operation(byte value) {
+  ASSERT(is_compare_ic_stub());
+  WRITE_BYTE_FIELD(this, kCompareOperationOffset, value);
+}
+
+
 byte Code::to_boolean_state() {
   ASSERT(is_to_boolean_ic_stub());
   return READ_BYTE_FIELD(this, kToBooleanTypeOffset);
@@ -3395,14 +3407,66 @@
 }
 
 
-FixedArray* Map::unchecked_prototype_transitions() {
-  return reinterpret_cast<FixedArray*>(
-      READ_FIELD(this, kPrototypeTransitionsOffset));
+Object* Map::GetBackPointer() {
+  Object* object = READ_FIELD(this, kPrototypeTransitionsOrBackPointerOffset);
+  if (object->IsFixedArray()) {
+    return FixedArray::cast(object)->get(kProtoTransitionBackPointerOffset);
+  } else {
+    return object;
+  }
+}
+
+
+void Map::SetBackPointer(Object* value, WriteBarrierMode mode) {
+  Heap* heap = GetHeap();
+  ASSERT(instance_type() >= FIRST_JS_RECEIVER_TYPE);
+  ASSERT((value->IsUndefined() && GetBackPointer()->IsMap()) ||
+         (value->IsMap() && GetBackPointer()->IsUndefined()));
+  Object* object = READ_FIELD(this, kPrototypeTransitionsOrBackPointerOffset);
+  if (object->IsFixedArray()) {
+    FixedArray::cast(object)->set(
+        kProtoTransitionBackPointerOffset, value, mode);
+  } else {
+    WRITE_FIELD(this, kPrototypeTransitionsOrBackPointerOffset, value);
+    CONDITIONAL_WRITE_BARRIER(
+        heap, this, kPrototypeTransitionsOrBackPointerOffset, value, mode);
+  }
+}
+
+
+FixedArray* Map::prototype_transitions() {
+  Object* object = READ_FIELD(this, kPrototypeTransitionsOrBackPointerOffset);
+  if (object->IsFixedArray()) {
+    return FixedArray::cast(object);
+  } else {
+    return GetHeap()->empty_fixed_array();
+  }
+}
+
+
+void Map::set_prototype_transitions(FixedArray* value, WriteBarrierMode mode) {
+  Heap* heap = GetHeap();
+  ASSERT(value != heap->empty_fixed_array());
+  value->set(kProtoTransitionBackPointerOffset, GetBackPointer());
+  WRITE_FIELD(this, kPrototypeTransitionsOrBackPointerOffset, value);
+  CONDITIONAL_WRITE_BARRIER(
+      heap, this, kPrototypeTransitionsOrBackPointerOffset, value, mode);
+}
+
+
+void Map::init_prototype_transitions(Object* undefined) {
+  ASSERT(undefined->IsUndefined());
+  WRITE_FIELD(this, kPrototypeTransitionsOrBackPointerOffset, undefined);
+}
+
+
+HeapObject* Map::unchecked_prototype_transitions() {
+  Object* object = READ_FIELD(this, kPrototypeTransitionsOrBackPointerOffset);
+  return reinterpret_cast<HeapObject*>(object);
 }
 
 
 ACCESSORS(Map, code_cache, Object, kCodeCacheOffset)
-ACCESSORS(Map, prototype_transitions, FixedArray, kPrototypeTransitionsOffset)
 ACCESSORS(Map, constructor, Object, kConstructorOffset)
 
 ACCESSORS(JSFunction, shared, SharedFunctionInfo, kSharedFunctionInfoOffset)
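
The accessors above overload a single map field: kPrototypeTransitionsOrBackPointerOffset holds either the back pointer itself or, once prototype transitions exist, a FixedArray whose kProtoTransitionBackPointerOffset slot carries the back pointer. A simplified model of that one-slot encoding, using plain C++ types in place of heap objects:

    #include <cassert>
    #include <vector>

    // Simplified stand-ins: a heap value is either a map or a fixed array.
    struct Object { virtual ~Object() {} };
    struct FixedArrayModel : Object { std::vector<Object*> slots; };

    struct MapModel : Object {
      // Plays the role of the single kPrototypeTransitionsOrBackPointerOffset slot.
      Object* proto_transitions_or_back_pointer = nullptr;

      Object* GetBackPointer() {
        FixedArrayModel* array =
            dynamic_cast<FixedArrayModel*>(proto_transitions_or_back_pointer);
        if (array != nullptr) return array->slots[0];  // back pointer lives in slot 0
        return proto_transitions_or_back_pointer;
      }

      void SetPrototypeTransitions(FixedArrayModel* array) {
        array->slots[0] = GetBackPointer();  // keep the back pointer reachable
        proto_transitions_or_back_pointer = array;
      }
    };

    int main() {
      MapModel parent;
      MapModel child;
      child.proto_transitions_or_back_pointer = &parent;

      FixedArrayModel transitions;
      transitions.slots.resize(3, nullptr);
      child.SetPrototypeTransitions(&transitions);

      assert(child.GetBackPointer() == &parent);
      return 0;
    }
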
@@ -3660,6 +3724,12 @@
 }
 
 
+int SharedFunctionInfo::profiler_ticks() {
+  if (code()->kind() != Code::FUNCTION) return 0;
+  return code()->profiler_ticks();
+}
+
+
 LanguageMode SharedFunctionInfo::language_mode() {
   int hints = compiler_hints();
   if (BooleanBit::get(hints, kExtendedModeFunction)) {
diff --git a/src/objects-printer.cc b/src/objects-printer.cc
index 7d6ef67..febdaab 100644
--- a/src/objects-printer.cc
+++ b/src/objects-printer.cc
@@ -331,14 +331,16 @@
     }
     case FAST_DOUBLE_ELEMENTS: {
       // Print in array notation for non-sparse arrays.
-      FixedDoubleArray* p = FixedDoubleArray::cast(elements());
-      for (int i = 0; i < p->length(); i++) {
-        if (p->is_the_hole(i)) {
-          PrintF(out, "   %d: <the hole>", i);
-        } else {
-          PrintF(out, "   %d: %g", i, p->get_scalar(i));
+      if (elements()->length() > 0) {
+        FixedDoubleArray* p = FixedDoubleArray::cast(elements());
+        for (int i = 0; i < p->length(); i++) {
+          if (p->is_the_hole(i)) {
+            PrintF(out, "   %d: <the hole>", i);
+          } else {
+            PrintF(out, "   %d: %g", i, p->get_scalar(i));
+          }
+          PrintF(out, "\n");
         }
-        PrintF(out, "\n");
       }
       break;
     }
diff --git a/src/objects.cc b/src/objects.cc
index 76a8266..7f75611 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -1604,6 +1604,7 @@
   // We have now allocated all the necessary objects.
   // All the changes can be applied at once, so they are atomic.
   map()->set_instance_descriptors(old_descriptors);
+  new_map->SetBackPointer(map());
   new_map->set_instance_descriptors(DescriptorArray::cast(new_descriptors));
   set_map(new_map);
   return FastPropertyAtPut(index, value);
@@ -1664,6 +1665,7 @@
     }
   }
   old_map->set_instance_descriptors(DescriptorArray::cast(new_descriptors));
+  Map::cast(new_map)->SetBackPointer(old_map);
 
   return function;
 }
@@ -1824,6 +1826,7 @@
     }
   }
   old_map->set_instance_descriptors(DescriptorArray::cast(new_descriptors));
+  map()->SetBackPointer(old_map);
   return result;
 }
 
@@ -2408,6 +2411,7 @@
     return maybe_new_descriptors;
   }
   set_instance_descriptors(DescriptorArray::cast(new_descriptors));
+  transitioned_map->SetBackPointer(this);
   return this;
 }
 
@@ -4413,7 +4417,12 @@
   LookupResult result(GetHeap()->isolate());
   LocalLookupRealNamedProperty(name, &result);
   if (result.IsProperty() && result.type() == CALLBACKS) {
-    ASSERT(!result.IsDontDelete());
+    // Note that the result can actually have IsDontDelete() == true when we
+    // e.g. have to fall back to the slow case while adding a setter after
+    // successfully reusing a map transition for a getter. Nevertheless, this is
+    // OK, because the assertion only holds for the whole addition of both
+    // accessors, not for the addition of each part. See first comment in
+    // DefinePropertyAccessor below.
     Object* obj = result.GetCallbackObject();
     if (obj->IsAccessorPair()) {
       return AccessorPair::cast(obj)->CopyWithoutTransitions();
@@ -4427,6 +4436,28 @@
                                               Object* getter,
                                               Object* setter,
                                               PropertyAttributes attributes) {
+  // We could assert that the property is configurable here, but we would need
+  // to do a lookup, which seems to be a bit of overkill.
+  Heap* heap = GetHeap();
+  bool only_attribute_changes = getter->IsNull() && setter->IsNull();
+  if (HasFastProperties() && !only_attribute_changes) {
+    MaybeObject* getterOk = heap->undefined_value();
+    if (!getter->IsNull()) {
+      getterOk = DefineFastAccessor(name, ACCESSOR_GETTER, getter, attributes);
+      if (getterOk->IsFailure()) return getterOk;
+    }
+
+    MaybeObject* setterOk = heap->undefined_value();
+    if (getterOk != heap->null_value() && !setter->IsNull()) {
+      setterOk = DefineFastAccessor(name, ACCESSOR_SETTER, setter, attributes);
+      if (setterOk->IsFailure()) return setterOk;
+    }
+
+    if (getterOk != heap->null_value() && setterOk != heap->null_value()) {
+      return heap->undefined_value();
+    }
+  }
+
   AccessorPair* accessors;
   { MaybeObject* maybe_accessors = CreateAccessorPairFor(name);
     if (!maybe_accessors->To(&accessors)) return maybe_accessors;
@@ -4576,6 +4607,159 @@
 }
 
 
+static MaybeObject* CreateFreshAccessor(JSObject* obj,
+                                        String* name,
+                                        AccessorComponent component,
+                                        Object* accessor,
+                                        PropertyAttributes attributes) {
+  // step 1: create a new getter/setter pair with only the accessor in it
+  Heap* heap = obj->GetHeap();
+  AccessorPair* accessors2;
+  { MaybeObject* maybe_accessors2 = heap->AllocateAccessorPair();
+    if (!maybe_accessors2->To(&accessors2)) return maybe_accessors2;
+  }
+  accessors2->set(component, accessor);
+
+  // step 2: create a copy of the descriptors, incl. the new getter/setter pair
+  Map* map1 = obj->map();
+  CallbacksDescriptor callbacks_descr2(name, accessors2, attributes);
+  DescriptorArray* descriptors2;
+  { MaybeObject* maybe_descriptors2 =
+        map1->instance_descriptors()->CopyInsert(&callbacks_descr2,
+                                                 REMOVE_TRANSITIONS);
+    if (!maybe_descriptors2->To(&descriptors2)) return maybe_descriptors2;
+  }
+
+  // step 3: create a new map with the new descriptors
+  Map* map2;
+  { MaybeObject* maybe_map2 = map1->CopyDropDescriptors();
+    if (!maybe_map2->To(&map2)) return maybe_map2;
+  }
+  map2->set_instance_descriptors(descriptors2);
+
+  // step 4: create a new getter/setter pair with a transition to the new map
+  AccessorPair* accessors1;
+  { MaybeObject* maybe_accessors1 = heap->AllocateAccessorPair();
+    if (!maybe_accessors1->To(&accessors1)) return maybe_accessors1;
+  }
+  accessors1->set(component, map2);
+
+  // step 5: create a copy of the descriptors, incl. the new getter/setter pair
+  // with the transition
+  CallbacksDescriptor callbacks_descr1(name, accessors1, attributes);
+  DescriptorArray* descriptors1;
+  { MaybeObject* maybe_descriptors1 =
+        map1->instance_descriptors()->CopyInsert(&callbacks_descr1,
+                                                 KEEP_TRANSITIONS);
+    if (!maybe_descriptors1->To(&descriptors1)) return maybe_descriptors1;
+  }
+
+  // step 6: everything went well so far, so we make our changes visible
+  obj->set_map(map2);
+  map1->set_instance_descriptors(descriptors1);
+  map2->SetBackPointer(map1);
+  return obj;
+}
+
+
+static bool TransitionToSameAccessor(Object* map,
+                                     String* name,
+                                     AccessorComponent component,
+                                     Object* accessor,
+                                     PropertyAttributes attributes) {
+  DescriptorArray* descs = Map::cast(map)->instance_descriptors();
+  int number = descs->SearchWithCache(name);
+  ASSERT(number != DescriptorArray::kNotFound);
+  Object* target_accessor =
+      AccessorPair::cast(descs->GetCallbacksObject(number))->get(component);
+  PropertyAttributes target_attributes = descs->GetDetails(number).attributes();
+  return target_accessor == accessor && target_attributes == attributes;
+}
+
+
+static MaybeObject* NewCallbackTransition(JSObject* obj,
+                                          String* name,
+                                          AccessorComponent component,
+                                          Object* accessor,
+                                          PropertyAttributes attributes,
+                                          AccessorPair* accessors2) {
+  // step 1: copy the old getter/setter pair and set the new accessor
+  AccessorPair* accessors3;
+  { MaybeObject* maybe_accessors3 = accessors2->CopyWithoutTransitions();
+    if (!maybe_accessors3->To(&accessors3)) return maybe_accessors3;
+  }
+  accessors3->set(component, accessor);
+
+  // step 2: create a copy of the descriptors, incl. the new getter/setter pair
+  Map* map2 = obj->map();
+  CallbacksDescriptor callbacks_descr3(name, accessors3, attributes);
+  DescriptorArray* descriptors3;
+  { MaybeObject* maybe_descriptors3 =
+        map2->instance_descriptors()->CopyInsert(&callbacks_descr3,
+                                                 REMOVE_TRANSITIONS);
+    if (!maybe_descriptors3->To(&descriptors3)) return maybe_descriptors3;
+  }
+
+  // step 3: create a new map with the new descriptors
+  Map* map3;
+  { MaybeObject* maybe_map3 = map2->CopyDropDescriptors();
+    if (!maybe_map3->To(&map3)) return maybe_map3;
+  }
+  map3->set_instance_descriptors(descriptors3);
+
+  // step 4: everything went well so far, so we make our changes visible
+  obj->set_map(map3);
+  accessors2->set(component, map3);
+  map3->SetBackPointer(map2);
+  return obj;
+}
+
+
+MaybeObject* JSObject::DefineFastAccessor(String* name,
+                                          AccessorComponent component,
+                                          Object* accessor,
+                                          PropertyAttributes attributes) {
+  ASSERT(accessor->IsSpecFunction() || accessor->IsUndefined());
+  LookupResult result(GetIsolate());
+  LocalLookup(name, &result);
+
+  // If we have a new property, create a fresh accessor plus a transition to it.
+  if (!result.IsFound()) {
+    return CreateFreshAccessor(this, name, component, accessor, attributes);
+  }
+
+  // If the property is not a JavaScript accessor, fall back to the slow case.
+  if (result.type() != CALLBACKS) return GetHeap()->null_value();
+  Object* callback_value = result.GetValue();
+  if (!callback_value->IsAccessorPair()) return GetHeap()->null_value();
+  AccessorPair* accessors = AccessorPair::cast(callback_value);
+
+  // Follow a callback transition, if there is a fitting one.
+  Object* entry = accessors->get(component);
+  if (entry->IsMap() &&
+      TransitionToSameAccessor(entry, name, component, accessor, attributes)) {
+    set_map(Map::cast(entry));
+    return this;
+  }
+
+  // When we re-add the same accessor again, there is nothing to do.
+  if (entry == accessor && result.GetAttributes() == attributes) return this;
+
+  // Only the other accessor has been set so far, create a new transition.
+  if (entry->IsTheHole()) {
+    return NewCallbackTransition(this,
+                                 name,
+                                 component,
+                                 accessor,
+                                 attributes,
+                                 accessors);
+  }
+
+  // Nothing from the above worked, so we have to fall back to the slow case.
+  return GetHeap()->null_value();
+}
+
+
 MaybeObject* JSObject::DefineAccessor(AccessorInfo* info) {
   Isolate* isolate = GetIsolate();
   String* name = String::cast(info->name());
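DefineFastAccessor and its helpers above build and reuse map transitions keyed by the accessor being installed. A minimal standalone sketch of that caching idea, with Shape, Obj, NewShape, and DefineAccessorFast as illustration-only names (none of this is V8 code): the first definition of an accessor creates a successor shape and records the transition, and later identical definitions simply switch to the cached shape.

    #include <iostream>
    #include <map>
    #include <memory>
    #include <string>
    #include <utility>
    #include <vector>

    // A hidden class ("map") with cached accessor transitions.
    struct Shape {
      std::map<std::pair<std::string, const void*>, Shape*> transitions;
    };

    struct Obj {
      Shape* shape;
    };

    std::vector<std::unique_ptr<Shape>> all_shapes;  // trivial arena

    Shape* NewShape() {
      all_shapes.push_back(std::make_unique<Shape>());
      return all_shapes.back().get();
    }

    // Define an accessor (identified here only by its address) for `name`:
    // reuse the cached successor shape when the same accessor was seen before,
    // otherwise create a fresh successor and remember the transition.
    void DefineAccessorFast(Obj* obj, const std::string& name,
                            const void* accessor) {
      auto key = std::make_pair(name, accessor);
      auto it = obj->shape->transitions.find(key);
      if (it != obj->shape->transitions.end()) {
        obj->shape = it->second;            // follow the existing transition
        return;
      }
      Shape* next = NewShape();             // fresh shape for the new accessor
      obj->shape->transitions[key] = next;  // cache the transition
      obj->shape = next;
    }

    int main() {
      Shape* root = NewShape();
      Obj a = {root};
      Obj b = {root};
      int getter = 0;  // stands in for a getter function object
      DefineAccessorFast(&a, "x", &getter);
      DefineAccessorFast(&b, "x", &getter);
      // Both objects end up on the same shape, as with V8's map transitions.
      std::cout << (a.shape == b.shape ? "shared shape\n" : "distinct shapes\n");
      return 0;
    }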
@@ -4959,7 +5143,7 @@
 // underlying array while it is running.
 class IntrusivePrototypeTransitionIterator {
  public:
-  explicit IntrusivePrototypeTransitionIterator(FixedArray* proto_trans)
+  explicit IntrusivePrototypeTransitionIterator(HeapObject* proto_trans)
       : proto_trans_(proto_trans) { }
 
   void Start() {
@@ -4984,7 +5168,7 @@
 
  private:
   bool HasTransitions() {
-    return proto_trans_->length() >= Map::kProtoTransitionHeaderSize;
+    return proto_trans_->map()->IsSmi() || proto_trans_->IsFixedArray();
   }
 
   Object** Header() {
@@ -4992,12 +5176,16 @@
   }
 
   int NumberOfTransitions() {
-    Object* num = proto_trans_->get(Map::kProtoTransitionNumberOfEntriesOffset);
+    ASSERT(HasTransitions());
+    FixedArray* proto_trans = reinterpret_cast<FixedArray*>(proto_trans_);
+    Object* num = proto_trans->get(Map::kProtoTransitionNumberOfEntriesOffset);
     return Smi::cast(num)->value();
   }
 
   Map* GetTransition(int transitionNumber) {
-    return Map::cast(proto_trans_->get(IndexFor(transitionNumber)));
+    ASSERT(HasTransitions());
+    FixedArray* proto_trans = reinterpret_cast<FixedArray*>(proto_trans_);
+    return Map::cast(proto_trans->get(IndexFor(transitionNumber)));
   }
 
   int IndexFor(int transitionNumber) {
@@ -5006,7 +5194,7 @@
         transitionNumber * Map::kProtoTransitionElementsPerEntry;
   }
 
-  FixedArray* proto_trans_;
+  HeapObject* proto_trans_;
 };
 
 
@@ -5939,8 +6127,8 @@
 
 
 Object* AccessorPair::GetComponent(AccessorComponent component) {
-    Object* accessor = (component == ACCESSOR_GETTER) ? getter() : setter();
-    return accessor->IsTheHole() ? GetHeap()->undefined_value() : accessor;
+  Object* accessor = get(component);
+  return accessor->IsTheHole() ? GetHeap()->undefined_value() : accessor;
 }
 
 
@@ -7168,85 +7356,23 @@
 }
 
 
-void Map::CreateOneBackPointer(Object* transition_target) {
-  if (!transition_target->IsMap()) return;
-  Map* target = Map::cast(transition_target);
-#ifdef DEBUG
-  // Verify target.
-  Object* source_prototype = prototype();
-  Object* target_prototype = target->prototype();
-  ASSERT(source_prototype->IsJSReceiver() ||
-         source_prototype->IsMap() ||
-         source_prototype->IsNull());
-  ASSERT(target_prototype->IsJSReceiver() ||
-         target_prototype->IsNull());
-  ASSERT(source_prototype->IsMap() ||
-         source_prototype == target_prototype);
-#endif
-  // Point target back to source.  set_prototype() will not let us set
-  // the prototype to a map, as we do here.
-  *RawField(target, kPrototypeOffset) = this;
-}
-
-
-void Map::CreateBackPointers() {
-  DescriptorArray* descriptors = instance_descriptors();
-  for (int i = 0; i < descriptors->number_of_descriptors(); i++) {
-    switch (descriptors->GetType(i)) {
-      case MAP_TRANSITION:
-      case CONSTANT_TRANSITION:
-        CreateOneBackPointer(descriptors->GetValue(i));
-        break;
-      case ELEMENTS_TRANSITION: {
-        Object* object = descriptors->GetValue(i);
-        if (object->IsMap()) {
-          CreateOneBackPointer(object);
-        } else {
-          FixedArray* array = FixedArray::cast(object);
-          for (int i = 0; i < array->length(); ++i) {
-            CreateOneBackPointer(array->get(i));
-          }
-        }
-        break;
-      }
-      case CALLBACKS: {
-        Object* object = descriptors->GetValue(i);
-        if (object->IsAccessorPair()) {
-          AccessorPair* accessors = AccessorPair::cast(object);
-          CreateOneBackPointer(accessors->getter());
-          CreateOneBackPointer(accessors->setter());
-        }
-        break;
-      }
-      case NORMAL:
-      case FIELD:
-      case CONSTANT_FUNCTION:
-      case HANDLER:
-      case INTERCEPTOR:
-      case NULL_DESCRIPTOR:
-        break;
-    }
-  }
-}
-
-
-bool Map::RestoreOneBackPointer(Object* object,
-                                Object* real_prototype,
-                                bool* keep_entry) {
-  if (!object->IsMap()) return false;
-  Map* map = Map::cast(object);
+// Clear a possible back pointer in case the transition leads to a dead map.
+// Returns true if a back pointer has been cleared and false otherwise.
+// Sets *keep_entry to true when a live map transition has been found.
+static bool ClearBackPointer(Heap* heap, Object* target, bool* keep_entry) {
+  if (!target->IsMap()) return false;
+  Map* map = Map::cast(target);
   if (Marking::MarkBitFrom(map).Get()) {
     *keep_entry = true;
     return false;
+  } else {
+    map->SetBackPointer(heap->undefined_value(), SKIP_WRITE_BARRIER);
+    return true;
   }
-  ASSERT(map->prototype() == this || map->prototype() == real_prototype);
-  // Getter prototype() is read-only, set_prototype() has side effects.
-  *RawField(map, Map::kPrototypeOffset) = real_prototype;
-  return true;
 }
 
 
-void Map::ClearNonLiveTransitions(Heap* heap, Object* real_prototype) {
+void Map::ClearNonLiveTransitions(Heap* heap) {
   DescriptorArray* d = DescriptorArray::cast(
       *RawField(this, Map::kInstanceDescriptorsOrBitField3Offset));
   if (d->IsEmpty()) return;
@@ -7259,24 +7385,22 @@
     // If the pair (value, details) is a map transition, check if the target is
     // live. If not, null the descriptor. Also drop the back pointer for that
     // map transition, so that this map is not reached again by following a back
-    // pointer from a non-live object.
+    // pointer from that non-live map.
     bool keep_entry = false;
     PropertyDetails details(Smi::cast(contents->get(i + 1)));
     switch (details.type()) {
       case MAP_TRANSITION:
       case CONSTANT_TRANSITION:
-        RestoreOneBackPointer(contents->get(i), real_prototype, &keep_entry);
+        ClearBackPointer(heap, contents->get(i), &keep_entry);
         break;
       case ELEMENTS_TRANSITION: {
         Object* object = contents->get(i);
         if (object->IsMap()) {
-          RestoreOneBackPointer(object, real_prototype, &keep_entry);
+          ClearBackPointer(heap, object, &keep_entry);
         } else {
           FixedArray* array = FixedArray::cast(object);
           for (int j = 0; j < array->length(); ++j) {
-            if (RestoreOneBackPointer(array->get(j),
-                                      real_prototype,
-                                      &keep_entry)) {
+            if (ClearBackPointer(heap, array->get(j), &keep_entry)) {
               array->set_undefined(j);
             }
           }
@@ -7287,14 +7411,10 @@
         Object* object = contents->get(i);
         if (object->IsAccessorPair()) {
           AccessorPair* accessors = AccessorPair::cast(object);
-          if (RestoreOneBackPointer(accessors->getter(),
-                                    real_prototype,
-                                    &keep_entry)) {
+          if (ClearBackPointer(heap, accessors->getter(), &keep_entry)) {
             accessors->set_getter(heap->the_hole_value());
           }
-          if (RestoreOneBackPointer(accessors->setter(),
-                                    real_prototype,
-                                    &keep_entry)) {
+          if (ClearBackPointer(heap, accessors->setter(), &keep_entry)) {
             accessors->set_setter(heap->the_hole_value());
           }
         } else {
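A minimal standalone sketch of the ClearBackPointer policy applied throughout the switch above (MiniMap and its mark flag are illustration-only stand-ins, not V8 types): a transition target that survived marking forces the entry to be kept, while a dead target gets its back pointer cleared so it can no longer lead back to this map.

    #include <iostream>

    struct MiniMap {
      bool marked = false;           // stands in for the mark bit
      MiniMap* back_pointer = nullptr;
    };

    // Returns true when the back pointer was cleared (the entry can be dropped).
    bool ClearBackPointer(MiniMap* target, bool* keep_entry) {
      if (target->marked) {
        *keep_entry = true;          // live transition: keep it, clear nothing
        return false;
      }
      target->back_pointer = nullptr;
      return true;
    }

    int main() {
      MiniMap parent, live_child, dead_child;
      live_child.back_pointer = &parent;
      dead_child.back_pointer = &parent;
      live_child.marked = true;

      bool keep_entry = false;
      std::cout << ClearBackPointer(&live_child, &keep_entry)  // 0, keep_entry set
                << ClearBackPointer(&dead_child, &keep_entry)  // 1, pointer cleared
                << "\n";
      return 0;
    }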
@@ -8142,6 +8262,20 @@
 }
 
 
+void Code::ClearTypeFeedbackCells(Heap* heap) {
+  Object* raw_info = type_feedback_info();
+  if (raw_info->IsTypeFeedbackInfo()) {
+    TypeFeedbackCells* type_feedback_cells =
+        TypeFeedbackInfo::cast(raw_info)->type_feedback_cells();
+    for (int i = 0; i < type_feedback_cells->CellCount(); i++) {
+      ASSERT(type_feedback_cells->AstId(i)->IsSmi());
+      JSGlobalPropertyCell* cell = type_feedback_cells->Cell(i);
+      cell->set_value(TypeFeedbackCells::RawUninitializedSentinel(heap));
+    }
+  }
+}
+
+
 #ifdef ENABLE_DISASSEMBLER
 
 void DeoptimizationInputData::DeoptimizationInputDataPrint(FILE* out) {
@@ -8378,6 +8512,10 @@
       CompareIC::State state = CompareIC::ComputeState(this);
       PrintF(out, "compare_state = %s\n", CompareIC::GetStateName(state));
     }
+    if (is_compare_ic_stub() && major_key() == CodeStub::CompareIC) {
+      Token::Value op = CompareIC::ComputeOperation(this);
+      PrintF(out, "compare_operation = %s\n", Token::Name(op));
+    }
   }
   if ((name != NULL) && (name[0] != '\0')) {
     PrintF(out, "name = %s\n", name);
@@ -8483,8 +8621,10 @@
   ElementsKind to_kind = (elements_kind == FAST_SMI_ONLY_ELEMENTS)
       ? FAST_SMI_ONLY_ELEMENTS
       : FAST_ELEMENTS;
-  //  int copy_size = Min(old_elements_raw->length(), new_elements->length());
-  accessor->CopyElements(this, new_elements, to_kind);
+  { MaybeObject* maybe_obj =
+        accessor->CopyElements(this, new_elements, to_kind);
+    if (maybe_obj->IsFailure()) return maybe_obj;
+  }
   if (elements_kind != NON_STRICT_ARGUMENTS_ELEMENTS) {
     set_map_and_elements(new_map, new_elements);
   } else {
@@ -8513,7 +8653,7 @@
   // We should never end in here with a pixel or external array.
   ASSERT(!HasExternalArrayElements());
 
-  FixedDoubleArray* elems;
+  FixedArrayBase* elems;
   { MaybeObject* maybe_obj =
         heap->AllocateUninitializedFixedDoubleArray(capacity);
     if (!maybe_obj->To(&elems)) return maybe_obj;
@@ -8528,7 +8668,10 @@
   FixedArrayBase* old_elements = elements();
   ElementsKind elements_kind = GetElementsKind();
   ElementsAccessor* accessor = ElementsAccessor::ForKind(elements_kind);
-  accessor->CopyElements(this, elems, FAST_DOUBLE_ELEMENTS);
+  { MaybeObject* maybe_obj =
+        accessor->CopyElements(this, elems, FAST_DOUBLE_ELEMENTS);
+    if (maybe_obj->IsFailure()) return maybe_obj;
+  }
   if (elements_kind != NON_STRICT_ARGUMENTS_ELEMENTS) {
     set_map_and_elements(new_map, elems);
   } else {
@@ -9664,9 +9807,10 @@
   ElementsKind from_kind = map()->elements_kind();
 
   Isolate* isolate = GetIsolate();
-  if (from_kind == FAST_SMI_ONLY_ELEMENTS &&
-      (to_kind == FAST_ELEMENTS ||
-       elements() == isolate->heap()->empty_fixed_array())) {
+  if ((from_kind == FAST_SMI_ONLY_ELEMENTS ||
+      elements() == isolate->heap()->empty_fixed_array()) &&
+      to_kind == FAST_ELEMENTS) {
+    ASSERT(from_kind != FAST_ELEMENTS);
     MaybeObject* maybe_new_map = GetElementsTransitionMap(isolate, to_kind);
     Map* new_map;
     if (!maybe_new_map->To(&new_map)) return maybe_new_map;
@@ -12844,7 +12988,7 @@
 #endif  // ENABLE_DEBUGGER_SUPPORT
 
 
-MaybeObject* JSDate::GetField(Object* object, Smi* index) {
+Object* JSDate::GetField(Object* object, Smi* index) {
   return JSDate::cast(object)->DoGetField(
       static_cast<FieldIndex>(index->value()));
 }
diff --git a/src/objects.h b/src/objects.h
index 80d1fd4..22993f2 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -704,12 +704,13 @@
                          WriteBarrierMode mode = UPDATE_WRITE_BARRIER); \
 
 
+class AccessorPair;
 class DictionaryElementsAccessor;
 class ElementsAccessor;
+class Failure;
 class FixedArrayBase;
 class ObjectVisitor;
 class StringStream;
-class Failure;
 
 struct ValueInfo : public Malloced {
   ValueInfo() : type(FIRST_TYPE), ptr(NULL), str(NULL), number(0) { }
@@ -1642,6 +1643,14 @@
                                               Object* getter,
                                               Object* setter,
                                               PropertyAttributes attributes);
+  // Try to define a single accessor paying attention to map transitions.
+  // Returns a JavaScript null if this was not possible and we have to use the
+  // slow case. Note that we can fail due to allocations, too.
+  MUST_USE_RESULT MaybeObject* DefineFastAccessor(
+      String* name,
+      AccessorComponent component,
+      Object* accessor,
+      PropertyAttributes attributes);
   Object* LookupAccessor(String* name, AccessorComponent component);
 
   MUST_USE_RESULT MaybeObject* DefineAccessor(AccessorInfo* info);
@@ -2600,6 +2609,9 @@
   // Is the descriptor array sorted and without duplicates?
   bool IsSortedNoDuplicates();
 
+  // Is the descriptor array consistent with the back pointers in targets?
+  bool IsConsistentWithBackPointers(Map* current_map);
+
   // Are two DescriptorArrays equal?
   bool IsEqualTo(DescriptorArray* other);
 #endif
@@ -4291,6 +4303,11 @@
   inline byte compare_state();
   inline void set_compare_state(byte value);
 
+  // [compare_operation]: For kind COMPARE_IC tells what compare operation the
+  // stub was generated for.
+  inline byte compare_operation();
+  inline void set_compare_operation(byte value);
+
   // [to_boolean_foo]: For kind TO_BOOLEAN_IC tells what state the stub is in.
   inline byte to_boolean_state();
   inline void set_to_boolean_state(byte value);
@@ -4426,6 +4443,7 @@
   void CodeVerify();
 #endif
   void ClearInlineCaches();
+  void ClearTypeFeedbackCells(Heap* heap);
 
   // Max loop nesting marker used to postpone OSR. We don't take loop
   // nesting that is deeper than 5 levels into account.
@@ -4474,6 +4492,8 @@
 
   static const int kBinaryOpReturnTypeOffset = kBinaryOpTypeOffset + 1;
 
+  static const int kCompareOperationOffset = kCompareStateOffset + 1;
+
   static const int kAllowOSRAtLoopNestingLevelOffset = kFullCodeFlags + 1;
   static const int kProfilerTicksOffset = kAllowOSRAtLoopNestingLevelOffset + 1;
 
@@ -4702,19 +4722,30 @@
   // [stub cache]: contains stubs compiled for this map.
   DECL_ACCESSORS(code_cache, Object)
 
+  // [back pointer]: points back to the parent map from which a transition
+  // leads to this map. The field overlaps with prototype transitions and the
+  // back pointer will be moved into the prototype transitions array if
+  // required.
+  inline Object* GetBackPointer();
+  inline void SetBackPointer(Object* value,
+                             WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+
   // [prototype transitions]: cache of prototype transitions.
   // Prototype transition is a transition that happens
   // when we change object's prototype to a new one.
   // Cache format:
   //    0: finger - index of the first free cell in the cache
-  //    1 + 2 * i: prototype
-  //    2 + 2 * i: target map
+  //    1: back pointer that overlaps with prototype transitions field.
+  //    2 + 2 * i: prototype
+  //    3 + 2 * i: target map
   DECL_ACCESSORS(prototype_transitions, FixedArray)
 
-  inline FixedArray* unchecked_prototype_transitions();
+  inline void init_prototype_transitions(Object* undefined);
+  inline HeapObject* unchecked_prototype_transitions();
 
-  static const int kProtoTransitionHeaderSize = 1;
+  static const int kProtoTransitionHeaderSize = 2;
   static const int kProtoTransitionNumberOfEntriesOffset = 0;
+  static const int kProtoTransitionBackPointerOffset = 1;
   static const int kProtoTransitionElementsPerEntry = 2;
   static const int kProtoTransitionPrototypeOffset = 0;
   static const int kProtoTransitionMapOffset = 1;
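As a sanity check on the new layout, the slot arithmetic documented above can be reproduced with plain constants (standalone arithmetic mirroring the comment, not V8 code): slot 0 holds the entry count, slot 1 the overlapped back pointer, and entry i occupies slots 2 + 2*i and 3 + 2*i.

    #include <iostream>

    const int kProtoTransitionHeaderSize = 2;  // entry count + back pointer slot
    const int kProtoTransitionElementsPerEntry = 2;
    const int kProtoTransitionPrototypeOffset = 0;
    const int kProtoTransitionMapOffset = 1;

    int IndexFor(int transition_number) {
      return kProtoTransitionHeaderSize +
             transition_number * kProtoTransitionElementsPerEntry;
    }

    int main() {
      for (int i = 0; i < 3; ++i) {
        std::cout << "entry " << i << ": prototype at slot "
                  << IndexFor(i) + kProtoTransitionPrototypeOffset
                  << ", target map at slot "
                  << IndexFor(i) + kProtoTransitionMapOffset << "\n";
      }
      // Prints slots 2/3, 4/5, 6/7, matching the "2 + 2*i" / "3 + 2*i" comment.
      return 0;
    }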
@@ -4786,25 +4817,10 @@
   // Removes a code object from the code cache at the given index.
   void RemoveFromCodeCache(String* name, Code* code, int index);
 
-  // For every transition in this map, makes the transition's
-  // target's prototype pointer point back to this map.
-  // This is undone in MarkCompactCollector::ClearNonLiveTransitions().
-  void CreateBackPointers();
-
-  void CreateOneBackPointer(Object* transition_target);
-
-  // Set all map transitions from this map to dead maps to null.
-  // Also, restore the original prototype on the targets of these
-  // transitions, so that we do not process this map again while
-  // following back pointers.
-  void ClearNonLiveTransitions(Heap* heap, Object* real_prototype);
-
-  // Restore a possible back pointer in the prototype field of object.
-  // Return true in that case and false otherwise. Set *keep_entry to
-  // true when a live map transition has been found.
-  bool RestoreOneBackPointer(Object* object,
-                             Object* real_prototype,
-                             bool* keep_entry);
+  // Set all map transitions from this map to dead maps to null.  Also clear
+  // back pointers in transition targets so that we do not process this map
+  // again while following back pointers.
+  void ClearNonLiveTransitions(Heap* heap);
 
   // Computes a hash value for this map, to be used in HashTables and such.
   int Hash();
@@ -4886,16 +4902,17 @@
       kConstructorOffset + kPointerSize;
   static const int kCodeCacheOffset =
       kInstanceDescriptorsOrBitField3Offset + kPointerSize;
-  static const int kPrototypeTransitionsOffset =
+  static const int kPrototypeTransitionsOrBackPointerOffset =
       kCodeCacheOffset + kPointerSize;
-  static const int kPadStart = kPrototypeTransitionsOffset + kPointerSize;
+  static const int kPadStart =
+      kPrototypeTransitionsOrBackPointerOffset + kPointerSize;
   static const int kSize = MAP_POINTER_ALIGN(kPadStart);
 
   // Layout of pointer fields. Heap iteration code relies on them
   // being continuously allocated.
   static const int kPointerFieldsBeginOffset = Map::kPrototypeOffset;
   static const int kPointerFieldsEndOffset =
-      Map::kPrototypeTransitionsOffset + kPointerSize;
+      kPrototypeTransitionsOrBackPointerOffset + kPointerSize;
 
   // Byte offsets within kInstanceSizesOffset.
   static const int kInstanceSizeOffset = kInstanceSizesOffset + 0;
@@ -5334,6 +5351,8 @@
   inline int deopt_counter();
   inline void set_deopt_counter(int counter);
 
+  inline int profiler_ticks();
+
   // Inline cache age is used to infer whether the function survived a context
   // disposal or not. In the former case we reset the opt_count.
   inline int ic_age();
@@ -6120,7 +6139,7 @@
 
   // Returns the date field with the specified index.
   // See FieldIndex for the list of date fields.
-  static MaybeObject* GetField(Object* date, Smi* index);
+  static Object* GetField(Object* date, Smi* index);
 
   void SetValue(Object* value, bool is_value_nan);
 
@@ -6869,7 +6888,7 @@
   inline void Set(int index, uint16_t value);
   // Get individual two byte char in the string.  Repeated calls
   // to this method are not efficient unless the string is flat.
-  inline uint16_t Get(int index);
+  INLINE(uint16_t Get(int index));
 
   // Try to flatten the string.  Checks first inline to see if it is
   // necessary.  Does nothing if the string is not a cons string.
@@ -8101,6 +8120,18 @@
 
   MUST_USE_RESULT MaybeObject* CopyWithoutTransitions();
 
+  Object* get(AccessorComponent component) {
+    return component == ACCESSOR_GETTER ? getter() : setter();
+  }
+
+  void set(AccessorComponent component, Object* value) {
+    if (component == ACCESSOR_GETTER) {
+      set_getter(value);
+    } else {
+      set_setter(value);
+    }
+  }
+
   // Note: Returns undefined instead in case of a hole.
   Object* GetComponent(AccessorComponent component);
 
diff --git a/src/profile-generator-inl.h b/src/profile-generator-inl.h
index 284e2df..9afc52f 100644
--- a/src/profile-generator-inl.h
+++ b/src/profile-generator-inl.h
@@ -96,8 +96,51 @@
 
 
 HeapEntry* HeapGraphEdge::from() const {
-  return const_cast<HeapEntry*>(
-      reinterpret_cast<const HeapEntry*>(this - child_index_) - 1);
+  return &snapshot()->entries()[from_index_];
+}
+
+
+HeapSnapshot* HeapGraphEdge::snapshot() const {
+  return to_entry_->snapshot();
+}
+
+
+int HeapEntry::index() const {
+  return static_cast<int>(this - &snapshot_->entries().first());
+}
+
+
+int HeapEntry::set_children_index(int index) {
+  children_index_ = index;
+  int next_index = index + children_count_;
+  children_count_ = 0;
+  return next_index;
+}
+
+
+int HeapEntry::set_retainers_index(int index) {
+  retainers_index_ = index;
+  int next_index = index + retainers_count_;
+  retainers_count_ = 0;
+  return next_index;
+}
+
+
+HeapGraphEdge** HeapEntry::children_arr() {
+  ASSERT(children_index_ >= 0);
+  return &snapshot_->children()[children_index_];
+}
+
+
+HeapGraphEdge** HeapEntry::retainers_arr() {
+  ASSERT(retainers_index_ >= 0);
+  return &snapshot_->retainers()[retainers_index_];
+}
+
+
+HeapEntry* HeapEntry::dominator() const {
+  ASSERT(dominator_ >= 0);
+  return &snapshot_->entries()[dominator_];
 }
 
 
diff --git a/src/profile-generator.cc b/src/profile-generator.cc
index c91e83b..b31b77b 100644
--- a/src/profile-generator.cc
+++ b/src/profile-generator.cc
@@ -931,78 +931,71 @@
 }
 
 
-void HeapGraphEdge::Init(
-    int child_index, Type type, const char* name, HeapEntry* to) {
+HeapGraphEdge::HeapGraphEdge(Type type, const char* name, int from, int to)
+    : type_(type),
+      from_index_(from),
+      to_index_(to),
+      name_(name) {
   ASSERT(type == kContextVariable
-         || type == kProperty
-         || type == kInternal
-         || type == kShortcut);
-  child_index_ = child_index;
-  type_ = type;
-  name_ = name;
-  to_ = to;
+      || type == kProperty
+      || type == kInternal
+      || type == kShortcut);
 }
 
 
-void HeapGraphEdge::Init(int child_index, Type type, int index, HeapEntry* to) {
+HeapGraphEdge::HeapGraphEdge(Type type, int index, int from, int to)
+    : type_(type),
+      from_index_(from),
+      to_index_(to),
+      index_(index) {
   ASSERT(type == kElement || type == kHidden || type == kWeak);
-  child_index_ = child_index;
-  type_ = type;
-  index_ = index;
-  to_ = to;
 }
 
 
-void HeapGraphEdge::Init(int child_index, int index, HeapEntry* to) {
-  Init(child_index, kElement, index, to);
+void HeapGraphEdge::ReplaceToIndexWithEntry(HeapSnapshot* snapshot) {
+  to_entry_ = &snapshot->entries()[to_index_];
 }
 
 
-void HeapEntry::Init(HeapSnapshot* snapshot,
+const int HeapEntry::kNoEntry = -1;
+
+HeapEntry::HeapEntry(HeapSnapshot* snapshot,
                      Type type,
                      const char* name,
                      SnapshotObjectId id,
-                     int self_size,
-                     int children_count,
-                     int retainers_count) {
-  snapshot_ = snapshot;
-  type_ = type;
-  painted_ = false;
-  user_reachable_ = false;
-  name_ = name;
-  self_size_ = self_size;
-  retained_size_ = 0;
-  entry_index_ = -1;
-  children_count_ = children_count;
-  retainers_count_ = retainers_count;
-  dominator_ = NULL;
-  id_ = id;
-}
+                     int self_size)
+    : painted_(false),
+      user_reachable_(false),
+      dominator_(kNoEntry),
+      type_(type),
+      retainers_count_(0),
+      retainers_index_(-1),
+      children_count_(0),
+      children_index_(-1),
+      self_size_(self_size),
+      retained_size_(0),
+      id_(id),
+      snapshot_(snapshot),
+      name_(name) { }
 
 
 void HeapEntry::SetNamedReference(HeapGraphEdge::Type type,
-                                  int child_index,
                                   const char* name,
-                                  HeapEntry* entry,
-                                  int retainer_index) {
-  children()[child_index].Init(child_index, type, name, entry);
-  entry->retainers()[retainer_index] = children_arr() + child_index;
+                                  HeapEntry* entry) {
+  HeapGraphEdge edge(type, name, this->index(), entry->index());
+  snapshot_->edges().Add(edge);
+  ++children_count_;
+  ++entry->retainers_count_;
 }
 
 
 void HeapEntry::SetIndexedReference(HeapGraphEdge::Type type,
-                                    int child_index,
                                     int index,
-                                    HeapEntry* entry,
-                                    int retainer_index) {
-  children()[child_index].Init(child_index, type, index, entry);
-  entry->retainers()[retainer_index] = children_arr() + child_index;
-}
-
-
-void HeapEntry::SetUnidirElementReference(
-    int child_index, int index, HeapEntry* entry) {
-  children()[child_index].Init(child_index, index, entry);
+                                    HeapEntry* entry) {
+  HeapGraphEdge edge(type, index, this->index(), entry->index());
+  snapshot_->edges().Add(edge);
+  ++children_count_;
+  ++entry->retainers_count_;
 }
 
 
@@ -1013,7 +1006,8 @@
 
 void HeapEntry::Print(
     const char* prefix, const char* edge_name, int max_depth, int indent) {
-  OS::Print("%6d %7d @%6llu %*c %s%s: ",
+  STATIC_CHECK(sizeof(unsigned) == sizeof(id()));
+  OS::Print("%6d %7d @%6u %*c %s%s: ",
             self_size(), retained_size(), id(),
             indent, ' ', prefix, edge_name);
   if (type() != kString) {
@@ -1031,9 +1025,9 @@
     OS::Print("\"\n");
   }
   if (--max_depth == 0) return;
-  Vector<HeapGraphEdge> ch = children();
+  Vector<HeapGraphEdge*> ch = children();
   for (int i = 0; i < ch.length(); ++i) {
-    HeapGraphEdge& edge = ch[i];
+    HeapGraphEdge& edge = *ch[i];
     const char* edge_prefix = "";
     EmbeddedVector<char, 64> index;
     const char* edge_name = index.start();
@@ -1089,15 +1083,6 @@
 }
 
 
-size_t HeapEntry::EntriesSize(int entries_count,
-                              int children_count,
-                              int retainers_count) {
-  return sizeof(HeapEntry) * entries_count         // NOLINT
-      + sizeof(HeapGraphEdge) * children_count     // NOLINT
-      + sizeof(HeapGraphEdge*) * retainers_count;  // NOLINT
-}
-
-
 // It is very important to keep objects that form a heap snapshot
 // as small as possible.
 namespace {  // Avoid littering the global namespace.
@@ -1106,7 +1091,7 @@
 
 template <> struct SnapshotSizeConstants<4> {
   static const int kExpectedHeapGraphEdgeSize = 12;
-  static const int kExpectedHeapEntrySize = 36;
+  static const int kExpectedHeapEntrySize = 40;
   static const size_t kMaxSerializableSnapshotRawSize = 256 * MB;
 };
 
@@ -1127,11 +1112,9 @@
       type_(type),
       title_(title),
       uid_(uid),
-      root_entry_(NULL),
-      gc_roots_entry_(NULL),
-      natives_root_entry_(NULL),
-      raw_entries_(NULL),
-      number_of_edges_(0),
+      root_index_(HeapEntry::kNoEntry),
+      gc_roots_index_(HeapEntry::kNoEntry),
+      natives_root_index_(HeapEntry::kNoEntry),
       max_snapshot_js_object_id_(0) {
   STATIC_CHECK(
       sizeof(HeapGraphEdge) ==
@@ -1140,16 +1123,11 @@
       sizeof(HeapEntry) ==
       SnapshotSizeConstants<kPointerSize>::kExpectedHeapEntrySize);
   for (int i = 0; i < VisitorSynchronization::kNumberOfSyncTags; ++i) {
-    gc_subroot_entries_[i] = NULL;
+    gc_subroot_indexes_[i] = HeapEntry::kNoEntry;
   }
 }
 
 
-HeapSnapshot::~HeapSnapshot() {
-  DeleteArray(raw_entries_);
-}
-
-
 void HeapSnapshot::Delete() {
   collection_->RemoveSnapshot(this);
   delete this;
@@ -1161,19 +1139,8 @@
 }
 
 
-void HeapSnapshot::AllocateEntries(int entries_count,
-                                   int children_count,
-                                   int retainers_count) {
-  ASSERT(raw_entries_ == NULL);
-  number_of_edges_ = children_count;
-  raw_entries_size_ =
-      HeapEntry::EntriesSize(entries_count, children_count, retainers_count);
-  raw_entries_ = NewArray<char>(raw_entries_size_);
-}
-
-
-static void HeapEntryClearPaint(HeapEntry** entry_ptr) {
-  (*entry_ptr)->clear_paint();
+static void HeapEntryClearPaint(HeapEntry* entry_ptr) {
+  entry_ptr->clear_paint();
 }
 
 
@@ -1182,79 +1149,83 @@
 }
 
 
-HeapEntry* HeapSnapshot::AddRootEntry(int children_count) {
-  ASSERT(root_entry_ == NULL);
+HeapEntry* HeapSnapshot::AddRootEntry() {
+  ASSERT(root_index_ == HeapEntry::kNoEntry);
   ASSERT(entries_.is_empty());  // Root entry must be the first one.
-  return (root_entry_ = AddEntry(HeapEntry::kObject,
-                                 "",
-                                 HeapObjectsMap::kInternalRootObjectId,
-                                 0,
-                                 children_count,
-                                 0));
+  HeapEntry* entry = AddEntry(HeapEntry::kObject,
+                              "",
+                              HeapObjectsMap::kInternalRootObjectId,
+                              0);
+  root_index_ = entry->index();
+  ASSERT(root_index_ == 0);
+  return entry;
 }
 
 
-HeapEntry* HeapSnapshot::AddGcRootsEntry(int children_count,
-                                         int retainers_count) {
-  ASSERT(gc_roots_entry_ == NULL);
-  return (gc_roots_entry_ = AddEntry(HeapEntry::kObject,
-                                     "(GC roots)",
-                                     HeapObjectsMap::kGcRootsObjectId,
-                                     0,
-                                     children_count,
-                                     retainers_count));
+HeapEntry* HeapSnapshot::AddGcRootsEntry() {
+  ASSERT(gc_roots_index_ == HeapEntry::kNoEntry);
+  HeapEntry* entry = AddEntry(HeapEntry::kObject,
+                              "(GC roots)",
+                              HeapObjectsMap::kGcRootsObjectId,
+                              0);
+  gc_roots_index_ = entry->index();
+  return entry;
 }
 
 
-HeapEntry* HeapSnapshot::AddGcSubrootEntry(int tag,
-                                           int children_count,
-                                           int retainers_count) {
-  ASSERT(gc_subroot_entries_[tag] == NULL);
+HeapEntry* HeapSnapshot::AddGcSubrootEntry(int tag) {
+  ASSERT(gc_subroot_indexes_[tag] == HeapEntry::kNoEntry);
   ASSERT(0 <= tag && tag < VisitorSynchronization::kNumberOfSyncTags);
-  return (gc_subroot_entries_[tag] = AddEntry(
+  HeapEntry* entry = AddEntry(
       HeapEntry::kObject,
       VisitorSynchronization::kTagNames[tag],
       HeapObjectsMap::GetNthGcSubrootId(tag),
-      0,
-      children_count,
-      retainers_count));
+      0);
+  gc_subroot_indexes_[tag] = entry->index();
+  return entry;
 }
 
 
 HeapEntry* HeapSnapshot::AddEntry(HeapEntry::Type type,
                                   const char* name,
                                   SnapshotObjectId id,
-                                  int size,
-                                  int children_count,
-                                  int retainers_count) {
-  HeapEntry* entry = GetNextEntryToInit();
-  entry->Init(this, type, name, id, size, children_count, retainers_count);
-  return entry;
+                                  int size) {
+  HeapEntry entry(this, type, name, id, size);
+  entries_.Add(entry);
+  return &entries_.last();
+}
+
+
+void HeapSnapshot::FillChildrenAndRetainers() {
+  ASSERT(children().is_empty());
+  children().Allocate(edges().length());
+  ASSERT(retainers().is_empty());
+  retainers().Allocate(edges().length());
+  int children_index = 0;
+  int retainers_index = 0;
+  for (int i = 0; i < entries().length(); ++i) {
+    HeapEntry* entry = &entries()[i];
+    children_index = entry->set_children_index(children_index);
+    retainers_index = entry->set_retainers_index(retainers_index);
+  }
+  ASSERT(edges().length() == children_index);
+  ASSERT(edges().length() == retainers_index);
+  for (int i = 0; i < edges().length(); ++i) {
+    HeapGraphEdge* edge = &edges()[i];
+    edge->ReplaceToIndexWithEntry(this);
+    edge->from()->add_child(edge);
+    edge->to()->add_retainer(edge);
+  }
 }
 
 
 void HeapSnapshot::SetDominatorsToSelf() {
   for (int i = 0; i < entries_.length(); ++i) {
-    HeapEntry* entry = entries_[i];
-    if (entry->dominator() == NULL) entry->set_dominator(entry);
+    entries_[i].set_dominator(&entries_[i]);
   }
 }
 
 
-HeapEntry* HeapSnapshot::GetNextEntryToInit() {
-  if (entries_.length() > 0) {
-    HeapEntry* last_entry = entries_.last();
-    entries_.Add(reinterpret_cast<HeapEntry*>(
-        reinterpret_cast<char*>(last_entry) + last_entry->EntrySize()));
-  } else {
-    entries_.Add(reinterpret_cast<HeapEntry*>(raw_entries_));
-  }
-  ASSERT(reinterpret_cast<char*>(entries_.last()) <
-         (raw_entries_ + raw_entries_size_));
-  return entries_.last();
-}
-
-
 class FindEntryById {
  public:
   explicit FindEntryById(SnapshotObjectId id) : id_(id) { }
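FillChildrenAndRetainers above lays the children (and, symmetrically, the retainers) out as one flat array indexed by per-entry start offsets. A minimal standalone sketch of that layout, using a plain Edge struct and integer ids instead of HeapEntry/HeapGraphEdge (not V8 code): counts are turned into start offsets with a running sum, then each edge is dropped into its source entry's slice.

    #include <iostream>
    #include <vector>

    struct Edge { int from, to; };

    int main() {
      // entry ids 0..2; edges recorded in arbitrary order
      std::vector<Edge> edges = {{0, 1}, {2, 0}, {0, 2}};
      const int entry_count = 3;

      std::vector<int> children_count(entry_count, 0);
      for (const Edge& e : edges) ++children_count[e.from];

      // running sum: children_index[i] is where entry i's children start
      std::vector<int> children_index(entry_count, 0);
      int next = 0;
      for (int i = 0; i < entry_count; ++i) {
        children_index[i] = next;
        next += children_count[i];
        children_count[i] = 0;  // reused as a fill cursor below
      }

      std::vector<const Edge*> children(edges.size(), nullptr);
      for (const Edge& e : edges) {
        children[children_index[e.from] + children_count[e.from]++] = &e;
      }

      for (int i = 0; i < entry_count; ++i) {
        std::cout << "entry " << i << " children:";
        for (int j = 0; j < children_count[i]; ++j) {
          std::cout << " " << children[children_index[i] + j]->to;
        }
        std::cout << "\n";
      }
      return 0;
    }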
@@ -1287,7 +1258,10 @@
 
 List<HeapEntry*>* HeapSnapshot::GetSortedEntriesList() {
   if (sorted_entries_.is_empty()) {
-    sorted_entries_.AddAll(entries_);
+    sorted_entries_.Allocate(entries_.length());
+    for (int i = 0; i < entries_.length(); ++i) {
+      sorted_entries_[i] = &entries_[i];
+    }
     sorted_entries_.Sort(SortByIds);
   }
   return &sorted_entries_;
@@ -1299,6 +1273,22 @@
 }
 
 
+template<typename T, class P>
+static size_t GetMemoryUsedByList(const List<T, P>& list) {
+  return list.capacity() * sizeof(T);
+}
+
+
+size_t HeapSnapshot::RawSnapshotSize() const {
+  return
+      GetMemoryUsedByList(entries_) +
+      GetMemoryUsedByList(edges_) +
+      GetMemoryUsedByList(children_) +
+      GetMemoryUsedByList(retainers_) +
+      GetMemoryUsedByList(sorted_entries_);
+}
+
+
 // We split IDs on evens for embedder objects (see
 // HeapObjectsMap::GenerateId) and odds for native objects.
 const SnapshotObjectId HeapObjectsMap::kInternalRootObjectId = 1;
@@ -1567,99 +1557,22 @@
 }
 
 
-HeapEntry* const HeapEntriesMap::kHeapEntryPlaceholder =
-    reinterpret_cast<HeapEntry*>(1);
-
 HeapEntriesMap::HeapEntriesMap()
-    : entries_(HeapThingsMatch),
-      entries_count_(0),
-      total_children_count_(0),
-      total_retainers_count_(0) {
+    : entries_(HeapThingsMatch) {
 }
 
 
-HeapEntriesMap::~HeapEntriesMap() {
-  for (HashMap::Entry* p = entries_.Start(); p != NULL; p = entries_.Next(p)) {
-    delete reinterpret_cast<EntryInfo*>(p->value);
-  }
-}
-
-
-void HeapEntriesMap::AllocateHeapEntryForMapEntry(HashMap::Entry* map_entry) {
-    EntryInfo* entry_info = reinterpret_cast<EntryInfo*>(map_entry->value);
-    entry_info->entry = entry_info->allocator->AllocateEntry(
-        map_entry->key,
-        entry_info->children_count,
-        entry_info->retainers_count);
-    ASSERT(entry_info->entry != NULL);
-    ASSERT(entry_info->entry != kHeapEntryPlaceholder);
-    entry_info->children_count = 0;
-    entry_info->retainers_count = 0;
-}
-
-
-void HeapEntriesMap::AllocateEntries(HeapThing root_object) {
-  HashMap::Entry* root_entry =
-      entries_.Lookup(root_object, Hash(root_object), false);
-  ASSERT(root_entry != NULL);
-  // Make sure root entry is allocated first.
-  AllocateHeapEntryForMapEntry(root_entry);
-  void* root_entry_value = root_entry->value;
-  // Remove the root object from map while iterating through other entries.
-  entries_.Remove(root_object, Hash(root_object));
-  root_entry = NULL;
-
-  for (HashMap::Entry* p = entries_.Start();
-       p != NULL;
-       p = entries_.Next(p)) {
-    AllocateHeapEntryForMapEntry(p);
-  }
-
-  // Insert root entry back.
-  root_entry = entries_.Lookup(root_object, Hash(root_object), true);
-  root_entry->value = root_entry_value;
-}
-
-
-HeapEntry* HeapEntriesMap::Map(HeapThing thing) {
+int HeapEntriesMap::Map(HeapThing thing) {
   HashMap::Entry* cache_entry = entries_.Lookup(thing, Hash(thing), false);
-  if (cache_entry != NULL) {
-    EntryInfo* entry_info = reinterpret_cast<EntryInfo*>(cache_entry->value);
-    return entry_info->entry;
-  } else {
-    return NULL;
-  }
+  if (cache_entry == NULL) return HeapEntry::kNoEntry;
+  return static_cast<int>(reinterpret_cast<intptr_t>(cache_entry->value));
 }
 
 
-void HeapEntriesMap::Pair(
-    HeapThing thing, HeapEntriesAllocator* allocator, HeapEntry* entry) {
+void HeapEntriesMap::Pair(HeapThing thing, int entry) {
   HashMap::Entry* cache_entry = entries_.Lookup(thing, Hash(thing), true);
   ASSERT(cache_entry->value == NULL);
-  cache_entry->value = new EntryInfo(entry, allocator);
-  ++entries_count_;
-}
-
-
-void HeapEntriesMap::CountReference(HeapThing from, HeapThing to,
-                                    int* prev_children_count,
-                                    int* prev_retainers_count) {
-  HashMap::Entry* from_cache_entry = entries_.Lookup(from, Hash(from), false);
-  HashMap::Entry* to_cache_entry = entries_.Lookup(to, Hash(to), false);
-  ASSERT(from_cache_entry != NULL);
-  ASSERT(to_cache_entry != NULL);
-  EntryInfo* from_entry_info =
-      reinterpret_cast<EntryInfo*>(from_cache_entry->value);
-  EntryInfo* to_entry_info =
-      reinterpret_cast<EntryInfo*>(to_cache_entry->value);
-  if (prev_children_count)
-    *prev_children_count = from_entry_info->children_count;
-  if (prev_retainers_count)
-    *prev_retainers_count = to_entry_info->retainers_count;
-  ++from_entry_info->children_count;
-  ++to_entry_info->retainers_count;
-  ++total_children_count_;
-  ++total_retainers_count_;
+  cache_entry->value = reinterpret_cast<void*>(static_cast<intptr_t>(entry));
 }
 
 
@@ -1676,20 +1589,14 @@
 bool HeapObjectsSet::Contains(Object* obj) {
   if (!obj->IsHeapObject()) return false;
   HeapObject* object = HeapObject::cast(obj);
-  HashMap::Entry* cache_entry =
-      entries_.Lookup(object, HeapEntriesMap::Hash(object), false);
-  return cache_entry != NULL;
+  return entries_.Lookup(object, HeapEntriesMap::Hash(object), false) != NULL;
 }
 
 
 void HeapObjectsSet::Insert(Object* obj) {
   if (!obj->IsHeapObject()) return;
   HeapObject* object = HeapObject::cast(obj);
-  HashMap::Entry* cache_entry =
-      entries_.Lookup(object, HeapEntriesMap::Hash(object), true);
-  if (cache_entry->value == NULL) {
-    cache_entry->value = HeapEntriesMap::kHeapEntryPlaceholder;
-  }
+  entries_.Lookup(object, HeapEntriesMap::Hash(object), true);
 }
 
 
@@ -1697,12 +1604,9 @@
   HeapObject* object = HeapObject::cast(obj);
   HashMap::Entry* cache_entry =
       entries_.Lookup(object, HeapEntriesMap::Hash(object), false);
-  if (cache_entry != NULL
-      && cache_entry->value != HeapEntriesMap::kHeapEntryPlaceholder) {
-    return reinterpret_cast<const char*>(cache_entry->value);
-  } else {
-    return NULL;
-  }
+  return cache_entry != NULL
+      ? reinterpret_cast<const char*>(cache_entry->value)
+      : NULL;
 }
 
 
@@ -1744,129 +1648,83 @@
 }
 
 
-HeapEntry* V8HeapExplorer::AllocateEntry(
-    HeapThing ptr, int children_count, int retainers_count) {
-  return AddEntry(
-      reinterpret_cast<HeapObject*>(ptr), children_count, retainers_count);
+HeapEntry* V8HeapExplorer::AllocateEntry(HeapThing ptr) {
+  return AddEntry(reinterpret_cast<HeapObject*>(ptr));
 }
 
 
-HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object,
-                                    int children_count,
-                                    int retainers_count) {
+HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object) {
   if (object == kInternalRootObject) {
-    ASSERT(retainers_count == 0);
-    return snapshot_->AddRootEntry(children_count);
+    snapshot_->AddRootEntry();
+    return snapshot_->root();
   } else if (object == kGcRootsObject) {
-    return snapshot_->AddGcRootsEntry(children_count, retainers_count);
+    HeapEntry* entry = snapshot_->AddGcRootsEntry();
+    return entry;
   } else if (object >= kFirstGcSubrootObject && object < kLastGcSubrootObject) {
-    return snapshot_->AddGcSubrootEntry(
-        GetGcSubrootOrder(object),
-        children_count,
-        retainers_count);
+    HeapEntry* entry = snapshot_->AddGcSubrootEntry(GetGcSubrootOrder(object));
+    return entry;
   } else if (object->IsJSFunction()) {
     JSFunction* func = JSFunction::cast(object);
     SharedFunctionInfo* shared = func->shared();
     const char* name = shared->bound() ? "native_bind" :
         collection_->names()->GetName(String::cast(shared->name()));
-    return AddEntry(object,
-                    HeapEntry::kClosure,
-                    name,
-                    children_count,
-                    retainers_count);
+    return AddEntry(object, HeapEntry::kClosure, name);
   } else if (object->IsJSRegExp()) {
     JSRegExp* re = JSRegExp::cast(object);
     return AddEntry(object,
                     HeapEntry::kRegExp,
-                    collection_->names()->GetName(re->Pattern()),
-                    children_count,
-                    retainers_count);
+                    collection_->names()->GetName(re->Pattern()));
   } else if (object->IsJSObject()) {
-    return AddEntry(object,
-                    HeapEntry::kObject,
-                    "",
-                    children_count,
-                    retainers_count);
+    const char* name = collection_->names()->GetName(
+        GetConstructorName(JSObject::cast(object)));
+    if (object->IsJSGlobalObject()) {
+      const char* tag = objects_tags_.GetTag(object);
+      if (tag != NULL) {
+        name = collection_->names()->GetFormatted("%s / %s", name, tag);
+      }
+    }
+    return AddEntry(object, HeapEntry::kObject, name);
   } else if (object->IsString()) {
     return AddEntry(object,
                     HeapEntry::kString,
-                    collection_->names()->GetName(String::cast(object)),
-                    children_count,
-                    retainers_count);
+                    collection_->names()->GetName(String::cast(object)));
   } else if (object->IsCode()) {
-    return AddEntry(object,
-                    HeapEntry::kCode,
-                    "",
-                    children_count,
-                    retainers_count);
+    return AddEntry(object, HeapEntry::kCode, "");
   } else if (object->IsSharedFunctionInfo()) {
-    SharedFunctionInfo* shared = SharedFunctionInfo::cast(object);
+    String* name = String::cast(SharedFunctionInfo::cast(object)->name());
     return AddEntry(object,
                     HeapEntry::kCode,
-                    collection_->names()->GetName(String::cast(shared->name())),
-                    children_count,
-                    retainers_count);
+                    collection_->names()->GetName(name));
   } else if (object->IsScript()) {
-    Script* script = Script::cast(object);
+    Object* name = Script::cast(object)->name();
     return AddEntry(object,
                     HeapEntry::kCode,
-                    script->name()->IsString() ?
-                        collection_->names()->GetName(
-                            String::cast(script->name()))
-                        : "",
-                    children_count,
-                    retainers_count);
+                    name->IsString()
+                        ? collection_->names()->GetName(String::cast(name))
+                        : "");
   } else if (object->IsGlobalContext()) {
-    return AddEntry(object,
-                    HeapEntry::kHidden,
-                    "system / GlobalContext",
-                    children_count,
-                    retainers_count);
+    return AddEntry(object, HeapEntry::kHidden, "system / GlobalContext");
   } else if (object->IsContext()) {
-    return AddEntry(object,
-                    HeapEntry::kHidden,
-                    "system / Context",
-                    children_count,
-                    retainers_count);
+    return AddEntry(object, HeapEntry::kHidden, "system / Context");
   } else if (object->IsFixedArray() ||
              object->IsFixedDoubleArray() ||
              object->IsByteArray() ||
              object->IsExternalArray()) {
-    const char* tag = objects_tags_.GetTag(object);
-    return AddEntry(object,
-                    HeapEntry::kArray,
-                    tag != NULL ? tag : "",
-                    children_count,
-                    retainers_count);
+    return AddEntry(object, HeapEntry::kArray, "");
   } else if (object->IsHeapNumber()) {
-    return AddEntry(object,
-                    HeapEntry::kHeapNumber,
-                    "number",
-                    children_count,
-                    retainers_count);
+    return AddEntry(object, HeapEntry::kHeapNumber, "number");
   }
-  return AddEntry(object,
-                  HeapEntry::kHidden,
-                  GetSystemEntryName(object),
-                  children_count,
-                  retainers_count);
+  return AddEntry(object, HeapEntry::kHidden, GetSystemEntryName(object));
 }
 
 
 HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object,
                                     HeapEntry::Type type,
-                                    const char* name,
-                                    int children_count,
-                                    int retainers_count) {
+                                    const char* name) {
   int object_size = object->Size();
   SnapshotObjectId object_id =
     collection_->GetObjectId(object->address(), object_size);
-  return snapshot_->AddEntry(type,
-                             name,
-                             object_id,
-                             object_size,
-                             children_count,
-                             retainers_count);
+  return snapshot_->AddEntry(type, name, object_id, object_size);
 }
 
 
@@ -1935,10 +1793,10 @@
  public:
   IndexedReferencesExtractor(V8HeapExplorer* generator,
                              HeapObject* parent_obj,
-                             HeapEntry* parent_entry)
+                             int parent)
       : generator_(generator),
         parent_obj_(parent_obj),
-        parent_(parent_entry),
+        parent_(parent),
         next_index_(1) {
   }
   void VisitPointers(Object** start, Object** end) {
@@ -1967,14 +1825,15 @@
   }
   V8HeapExplorer* generator_;
   HeapObject* parent_obj_;
-  HeapEntry* parent_;
+  int parent_;
   int next_index_;
 };
 
 
 void V8HeapExplorer::ExtractReferences(HeapObject* obj) {
-  HeapEntry* entry = GetEntry(obj);
-  if (entry == NULL) return;  // No interest in this object.
+  HeapEntry* heap_entry = GetEntry(obj);
+  if (heap_entry == NULL) return;  // No interest in this object.
+  int entry = heap_entry->index();
 
   bool extract_indexed_refs = true;
   if (obj->IsJSGlobalProxy()) {
@@ -2026,7 +1885,7 @@
 
 
 void V8HeapExplorer::ExtractJSObjectReferences(
-    HeapEntry* entry, JSObject* js_obj) {
+    int entry, JSObject* js_obj) {
   HeapObject* obj = js_obj;
   ExtractClosureReferences(js_obj, entry);
   ExtractPropertyReferences(js_obj, entry);
@@ -2095,7 +1954,7 @@
 }
 
 
-void V8HeapExplorer::ExtractStringReferences(HeapEntry* entry, String* string) {
+void V8HeapExplorer::ExtractStringReferences(int entry, String* string) {
   if (string->IsConsString()) {
     ConsString* cs = ConsString::cast(string);
     SetInternalReference(cs, entry, "first", cs->first());
@@ -2107,8 +1966,7 @@
 }
 
 
-void V8HeapExplorer::ExtractContextReferences(
-    HeapEntry* entry, Context* context) {
+void V8HeapExplorer::ExtractContextReferences(int entry, Context* context) {
 #define EXTRACT_CONTEXT_FIELD(index, type, name) \
   SetInternalReference(context, entry, #name, context->get(Context::index), \
       FixedArray::OffsetOfElementAt(Context::index));
@@ -2134,7 +1992,7 @@
 }
 
 
-void V8HeapExplorer::ExtractMapReferences(HeapEntry* entry, Map* map) {
+void V8HeapExplorer::ExtractMapReferences(int entry, Map* map) {
   SetInternalReference(map, entry,
                        "prototype", map->prototype(), Map::kPrototypeOffset);
   SetInternalReference(map, entry,
@@ -2146,10 +2004,16 @@
                          "descriptors", map->instance_descriptors(),
                          Map::kInstanceDescriptorsOrBitField3Offset);
   }
-  TagObject(map->prototype_transitions(), "(prototype transitions)");
-  SetInternalReference(map, entry,
-                       "prototype_transitions", map->prototype_transitions(),
-                       Map::kPrototypeTransitionsOffset);
+  if (map->unchecked_prototype_transitions()->IsFixedArray()) {
+    TagObject(map->prototype_transitions(), "(prototype transitions)");
+    SetInternalReference(map, entry,
+                         "prototype_transitions", map->prototype_transitions(),
+                         Map::kPrototypeTransitionsOrBackPointerOffset);
+  } else {
+    SetInternalReference(map, entry,
+                         "back_pointer", map->GetBackPointer(),
+                         Map::kPrototypeTransitionsOrBackPointerOffset);
+  }
   SetInternalReference(map, entry,
                        "code_cache", map->code_cache(),
                        Map::kCodeCacheOffset);
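ExtractMapReferences above now dispatches on what the single overlapped slot holds. A minimal standalone sketch of that dispatch, with MiniMap, TransitionsArray, and OverlappedField as illustration-only types (not V8 code): the reader checks the dynamic type of the stored value and reports either the prototype transitions array or the back pointer.

    #include <iostream>
    #include <variant>
    #include <vector>

    struct MiniMap;
    using TransitionsArray = std::vector<MiniMap*>;
    using OverlappedField = std::variant<MiniMap*, TransitionsArray>;

    struct MiniMap {
      OverlappedField transitions_or_back_pointer;
    };

    void DescribeField(const MiniMap& map) {
      if (std::holds_alternative<TransitionsArray>(map.transitions_or_back_pointer)) {
        std::cout << "prototype transitions, "
                  << std::get<TransitionsArray>(map.transitions_or_back_pointer).size()
                  << " entry(ies)\n";
      } else {
        std::cout << "back pointer\n";
      }
    }

    int main() {
      MiniMap parent, child;
      child.transitions_or_back_pointer = &parent;  // just a back pointer
      parent.transitions_or_back_pointer = TransitionsArray{&child};
      DescribeField(parent);
      DescribeField(child);
      return 0;
    }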
@@ -2157,7 +2021,7 @@
 
 
 void V8HeapExplorer::ExtractSharedFunctionInfoReferences(
-    HeapEntry* entry, SharedFunctionInfo* shared) {
+    int entry, SharedFunctionInfo* shared) {
   HeapObject* obj = shared;
   SetInternalReference(obj, entry,
                        "name", shared->name(),
@@ -2199,7 +2063,7 @@
 }
 
 
-void V8HeapExplorer::ExtractScriptReferences(HeapEntry* entry, Script* script) {
+void V8HeapExplorer::ExtractScriptReferences(int entry, Script* script) {
   HeapObject* obj = script;
   SetInternalReference(obj, entry,
                        "source", script->source(),
@@ -2221,7 +2085,7 @@
 
 
 void V8HeapExplorer::ExtractCodeCacheReferences(
-    HeapEntry* entry, CodeCache* code_cache) {
+    int entry, CodeCache* code_cache) {
   TagObject(code_cache->default_cache(), "(default code cache)");
   SetInternalReference(code_cache, entry,
                        "default_cache", code_cache->default_cache(),
@@ -2233,7 +2097,7 @@
 }
 
 
-void V8HeapExplorer::ExtractCodeReferences(HeapEntry* entry, Code* code) {
+void V8HeapExplorer::ExtractCodeReferences(int entry, Code* code) {
   TagObject(code->relocation_info(), "(code relocation info)");
   SetInternalReference(code, entry,
                        "relocation_info", code->relocation_info(),
@@ -2255,13 +2119,12 @@
 
 
 void V8HeapExplorer::ExtractJSGlobalPropertyCellReferences(
-    HeapEntry* entry, JSGlobalPropertyCell* cell) {
+    int entry, JSGlobalPropertyCell* cell) {
   SetInternalReference(cell, entry, "value", cell->value());
 }
 
 
-void V8HeapExplorer::ExtractClosureReferences(JSObject* js_obj,
-                                              HeapEntry* entry) {
+void V8HeapExplorer::ExtractClosureReferences(JSObject* js_obj, int entry) {
   if (!js_obj->IsJSFunction()) return;
 
   JSFunction* func = JSFunction::cast(js_obj);
@@ -2303,8 +2166,7 @@
 }
 
 
-void V8HeapExplorer::ExtractPropertyReferences(JSObject* js_obj,
-                                               HeapEntry* entry) {
+void V8HeapExplorer::ExtractPropertyReferences(JSObject* js_obj, int entry) {
   if (js_obj->HasFastProperties()) {
     DescriptorArray* descs = js_obj->map()->instance_descriptors();
     for (int i = 0; i < descs->number_of_descriptors(); i++) {
@@ -2377,8 +2239,7 @@
 }
 
 
-void V8HeapExplorer::ExtractElementReferences(JSObject* js_obj,
-                                              HeapEntry* entry) {
+void V8HeapExplorer::ExtractElementReferences(JSObject* js_obj, int entry) {
   if (js_obj->HasFastElements()) {
     FixedArray* elements = FixedArray::cast(js_obj->elements());
     int length = js_obj->IsJSArray() ?
@@ -2404,8 +2265,7 @@
 }
 
 
-void V8HeapExplorer::ExtractInternalReferences(JSObject* js_obj,
-                                               HeapEntry* entry) {
+void V8HeapExplorer::ExtractInternalReferences(JSObject* js_obj, int entry) {
   int length = js_obj->GetInternalFieldCount();
   for (int i = 0; i < length; ++i) {
     Object* o = js_obj->GetInternalField(i);
@@ -2531,6 +2391,7 @@
     filler_ = NULL;
     return false;
   }
+
   SetRootGcRootsReference();
   RootsReferencesExtractor extractor;
   heap_->IterateRoots(&extractor, VISIT_ONLY_STRONG);
@@ -2538,33 +2399,7 @@
   heap_->IterateRoots(&extractor, VISIT_ALL);
   extractor.FillReferences(this);
   filler_ = NULL;
-  return progress_->ProgressReport(false);
-}
-
-
-bool V8HeapExplorer::IterateAndSetObjectNames(SnapshotFillerInterface* filler) {
-  HeapIterator iterator(HeapIterator::kFilterUnreachable);
-  filler_ = filler;
-  for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
-    SetObjectName(obj);
-  }
-  return true;
-}
-
-
-void V8HeapExplorer::SetObjectName(HeapObject* object) {
-  if (!object->IsJSObject() || object->IsJSRegExp() || object->IsJSFunction()) {
-    return;
-  }
-  const char* name = collection_->names()->GetName(
-      GetConstructorName(JSObject::cast(object)));
-  if (object->IsJSGlobalObject()) {
-    const char* tag = objects_tags_.GetTag(object);
-    if (tag != NULL) {
-      name = collection_->names()->GetFormatted("%s / %s", name, tag);
-    }
-  }
-  GetEntry(object)->set_name(name);
+  return progress_->ProgressReport(true);
 }
 
 
@@ -2586,55 +2421,49 @@
 
 
 void V8HeapExplorer::SetClosureReference(HeapObject* parent_obj,
-                                         HeapEntry* parent_entry,
+                                         int parent_entry,
                                          String* reference_name,
                                          Object* child_obj) {
   HeapEntry* child_entry = GetEntry(child_obj);
   if (child_entry != NULL) {
     filler_->SetNamedReference(HeapGraphEdge::kContextVariable,
-                               parent_obj,
                                parent_entry,
                                collection_->names()->GetName(reference_name),
-                               child_obj,
                                child_entry);
   }
 }
 
 
 void V8HeapExplorer::SetNativeBindReference(HeapObject* parent_obj,
-                                            HeapEntry* parent_entry,
+                                            int parent_entry,
                                             const char* reference_name,
                                             Object* child_obj) {
   HeapEntry* child_entry = GetEntry(child_obj);
   if (child_entry != NULL) {
     filler_->SetNamedReference(HeapGraphEdge::kShortcut,
-                               parent_obj,
                                parent_entry,
                                reference_name,
-                               child_obj,
                                child_entry);
   }
 }
 
 
 void V8HeapExplorer::SetElementReference(HeapObject* parent_obj,
-                                         HeapEntry* parent_entry,
+                                         int parent_entry,
                                          int index,
                                          Object* child_obj) {
   HeapEntry* child_entry = GetEntry(child_obj);
   if (child_entry != NULL) {
     filler_->SetIndexedReference(HeapGraphEdge::kElement,
-                                 parent_obj,
                                  parent_entry,
                                  index,
-                                 child_obj,
                                  child_entry);
   }
 }
 
 
 void V8HeapExplorer::SetInternalReference(HeapObject* parent_obj,
-                                          HeapEntry* parent_entry,
+                                          int parent_entry,
                                           const char* reference_name,
                                           Object* child_obj,
                                           int field_offset) {
@@ -2642,16 +2471,16 @@
   if (child_entry == NULL) return;
   if (IsEssentialObject(child_obj)) {
     filler_->SetNamedReference(HeapGraphEdge::kInternal,
-                               parent_obj, parent_entry,
+                               parent_entry,
                                reference_name,
-                               child_obj, child_entry);
+                               child_entry);
   }
   IndexedReferencesExtractor::MarkVisitedField(parent_obj, field_offset);
 }
 
 
 void V8HeapExplorer::SetInternalReference(HeapObject* parent_obj,
-                                          HeapEntry* parent_entry,
+                                          int parent_entry,
                                           int index,
                                           Object* child_obj,
                                           int field_offset) {
@@ -2659,42 +2488,38 @@
   if (child_entry == NULL) return;
   if (IsEssentialObject(child_obj)) {
     filler_->SetNamedReference(HeapGraphEdge::kInternal,
-                               parent_obj, parent_entry,
+                               parent_entry,
                                collection_->names()->GetName(index),
-                               child_obj, child_entry);
+                               child_entry);
   }
   IndexedReferencesExtractor::MarkVisitedField(parent_obj, field_offset);
 }
 
 
 void V8HeapExplorer::SetHiddenReference(HeapObject* parent_obj,
-                                        HeapEntry* parent_entry,
+                                        int parent_entry,
                                         int index,
                                         Object* child_obj) {
   HeapEntry* child_entry = GetEntry(child_obj);
   if (child_entry != NULL && IsEssentialObject(child_obj)) {
     filler_->SetIndexedReference(HeapGraphEdge::kHidden,
-                                 parent_obj,
                                  parent_entry,
                                  index,
-                                 child_obj,
                                  child_entry);
   }
 }
 
 
 void V8HeapExplorer::SetWeakReference(HeapObject* parent_obj,
-                                      HeapEntry* parent_entry,
+                                      int parent_entry,
                                       int index,
                                       Object* child_obj,
                                       int field_offset) {
   HeapEntry* child_entry = GetEntry(child_obj);
   if (child_entry != NULL) {
     filler_->SetIndexedReference(HeapGraphEdge::kWeak,
-                                 parent_obj,
                                  parent_entry,
                                  index,
-                                 child_obj,
                                  child_entry);
     IndexedReferencesExtractor::MarkVisitedField(parent_obj, field_offset);
   }
@@ -2702,7 +2527,7 @@
 
 
 void V8HeapExplorer::SetPropertyReference(HeapObject* parent_obj,
-                                          HeapEntry* parent_entry,
+                                          int parent_entry,
                                           String* reference_name,
                                           Object* child_obj,
                                           const char* name_format_string,
@@ -2719,10 +2544,8 @@
         collection_->names()->GetName(reference_name);
 
     filler_->SetNamedReference(type,
-                               parent_obj,
                                parent_entry,
                                name,
-                               child_obj,
                                child_entry);
     IndexedReferencesExtractor::MarkVisitedField(parent_obj, field_offset);
   }
@@ -2730,16 +2553,14 @@
 
 
 void V8HeapExplorer::SetPropertyShortcutReference(HeapObject* parent_obj,
-                                                  HeapEntry* parent_entry,
+                                                  int parent_entry,
                                                   String* reference_name,
                                                   Object* child_obj) {
   HeapEntry* child_entry = GetEntry(child_obj);
   if (child_entry != NULL) {
     filler_->SetNamedReference(HeapGraphEdge::kShortcut,
-                               parent_obj,
                                parent_entry,
                                collection_->names()->GetName(reference_name),
-                               child_obj,
                                child_entry);
   }
 }
@@ -2748,8 +2569,8 @@
 void V8HeapExplorer::SetRootGcRootsReference() {
   filler_->SetIndexedAutoIndexReference(
       HeapGraphEdge::kElement,
-      kInternalRootObject, snapshot_->root(),
-      kGcRootsObject, snapshot_->gc_roots());
+      snapshot_->root()->index(),
+      snapshot_->gc_roots());
 }
 
 
@@ -2758,16 +2579,16 @@
   ASSERT(child_entry != NULL);
   filler_->SetNamedAutoIndexReference(
       HeapGraphEdge::kShortcut,
-      kInternalRootObject, snapshot_->root(),
-      child_obj, child_entry);
+      snapshot_->root()->index(),
+      child_entry);
 }
 
 
 void V8HeapExplorer::SetGcRootsReference(VisitorSynchronization::SyncTag tag) {
   filler_->SetIndexedAutoIndexReference(
       HeapGraphEdge::kElement,
-      kGcRootsObject, snapshot_->gc_roots(),
-      GetNthGcSubrootObject(tag), snapshot_->gc_subroot(tag));
+      snapshot_->gc_roots()->index(),
+      snapshot_->gc_subroot(tag));
 }
 
 
@@ -2779,14 +2600,14 @@
     if (name != NULL) {
       filler_->SetNamedReference(
           HeapGraphEdge::kInternal,
-          GetNthGcSubrootObject(tag), snapshot_->gc_subroot(tag),
+          snapshot_->gc_subroot(tag)->index(),
           name,
-          child_obj, child_entry);
+          child_entry);
     } else {
       filler_->SetIndexedAutoIndexReference(
           is_weak ? HeapGraphEdge::kWeak : HeapGraphEdge::kElement,
-          GetNthGcSubrootObject(tag), snapshot_->gc_subroot(tag),
-          child_obj, child_entry);
+          snapshot_->gc_subroot(tag)->index(),
+          child_entry);
     }
   }
 }
@@ -2813,7 +2634,10 @@
 
 void V8HeapExplorer::TagObject(Object* obj, const char* tag) {
   if (IsEssentialObject(obj)) {
-    objects_tags_.SetTag(obj, tag);
+    HeapEntry* entry = GetEntry(obj);
+    if (entry->name()[0] == '\0') {
+      entry->set_name(tag);
+    }
   }
 }
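
A note on the TagObject() change above: tags are no longer collected in a side
table (objects_tags_) and applied by a later naming pass; they are written
straight onto the entry, but only when the entry has not been named yet. A
minimal stand-alone sketch of that "first tag wins" rule (the types and main()
here are illustrative, not V8 code):

    #include <cstdio>
    #include <string>

    struct Entry { std::string name; };

    // Tagging writes directly onto the entry, but never overwrites an
    // existing name (mirrors the entry->name()[0] == '\0' check above).
    void Tag(Entry* entry, const char* tag) {
      if (entry->name.empty()) entry->name = tag;
    }

    int main() {
      Entry e;
      Tag(&e, "(default code cache)");
      Tag(&e, "(something else)");  // ignored: the entry is already named
      std::printf("%s\n", e.name.c_str());  // prints (default code cache)
      return 0;
    }
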
 
@@ -2903,8 +2727,7 @@
       collection_(snapshot_->collection()),
       entries_type_(entries_type) {
   }
-  virtual HeapEntry* AllocateEntry(
-      HeapThing ptr, int children_count, int retainers_count);
+  virtual HeapEntry* AllocateEntry(HeapThing ptr);
  private:
   HeapSnapshot* snapshot_;
   HeapSnapshotsCollection* collection_;
@@ -2912,23 +2735,19 @@
 };
 
 
-HeapEntry* BasicHeapEntriesAllocator::AllocateEntry(
-    HeapThing ptr, int children_count, int retainers_count) {
+HeapEntry* BasicHeapEntriesAllocator::AllocateEntry(HeapThing ptr) {
   v8::RetainedObjectInfo* info = reinterpret_cast<v8::RetainedObjectInfo*>(ptr);
   intptr_t elements = info->GetElementCount();
   intptr_t size = info->GetSizeInBytes();
+  const char* name = elements != -1
+      ? collection_->names()->GetFormatted(
+            "%s / %" V8_PTR_PREFIX "d entries", info->GetLabel(), elements)
+      : collection_->names()->GetCopy(info->GetLabel());
   return snapshot_->AddEntry(
       entries_type_,
-      elements != -1 ?
-          collection_->names()->GetFormatted(
-              "%s / %" V8_PTR_PREFIX "d entries",
-              info->GetLabel(),
-              info->GetElementCount()) :
-          collection_->names()->GetCopy(info->GetLabel()),
+      name,
       HeapObjectsMap::GenerateId(info),
-      size != -1 ? static_cast<int>(size) : 0,
-      children_count,
-      retainers_count);
+      size != -1 ? static_cast<int>(size) : 0);
 }
 
 
@@ -3009,9 +2828,9 @@
   for (int i = 0; i < groups->length(); ++i) {
     ImplicitRefGroup* group = groups->at(i);
     HeapObject* parent = *group->parent_;
-    HeapEntry* parent_entry =
-        filler_->FindOrAddEntry(parent, native_entries_allocator_);
-    ASSERT(parent_entry != NULL);
+    int parent_entry =
+        filler_->FindOrAddEntry(parent, native_entries_allocator_)->index();
+    ASSERT(parent_entry != HeapEntry::kNoEntry);
     Object*** children = group->children_;
     for (size_t j = 0; j < group->length_; ++j) {
       Object* child = *children[j];
@@ -3019,9 +2838,9 @@
           filler_->FindOrAddEntry(child, native_entries_allocator_);
       filler_->SetNamedReference(
           HeapGraphEdge::kInternal,
-          parent, parent_entry,
+          parent_entry,
           "native",
-          child, child_entry);
+          child_entry);
     }
   }
 }
@@ -3099,8 +2918,9 @@
                                        HEAP->HashSeed());
   HashMap::Entry* entry = native_groups_.Lookup(const_cast<char*>(label_copy),
                                                 hash, true);
-  if (entry->value == NULL)
+  if (entry->value == NULL) {
     entry->value = new NativeGroupRetainedObjectInfo(label);
+  }
   return static_cast<NativeGroupRetainedObjectInfo*>(entry->value);
 }
 
@@ -3116,8 +2936,8 @@
       filler_->FindOrAddEntry(group_info, synthetic_entries_allocator_);
   filler_->SetNamedAutoIndexReference(
       HeapGraphEdge::kInternal,
-      group_info, group_entry,
-      info, child_entry);
+      group_entry->index(),
+      child_entry);
 }
 
 
@@ -3129,12 +2949,12 @@
       filler_->FindOrAddEntry(info, native_entries_allocator_);
   ASSERT(info_entry != NULL);
   filler_->SetNamedReference(HeapGraphEdge::kInternal,
-                             wrapper, wrapper_entry,
+                             wrapper_entry->index(),
                              "native",
-                             info, info_entry);
+                             info_entry);
   filler_->SetIndexedAutoIndexReference(HeapGraphEdge::kElement,
-                                        info, info_entry,
-                                        wrapper, wrapper_entry);
+                                        info_entry->index(),
+                                        wrapper_entry);
 }
 
 
@@ -3149,8 +2969,8 @@
     ASSERT(group_entry != NULL);
     filler_->SetIndexedAutoIndexReference(
         HeapGraphEdge::kElement,
-        V8HeapExplorer::kInternalRootObject, snapshot_->root(),
-        group_info, group_entry);
+        snapshot_->root()->index(),
+        group_entry);
   }
 }
 
@@ -3165,56 +2985,6 @@
 }
 
 
-class SnapshotCounter : public SnapshotFillerInterface {
- public:
-  explicit SnapshotCounter(HeapEntriesMap* entries) : entries_(entries) { }
-  HeapEntry* AddEntry(HeapThing ptr, HeapEntriesAllocator* allocator) {
-    entries_->Pair(ptr, allocator, HeapEntriesMap::kHeapEntryPlaceholder);
-    return HeapEntriesMap::kHeapEntryPlaceholder;
-  }
-  HeapEntry* FindEntry(HeapThing ptr) {
-    return entries_->Map(ptr);
-  }
-  HeapEntry* FindOrAddEntry(HeapThing ptr, HeapEntriesAllocator* allocator) {
-    HeapEntry* entry = FindEntry(ptr);
-    return entry != NULL ? entry : AddEntry(ptr, allocator);
-  }
-  void SetIndexedReference(HeapGraphEdge::Type,
-                           HeapThing parent_ptr,
-                           HeapEntry*,
-                           int,
-                           HeapThing child_ptr,
-                           HeapEntry*) {
-    entries_->CountReference(parent_ptr, child_ptr);
-  }
-  void SetIndexedAutoIndexReference(HeapGraphEdge::Type,
-                                    HeapThing parent_ptr,
-                                    HeapEntry*,
-                                    HeapThing child_ptr,
-                                    HeapEntry*) {
-    entries_->CountReference(parent_ptr, child_ptr);
-  }
-  void SetNamedReference(HeapGraphEdge::Type,
-                         HeapThing parent_ptr,
-                         HeapEntry*,
-                         const char*,
-                         HeapThing child_ptr,
-                         HeapEntry*) {
-    entries_->CountReference(parent_ptr, child_ptr);
-  }
-  void SetNamedAutoIndexReference(HeapGraphEdge::Type,
-                                  HeapThing parent_ptr,
-                                  HeapEntry*,
-                                  HeapThing child_ptr,
-                                  HeapEntry*) {
-    entries_->CountReference(parent_ptr, child_ptr);
-  }
-
- private:
-  HeapEntriesMap* entries_;
-};
-
-
 class SnapshotFiller : public SnapshotFillerInterface {
  public:
   explicit SnapshotFiller(HeapSnapshot* snapshot, HeapEntriesMap* entries)
@@ -3222,64 +2992,48 @@
         collection_(snapshot->collection()),
         entries_(entries) { }
   HeapEntry* AddEntry(HeapThing ptr, HeapEntriesAllocator* allocator) {
-    UNREACHABLE();
-    return NULL;
+    HeapEntry* entry = allocator->AllocateEntry(ptr);
+    entries_->Pair(ptr, entry->index());
+    return entry;
   }
   HeapEntry* FindEntry(HeapThing ptr) {
-    return entries_->Map(ptr);
+    int index = entries_->Map(ptr);
+    return index != HeapEntry::kNoEntry ? &snapshot_->entries()[index] : NULL;
   }
   HeapEntry* FindOrAddEntry(HeapThing ptr, HeapEntriesAllocator* allocator) {
     HeapEntry* entry = FindEntry(ptr);
     return entry != NULL ? entry : AddEntry(ptr, allocator);
   }
   void SetIndexedReference(HeapGraphEdge::Type type,
-                           HeapThing parent_ptr,
-                           HeapEntry* parent_entry,
+                           int parent,
                            int index,
-                           HeapThing child_ptr,
                            HeapEntry* child_entry) {
-    int child_index, retainer_index;
-    entries_->CountReference(
-        parent_ptr, child_ptr, &child_index, &retainer_index);
-    parent_entry->SetIndexedReference(
-        type, child_index, index, child_entry, retainer_index);
+    HeapEntry* parent_entry = &snapshot_->entries()[parent];
+    parent_entry->SetIndexedReference(type, index, child_entry);
   }
   void SetIndexedAutoIndexReference(HeapGraphEdge::Type type,
-                                    HeapThing parent_ptr,
-                                    HeapEntry* parent_entry,
-                                    HeapThing child_ptr,
+                                    int parent,
                                     HeapEntry* child_entry) {
-    int child_index, retainer_index;
-    entries_->CountReference(
-        parent_ptr, child_ptr, &child_index, &retainer_index);
-    parent_entry->SetIndexedReference(
-        type, child_index, child_index + 1, child_entry, retainer_index);
+    HeapEntry* parent_entry = &snapshot_->entries()[parent];
+    int index = parent_entry->children_count() + 1;
+    parent_entry->SetIndexedReference(type, index, child_entry);
   }
   void SetNamedReference(HeapGraphEdge::Type type,
-                         HeapThing parent_ptr,
-                         HeapEntry* parent_entry,
+                         int parent,
                          const char* reference_name,
-                         HeapThing child_ptr,
                          HeapEntry* child_entry) {
-    int child_index, retainer_index;
-    entries_->CountReference(
-        parent_ptr, child_ptr, &child_index, &retainer_index);
-    parent_entry->SetNamedReference(
-        type, child_index, reference_name, child_entry, retainer_index);
+    HeapEntry* parent_entry = &snapshot_->entries()[parent];
+    parent_entry->SetNamedReference(type, reference_name, child_entry);
   }
   void SetNamedAutoIndexReference(HeapGraphEdge::Type type,
-                                  HeapThing parent_ptr,
-                                  HeapEntry* parent_entry,
-                                  HeapThing child_ptr,
+                                  int parent,
                                   HeapEntry* child_entry) {
-    int child_index, retainer_index;
-    entries_->CountReference(
-        parent_ptr, child_ptr, &child_index, &retainer_index);
-    parent_entry->SetNamedReference(type,
-                              child_index,
-                              collection_->names()->GetName(child_index + 1),
-                              child_entry,
-                              retainer_index);
+    HeapEntry* parent_entry = &snapshot_->entries()[parent];
+    int index = parent_entry->children_count() + 1;
+    parent_entry->SetNamedReference(
+        type,
+        collection_->names()->GetName(index),
+        child_entry);
   }
 
  private:
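
The SnapshotFiller above is the heart of the refactoring that runs through this
patch: parents are now identified by an int index into the snapshot's growable
entries list rather than by HeapEntry*, because the list may reallocate while
references are still being extracted, so a stored pointer could dangle while an
index stays valid. A stand-alone sketch of the same pattern (names and types
are illustrative, not V8's):

    // Map arbitrary "things" to stable integer indices in a growable entry
    // array, and record edges against those indices.
    #include <cstdio>
    #include <string>
    #include <unordered_map>
    #include <vector>

    struct Entry { std::string name; };
    struct Edge { int from; int to; };

    class Filler {
     public:
      // Returns the entry index for |thing|, creating the entry on first use.
      int FindOrAddEntry(const void* thing, const std::string& name) {
        std::unordered_map<const void*, int>::iterator it = index_of_.find(thing);
        if (it != index_of_.end()) return it->second;
        int index = static_cast<int>(entries_.size());
        entries_.push_back(Entry{name});  // may reallocate; the index stays valid
        index_of_[thing] = index;
        return index;
      }
      void SetReference(int parent, int child) {
        edges_.push_back(Edge{parent, child});
      }
      const std::vector<Entry>& entries() const { return entries_; }
      const std::vector<Edge>& edges() const { return edges_; }

     private:
      std::unordered_map<const void*, int> index_of_;
      std::vector<Entry> entries_;
      std::vector<Edge> edges_;
    };

    int main() {
      Filler filler;
      int a = 0, b = 0;
      int root = filler.FindOrAddEntry(&a, "root");
      int child = filler.FindOrAddEntry(&b, "child");
      filler.SetReference(root, child);
      std::printf("%d entries, %d edges\n",
                  static_cast<int>(filler.entries().size()),
                  static_cast<int>(filler.edges().size()));
      return 0;
    }
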
@@ -3329,30 +3083,15 @@
   debug_heap->Verify();
 #endif
 
-  SetProgressTotal(2);  // 2 passes.
+  SetProgressTotal(1);  // 1 pass.
 
 #ifdef DEBUG
   debug_heap->Verify();
 #endif
 
-  // Pass 1. Iterate heap contents to count entries and references.
-  if (!CountEntriesAndReferences()) return false;
-
-#ifdef DEBUG
-  debug_heap->Verify();
-#endif
-
-  // Allocate memory for entries and references.
-  snapshot_->AllocateEntries(entries_.entries_count(),
-                             entries_.total_children_count(),
-                             entries_.total_retainers_count());
-
-  // Allocate heap objects to entries hash map.
-  entries_.AllocateEntries(V8HeapExplorer::kInternalRootObject);
-
-  // Pass 2. Fill references.
   if (!FillReferences()) return false;
 
+  snapshot_->FillChildrenAndRetainers();
   snapshot_->RememberLastJSObjectId();
 
   if (!SetEntriesDominators()) return false;
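
With the counting pass gone, GenerateSnapshot() extracts references in a single
pass: edges are appended to one flat list as they are discovered, and
FillChildrenAndRetainers() groups them per node afterwards. One plausible way
to do such grouping, shown here only as a sketch that assumes each edge records
its from/to node indexes (this is not the V8 implementation), is counting plus
prefix sums:

    // Given edges as (from, to) index pairs, build for every node a
    // contiguous slice of child edge pointers using counts + prefix sums.
    #include <cstdio>
    #include <vector>

    struct Edge { int from; int to; };

    int main() {
      const int node_count = 4;
      std::vector<Edge> edges = {{0, 1}, {0, 2}, {1, 3}, {2, 3}};

      // 1. Count children per node (shifted by one for the prefix sum).
      std::vector<int> first_child(node_count + 1, 0);
      for (const Edge& e : edges) ++first_child[e.from + 1];
      // 2. Prefix sums turn counts into slice start offsets.
      for (int i = 0; i < node_count; ++i) first_child[i + 1] += first_child[i];
      // 3. Scatter edge pointers into their slices.
      std::vector<const Edge*> children(edges.size());
      std::vector<int> cursor(first_child.begin(), first_child.end() - 1);
      for (const Edge& e : edges) children[cursor[e.from]++] = &e;

      for (int n = 0; n < node_count; ++n) {
        std::printf("node %d:", n);
        for (int i = first_child[n]; i < first_child[n + 1]; ++i)
          std::printf(" ->%d", children[i]->to);
        std::printf("\n");
      }
      return 0;
    }
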
@@ -3384,48 +3123,34 @@
 void HeapSnapshotGenerator::SetProgressTotal(int iterations_count) {
   if (control_ == NULL) return;
   HeapIterator iterator(HeapIterator::kFilterUnreachable);
-  progress_total_ = (
+  progress_total_ = iterations_count * (
       v8_heap_explorer_.EstimateObjectsCount(&iterator) +
-      dom_explorer_.EstimateObjectsCount()) * iterations_count;
+      dom_explorer_.EstimateObjectsCount());
   progress_counter_ = 0;
 }
 
 
-bool HeapSnapshotGenerator::CountEntriesAndReferences() {
-  SnapshotCounter counter(&entries_);
-  v8_heap_explorer_.AddRootEntries(&counter);
-  return v8_heap_explorer_.IterateAndExtractReferences(&counter)
-      && dom_explorer_.IterateAndExtractReferences(&counter);
-}
-
-
 bool HeapSnapshotGenerator::FillReferences() {
   SnapshotFiller filler(snapshot_, &entries_);
-  // IterateAndExtractReferences cannot set object names because
-  // it makes call to JSObject::LocalLookupRealNamedProperty which
-  // in turn may relocate objects in property maps thus changing the heap
-  // layout and affecting retainer counts. This is not acceptable because
-  // number of retainers must not change between count and fill passes.
-  // To avoid this there's a separate postpass that set object names.
+  v8_heap_explorer_.AddRootEntries(&filler);
   return v8_heap_explorer_.IterateAndExtractReferences(&filler)
-      && dom_explorer_.IterateAndExtractReferences(&filler)
-      && v8_heap_explorer_.IterateAndSetObjectNames(&filler);
+      && dom_explorer_.IterateAndExtractReferences(&filler);
 }
 
 
-bool HeapSnapshotGenerator::IsUserGlobalReference(const HeapGraphEdge& edge) {
-  ASSERT(edge.from() == snapshot_->root());
-  return edge.type() == HeapGraphEdge::kShortcut;
+bool HeapSnapshotGenerator::IsUserGlobalReference(const HeapGraphEdge* edge) {
+  ASSERT(edge->from() == snapshot_->root());
+  return edge->type() == HeapGraphEdge::kShortcut;
 }
 
 
 void HeapSnapshotGenerator::MarkUserReachableObjects() {
   List<HeapEntry*> worklist;
 
-  Vector<HeapGraphEdge> children = snapshot_->root()->children();
+  Vector<HeapGraphEdge*> children = snapshot_->root()->children();
   for (int i = 0; i < children.length(); ++i) {
     if (IsUserGlobalReference(children[i])) {
-      worklist.Add(children[i].to());
+      worklist.Add(children[i]->to());
     }
   }
 
@@ -3433,9 +3158,9 @@
     HeapEntry* entry = worklist.RemoveLast();
     if (entry->user_reachable()) continue;
     entry->set_user_reachable();
-    Vector<HeapGraphEdge> children = entry->children();
+    Vector<HeapGraphEdge*> children = entry->children();
     for (int i = 0; i < children.length(); ++i) {
-      HeapEntry* child = children[i].to();
+      HeapEntry* child = children[i]->to();
       if (!child->user_reachable()) {
         worklist.Add(child);
       }
@@ -3464,11 +3189,11 @@
   snapshot_->root()->paint();
   while (!nodes_to_visit.is_empty()) {
     HeapEntry* entry = nodes_to_visit.last();
-    Vector<HeapGraphEdge> children = entry->children();
+    Vector<HeapGraphEdge*> children = entry->children();
     bool has_new_edges = false;
     for (int i = 0; i < children.length(); ++i) {
-      if (entry != root && !IsRetainingEdge(&children[i])) continue;
-      HeapEntry* child = children[i].to();
+      if (entry != root && !IsRetainingEdge(children[i])) continue;
+      HeapEntry* child = children[i]->to();
       if (!child->painted()) {
         nodes_to_visit.Add(child);
         child->paint();
@@ -3476,7 +3201,7 @@
       }
     }
     if (!has_new_edges) {
-      entry->set_ordered_index(current_entry);
+      entry->set_postorder_index(current_entry);
       (*entries)[current_entry++] = entry;
       nodes_to_visit.RemoveLast();
     }
@@ -3504,8 +3229,7 @@
   if (entries.length() == 0) return true;
   HeapEntry* root = snapshot_->root();
   const int entries_length = entries.length(), root_index = entries_length - 1;
-  static const int kNoDominator = -1;
-  for (int i = 0; i < root_index; ++i) (*dominators)[i] = kNoDominator;
+  for (int i = 0; i < root_index; ++i) (*dominators)[i] = HeapEntry::kNoEntry;
   (*dominators)[root_index] = root_index;
 
   // The affected array is used to mark entries which dominators
@@ -3513,28 +3237,28 @@
   ScopedVector<bool> affected(entries_length);
   for (int i = 0; i < affected.length(); ++i) affected[i] = false;
   // Mark the root direct children as affected.
-  Vector<HeapGraphEdge> children = entries[root_index]->children();
+  Vector<HeapGraphEdge*> children = entries[root_index]->children();
   for (int i = 0; i < children.length(); ++i) {
-    affected[children[i].to()->ordered_index()] = true;
+    affected[children[i]->to()->postorder_index()] = true;
   }
 
   bool changed = true;
   while (changed) {
     changed = false;
-    if (!ProgressReport(true)) return false;
+    if (!ProgressReport(false)) return false;
     for (int i = root_index - 1; i >= 0; --i) {
       if (!affected[i]) continue;
       affected[i] = false;
       // If dominator of the entry has already been set to root,
       // then it can't propagate any further.
       if ((*dominators)[i] == root_index) continue;
-      int new_idom_index = kNoDominator;
+      int new_idom_index = HeapEntry::kNoEntry;
       Vector<HeapGraphEdge*> rets = entries[i]->retainers();
       for (int j = 0; j < rets.length(); ++j) {
         if (rets[j]->from() != root && !IsRetainingEdge(rets[j])) continue;
-        int ret_index = rets[j]->from()->ordered_index();
-        if (dominators->at(ret_index) != kNoDominator) {
-          new_idom_index = new_idom_index == kNoDominator
+        int ret_index = rets[j]->from()->postorder_index();
+        if (dominators->at(ret_index) != HeapEntry::kNoEntry) {
+          new_idom_index = new_idom_index == HeapEntry::kNoEntry
               ? ret_index
               : Intersect(ret_index, new_idom_index, *dominators);
           // If idom has already reached the root, it doesn't make sense
@@ -3542,13 +3266,13 @@
           if (new_idom_index == root_index) break;
         }
       }
-      if (new_idom_index != kNoDominator
+      if (new_idom_index != HeapEntry::kNoEntry
           && dominators->at(i) != new_idom_index) {
         (*dominators)[i] = new_idom_index;
         changed = true;
-        Vector<HeapGraphEdge> children = entries[i]->children();
+        Vector<HeapGraphEdge*> children = entries[i]->children();
         for (int j = 0; j < children.length(); ++j) {
-          affected[children[j].to()->ordered_index()] = true;
+          affected[children[j]->to()->postorder_index()] = true;
         }
       }
     }
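
The Intersect helper used above is not part of this hunk; in the usual
formulation of this iterative dominator algorithm (the Cooper/Harvey/Kennedy
style, adapted to this code's postorder numbering in which the root has the
highest index) it walks both candidates up the dominator array until they
meet. A sketch under that assumption:

    // Classic "intersect" step: walk the two candidate dominators towards
    // the root (highest postorder index) until they coincide. Assumes
    // dominators[] already holds valid indexes for both arguments.
    #include <cstdio>
    #include <vector>

    static int Intersect(int i1, int i2, const std::vector<int>& dominators) {
      while (i1 != i2) {
        while (i1 < i2) i1 = dominators[i1];
        while (i2 < i1) i2 = dominators[i2];
      }
      return i1;
    }

    int main() {
      // Postorder-numbered chain: 0 and 1 are dominated by 2, 2 by 3 (root).
      std::vector<int> dominators = {2, 2, 3, 3};
      std::printf("idom = %d\n", Intersect(0, 1, dominators));  // prints 2
      return 0;
    }
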
@@ -3560,12 +3284,12 @@
 bool HeapSnapshotGenerator::SetEntriesDominators() {
   MarkUserReachableObjects();
   // This array is used for maintaining postorder of nodes.
-  ScopedVector<HeapEntry*> ordered_entries(snapshot_->entries()->length());
+  ScopedVector<HeapEntry*> ordered_entries(snapshot_->entries().length());
   FillPostorderIndexes(&ordered_entries);
   ScopedVector<int> dominators(ordered_entries.length());
   if (!BuildDominatorTree(ordered_entries, &dominators)) return false;
   for (int i = 0; i < ordered_entries.length(); ++i) {
-    ASSERT(dominators[i] >= 0);
+    ASSERT(dominators[i] != HeapEntry::kNoEntry);
     ordered_entries[i]->set_dominator(ordered_entries[dominators[i]]);
   }
   return true;
@@ -3576,17 +3300,18 @@
   // As for the dominators tree we only know parent nodes, not
   // children, to sum up total sizes we "bubble" node's self size
   // adding it to all of its parents.
-  List<HeapEntry*>& entries = *snapshot_->entries();
+  List<HeapEntry>& entries = snapshot_->entries();
   for (int i = 0; i < entries.length(); ++i) {
-    HeapEntry* entry = entries[i];
+    HeapEntry* entry = &entries[i];
     entry->set_retained_size(entry->self_size());
   }
   for (int i = 0; i < entries.length(); ++i) {
-    HeapEntry* entry = entries[i];
-    int entry_size = entry->self_size();
-    for (HeapEntry* dominator = entry->dominator();
-         dominator != entry;
-         entry = dominator, dominator = entry->dominator()) {
+    int entry_size = entries[i].self_size();
+    HeapEntry* current = &entries[i];
+    for (HeapEntry* dominator = current->dominator();
+         dominator != current;
+         current = dominator, dominator = current->dominator()) {
+      ASSERT(current->dominator() != NULL);
       dominator->add_retained_size(entry_size);
     }
   }
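
Because only dominator parents are known, retained sizes are computed exactly as
the comment above says: each entry's self size is "bubbled" up its dominator
chain and added to every ancestor. A small stand-alone illustration of that
bubbling (the data here is made up, not V8 code):

    // retained[d] accumulates the self sizes of everything d dominates, by
    // walking each node up the dominator chain until the root (self-dominated).
    #include <cstdio>
    #include <vector>

    int main() {
      // Node 3 is the root (its own dominator); node 2 dominates 0 and 1.
      std::vector<int> dominator = {2, 2, 3, 3};
      std::vector<int> self_size = {10, 20, 5, 1};
      std::vector<int> retained(self_size);

      for (int i = 0; i < static_cast<int>(dominator.size()); ++i) {
        int size = self_size[i];
        for (int cur = i, dom = dominator[i]; dom != cur;
             cur = dom, dom = dominator[cur]) {
          retained[dom] += size;
        }
      }
      for (size_t i = 0; i < retained.size(); ++i)
        std::printf("node %zu retains %d\n", i, retained[i]);
      // node 2 retains 5 + 10 + 20 = 35; the root retains 36.
      return 0;
    }
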
@@ -3690,19 +3415,23 @@
 };
 
 
+// type, name|index, to_node.
+const int HeapSnapshotJSONSerializer::kEdgeFieldsCount = 3;
+// type, name, id, self_size, retained_size, dominator, children_index.
+const int HeapSnapshotJSONSerializer::kNodeFieldsCount = 7;
+
 void HeapSnapshotJSONSerializer::Serialize(v8::OutputStream* stream) {
   ASSERT(writer_ == NULL);
   writer_ = new OutputStreamWriter(stream);
 
   HeapSnapshot* original_snapshot = NULL;
-  if (snapshot_->raw_entries_size() >=
+  if (snapshot_->RawSnapshotSize() >=
       SnapshotSizeConstants<kPointerSize>::kMaxSerializableSnapshotRawSize) {
     // The snapshot is too big. Serialize a fake snapshot.
     original_snapshot = snapshot_;
     snapshot_ = CreateFakeSnapshot();
   }
-  // Since nodes graph is cyclic, we need the first pass to enumerate
-  // them. Strings can be serialized in one pass.
+
   SerializeImpl();
 
   delete writer_;
@@ -3720,42 +3449,24 @@
                                           HeapSnapshot::kFull,
                                           snapshot_->title(),
                                           snapshot_->uid());
-  result->AllocateEntries(2, 1, 0);
-  HeapEntry* root = result->AddRootEntry(1);
+  result->AddRootEntry();
   const char* text = snapshot_->collection()->names()->GetFormatted(
       "The snapshot is too big. "
       "Maximum snapshot size is %"  V8_PTR_PREFIX "u MB. "
       "Actual snapshot size is %"  V8_PTR_PREFIX "u MB.",
       SnapshotSizeConstants<kPointerSize>::kMaxSerializableSnapshotRawSize / MB,
-      (snapshot_->raw_entries_size() + MB - 1) / MB);
-  HeapEntry* message = result->AddEntry(
-      HeapEntry::kString, text, 0, 4, 0, 0);
-  root->SetUnidirElementReference(0, 1, message);
+      (snapshot_->RawSnapshotSize() + MB - 1) / MB);
+  HeapEntry* message = result->AddEntry(HeapEntry::kString, text, 0, 4);
+  result->root()->SetIndexedReference(HeapGraphEdge::kElement, 1, message);
+  result->FillChildrenAndRetainers();
   result->SetDominatorsToSelf();
   return result;
 }
 
 
-void HeapSnapshotJSONSerializer::CalculateNodeIndexes(
-    const List<HeapEntry*>& nodes) {
-  // type,name,id,self_size,retained_size,dominator,children_index.
-  const int node_fields_count = 7;
-  // Root must be the first.
-  ASSERT(nodes.first() == snapshot_->root());
-  // Rewrite node indexes, so they refer to actual array positions. Do this
-  // only once.
-  if (nodes[0]->entry_index() == -1) {
-    int index = 0;
-    for (int i = 0; i < nodes.length(); ++i, index += node_fields_count) {
-      nodes[i]->set_entry_index(index);
-    }
-  }
-}
-
-
 void HeapSnapshotJSONSerializer::SerializeImpl() {
-  List<HeapEntry*>& nodes = *(snapshot_->entries());
-  CalculateNodeIndexes(nodes);
+  List<HeapEntry>& nodes = snapshot_->entries();
+  ASSERT(0 == snapshot_->root()->index());
   writer_->AddCharacter('{');
   writer_->AddString("\"snapshot\":{");
   SerializeSnapshot();
@@ -3831,19 +3542,19 @@
   buffer[buffer_pos++] = ',';
   buffer_pos = itoa(edge_name_or_index, buffer, buffer_pos);
   buffer[buffer_pos++] = ',';
-  buffer_pos = itoa(edge->to()->entry_index(), buffer, buffer_pos);
+  buffer_pos = itoa(entry_index(edge->to()), buffer, buffer_pos);
   buffer[buffer_pos++] = '\0';
   writer_->AddString(buffer.start());
 }
 
 
-void HeapSnapshotJSONSerializer::SerializeEdges(const List<HeapEntry*>& nodes) {
+void HeapSnapshotJSONSerializer::SerializeEdges(const List<HeapEntry>& nodes) {
   bool first_edge = true;
   for (int i = 0; i < nodes.length(); ++i) {
-    HeapEntry* entry = nodes[i];
-    Vector<HeapGraphEdge> children = entry->children();
+    HeapEntry* entry = &nodes[i];
+    Vector<HeapGraphEdge*> children = entry->children();
     for (int j = 0; j < children.length(); ++j) {
-      SerializeEdge(&children[j], first_edge);
+      SerializeEdge(children[j], first_edge);
       first_edge = false;
       if (writer_->aborted()) return;
     }
@@ -3861,7 +3572,7 @@
   EmbeddedVector<char, kBufferSize> buffer;
   int buffer_pos = 0;
   buffer[buffer_pos++] = '\n';
-  if (entry->entry_index() != 0) {
+  if (entry_index(entry) != 0) {
     buffer[buffer_pos++] = ',';
   }
   buffer_pos = itoa(entry->type(), buffer, buffer_pos);
@@ -3874,7 +3585,7 @@
   buffer[buffer_pos++] = ',';
   buffer_pos = itoa(entry->retained_size(), buffer, buffer_pos);
   buffer[buffer_pos++] = ',';
-  buffer_pos = itoa(entry->dominator()->entry_index(), buffer, buffer_pos);
+  buffer_pos = itoa(entry_index(entry->dominator()), buffer, buffer_pos);
   buffer[buffer_pos++] = ',';
   buffer_pos = itoa(edges_index, buffer, buffer_pos);
   buffer[buffer_pos++] = '\0';
@@ -3882,13 +3593,12 @@
 }
 
 
-void HeapSnapshotJSONSerializer::SerializeNodes(const List<HeapEntry*>& nodes) {
-  const int edge_fields_count = 3;  // type,name|index,to_node.
+void HeapSnapshotJSONSerializer::SerializeNodes(const List<HeapEntry>& nodes) {
   int edges_index = 0;
   for (int i = 0; i < nodes.length(); ++i) {
-    HeapEntry* entry = nodes[i];
+    HeapEntry* entry = &nodes[i];
     SerializeNode(entry, edges_index);
-    edges_index += entry->children().length() * edge_fields_count;
+    edges_index += entry->children().length() * kEdgeFieldsCount;
     if (writer_->aborted()) return;
   }
 }
@@ -3952,9 +3662,9 @@
 #undef JSON_O
 #undef JSON_A
   writer_->AddString(",\"node_count\":");
-  writer_->AddNumber(snapshot_->entries()->length());
+  writer_->AddNumber(snapshot_->entries().length());
   writer_->AddString(",\"edge_count\":");
-  writer_->AddNumber(snapshot_->number_of_edges());
+  writer_->AddNumber(snapshot_->edges().length());
 }
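
In the serialized JSON the nodes and edges become flat integer arrays: every
node contributes kNodeFieldsCount (7) fields and every edge kEdgeFieldsCount
(3), so an edge's to_node value is entry_index(edge->to()), i.e. the target
node's index times 7, and SerializeNodes() advances edges_index by the child
count times 3. A small sketch of that offset arithmetic on made-up data:

    #include <cstdio>

    int main() {
      // 7 node fields: type, name, id, self_size, retained_size, dominator,
      // children_index; 3 edge fields: type, name|index, to_node.
      const int kNodeFieldsCount = 7;
      const int kEdgeFieldsCount = 3;
      const int children_per_node[] = {2, 0, 1};  // illustrative child counts

      int edges_index = 0;
      for (int i = 0; i < 3; ++i) {
        // A node's position in the flat node array (what "to_node" and
        // "dominator" point at), plus the offset of its first serialized edge.
        int node_offset = i * kNodeFieldsCount;
        std::printf("node %d: offset %d, first edge field at %d\n",
                    i, node_offset, edges_index);
        edges_index += children_per_node[i] * kEdgeFieldsCount;
      }
      return 0;
    }
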
 
 
diff --git a/src/profile-generator.h b/src/profile-generator.h
index e04ddbf..92896c2 100644
--- a/src/profile-generator.h
+++ b/src/profile-generator.h
@@ -446,6 +446,7 @@
 
 
 class HeapEntry;
+class HeapSnapshot;
 
 class HeapGraphEdge BASE_EMBEDDED {
  public:
@@ -460,9 +461,9 @@
   };
 
   HeapGraphEdge() { }
-  void Init(int child_index, Type type, const char* name, HeapEntry* to);
-  void Init(int child_index, Type type, int index, HeapEntry* to);
-  void Init(int child_index, int index, HeapEntry* to);
+  HeapGraphEdge(Type type, const char* name, int from, int to);
+  HeapGraphEdge(Type type, int index, int from, int to);
+  void ReplaceToIndexWithEntry(HeapSnapshot* snapshot);
 
   Type type() const { return static_cast<Type>(type_); }
   int index() const {
@@ -471,48 +472,34 @@
   }
   const char* name() const {
     ASSERT(type_ == kContextVariable
-           || type_ == kProperty
-           || type_ == kInternal
-           || type_ == kShortcut);
+        || type_ == kProperty
+        || type_ == kInternal
+        || type_ == kShortcut);
     return name_;
   }
-  HeapEntry* to() const { return to_; }
   INLINE(HeapEntry* from() const);
+  HeapEntry* to() const { return to_entry_; }
 
  private:
-  int child_index_ : 29;
+  INLINE(HeapSnapshot* snapshot() const);
+
   unsigned type_ : 3;
+  int from_index_ : 29;
+  union {
+    // During entries population |to_index_| is used for storing the index,
+    // afterwards it is replaced with a pointer to the entry.
+    int to_index_;
+    HeapEntry* to_entry_;
+  };
   union {
     int index_;
     const char* name_;
   };
-  HeapEntry* to_;
-
-  DISALLOW_COPY_AND_ASSIGN(HeapGraphEdge);
 };
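
The union of to_index_ and to_entry_ lets an edge be recorded while the entries
list can still grow (and therefore relocate): only an index is stored during
population, and ReplaceToIndexWithEntry() later rewrites it as a direct
pointer once the list is final. A stand-alone sketch of the same trick (the
types here are illustrative, not V8's):

    #include <cstdio>
    #include <vector>

    struct Node { const char* name; };

    struct EdgeSketch {
      union {
        int to_index;   // valid while the graph is still being populated
        Node* to_node;  // valid after the fix-up pass
      };
      // Run exactly once, after the node array has stopped growing.
      void ReplaceIndexWithPointer(std::vector<Node>* nodes) {
        to_node = &(*nodes)[to_index];
      }
    };

    int main() {
      std::vector<Node> nodes = {{"root"}, {"child"}};
      EdgeSketch edge;
      edge.to_index = 1;                     // safe even if |nodes| later grows
      edge.ReplaceIndexWithPointer(&nodes);  // nodes is final at this point
      std::printf("edge points to %s\n", edge.to_node->name);
      return 0;
    }
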
 
 
-class HeapSnapshot;
-
 // HeapEntry instances represent an entity from the heap (or a special
-// virtual node, e.g. root). To make heap snapshots more compact,
-// HeapEntries has a special memory layout (no Vectors or Lists used):
-//
-//   +-----------------+
-//        HeapEntry
-//   +-----------------+
-//      HeapGraphEdge    |
-//           ...         } children_count
-//      HeapGraphEdge    |
-//   +-----------------+
-//      HeapGraphEdge*   |
-//           ...         } retainers_count
-//      HeapGraphEdge*   |
-//   +-----------------+
-//
-// In a HeapSnapshot, all entries are hand-allocated in a continuous array
-// of raw bytes.
-//
+// virtual node, e.g. root).
 class HeapEntry BASE_EMBEDDED {
  public:
   enum Type {
@@ -527,15 +514,14 @@
     kNative = v8::HeapGraphNode::kNative,
     kSynthetic = v8::HeapGraphNode::kSynthetic
   };
+  static const int kNoEntry;
 
   HeapEntry() { }
-  void Init(HeapSnapshot* snapshot,
+  HeapEntry(HeapSnapshot* snapshot,
             Type type,
             const char* name,
             SnapshotObjectId id,
-            int self_size,
-            int children_count,
-            int retainers_count);
+            int self_size);
 
   HeapSnapshot* snapshot() { return snapshot_; }
   Type type() { return static_cast<Type>(type_); }
@@ -545,20 +531,27 @@
   int self_size() { return self_size_; }
   int retained_size() { return retained_size_; }
   void add_retained_size(int size) { retained_size_ += size; }
-  void set_retained_size(int value) { retained_size_ = value; }
-  int ordered_index() { return ordered_index_; }
-  void set_ordered_index(int value) { ordered_index_ = value; }
-  int entry_index() { return entry_index_; }
-  void set_entry_index(int value) { entry_index_ = value; }
-
-  Vector<HeapGraphEdge> children() {
-    return Vector<HeapGraphEdge>(children_arr(), children_count_); }
+  void set_retained_size(int size) { retained_size_ = size; }
+  INLINE(int index() const);
+  int postorder_index() { return postorder_index_; }
+  void set_postorder_index(int value) { postorder_index_ = value; }
+  int children_count() const { return children_count_; }
+  INLINE(int set_children_index(int index));
+  INLINE(int set_retainers_index(int index));
+  void add_child(HeapGraphEdge* edge) {
+    children_arr()[children_count_++] = edge;
+  }
+  void add_retainer(HeapGraphEdge* edge) {
+    retainers_arr()[retainers_count_++] = edge;
+  }
+  Vector<HeapGraphEdge*> children() {
+    return Vector<HeapGraphEdge*>(children_arr(), children_count_); }
   Vector<HeapGraphEdge*> retainers() {
     return Vector<HeapGraphEdge*>(retainers_arr(), retainers_count_); }
-  HeapEntry* dominator() { return dominator_; }
+  INLINE(HeapEntry* dominator() const);
   void set_dominator(HeapEntry* entry) {
     ASSERT(entry != NULL);
-    dominator_ = entry;
+    dominator_ = entry->index();
   }
   void clear_paint() { painted_ = false; }
   bool painted() { return painted_; }
@@ -566,57 +559,37 @@
   bool user_reachable() { return user_reachable_; }
   void set_user_reachable() { user_reachable_ = true; }
 
-  void SetIndexedReference(HeapGraphEdge::Type type,
-                           int child_index,
-                           int index,
-                           HeapEntry* entry,
-                           int retainer_index);
-  void SetNamedReference(HeapGraphEdge::Type type,
-                         int child_index,
-                         const char* name,
-                         HeapEntry* entry,
-                         int retainer_index);
-  void SetUnidirElementReference(int child_index, int index, HeapEntry* entry);
-
-  size_t EntrySize() {
-    return EntriesSize(1, children_count_, retainers_count_);
-  }
+  void SetIndexedReference(
+      HeapGraphEdge::Type type, int index, HeapEntry* entry);
+  void SetNamedReference(
+      HeapGraphEdge::Type type, const char* name, HeapEntry* entry);
 
   void Print(
       const char* prefix, const char* edge_name, int max_depth, int indent);
 
   Handle<HeapObject> GetHeapObject();
 
-  static size_t EntriesSize(int entries_count,
-                            int children_count,
-                            int retainers_count);
-
  private:
-  HeapGraphEdge* children_arr() {
-    return reinterpret_cast<HeapGraphEdge*>(this + 1);
-  }
-  HeapGraphEdge** retainers_arr() {
-    return reinterpret_cast<HeapGraphEdge**>(children_arr() + children_count_);
-  }
+  INLINE(HeapGraphEdge** children_arr());
+  INLINE(HeapGraphEdge** retainers_arr());
   const char* TypeAsString();
 
   unsigned painted_: 1;
   unsigned user_reachable_: 1;
+  int dominator_: 30;
   unsigned type_: 4;
-  int children_count_: 26;
-  int retainers_count_;
+  int retainers_count_: 28;
+  int retainers_index_;
+  int children_count_;
+  int children_index_;
   int self_size_;
   union {
-    int ordered_index_;  // Used during dominator tree building.
-    int retained_size_;  // At that moment, there is no retained size yet.
+    int postorder_index_;  // Used during dominator tree building.
+    int retained_size_;    // At that moment, there is no retained size yet.
   };
-  int entry_index_;
   SnapshotObjectId id_;
-  HeapEntry* dominator_;
   HeapSnapshot* snapshot_;
   const char* name_;
-
-  DISALLOW_COPY_AND_ASSIGN(HeapEntry);
 };
 
 
@@ -637,63 +610,59 @@
                Type type,
                const char* title,
                unsigned uid);
-  ~HeapSnapshot();
   void Delete();
 
   HeapSnapshotsCollection* collection() { return collection_; }
   Type type() { return type_; }
   const char* title() { return title_; }
   unsigned uid() { return uid_; }
-  HeapEntry* root() { return root_entry_; }
-  HeapEntry* gc_roots() { return gc_roots_entry_; }
-  HeapEntry* natives_root() { return natives_root_entry_; }
-  HeapEntry* gc_subroot(int index) { return gc_subroot_entries_[index]; }
-  List<HeapEntry*>* entries() { return &entries_; }
-  size_t raw_entries_size() { return raw_entries_size_; }
-  int number_of_edges() { return number_of_edges_; }
+  size_t RawSnapshotSize() const;
+  HeapEntry* root() { return &entries_[root_index_]; }
+  HeapEntry* gc_roots() { return &entries_[gc_roots_index_]; }
+  HeapEntry* natives_root() { return &entries_[natives_root_index_]; }
+  HeapEntry* gc_subroot(int index) {
+    return &entries_[gc_subroot_indexes_[index]];
+  }
+  List<HeapEntry>& entries() { return entries_; }
+  List<HeapGraphEdge>& edges() { return edges_; }
+  List<HeapGraphEdge*>& children() { return children_; }
+  List<HeapGraphEdge*>& retainers() { return retainers_; }
   void RememberLastJSObjectId();
   SnapshotObjectId max_snapshot_js_object_id() const {
     return max_snapshot_js_object_id_;
   }
 
-  void AllocateEntries(
-      int entries_count, int children_count, int retainers_count);
   HeapEntry* AddEntry(HeapEntry::Type type,
                       const char* name,
                       SnapshotObjectId id,
-                      int size,
-                      int children_count,
-                      int retainers_count);
-  HeapEntry* AddRootEntry(int children_count);
-  HeapEntry* AddGcRootsEntry(int children_count, int retainers_count);
-  HeapEntry* AddGcSubrootEntry(int tag,
-                               int children_count,
-                               int retainers_count);
-  HeapEntry* AddNativesRootEntry(int children_count, int retainers_count);
+                      int size);
+  HeapEntry* AddRootEntry();
+  HeapEntry* AddGcRootsEntry();
+  HeapEntry* AddGcSubrootEntry(int tag);
+  HeapEntry* AddNativesRootEntry();
   void ClearPaint();
   HeapEntry* GetEntryById(SnapshotObjectId id);
   List<HeapEntry*>* GetSortedEntriesList();
   void SetDominatorsToSelf();
+  void FillChildrenAndRetainers();
 
   void Print(int max_depth);
   void PrintEntriesSize();
 
  private:
-  HeapEntry* GetNextEntryToInit();
-
   HeapSnapshotsCollection* collection_;
   Type type_;
   const char* title_;
   unsigned uid_;
-  HeapEntry* root_entry_;
-  HeapEntry* gc_roots_entry_;
-  HeapEntry* natives_root_entry_;
-  HeapEntry* gc_subroot_entries_[VisitorSynchronization::kNumberOfSyncTags];
-  char* raw_entries_;
-  List<HeapEntry*> entries_;
+  int root_index_;
+  int gc_roots_index_;
+  int natives_root_index_;
+  int gc_subroot_indexes_[VisitorSynchronization::kNumberOfSyncTags];
+  List<HeapEntry> entries_;
+  List<HeapGraphEdge> edges_;
+  List<HeapGraphEdge*> children_;
+  List<HeapGraphEdge*> retainers_;
   List<HeapEntry*> sorted_entries_;
-  size_t raw_entries_size_;
-  int number_of_edges_;
   SnapshotObjectId max_snapshot_js_object_id_;
 
   friend class HeapSnapshotTester;
@@ -828,8 +797,7 @@
 class HeapEntriesAllocator {
  public:
   virtual ~HeapEntriesAllocator() { }
-  virtual HeapEntry* AllocateEntry(
-      HeapThing ptr, int children_count, int retainers_count) = 0;
+  virtual HeapEntry* AllocateEntry(HeapThing ptr) = 0;
 };
 
 
@@ -838,37 +806,11 @@
 class HeapEntriesMap {
  public:
   HeapEntriesMap();
-  ~HeapEntriesMap();
 
-  void AllocateEntries(HeapThing root_object);
-  HeapEntry* Map(HeapThing thing);
-  void Pair(HeapThing thing, HeapEntriesAllocator* allocator, HeapEntry* entry);
-  void CountReference(HeapThing from, HeapThing to,
-                      int* prev_children_count = NULL,
-                      int* prev_retainers_count = NULL);
-
-  int entries_count() { return entries_count_; }
-  int total_children_count() { return total_children_count_; }
-  int total_retainers_count() { return total_retainers_count_; }
-
-  static HeapEntry* const kHeapEntryPlaceholder;
+  int Map(HeapThing thing);
+  void Pair(HeapThing thing, int entry);
 
  private:
-  struct EntryInfo {
-    EntryInfo(HeapEntry* entry, HeapEntriesAllocator* allocator)
-        : entry(entry),
-          allocator(allocator),
-          children_count(0),
-          retainers_count(0) {
-    }
-    HeapEntry* entry;
-    HeapEntriesAllocator* allocator;
-    int children_count;
-    int retainers_count;
-  };
-
-  static inline void AllocateHeapEntryForMapEntry(HashMap::Entry* map_entry);
-
   static uint32_t Hash(HeapThing thing) {
     return ComputeIntegerHash(
         static_cast<uint32_t>(reinterpret_cast<uintptr_t>(thing)),
@@ -879,9 +821,6 @@
   }
 
   HashMap entries_;
-  int entries_count_;
-  int total_children_count_;
-  int total_retainers_count_;
 
   friend class HeapObjectsSet;
 
@@ -916,26 +855,18 @@
   virtual HeapEntry* FindOrAddEntry(HeapThing ptr,
                                     HeapEntriesAllocator* allocator) = 0;
   virtual void SetIndexedReference(HeapGraphEdge::Type type,
-                                   HeapThing parent_ptr,
-                                   HeapEntry* parent_entry,
+                                   int parent_entry,
                                    int index,
-                                   HeapThing child_ptr,
                                    HeapEntry* child_entry) = 0;
   virtual void SetIndexedAutoIndexReference(HeapGraphEdge::Type type,
-                                            HeapThing parent_ptr,
-                                            HeapEntry* parent_entry,
-                                            HeapThing child_ptr,
+                                            int parent_entry,
                                             HeapEntry* child_entry) = 0;
   virtual void SetNamedReference(HeapGraphEdge::Type type,
-                                 HeapThing parent_ptr,
-                                 HeapEntry* parent_entry,
+                                 int parent_entry,
                                  const char* reference_name,
-                                 HeapThing child_ptr,
                                  HeapEntry* child_entry) = 0;
   virtual void SetNamedAutoIndexReference(HeapGraphEdge::Type type,
-                                          HeapThing parent_ptr,
-                                          HeapEntry* parent_entry,
-                                          HeapThing child_ptr,
+                                          int parent_entry,
                                           HeapEntry* child_entry) = 0;
 };
 
@@ -954,12 +885,10 @@
   V8HeapExplorer(HeapSnapshot* snapshot,
                  SnapshottingProgressReportingInterface* progress);
   virtual ~V8HeapExplorer();
-  virtual HeapEntry* AllocateEntry(
-      HeapThing ptr, int children_count, int retainers_count);
+  virtual HeapEntry* AllocateEntry(HeapThing ptr);
   void AddRootEntries(SnapshotFillerInterface* filler);
   int EstimateObjectsCount(HeapIterator* iterator);
   bool IterateAndExtractReferences(SnapshotFillerInterface* filler);
-  bool IterateAndSetObjectNames(SnapshotFillerInterface* filler);
   void TagGlobalObjects();
 
   static String* GetConstructorName(JSObject* object);
@@ -967,81 +896,77 @@
   static HeapObject* const kInternalRootObject;
 
  private:
-  HeapEntry* AddEntry(
-      HeapObject* object, int children_count, int retainers_count);
+  HeapEntry* AddEntry(HeapObject* object);
   HeapEntry* AddEntry(HeapObject* object,
                       HeapEntry::Type type,
-                      const char* name,
-                      int children_count,
-                      int retainers_count);
+                      const char* name);
   const char* GetSystemEntryName(HeapObject* object);
 
   void ExtractReferences(HeapObject* obj);
   void ExtractJSGlobalProxyReferences(JSGlobalProxy* proxy);
-  void ExtractJSObjectReferences(HeapEntry* entry, JSObject* js_obj);
-  void ExtractStringReferences(HeapEntry* entry, String* obj);
-  void ExtractContextReferences(HeapEntry* entry, Context* context);
-  void ExtractMapReferences(HeapEntry* entry, Map* map);
-  void ExtractSharedFunctionInfoReferences(HeapEntry* entry,
+  void ExtractJSObjectReferences(int entry, JSObject* js_obj);
+  void ExtractStringReferences(int entry, String* obj);
+  void ExtractContextReferences(int entry, Context* context);
+  void ExtractMapReferences(int entry, Map* map);
+  void ExtractSharedFunctionInfoReferences(int entry,
                                            SharedFunctionInfo* shared);
-  void ExtractScriptReferences(HeapEntry* entry, Script* script);
-  void ExtractCodeCacheReferences(HeapEntry* entry, CodeCache* code_cache);
-  void ExtractCodeReferences(HeapEntry* entry, Code* code);
-  void ExtractJSGlobalPropertyCellReferences(HeapEntry* entry,
+  void ExtractScriptReferences(int entry, Script* script);
+  void ExtractCodeCacheReferences(int entry, CodeCache* code_cache);
+  void ExtractCodeReferences(int entry, Code* code);
+  void ExtractJSGlobalPropertyCellReferences(int entry,
                                              JSGlobalPropertyCell* cell);
-  void ExtractClosureReferences(JSObject* js_obj, HeapEntry* entry);
-  void ExtractPropertyReferences(JSObject* js_obj, HeapEntry* entry);
-  void ExtractElementReferences(JSObject* js_obj, HeapEntry* entry);
-  void ExtractInternalReferences(JSObject* js_obj, HeapEntry* entry);
+  void ExtractClosureReferences(JSObject* js_obj, int entry);
+  void ExtractPropertyReferences(JSObject* js_obj, int entry);
+  void ExtractElementReferences(JSObject* js_obj, int entry);
+  void ExtractInternalReferences(JSObject* js_obj, int entry);
   bool IsEssentialObject(Object* object);
   void SetClosureReference(HeapObject* parent_obj,
-                           HeapEntry* parent,
+                           int parent,
                            String* reference_name,
                            Object* child);
   void SetNativeBindReference(HeapObject* parent_obj,
-                              HeapEntry* parent,
+                              int parent,
                               const char* reference_name,
                               Object* child);
   void SetElementReference(HeapObject* parent_obj,
-                           HeapEntry* parent,
+                           int parent,
                            int index,
                            Object* child);
   void SetInternalReference(HeapObject* parent_obj,
-                            HeapEntry* parent,
+                            int parent,
                             const char* reference_name,
                             Object* child,
                             int field_offset = -1);
   void SetInternalReference(HeapObject* parent_obj,
-                            HeapEntry* parent,
+                            int parent,
                             int index,
                             Object* child,
                             int field_offset = -1);
   void SetHiddenReference(HeapObject* parent_obj,
-                          HeapEntry* parent,
+                          int parent,
                           int index,
                           Object* child);
   void SetWeakReference(HeapObject* parent_obj,
-                        HeapEntry* parent_entry,
+                        int parent,
                         int index,
                         Object* child_obj,
                         int field_offset);
   void SetPropertyReference(HeapObject* parent_obj,
-                            HeapEntry* parent,
+                            int parent,
                             String* reference_name,
                             Object* child,
                             const char* name_format_string = NULL,
                             int field_offset = -1);
   void SetPropertyShortcutReference(HeapObject* parent_obj,
-                                    HeapEntry* parent,
+                                    int parent,
                                     String* reference_name,
                                     Object* child);
-  void SetUserGlobalReference(Object* window);
+  void SetUserGlobalReference(Object* user_global);
   void SetRootGcRootsReference();
   void SetGcRootsReference(VisitorSynchronization::SyncTag tag);
   void SetGcSubrootReference(
       VisitorSynchronization::SyncTag tag, bool is_weak, Object* child);
   const char* GetStrongGcSubrootName(Object* object);
-  void SetObjectName(HeapObject* object);
   void TagObject(Object* obj, const char* tag);
 
   HeapEntry* GetEntry(Object* obj);
@@ -1139,10 +1064,9 @@
   bool BuildDominatorTree(const Vector<HeapEntry*>& entries,
                           Vector<int>* dominators);
   bool CalculateRetainedSizes();
-  bool CountEntriesAndReferences();
   bool FillReferences();
   void FillPostorderIndexes(Vector<HeapEntry*>* entries);
-  bool IsUserGlobalReference(const HeapGraphEdge& edge);
+  bool IsUserGlobalReference(const HeapGraphEdge* edge);
   void MarkUserReachableObjects();
   void ProgressStep();
   bool ProgressReport(bool force = false);
@@ -1186,20 +1110,21 @@
         v8::internal::kZeroHashSeed);
   }
 
-  void CalculateNodeIndexes(const List<HeapEntry*>& nodes);
   HeapSnapshot* CreateFakeSnapshot();
   int GetStringId(const char* s);
+  int entry_index(HeapEntry* e) { return e->index() * kNodeFieldsCount; }
   void SerializeEdge(HeapGraphEdge* edge, bool first_edge);
-  void SerializeEdges(const List<HeapEntry*>& nodes);
+  void SerializeEdges(const List<HeapEntry>& nodes);
   void SerializeImpl();
   void SerializeNode(HeapEntry* entry, int edges_index);
-  void SerializeNodes(const List<HeapEntry*>& nodes);
+  void SerializeNodes(const List<HeapEntry>& nodes);
   void SerializeSnapshot();
   void SerializeString(const unsigned char* s);
   void SerializeStrings();
   void SortHashMap(HashMap* map, List<HashMap::Entry*>* sorted_entries);
 
-  static const int kMaxSerializableSnapshotRawSize;
+  static const int kEdgeFieldsCount;
+  static const int kNodeFieldsCount;
 
   HeapSnapshot* snapshot_;
   HashMap strings_;
diff --git a/src/runtime.cc b/src/runtime.cc
index 1305f8b..0b80eff 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -8277,6 +8277,19 @@
 }
 
 
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ClearFunctionTypeFeedback) {
+  HandleScope scope(isolate);
+  ASSERT(args.length() == 1);
+  CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+  Code* unoptimized = function->shared()->code();
+  if (unoptimized->kind() == Code::FUNCTION) {
+    unoptimized->ClearInlineCaches();
+    unoptimized->ClearTypeFeedbackCells(isolate->heap());
+  }
+  return isolate->heap()->undefined_value();
+}
+
+
 RUNTIME_FUNCTION(MaybeObject*, Runtime_RunningInSimulator) {
 #if defined(USE_SIMULATOR)
   return isolate->heap()->true_value();
diff --git a/src/runtime.h b/src/runtime.h
index 9ae1383..a09d9cc 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -89,6 +89,7 @@
   F(NotifyDeoptimized, 1, 1) \
   F(NotifyOSR, 0, 1) \
   F(DeoptimizeFunction, 1, 1) \
+  F(ClearFunctionTypeFeedback, 1, 1) \
   F(RunningInSimulator, 0, 1) \
   F(OptimizeFunctionOnNextCall, -1, 1) \
   F(GetOptimizationStatus, 1, 1) \
diff --git a/src/spaces.cc b/src/spaces.cc
index a5d61eb..a0c8f2c 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -2295,8 +2295,6 @@
     first_unswept_page_ = p;
   }
 
-  heap()->LowerOldGenLimits(freed_bytes);
-
   heap()->FreeQueuedChunks();
 
   return IsSweepingComplete();
diff --git a/src/v8utils.h b/src/v8utils.h
index c73222a..bb587e1 100644
--- a/src/v8utils.h
+++ b/src/v8utils.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -199,10 +199,13 @@
                             bool verbose = true);
 
 
-
 // Copy from ASCII/16bit chars to ASCII/16bit chars.
 template <typename sourcechar, typename sinkchar>
-inline void CopyChars(sinkchar* dest, const sourcechar* src, int chars) {
+INLINE(void CopyChars(sinkchar* dest, const sourcechar* src, int chars));
+
+
+template <typename sourcechar, typename sinkchar>
+void CopyChars(sinkchar* dest, const sourcechar* src, int chars) {
   sinkchar* limit = dest + chars;
 #ifdef V8_HOST_CAN_READ_UNALIGNED
   if (sizeof(*dest) == sizeof(*src)) {
diff --git a/src/version.cc b/src/version.cc
index 0dda633..3e2b8b2 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -33,9 +33,9 @@
 // NOTE these macros are used by the SCons build script so their names
 // cannot be changed without changing the SCons build script.
 #define MAJOR_VERSION     3
-#define MINOR_VERSION     10
-#define BUILD_NUMBER      8
-#define PATCH_LEVEL       4
+#define MINOR_VERSION     11
+#define BUILD_NUMBER      0
+#define PATCH_LEVEL       0
 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)
 #define IS_CANDIDATE_VERSION 0
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index 60b29e6..9f5f850 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -629,7 +629,8 @@
   static const byte kJccShortPrefix = 0x70;
   static const byte kJncShortOpcode = kJccShortPrefix | not_carry;
   static const byte kJcShortOpcode = kJccShortPrefix | carry;
-
+  static const byte kJnzShortOpcode = kJccShortPrefix | not_zero;
+  static const byte kJzShortOpcode = kJccShortPrefix | zero;
 
 
   // ---------------------------------------------------------------------------
diff --git a/src/x64/debug-x64.cc b/src/x64/debug-x64.cc
index eec83d9..94a50eb 100644
--- a/src/x64/debug-x64.cc
+++ b/src/x64/debug-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -91,6 +91,8 @@
   rinfo()->PatchCode(original_rinfo()->pc(), Assembler::kDebugBreakSlotLength);
 }
 
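+// The debugger's frame padding layout is not supported on x64.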
+const bool Debug::FramePaddingLayout::kIsSupported = false;
+
 
 #define __ ACCESS_MASM(masm)
 
diff --git a/src/x64/ic-x64.cc b/src/x64/ic-x64.cc
index 0632ce4..6ba5fb6 100644
--- a/src/x64/ic-x64.cc
+++ b/src/x64/ic-x64.cc
@@ -1741,11 +1741,11 @@
 
   // Activate inlined smi code.
   if (previous_state == UNINITIALIZED) {
-    PatchInlinedSmiCode(address());
+    PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
   }
 }
 
-void PatchInlinedSmiCode(Address address) {
+void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
   // The address of the instruction following the call.
   Address test_instruction_address =
       address + Assembler::kCallTargetAddressOffset;
@@ -1766,14 +1766,18 @@
            address, test_instruction_address, delta);
   }
 
-  // Patch with a short conditional jump. There must be a
-  // short jump-if-carry/not-carry at this position.
+  // Patch with a short conditional jump. Enabling means switching from a short
+  // jump-if-carry/not-carry to a jump-if-zero/not-zero, whereas disabling
+  // performs the reverse switch.
   Address jmp_address = test_instruction_address - delta;
-  ASSERT(*jmp_address == Assembler::kJncShortOpcode ||
-         *jmp_address == Assembler::kJcShortOpcode);
-  Condition cc = *jmp_address == Assembler::kJncShortOpcode
-      ? not_zero
-      : zero;
+  ASSERT((check == ENABLE_INLINED_SMI_CHECK)
+         ? (*jmp_address == Assembler::kJncShortOpcode ||
+            *jmp_address == Assembler::kJcShortOpcode)
+         : (*jmp_address == Assembler::kJnzShortOpcode ||
+            *jmp_address == Assembler::kJzShortOpcode));
+  Condition cc = (check == ENABLE_INLINED_SMI_CHECK)
+      ? (*jmp_address == Assembler::kJncShortOpcode ? not_zero : zero)
+      : (*jmp_address == Assembler::kJnzShortOpcode ? not_carry : carry);
   *jmp_address = static_cast<byte>(Assembler::kJccShortPrefix | cc);
 }
 
diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc
index 85e7ac0..5f5c2af 100644
--- a/src/x64/lithium-codegen-x64.cc
+++ b/src/x64/lithium-codegen-x64.cc
@@ -2223,41 +2223,35 @@
   Register result = ToRegister(instr->result());
 
   int map_count = instr->hydrogen()->types()->length();
-  Handle<String> name = instr->hydrogen()->name();
+  bool need_generic = instr->hydrogen()->need_generic();
 
-  if (map_count == 0) {
-    ASSERT(instr->hydrogen()->need_generic());
-    __ Move(rcx, instr->hydrogen()->name());
-    Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-    CallCode(ic, RelocInfo::CODE_TARGET, instr);
-  } else {
-    Label done;
-    for (int i = 0; i < map_count - 1; ++i) {
-      Handle<Map> map = instr->hydrogen()->types()->at(i);
+  if (map_count == 0 && !need_generic) {
+    DeoptimizeIf(no_condition, instr->environment());
+    return;
+  }
+  Handle<String> name = instr->hydrogen()->name();
+  Label done;
+  for (int i = 0; i < map_count; ++i) {
+    bool last = (i == map_count - 1);
+    Handle<Map> map = instr->hydrogen()->types()->at(i);
+    __ Cmp(FieldOperand(object, HeapObject::kMapOffset), map);
+    if (last && !need_generic) {
+      DeoptimizeIf(not_equal, instr->environment());
+      EmitLoadFieldOrConstantFunction(result, object, map, name);
+    } else {
       Label next;
-      __ Cmp(FieldOperand(object, HeapObject::kMapOffset), map);
       __ j(not_equal, &next, Label::kNear);
       EmitLoadFieldOrConstantFunction(result, object, map, name);
       __ jmp(&done, Label::kNear);
       __ bind(&next);
     }
-    Handle<Map> map = instr->hydrogen()->types()->last();
-    __ Cmp(FieldOperand(object, HeapObject::kMapOffset), map);
-    if (instr->hydrogen()->need_generic()) {
-      Label generic;
-      __ j(not_equal, &generic, Label::kNear);
-      EmitLoadFieldOrConstantFunction(result, object, map, name);
-      __ jmp(&done, Label::kNear);
-      __ bind(&generic);
-      __ Move(rcx, instr->hydrogen()->name());
-      Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-      CallCode(ic, RelocInfo::CODE_TARGET, instr);
-    } else {
-      DeoptimizeIf(not_equal, instr->environment());
-      EmitLoadFieldOrConstantFunction(result, object, map, name);
-    }
-    __ bind(&done);
   }
+  if (need_generic) {
+    __ Move(rcx, name);
+    Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+    CallCode(ic, RelocInfo::CODE_TARGET, instr);
+  }
+  __ bind(&done);
 }
 
 
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 53becf6..3d380a2 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -4188,7 +4188,7 @@
 CodePatcher::CodePatcher(byte* address, int size)
     : address_(address),
       size_(size),
-      masm_(Isolate::Current(), address, size + Assembler::kGap) {
+      masm_(NULL, address, size + Assembler::kGap) {
   // Create a new macro assembler pointing to the address of the code to patch.
   // The size is adjusted with kGap in order for the assembler to generate size
   // bytes of instructions without failing with buffer size constraints.
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index f37b48c..5721e9b 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -1114,13 +1114,20 @@
                                           name, miss);
     ASSERT(holder_reg.is(receiver) || holder_reg.is(scratch1));
 
+    // Preserve the receiver register explicitly whenever it differs from the
+    // holder and is needed in case the interceptor returns without a result.
+    // The CALLBACKS case needs the receiver to be passed into C++ code; the
+    // FIELD case might cause a miss during the prototype check.
+    bool must_perform_prototype_check = *interceptor_holder != lookup->holder();
+    bool must_preserve_receiver_reg = !receiver.is(holder_reg) &&
+        (lookup->type() == CALLBACKS || must_perform_prototype_check);
+
     // Save necessary data before invoking an interceptor.
     // Requires a frame to make GC aware of pushed pointers.
     {
       FrameScope frame_scope(masm(), StackFrame::INTERNAL);
 
-      if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
-        // CALLBACKS case needs a receiver to be passed into C++ callback.
+      if (must_preserve_receiver_reg) {
         __ push(receiver);
       }
       __ push(holder_reg);
@@ -1146,7 +1153,7 @@
       __ bind(&interceptor_failed);
       __ pop(name_reg);
       __ pop(holder_reg);
-      if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+      if (must_preserve_receiver_reg) {
         __ pop(receiver);
       }
 
@@ -1155,7 +1162,7 @@
 
     // Check that the maps from interceptor's holder to lookup's holder
     // haven't changed.  And load lookup's holder into |holder| register.
-    if (*interceptor_holder != lookup->holder()) {
+    if (must_perform_prototype_check) {
       holder_reg = CheckPrototypes(interceptor_holder,
                                    holder_reg,
                                    Handle<JSObject>(lookup->holder()),
diff --git a/test/cctest/test-api.cc b/test/cctest/test-api.cc
index 2cdc653..8a1e914 100644
--- a/test/cctest/test-api.cc
+++ b/test/cctest/test-api.cc
@@ -16212,6 +16212,30 @@
 }
 
 
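+// An interceptor on an object in the prototype chain must not cause repeated
+// loads to install the property as an own property of the receiver; the value
+// should stay visible only through the prototype chain.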
+THREADED_TEST(Regress125988) {
+  v8::HandleScope scope;
+  Handle<FunctionTemplate> intercept = FunctionTemplate::New();
+  AddInterceptor(intercept, EmptyInterceptorGetter, EmptyInterceptorSetter);
+  LocalContext env;
+  env->Global()->Set(v8_str("Intercept"), intercept->GetFunction());
+  CompileRun("var a = new Object();"
+             "var b = new Intercept();"
+             "var c = new Object();"
+             "c.__proto__ = b;"
+             "b.__proto__ = a;"
+             "a.x = 23;"
+             "for (var i = 0; i < 3; i++) c.x;");
+  ExpectBoolean("c.hasOwnProperty('x')", false);
+  ExpectInt32("c.x", 23);
+  CompileRun("a.y = 42;"
+             "for (var i = 0; i < 3; i++) c.x;");
+  ExpectBoolean("c.hasOwnProperty('x')", false);
+  ExpectInt32("c.x", 23);
+  ExpectBoolean("c.hasOwnProperty('y')", false);
+  ExpectInt32("c.y", 42);
+}
+
+
 static void TestReceiver(Local<Value> expected_result,
                          Local<Value> expected_receiver,
                          const char* code) {
diff --git a/test/cctest/test-heap-profiler.cc b/test/cctest/test-heap-profiler.cc
index 3ac1741..cbe8d44 100644
--- a/test/cctest/test-heap-profiler.cc
+++ b/test/cctest/test-heap-profiler.cc
@@ -34,10 +34,10 @@
     CheckEntry(root);
     while (!list.is_empty()) {
       i::HeapEntry* entry = list.RemoveLast();
-      i::Vector<i::HeapGraphEdge> children = entry->children();
+      i::Vector<i::HeapGraphEdge*> children = entry->children();
       for (int i = 0; i < children.length(); ++i) {
-        if (children[i].type() == i::HeapGraphEdge::kShortcut) continue;
-        i::HeapEntry* child = children[i].to();
+        if (children[i]->type() == i::HeapGraphEdge::kShortcut) continue;
+        i::HeapEntry* child = children[i]->to();
         if (!child->painted()) {
           list.Add(child);
           child->paint();
diff --git a/test/cctest/test-list.cc b/test/cctest/test-list.cc
index 7520b05..4c78f02 100644
--- a/test/cctest/test-list.cc
+++ b/test/cctest/test-list.cc
@@ -130,6 +130,18 @@
 }
 
 
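+// List::Allocate(n) should resize the list to exactly n elements, growing the
+// backing store as needed so that every slot is addressable.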
+TEST(Allocate) {
+  List<int> list(4);
+  list.Add(1);
+  CHECK_EQ(1, list.length());
+  list.Allocate(100);
+  CHECK_EQ(100, list.length());
+  CHECK_LE(100, list.capacity());
+  list[99] = 123;
+  CHECK_EQ(123, list[99]);
+}
+
+
 TEST(Clear) {
   List<int> list(4);
   CHECK_EQ(0, list.length());
diff --git a/test/cctest/test-mark-compact.cc b/test/cctest/test-mark-compact.cc
index 83a576d..700f322 100644
--- a/test/cctest/test-mark-compact.cc
+++ b/test/cctest/test-mark-compact.cc
@@ -540,7 +540,7 @@
       }
     } else {
       if (v8::internal::Snapshot::IsEnabled()) {
-        CHECK_LE(booted_memory - initial_memory, 2600 * 1024);  // 2484.
+        CHECK_LE(booted_memory - initial_memory, 2800 * 1024);  // 2484.
       } else {
         CHECK_LE(booted_memory - initial_memory, 2950 * 1024);  // 2844
       }
diff --git a/test/cctest/testcfg.py b/test/cctest/testcfg.py
index b2eabc4..f1387e8 100644
--- a/test/cctest/testcfg.py
+++ b/test/cctest/testcfg.py
@@ -53,6 +53,8 @@
       serialization_file = join('obj', 'test', self.mode, 'serdes')
     else:
       serialization_file = join('obj', 'serdes')
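+      # Make sure the output directory for the serialization file exists.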
+      if not exists(join(self.context.buildspace, 'obj')):
+        os.makedirs(join(self.context.buildspace, 'obj'))
     serialization_file += '_' + self.GetName()
     serialization_file = join(self.context.buildspace, serialization_file)
     serialization_file += ''.join(self.variant_flags).replace('-', '_')
diff --git a/test/mjsunit/accessor-map-sharing.js b/test/mjsunit/accessor-map-sharing.js
new file mode 100644
index 0000000..ab45afa
--- /dev/null
+++ b/test/mjsunit/accessor-map-sharing.js
@@ -0,0 +1,176 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
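+// Objects that are given the same accessor (getter/setter) for the same
+// property name should end up sharing a map; %HaveSameMap is used below to
+// verify this.
+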
+// Handy abbreviations.
+var dp = Object.defineProperty;
+var gop = Object.getOwnPropertyDescriptor;
+
+function getter() { return 111; }
+function setter(x) { print(222); }
+function anotherGetter() { return 333; }
+function anotherSetter(x) { print(444); }
+var obj1, obj2;
+
+// Two objects with the same getter.
+obj1 = {};
+dp(obj1, "alpha", { get: getter });
+obj2 = {};
+dp(obj2, "alpha", { get: getter });
+assertTrue(%HaveSameMap(obj1, obj2));
+
+// Two objects with the same getter, oldskool.
+obj1 = {};
+obj1.__defineGetter__("bravo", getter);
+assertEquals(getter, obj1.__lookupGetter__("bravo"));
+obj2 = {};
+obj2.__defineGetter__("bravo", getter);
+assertEquals(getter, obj2.__lookupGetter__("bravo"));
+assertTrue(%HaveSameMap(obj1, obj2));
+
+// Two objects with the same setter.
+obj1 = {};
+dp(obj1, "charlie", { set: setter });
+obj2 = {};
+dp(obj2, "charlie", { set: setter });
+assertTrue(%HaveSameMap(obj1, obj2));
+
+// Two objects with the same setter, oldskool.
+obj1 = {};
+obj1.__defineSetter__("delta", setter);
+assertEquals(setter, obj1.__lookupSetter__("delta"));
+obj2 = {};
+obj2.__defineSetter__("delta", setter);
+assertEquals(setter, obj2.__lookupSetter__("delta"));
+assertTrue(%HaveSameMap(obj1, obj2));
+
+// Two objects with the same getter and setter.
+obj1 = {};
+dp(obj1, "foxtrot", { get: getter, set: setter });
+obj2 = {};
+dp(obj2, "foxtrot", { get: getter, set: setter });
+assertTrue(%HaveSameMap(obj1, obj2));
+
+// Two objects with the same getter and setter, set separately.
+obj1 = {};
+dp(obj1, "golf", { get: getter, configurable: true });
+dp(obj1, "golf", { set: setter, configurable: true });
+obj2 = {};
+dp(obj2, "golf", { get: getter, configurable: true });
+dp(obj2, "golf", { set: setter, configurable: true });
+assertTrue(%HaveSameMap(obj1, obj2));
+
+// Two objects with the same getter and setter, set separately, oldskool.
+obj1 = {};
+obj1.__defineGetter__("hotel", getter);
+obj1.__defineSetter__("hotel", setter);
+obj2 = {};
+obj2.__defineGetter__("hotel", getter);
+obj2.__defineSetter__("hotel", setter);
+assertTrue(%HaveSameMap(obj1, obj2));
+
+// Attribute-only change, shouldn't affect previous descriptor properties.
+obj1 = {};
+dp(obj1, "india", { get: getter, configurable: true, enumerable: true });
+assertEquals(getter, gop(obj1, "india").get);
+assertTrue(gop(obj1, "india").configurable);
+assertTrue(gop(obj1, "india").enumerable);
+dp(obj1, "india", { enumerable: false });
+assertEquals(getter, gop(obj1, "india").get);
+assertTrue(gop(obj1, "india").configurable);
+assertFalse(gop(obj1, "india").enumerable);
+
+// Attribute-only change, shouldn't affect objects with previously shared maps.
+obj1 = {};
+dp(obj1, "juliet", { set: setter, configurable: true, enumerable: false });
+assertEquals(setter, gop(obj1, "juliet").set);
+assertTrue(gop(obj1, "juliet").configurable);
+assertFalse(gop(obj1, "juliet").enumerable);
+obj2 = {};
+dp(obj2, "juliet", { set: setter, configurable: true, enumerable: false });
+assertEquals(setter, gop(obj2, "juliet").set);
+assertTrue(gop(obj2, "juliet").configurable);
+assertFalse(gop(obj2, "juliet").enumerable);
+dp(obj1, "juliet", { set: setter, configurable: false, enumerable: true });
+assertEquals(setter, gop(obj1, "juliet").set);
+assertFalse(gop(obj1, "juliet").configurable);
+assertTrue(gop(obj1, "juliet").enumerable);
+assertEquals(setter, gop(obj2, "juliet").set);
+assertTrue(gop(obj2, "juliet").configurable);
+assertFalse(gop(obj2, "juliet").enumerable);
+
+// Two objects with the different getters.
+obj1 = {};
+dp(obj1, "kilo", { get: getter });
+obj2 = {};
+dp(obj2, "kilo", { get: anotherGetter });
+assertEquals(getter, gop(obj1, "kilo").get);
+assertEquals(anotherGetter, gop(obj2, "kilo").get);
+assertFalse(%HaveSameMap(obj1, obj2));
+
+// Two objects with the same getters and different setters.
+obj1 = {};
+dp(obj1, "lima", { get: getter, set: setter });
+obj2 = {};
+dp(obj2, "lima", { get: getter, set: anotherSetter });
+assertEquals(setter, gop(obj1, "lima").set);
+assertEquals(anotherSetter, gop(obj2, "lima").set);
+assertFalse(%HaveSameMap(obj1, obj2));
+
+// Even 'undefined' is a kind of getter.
+obj1 = {};
+dp(obj1, "mike", { get: undefined });
+assertTrue("mike" in obj1);
+assertEquals(undefined, gop(obj1, "mike").get);
+assertEquals(undefined, obj1.__lookupGetter__("mike"));
+assertEquals(undefined, gop(obj1, "mike").set);
+assertEquals(undefined, obj1.__lookupSetter__("mike"));
+
+// Even 'undefined' is a kind of setter.
+obj1 = {};
+dp(obj1, "november", { set: undefined });
+assertTrue("november" in obj1);
+assertEquals(undefined, gop(obj1, "november").get);
+assertEquals(undefined, obj1.__lookupGetter__("november"));
+assertEquals(undefined, gop(obj1, "november").set);
+assertEquals(undefined, obj1.__lookupSetter__("november"));
+
+// Redefining a data property.
+obj1 = {};
+obj1.oscar = 12345;
+dp(obj1, "oscar", { set: setter });
+assertEquals(setter, gop(obj1, "oscar").set);
+
+// Re-adding the same getter/attributes pair.
+obj1 = {};
+dp(obj1, "papa", { get: getter, configurable: true });
+dp(obj1, "papa", { get: getter, set: setter, configurable: true });
+assertEquals(getter, gop(obj1, "papa").get);
+assertEquals(setter, gop(obj1, "papa").set);
+assertTrue(gop(obj1, "papa").configurable);
+assertFalse(gop(obj1, "papa").enumerable);
diff --git a/test/mjsunit/big-array-literal.js b/test/mjsunit/big-array-literal.js
index a0fad7c..8e0ff87 100644
--- a/test/mjsunit/big-array-literal.js
+++ b/test/mjsunit/big-array-literal.js
@@ -25,6 +25,9 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+// On MacOS, this test needs a stack size of at least 538 kBytes.
+// Flags: --stack-size=600
+
 // Test that we can make large object literals that work.
 // Also test that we can attempt to make even larger object literals without
 // crashing.
diff --git a/test/mjsunit/compiler/inline-construct.js b/test/mjsunit/compiler/inline-construct.js
index af9e69c..7a3f1e4 100644
--- a/test/mjsunit/compiler/inline-construct.js
+++ b/test/mjsunit/compiler/inline-construct.js
@@ -25,7 +25,7 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --allow-natives-syntax --expose-gc --inline-construct
+// Flags: --allow-natives-syntax --inline-construct
 
 // Test inlining of constructor calls.
 
@@ -68,7 +68,9 @@
   %DeoptimizeFunction(value_context);
   %DeoptimizeFunction(test_context);
   %DeoptimizeFunction(effect_context);
-  gc();  // Makes V8 forget about type information for *_context.
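+  // Explicitly drop the type feedback collected for the *_context closures.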
+  %ClearFunctionTypeFeedback(value_context);
+  %ClearFunctionTypeFeedback(test_context);
+  %ClearFunctionTypeFeedback(effect_context);
 }
 
 
diff --git a/test/mjsunit/debug-liveedit-stack-padding.js b/test/mjsunit/debug-liveedit-stack-padding.js
new file mode 100644
index 0000000..36de356
--- /dev/null
+++ b/test/mjsunit/debug-liveedit-stack-padding.js
@@ -0,0 +1,88 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug
+// Get the Debug object exposed from the debug context global object.
+
+Debug = debug.Debug;
+
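+// While stopped at a script break point, apply a LiveEdit patch to the
+// function below and check that the patched code takes effect.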
+SlimFunction = eval(
+    "(function() {\n " +
+    "  return 'Cat';\n" +
+    "})\n"
+);
+
+var script = Debug.findScript(SlimFunction);
+
+Debug.setScriptBreakPointById(script.id, 1, 0);
+
+var orig_animal = "'Cat'";
+var patch_pos = script.source.indexOf(orig_animal);
+var new_animal_patch = "'Capybara'";
+
+debugger_handler = (function() {
+  var already_called = false;
+  return function() {
+    if (already_called) {
+      return;
+    }
+    already_called = true;
+
+    var change_log = new Array();
+    try {
+      Debug.LiveEdit.TestApi.ApplySingleChunkPatch(script, patch_pos,
+          orig_animal.length, new_animal_patch, change_log);
+    } finally {
+      print("Change log: " + JSON.stringify(change_log) + "\n");
+    }
+  };
+})();
+
+var saved_exception = null;
+
+function listener(event, exec_state, event_data, data) {
+  if (event == Debug.DebugEvent.Break) {
+    try {
+      debugger_handler();
+    } catch (e) {
+      saved_exception = e;
+    }
+  } else {
+    print("Other: " + event);
+  }
+}
+
+Debug.setListener(listener);
+
+var animal = SlimFunction();
+
+if (saved_exception) {
+  print("Exception: " + saved_exception);
+  assertUnreachable();
+}
+
+assertEquals("Capybara", animal);
diff --git a/test/mjsunit/error-constructors.js b/test/mjsunit/error-constructors.js
index 966a162..107164d 100644
--- a/test/mjsunit/error-constructors.js
+++ b/test/mjsunit/error-constructors.js
@@ -25,39 +25,7 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-var e = new Error();
-assertFalse(e.hasOwnProperty('message'));
-Error.prototype.toString = Object.prototype.toString;
-assertEquals("[object Error]", Error.prototype.toString());
-assertEquals(Object.prototype, Error.prototype.__proto__);
-
-// Check that error construction does not call setters for the
-// properties on error objects in prototypes.
-function fail() { assertTrue(false); };
-ReferenceError.prototype.__defineSetter__('stack', fail);
-ReferenceError.prototype.__defineSetter__('message', fail);
-ReferenceError.prototype.__defineSetter__('type', fail);
-ReferenceError.prototype.__defineSetter__('arguments', fail);
-var e0 = new ReferenceError();
-var e1 = new ReferenceError('123');
-assertTrue(e1.hasOwnProperty('message'));
-assertTrue(e0.hasOwnProperty('stack'));
-assertTrue(e1.hasOwnProperty('stack'));
-assertTrue(e0.hasOwnProperty('type'));
-assertTrue(e1.hasOwnProperty('type'));
-assertTrue(e0.hasOwnProperty('arguments'));
-assertTrue(e1.hasOwnProperty('arguments'));
-
-// Check that the name property on error prototypes is read-only and
-// dont-delete. This is not specified, but allowing overwriting the
-// name property with a getter can leaks error objects from different
-// script tags in the same context in a browser setting. We therefore
-// disallow changes to the name property on error objects.
-assertEquals("ReferenceError", ReferenceError.prototype.name);
-delete ReferenceError.prototype.name;
-assertEquals("ReferenceError", ReferenceError.prototype.name);
-ReferenceError.prototype.name = "not a reference error";
-assertEquals("ReferenceError", ReferenceError.prototype.name);
+// Flags: --allow-natives-syntax
 
 // Check that message and name are not enumerable on Error objects.
 var desc = Object.getOwnPropertyDescriptor(Error.prototype, 'name');
@@ -75,8 +43,75 @@
 desc = Object.getOwnPropertyDescriptor(e, 'stack');
 assertFalse(desc['enumerable']);
 
+var e = new Error();
+assertFalse(e.hasOwnProperty('message'));
+
 // name is not tested above, but in addition we should have no enumerable
 // properties, so we simply assert that.
 for (var v in e) {
   assertUnreachable();
 }
+
+// Check that error construction does not call setters for the
+// properties on error objects in prototypes.
+function fail() { assertUnreachable(); };
+ReferenceError.prototype.__defineSetter__('name', fail);
+ReferenceError.prototype.__defineSetter__('message', fail);
+ReferenceError.prototype.__defineSetter__('type', fail);
+ReferenceError.prototype.__defineSetter__('arguments', fail);
+ReferenceError.prototype.__defineSetter__('stack', fail);
+
+var e = new ReferenceError();
+assertTrue(e.hasOwnProperty('stack'));
+assertTrue(e.hasOwnProperty('type'));
+assertTrue(e.hasOwnProperty('arguments'));
+
+var e = new ReferenceError('123');
+assertTrue(e.hasOwnProperty('message'));
+assertTrue(e.hasOwnProperty('stack'));
+assertTrue(e.hasOwnProperty('type'));
+assertTrue(e.hasOwnProperty('arguments'));
+
+var e = %MakeReferenceError("my_test_error", [0, 1]);
+assertTrue(e.hasOwnProperty('stack'));
+assertTrue(e.hasOwnProperty('type'));
+assertTrue(e.hasOwnProperty('arguments'));
+assertEquals("my_test_error", e.type)
+
+// Check that intercepting property access from toString is prevented for
+// compiler errors. This is not specified, but allowing interception
+// through a getter can leak error objects from different
+// script tags in the same context in a browser setting.
+var errors = [SyntaxError, ReferenceError, TypeError];
+for (var i in errors) {
+  var name = errors[i].prototype.toString();
+  // Monkey-patch prototype.
+  var props = ["name", "message", "type", "arguments", "stack"];
+  for (var j in props) {
+    errors[i].prototype.__defineGetter__(props[j], fail);
+  }
+  // String conversion should not invoke monkey-patched getters on prototype.
+  var e = new errors[i];
+  assertEquals(name, e.toString());
+  // Custom getters in actual objects are welcome.
+  e.__defineGetter__("name", function() { return "mine"; });
+  assertEquals("mine", e.toString());
+}
+
+// Monkey-patching non-static errors should still be observable.
+function MyError() {}
+MyError.prototype = new Error;
+var errors = [Error, RangeError, EvalError, URIError, MyError];
+for (var i in errors) {
+  errors[i].prototype.__defineGetter__("name", function() { return "my"; });
+  errors[i].prototype.__defineGetter__("message", function() { return "moo"; });
+  var e = new errors[i];
+  assertEquals("my: moo", e.toString());
+}
+
+
+Error.prototype.toString = Object.prototype.toString;
+assertEquals("[object Error]", Error.prototype.toString());
+assertEquals(Object.prototype, Error.prototype.__proto__);
+var e = new Error("foo");
+assertEquals("[object Error]", e.toString());
diff --git a/test/mjsunit/mjsunit.status b/test/mjsunit/mjsunit.status
index a1b9270..ab5f2e3 100644
--- a/test/mjsunit/mjsunit.status
+++ b/test/mjsunit/mjsunit.status
@@ -64,6 +64,7 @@
 # Stack manipulations in LiveEdit are buggy - see bug 915
 debug-liveedit-check-stack: SKIP
 debug-liveedit-patch-positions-replace: SKIP
+debug-liveedit-stack-padding: SKIP
 
 # Test Crankshaft compilation time.  Expected to take too long in debug mode.
 regress/regress-1969: PASS, SKIP if $mode == debug
diff --git a/test/mjsunit/regress/regress-117409.js b/test/mjsunit/regress/regress-117409.js
new file mode 100644
index 0000000..9222191
--- /dev/null
+++ b/test/mjsunit/regress/regress-117409.js
@@ -0,0 +1,52 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-gc
+
+function KeyedStoreIC(a) { a[0] = Math.E; }
+
+// Create literal with a fast double elements backing store
+var literal = [1.2];
+
+// Specialize the IC for fast double elements
+KeyedStoreIC(literal);
+KeyedStoreIC(literal);
+
+// Truncate the array to 0 elements, at which point the backing store will be
+// replaced with an empty fixed array.
+literal.length = 0;
+
+// The ArrayPush built-in will replace the empty fixed array backing store with
+// a 19-element fixed array backing store.  This leads to a mismatch between
+// the map and the backing store.  Debug mode will crash here in the
+// set_elements accessor.
+literal.push(Math.E, Math.E);
+
+// Corrupt the backing store!
+KeyedStoreIC(literal);
+
+// Release mode will crash here when trying to visit parts of E as pointers.
+gc();
diff --git a/test/mjsunit/regress/regress-126412.js b/test/mjsunit/regress/regress-126412.js
new file mode 100644
index 0000000..0677f70
--- /dev/null
+++ b/test/mjsunit/regress/regress-126412.js
@@ -0,0 +1,33 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
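+// The huge quantifier bounds below stress the regexp engine's match-length
+// arithmetic; these expressions only need to compile and run without crashing.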
+"".match(/(A{9999999999}B|C*)*D/);
+"C".match(/(A{9999999999}B|C*)*D/);
+"".match(/(A{9999999999}B|C*)*/ );
+"C".match(/(A{9999999999}B|C*)*/ );
+"".match(/(9u|(2\`shj{2147483649,}\r|3|f|y|3*)+8\B)\W93+/);
+"9u8 ".match(/(9u|(2\`shj{2147483649,}\r|3|f|y|3*)+8\B)\W93+/);
diff --git a/test/mjsunit/regress/regress-1639-2.js b/test/mjsunit/regress/regress-1639-2.js
index c439dd8..01f0dc2 100644
--- a/test/mjsunit/regress/regress-1639-2.js
+++ b/test/mjsunit/regress/regress-1639-2.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -28,6 +28,7 @@
 // Flags: --expose-debug-as debug
 // Get the Debug object exposed from the debug context global object.
 Debug = debug.Debug
+var exception = false;
 
 function sendCommand(state, cmd) {
   // Get the debug command processor in paused state.
@@ -79,6 +80,7 @@
     }
   } catch (e) {
     print(e);
+    exception = true;
   }
 }
 
@@ -91,3 +93,4 @@
 // Set a break point and call to invoke the debug event listener.
 Debug.setBreakPoint(a, 0, 0);
 a();
+assertFalse(exception);
diff --git a/test/mjsunit/regress/regress-1639.js b/test/mjsunit/regress/regress-1639.js
index ed68c97..47cdbc4 100644
--- a/test/mjsunit/regress/regress-1639.js
+++ b/test/mjsunit/regress/regress-1639.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -29,6 +29,7 @@
 // Get the Debug object exposed from the debug context global object.
 Debug = debug.Debug
 var breaks = 0;
+var exception = false;
 
 function sendCommand(state, cmd) {
   // Get the debug command processor in paused state.
@@ -47,15 +48,18 @@
                    "should not break on unexpected lines")
       assertEquals('BREAK ' + breaks, line.substr(-7));
       breaks++;
-      sendCommand(exec_state, {
-        seq: 0,
-        type: "request",
-        command: "continue",
-        arguments: { stepaction: "next" }
-      });
+      if (breaks < 4) {
+        sendCommand(exec_state, {
+          seq: 0,
+          type: "request",
+          command: "continue",
+          arguments: { stepaction: "next" }
+        });
+      }
     }
   } catch (e) {
     print(e);
+    exception = true;
   }
 }
 
@@ -82,4 +86,6 @@
 // Set a break point and call to invoke the debug event listener.
 Debug.setBreakPoint(b, 0, 0);
 a(b);
-// BREAK 3
+a(); // BREAK 3
+
+assertFalse(exception);
diff --git a/test/test262/testcfg.py b/test/test262/testcfg.py
index 2c9bf06..07f760c 100644
--- a/test/test262/testcfg.py
+++ b/test/test262/testcfg.py
@@ -31,6 +31,7 @@
 from os.path import join, exists
 import urllib
 import hashlib
+import sys
 import tarfile
 
 
@@ -120,7 +121,11 @@
         os.remove(archive_name)
         raise Exception("Hash mismatch of test data file")
       archive = tarfile.open(archive_name, 'r:bz2')
-      archive.extractall(join(self.root))
+      if sys.platform in ('win32', 'cygwin'):
+        # Magic incantation to allow longer path names on Windows.
+        archive.extractall(u'\\\\?\\%s' % self.root)
+      else:
+        archive.extractall(self.root)
       os.rename(join(self.root, 'test262-%s' % revision), directory_name)
 
   def GetBuildRequirements(self):
diff --git a/tools/check-static-initializers.sh b/tools/check-static-initializers.sh
index e6da828..1103a97 100644
--- a/tools/check-static-initializers.sh
+++ b/tools/check-static-initializers.sh
@@ -37,14 +37,19 @@
 expected_static_init_count=3
 
 v8_root=$(readlink -f $(dirname $BASH_SOURCE)/../)
-d8="${v8_root}/d8"
+
+if [ -n "$1" ] ; then
+  d8="${v8_root}/$1"
+else
+  d8="${v8_root}/d8"
+fi
 
 if [ ! -f "$d8" ]; then
-  echo "Please build the project with SCons."
+  echo "d8 binary not found: $d8"
   exit 1
 fi
 
-static_inits=$(nm "$d8" | grep _GLOBAL__I | awk '{ print $NF; }')
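+# Static initializer symbols contain both "_GLOBAL_" and "_I_"; the exact
+# mangling varies between toolchains, so match in two steps.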
+static_inits=$(nm "$d8" | grep _GLOBAL_ | grep _I_ | awk '{ print $NF; }')
 
 static_init_count=$(echo "$static_inits" | wc -l)
 
@@ -52,4 +57,7 @@
   echo "Too many static initializers."
   echo "$static_inits"
   exit 1
+else
+  echo "Static initializer check passed ($static_init_count initializers)."
+  exit 0
 fi
diff --git a/tools/grokdump.py b/tools/grokdump.py
index 9977289..f5df489 100755
--- a/tools/grokdump.py
+++ b/tools/grokdump.py
@@ -27,6 +27,7 @@
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+import cmd
 import ctypes
 import mmap
 import optparse
@@ -36,6 +37,7 @@
 import types
 import codecs
 import re
+import struct
 
 
 USAGE="""usage: %prog [OPTION]...
@@ -444,6 +446,33 @@
     location = self.FindLocation(address)
     return self.minidump[location:location + size]
 
+  def _ReadWord(self, location):
+    if self.arch == MD_CPU_ARCHITECTURE_AMD64:
+      return ctypes.c_uint64.from_buffer(self.minidump, location).value
+    elif self.arch == MD_CPU_ARCHITECTURE_X86:
+      return ctypes.c_uint32.from_buffer(self.minidump, location).value
+
+  def ForEachMemoryRegion(self, cb):
+    if self.memory_list64 is not None:
+      # The 64-bit memory ranges are stored contiguously starting at base_rva,
+      # so track a running offset while walking them.
+      offset = 0
+      for r in self.memory_list64.ranges:
+        location = self.memory_list64.base_rva + offset
+        cb(self, r.start, r.size, location)
+        offset += r.size
+
+    if self.memory_list is not None:
+      for r in self.memory_list.ranges:
+        cb(self, r.start, r.memory.data_size, r.memory.rva)
+
+  def FindWord(self, word):
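+    "Print the addresses of all memory locations that contain the given word."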
+    def search_inside_region(reader, start, size, location):
+      for loc in xrange(location, location + size):
+        if reader._ReadWord(loc) == word:
+          slot = start + (loc - location)
+          print "%s: %s" % (reader.FormatIntPtr(slot),
+                            reader.FormatIntPtr(word))
+
+    self.ForEachMemoryRegion(search_inside_region)
+
   def FindLocation(self, address):
     offset = 0
     if self.memory_list64 is not None:
@@ -1011,6 +1040,42 @@
       ['eax', 'ebx', 'ecx', 'edx', 'edi', 'esi', 'ebp', 'esp', 'eip']
 }
 
+class InspectionShell(cmd.Cmd):
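+  """Interactive shell for inspecting memory and heap objects in a minidump."""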
+  def __init__(self, reader, heap):
+    cmd.Cmd.__init__(self)
+    self.reader = reader
+    self.heap = heap
+    self.prompt = "(grok) "
+
+  def do_dd(self, address):
+    "Interpret memory at the given address (if available)"\
+    " as a sequence of words."
+    start = int(address, 16)
+    for slot in xrange(start,
+                       start + self.reader.PointerSize() * 10,
+                       self.reader.PointerSize()):
+      maybe_address = self.reader.ReadUIntPtr(slot)
+      heap_object = self.heap.FindObject(maybe_address)
+      print "%s: %s" % (self.reader.FormatIntPtr(slot),
+                        self.reader.FormatIntPtr(maybe_address))
+      if heap_object:
+        heap_object.Print(Printer())
+        print
+
+  def do_s(self, word):
+    "Search for a given word in available memory regions"
+    word = int(word, 0)
+    print "searching for word", word
+    self.reader.FindWord(word)
+
+  def do_list(self, smth):
+    """List all available memory regions."""
+    def print_region(reader, start, size, location):
+      print "%s - %s" % (reader.FormatIntPtr(start),
+                         reader.FormatIntPtr(start + size))
+
+    self.reader.ForEachMemoryRegion(print_region)
+
 def AnalyzeMinidump(options, minidump_name):
   reader = MinidumpReader(options, minidump_name)
   DebugPrint("========================================")
@@ -1045,21 +1110,25 @@
     print FormatDisasmLine(start, heap, line)
   print
 
-  print "Annotated stack (from exception.esp to bottom):"
-  for slot in xrange(stack_top, stack_bottom, reader.PointerSize()):
-    maybe_address = reader.ReadUIntPtr(slot)
-    heap_object = heap.FindObject(maybe_address)
-    print "%s: %s" % (reader.FormatIntPtr(slot),
-                      reader.FormatIntPtr(maybe_address))
-    if heap_object:
-      heap_object.Print(Printer())
-      print
+  if options.shell:
+    InspectionShell(reader, heap).cmdloop("type help to get help")
+  else:
+    print "Annotated stack (from exception.esp to bottom):"
+    for slot in xrange(stack_top, stack_bottom, reader.PointerSize()):
+      maybe_address = reader.ReadUIntPtr(slot)
+      heap_object = heap.FindObject(maybe_address)
+      print "%s: %s" % (reader.FormatIntPtr(slot),
+                        reader.FormatIntPtr(maybe_address))
+      if heap_object:
+        heap_object.Print(Printer())
+        print
 
   reader.Dispose()
 
 
 if __name__ == "__main__":
   parser = optparse.OptionParser(USAGE)
+  parser.add_option("-s", "--shell", dest="shell", action="store_true")
   options, args = parser.parse_args()
   if len(args) != 1:
     parser.print_help()
diff --git a/tools/presubmit.py b/tools/presubmit.py
index c606e42..a0b81e8 100755
--- a/tools/presubmit.py
+++ b/tools/presubmit.py
@@ -303,7 +303,8 @@
               or (name == 'third_party')
               or (name == 'gyp')
               or (name == 'out')
-              or (name == 'obj'))
+              or (name == 'obj')
+              or (name == 'DerivedSources'))
 
   IGNORE_COPYRIGHTS = ['cpplint.py',
                        'earley-boyer.js',
diff --git a/tools/test-wrapper-gypbuild.py b/tools/test-wrapper-gypbuild.py
index fda4105..eda2459 100755
--- a/tools/test-wrapper-gypbuild.py
+++ b/tools/test-wrapper-gypbuild.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python
 #
-# Copyright 2011 the V8 project authors. All rights reserved.
+# Copyright 2012 the V8 project authors. All rights reserved.
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions are
 # met:
@@ -56,6 +56,9 @@
   result.add_option("--no-presubmit",
                     help='Skip presubmit checks',
                     default=False, action="store_true")
+  result.add_option("--buildbot",
+                    help='Adapt to path structure used on buildbots',
+                    default=False, action="store_true")
 
   # Flags this wrapper script handles itself:
   result.add_option("-m", "--mode",
@@ -144,14 +147,16 @@
     options.mode = options.mode.split(',')
     options.arch = options.arch.split(',')
   for mode in options.mode:
-    if not mode in ['debug', 'release']:
+    if not mode.lower() in ['debug', 'release']:
       print "Unknown mode %s" % mode
       return False
   for arch in options.arch:
     if not arch in ['ia32', 'x64', 'arm', 'mips']:
       print "Unknown architecture %s" % arch
       return False
-
+  if options.buildbot:
+    # Buildbots run presubmit tests as a separate step.
+    options.no_presubmit = True
   return True
 
 
@@ -213,22 +218,26 @@
     return 1
 
   workspace = abspath(join(dirname(sys.argv[0]), '..'))
+  returncodes = 0
 
   if not options.no_presubmit:
     print ">>> running presubmit tests"
-    subprocess.call([workspace + '/tools/presubmit.py'])
+    returncodes += subprocess.call([workspace + '/tools/presubmit.py'])
 
   args_for_children = [workspace + '/tools/test.py'] + PassOnOptions(options)
   args_for_children += ['--no-build', '--build-system=gyp']
   for arg in args:
     args_for_children += [arg]
-  returncodes = 0
   env = os.environ
 
   for mode in options.mode:
     for arch in options.arch:
       print ">>> running tests for %s.%s" % (arch, mode)
-      shellpath = workspace + '/' + options.outdir + '/' + arch + '.' + mode
+      if options.buildbot:
+        shellpath = workspace + '/' + options.outdir + '/' + mode
+        mode = mode.lower()
+      else:
+        shellpath = workspace + '/' + options.outdir + '/' + arch + '.' + mode
       env['LD_LIBRARY_PATH'] = shellpath + '/lib.target'
       shell = shellpath + "/d8"
       child = subprocess.Popen(' '.join(args_for_children +