Version 2.3.7

Reduced the size of heap snapshots produced by the heap profiler (issue 783).
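
A quick sketch of reading the renamed size accessors through the public API
(hedged: this assumes v8-profiler.h declarations matching the api.cc changes
below, where GetTotalSize/GetPrivateSize become GetReachableSize/
GetRetainedSize):

  const v8::HeapSnapshot* snapshot =
      v8::HeapProfiler::TakeSnapshot(v8::String::New("snap"));
  const v8::HeapGraphNode* root = snapshot->GetRoot();
  int self = root->GetSelfSize();
  int retained = root->GetRetainedSize();    // Replaces GetPrivateSize().
  int reachable = root->GetReachableSize();  // Replaces GetTotalSize().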

Introduced v8::Value::IsRegExp method.
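
A minimal usage sketch (assumes an entered context; the regexp literal is
only an example):

  v8::HandleScope scope;
  v8::Handle<v8::Script> script =
      v8::Script::Compile(v8::String::New("/ab+c/i"));
  v8::Handle<v8::Value> result = script->Run();
  if (result->IsRegExp()) {
    // result holds a JavaScript RegExp object.
  }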

Fixed a CPU profiler crash in the start/stop sequence when a non-existent profile name is passed (issue http://crbug.com/51594).
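
Roughly the sequence that used to misbehave (hedged: assumes the
v8-profiler.h CpuProfiler interface of this release):

  v8::CpuProfiler::StartProfiling(v8::String::New("outer"));
  // Stopping a title that was never started must not shut down the
  // sampler thread while "outer" is still being collected:
  v8::CpuProfiler::StopProfiling(v8::String::New("bogus"));
  v8::CpuProfiler::StopProfiling(v8::String::New("outer"));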

Introduced a new indexed property query callbacks API (issue 816). This API is guarded by the USE_NEW_QUERY_CALLBACK define and is disabled by default; a sketch of its use follows.
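
A hedged sketch of an embedder opting in (the v8.h side of this change is
not shown in the patch below; the Integer-returning query signature and the
IndexGetter/IndexSetter helpers are assumptions):

  // Build with -DUSE_NEW_QUERY_CALLBACK to get the new signature.
  static v8::Handle<v8::Integer> IndexQuery(uint32_t index,
                                            const v8::AccessorInfo& info) {
    // Report the property as present, with default attributes.
    return v8::Integer::New(v8::None);
  }

  templ->SetIndexedPropertyHandler(IndexGetter, IndexSetter, IndexQuery);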

Removed support for object literal get/set with a number or string property name.

Fixed handling of JSObject::elements in CalculateNetworkSize (issue 822).

Allowed compiling with strict aliasing enabled on GCC 4.4 (issue 463).
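
Much of the mechanical churn below replaces two-argument
BitCast<To, From>(...) calls with a single-argument form that deduces the
source type. For reference, the helper is essentially the following sketch
(V8's actual version in src/utils.h also statically asserts that
sizeof(Dest) == sizeof(Source)):

  #include <string.h>

  template <class Dest, class Source>
  inline Dest BitCast(const Source& source) {
    // memcpy reinterprets the bytes without dereferencing through an
    // incompatible pointer type, which is what strict aliasing forbids.
    Dest dest;
    memcpy(&dest, &source, sizeof(dest));
    return dest;
  }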


git-svn-id: http://v8.googlecode.com/svn/trunk@5241 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/src/accessors.cc b/src/accessors.cc
index ed0bbd7..3c49846 100644
--- a/src/accessors.cc
+++ b/src/accessors.cc
@@ -488,7 +488,7 @@
   JSFunction* function = FindInPrototypeChain<JSFunction>(object, &found_it);
   if (!found_it) return Smi::FromInt(0);
   // Check if already compiled.
-  if (!function->is_compiled()) {
+  if (!function->shared()->is_compiled()) {
     // If the function isn't compiled yet, the length is not computed
     // correctly yet. Compile it now and return the right length.
     HandleScope scope;
diff --git a/src/api.cc b/src/api.cc
index 4fdc95f..b3164dd 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -886,10 +886,10 @@
 }
 
 
-void FunctionTemplate::SetIndexedInstancePropertyHandler(
+void FunctionTemplate::SetIndexedInstancePropertyHandlerImpl(
       IndexedPropertyGetter getter,
       IndexedPropertySetter setter,
-      IndexedPropertyQuery query,
+      IndexedPropertyQueryImpl query,
       IndexedPropertyDeleter remover,
       IndexedPropertyEnumerator enumerator,
       Handle<Value> data) {
@@ -1054,10 +1054,10 @@
 }
 
 
-void ObjectTemplate::SetIndexedPropertyHandler(
+void ObjectTemplate::SetIndexedPropertyHandlerImpl(
       IndexedPropertyGetter getter,
       IndexedPropertySetter setter,
-      IndexedPropertyQuery query,
+      IndexedPropertyQueryImpl query,
       IndexedPropertyDeleter remover,
       IndexedPropertyEnumerator enumerator,
       Handle<Value> data) {
@@ -1068,12 +1068,12 @@
   i::FunctionTemplateInfo* constructor =
       i::FunctionTemplateInfo::cast(Utils::OpenHandle(this)->constructor());
   i::Handle<i::FunctionTemplateInfo> cons(constructor);
-  Utils::ToLocal(cons)->SetIndexedInstancePropertyHandler(getter,
-                                                          setter,
-                                                          query,
-                                                          remover,
-                                                          enumerator,
-                                                          data);
+  Utils::ToLocal(cons)->SetIndexedInstancePropertyHandlerImpl(getter,
+                                                              setter,
+                                                              query,
+                                                              remover,
+                                                              enumerator,
+                                                              data);
 }
 
 
@@ -1792,6 +1792,13 @@
 }
 
 
+bool Value::IsRegExp() const {
+  if (IsDeadCheck("v8::Value::IsRegExp()")) return false;
+  i::Handle<i::Object> obj = Utils::OpenHandle(this);
+  return obj->IsJSRegExp();
+}
+
+
 Local<String> Value::ToString() const {
   if (IsDeadCheck("v8::Value::ToString()")) return Local<String>();
   LOG_API("ToString");
@@ -4491,24 +4498,27 @@
 }
 
 
+static i::HeapGraphEdge* ToInternal(const HeapGraphEdge* edge) {
+  return const_cast<i::HeapGraphEdge*>(
+      reinterpret_cast<const i::HeapGraphEdge*>(edge));
+}
+
 HeapGraphEdge::Type HeapGraphEdge::GetType() const {
   IsDeadCheck("v8::HeapGraphEdge::GetType");
-  return static_cast<HeapGraphEdge::Type>(
-      reinterpret_cast<const i::HeapGraphEdge*>(this)->type());
+  return static_cast<HeapGraphEdge::Type>(ToInternal(this)->type());
 }
 
 
 Handle<Value> HeapGraphEdge::GetName() const {
   IsDeadCheck("v8::HeapGraphEdge::GetName");
-  const i::HeapGraphEdge* edge =
-      reinterpret_cast<const i::HeapGraphEdge*>(this);
+  i::HeapGraphEdge* edge = ToInternal(this);
   switch (edge->type()) {
-    case i::HeapGraphEdge::CONTEXT_VARIABLE:
-    case i::HeapGraphEdge::INTERNAL:
-    case i::HeapGraphEdge::PROPERTY:
+    case i::HeapGraphEdge::kContextVariable:
+    case i::HeapGraphEdge::kInternal:
+    case i::HeapGraphEdge::kProperty:
       return Handle<String>(ToApi<String>(i::Factory::LookupAsciiSymbol(
           edge->name())));
-    case i::HeapGraphEdge::ELEMENT:
+    case i::HeapGraphEdge::kElement:
       return Handle<Number>(ToApi<Number>(i::Factory::NewNumberFromInt(
           edge->index())));
     default: UNREACHABLE();
@@ -4519,28 +4529,32 @@
 
 const HeapGraphNode* HeapGraphEdge::GetFromNode() const {
   IsDeadCheck("v8::HeapGraphEdge::GetFromNode");
-  const i::HeapEntry* from =
-      reinterpret_cast<const i::HeapGraphEdge*>(this)->from();
+  const i::HeapEntry* from = ToInternal(this)->From();
   return reinterpret_cast<const HeapGraphNode*>(from);
 }
 
 
 const HeapGraphNode* HeapGraphEdge::GetToNode() const {
   IsDeadCheck("v8::HeapGraphEdge::GetToNode");
-  const i::HeapEntry* to =
-      reinterpret_cast<const i::HeapGraphEdge*>(this)->to();
+  const i::HeapEntry* to = ToInternal(this)->to();
   return reinterpret_cast<const HeapGraphNode*>(to);
 }
 
 
+static i::HeapGraphPath* ToInternal(const HeapGraphPath* path) {
+  return const_cast<i::HeapGraphPath*>(
+      reinterpret_cast<const i::HeapGraphPath*>(path));
+}
+
+
 int HeapGraphPath::GetEdgesCount() const {
-  return reinterpret_cast<const i::HeapGraphPath*>(this)->path()->length();
+  return ToInternal(this)->path()->length();
 }
 
 
 const HeapGraphEdge* HeapGraphPath::GetEdge(int index) const {
   return reinterpret_cast<const HeapGraphEdge*>(
-      reinterpret_cast<const i::HeapGraphPath*>(this)->path()->at(index));
+      ToInternal(this)->path()->at(index));
 }
 
 
@@ -4555,137 +4569,136 @@
 }
 
 
+static i::HeapEntry* ToInternal(const HeapGraphNode* entry) {
+  return const_cast<i::HeapEntry*>(
+      reinterpret_cast<const i::HeapEntry*>(entry));
+}
+
+
 HeapGraphNode::Type HeapGraphNode::GetType() const {
   IsDeadCheck("v8::HeapGraphNode::GetType");
-  return static_cast<HeapGraphNode::Type>(
-      reinterpret_cast<const i::HeapEntry*>(this)->type());
+  return static_cast<HeapGraphNode::Type>(ToInternal(this)->type());
 }
 
 
 Handle<String> HeapGraphNode::GetName() const {
   IsDeadCheck("v8::HeapGraphNode::GetName");
   return Handle<String>(ToApi<String>(i::Factory::LookupAsciiSymbol(
-      reinterpret_cast<const i::HeapEntry*>(this)->name())));
+      ToInternal(this)->name())));
 }
 
 
 uint64_t HeapGraphNode::GetId() const {
   IsDeadCheck("v8::HeapGraphNode::GetId");
-  return reinterpret_cast<const i::HeapEntry*>(this)->id();
+  return ToInternal(this)->id();
 }
 
 
 int HeapGraphNode::GetSelfSize() const {
   IsDeadCheck("v8::HeapGraphNode::GetSelfSize");
-  return reinterpret_cast<const i::HeapEntry*>(this)->self_size();
+  return ToInternal(this)->self_size();
 }
 
 
-int HeapGraphNode::GetTotalSize() const {
-  IsDeadCheck("v8::HeapSnapshot::GetHead");
-  return const_cast<i::HeapEntry*>(
-      reinterpret_cast<const i::HeapEntry*>(this))->TotalSize();
+int HeapGraphNode::GetReachableSize() const {
+  IsDeadCheck("v8::HeapSnapshot::GetReachableSize");
+  return ToInternal(this)->ReachableSize();
 }
 
 
-int HeapGraphNode::GetPrivateSize() const {
-  IsDeadCheck("v8::HeapSnapshot::GetPrivateSize");
-  return const_cast<i::HeapEntry*>(
-      reinterpret_cast<const i::HeapEntry*>(this))->NonSharedTotalSize();
+int HeapGraphNode::GetRetainedSize() const {
+  IsDeadCheck("v8::HeapSnapshot::GetRetainedSize");
+  return ToInternal(this)->RetainedSize();
 }
 
 
 int HeapGraphNode::GetChildrenCount() const {
   IsDeadCheck("v8::HeapSnapshot::GetChildrenCount");
-  return reinterpret_cast<const i::HeapEntry*>(this)->children()->length();
+  return ToInternal(this)->children().length();
 }
 
 
 const HeapGraphEdge* HeapGraphNode::GetChild(int index) const {
   IsDeadCheck("v8::HeapSnapshot::GetChild");
   return reinterpret_cast<const HeapGraphEdge*>(
-      reinterpret_cast<const i::HeapEntry*>(this)->children()->at(index));
+      &ToInternal(this)->children()[index]);
 }
 
 
 int HeapGraphNode::GetRetainersCount() const {
   IsDeadCheck("v8::HeapSnapshot::GetRetainersCount");
-  return reinterpret_cast<const i::HeapEntry*>(this)->retainers()->length();
+  return ToInternal(this)->retainers().length();
 }
 
 
 const HeapGraphEdge* HeapGraphNode::GetRetainer(int index) const {
   IsDeadCheck("v8::HeapSnapshot::GetRetainer");
   return reinterpret_cast<const HeapGraphEdge*>(
-      reinterpret_cast<const i::HeapEntry*>(this)->retainers()->at(index));
+      ToInternal(this)->retainers()[index]);
 }
 
 
 int HeapGraphNode::GetRetainingPathsCount() const {
   IsDeadCheck("v8::HeapSnapshot::GetRetainingPathsCount");
-  return const_cast<i::HeapEntry*>(
-      reinterpret_cast<const i::HeapEntry*>(
-          this))->GetRetainingPaths()->length();
+  return ToInternal(this)->GetRetainingPaths()->length();
 }
 
 
 const HeapGraphPath* HeapGraphNode::GetRetainingPath(int index) const {
   IsDeadCheck("v8::HeapSnapshot::GetRetainingPath");
   return reinterpret_cast<const HeapGraphPath*>(
-      const_cast<i::HeapEntry*>(
-          reinterpret_cast<const i::HeapEntry*>(
-              this))->GetRetainingPaths()->at(index));
+      ToInternal(this)->GetRetainingPaths()->at(index));
 }
 
 
 const HeapGraphNode* HeapSnapshotsDiff::GetAdditionsRoot() const {
   IsDeadCheck("v8::HeapSnapshotsDiff::GetAdditionsRoot");
-  const i::HeapSnapshotsDiff* diff =
-      reinterpret_cast<const i::HeapSnapshotsDiff*>(this);
+  i::HeapSnapshotsDiff* diff =
+      const_cast<i::HeapSnapshotsDiff*>(
+          reinterpret_cast<const i::HeapSnapshotsDiff*>(this));
   return reinterpret_cast<const HeapGraphNode*>(diff->additions_root());
 }
 
 
 const HeapGraphNode* HeapSnapshotsDiff::GetDeletionsRoot() const {
   IsDeadCheck("v8::HeapSnapshotsDiff::GetDeletionsRoot");
-  const i::HeapSnapshotsDiff* diff =
-      reinterpret_cast<const i::HeapSnapshotsDiff*>(this);
+  i::HeapSnapshotsDiff* diff =
+      const_cast<i::HeapSnapshotsDiff*>(
+          reinterpret_cast<const i::HeapSnapshotsDiff*>(this));
   return reinterpret_cast<const HeapGraphNode*>(diff->deletions_root());
 }
 
 
+static i::HeapSnapshot* ToInternal(const HeapSnapshot* snapshot) {
+  return const_cast<i::HeapSnapshot*>(
+      reinterpret_cast<const i::HeapSnapshot*>(snapshot));
+}
+
+
 unsigned HeapSnapshot::GetUid() const {
   IsDeadCheck("v8::HeapSnapshot::GetUid");
-  return reinterpret_cast<const i::HeapSnapshot*>(this)->uid();
+  return ToInternal(this)->uid();
 }
 
 
 Handle<String> HeapSnapshot::GetTitle() const {
   IsDeadCheck("v8::HeapSnapshot::GetTitle");
-  const i::HeapSnapshot* snapshot =
-      reinterpret_cast<const i::HeapSnapshot*>(this);
   return Handle<String>(ToApi<String>(i::Factory::LookupAsciiSymbol(
-      snapshot->title())));
+      ToInternal(this)->title())));
 }
 
 
 const HeapGraphNode* HeapSnapshot::GetRoot() const {
   IsDeadCheck("v8::HeapSnapshot::GetHead");
-  const i::HeapSnapshot* snapshot =
-      reinterpret_cast<const i::HeapSnapshot*>(this);
-  return reinterpret_cast<const HeapGraphNode*>(snapshot->const_root());
+  return reinterpret_cast<const HeapGraphNode*>(ToInternal(this)->root());
 }
 
 
 const HeapSnapshotsDiff* HeapSnapshot::CompareWith(
     const HeapSnapshot* snapshot) const {
   IsDeadCheck("v8::HeapSnapshot::CompareWith");
-  i::HeapSnapshot* snapshot1 = const_cast<i::HeapSnapshot*>(
-      reinterpret_cast<const i::HeapSnapshot*>(this));
-  i::HeapSnapshot* snapshot2 = const_cast<i::HeapSnapshot*>(
-      reinterpret_cast<const i::HeapSnapshot*>(snapshot));
   return reinterpret_cast<const HeapSnapshotsDiff*>(
-      snapshot1->CompareWith(snapshot2));
+      ToInternal(this)->CompareWith(ToInternal(snapshot)));
 }
 
 
diff --git a/src/arm/builtins-arm.cc b/src/arm/builtins-arm.cc
index b1f29ba..37768e8 100644
--- a/src/arm/builtins-arm.cc
+++ b/src/arm/builtins-arm.cc
@@ -1050,7 +1050,7 @@
   __ ldr(r2,
          FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
   __ mov(r2, Operand(r2, ASR, kSmiTagSize));
-  __ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kCodeOffset));
+  __ ldr(r3, FieldMemOperand(r1, JSFunction::kCodeOffset));
   __ add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
   __ cmp(r2, r0);  // Check formal and actual parameter counts.
   __ Jump(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index 4bcf1a0..aec80d7 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -1532,9 +1532,8 @@
   __ BranchOnSmi(r0, &build_args);
   __ CompareObjectType(r0, r1, r2, JS_FUNCTION_TYPE);
   __ b(ne, &build_args);
-  __ ldr(r0, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
   Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply));
-  __ ldr(r1, FieldMemOperand(r0, SharedFunctionInfo::kCodeOffset));
+  __ ldr(r1, FieldMemOperand(r0, JSFunction::kCodeOffset));
   __ cmp(r1, Operand(apply_code));
   __ b(ne, &build_args);
 
@@ -4176,21 +4175,21 @@
 
 
 void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
-  VirtualFrame::SpilledScope spilled_scope(frame_);
-  ASSERT(args->length() == 1);
   JumpTarget leave, null, function, non_function_constructor;
+  Register scratch = VirtualFrame::scratch0();
 
-  // Load the object into r0.
+  // Load the object into register.
+  ASSERT(args->length() == 1);
   Load(args->at(0));
-  frame_->EmitPop(r0);
+  Register tos = frame_->PopToRegister();
 
   // If the object is a smi, we return null.
-  __ tst(r0, Operand(kSmiTagMask));
+  __ tst(tos, Operand(kSmiTagMask));
   null.Branch(eq);
 
   // Check that the object is a JS object but take special care of JS
   // functions to make sure they have 'Function' as their class.
-  __ CompareObjectType(r0, r0, r1, FIRST_JS_OBJECT_TYPE);
+  __ CompareObjectType(tos, tos, scratch, FIRST_JS_OBJECT_TYPE);
   null.Branch(lt);
 
   // As long as JS_FUNCTION_TYPE is the last instance type and it is
@@ -4198,37 +4197,38 @@
   // LAST_JS_OBJECT_TYPE.
   STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
   STATIC_ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
-  __ cmp(r1, Operand(JS_FUNCTION_TYPE));
+  __ cmp(scratch, Operand(JS_FUNCTION_TYPE));
   function.Branch(eq);
 
   // Check if the constructor in the map is a function.
-  __ ldr(r0, FieldMemOperand(r0, Map::kConstructorOffset));
-  __ CompareObjectType(r0, r1, r1, JS_FUNCTION_TYPE);
+  __ ldr(tos, FieldMemOperand(tos, Map::kConstructorOffset));
+  __ CompareObjectType(tos, scratch, scratch, JS_FUNCTION_TYPE);
   non_function_constructor.Branch(ne);
 
-  // The r0 register now contains the constructor function. Grab the
+  // The tos register now contains the constructor function. Grab the
   // instance class name from there.
-  __ ldr(r0, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
-  __ ldr(r0, FieldMemOperand(r0, SharedFunctionInfo::kInstanceClassNameOffset));
-  frame_->EmitPush(r0);
+  __ ldr(tos, FieldMemOperand(tos, JSFunction::kSharedFunctionInfoOffset));
+  __ ldr(tos,
+         FieldMemOperand(tos, SharedFunctionInfo::kInstanceClassNameOffset));
+  frame_->EmitPush(tos);
   leave.Jump();
 
   // Functions have class 'Function'.
   function.Bind();
-  __ mov(r0, Operand(Factory::function_class_symbol()));
-  frame_->EmitPush(r0);
+  __ mov(tos, Operand(Factory::function_class_symbol()));
+  frame_->EmitPush(tos);
   leave.Jump();
 
   // Objects with a non-function constructor have class 'Object'.
   non_function_constructor.Bind();
-  __ mov(r0, Operand(Factory::Object_symbol()));
-  frame_->EmitPush(r0);
+  __ mov(tos, Operand(Factory::Object_symbol()));
+  frame_->EmitPush(tos);
   leave.Jump();
 
   // Non-JS objects have class null.
   null.Bind();
-  __ LoadRoot(r0, Heap::kNullValueRootIndex);
-  frame_->EmitPush(r0);
+  __ LoadRoot(tos, Heap::kNullValueRootIndex);
+  frame_->EmitPush(tos);
 
   // All done.
   leave.Bind();
@@ -4236,45 +4236,51 @@
 
 
 void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
-  VirtualFrame::SpilledScope spilled_scope(frame_);
-  ASSERT(args->length() == 1);
+  Register scratch = VirtualFrame::scratch0();
   JumpTarget leave;
+
+  ASSERT(args->length() == 1);
   Load(args->at(0));
-  frame_->EmitPop(r0);  // r0 contains object.
+  Register tos = frame_->PopToRegister();  // tos contains object.
   // if (object->IsSmi()) return the object.
-  __ tst(r0, Operand(kSmiTagMask));
+  __ tst(tos, Operand(kSmiTagMask));
   leave.Branch(eq);
   // It is a heap object - get map. If (!object->IsJSValue()) return the object.
-  __ CompareObjectType(r0, r1, r1, JS_VALUE_TYPE);
+  __ CompareObjectType(tos, scratch, scratch, JS_VALUE_TYPE);
   leave.Branch(ne);
   // Load the value.
-  __ ldr(r0, FieldMemOperand(r0, JSValue::kValueOffset));
+  __ ldr(tos, FieldMemOperand(tos, JSValue::kValueOffset));
   leave.Bind();
-  frame_->EmitPush(r0);
+  frame_->EmitPush(tos);
 }
 
 
 void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
-  VirtualFrame::SpilledScope spilled_scope(frame_);
-  ASSERT(args->length() == 2);
+  Register scratch1 = VirtualFrame::scratch0();
+  Register scratch2 = VirtualFrame::scratch1();
   JumpTarget leave;
+
+  ASSERT(args->length() == 2);
   Load(args->at(0));    // Load the object.
   Load(args->at(1));    // Load the value.
-  frame_->EmitPop(r0);  // r0 contains value
-  frame_->EmitPop(r1);  // r1 contains object
+  Register value = frame_->PopToRegister();
+  Register object = frame_->PopToRegister(value);
   // if (object->IsSmi()) return object.
-  __ tst(r1, Operand(kSmiTagMask));
+  __ tst(object, Operand(kSmiTagMask));
   leave.Branch(eq);
   // It is a heap object - get map. If (!object->IsJSValue()) return the object.
-  __ CompareObjectType(r1, r2, r2, JS_VALUE_TYPE);
+  __ CompareObjectType(object, scratch1, scratch1, JS_VALUE_TYPE);
   leave.Branch(ne);
   // Store the value.
-  __ str(r0, FieldMemOperand(r1, JSValue::kValueOffset));
+  __ str(value, FieldMemOperand(object, JSValue::kValueOffset));
   // Update the write barrier.
-  __ RecordWrite(r1, Operand(JSValue::kValueOffset - kHeapObjectTag), r2, r3);
+  __ RecordWrite(object,
+                 Operand(JSValue::kValueOffset - kHeapObjectTag),
+                 scratch1,
+                 scratch2);
   // Leave.
   leave.Bind();
-  frame_->EmitPush(r0);
+  frame_->EmitPush(value);
 }
 
 
@@ -4558,22 +4564,18 @@
 // This generates code that performs a String.prototype.charCodeAt() call
 // or returns a smi in order to trigger conversion.
 void CodeGenerator::GenerateStringCharCodeAt(ZoneList<Expression*>* args) {
-  VirtualFrame::SpilledScope spilled_scope(frame_);
   Comment(masm_, "[ GenerateStringCharCodeAt");
   ASSERT(args->length() == 2);
 
   Load(args->at(0));
   Load(args->at(1));
 
-  Register index = r1;
-  Register object = r2;
-
-  frame_->EmitPop(r1);
-  frame_->EmitPop(r2);
+  Register index = frame_->PopToRegister();
+  Register object = frame_->PopToRegister(index);
 
   // We need two extra registers.
-  Register scratch = r3;
-  Register result = r0;
+  Register scratch = VirtualFrame::scratch0();
+  Register result = VirtualFrame::scratch1();
 
   DeferredStringCharCodeAt* deferred =
       new DeferredStringCharCodeAt(object,
@@ -4608,16 +4610,13 @@
 
 // Generates code for creating a one-char string from a char code.
 void CodeGenerator::GenerateStringCharFromCode(ZoneList<Expression*>* args) {
-  VirtualFrame::SpilledScope spilled_scope(frame_);
   Comment(masm_, "[ GenerateStringCharFromCode");
   ASSERT(args->length() == 1);
 
   Load(args->at(0));
 
-  Register code = r1;
-  Register result = r0;
-
-  frame_->EmitPop(code);
+  Register result = frame_->GetTOSRegister();
+  Register code = frame_->PopToRegister(result);
 
   DeferredStringCharFromCode* deferred = new DeferredStringCharFromCode(
       code, result);
@@ -4679,23 +4678,20 @@
 // This generates code that performs a String.prototype.charAt() call
 // or returns a smi in order to trigger conversion.
 void CodeGenerator::GenerateStringCharAt(ZoneList<Expression*>* args) {
-  VirtualFrame::SpilledScope spilled_scope(frame_);
   Comment(masm_, "[ GenerateStringCharAt");
   ASSERT(args->length() == 2);
 
   Load(args->at(0));
   Load(args->at(1));
 
-  Register index = r1;
-  Register object = r2;
-
-  frame_->EmitPop(r1);
-  frame_->EmitPop(r2);
+  Register index = frame_->PopToRegister();
+  Register object = frame_->PopToRegister(index);
 
   // We need three extra registers.
-  Register scratch1 = r3;
-  Register scratch2 = r4;
-  Register result = r0;
+  Register scratch1 = VirtualFrame::scratch0();
+  Register scratch2 = VirtualFrame::scratch1();
+  // Use r6 without notifying the virtual frame.
+  Register result = r6;
 
   DeferredStringCharAt* deferred =
       new DeferredStringCharAt(object,
@@ -4874,13 +4870,13 @@
 
 
 void CodeGenerator::GenerateArguments(ZoneList<Expression*>* args) {
-  VirtualFrame::SpilledScope spilled_scope(frame_);
   ASSERT(args->length() == 1);
 
   // Satisfy contract with ArgumentsAccessStub:
   // Load the key into r1 and the formal parameters count into r0.
   Load(args->at(0));
-  frame_->EmitPop(r1);
+  frame_->PopToR1();
+  frame_->SpillAll();
   __ mov(r0, Operand(Smi::FromInt(scope()->num_parameters())));
 
   // Call the shared stub to get to arguments[key].
@@ -5108,9 +5104,7 @@
 void DeferredSearchCache::Generate() {
   __ Push(cache_, key_);
   __ CallRuntime(Runtime::kGetFromCache, 2);
-  if (!dst_.is(r0)) {
-    __ mov(dst_, r0);
-  }
+  __ Move(dst_, r0);
 }
 
 
@@ -5130,33 +5124,42 @@
 
   Load(args->at(1));
 
-  VirtualFrame::SpilledScope spilled_scope(frame_);
+  frame_->PopToR1();
+  frame_->SpillAll();
+  Register key = r1;  // Just popped to r1.
+  Register result = r0;  // Free, as frame has just been spilled.
+  Register scratch1 = VirtualFrame::scratch0();
+  Register scratch2 = VirtualFrame::scratch1();
 
-  frame_->EmitPop(r2);
+  __ ldr(scratch1, ContextOperand(cp, Context::GLOBAL_INDEX));
+  __ ldr(scratch1,
+         FieldMemOperand(scratch1, GlobalObject::kGlobalContextOffset));
+  __ ldr(scratch1,
+         ContextOperand(scratch1, Context::JSFUNCTION_RESULT_CACHES_INDEX));
+  __ ldr(scratch1,
+         FieldMemOperand(scratch1, FixedArray::OffsetOfElementAt(cache_id)));
 
-  __ ldr(r1, ContextOperand(cp, Context::GLOBAL_INDEX));
-  __ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalContextOffset));
-  __ ldr(r1, ContextOperand(r1, Context::JSFUNCTION_RESULT_CACHES_INDEX));
-  __ ldr(r1, FieldMemOperand(r1, FixedArray::OffsetOfElementAt(cache_id)));
-
-  DeferredSearchCache* deferred = new DeferredSearchCache(r0, r1, r2);
+  DeferredSearchCache* deferred =
+      new DeferredSearchCache(result, scratch1, key);
 
   const int kFingerOffset =
       FixedArray::OffsetOfElementAt(JSFunctionResultCache::kFingerIndex);
   STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
-  __ ldr(r0, FieldMemOperand(r1, kFingerOffset));
-  // r0 now holds finger offset as a smi.
-  __ add(r3, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  // r3 now points to the start of fixed array elements.
-  __ ldr(r0, MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize, PreIndex));
-  // Note side effect of PreIndex: r3 now points to the key of the pair.
-  __ cmp(r2, r0);
+  __ ldr(result, FieldMemOperand(scratch1, kFingerOffset));
+  // result now holds finger offset as a smi.
+  __ add(scratch2, scratch1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  // scratch2 now points to the start of fixed array elements.
+  __ ldr(result,
+         MemOperand(
+             scratch2, result, LSL, kPointerSizeLog2 - kSmiTagSize, PreIndex));
+  // Note side effect of PreIndex: scratch2 now points to the key of the pair.
+  __ cmp(key, result);
   deferred->Branch(ne);
 
-  __ ldr(r0, MemOperand(r3, kPointerSize));
+  __ ldr(result, MemOperand(scratch2, kPointerSize));
 
   deferred->BindExit();
-  frame_->EmitPush(r0);
+  frame_->EmitPush(result);
 }
 
 
@@ -6851,6 +6854,11 @@
   __ str(cp, FieldMemOperand(r0, JSFunction::kContextOffset));
   __ str(r1, FieldMemOperand(r0, JSFunction::kLiteralsOffset));
 
+  // Initialize the code pointer in the function to be the one
+  // found in the shared function info object.
+  __ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kCodeOffset));
+  __ str(r3, FieldMemOperand(r0, JSFunction::kCodeOffset));
+
   // Return result. The argument function info has been popped already.
   __ Ret();
 
@@ -10444,11 +10452,9 @@
     // NumberToSmi discards numbers that are not exact integers.
     __ CallRuntime(Runtime::kNumberToSmi, 1);
   }
-  if (!scratch_.is(r0)) {
-    // Save the conversion result before the pop instructions below
-    // have a chance to overwrite it.
-    __ mov(scratch_, r0);
-  }
+  // Save the conversion result before the pop instructions below
+  // have a chance to overwrite it.
+  __ Move(scratch_, r0);
   __ pop(index_);
   __ pop(object_);
   // Reload the instance type.
@@ -10467,9 +10473,7 @@
   call_helper.BeforeCall(masm);
   __ Push(object_, index_);
   __ CallRuntime(Runtime::kStringCharCodeAt, 2);
-  if (!result_.is(r0)) {
-    __ mov(result_, r0);
-  }
+  __ Move(result_, r0);
   call_helper.AfterCall(masm);
   __ jmp(&exit_);
 
@@ -10510,9 +10514,7 @@
   call_helper.BeforeCall(masm);
   __ push(code_);
   __ CallRuntime(Runtime::kCharFromCode, 1);
-  if (!result_.is(r0)) {
-    __ mov(result_, r0);
-  }
+  __ Move(result_, r0);
   call_helper.AfterCall(masm);
   __ jmp(&exit_);
 
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 9c25ccd..7a03641 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -757,7 +757,7 @@
                       SharedFunctionInfo::kFormalParameterCountOffset));
   mov(expected_reg, Operand(expected_reg, ASR, kSmiTagSize));
   ldr(code_reg,
-      MemOperand(code_reg, SharedFunctionInfo::kCodeOffset - kHeapObjectTag));
+      MemOperand(r1, JSFunction::kCodeOffset - kHeapObjectTag));
   add(code_reg, code_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
 
   ParameterCount expected(expected_reg);
@@ -1508,8 +1508,7 @@
     // Make sure the code objects in the builtins object and in the
     // builtin function are the same.
     push(r1);
-    ldr(r1, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
-    ldr(r1, FieldMemOperand(r1, SharedFunctionInfo::kCodeOffset));
+    ldr(r1, FieldMemOperand(r1, JSFunction::kCodeOffset));
     cmp(r1, target);
     Assert(eq, "Builtin code object changed");
     pop(r1);
diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc
index e1d4489..0d59505 100644
--- a/src/bootstrapper.cc
+++ b/src/bootstrapper.cc
@@ -56,7 +56,7 @@
   }
 
   void Iterate(ObjectVisitor* v) {
-    v->VisitPointer(BitCast<Object**, FixedArray**>(&cache_));
+    v->VisitPointer(BitCast<Object**>(&cache_));
   }
 
 
@@ -470,6 +470,7 @@
   Handle<Code> code =
       Handle<Code>(Builtins::builtin(Builtins::EmptyFunction));
   empty_function->set_code(*code);
+  empty_function->shared()->set_code(*code);
   Handle<String> source = Factory::NewStringFromAscii(CStrVector("() {}"));
   Handle<Script> script = Factory::NewScript(source);
   script->set_type(Smi::FromInt(Script::TYPE_NATIVE));
@@ -1545,6 +1546,8 @@
     Handle<SharedFunctionInfo> shared
         = Handle<SharedFunctionInfo>(function->shared());
     if (!EnsureCompiled(shared, CLEAR_EXCEPTION)) return false;
+    // Set the code object on the function object.
+    function->set_code(function->shared()->code());
     builtins->set_javascript_builtin_code(id, shared->code());
   }
   return true;
diff --git a/src/checks.h b/src/checks.h
index 13374d8..5ea5992 100644
--- a/src/checks.h
+++ b/src/checks.h
@@ -280,14 +280,13 @@
 
 
 // The ASSERT macro is equivalent to CHECK except that it only
-// generates code in debug builds.  Ditto STATIC_ASSERT.
+// generates code in debug builds.
 #ifdef DEBUG
 #define ASSERT_RESULT(expr)  CHECK(expr)
 #define ASSERT(condition)    CHECK(condition)
 #define ASSERT_EQ(v1, v2)    CHECK_EQ(v1, v2)
 #define ASSERT_NE(v1, v2)    CHECK_NE(v1, v2)
 #define ASSERT_GE(v1, v2)    CHECK_GE(v1, v2)
-#define STATIC_ASSERT(test)  STATIC_CHECK(test)
 #define SLOW_ASSERT(condition) if (FLAG_enable_slow_asserts) CHECK(condition)
 #else
 #define ASSERT_RESULT(expr)     (expr)
@@ -295,9 +294,14 @@
 #define ASSERT_EQ(v1, v2)      ((void) 0)
 #define ASSERT_NE(v1, v2)      ((void) 0)
 #define ASSERT_GE(v1, v2)      ((void) 0)
-#define STATIC_ASSERT(test)    ((void) 0)
 #define SLOW_ASSERT(condition) ((void) 0)
 #endif
+// Static asserts have no impact on runtime performance, so they can be
+// safely enabled in release mode.  Moreover, the ((void) 0) expression
+// obeys different syntax rules than a typedef, e.g. it can't appear
+// inside a class declaration; this would lead to inconsistent behaviour
+// between debug and release compilation modes.
+#define STATIC_ASSERT(test)  STATIC_CHECK(test)
 
 
 #define ASSERT_TAG_ALIGNED(address) \
diff --git a/src/codegen.cc b/src/codegen.cc
index 444698c..a9fab43 100644
--- a/src/codegen.cc
+++ b/src/codegen.cc
@@ -77,14 +77,23 @@
     // Generate the code.
     Comment cmnt(masm_, code->comment());
     masm_->bind(code->entry_label());
-    code->SaveRegisters();
+    if (code->AutoSaveAndRestore()) {
+      code->SaveRegisters();
+    }
     code->Generate();
-    code->RestoreRegisters();
-    masm_->jmp(code->exit_label());
+    if (code->AutoSaveAndRestore()) {
+      code->RestoreRegisters();
+      code->Exit();
+    }
   }
 }
 
 
+void DeferredCode::Exit() {
+  masm_->jmp(exit_label());
+}
+
+
 void CodeGenerator::SetFrame(VirtualFrame* new_frame,
                              RegisterFile* non_frame_registers) {
   RegisterFile saved_counts;
diff --git a/src/codegen.h b/src/codegen.h
index 2a6ad64..588468f 100644
--- a/src/codegen.h
+++ b/src/codegen.h
@@ -319,6 +319,15 @@
 
   void SaveRegisters();
   void RestoreRegisters();
+  void Exit();
+
+  // If this returns true then all registers will be saved for the duration
+  // of the Generate() call.  Otherwise the registers are not saved and the
+  // Generate() call must bracket any runtime calls with calls to
+  // SaveRegisters() and RestoreRegisters().  In this case the Generate
+  // method must also call Exit() in order to return to the non-deferred
+  // code.
+  virtual bool AutoSaveAndRestore() { return true; }
 
  protected:
   MacroAssembler* masm_;
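
Note: the new hook changes the deferred-code protocol.  A stub that
overrides AutoSaveAndRestore() to return false must bracket its own runtime
calls, roughly as in this sketch (hypothetical DeferredExample; the real
user of the hook is DeferredInlineBinaryOperation in
src/ia32/codegen-ia32.cc further down):

  class DeferredExample: public DeferredCode {
   public:
    virtual void Generate() {
      // ... fast path that makes no runtime calls ...
      SaveRegisters();    // Bracket the runtime call explicitly.
      __ CallRuntime(Runtime::kNumberAlloc, 0);
      RestoreRegisters();
      Exit();             // Explicit jump back to the non-deferred code.
    }
    // Registers are saved and restored manually in Generate() above.
    virtual bool AutoSaveAndRestore() { return false; }
  };
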
diff --git a/src/cpu-profiler.cc b/src/cpu-profiler.cc
index c8d29f8..3e554cc 100644
--- a/src/cpu-profiler.cc
+++ b/src/cpu-profiler.cc
@@ -476,7 +476,7 @@
 
 CpuProfile* CpuProfiler::StopCollectingProfile(const char* title) {
   const double actual_sampling_rate = generator_->actual_sampling_rate();
-  StopProcessorIfLastProfile();
+  StopProcessorIfLastProfile(title);
   CpuProfile* result =
       profiles_->StopProfiling(TokenEnumerator::kNoSecurityToken,
                                title,
@@ -491,14 +491,15 @@
 CpuProfile* CpuProfiler::StopCollectingProfile(Object* security_token,
                                                String* title) {
   const double actual_sampling_rate = generator_->actual_sampling_rate();
-  StopProcessorIfLastProfile();
+  const char* profile_title = profiles_->GetName(title);
+  StopProcessorIfLastProfile(profile_title);
   int token = token_enumerator_->GetTokenId(security_token);
-  return profiles_->StopProfiling(token, title, actual_sampling_rate);
+  return profiles_->StopProfiling(token, profile_title, actual_sampling_rate);
 }
 
 
-void CpuProfiler::StopProcessorIfLastProfile() {
-  if (profiles_->is_last_profile()) {
+void CpuProfiler::StopProcessorIfLastProfile(const char* title) {
+  if (profiles_->IsLastProfile(title)) {
     reinterpret_cast<Sampler*>(Logger::ticker_)->Stop();
     processor_->Stop();
     processor_->Join();
diff --git a/src/cpu-profiler.h b/src/cpu-profiler.h
index 03b8176..4d5559e 100644
--- a/src/cpu-profiler.h
+++ b/src/cpu-profiler.h
@@ -260,7 +260,7 @@
   void StartProcessorIfNotStarted();
   CpuProfile* StopCollectingProfile(const char* title);
   CpuProfile* StopCollectingProfile(Object* security_token, String* title);
-  void StopProcessorIfLastProfile();
+  void StopProcessorIfLastProfile(const char* title);
 
   CpuProfilesCollection* profiles_;
   unsigned next_profile_uid_;
diff --git a/src/debug.cc b/src/debug.cc
index 5d386cc..dbf9df9 100644
--- a/src/debug.cc
+++ b/src/debug.cc
@@ -852,8 +852,8 @@
 
 
 void Debug::Iterate(ObjectVisitor* v) {
-  v->VisitPointer(BitCast<Object**, Code**>(&(debug_break_return_)));
-  v->VisitPointer(BitCast<Object**, Code**>(&(debug_break_slot_)));
+  v->VisitPointer(BitCast<Object**>(&(debug_break_return_)));
+  v->VisitPointer(BitCast<Object**>(&(debug_break_slot_)));
 }
 
 
diff --git a/src/factory.cc b/src/factory.cc
index d653383..14042e8 100644
--- a/src/factory.cc
+++ b/src/factory.cc
@@ -486,6 +486,10 @@
                                         bool force_initial_map) {
   // Allocate the function
   Handle<JSFunction> function = NewFunction(name, the_hole_value());
+
+  // Set up the code pointer in both the shared function info and in
+  // the function itself.
+  function->shared()->set_code(*code);
   function->set_code(*code);
 
   if (force_initial_map ||
@@ -511,9 +515,12 @@
                                                      Handle<JSObject> prototype,
                                                      Handle<Code> code,
                                                      bool force_initial_map) {
-  // Allocate the function
+  // Allocate the function.
   Handle<JSFunction> function = NewFunction(name, prototype);
 
+  // Set up the code pointer in both the shared function info and in
+  // the function itself.
+  function->shared()->set_code(*code);
   function->set_code(*code);
 
   if (force_initial_map ||
@@ -535,6 +542,7 @@
 Handle<JSFunction> Factory::NewFunctionWithoutPrototype(Handle<String> name,
                                                         Handle<Code> code) {
   Handle<JSFunction> function = NewFunctionWithoutPrototype(name);
+  function->shared()->set_code(*code);
   function->set_code(*code);
   ASSERT(!function->has_initial_map());
   ASSERT(!function->has_prototype());
diff --git a/src/factory.h b/src/factory.h
index 2251112..c014986 100644
--- a/src/factory.h
+++ b/src/factory.h
@@ -329,7 +329,7 @@
 
 #define ROOT_ACCESSOR(type, name, camel_name)                                  \
   static inline Handle<type> name() {                                          \
-    return Handle<type>(BitCast<type**, Object**>(                             \
+    return Handle<type>(BitCast<type**>(                                       \
         &Heap::roots_[Heap::k##camel_name##RootIndex]));                       \
   }
   ROOT_LIST(ROOT_ACCESSOR)
@@ -337,7 +337,7 @@
 
 #define SYMBOL_ACCESSOR(name, str) \
   static inline Handle<String> name() {                                        \
-    return Handle<String>(BitCast<String**, Object**>(                         \
+    return Handle<String>(BitCast<String**>(                                   \
         &Heap::roots_[Heap::k##name##RootIndex]));                             \
   }
   SYMBOL_LIST(SYMBOL_ACCESSOR)
diff --git a/src/handles-inl.h b/src/handles-inl.h
index 8478bb5..bf19f5f 100644
--- a/src/handles-inl.h
+++ b/src/handles-inl.h
@@ -47,7 +47,7 @@
 inline T* Handle<T>::operator*() const {
   ASSERT(location_ != NULL);
   ASSERT(reinterpret_cast<Address>(*location_) != kHandleZapValue);
-  return *location_;
+  return *BitCast<T**>(location_);
 }
 
 
diff --git a/src/handles.cc b/src/handles.cc
index 0d218cb..927cfd9 100644
--- a/src/handles.cc
+++ b/src/handles.cc
@@ -771,20 +771,30 @@
 bool CompileLazy(Handle<JSFunction> function,
                  Handle<Object> receiver,
                  ClearExceptionFlag flag) {
-  CompilationInfo info(function, 0, receiver);
-  bool result = CompileLazyHelper(&info, flag);
-  PROFILE(FunctionCreateEvent(*function));
-  return result;
+  if (function->shared()->is_compiled()) {
+    function->set_code(function->shared()->code());
+    return true;
+  } else {
+    CompilationInfo info(function, 0, receiver);
+    bool result = CompileLazyHelper(&info, flag);
+    PROFILE(FunctionCreateEvent(*function));
+    return result;
+  }
 }
 
 
 bool CompileLazyInLoop(Handle<JSFunction> function,
                        Handle<Object> receiver,
                        ClearExceptionFlag flag) {
-  CompilationInfo info(function, 1, receiver);
-  bool result = CompileLazyHelper(&info, flag);
-  PROFILE(FunctionCreateEvent(*function));
-  return result;
+  if (function->shared()->is_compiled()) {
+    function->set_code(function->shared()->code());
+    return true;
+  } else {
+    CompilationInfo info(function, 1, receiver);
+    bool result = CompileLazyHelper(&info, flag);
+    PROFILE(FunctionCreateEvent(*function));
+    return result;
+  }
 }
 
 
diff --git a/src/heap-profiler.cc b/src/heap-profiler.cc
index 92ded7b..7668bbc 100644
--- a/src/heap-profiler.cc
+++ b/src/heap-profiler.cc
@@ -111,10 +111,10 @@
   int size = obj->Size();
   // If 'properties' and 'elements' are non-empty (thus, non-shared),
   // take their size into account.
-  if (FixedArray::cast(obj->properties())->length() != 0) {
+  if (obj->properties() != Heap::empty_fixed_array()) {
     size += obj->properties()->Size();
   }
-  if (FixedArray::cast(obj->elements())->length() != 0) {
+  if (obj->elements() != Heap::empty_fixed_array()) {
     size += obj->elements()->Size();
   }
   // For functions, also account non-empty context and literals sizes.
@@ -360,7 +360,7 @@
 
 
 HeapSnapshot* HeapProfiler::TakeSnapshotImpl(const char* name) {
-  Heap::CollectAllGarbage(false);
+  Heap::CollectAllGarbage(true);
   HeapSnapshot* result = snapshots_->NewSnapshot(name, next_snapshot_uid_++);
   HeapSnapshotGenerator generator(result);
   generator.GenerateSnapshot();
diff --git a/src/heap.cc b/src/heap.cc
index c4d0439..dfc18cc 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -2452,39 +2452,62 @@
 };
 
 
-static void FlushCodeForFunction(SharedFunctionInfo* function_info) {
-  // The function must be compiled and have the source code available,
-  // to be able to recompile it in case we need the function again.
-  if (!(function_info->is_compiled() && function_info->HasSourceCode())) return;
-
-  // We never flush code for Api functions.
-  if (function_info->IsApiFunction()) return;
-
-  // Only flush code for functions.
-  if (!function_info->code()->kind() == Code::FUNCTION) return;
-
-  // Function must be lazy compilable.
-  if (!function_info->allows_lazy_compilation()) return;
-
-  // If this is a full script wrapped in a function we do no flush the code.
-  if (function_info->is_toplevel()) return;
-
-  // If this function is in the compilation cache we do not flush the code.
-  if (CompilationCache::HasFunction(function_info)) return;
-
+static bool CodeIsActive(Code* code) {
   // Make sure we are not referencing the code from the stack.
   for (StackFrameIterator it; !it.done(); it.Advance()) {
-    if (function_info->code()->contains(it.frame()->pc())) return;
+    if (code->contains(it.frame()->pc())) return true;
   }
   // Iterate the archived stacks in all threads to check if
   // the code is referenced.
-  FlushingStackVisitor threadvisitor(function_info->code());
+  FlushingStackVisitor threadvisitor(code);
   ThreadManager::IterateArchivedThreads(&threadvisitor);
-  if (threadvisitor.FoundCode()) return;
+  if (threadvisitor.FoundCode()) return true;
+  return false;
+}
+
+
+static void FlushCodeForFunction(JSFunction* function) {
+  SharedFunctionInfo* shared_info = function->shared();
+
+  // Special handling if the function and shared info objects
+  // have different code objects.
+  if (function->code() != shared_info->code()) {
+    // If the shared function has been flushed but the function has not,
+    // we flush the function if possible.
+    if (!shared_info->is_compiled() && function->is_compiled() &&
+        !CodeIsActive(function->code())) {
+      function->set_code(shared_info->code());
+    }
+    return;
+  }
+
+  // The function must be compiled and have the source code available,
+  // to be able to recompile it in case we need the function again.
+  if (!(shared_info->is_compiled() && shared_info->HasSourceCode())) return;
+
+  // We never flush code for Api functions.
+  if (shared_info->IsApiFunction()) return;
+
+  // Only flush code for functions.
+  if (shared_info->code()->kind() != Code::FUNCTION) return;
+
+  // Function must be lazy compilable.
+  if (!shared_info->allows_lazy_compilation()) return;
+
+  // If this is a full script wrapped in a function we do not flush the code.
+  if (shared_info->is_toplevel()) return;
+
+  // If this function is in the compilation cache we do not flush the code.
+  if (CompilationCache::HasFunction(shared_info)) return;
+
+  // Check stack and archived threads for the code.
+  if (CodeIsActive(shared_info->code())) return;
 
   // Compute the lazy compilable version of the code.
   HandleScope scope;
-  function_info->set_code(*ComputeLazyCompile(function_info->length()));
+  Code* code = *ComputeLazyCompile(shared_info->length());
+  shared_info->set_code(code);
+  function->set_code(code);
 }
 
 
@@ -2496,12 +2519,12 @@
   HeapObjectIterator it(old_pointer_space());
   for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
     if (obj->IsJSFunction()) {
-      JSFunction* jsfunction = JSFunction::cast(obj);
+      JSFunction* function = JSFunction::cast(obj);
 
       // The function must have a valid context and not be a builtin.
-      if (jsfunction->unchecked_context()->IsContext() &&
-          !jsfunction->IsBuiltin()) {
-        FlushCodeForFunction(jsfunction->shared());
+      if (function->unchecked_context()->IsContext() &&
+          !function->IsBuiltin()) {
+        FlushCodeForFunction(function);
       }
     }
   }
@@ -2651,6 +2674,7 @@
   function->initialize_properties();
   function->initialize_elements();
   function->set_shared(shared);
+  function->set_code(shared->code());
   function->set_prototype_or_initial_map(prototype);
   function->set_context(undefined_value());
   function->set_literals(empty_fixed_array());
@@ -4000,7 +4024,7 @@
   v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
   v->Synchronize("strong_root_list");
 
-  v->VisitPointer(BitCast<Object**, String**>(&hidden_symbol_));
+  v->VisitPointer(BitCast<Object**>(&hidden_symbol_));
   v->Synchronize("symbol");
 
   Bootstrapper::Iterate(v);
diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc
index 6c830cb..2565acb 100644
--- a/src/ia32/assembler-ia32.cc
+++ b/src/ia32/assembler-ia32.cc
@@ -1142,6 +1142,21 @@
 }
 
 
+void Assembler::rcr(Register dst, uint8_t imm8) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  ASSERT(is_uint5(imm8));  // illegal shift count
+  if (imm8 == 1) {
+    EMIT(0xD1);
+    EMIT(0xD8 | dst.code());
+  } else {
+    EMIT(0xC1);
+    EMIT(0xD8 | dst.code());
+    EMIT(imm8);
+  }
+}
+
+
 void Assembler::sar(Register dst, uint8_t imm8) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
index c76c55c..8a5a4c5 100644
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -625,6 +625,7 @@
   void or_(const Operand& dst, const Immediate& x);
 
   void rcl(Register dst, uint8_t imm8);
+  void rcr(Register dst, uint8_t imm8);
 
   void sar(Register dst, uint8_t imm8);
   void sar_cl(Register dst);
diff --git a/src/ia32/builtins-ia32.cc b/src/ia32/builtins-ia32.cc
index 3adb014..31f5041 100644
--- a/src/ia32/builtins-ia32.cc
+++ b/src/ia32/builtins-ia32.cc
@@ -548,7 +548,7 @@
   __ mov(ebx,
          FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
   __ SmiUntag(ebx);
-  __ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
+  __ mov(edx, FieldOperand(edi, JSFunction::kCodeOffset));
   __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
   __ cmp(eax, Operand(ebx));
   __ j(not_equal, Handle<Code>(builtin(ArgumentsAdaptorTrampoline)));
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index ba7785b..3c22def 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -1038,7 +1038,11 @@
 }
 
 
-// Call the specialized stub for a binary operation.
+// Perform or call the specialized stub for a binary operation.  Requires the
+// three registers left, right and dst to be distinct and spilled.  This
+// deferred operation has up to three entry points: the main one calls the
+// runtime system.  The second is for when the result is a non-Smi.  The
+// third is for when at least one of the inputs is non-Smi and we have SSE2.
 class DeferredInlineBinaryOperation: public DeferredCode {
  public:
   DeferredInlineBinaryOperation(Token::Value op,
@@ -1051,11 +1055,23 @@
       : op_(op), dst_(dst), left_(left), right_(right),
         left_info_(left_info), right_info_(right_info), mode_(mode) {
     set_comment("[ DeferredInlineBinaryOperation");
+    ASSERT(!left.is(right));
   }
 
   virtual void Generate();
 
+  // This stub makes explicit calls to SaveRegisters(), RestoreRegisters() and
+  // Exit().
+  virtual bool AutoSaveAndRestore() { return false; }
+
+  void JumpToAnswerOutOfRange(Condition cond);
+  void JumpToConstantRhs(Condition cond, Smi* smi_value);
+  Label* NonSmiInputLabel();
+
  private:
+  void GenerateAnswerOutOfRange();
+  void GenerateNonSmiInput();
+
   Token::Value op_;
   Register dst_;
   Register left_;
@@ -1063,15 +1079,42 @@
   TypeInfo left_info_;
   TypeInfo right_info_;
   OverwriteMode mode_;
+  Label answer_out_of_range_;
+  Label non_smi_input_;
+  Label constant_rhs_;
+  Smi* smi_value_;
 };
 
 
+Label* DeferredInlineBinaryOperation::NonSmiInputLabel() {
+  if (Token::IsBitOp(op_) && CpuFeatures::IsSupported(SSE2)) {
+    return &non_smi_input_;
+  } else {
+    return entry_label();
+  }
+}
+
+
+void DeferredInlineBinaryOperation::JumpToAnswerOutOfRange(Condition cond) {
+  __ j(cond, &answer_out_of_range_);
+}
+
+
+void DeferredInlineBinaryOperation::JumpToConstantRhs(Condition cond,
+                                                      Smi* smi_value) {
+  smi_value_ = smi_value;
+  __ j(cond, &constant_rhs_);
+}
+
+
 void DeferredInlineBinaryOperation::Generate() {
-  Label done;
-  if (CpuFeatures::IsSupported(SSE2) && ((op_ == Token::ADD) ||
-      (op_ ==Token::SUB) ||
-      (op_ == Token::MUL) ||
-      (op_ == Token::DIV))) {
+  // Registers are not saved implicitly for this stub, so we should not
+  // tread on the registers that were not passed to us.
+  if (CpuFeatures::IsSupported(SSE2) &&
+      ((op_ == Token::ADD) ||
+       (op_ == Token::SUB) ||
+       (op_ == Token::MUL) ||
+       (op_ == Token::DIV))) {
     CpuFeatures::Scope use_sse2(SSE2);
     Label call_runtime, after_alloc_failure;
     Label left_smi, right_smi, load_right, do_op;
@@ -1131,7 +1174,6 @@
     __ cvtsi2sd(xmm1, Operand(right_));
     __ SmiTag(right_);
     if (mode_ == OVERWRITE_RIGHT || mode_ == NO_OVERWRITE) {
-      Label alloc_failure;
       __ push(left_);
       __ AllocateHeapNumber(dst_, left_, no_reg, &after_alloc_failure);
       __ pop(left_);
@@ -1146,19 +1188,200 @@
       default: UNREACHABLE();
     }
     __ movdbl(FieldOperand(dst_, HeapNumber::kValueOffset), xmm0);
-    __ jmp(&done);
+    Exit();
+
 
     __ bind(&after_alloc_failure);
     __ pop(left_);
     __ bind(&call_runtime);
   }
+  // Register spilling is not done implicitly for this stub.
+  // We can't postpone it any more now though.
+  SaveRegisters();
+
   GenericBinaryOpStub stub(op_,
                            mode_,
                            NO_SMI_CODE_IN_STUB,
                            TypeInfo::Combine(left_info_, right_info_));
   stub.GenerateCall(masm_, left_, right_);
   if (!dst_.is(eax)) __ mov(dst_, eax);
-  __ bind(&done);
+  RestoreRegisters();
+  Exit();
+
+  if (non_smi_input_.is_linked() || constant_rhs_.is_linked()) {
+    GenerateNonSmiInput();
+  }
+  if (answer_out_of_range_.is_linked()) {
+    GenerateAnswerOutOfRange();
+  }
+}
+
+
+void DeferredInlineBinaryOperation::GenerateNonSmiInput() {
+  // We know at least one of the inputs was not a Smi.
+  // This is a third entry point into the deferred code.
+  // We may not overwrite left_ because we want to be able
+  // to call the handling code for non-smi answer and it
+  // might want to overwrite the heap number in left_.
+  ASSERT(!right_.is(dst_));
+  ASSERT(!left_.is(dst_));
+  ASSERT(!left_.is(right_));
+  // This entry point is used for bit ops where the right hand side
+  // is a constant Smi and the left hand side is a heap object.  It
+  // is also used for bit ops where both sides are unknown, but where
+  // at least one of them is a heap object.
+  bool rhs_is_constant = constant_rhs_.is_linked();
+  // We can't generate code for both cases.
+  ASSERT(!non_smi_input_.is_linked() || !constant_rhs_.is_linked());
+
+  if (FLAG_debug_code) {
+    __ int3();  // We don't fall through into this code.
+  }
+
+  __ bind(&non_smi_input_);
+
+  if (rhs_is_constant) {
+    __ bind(&constant_rhs_);
+    // In this case the input is a heap object and it is in the dst_ register.
+    // The left_ and right_ registers have not been initialized yet.
+    __ mov(right_, Immediate(smi_value_));
+    __ mov(left_, Operand(dst_));
+    if (!CpuFeatures::IsSupported(SSE2)) {
+      __ jmp(entry_label());
+      return;
+    } else {
+      CpuFeatures::Scope use_sse2(SSE2);
+      __ JumpIfNotNumber(dst_, left_info_, entry_label());
+      __ ConvertToInt32(dst_, left_, dst_, left_info_, entry_label());
+      __ SmiUntag(right_);
+    }
+  } else {
+    // We know we have SSE2 here because otherwise the label is not linked (see
+    // NonSmiInputLabel).
+    CpuFeatures::Scope use_sse2(SSE2);
+    // Handle the non-constant right hand side situation:
+    if (left_info_.IsSmi()) {
+      // Right is a heap object.
+      __ JumpIfNotNumber(right_, right_info_, entry_label());
+      __ ConvertToInt32(right_, right_, dst_, left_info_, entry_label());
+      __ mov(dst_, Operand(left_));
+      __ SmiUntag(dst_);
+    } else if (right_info_.IsSmi()) {
+      // Left is a heap object.
+      __ JumpIfNotNumber(left_, left_info_, entry_label());
+      __ ConvertToInt32(dst_, left_, dst_, left_info_, entry_label());
+      __ SmiUntag(right_);
+    } else {
+      // Here we don't know if it's one or both that is a heap object.
+      Label only_right_is_heap_object, got_both;
+      __ mov(dst_, Operand(left_));
+      __ SmiUntag(dst_, &only_right_is_heap_object);
+      // Left was a heap object.
+      __ JumpIfNotNumber(left_, left_info_, entry_label());
+      __ ConvertToInt32(dst_, left_, dst_, left_info_, entry_label());
+      __ SmiUntag(right_, &got_both);
+      // Both were heap objects.
+      __ rcl(right_, 1);  // Put tag back.
+      __ JumpIfNotNumber(right_, right_info_, entry_label());
+      __ ConvertToInt32(right_, right_, no_reg, left_info_, entry_label());
+      __ jmp(&got_both);
+      __ bind(&only_right_is_heap_object);
+      __ JumpIfNotNumber(right_, right_info_, entry_label());
+      __ ConvertToInt32(right_, right_, no_reg, left_info_, entry_label());
+      __ bind(&got_both);
+    }
+  }
+  ASSERT(op_ == Token::BIT_AND ||
+         op_ == Token::BIT_OR ||
+         op_ == Token::BIT_XOR ||
+         right_.is(ecx));
+  switch (op_) {
+    case Token::BIT_AND: __ and_(dst_, Operand(right_));  break;
+    case Token::BIT_OR:   __ or_(dst_, Operand(right_));  break;
+    case Token::BIT_XOR: __ xor_(dst_, Operand(right_));  break;
+    case Token::SHR:     __ shr_cl(dst_);  break;
+    case Token::SAR:     __ sar_cl(dst_);  break;
+    case Token::SHL:     __ shl_cl(dst_);  break;
+    default: UNREACHABLE();
+  }
+  if (op_ == Token::SHR) {
+    // Check that the *unsigned* result fits in a smi.  Neither of
+    // the two high-order bits can be set:
+    //  * 0x80000000: high bit would be lost when smi tagging.
+    //  * 0x40000000: this number would convert to negative when smi
+    //    tagging.
+    __ test(dst_, Immediate(0xc0000000));
+    __ j(not_zero, &answer_out_of_range_);
+  } else {
+    // Check that the *signed* result fits in a smi.
+    __ cmp(dst_, 0xc0000000);
+    __ j(negative, &answer_out_of_range_);
+  }
+  __ SmiTag(dst_);
+  Exit();
+}
+
+
+void DeferredInlineBinaryOperation::GenerateAnswerOutOfRange() {
+  Label after_alloc_failure2;
+  Label allocation_ok;
+  __ bind(&after_alloc_failure2);
+  // We have to allocate a number, causing a GC, while keeping hold of
+  // the answer in dst_.  The answer is not a Smi.  We can't just call the
+  // runtime shift function here because we already threw away the inputs.
+  __ xor_(left_, Operand(left_));
+  __ shl(dst_, 1);  // Put top bit in carry flag and Smi tag the low bits.
+  __ rcr(left_, 1);  // Rotate with carry.
+  __ push(dst_);   // Smi tagged low 31 bits.
+  __ push(left_);  // 0 or 0x80000000, which is Smi tagged in both cases.
+  __ CallRuntime(Runtime::kNumberAlloc, 0);
+  if (!left_.is(eax)) {
+    __ mov(left_, eax);
+  }
+  __ pop(right_);   // High bit.
+  __ pop(dst_);     // Low 31 bits.
+  __ shr(dst_, 1);  // Put 0 in top bit.
+  __ or_(dst_, Operand(right_));
+  __ jmp(&allocation_ok);
+
+  // This is the second entry point to the deferred code.  It is used only by
+  // the bit operations.
+  // The dst_ register has the answer.  It is not Smi tagged.  If mode_ is
+  // OVERWRITE_LEFT then left_ must contain either an overwritable heap number
+  // or a Smi.
+  // Put a heap number pointer in left_.
+  __ bind(&answer_out_of_range_);
+  SaveRegisters();
+  if (mode_ == OVERWRITE_LEFT) {
+    __ test(left_, Immediate(kSmiTagMask));
+    __ j(not_zero, &allocation_ok);
+  }
+  // This trashes right_.
+  __ AllocateHeapNumber(left_, right_, no_reg, &after_alloc_failure2);
+  __ bind(&allocation_ok);
+  if (CpuFeatures::IsSupported(SSE2) && op_ != Token::SHR) {
+    CpuFeatures::Scope use_sse2(SSE2);
+    ASSERT(Token::IsBitOp(op_));
+    // Signed conversion.
+    __ cvtsi2sd(xmm0, Operand(dst_));
+    __ movdbl(FieldOperand(left_, HeapNumber::kValueOffset), xmm0);
+  } else {
+    if (op_ == Token::SHR) {
+      __ push(Immediate(0));  // High word of unsigned value.
+      __ push(dst_);
+      __ fild_d(Operand(esp, 0));
+      __ Drop(2);
+    } else {
+      ASSERT(Token::IsBitOp(op_));
+      __ push(dst_);
+      __ fild_s(Operand(esp, 0));  // Signed conversion.
+      __ pop(dst_);
+    }
+    __ fstp_d(FieldOperand(left_, HeapNumber::kValueOffset));
+  }
+  __ mov(dst_, left_);
+  RestoreRegisters();
+  Exit();
 }
 
 
@@ -1499,10 +1722,25 @@
                                                   TypeInfo left_info,
                                                   TypeInfo right_info,
                                                   DeferredCode* deferred) {
+  JumpIfNotBothSmiUsingTypeInfo(left,
+                                right,
+                                scratch,
+                                left_info,
+                                right_info,
+                                deferred->entry_label());
+}
+
+
+void CodeGenerator::JumpIfNotBothSmiUsingTypeInfo(Register left,
+                                                  Register right,
+                                                  Register scratch,
+                                                  TypeInfo left_info,
+                                                  TypeInfo right_info,
+                                                  Label* on_not_smi) {
   if (left.is(right)) {
     if (!left_info.IsSmi()) {
       __ test(left, Immediate(kSmiTagMask));
-      deferred->Branch(not_zero);
+      __ j(not_zero, on_not_smi);
     } else {
       if (FLAG_debug_code) __ AbortIfNotSmi(left);
     }
@@ -1511,17 +1749,17 @@
       __ mov(scratch, left);
       __ or_(scratch, Operand(right));
       __ test(scratch, Immediate(kSmiTagMask));
-      deferred->Branch(not_zero);
+      __ j(not_zero, on_not_smi);
     } else {
       __ test(left, Immediate(kSmiTagMask));
-      deferred->Branch(not_zero);
+      __ j(not_zero, on_not_smi);
       if (FLAG_debug_code) __ AbortIfNotSmi(right);
     }
   } else {
     if (FLAG_debug_code) __ AbortIfNotSmi(left);
     if (!right_info.IsSmi()) {
       __ test(right, Immediate(kSmiTagMask));
-      deferred->Branch(not_zero);
+      __ j(not_zero, on_not_smi);
     } else {
       if (FLAG_debug_code) __ AbortIfNotSmi(right);
     }
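
The rewritten checks above all use the standard one-test trick: the smi tag is
the low bit and is 0 for smis, so OR-ing the two words and testing the mask
once answers "are both smis".  A sketch of the emitted predicate (constants
mirror the ia32 tagging scheme):

    #include <cstdint>

    const uint32_t kSmiTagMask = 1;  // low bit: 0 for a smi, 1 for a heap object

    // One test of the OR replaces two separate tag checks.
    bool BothSmis(uint32_t left, uint32_t right) {
      return ((left | right) & kSmiTagMask) == 0;
    }
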
@@ -1606,13 +1844,16 @@
     right->ToRegister();
     frame_->Spill(eax);
     frame_->Spill(edx);
+    // DeferredInlineBinaryOperation requires all the registers that it is
+    // told about to be spilled and distinct.
+    Result distinct_right = frame_->MakeDistinctAndSpilled(left, right);
 
     // Check that left and right are smi tagged.
     DeferredInlineBinaryOperation* deferred =
         new DeferredInlineBinaryOperation(op,
                                           (op == Token::DIV) ? eax : edx,
                                           left->reg(),
-                                          right->reg(),
+                                          distinct_right.reg(),
                                           left_type_info,
                                           right_type_info,
                                           overwrite_mode);
@@ -1695,15 +1936,23 @@
     left->ToRegister();
     ASSERT(left->is_register() && !left->reg().is(ecx));
     ASSERT(right->is_register() && right->reg().is(ecx));
+    if (left_type_info.IsSmi()) {
+      if (FLAG_debug_code) __ AbortIfNotSmi(left->reg());
+    }
+    if (right_type_info.IsSmi()) {
+      if (FLAG_debug_code) __ AbortIfNotSmi(right->reg());
+    }
 
     // We will modify right, it must be spilled.
     frame_->Spill(ecx);
+    // DeferredInlineBinaryOperation requires all the registers that it is told
+    // about to be spilled and distinct.  We know that right is ecx and left is
+    // not ecx.
+    frame_->Spill(left->reg());
 
     // Use a fresh answer register to avoid spilling the left operand.
     answer = allocator_->Allocate();
     ASSERT(answer.is_valid());
-    // Check that both operands are smis using the answer register as a
-    // temporary.
     DeferredInlineBinaryOperation* deferred =
         new DeferredInlineBinaryOperation(op,
                                           answer.reg(),
@@ -1712,55 +1961,28 @@
                                           left_type_info,
                                           right_type_info,
                                           overwrite_mode);
+    JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(), answer.reg(),
+                                  left_type_info, right_type_info,
+                                  deferred->NonSmiInputLabel());
 
-    Label do_op, left_nonsmi;
-    // If right is a smi we make a fast case if left is either a smi
-    // or a heapnumber.
-    if (CpuFeatures::IsSupported(SSE2) && right_type_info.IsSmi()) {
-      CpuFeatures::Scope use_sse2(SSE2);
-      __ mov(answer.reg(), left->reg());
-      // Fast case - both are actually smis.
-      if (!left_type_info.IsSmi()) {
-        __ test(answer.reg(), Immediate(kSmiTagMask));
-        __ j(not_zero, &left_nonsmi);
-      } else {
-        if (FLAG_debug_code) __ AbortIfNotSmi(left->reg());
-      }
-      if (FLAG_debug_code) __ AbortIfNotSmi(right->reg());
-      __ SmiUntag(answer.reg());
-      __ jmp(&do_op);
+    // Untag both operands.
+    __ mov(answer.reg(), left->reg());
+    __ SmiUntag(answer.reg());
+    __ SmiUntag(right->reg());  // Right is ecx.
 
-      __ bind(&left_nonsmi);
-      // Branch if not a heapnumber.
-      __ cmp(FieldOperand(answer.reg(), HeapObject::kMapOffset),
-             Factory::heap_number_map());
-      deferred->Branch(not_equal);
-
-      // Load integer value into answer register using truncation.
-      __ cvttsd2si(answer.reg(),
-                   FieldOperand(answer.reg(), HeapNumber::kValueOffset));
-      // Branch if we do not fit in a smi.
-      __ cmp(answer.reg(), 0xc0000000);
-      deferred->Branch(negative);
-    } else {
-      JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(), answer.reg(),
-                                    left_type_info, right_type_info, deferred);
-
-      // Untag both operands.
-      __ mov(answer.reg(), left->reg());
-      __ SmiUntag(answer.reg());
-    }
-
-    __ bind(&do_op);
-    __ SmiUntag(ecx);
     // Perform the operation.
+    ASSERT(right->reg().is(ecx));
     switch (op) {
-      case Token::SAR:
+      case Token::SAR: {
         __ sar_cl(answer.reg());
-        // No checks of result necessary
+        if (!left_type_info.IsSmi()) {
+          // Check that the *signed* result fits in a smi.
+          __ cmp(answer.reg(), 0xc0000000);
+          deferred->JumpToAnswerOutOfRange(negative);
+        }
         break;
+      }
       case Token::SHR: {
-        Label result_ok;
         __ shr_cl(answer.reg());
         // Check that the *unsigned* result fits in a smi.  Neither of
         // the two high-order bits can be set:
@@ -1773,21 +1995,14 @@
         // case.  The low bit of the left argument may be lost, but only
         // in a case where it is dropped anyway.
         __ test(answer.reg(), Immediate(0xc0000000));
-        __ j(zero, &result_ok);
-        __ SmiTag(ecx);
-        deferred->Jump();
-        __ bind(&result_ok);
+        deferred->JumpToAnswerOutOfRange(not_zero);
         break;
       }
       case Token::SHL: {
-        Label result_ok;
         __ shl_cl(answer.reg());
         // Check that the *signed* result fits in a smi.
         __ cmp(answer.reg(), 0xc0000000);
-        __ j(positive, &result_ok);
-        __ SmiTag(ecx);
-        deferred->Jump();
-        __ bind(&result_ok);
+        deferred->JumpToAnswerOutOfRange(negative);
         break;
       }
       default:
@@ -1805,6 +2020,9 @@
   // Handle the other binary operations.
   left->ToRegister();
   right->ToRegister();
+  // DeferredInlineBinaryOperation requires all the registers that it is told
+  // about to be spilled.
+  Result distinct_right = frame_->MakeDistinctAndSpilled(left, right);
   // A newly allocated register answer is used to hold the answer.  The
   // registers containing left and right are not modified so they don't
   // need to be spilled in the fast case.
@@ -1816,12 +2034,16 @@
       new DeferredInlineBinaryOperation(op,
                                         answer.reg(),
                                         left->reg(),
-                                        right->reg(),
+                                        distinct_right.reg(),
                                         left_type_info,
                                         right_type_info,
                                         overwrite_mode);
-  JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(), answer.reg(),
-                                left_type_info, right_type_info, deferred);
+  Label non_smi_bit_op;
+  if (op != Token::BIT_OR) {
+    JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(), answer.reg(),
+                                  left_type_info, right_type_info,
+                                  deferred->NonSmiInputLabel());
+  }
 
   __ mov(answer.reg(), left->reg());
   switch (op) {
@@ -1864,6 +2086,8 @@
 
     case Token::BIT_OR:
       __ or_(answer.reg(), Operand(right->reg()));
+      __ test(answer.reg(), Immediate(kSmiTagMask));
+      __ j(not_zero, deferred->NonSmiInputLabel());
       break;
 
     case Token::BIT_AND:
@@ -1878,6 +2102,7 @@
       UNREACHABLE();
       break;
   }
+
   deferred->BindExit();
   left->Unuse();
   right->Unuse();
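
The 0xc0000000 comparisons in the shift cases above implement the smi range
check: an ia32 smi carries a 31-bit signed payload, so a signed result fits
only in [-2^30, 2^30), and an unsigned SHR result must have both high bits
clear.  The two predicates, restated in C++ (a sketch, not the emitted code):

    #include <cstdint>

    // Bit 31 of (r - 0xc0000000) is set exactly when r lies outside
    // [-2^30, 2^30), which is what "cmp r, 0xc0000000; j(negative)" tests.
    bool SignedFitsInSmi(int32_t r) {
      return ((static_cast<uint32_t>(r) - 0xc0000000u) & 0x80000000u) == 0;
    }

    // An unsigned result must be below 2^30, i.e. neither high bit set.
    bool UnsignedFitsInSmi(uint32_t r) {
      return (r & 0xc0000000u) == 0;
    }
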
@@ -2363,27 +2588,25 @@
     case Token::BIT_XOR:
     case Token::BIT_AND: {
       operand->ToRegister();
+      // DeferredInlineBinaryOperation requires all the registers that it is
+      // told about to be spilled.
       frame_->Spill(operand->reg());
-      DeferredCode* deferred = NULL;
-      if (reversed) {
-        deferred =
-            new DeferredInlineSmiOperationReversed(op,
-                                                   operand->reg(),
-                                                   smi_value,
-                                                   operand->reg(),
-                                                   operand->type_info(),
-                                                   overwrite_mode);
-      } else {
-        deferred =  new DeferredInlineSmiOperation(op,
-                                                   operand->reg(),
-                                                   operand->reg(),
-                                                   operand->type_info(),
-                                                   smi_value,
-                                                   overwrite_mode);
-      }
+      DeferredInlineBinaryOperation* deferred = NULL;
       if (!operand->type_info().IsSmi()) {
+        Result left = allocator()->Allocate();
+        ASSERT(left.is_valid());
+        Result right = allocator()->Allocate();
+        ASSERT(right.is_valid());
+        deferred = new DeferredInlineBinaryOperation(
+            op,
+            operand->reg(),
+            left.reg(),
+            right.reg(),
+            operand->type_info(),
+            TypeInfo::Smi(),
+            overwrite_mode == NO_OVERWRITE ? NO_OVERWRITE : OVERWRITE_LEFT);
         __ test(operand->reg(), Immediate(kSmiTagMask));
-        deferred->Branch(not_zero);
+        deferred->JumpToConstantRhs(not_zero, smi_value);
       } else if (FLAG_debug_code) {
         __ AbortIfNotSmi(operand->reg());
       }
@@ -2399,7 +2622,7 @@
           __ or_(Operand(operand->reg()), Immediate(value));
         }
       }
-      deferred->BindExit();
+      if (deferred != NULL) deferred->BindExit();
       answer = *operand;
       break;
     }
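
The constant cases above work directly on tagged words: because the smi tag
bit is 0, AND, OR and XOR of two tagged smis equal the tagged result of the
same operation on the raw values, so no untag/retag is required.  A quick
check of that identity:

    #include <cassert>
    #include <cstdint>

    uint32_t Tag(int32_t v) { return static_cast<uint32_t>(v) << 1; }  // smi-tag

    int main() {
      int32_t a = 0x123, b = 0x456;
      assert((Tag(a) | Tag(b)) == Tag(a | b));
      assert((Tag(a) & Tag(b)) == Tag(a & b));
      assert((Tag(a) ^ Tag(b)) == Tag(a ^ b));
    }
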
@@ -3212,10 +3435,8 @@
       __ j(zero, &build_args);
       __ CmpObjectType(eax, JS_FUNCTION_TYPE, ecx);
       __ j(not_equal, &build_args);
-      __ mov(ecx, FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset));
       Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply));
-      __ cmp(FieldOperand(ecx, SharedFunctionInfo::kCodeOffset),
-             Immediate(apply_code));
+      __ cmp(FieldOperand(eax, JSFunction::kCodeOffset), Immediate(apply_code));
       __ j(not_equal, &build_args);
 
       // Check that applicand is a function.
@@ -9467,6 +9688,11 @@
   __ mov(FieldOperand(eax, JSFunction::kContextOffset), esi);
   __ mov(FieldOperand(eax, JSFunction::kLiteralsOffset), ebx);
 
+  // Initialize the code pointer in the function to be the one
+  // found in the shared function info object.
+  __ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
+  __ mov(FieldOperand(eax, JSFunction::kCodeOffset), edx);
+
   // Return and remove the on-stack parameter.
   __ ret(1 * kPointerSize);
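
The kCodeOffset store above is what allows the other sites in this patch (the
FunctionApply comparison, InvokeFunction, the builtin check) to drop a level
of indirection.  Schematically (hypothetical structs, not the real layouts):

    struct Code;
    struct SharedFunctionInfo { Code* code; };
    struct JSFunction {
      Code* code;                  // cached copy, filled in at closure creation
      SharedFunctionInfo* shared;
    };

    // Before: two dependent loads via shared().  After: one.
    Code* EntryPoint(JSFunction* f) { return f->code; }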
 
diff --git a/src/ia32/codegen-ia32.h b/src/ia32/codegen-ia32.h
index 2368b23..81a5da1 100644
--- a/src/ia32/codegen-ia32.h
+++ b/src/ia32/codegen-ia32.h
@@ -530,7 +530,7 @@
 
   // Emits code sequence that jumps to deferred code if the inputs
   // are not both smis.  Cannot be in MacroAssembler because it takes
-  // advantage of TypeInfo to skip unneeded checks.
+  // a deferred code object.
   void JumpIfNotBothSmiUsingTypeInfo(Register left,
                                      Register right,
                                      Register scratch,
@@ -538,6 +538,15 @@
                                      TypeInfo right_info,
                                      DeferredCode* deferred);
 
+  // Emits code sequence that jumps to the label if the inputs
+  // are not both smis.
+  void JumpIfNotBothSmiUsingTypeInfo(Register left,
+                                     Register right,
+                                     Register scratch,
+                                     TypeInfo left_info,
+                                     TypeInfo right_info,
+                                     Label* on_not_smi);
+
   // If possible, combine two constant smi values using op to produce
   // a smi result, and push it on the virtual frame, all at compile time.
   // Returns true if it succeeds.  Otherwise it has no effect.
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index d0eeb77..37b6436 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -377,6 +377,12 @@
 }
 
 
+void MacroAssembler::AbortIfSmi(Register object) {
+  test(object, Immediate(kSmiTagMask));
+  Assert(not_equal, "Operand is a smi");
+}
+
+
 void MacroAssembler::EnterFrame(StackFrame::Type type) {
   push(ebp);
   mov(ebp, Operand(esp));
@@ -1292,7 +1298,7 @@
   mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
   mov(ebx, FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
   SmiUntag(ebx);
-  mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
+  mov(edx, FieldOperand(edi, JSFunction::kCodeOffset));
   lea(edx, FieldOperand(edx, Code::kHeaderSize));
 
   ParameterCount expected(ebx);
@@ -1344,8 +1350,7 @@
     // Make sure the code objects in the builtins object and in the
     // builtin function are the same.
     push(target);
-    mov(target, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
-    mov(target, FieldOperand(target, SharedFunctionInfo::kCodeOffset));
+    mov(target, FieldOperand(edi, JSFunction::kCodeOffset));
     cmp(target, Operand(esp, 0));
     Assert(equal, "Builtin code object changed");
     pop(target);
@@ -1510,6 +1515,61 @@
 }
 
 
+void MacroAssembler::JumpIfNotNumber(Register reg,
+                                     TypeInfo info,
+                                     Label* on_not_number) {
+  if (FLAG_debug_code) AbortIfSmi(reg);
+  if (!info.IsNumber()) {
+    cmp(FieldOperand(reg, HeapObject::kMapOffset),
+        Factory::heap_number_map());
+    j(not_equal, on_not_number);
+  }
+}
+
+
+void MacroAssembler::ConvertToInt32(Register dst,
+                                    Register source,
+                                    Register scratch,
+                                    TypeInfo info,
+                                    Label* on_not_int32) {
+  if (FLAG_debug_code) {
+    AbortIfSmi(source);
+    AbortIfNotNumber(source);
+  }
+  if (info.IsInteger32()) {
+    cvttsd2si(dst, FieldOperand(source, HeapNumber::kValueOffset));
+  } else {
+    Label done;
+    bool push_pop = (scratch.is(no_reg) && dst.is(source));
+    ASSERT(!scratch.is(source));
+    if (push_pop) {
+      push(dst);
+      scratch = dst;
+    }
+    if (scratch.is(no_reg)) scratch = dst;
+    cvttsd2si(scratch, FieldOperand(source, HeapNumber::kValueOffset));
+    cmp(scratch, 0x80000000u);
+    if (push_pop || dst.is(source)) {
+      j(not_equal, &done);
+      // Out of range: restore dst if it was pushed, then bail out.
+      if (push_pop) pop(dst);
+      jmp(on_not_int32);
+    } else {
+      j(equal, on_not_int32);
+    }
+
+    bind(&done);
+    if (push_pop) {
+      add(Operand(esp), Immediate(kPointerSize));  // Pop.
+    }
+    if (!scratch.is(dst)) {
+      mov(dst, scratch);
+    }
+  }
+}
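
The cmp against 0x80000000 above works because cvttsd2si produces that
"integer indefinite" value for NaN and for doubles outside int32 range, so a
single comparison catches every failure, at the cost of also rejecting the
legitimate minimum int32 (as the header comment notes).  A model of the check
(the cast's overflow behavior here is the x86 truncation semantics, which
portable C++ does not guarantee):

    #include <cstdint>

    bool TruncateToInt32(double d, int32_t* out) {
      int32_t r = static_cast<int32_t>(d);  // models cvttsd2si truncation
      if (static_cast<uint32_t>(r) == 0x80000000u) return false;  // NaN/overflow/min
      *out = r;
      return true;
    }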
+
+
 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
     Register instance_type,
     Register scratch,
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index a17a2b4..0b16f0d 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -29,6 +29,7 @@
 #define V8_IA32_MACRO_ASSEMBLER_IA32_H_
 
 #include "assembler.h"
+#include "type-info.h"
 
 namespace v8 {
 namespace internal {
@@ -225,12 +226,44 @@
     sar(reg, kSmiTagSize);
   }
 
+  // Modifies the register even if it does not contain a Smi!
+  void SmiUntag(Register reg, TypeInfo info, Label* non_smi) {
+    ASSERT(kSmiTagSize == 1);
+    sar(reg, kSmiTagSize);
+    // Only emit the check when the type is not statically known to be a smi.
+    if (!info.IsSmi()) {
+      ASSERT(kSmiTag == 0);
+      j(carry, non_smi);
+    }
+  }
+
+  // Modifies the register even if it does not contain a Smi!
+  void SmiUntag(Register reg, Label* is_smi) {
+    ASSERT(kSmiTagSize == 1);
+    sar(reg, kSmiTagSize);
+    ASSERT(kSmiTag == 0);
+    j(not_carry, is_smi);
+  }
+
+  // Assumes input is a heap object.
+  void JumpIfNotNumber(Register reg, TypeInfo info, Label* on_not_number);
+
+  // Assumes input is a heap number.  Jumps to on_not_int32 on values outside
+  // int32 range and on the minimum negative int32.  Ignores fractional parts.
+  void ConvertToInt32(Register dst,
+                      Register src,      // Can be the same as dst.
+                      Register scratch,  // Can be no_reg or dst, but not src.
+                      TypeInfo info,
+                      Label* on_not_int32);
+
   // Abort execution if argument is not a number. Used in debug code.
   void AbortIfNotNumber(Register object);
 
   // Abort execution if argument is not a smi. Used in debug code.
   void AbortIfNotSmi(Register object);
 
+  // Abort execution if argument is a smi. Used in debug code.
+  void AbortIfSmi(Register object);
+
   // ---------------------------------------------------------------------------
   // Exception handling
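
The SmiUntag overloads declared above fold the tag check into the untagging
itself: "sar reg, 1" shifts the tag bit into the carry flag, so one
instruction both untags and reveals whether the word was a smi.  Modeled in
C++ (assumes kSmiTag == 0 and kSmiTagSize == 1):

    #include <cstdint>

    // Returns true iff the word was a smi.  *reg is arithmetically shifted
    // right either way, mirroring "modifies the register even if it does not
    // contain a Smi".
    bool UntagAndCheck(uint32_t* reg) {
      bool tag_bit = (*reg & 1) != 0;  // the bit sar moves into the carry flag
      *reg = static_cast<uint32_t>(static_cast<int32_t>(*reg) >> 1);
      return !tag_bit;
    }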
 
diff --git a/src/ia32/virtual-frame-ia32.h b/src/ia32/virtual-frame-ia32.h
index e00626b..b9faa46 100644
--- a/src/ia32/virtual-frame-ia32.h
+++ b/src/ia32/virtual-frame-ia32.h
@@ -139,6 +139,22 @@
     if (is_used(reg)) SpillElementAt(register_location(reg));
   }
 
+  // Make the two registers distinct and spill them.  If they were already
+  // distinct, returns the second register unchanged; otherwise allocates a
+  // fresh register, copies the second value into it, and returns that as the
+  // new second register.
+  Result MakeDistinctAndSpilled(Result* left, Result* right) {
+    Spill(left->reg());
+    Spill(right->reg());
+    if (left->reg().is(right->reg())) {
+      RegisterAllocator* allocator = cgen()->allocator();
+      Result fresh = allocator->Allocate();
+      ASSERT(fresh.is_valid());
+      masm()->mov(fresh.reg(), right->reg());
+      return fresh;
+    }
+    return *right;
+  }
+
   // Spill all occurrences of an arbitrary register if possible.  Return the
   // register spilled or no_reg if it was not possible to free any register
   // (ie, they all have frame-external references).
diff --git a/src/list-inl.h b/src/list-inl.h
index e41db11..e277bc8 100644
--- a/src/list-inl.h
+++ b/src/list-inl.h
@@ -127,6 +127,13 @@
 
 
 template<typename T, class P>
+template<class Visitor>
+void List<T, P>::Iterate(Visitor* visitor) {
+  for (int i = 0; i < length_; i++) visitor->Apply(&data_[i]);
+}
+
+
+template<typename T, class P>
 bool List<T, P>::Contains(const T& elm) {
   for (int i = 0; i < length_; i++) {
     if (data_[i] == elm)
diff --git a/src/list.h b/src/list.h
index d3c2767..9abf61c 100644
--- a/src/list.h
+++ b/src/list.h
@@ -117,6 +117,8 @@
 
   // Iterate through all list entries, starting at index 0.
   void Iterate(void (*callback)(T* x));
+  template<class Visitor>
+  void Iterate(Visitor* visitor);
 
   // Sort all list entries (using QuickSort)
   void Sort(int (*cmp)(const T* x, const T* y));
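
The new template overload lets any object with an Apply(T*) method walk the
list, which the heap profiler below uses in place of bare function pointers.
A hypothetical visitor, purely for illustration:

    class Summer {
     public:
      Summer() : sum_(0) {}
      void Apply(int* x) { sum_ += *x; }
      int sum() const { return sum_; }
     private:
      int sum_;
    };

    // List<int> list; ...populate...; Summer s; list.Iterate(&s);
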
diff --git a/src/mips/simulator-mips.cc b/src/mips/simulator-mips.cc
index 886b9e4..57bed6a 100644
--- a/src/mips/simulator-mips.cc
+++ b/src/mips/simulator-mips.cc
@@ -606,7 +606,7 @@
 
 void Simulator::set_fpu_register_double(int fpureg, double value) {
   ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters) && ((fpureg % 2) == 0));
-  *v8i::BitCast<double*, int32_t*>(&FPUregisters_[fpureg]) = value;
+  *v8i::BitCast<double*>(&FPUregisters_[fpureg]) = value;
 }
 
 
@@ -627,8 +627,7 @@
 
 double Simulator::get_fpu_register_double(int fpureg) const {
   ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters) && ((fpureg % 2) == 0));
-  return *v8i::BitCast<double*, int32_t*>(
-      const_cast<int32_t*>(&FPUregisters_[fpureg]));
+  return *v8i::BitCast<double*>(const_cast<int32_t*>(&FPUregisters_[fpureg]));
 }
 
 // Raw access to the PC register.
@@ -903,7 +902,7 @@
           break;
         case MFHC1:
           fp_out = get_fpu_register_double(fs_reg);
-          alu_out = *v8i::BitCast<int32_t*, double*>(&fp_out);
+          alu_out = *v8i::BitCast<int32_t*>(&fp_out);
           break;
         case MTC1:
         case MTHC1:
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 101096d..c81f4ab 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -2694,12 +2694,14 @@
 
 
 Code* JSFunction::code() {
-  return shared()->code();
+  return Code::cast(READ_FIELD(this, kCodeOffset));
 }
 
 
 void JSFunction::set_code(Code* value) {
-  shared()->set_code(value);
+  // Skip the write barrier because code is never in new space.
+  ASSERT(!Heap::InNewSpace(value));
+  WRITE_FIELD(this, kCodeOffset, value);
 }
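
Skipping the write barrier is sound here because the generational barrier only
needs to record old-to-new pointers, and Code objects are never allocated in
new space; the ASSERT documents exactly that invariant.  In sketch form
(assumed helper name, for illustration):

    // A plain store with no remembered-set update; safe only while the stored
    // object can never live in new space.
    void SetCodeField(void** slot, void* code_object) {
      // ASSERT(!Heap::InNewSpace(code_object));
      *slot = code_object;
    }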
 
 
@@ -2771,7 +2773,7 @@
 
 
 bool JSFunction::is_compiled() {
-  return shared()->is_compiled();
+  return code()->kind() != Code::STUB;
 }
 
 
diff --git a/src/objects.cc b/src/objects.cc
index 4e20959..aabb041 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -5823,16 +5823,24 @@
   CustomArguments args(interceptor->data(), receiver, this);
   v8::AccessorInfo info(args.end());
   if (!interceptor->query()->IsUndefined()) {
-    v8::IndexedPropertyQuery query =
-        v8::ToCData<v8::IndexedPropertyQuery>(interceptor->query());
+    v8::IndexedPropertyQueryImpl query =
+        v8::ToCData<v8::IndexedPropertyQueryImpl>(interceptor->query());
     LOG(ApiIndexedPropertyAccess("interceptor-indexed-has", this, index));
-    v8::Handle<v8::Boolean> result;
+    v8::Handle<v8::Value> result;
     {
       // Leaving JavaScript.
       VMState state(EXTERNAL);
       result = query(index, info);
     }
-    if (!result.IsEmpty()) return result->IsTrue();
+    if (!result.IsEmpty()) {
+      // The IsBoolean check will be removed once the transition to the new
+      // API is complete.
+      if (result->IsBoolean()) {
+        return result->IsTrue();
+      } else {
+        ASSERT(result->IsInt32());
+        return true;  // Absence of a property is signaled by an empty handle.
+      }
+    }
   } else if (!interceptor->getter()->IsUndefined()) {
     v8::IndexedPropertyGetter getter =
         v8::ToCData<v8::IndexedPropertyGetter>(interceptor->getter());
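
Under the new query API the interceptor can distinguish "absent" (an empty
handle) from "present with attributes" (an Int32), where the old API could
only answer a Boolean.  A hedged sketch of an embedder callback written
against the new convention (hypothetical code, not part of this patch):

    v8::Handle<v8::Value> QueryIndexed(uint32_t index,
                                       const v8::AccessorInfo& info) {
      if (index < 10) return v8::Integer::New(v8::None);  // present, no attributes
      return v8::Handle<v8::Value>();                     // absent
    }
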
diff --git a/src/objects.h b/src/objects.h
index 8fa251e..1ca3003 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -3608,7 +3608,9 @@
   static Context* GlobalContextFromLiterals(FixedArray* literals);
 
   // Layout descriptors.
-  static const int kPrototypeOrInitialMapOffset = JSObject::kHeaderSize;
+  static const int kCodeOffset = JSObject::kHeaderSize;
+  static const int kPrototypeOrInitialMapOffset =
+      kCodeOffset + kPointerSize;
   static const int kSharedFunctionInfoOffset =
       kPrototypeOrInitialMapOffset + kPointerSize;
   static const int kContextOffset = kSharedFunctionInfoOffset + kPointerSize;
diff --git a/src/parser.cc b/src/parser.cc
index e935b7b..1df7c21 100644
--- a/src/parser.cc
+++ b/src/parser.cc
@@ -3587,10 +3587,8 @@
   // { ... , get foo() { ... }, ... , set foo(v) { ... v ... } , ... }
   // We have already read the "get" or "set" keyword.
   Token::Value next = Next();
-  if (next == Token::IDENTIFIER ||
-      next == Token::STRING ||
-      next == Token::NUMBER ||
-      Token::IsKeyword(next)) {
+  // TODO(820): Allow NUMBER and STRING as well (and handle array indices).
+  if (next == Token::IDENTIFIER || Token::IsKeyword(next)) {
     Handle<String> name =
         factory()->LookupSymbol(scanner_.literal_string(),
                                 scanner_.literal_length());
@@ -3652,8 +3650,7 @@
             factory()->LookupSymbol(scanner_.literal_string(),
                                     scanner_.literal_length());
         uint32_t index;
-        if (!string.is_null() &&
-            string->AsArrayIndex(&index)) {
+        if (!string.is_null() && string->AsArrayIndex(&index)) {
           key = NewNumberLiteral(index);
           break;
         }
diff --git a/src/profile-generator-inl.h b/src/profile-generator-inl.h
index ea9bc98..0c50581 100644
--- a/src/profile-generator-inl.h
+++ b/src/profile-generator-inl.h
@@ -97,13 +97,6 @@
 }
 
 
-bool CpuProfilesCollection::is_last_profile() {
-  // Called from VM thread, and only it can mutate the list,
-  // so no locking is needed here.
-  return current_profiles_.length() == 1;
-}
-
-
 const char* CpuProfilesCollection::GetFunctionName(String* name) {
   return GetFunctionName(GetName(name));
 }
@@ -130,17 +123,6 @@
   }
 }
 
-
-template<class Visitor>
-void HeapEntriesMap::Apply(Visitor* visitor) {
-  for (HashMap::Entry* p = entries_.Start();
-       p != NULL;
-       p = entries_.Next(p)) {
-    if (!IsAlias(p->value))
-      visitor->Apply(reinterpret_cast<HeapEntry*>(p->value));
-  }
-}
-
 } }  // namespace v8::internal
 
 #endif  // ENABLE_LOGGING_AND_PROFILING
diff --git a/src/profile-generator.cc b/src/profile-generator.cc
index 7054b12..cd46bad 100644
--- a/src/profile-generator.cc
+++ b/src/profile-generator.cc
@@ -542,13 +542,6 @@
 }
 
 
-CpuProfile* CpuProfilesCollection::StopProfiling(int security_token_id,
-                                                 String* title,
-                                                 double actual_sampling_rate) {
-  return StopProfiling(security_token_id, GetName(title), actual_sampling_rate);
-}
-
-
 CpuProfile* CpuProfilesCollection::GetProfile(int security_token_id,
                                               unsigned uid) {
   HashMap::Entry* entry = profiles_uids_.Lookup(reinterpret_cast<void*>(uid),
@@ -574,6 +567,15 @@
 }
 
 
+bool CpuProfilesCollection::IsLastProfile(const char* title) {
+  // Called from VM thread, and only it can mutate the list,
+  // so no locking is needed here.
+  if (current_profiles_.length() != 1) return false;
+  return StrLength(title) == 0
+      || strcmp(current_profiles_[0]->title(), title) == 0;
+}
+
+
 int CpuProfilesCollection::TokenToIndex(int security_token_id) {
   ASSERT(TokenEnumerator::kNoSecurityToken == -1);
   return security_token_id + 1;  // kNoSecurityToken -> 0, 0 -> 1, ...
@@ -798,83 +800,102 @@
 }
 
 
-HeapGraphEdge::HeapGraphEdge(Type type,
-                             const char* name,
-                             HeapEntry* from,
-                             HeapEntry* to)
-    : type_(type), name_(name), from_(from), to_(to) {
-  ASSERT(type_ == CONTEXT_VARIABLE || type_ == PROPERTY || type_ == INTERNAL);
+void HeapGraphEdge::Init(
+    int child_index, Type type, const char* name, HeapEntry* to) {
+  ASSERT(type == kContextVariable || type == kProperty || type == kInternal);
+  child_index_ = child_index;
+  type_ = type;
+  name_ = name;
+  to_ = to;
 }
 
 
-HeapGraphEdge::HeapGraphEdge(int index,
-                             HeapEntry* from,
-                             HeapEntry* to)
-    : type_(ELEMENT), index_(index), from_(from), to_(to) {
+void HeapGraphEdge::Init(int child_index, int index, HeapEntry* to) {
+  child_index_ = child_index;
+  type_ = kElement;
+  index_ = index;
+  to_ = to;
 }
 
 
-static void DeleteHeapGraphEdge(HeapGraphEdge** edge_ptr) {
-  delete *edge_ptr;
+HeapEntry* HeapGraphEdge::From() {
+  return reinterpret_cast<HeapEntry*>(this - child_index_) - 1;
 }
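
From() works because of the packed layout introduced in this patch: each
HeapEntry is immediately followed in memory by its children edge array, so an
edge can recover its owner from its own child index instead of storing a back
pointer, saving a word per edge.  A standalone model of the arithmetic
(hypothetical types, illustrative only):

    #include <cassert>
    #include <cstdlib>
    #include <new>

    struct Entry { int id; };
    struct Edge {
      int child_index;
      Entry* From() { return reinterpret_cast<Entry*>(this - child_index) - 1; }
    };

    int main() {
      // [ Entry | edge 0 | edge 1 ] packed into one allocation.
      char* raw =
          static_cast<char*>(std::malloc(sizeof(Entry) + 2 * sizeof(Edge)));
      Entry* entry = new (raw) Entry{42};
      Edge* edges = reinterpret_cast<Edge*>(raw + sizeof(Entry));
      edges[1].child_index = 1;
      assert(edges[1].From() == entry);
      std::free(raw);
    }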
 
 
-static void DeleteHeapGraphPath(HeapGraphPath** path_ptr) {
-  delete *path_ptr;
+void HeapEntry::Init(HeapSnapshot* snapshot,
+                     int children_count,
+                     int retainers_count) {
+  Init(snapshot, kInternal, "", 0, 0, children_count, retainers_count);
 }
 
 
-HeapEntry::~HeapEntry() {
-  children_.Iterate(DeleteHeapGraphEdge);
-  retaining_paths_.Iterate(DeleteHeapGraphPath);
+void HeapEntry::Init(HeapSnapshot* snapshot,
+                     Type type,
+                     const char* name,
+                     uint64_t id,
+                     int self_size,
+                     int children_count,
+                     int retainers_count) {
+  snapshot_ = snapshot;
+  type_ = type;
+  painted_ = kUnpainted;
+  calculated_data_index_ = kNoCalculatedData;
+  name_ = name;
+  id_ = id;
+  self_size_ = self_size;
+  children_count_ = children_count;
+  retainers_count_ = retainers_count;
 }
 
 
-void HeapEntry::AddEdge(HeapGraphEdge* edge) {
-  children_.Add(edge);
-  edge->to()->retainers_.Add(edge);
+void HeapEntry::SetNamedReference(HeapGraphEdge::Type type,
+                                  int child_index,
+                                  const char* name,
+                                  HeapEntry* entry,
+                                  int retainer_index) {
+  children_arr()[child_index].Init(child_index, type, name, entry);
+  entry->retainers_arr()[retainer_index] = children_arr() + child_index;
 }
 
 
-void HeapEntry::SetClosureReference(const char* name, HeapEntry* entry) {
-  AddEdge(
-      new HeapGraphEdge(HeapGraphEdge::CONTEXT_VARIABLE, name, this, entry));
+void HeapEntry::SetElementReference(
+    int child_index, int index, HeapEntry* entry, int retainer_index) {
+  children_arr()[child_index].Init(child_index, index, entry);
+  entry->retainers_arr()[retainer_index] = children_arr() + child_index;
 }
 
 
-void HeapEntry::SetElementReference(int index, HeapEntry* entry) {
-  AddEdge(new HeapGraphEdge(index, this, entry));
+void HeapEntry::SetUnidirElementReference(
+    int child_index, int index, HeapEntry* entry) {
+  children_arr()[child_index].Init(child_index, index, entry);
 }
 
 
-void HeapEntry::SetInternalReference(const char* name, HeapEntry* entry) {
-  AddEdge(new HeapGraphEdge(HeapGraphEdge::INTERNAL, name, this, entry));
+int HeapEntry::ReachableSize() {
+  if (calculated_data_index_ == kNoCalculatedData) {
+    calculated_data_index_ = snapshot_->AddCalculatedData();
+  }
+  return snapshot_->GetCalculatedData(
+      calculated_data_index_).ReachableSize(this);
 }
 
 
-void HeapEntry::SetPropertyReference(const char* name, HeapEntry* entry) {
-  AddEdge(new HeapGraphEdge(HeapGraphEdge::PROPERTY, name, this, entry));
+int HeapEntry::RetainedSize() {
+  if (calculated_data_index_ == kNoCalculatedData) {
+    calculated_data_index_ = snapshot_->AddCalculatedData();
+  }
+  return snapshot_->GetCalculatedData(
+      calculated_data_index_).RetainedSize(this);
 }
 
 
-void HeapEntry::SetAutoIndexReference(HeapEntry* entry) {
-  SetElementReference(next_auto_index_++, entry);
-}
-
-
-void HeapEntry::SetUnidirAutoIndexReference(HeapEntry* entry) {
-  children_.Add(new HeapGraphEdge(next_auto_index_++, this, entry));
-}
-
-
-int HeapEntry::TotalSize() {
-  return total_size_ != kUnknownSize ? total_size_ : CalculateTotalSize();
-}
-
-
-int HeapEntry::NonSharedTotalSize() {
-  return non_shared_total_size_ != kUnknownSize ?
-      non_shared_total_size_ : CalculateNonSharedTotalSize();
+List<HeapGraphPath*>* HeapEntry::GetRetainingPaths() {
+  if (calculated_data_index_ == kNoCalculatedData) {
+    calculated_data_index_ = snapshot_->AddCalculatedData();
+  }
+  return snapshot_->GetCalculatedData(
+      calculated_data_index_).GetRetainingPaths(this);
 }
 
 
@@ -882,16 +903,16 @@
 void HeapEntry::ApplyAndPaintAllReachable(Visitor* visitor) {
   List<HeapEntry*> list(10);
   list.Add(this);
-  this->PaintReachable();
+  this->paint_reachable();
   visitor->Apply(this);
   while (!list.is_empty()) {
     HeapEntry* entry = list.RemoveLast();
-    const int children_count = entry->children_.length();
-    for (int i = 0; i < children_count; ++i) {
-      HeapEntry* child = entry->children_[i]->to();
+    Vector<HeapGraphEdge> children = entry->children();
+    for (int i = 0; i < children.length(); ++i) {
+      HeapEntry* child = children[i].to();
       if (!child->painted_reachable()) {
         list.Add(child);
-        child->PaintReachable();
+        child->paint_reachable();
         visitor->Apply(child);
       }
     }
@@ -910,78 +931,158 @@
 }
 
 
-class TotalSizeCalculator {
- public:
-  TotalSizeCalculator()
-      : total_size_(0) {
+void HeapEntry::Print(int max_depth, int indent) {
+  OS::Print("%6d %6d %6d [%ld] ",
+            self_size(), ReachableSize(), RetainedSize(), id_);
+  if (type() != kString) {
+    OS::Print("%s %.40s\n", TypeAsString(), name_);
+  } else {
+    OS::Print("\"");
+    const char* c = name_;
+    while (*c && (c - name_) <= 40) {
+      if (*c != '\n')
+        OS::Print("%c", *c);
+      else
+        OS::Print("\\n");
+      ++c;
+    }
+    OS::Print("\"\n");
   }
-
-  int total_size() const { return total_size_; }
-
-  void Apply(HeapEntry* entry) {
-    total_size_ += entry->self_size();
+  if (--max_depth == 0) return;
+  Vector<HeapGraphEdge> ch = children();
+  for (int i = 0; i < ch.length(); ++i) {
+    HeapGraphEdge& edge = ch[i];
+    switch (edge.type()) {
+      case HeapGraphEdge::kContextVariable:
+        OS::Print("  %*c #%s: ", indent, ' ', edge.name());
+        break;
+      case HeapGraphEdge::kElement:
+        OS::Print("  %*c %d: ", indent, ' ', edge.index());
+        break;
+      case HeapGraphEdge::kInternal:
+        OS::Print("  %*c $%s: ", indent, ' ', edge.name());
+        break;
+      case HeapGraphEdge::kProperty:
+        OS::Print("  %*c %s: ", indent, ' ', edge.name());
+        break;
+      default:
+        OS::Print("!!! unknown edge type: %d ", edge.type());
+    }
+    edge.to()->Print(max_depth, indent + 2);
   }
-
- private:
-  int total_size_;
-};
-
-int HeapEntry::CalculateTotalSize() {
-  snapshot_->ClearPaint();
-  TotalSizeCalculator calc;
-  ApplyAndPaintAllReachable(&calc);
-  total_size_ = calc.total_size();
-  return total_size_;
 }
 
 
-class NonSharedSizeCalculator {
+const char* HeapEntry::TypeAsString() {
+  switch (type()) {
+    case kInternal: return "/internal/";
+    case kObject: return "/object/";
+    case kClosure: return "/closure/";
+    case kString: return "/string/";
+    case kCode: return "/code/";
+    case kArray: return "/array/";
+    default: return "???";
+  }
+}
+
+
+int HeapEntry::EntriesSize(int entries_count,
+                           int children_count,
+                           int retainers_count) {
+  return sizeof(HeapEntry) * entries_count         // NOLINT
+      + sizeof(HeapGraphEdge) * children_count     // NOLINT
+      + sizeof(HeapGraphEdge*) * retainers_count;  // NOLINT
+}
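
A back-of-envelope check using the 32-bit sizes asserted later in this file
(HeapEntry 32 bytes, HeapGraphEdge 12 bytes, retainer slot 4 bytes): a
snapshot of 1000 entries with 3000 edges packs into a single 80 KB block
instead of thousands of separately heap-allocated nodes and edge lists.

    #include <cstdio>

    int main() {
      const int kEntry = 32, kEdge = 12, kRetainer = 4;
      int entries = 1000, children = 3000, retainers = 3000;
      std::printf("%d bytes\n",
                  kEntry * entries + kEdge * children + kRetainer * retainers);
      // Prints "80000 bytes".
    }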
+
+
+static void DeleteHeapGraphPath(HeapGraphPath** path_ptr) {
+  delete *path_ptr;
+}
+
+
+void HeapEntryCalculatedData::Dispose() {
+  if (retaining_paths_ != NULL) retaining_paths_->Iterate(DeleteHeapGraphPath);
+  delete retaining_paths_;
+}
+
+
+int HeapEntryCalculatedData::ReachableSize(HeapEntry* entry) {
+  if (reachable_size_ == kUnknownSize) CalculateSizes(entry);
+  return reachable_size_;
+}
+
+
+int HeapEntryCalculatedData::RetainedSize(HeapEntry* entry) {
+  if (retained_size_ == kUnknownSize) CalculateSizes(entry);
+  return retained_size_;
+}
+
+
+class ReachableSizeCalculator {
  public:
-  NonSharedSizeCalculator()
-      : non_shared_total_size_(0) {
+  ReachableSizeCalculator()
+      : reachable_size_(0) {
   }
 
-  int non_shared_total_size() const { return non_shared_total_size_; }
+  int reachable_size() const { return reachable_size_; }
 
   void Apply(HeapEntry* entry) {
-    if (entry->painted_reachable()) {
-      non_shared_total_size_ += entry->self_size();
+    reachable_size_ += entry->self_size();
+  }
+
+ private:
+  int reachable_size_;
+};
+
+
+class RetainedSizeCalculator {
+ public:
+  RetainedSizeCalculator()
+      : retained_size_(0) {
+  }
+
+  int retained_size() const { return retained_size_; }
+
+  void Apply(HeapEntry** entry_ptr) {
+    if ((*entry_ptr)->painted_reachable()) {
+      retained_size_ += (*entry_ptr)->self_size();
     }
   }
 
  private:
-  int non_shared_total_size_;
+  int retained_size_;
 };
 
-int HeapEntry::CalculateNonSharedTotalSize() {
-  // To calculate non-shared total size, first we paint all reachable
-  // nodes in one color, then we paint all nodes reachable from other
-  // nodes with a different color. Then we consider only nodes painted
-  // with the first color for calculating the total size.
-  snapshot_->ClearPaint();
-  PaintAllReachable();
+void HeapEntryCalculatedData::CalculateSizes(HeapEntry* entry) {
+  // To calculate retained size, first we paint all reachable nodes in
+  // one color (and calculate reachable size as a byproduct), then we
+  // paint (or re-paint) all nodes reachable from other nodes with a
+  // different color. Then we consider only nodes painted with the
+  // first color for calculating the retained size.
+  entry->snapshot()->ClearPaint();
+  ReachableSizeCalculator rch_size_calc;
+  entry->ApplyAndPaintAllReachable(&rch_size_calc);
+  reachable_size_ = rch_size_calc.reachable_size();
 
   List<HeapEntry*> list(10);
-  if (this != snapshot_->root()) {
-    list.Add(snapshot_->root());
-    snapshot_->root()->PaintReachableFromOthers();
+  HeapEntry* root = entry->snapshot()->root();
+  if (entry != root) {
+    list.Add(root);
+    root->paint_reachable_from_others();
   }
   while (!list.is_empty()) {
-    HeapEntry* entry = list.RemoveLast();
-    const int children_count = entry->children_.length();
-    for (int i = 0; i < children_count; ++i) {
-      HeapEntry* child = entry->children_[i]->to();
-      if (child != this && child->not_painted_reachable_from_others()) {
+    HeapEntry* curr = list.RemoveLast();
+    Vector<HeapGraphEdge> children = curr->children();
+    for (int i = 0; i < children.length(); ++i) {
+      HeapEntry* child = children[i].to();
+      if (child != entry && child->not_painted_reachable_from_others()) {
         list.Add(child);
-        child->PaintReachableFromOthers();
+        child->paint_reachable_from_others();
       }
     }
   }
 
-  NonSharedSizeCalculator calculator;
-  snapshot_->IterateEntries(&calculator);
-  non_shared_total_size_ = calculator.non_shared_total_size();
-  return non_shared_total_size_;
+  RetainedSizeCalculator ret_size_calc;
+  entry->snapshot()->IterateEntries(&ret_size_calc);
+  retained_size_ = ret_size_calc.retained_size();
 }
 
 
@@ -1019,128 +1120,37 @@
 };
 
 
-const List<HeapGraphPath*>* HeapEntry::GetRetainingPaths() {
-  if (retaining_paths_.length() == 0 && retainers_.length() != 0) {
+List<HeapGraphPath*>* HeapEntryCalculatedData::GetRetainingPaths(
+    HeapEntry* entry) {
+  if (retaining_paths_ == NULL) retaining_paths_ = new List<HeapGraphPath*>(4);
+  if (retaining_paths_->length() == 0 && entry->retainers().length() != 0) {
     CachedHeapGraphPath path;
-    FindRetainingPaths(this, &path);
+    FindRetainingPaths(entry, &path);
   }
-  return &retaining_paths_;
+  return retaining_paths_;
 }
 
 
-void HeapEntry::FindRetainingPaths(HeapEntry* node,
-                                   CachedHeapGraphPath* prev_path) {
-  for (int i = 0; i < node->retainers_.length(); ++i) {
-    HeapGraphEdge* ret_edge = node->retainers_[i];
-    if (prev_path->ContainsNode(ret_edge->from())) continue;
-    if (ret_edge->from() != snapshot_->root()) {
+void HeapEntryCalculatedData::FindRetainingPaths(
+    HeapEntry* entry,
+    CachedHeapGraphPath* prev_path) {
+  Vector<HeapGraphEdge*> retainers = entry->retainers();
+  for (int i = 0; i < retainers.length(); ++i) {
+    HeapGraphEdge* ret_edge = retainers[i];
+    if (prev_path->ContainsNode(ret_edge->From())) continue;
+    if (ret_edge->From() != entry->snapshot()->root()) {
       CachedHeapGraphPath path(*prev_path);
       path.Add(ret_edge);
-      FindRetainingPaths(ret_edge->from(), &path);
+      FindRetainingPaths(ret_edge->From(), &path);
     } else {
       HeapGraphPath* ret_path = new HeapGraphPath(*prev_path->path());
       ret_path->Set(0, ret_edge);
-      retaining_paths_.Add(ret_path);
+      retaining_paths_->Add(ret_path);
     }
   }
 }
 
 
-static void RemoveEdge(List<HeapGraphEdge*>* list, HeapGraphEdge* edge) {
-  for (int i = 0; i < list->length(); ) {
-    if (list->at(i) == edge) {
-      list->Remove(i);
-      return;
-    } else {
-      ++i;
-    }
-  }
-  UNREACHABLE();
-}
-
-
-void HeapEntry::RemoveChild(HeapGraphEdge* edge) {
-  RemoveEdge(&children_, edge);
-  delete edge;
-}
-
-
-void HeapEntry::RemoveRetainer(HeapGraphEdge* edge) {
-  RemoveEdge(&retainers_, edge);
-}
-
-
-void HeapEntry::CutEdges() {
-  for (int i = 0; i < children_.length(); ++i) {
-    HeapGraphEdge* edge = children_[i];
-    edge->to()->RemoveRetainer(edge);
-  }
-  children_.Iterate(DeleteHeapGraphEdge);
-  children_.Clear();
-
-  for (int i = 0; i < retainers_.length(); ++i) {
-    HeapGraphEdge* edge = retainers_[i];
-    edge->from()->RemoveChild(edge);
-  }
-  retainers_.Clear();
-}
-
-
-void HeapEntry::Print(int max_depth, int indent) {
-  OS::Print("%6d %6d %6d [%ld] ",
-            self_size_, TotalSize(), NonSharedTotalSize(), id_);
-  if (type_ != STRING) {
-    OS::Print("%s %.40s\n", TypeAsString(), name_);
-  } else {
-    OS::Print("\"");
-    const char* c = name_;
-    while (*c && (c - name_) <= 40) {
-      if (*c != '\n')
-        OS::Print("%c", *c);
-      else
-        OS::Print("\\n");
-      ++c;
-    }
-    OS::Print("\"\n");
-  }
-  if (--max_depth == 0) return;
-  const int children_count = children_.length();
-  for (int i = 0; i < children_count; ++i) {
-    HeapGraphEdge* edge = children_[i];
-    switch (edge->type()) {
-      case HeapGraphEdge::CONTEXT_VARIABLE:
-        OS::Print("  %*c #%s: ", indent, ' ', edge->name());
-        break;
-      case HeapGraphEdge::ELEMENT:
-        OS::Print("  %*c %d: ", indent, ' ', edge->index());
-        break;
-      case HeapGraphEdge::INTERNAL:
-        OS::Print("  %*c $%s: ", indent, ' ', edge->name());
-        break;
-      case HeapGraphEdge::PROPERTY:
-        OS::Print("  %*c %s: ", indent, ' ', edge->name());
-        break;
-      default:
-        OS::Print("!!! unknown edge type: %d ", edge->type());
-    }
-    edge->to()->Print(max_depth, indent + 2);
-  }
-}
-
-
-const char* HeapEntry::TypeAsString() {
-  switch (type_) {
-    case INTERNAL: return "/internal/";
-    case OBJECT: return "/object/";
-    case CLOSURE: return "/closure/";
-    case STRING: return "/string/";
-    case CODE: return "/code/";
-    case ARRAY: return "/array/";
-    default: return "???";
-  }
-}
-
-
 HeapGraphPath::HeapGraphPath(const List<HeapGraphEdge*>& path)
     : path_(path.length() + 1) {
   Add(NULL);
@@ -1151,21 +1161,21 @@
 
 
 void HeapGraphPath::Print() {
-  path_[0]->from()->Print(1, 0);
+  path_[0]->From()->Print(1, 0);
   for (int i = 0; i < path_.length(); ++i) {
     OS::Print(" -> ");
     HeapGraphEdge* edge = path_[i];
     switch (edge->type()) {
-      case HeapGraphEdge::CONTEXT_VARIABLE:
+      case HeapGraphEdge::kContextVariable:
         OS::Print("[#%s] ", edge->name());
         break;
-      case HeapGraphEdge::ELEMENT:
+      case HeapGraphEdge::kElement:
         OS::Print("[%d] ", edge->index());
         break;
-      case HeapGraphEdge::INTERNAL:
+      case HeapGraphEdge::kInternal:
         OS::Print("[$%s] ", edge->name());
         break;
-      case HeapGraphEdge::PROPERTY:
+      case HeapGraphEdge::kProperty:
         OS::Print("[%s] ", edge->name());
         break;
       default:
@@ -1177,76 +1187,27 @@
 }
 
 
-class IndexedReferencesExtractor : public ObjectVisitor {
- public:
-  IndexedReferencesExtractor(HeapSnapshot* snapshot, HeapEntry* parent)
-      : snapshot_(snapshot),
-        parent_(parent) {
-  }
+HeapObject* const HeapSnapshot::kInternalRootObject =
+    reinterpret_cast<HeapObject*>(1);
 
-  void VisitPointer(Object** o) {
-    if (!(*o)->IsHeapObject()) return;
-    HeapEntry* entry = snapshot_->GetEntry(HeapObject::cast(*o));
-    if (entry != NULL) {
-      parent_->SetAutoIndexReference(entry);
-    }
-  }
 
-  void VisitPointers(Object** start, Object** end) {
-    for (Object** p = start; p < end; p++) VisitPointer(p);
-  }
+// It is very important to keep objects that form a heap snapshot
+// as small as possible.
+namespace {  // Avoid littering the global namespace.
 
- private:
-  HeapSnapshot* snapshot_;
-  HeapEntry* parent_;
+template <size_t ptr_size> struct SnapshotSizeConstants;
+
+template <> struct SnapshotSizeConstants<4> {
+  static const int kExpectedHeapGraphEdgeSize = 12;
+  static const int kExpectedHeapEntrySize = 32;
 };
 
+template <> struct SnapshotSizeConstants<8> {
+  static const int kExpectedHeapGraphEdgeSize = 24;
+  static const int kExpectedHeapEntrySize = 40;
+};
 
-HeapEntriesMap::HeapEntriesMap()
-    : entries_(HeapObjectsMatch) {
-}
-
-
-HeapEntriesMap::~HeapEntriesMap() {
-  for (HashMap::Entry* p = entries_.Start();
-       p != NULL;
-       p = entries_.Next(p)) {
-    if (!IsAlias(p->value)) delete reinterpret_cast<HeapEntry*>(p->value);
-  }
-}
-
-
-void HeapEntriesMap::Alias(HeapObject* object, HeapEntry* entry) {
-  HashMap::Entry* cache_entry = entries_.Lookup(object, Hash(object), true);
-  if (cache_entry->value == NULL)
-    cache_entry->value = reinterpret_cast<void*>(
-        reinterpret_cast<intptr_t>(entry) | kAliasTag);
-}
-
-
-void HeapEntriesMap::Apply(void (HeapEntry::*Func)(void)) {
-  for (HashMap::Entry* p = entries_.Start();
-       p != NULL;
-       p = entries_.Next(p)) {
-    if (!IsAlias(p->value)) (reinterpret_cast<HeapEntry*>(p->value)->*Func)();
-  }
-}
-
-
-HeapEntry* HeapEntriesMap::Map(HeapObject* object) {
-  HashMap::Entry* cache_entry = entries_.Lookup(object, Hash(object), false);
-  return cache_entry != NULL ?
-      reinterpret_cast<HeapEntry*>(
-          reinterpret_cast<intptr_t>(cache_entry->value) & (~kAliasTag)) : NULL;
-}
-
-
-void HeapEntriesMap::Pair(HeapObject* object, HeapEntry* entry) {
-  HashMap::Entry* cache_entry = entries_.Lookup(object, Hash(object), true);
-  ASSERT(cache_entry->value == NULL);
-  cache_entry->value = entry;
-}
-
+}  // namespace
 
 HeapSnapshot::HeapSnapshot(HeapSnapshotsCollection* collection,
                            const char* title,
@@ -1254,176 +1215,157 @@
     : collection_(collection),
       title_(title),
       uid_(uid),
-      root_(this),
-      sorted_entries_(NULL) {
+      root_entry_index_(-1),
+      raw_entries_(NULL),
+      entries_sorted_(false) {
+  STATIC_ASSERT(
+      sizeof(HeapGraphEdge) ==
+      SnapshotSizeConstants<sizeof(void*)>::kExpectedHeapGraphEdgeSize);  // NOLINT
+  STATIC_ASSERT(
+      sizeof(HeapEntry) ==
+      SnapshotSizeConstants<sizeof(void*)>::kExpectedHeapEntrySize);  // NOLINT
 }
 
 
+static void DisposeCalculatedData(HeapEntryCalculatedData* cdata) {
+  cdata->Dispose();
+}
+
 HeapSnapshot::~HeapSnapshot() {
-  delete sorted_entries_;
+  DeleteArray(raw_entries_);
+  calculated_data_.Iterate(DisposeCalculatedData);
 }
 
 
-void HeapSnapshot::ClearPaint() {
-  root_.ClearPaint();
-  entries_.Apply(&HeapEntry::ClearPaint);
+void HeapSnapshot::AllocateEntries(int entries_count,
+                                   int children_count,
+                                   int retainers_count) {
+  ASSERT(raw_entries_ == NULL);
+  raw_entries_ = NewArray<char>(
+      HeapEntry::EntriesSize(entries_count, children_count, retainers_count));
 }
 
 
-HeapEntry* HeapSnapshot::GetEntry(Object* obj) {
-  if (!obj->IsHeapObject()) return NULL;
-  HeapObject* object = HeapObject::cast(obj);
-
-  {
-    HeapEntry* existing = FindEntry(object);
-    if (existing != NULL) return existing;
-  }
-
-  // Add new entry.
-  if (object->IsJSFunction()) {
+HeapEntry* HeapSnapshot::AddEntry(HeapObject* object,
+                                  int children_count,
+                                  int retainers_count) {
+  if (object == kInternalRootObject) {
+    ASSERT(root_entry_index_ == -1);
+    root_entry_index_ = entries_.length();
+    HeapEntry* entry = GetNextEntryToInit();
+    entry->Init(this, children_count, retainers_count);
+    return entry;
+  } else if (object->IsJSFunction()) {
     JSFunction* func = JSFunction::cast(object);
     SharedFunctionInfo* shared = func->shared();
     String* name = String::cast(shared->name())->length() > 0 ?
         String::cast(shared->name()) : shared->inferred_name();
-    return AddEntry(object, HeapEntry::CLOSURE, collection_->GetName(name));
+    return AddEntry(object,
+                    HeapEntry::kClosure,
+                    collection_->GetName(name),
+                    children_count,
+                    retainers_count);
   } else if (object->IsJSObject()) {
     return AddEntry(object,
-                    HeapEntry::OBJECT,
+                    HeapEntry::kObject,
                     collection_->GetName(
-                        JSObject::cast(object)->constructor_name()));
-  } else if (object->IsJSGlobalPropertyCell()) {
-    HeapEntry* value = GetEntry(JSGlobalPropertyCell::cast(object)->value());
-    // If GPC references an object that we have interest in, add the object.
-    // We don't store HeapEntries for GPCs. Instead, we make our hash map
-    // to point to object's HeapEntry by GPCs address.
-    if (value != NULL) AddEntryAlias(object, value);
-    return value;
+                        JSObject::cast(object)->constructor_name()),
+                    children_count,
+                    retainers_count);
   } else if (object->IsString()) {
     return AddEntry(object,
-                    HeapEntry::STRING,
-                    collection_->GetName(String::cast(object)));
+                    HeapEntry::kString,
+                    collection_->GetName(String::cast(object)),
+                    children_count,
+                    retainers_count);
   } else if (object->IsCode()) {
-    return AddEntry(object, HeapEntry::CODE);
+    return AddEntry(object,
+                    HeapEntry::kCode,
+                    "",
+                    children_count,
+                    retainers_count);
   } else if (object->IsSharedFunctionInfo()) {
     SharedFunctionInfo* shared = SharedFunctionInfo::cast(object);
     String* name = String::cast(shared->name())->length() > 0 ?
         String::cast(shared->name()) : shared->inferred_name();
-    return AddEntry(object, HeapEntry::CODE, collection_->GetName(name));
+    return AddEntry(object,
+                    HeapEntry::kCode,
+                    collection_->GetName(name),
+                    children_count,
+                    retainers_count);
   } else if (object->IsScript()) {
     Script* script = Script::cast(object);
     return AddEntry(object,
-                    HeapEntry::CODE,
+                    HeapEntry::kCode,
                     script->name()->IsString() ?
-                    collection_->GetName(String::cast(script->name())) : "");
+                    collection_->GetName(String::cast(script->name())) : "",
+                    children_count,
+                    retainers_count);
   } else if (object->IsFixedArray()) {
-    return AddEntry(object, HeapEntry::ARRAY);
+    return AddEntry(object,
+                    HeapEntry::kArray,
+                    "",
+                    children_count,
+                    retainers_count);
   }
   // No interest in this object.
   return NULL;
 }
 
 
-void HeapSnapshot::SetClosureReference(HeapEntry* parent,
-                                       String* reference_name,
-                                       Object* child) {
-  HeapEntry* child_entry = GetEntry(child);
-  if (child_entry != NULL) {
-    parent->SetClosureReference(
-        collection_->GetName(reference_name), child_entry);
-  }
+bool HeapSnapshot::WillAddEntry(HeapObject* object) {
+  return object == kInternalRootObject
+      || object->IsJSFunction()
+      || object->IsJSObject()
+      || object->IsString()
+      || object->IsCode()
+      || object->IsSharedFunctionInfo()
+      || object->IsScript()
+      || object->IsFixedArray();
 }
 
 
-void HeapSnapshot::SetElementReference(HeapEntry* parent,
-                                       int index,
-                                       Object* child) {
-  HeapEntry* child_entry = GetEntry(child);
-  if (child_entry != NULL) {
-    parent->SetElementReference(index, child_entry);
-  }
+static void HeapEntryClearPaint(HeapEntry** entry_ptr) {
+  (*entry_ptr)->clear_paint();
+}
+
+void HeapSnapshot::ClearPaint() {
+  entries_.Iterate(HeapEntryClearPaint);
 }
 
 
-void HeapSnapshot::SetInternalReference(HeapEntry* parent,
-                                        const char* reference_name,
-                                        Object* child) {
-  HeapEntry* child_entry = GetEntry(child);
-  if (child_entry != NULL) {
-    parent->SetInternalReference(reference_name, child_entry);
-  }
-}
-
-
-void HeapSnapshot::SetPropertyReference(HeapEntry* parent,
-                                        String* reference_name,
-                                        Object* child) {
-  HeapEntry* child_entry = GetEntry(child);
-  if (child_entry != NULL) {
-    parent->SetPropertyReference(
-        collection_->GetName(reference_name), child_entry);
-  }
+int HeapSnapshot::AddCalculatedData() {
+  calculated_data_.Add(HeapEntryCalculatedData());
+  return calculated_data_.length() - 1;
 }
 
 
 HeapEntry* HeapSnapshot::AddEntry(HeapObject* object,
                                   HeapEntry::Type type,
-                                  const char* name) {
-  HeapEntry* entry = new HeapEntry(this,
-                                   type,
-                                   name,
-                                   collection_->GetObjectId(object->address()),
-                                   GetObjectSize(object),
-                                   GetObjectSecurityToken(object));
-  entries_.Pair(object, entry);
-
-  // Detect, if this is a JS global object of the current context, and
-  // add it to snapshot's roots. There can be several JS global objects
-  // in a context.
-  if (object->IsJSGlobalProxy()) {
-    int global_security_token = GetGlobalSecurityToken();
-    int object_security_token =
-        collection_->token_enumerator()->GetTokenId(
-            Context::cast(
-                JSGlobalProxy::cast(object)->context())->security_token());
-    if (object_security_token == TokenEnumerator::kNoSecurityToken
-        || object_security_token == global_security_token) {
-      HeapEntry* global_object_entry =
-          GetEntry(HeapObject::cast(object->map()->prototype()));
-      ASSERT(global_object_entry != NULL);
-      root_.SetAutoIndexReference(global_object_entry);
-    }
-  }
-
+                                  const char* name,
+                                  int children_count,
+                                  int retainers_count) {
+  HeapEntry* entry = GetNextEntryToInit();
+  entry->Init(this,
+              type,
+              name,
+              collection_->GetObjectId(object->address()),
+              GetObjectSize(object),
+              children_count,
+              retainers_count);
   return entry;
 }
 
 
-class EdgesCutter {
- public:
-  explicit EdgesCutter(int global_security_token)
-      : global_security_token_(global_security_token) {
+HeapEntry* HeapSnapshot::GetNextEntryToInit() {
+  if (entries_.length() > 0) {
+    HeapEntry* last_entry = entries_.last();
+    entries_.Add(reinterpret_cast<HeapEntry*>(
+        reinterpret_cast<char*>(last_entry) + last_entry->EntrySize()));
+  } else {
+    entries_.Add(reinterpret_cast<HeapEntry*>(raw_entries_));
   }
-
-  void Apply(HeapEntry* entry) {
-    if (entry->security_token_id() != TokenEnumerator::kNoSecurityToken
-        && entry->security_token_id() != global_security_token_) {
-      entry->CutEdges();
-    }
-  }
-
- private:
-  const int global_security_token_;
-};
-
-void HeapSnapshot::CutObjectsFromForeignSecurityContexts() {
-  EdgesCutter cutter(GetGlobalSecurityToken());
-  entries_.Apply(&cutter);
-}
-
-
-int HeapSnapshot::GetGlobalSecurityToken() {
-  return collection_->token_enumerator()->GetTokenId(
-      Top::context()->global()->global_context()->security_token());
+  return entries_.last();
 }
 
 
@@ -1433,24 +1375,14 @@
 }
 
 
-int HeapSnapshot::GetObjectSecurityToken(HeapObject* obj) {
-  if (obj->IsGlobalContext()) {
-    return collection_->token_enumerator()->GetTokenId(
-        Context::cast(obj)->security_token());
-  } else {
-    return TokenEnumerator::kNoSecurityToken;
-  }
-}
-
-
 int HeapSnapshot::CalculateNetworkSize(JSObject* obj) {
   int size = obj->Size();
   // If 'properties' and 'elements' are non-empty (thus, non-shared),
   // take their size into account.
-  if (FixedArray::cast(obj->properties())->length() != 0) {
+  if (obj->properties() != Heap::empty_fixed_array()) {
     size += obj->properties()->Size();
   }
-  if (FixedArray::cast(obj->elements())->length() != 0) {
+  if (obj->elements() != Heap::empty_fixed_array()) {
     size += obj->elements()->Size();
   }
   // For functions, also account non-empty context and literals sizes.
@@ -1467,15 +1399,10 @@
 }
 
 
-class EntriesCollector {
- public:
-  explicit EntriesCollector(List<HeapEntry*>* list) : list_(list) { }
-  void Apply(HeapEntry* entry) {
-    list_->Add(entry);
-  }
- private:
-  List<HeapEntry*>* list_;
-};
+HeapSnapshotsDiff* HeapSnapshot::CompareWith(HeapSnapshot* snapshot) {
+  return collection_->CompareSnapshots(this, snapshot);
+}
+
 
 template<class T>
 static int SortByIds(const T* entry1_ptr,
@@ -1485,22 +1412,16 @@
 }
 
 List<HeapEntry*>* HeapSnapshot::GetSortedEntriesList() {
-  if (sorted_entries_ != NULL) return sorted_entries_;
-  sorted_entries_ = new List<HeapEntry*>(entries_.capacity());
-  EntriesCollector collector(sorted_entries_);
-  entries_.Apply(&collector);
-  sorted_entries_->Sort(SortByIds);
-  return sorted_entries_;
-}
-
-
-HeapSnapshotsDiff* HeapSnapshot::CompareWith(HeapSnapshot* snapshot) {
-  return collection_->CompareSnapshots(this, snapshot);
+  if (!entries_sorted_) {
+    entries_.Sort(SortByIds);
+    entries_sorted_ = true;
+  }
+  return &entries_;
 }
 
 
 void HeapSnapshot::Print(int max_depth) {
-  root_.Print(max_depth, 0);
+  root()->Print(max_depth, 0);
 }
 
 
@@ -1571,6 +1492,7 @@
 
 void HeapObjectsMap::RemoveDeadEntries() {
   List<EntryInfo>* new_entries = new List<EntryInfo>();
+  List<void*> dead_entries;
   for (HashMap::Entry* entry = entries_map_.Start();
        entry != NULL;
        entry = entries_map_.Next(entry)) {
@@ -1580,8 +1502,15 @@
     if (entry_info.accessed) {
       entry->value = reinterpret_cast<void*>(new_entries->length());
       new_entries->Add(EntryInfo(entry_info.id, false));
+    } else {
+      dead_entries.Add(entry->key);
     }
   }
+  for (int i = 0; i < dead_entries.length(); ++i) {
+    void* raw_entry = dead_entries[i];
+    entries_map_.Remove(
+        raw_entry, AddressHash(reinterpret_cast<Address>(raw_entry)));
+  }
   delete entries_;
   entries_ = new_entries;
 }
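
Dead keys are collected first and erased in a second loop, since removing entries while iterating would invalidate the iteration. The same pattern with standard containers (a sketch, not V8 code):

  #include <cstddef>
  #include <map>
  #include <vector>

  typedef std::map<void*, bool> EntryMap;  // value: 'accessed' flag

  void RemoveDeadEntries(EntryMap* map) {
    std::vector<void*> dead;
    for (EntryMap::iterator it = map->begin(); it != map->end(); ++it) {
      if (!it->second) dead.push_back(it->first);  // collect only
    }
    for (size_t i = 0; i < dead.size(); ++i) {
      map->erase(dead[i]);  // mutate after iteration is done
    }
  }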
@@ -1635,53 +1564,343 @@
 }
 
 
-HeapSnapshotGenerator::HeapSnapshotGenerator(HeapSnapshot* snapshot)
-    : snapshot_(snapshot) {
+HeapEntriesMap::HeapEntriesMap()
+    : entries_(HeapObjectsMatch),
+      entries_count_(0),
+      total_children_count_(0),
+      total_retainers_count_(0) {
 }
 
 
+HeapEntriesMap::~HeapEntriesMap() {
+  for (HashMap::Entry* p = entries_.Start(); p != NULL; p = entries_.Next(p)) {
+    if (!IsAlias(p->value)) delete reinterpret_cast<EntryInfo*>(p->value);
+  }
+}
+
+
+void HeapEntriesMap::Alias(HeapObject* from, HeapObject* to) {
+  HashMap::Entry* from_cache_entry = entries_.Lookup(from, Hash(from), true);
+  HashMap::Entry* to_cache_entry = entries_.Lookup(to, Hash(to), false);
+  if (from_cache_entry->value == NULL) {
+    ASSERT(to_cache_entry != NULL);
+    from_cache_entry->value = MakeAlias(to_cache_entry->value);
+  }
+}
+
+
+HeapEntry* HeapEntriesMap::Map(HeapObject* object) {
+  HashMap::Entry* cache_entry = entries_.Lookup(object, Hash(object), false);
+  if (cache_entry != NULL) {
+    EntryInfo* entry_info =
+        reinterpret_cast<EntryInfo*>(Unalias(cache_entry->value));
+    return entry_info->entry;
+  } else {
+    return NULL;
+  }
+}
+
+
+void HeapEntriesMap::Pair(HeapObject* object, HeapEntry* entry) {
+  HashMap::Entry* cache_entry = entries_.Lookup(object, Hash(object), true);
+  ASSERT(cache_entry->value == NULL);
+  cache_entry->value = new EntryInfo(entry);
+  ++entries_count_;
+}
+
+
+void HeapEntriesMap::CountReference(HeapObject* from, HeapObject* to,
+                                    int* prev_children_count,
+                                    int* prev_retainers_count) {
+  HashMap::Entry* from_cache_entry = entries_.Lookup(from, Hash(from), true);
+  HashMap::Entry* to_cache_entry = entries_.Lookup(to, Hash(to), false);
+  ASSERT(from_cache_entry != NULL);
+  ASSERT(to_cache_entry != NULL);
+  EntryInfo* from_entry_info =
+      reinterpret_cast<EntryInfo*>(Unalias(from_cache_entry->value));
+  EntryInfo* to_entry_info =
+      reinterpret_cast<EntryInfo*>(Unalias(to_cache_entry->value));
+  if (prev_children_count)
+    *prev_children_count = from_entry_info->children_count;
+  if (prev_retainers_count)
+    *prev_retainers_count = to_entry_info->retainers_count;
+  ++from_entry_info->children_count;
+  ++to_entry_info->retainers_count;
+  ++total_children_count_;
+  ++total_retainers_count_;
+}
+
+
+template<class Visitor>
+void HeapEntriesMap::UpdateEntries(Visitor* visitor) {
+  for (HashMap::Entry* p = entries_.Start();
+       p != NULL;
+       p = entries_.Next(p)) {
+    if (!IsAlias(p->value)) {
+      EntryInfo* entry_info = reinterpret_cast<EntryInfo*>(p->value);
+      entry_info->entry = visitor->GetEntry(
+          reinterpret_cast<HeapObject*>(p->key),
+          entry_info->children_count,
+          entry_info->retainers_count);
+      entry_info->children_count = 0;
+      entry_info->retainers_count = 0;
+    }
+  }
+}
+
+
+HeapSnapshotGenerator::HeapSnapshotGenerator(HeapSnapshot* snapshot)
+    : snapshot_(snapshot),
+      collection_(snapshot->collection()),
+      filler_(NULL) {
+}
+
+
+HeapEntry* const
+HeapSnapshotGenerator::SnapshotFillerInterface::kHeapEntryPlaceholder =
+    reinterpret_cast<HeapEntry*>(1);
+
+class SnapshotCounter : public HeapSnapshotGenerator::SnapshotFillerInterface {
+ public:
+  explicit SnapshotCounter(HeapEntriesMap* entries)
+      : entries_(entries) { }
+  HeapEntry* AddEntry(HeapObject* obj) {
+    entries_->Pair(obj, kHeapEntryPlaceholder);
+    return kHeapEntryPlaceholder;
+  }
+  void SetElementReference(HeapObject* parent_obj,
+                           HeapEntry*,
+                           int,
+                           Object* child_obj,
+                           HeapEntry*) {
+    entries_->CountReference(parent_obj, HeapObject::cast(child_obj));
+  }
+  void SetNamedReference(HeapGraphEdge::Type,
+                         HeapObject* parent_obj,
+                         HeapEntry*,
+                         const char*,
+                         Object* child_obj,
+                         HeapEntry*) {
+    entries_->CountReference(parent_obj, HeapObject::cast(child_obj));
+  }
+  void SetRootReference(Object* child_obj, HeapEntry*) {
+    entries_->CountReference(
+        HeapSnapshot::kInternalRootObject, HeapObject::cast(child_obj));
+  }
+ private:
+  HeapEntriesMap* entries_;
+};
+
+
+class SnapshotFiller : public HeapSnapshotGenerator::SnapshotFillerInterface {
+ public:
+  explicit SnapshotFiller(HeapSnapshot* snapshot, HeapEntriesMap* entries)
+      : snapshot_(snapshot),
+        collection_(snapshot->collection()),
+        entries_(entries) { }
+  HeapEntry* AddEntry(HeapObject* obj) {
+    UNREACHABLE();
+    return NULL;
+  }
+  void SetElementReference(HeapObject* parent_obj,
+                           HeapEntry* parent_entry,
+                           int index,
+                           Object* child_obj,
+                           HeapEntry* child_entry) {
+    int child_index, retainer_index;
+    entries_->CountReference(parent_obj, HeapObject::cast(child_obj),
+                             &child_index, &retainer_index);
+    parent_entry->SetElementReference(
+        child_index, index, child_entry, retainer_index);
+  }
+  void SetNamedReference(HeapGraphEdge::Type type,
+                         HeapObject* parent_obj,
+                         HeapEntry* parent_entry,
+                         const char* reference_name,
+                         Object* child_obj,
+                         HeapEntry* child_entry) {
+    int child_index, retainer_index;
+    entries_->CountReference(parent_obj, HeapObject::cast(child_obj),
+                             &child_index, &retainer_index);
+    parent_entry->SetNamedReference(type,
+                                    child_index,
+                                    reference_name,
+                                    child_entry,
+                                    retainer_index);
+  }
+  void SetRootReference(Object* child_obj, HeapEntry* child_entry) {
+    int child_index, retainer_index;
+    entries_->CountReference(
+        HeapSnapshot::kInternalRootObject, HeapObject::cast(child_obj),
+        &child_index, &retainer_index);
+    snapshot_->root()->SetElementReference(
+        child_index, child_index + 1, child_entry, retainer_index);
+  }
+ private:
+  HeapSnapshot* snapshot_;
+  HeapSnapshotsCollection* collection_;
+  HeapEntriesMap* entries_;
+};
+
+class SnapshotAllocator {
+ public:
+  explicit SnapshotAllocator(HeapSnapshot* snapshot)
+      : snapshot_(snapshot) { }
+  HeapEntry* GetEntry(
+      HeapObject* obj, int children_count, int retainers_count) {
+    HeapEntry* entry =
+        snapshot_->AddEntry(obj, children_count, retainers_count);
+    ASSERT(entry != NULL);
+    return entry;
+  }
+ private:
+  HeapSnapshot* snapshot_;
+};
+
 void HeapSnapshotGenerator::GenerateSnapshot() {
   AssertNoAllocation no_alloc;
 
-  // Iterate heap contents.
-  HeapIterator iterator;
-  for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
+  // Pass 1. Iterate heap contents to count entries and references.
+  SnapshotCounter counter(&entries_);
+  filler_ = &counter;
+  filler_->AddEntry(HeapSnapshot::kInternalRootObject);
+  HeapIterator iterator1;
+  for (HeapObject* obj = iterator1.next();
+       obj != NULL;
+       obj = iterator1.next()) {
     ExtractReferences(obj);
   }
 
-  snapshot_->CutObjectsFromForeignSecurityContexts();
+  // Allocate and fill entries in the snapshot, allocate references.
+  snapshot_->AllocateEntries(entries_.entries_count(),
+                             entries_.total_children_count(),
+                             entries_.total_retainers_count());
+  SnapshotAllocator allocator(snapshot_);
+  entries_.UpdateEntries(&allocator);
+
+  // Pass 2. Fill references.
+  SnapshotFiller filler(snapshot_, &entries_);
+  filler_ = &filler;
+  HeapIterator iterator2;
+  for (HeapObject* obj = iterator2.next();
+       obj != NULL;
+       obj = iterator2.next()) {
+    ExtractReferences(obj);
+  }
 }
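
The two passes implement a count-then-fill scheme: the first traversal only counts entries and references so storage can be allocated exactly once, and the second traversal writes references into the preallocated slots. The same scheme applied to a flat edge array (a simplified sketch, not V8 code):

  #include <cstddef>
  #include <vector>

  struct Ref { int from; int to; };

  // Afterwards, node n's edges are edges[first_edge[n] .. first_edge[n+1]).
  void BuildEdges(int node_count, const std::vector<Ref>& refs,
                  std::vector<int>* first_edge, std::vector<int>* edges) {
    first_edge->assign(node_count + 1, 0);
    for (size_t i = 0; i < refs.size(); ++i)  // pass 1: count per node
      ++(*first_edge)[refs[i].from + 1];
    for (int n = 0; n < node_count; ++n)      // turn counts into offsets
      (*first_edge)[n + 1] += (*first_edge)[n];
    edges->resize(refs.size());
    std::vector<int> cursor(first_edge->begin(), first_edge->end() - 1);
    for (size_t i = 0; i < refs.size(); ++i)  // pass 2: fill in place
      (*edges)[cursor[refs[i].from]++] = refs[i].to;
  }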
 
 
+HeapEntry* HeapSnapshotGenerator::GetEntry(Object* obj) {
+  if (!obj->IsHeapObject()) return NULL;
+  HeapObject* object = HeapObject::cast(obj);
+  HeapEntry* entry = entries_.Map(object);
+
+  // A new entry.
+  if (entry == NULL) {
+    if (obj->IsJSGlobalPropertyCell()) {
+      Object* cell_target = JSGlobalPropertyCell::cast(obj)->value();
+      entry = GetEntry(cell_target);
+      // If the GPC references an object we are interested in (see
+      // HeapSnapshot::AddEntry, WillAddEntry), add that object. We
+      // don't store HeapEntries for GPCs; instead, we make our hash
+      // map point to the object's HeapEntry via the GPC's address.
+      if (entry != NULL) {
+        entries_.Alias(object, HeapObject::cast(cell_target));
+      }
+      return entry;
+    }
+
+    if (snapshot_->WillAddEntry(object)) entry = filler_->AddEntry(object);
+  }
+
+  return entry;
+}
+
+
+int HeapSnapshotGenerator::GetGlobalSecurityToken() {
+  return collection_->token_enumerator()->GetTokenId(
+      Top::context()->global()->global_context()->security_token());
+}
+
+
+int HeapSnapshotGenerator::GetObjectSecurityToken(HeapObject* obj) {
+  if (obj->IsGlobalContext()) {
+    return collection_->token_enumerator()->GetTokenId(
+        Context::cast(obj)->security_token());
+  } else {
+    return TokenEnumerator::kNoSecurityToken;
+  }
+}
+
+
+class IndexedReferencesExtractor : public ObjectVisitor {
+ public:
+  IndexedReferencesExtractor(HeapSnapshotGenerator* generator,
+                             HeapObject* parent_obj,
+                             HeapEntry* parent_entry)
+      : generator_(generator),
+        parent_obj_(parent_obj),
+        parent_(parent_entry),
+        next_index_(1) {
+  }
+
+  void VisitPointer(Object** o) {
+    generator_->SetElementReference(parent_obj_, parent_, next_index_++, *o);
+  }
+
+  void VisitPointers(Object** start, Object** end) {
+    for (Object** p = start; p < end; p++) VisitPointer(p);
+  }
+
+ private:
+  HeapSnapshotGenerator* generator_;
+  HeapObject* parent_obj_;
+  HeapEntry* parent_;
+  int next_index_;
+};
+
+
 void HeapSnapshotGenerator::ExtractReferences(HeapObject* obj) {
-  HeapEntry* entry = snapshot_->GetEntry(obj);
-  if (entry == NULL) return;
-  if (entry->visited()) return;
+  // We need to reference JS global objects from the snapshot's root.
+  // We also need to include only global objects from the current
+  // security context, and we don't want to add the global proxy,
+  // as we don't have a special type for it.
+  if (obj->IsJSGlobalProxy()) {
+    int global_security_token = GetGlobalSecurityToken();
+    JSGlobalProxy* proxy = JSGlobalProxy::cast(obj);
+    int object_security_token =
+        collection_->token_enumerator()->GetTokenId(
+            Context::cast(proxy->context())->security_token());
+    if (object_security_token == TokenEnumerator::kNoSecurityToken
+        || object_security_token == global_security_token) {
+      SetRootReference(proxy->map()->prototype());
+    }
+    return;
+  }
+
+  HeapEntry* entry = GetEntry(obj);
+  if (entry == NULL) return;  // No interest in this object.
 
   if (obj->IsJSObject()) {
     JSObject* js_obj = JSObject::cast(obj);
     ExtractClosureReferences(js_obj, entry);
     ExtractPropertyReferences(js_obj, entry);
     ExtractElementReferences(js_obj, entry);
-    snapshot_->SetPropertyReference(
-        entry, Heap::prototype_symbol(), js_obj->map()->prototype());
-  } else if (obj->IsJSGlobalPropertyCell()) {
-    JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(obj);
-    snapshot_->SetElementReference(entry, 0, cell->value());
+    SetPropertyReference(
+        obj, entry, Heap::prototype_symbol(), js_obj->map()->prototype());
   } else if (obj->IsString()) {
     if (obj->IsConsString()) {
       ConsString* cs = ConsString::cast(obj);
-      snapshot_->SetElementReference(entry, 0, cs->first());
-      snapshot_->SetElementReference(entry, 1, cs->second());
+      SetElementReference(obj, entry, 0, cs->first());
+      SetElementReference(obj, entry, 1, cs->second());
     }
   } else if (obj->IsCode() || obj->IsSharedFunctionInfo() || obj->IsScript()) {
-    IndexedReferencesExtractor refs_extractor(snapshot_, entry);
+    IndexedReferencesExtractor refs_extractor(this, obj, entry);
     obj->Iterate(&refs_extractor);
   } else if (obj->IsFixedArray()) {
-    IndexedReferencesExtractor refs_extractor(snapshot_, entry);
+    IndexedReferencesExtractor refs_extractor(this, obj, entry);
     obj->Iterate(&refs_extractor);
   }
-  entry->MarkAsVisited();
 }
 
 
@@ -1700,10 +1919,10 @@
       String* local_name = *zone_scope_info.LocalName(i);
       int idx = serialized_scope_info->ContextSlotIndex(local_name, NULL);
       if (idx >= 0 && idx < context->length()) {
-        snapshot_->SetClosureReference(entry, local_name, context->get(idx));
+        SetClosureReference(js_obj, entry, local_name, context->get(idx));
       }
     }
-    snapshot_->SetInternalReference(entry, "code", func->shared());
+    SetInternalReference(js_obj, entry, "code", func->shared());
   }
 }
 
@@ -1716,13 +1935,13 @@
       switch (descs->GetType(i)) {
         case FIELD: {
           int index = descs->GetFieldIndex(i);
-          snapshot_->SetPropertyReference(
-              entry, descs->GetKey(i), js_obj->FastPropertyAt(index));
+          SetPropertyReference(
+              js_obj, entry, descs->GetKey(i), js_obj->FastPropertyAt(index));
           break;
         }
         case CONSTANT_FUNCTION:
-          snapshot_->SetPropertyReference(
-              entry, descs->GetKey(i), descs->GetConstantFunction(i));
+          SetPropertyReference(
+              js_obj, entry, descs->GetKey(i), descs->GetConstantFunction(i));
           break;
         default: ;
       }
@@ -1733,8 +1952,8 @@
     for (int i = 0; i < length; ++i) {
       Object* k = dictionary->KeyAt(i);
       if (dictionary->IsKey(k)) {
-        snapshot_->SetPropertyReference(
-            entry, String::cast(k), dictionary->ValueAt(i));
+        SetPropertyReference(
+            js_obj, entry, String::cast(k), dictionary->ValueAt(i));
       }
     }
   }
@@ -1750,7 +1969,7 @@
         elements->length();
     for (int i = 0; i < length; ++i) {
       if (!elements->get(i)->IsTheHole()) {
-        snapshot_->SetElementReference(entry, i, elements->get(i));
+        SetElementReference(js_obj, entry, i, elements->get(i));
       }
     }
   } else if (js_obj->HasDictionaryElements()) {
@@ -1761,13 +1980,90 @@
       if (dictionary->IsKey(k)) {
         ASSERT(k->IsNumber());
         uint32_t index = static_cast<uint32_t>(k->Number());
-        snapshot_->SetElementReference(entry, index, dictionary->ValueAt(i));
+        SetElementReference(js_obj, entry, index, dictionary->ValueAt(i));
       }
     }
   }
 }
 
 
+void HeapSnapshotGenerator::SetClosureReference(HeapObject* parent_obj,
+                                                HeapEntry* parent_entry,
+                                                String* reference_name,
+                                                Object* child_obj) {
+  HeapEntry* child_entry = GetEntry(child_obj);
+  if (child_entry != NULL) {
+    filler_->SetNamedReference(HeapGraphEdge::kContextVariable,
+                               parent_obj,
+                               parent_entry,
+                               collection_->GetName(reference_name),
+                               child_obj,
+                               child_entry);
+  }
+}
+
+
+void HeapSnapshotGenerator::SetElementReference(HeapObject* parent_obj,
+                                                HeapEntry* parent_entry,
+                                                int index,
+                                                Object* child_obj) {
+  HeapEntry* child_entry = GetEntry(child_obj);
+  if (child_entry != NULL) {
+    filler_->SetElementReference(
+        parent_obj, parent_entry, index, child_obj, child_entry);
+  }
+}
+
+
+void HeapSnapshotGenerator::SetInternalReference(HeapObject* parent_obj,
+                                                 HeapEntry* parent_entry,
+                                                 const char* reference_name,
+                                                 Object* child_obj) {
+  HeapEntry* child_entry = GetEntry(child_obj);
+  if (child_entry != NULL) {
+    filler_->SetNamedReference(HeapGraphEdge::kInternal,
+                               parent_obj,
+                               parent_entry,
+                               reference_name,
+                               child_obj,
+                               child_entry);
+  }
+}
+
+
+void HeapSnapshotGenerator::SetPropertyReference(HeapObject* parent_obj,
+                                                 HeapEntry* parent_entry,
+                                                 String* reference_name,
+                                                 Object* child_obj) {
+  HeapEntry* child_entry = GetEntry(child_obj);
+  if (child_entry != NULL) {
+    filler_->SetNamedReference(HeapGraphEdge::kProperty,
+                               parent_obj,
+                               parent_entry,
+                               collection_->GetName(reference_name),
+                               child_obj,
+                               child_entry);
+  }
+}
+
+
+void HeapSnapshotGenerator::SetRootReference(Object* child_obj) {
+  HeapEntry* child_entry = GetEntry(child_obj);
+  ASSERT(child_entry != NULL);
+  filler_->SetRootReference(child_obj, child_entry);
+}
+
+
+void HeapSnapshotsDiff::CreateRoots(int additions_count, int deletions_count) {
+  raw_additions_root_ =
+      NewArray<char>(HeapEntry::EntriesSize(1, additions_count, 0));
+  additions_root()->Init(snapshot2_, additions_count, 0);
+  raw_deletions_root_ =
+      NewArray<char>(HeapEntry::EntriesSize(1, deletions_count, 0));
+  deletions_root()->Init(snapshot1_, deletions_count, 0);
+}
+
+
 static void DeleteHeapSnapshotsDiff(HeapSnapshotsDiff** diff_ptr) {
   delete *diff_ptr;
 }
@@ -1779,8 +2075,6 @@
 
 HeapSnapshotsDiff* HeapSnapshotsComparator::Compare(HeapSnapshot* snapshot1,
                                                     HeapSnapshot* snapshot2) {
-  HeapSnapshotsDiff* diff = new HeapSnapshotsDiff(snapshot1, snapshot2);
-  diffs_.Add(diff);
   List<HeapEntry*>* entries1 = snapshot1->GetSortedEntriesList();
   List<HeapEntry*>* entries2 = snapshot2->GetSortedEntriesList();
   int i = 0, j = 0;
@@ -1810,17 +2104,33 @@
 
   snapshot1->ClearPaint();
   snapshot1->root()->PaintAllReachable();
+  snapshot2->ClearPaint();
+  snapshot2->root()->PaintAllReachable();
+  int reachable_deleted_entries = 0, reachable_added_entries = 0;
+  for (int i = 0; i < deleted_entries.length(); ++i) {
+    HeapEntry* entry = deleted_entries[i];
+    if (entry->painted_reachable()) ++reachable_deleted_entries;
+  }
+  for (int i = 0; i < added_entries.length(); ++i) {
+    HeapEntry* entry = added_entries[i];
+    if (entry->painted_reachable()) ++reachable_added_entries;
+  }
+
+  HeapSnapshotsDiff* diff = new HeapSnapshotsDiff(snapshot1, snapshot2);
+  diffs_.Add(diff);
+  diff->CreateRoots(reachable_added_entries, reachable_deleted_entries);
+
+  int del_child_index = 0, deleted_entry_index = 1;
   for (int i = 0; i < deleted_entries.length(); ++i) {
     HeapEntry* entry = deleted_entries[i];
     if (entry->painted_reachable())
-      diff->AddDeletedEntry(entry);
+      diff->AddDeletedEntry(del_child_index++, deleted_entry_index++, entry);
   }
-  snapshot2->ClearPaint();
-  snapshot2->root()->PaintAllReachable();
+  int add_child_index = 0, added_entry_index = 1;
   for (int i = 0; i < added_entries.length(); ++i) {
     HeapEntry* entry = added_entries[i];
     if (entry->painted_reachable())
-      diff->AddAddedEntry(entry);
+      diff->AddAddedEntry(add_child_index++, added_entry_index++, entry);
   }
   return diff;
 }
diff --git a/src/profile-generator.h b/src/profile-generator.h
index cd2bd0b..bebf40a 100644
--- a/src/profile-generator.h
+++ b/src/profile-generator.h
@@ -279,15 +279,12 @@
   CpuProfile* StopProfiling(int security_token_id,
                             const char* title,
                             double actual_sampling_rate);
-  CpuProfile* StopProfiling(int security_token_id,
-                            String* title,
-                            double actual_sampling_rate);
   List<CpuProfile*>* Profiles(int security_token_id);
   const char* GetName(String* name) {
     return function_and_resource_names_.GetName(name);
   }
   CpuProfile* GetProfile(int security_token_id, unsigned uid);
-  inline bool is_last_profile();
+  bool IsLastProfile(const char* title);
 
   CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag,
                           String* name, String* resource_name, int line_number);
@@ -423,167 +420,194 @@
 };
 
 
-class HeapSnapshot;
 class HeapEntry;
 
-
-class HeapGraphEdge {
+class HeapGraphEdge BASE_EMBEDDED {
  public:
   enum Type {
-    CONTEXT_VARIABLE = v8::HeapGraphEdge::CONTEXT_VARIABLE,
-    ELEMENT = v8::HeapGraphEdge::ELEMENT,
-    PROPERTY = v8::HeapGraphEdge::PROPERTY,
-    INTERNAL = v8::HeapGraphEdge::INTERNAL
+    kContextVariable = v8::HeapGraphEdge::kContextVariable,
+    kElement = v8::HeapGraphEdge::kElement,
+    kProperty = v8::HeapGraphEdge::kProperty,
+    kInternal = v8::HeapGraphEdge::kInternal
   };
 
-  HeapGraphEdge(Type type, const char* name, HeapEntry* from, HeapEntry* to);
-  HeapGraphEdge(int index, HeapEntry* from, HeapEntry* to);
+  HeapGraphEdge() { }
+  void Init(int child_index, Type type, const char* name, HeapEntry* to);
+  void Init(int child_index, int index, HeapEntry* to);
 
-  Type type() const { return type_; }
-  int index() const {
-    ASSERT(type_ == ELEMENT);
+  Type type() { return static_cast<Type>(type_); }
+  int index() {
+    ASSERT(type_ == kElement);
     return index_;
   }
-  const char* name() const {
-    ASSERT(type_ == CONTEXT_VARIABLE || type_ == PROPERTY || type_ == INTERNAL);
+  const char* name() {
+    ASSERT(type_ == kContextVariable
+           || type_ == kProperty
+           || type_ == kInternal);
     return name_;
   }
-  HeapEntry* from() const { return from_; }
-  HeapEntry* to() const { return to_; }
+  HeapEntry* to() { return to_; }
+
+  HeapEntry* From();
 
  private:
-  Type type_;
+  int child_index_ : 30;
+  unsigned type_ : 2;
   union {
     int index_;
     const char* name_;
   };
-  HeapEntry* from_;
   HeapEntry* to_;
 
   DISALLOW_COPY_AND_ASSIGN(HeapGraphEdge);
 };
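
Packing the two-bit edge type into the same word as the 30-bit child index keeps every HeapGraphEdge down to the payload union plus two words of bookkeeping. A quick standalone check of the packing (a sketch; exact sizes are platform dependent):

  #include <cstdio>

  struct PackedEdge {
    int child_index_ : 30;  // 2^30 children per entry is plenty
    unsigned type_ : 2;     // the four edge types fit in two bits
    union { int index_; const char* name_; };
    void* to_;
  };

  int main() {
    // 12 bytes on a typical 32-bit build; an unpacked type field would
    // cost another word per edge.
    std::printf("%u bytes per edge\n",
                static_cast<unsigned>(sizeof(PackedEdge)));
    return 0;
  }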
 
 
-class HeapGraphPath;
 class CachedHeapGraphPath;
+class HeapGraphPath;
+class HeapSnapshot;
 
-class HeapEntry {
+// HeapEntry instances represent entities from the heap (or special
+// virtual nodes, e.g. the root). To make heap snapshots more compact,
+// HeapEntries use a special memory layout (no Vectors or Lists are used):
+//
+//   +-----------------+
+//        HeapEntry
+//   +-----------------+
+//      HeapGraphEdge    |
+//           ...         } children_count
+//      HeapGraphEdge    |
+//   +-----------------+
+//      HeapGraphEdge*   |
+//           ...         } retainers_count
+//      HeapGraphEdge*   |
+//   +-----------------+
+//
+// In a HeapSnapshot, all entries are hand-allocated in a contiguous array
+// of raw bytes.
+//
+class HeapEntry BASE_EMBEDDED {
  public:
   enum Type {
-    INTERNAL = v8::HeapGraphNode::INTERNAL,
-    ARRAY = v8::HeapGraphNode::ARRAY,
-    STRING = v8::HeapGraphNode::STRING,
-    OBJECT = v8::HeapGraphNode::OBJECT,
-    CODE = v8::HeapGraphNode::CODE,
-    CLOSURE = v8::HeapGraphNode::CLOSURE
+    kInternal = v8::HeapGraphNode::kInternal,
+    kArray = v8::HeapGraphNode::kArray,
+    kString = v8::HeapGraphNode::kString,
+    kObject = v8::HeapGraphNode::kObject,
+    kCode = v8::HeapGraphNode::kCode,
+    kClosure = v8::HeapGraphNode::kClosure
   };
 
-  explicit HeapEntry(HeapSnapshot* snapshot)
-      : snapshot_(snapshot),
-        visited_(false),
-        type_(INTERNAL),
-        name_(""),
-        id_(0),
-        next_auto_index_(0),
-        self_size_(0),
-        security_token_id_(TokenEnumerator::kNoSecurityToken),
-        children_(1),
-        retainers_(0),
-        retaining_paths_(0),
-        total_size_(kUnknownSize),
-        non_shared_total_size_(kUnknownSize),
-        painted_(kUnpainted) { }
-  HeapEntry(HeapSnapshot* snapshot,
+  HeapEntry() { }
+  void Init(HeapSnapshot* snapshot, int children_count, int retainers_count);
+  void Init(HeapSnapshot* snapshot,
             Type type,
             const char* name,
             uint64_t id,
             int self_size,
-            int security_token_id)
-      : snapshot_(snapshot),
-        visited_(false),
-        type_(type),
-        name_(name),
-        id_(id),
-        next_auto_index_(1),
-        self_size_(self_size),
-        security_token_id_(security_token_id),
-        children_(4),
-        retainers_(4),
-        retaining_paths_(4),
-        total_size_(kUnknownSize),
-        non_shared_total_size_(kUnknownSize),
-        painted_(kUnpainted) { }
-  ~HeapEntry();
+            int children_count,
+            int retainers_count);
 
-  bool visited() const { return visited_; }
-  Type type() const { return type_; }
-  const char* name() const { return name_; }
-  uint64_t id() const { return id_; }
-  int self_size() const { return self_size_; }
-  int security_token_id() const { return security_token_id_; }
-  bool painted_reachable() { return painted_ == kPaintReachable; }
-  bool not_painted_reachable_from_others() {
-    return painted_ != kPaintReachableFromOthers;
+  HeapSnapshot* snapshot() { return snapshot_; }
+  Type type() { return static_cast<Type>(type_); }
+  const char* name() { return name_; }
+  uint64_t id() { return id_; }
+  int self_size() { return self_size_; }
+
+  Vector<HeapGraphEdge> children() {
+    return Vector<HeapGraphEdge>(children_arr(), children_count_);
+  }
+  Vector<HeapGraphEdge*> retainers() {
+    return Vector<HeapGraphEdge*>(retainers_arr(), retainers_count_);
+  }
+  List<HeapGraphPath*>* GetRetainingPaths();
+
+  void clear_paint() { painted_ = kUnpainted; }
+  bool painted_reachable() { return painted_ == kPainted; }
+  void paint_reachable() {
+    ASSERT(painted_ == kUnpainted);
+    painted_ = kPainted;
   }
-  const List<HeapGraphEdge*>* children() const { return &children_; }
-  const List<HeapGraphEdge*>* retainers() const { return &retainers_; }
-  const List<HeapGraphPath*>* GetRetainingPaths();
-
+  bool not_painted_reachable_from_others() {
+    return painted_ != kPaintedReachableFromOthers;
+  }
+  void paint_reachable_from_others() {
+    painted_ = kPaintedReachableFromOthers;
+  }
   template<class Visitor>
   void ApplyAndPaintAllReachable(Visitor* visitor);
-
-  void ClearPaint() { painted_ = kUnpainted; }
-  void CutEdges();
-  void MarkAsVisited() { visited_ = true; }
   void PaintAllReachable();
-  void PaintReachable() {
-    ASSERT(painted_ == kUnpainted);
-    painted_ = kPaintReachable;
-  }
-  void PaintReachableFromOthers() { painted_ = kPaintReachableFromOthers; }
-  void SetClosureReference(const char* name, HeapEntry* entry);
-  void SetElementReference(int index, HeapEntry* entry);
-  void SetInternalReference(const char* name, HeapEntry* entry);
-  void SetPropertyReference(const char* name, HeapEntry* entry);
-  void SetAutoIndexReference(HeapEntry* entry);
-  void SetUnidirAutoIndexReference(HeapEntry* entry);
 
-  int TotalSize();
-  int NonSharedTotalSize();
+  void SetElementReference(
+      int child_index, int index, HeapEntry* entry, int retainer_index);
+  void SetNamedReference(HeapGraphEdge::Type type,
+                         int child_index,
+                         const char* name,
+                         HeapEntry* entry,
+                         int retainer_index);
+  void SetUnidirElementReference(int child_index, int index, HeapEntry* entry);
+
+  int EntrySize() { return EntriesSize(1, children_count_, retainers_count_); }
+  int ReachableSize();
+  int RetainedSize();
 
   void Print(int max_depth, int indent);
 
- private:
-  void AddEdge(HeapGraphEdge* edge);
-  int CalculateTotalSize();
-  int CalculateNonSharedTotalSize();
-  void FindRetainingPaths(HeapEntry* node, CachedHeapGraphPath* prev_path);
-  void RemoveChild(HeapGraphEdge* edge);
-  void RemoveRetainer(HeapGraphEdge* edge);
+  static int EntriesSize(int entries_count,
+                         int children_count,
+                         int retainers_count);
 
+ private:
+  HeapGraphEdge* children_arr() {
+    return reinterpret_cast<HeapGraphEdge*>(this + 1);
+  }
+  HeapGraphEdge** retainers_arr() {
+    return reinterpret_cast<HeapGraphEdge**>(children_arr() + children_count_);
+  }
   const char* TypeAsString();
 
+  unsigned painted_: 2;
+  unsigned type_: 3;
+  // The calculated data is stored in HeapSnapshot in HeapEntryCalculatedData
+  // entries. See AddCalculatedData and GetCalculatedData.
+  int calculated_data_index_: 27;
+  int self_size_;
+  int children_count_;
+  int retainers_count_;
   HeapSnapshot* snapshot_;
-  bool visited_;
-  Type type_;
   const char* name_;
   uint64_t id_;
-  int next_auto_index_;
-  int self_size_;
-  int security_token_id_;
-  List<HeapGraphEdge*> children_;
-  List<HeapGraphEdge*> retainers_;
-  List<HeapGraphPath*> retaining_paths_;
-  int total_size_;
-  int non_shared_total_size_;
-  int painted_;
+
+  static const unsigned kUnpainted = 0;
+  static const unsigned kPainted = 1;
+  static const unsigned kPaintedReachableFromOthers = 2;
+  static const int kNoCalculatedData = -1;
+
+  DISALLOW_COPY_AND_ASSIGN(HeapEntry);
+};
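
EntriesSize above is presumably just the sum of the fixed and variable-length parts of this layout (a sketch of the arithmetic; the definition itself is outside this diff):

  int HeapEntry::EntriesSize(int entries_count,
                             int children_count,
                             int retainers_count) {
    return static_cast<int>(sizeof(HeapEntry)) * entries_count
        + static_cast<int>(sizeof(HeapGraphEdge)) * children_count
        + static_cast<int>(sizeof(HeapGraphEdge*)) * retainers_count;
  }

HeapSnapshotsDiff::CreateRoots, for instance, allocates EntriesSize(1, additions_count, 0) bytes: one root entry with one child edge per added entry and no retainers.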
+
+
+class HeapEntryCalculatedData {
+ public:
+  HeapEntryCalculatedData()
+      : retaining_paths_(NULL),
+        reachable_size_(kUnknownSize),
+        retained_size_(kUnknownSize) {
+  }
+  void Dispose();
+
+  List<HeapGraphPath*>* GetRetainingPaths(HeapEntry* entry);
+  int ReachableSize(HeapEntry* entry);
+  int RetainedSize(HeapEntry* entry);
+
+ private:
+  void CalculateSizes(HeapEntry* entry);
+  void FindRetainingPaths(HeapEntry* entry, CachedHeapGraphPath* prev_path);
+
+  List<HeapGraphPath*>* retaining_paths_;
+  int reachable_size_;
+  int retained_size_;
 
   static const int kUnknownSize = -1;
-  static const int kUnpainted = 0;
-  static const int kPaintReachable = 1;
-  static const int kPaintReachableFromOthers = 2;
 
-  DISALLOW_IMPLICIT_CONSTRUCTORS(HeapEntry);
+  // Allow generated copy constructor and assignment operator.
 };
 
 
@@ -595,7 +619,7 @@
 
   void Add(HeapGraphEdge* edge) { path_.Add(edge); }
   void Set(int index, HeapGraphEdge* edge) { path_[index] = edge; }
-  const List<HeapGraphEdge*>* path() const { return &path_; }
+  const List<HeapGraphEdge*>* path() { return &path_; }
 
   void Print();
 
@@ -606,39 +630,6 @@
 };
 
 
-class HeapEntriesMap {
- public:
-  HeapEntriesMap();
-  ~HeapEntriesMap();
-
-  void Alias(HeapObject* object, HeapEntry* entry);
-  void Apply(void (HeapEntry::*Func)(void));
-  template<class Visitor>
-  void Apply(Visitor* visitor);
-  HeapEntry* Map(HeapObject* object);
-  void Pair(HeapObject* object, HeapEntry* entry);
-
-  uint32_t capacity() { return entries_.capacity(); }
-
- private:
-  INLINE(uint32_t Hash(HeapObject* object)) {
-    return static_cast<uint32_t>(reinterpret_cast<intptr_t>(object));
-  }
-  INLINE(static bool HeapObjectsMatch(void* key1, void* key2)) {
-    return key1 == key2;
-  }
-  INLINE(bool IsAlias(void* ptr)) {
-    return reinterpret_cast<intptr_t>(ptr) & kAliasTag;
-  }
-
-  static const intptr_t kAliasTag = 1;
-
-  HashMap entries_;
-
-  DISALLOW_COPY_AND_ASSIGN(HeapEntriesMap);
-};
-
-
 class HeapSnapshotsCollection;
 class HeapSnapshotsDiff;
 
@@ -653,53 +644,52 @@
                const char* title,
                unsigned uid);
   ~HeapSnapshot();
-  void ClearPaint();
-  void CutObjectsFromForeignSecurityContexts();
-  HeapEntry* GetEntry(Object* object);
-  void SetClosureReference(
-      HeapEntry* parent, String* reference_name, Object* child);
-  void SetElementReference(HeapEntry* parent, int index, Object* child);
-  void SetInternalReference(
-      HeapEntry* parent, const char* reference_name, Object* child);
-  void SetPropertyReference(
-      HeapEntry* parent, String* reference_name, Object* child);
 
-  INLINE(const char* title() const) { return title_; }
-  INLINE(unsigned uid() const) { return uid_; }
-  const HeapEntry* const_root() const { return &root_; }
-  HeapEntry* root() { return &root_; }
-  template<class Visitor>
-  void IterateEntries(Visitor* visitor) { entries_.Apply(visitor); }
-  List<HeapEntry*>* GetSortedEntriesList();
+  HeapSnapshotsCollection* collection() { return collection_; }
+  const char* title() { return title_; }
+  unsigned uid() { return uid_; }
+  HeapEntry* root() { return entries_[root_entry_index_]; }
+
+  void AllocateEntries(
+      int entries_count, int children_count, int retainers_count);
+  HeapEntry* AddEntry(
+      HeapObject* object, int children_count, int retainers_count);
+  bool WillAddEntry(HeapObject* object);
+  int AddCalculatedData();
+  HeapEntryCalculatedData& GetCalculatedData(int index) {
+    return calculated_data_[index];
+  }
+  void ClearPaint();
   HeapSnapshotsDiff* CompareWith(HeapSnapshot* snapshot);
+  List<HeapEntry*>* GetSortedEntriesList();
+  template<class Visitor>
+  void IterateEntries(Visitor* visitor) { entries_.Iterate(visitor); }
 
   void Print(int max_depth);
+  void PrintEntriesSize();
+
+  static HeapObject* const kInternalRootObject;
 
  private:
-  HeapEntry* AddEntry(HeapObject* object, HeapEntry::Type type) {
-    return AddEntry(object, type, "");
-  }
-  HeapEntry* AddEntry(
-      HeapObject* object, HeapEntry::Type type, const char* name);
-  void AddEntryAlias(HeapObject* object, HeapEntry* entry) {
-    entries_.Alias(object, entry);
-  }
-  HeapEntry* FindEntry(HeapObject* object) {
-    return entries_.Map(object);
-  }
-  int GetGlobalSecurityToken();
-  int GetObjectSecurityToken(HeapObject* obj);
+  HeapEntry* AddEntry(HeapObject* object,
+                      HeapEntry::Type type,
+                      const char* name,
+                      int children_count,
+                      int retainers_count);
+  HeapEntry* GetNextEntryToInit();
   static int GetObjectSize(HeapObject* obj);
   static int CalculateNetworkSize(JSObject* obj);
 
   HeapSnapshotsCollection* collection_;
   const char* title_;
   unsigned uid_;
-  HeapEntry root_;
-  // Mapping from HeapObject* pointers to HeapEntry* pointers.
-  HeapEntriesMap entries_;
-  // Entries sorted by id.
-  List<HeapEntry*>* sorted_entries_;
+  int root_entry_index_;
+  char* raw_entries_;
+  List<HeapEntry*> entries_;
+  bool entries_sorted_;
+  List<HeapEntryCalculatedData> calculated_data_;
+
+  friend class HeapSnapshotTester;
 
   DISALLOW_COPY_AND_ASSIGN(HeapSnapshot);
 };
@@ -748,30 +738,36 @@
   HeapSnapshotsDiff(HeapSnapshot* snapshot1, HeapSnapshot* snapshot2)
       : snapshot1_(snapshot1),
         snapshot2_(snapshot2),
-        additions_root_(new HeapEntry(snapshot2)),
-        deletions_root_(new HeapEntry(snapshot1)) { }
+        raw_additions_root_(NULL),
+        raw_deletions_root_(NULL) { }
 
   ~HeapSnapshotsDiff() {
-    delete deletions_root_;
-    delete additions_root_;
+    DeleteArray(raw_deletions_root_);
+    DeleteArray(raw_additions_root_);
   }
 
-  void AddAddedEntry(HeapEntry* entry) {
-    additions_root_->SetUnidirAutoIndexReference(entry);
+  void AddAddedEntry(int child_index, int index, HeapEntry* entry) {
+    additions_root()->SetUnidirElementReference(child_index, index, entry);
   }
 
-  void AddDeletedEntry(HeapEntry* entry) {
-    deletions_root_->SetUnidirAutoIndexReference(entry);
+  void AddDeletedEntry(int child_index, int index, HeapEntry* entry) {
+    deletions_root()->SetUnidirElementReference(child_index, index, entry);
   }
 
-  const HeapEntry* additions_root() const { return additions_root_; }
-  const HeapEntry* deletions_root() const { return deletions_root_; }
+  void CreateRoots(int additions_count, int deletions_count);
+
+  HeapEntry* additions_root() {
+    return reinterpret_cast<HeapEntry*>(raw_additions_root_);
+  }
+  HeapEntry* deletions_root() {
+    return reinterpret_cast<HeapEntry*>(raw_deletions_root_);
+  }
 
  private:
   HeapSnapshot* snapshot1_;
   HeapSnapshot* snapshot2_;
-  HeapEntry* additions_root_;
-  HeapEntry* deletions_root_;
+  char* raw_additions_root_;
+  char* raw_deletions_root_;
 
   DISALLOW_COPY_AND_ASSIGN(HeapSnapshotsDiff);
 };
@@ -830,18 +826,123 @@
 };
 
 
+// A HeapEntriesMap instance tracks the mapping between real heap
+// objects and their representations in heap snapshots.
+class HeapEntriesMap {
+ public:
+  HeapEntriesMap();
+  ~HeapEntriesMap();
+
+  // Aliasing is used for skipping intermediate proxy objects, like
+  // JSGlobalPropertyCell.
+  void Alias(HeapObject* from, HeapObject* to);
+  HeapEntry* Map(HeapObject* object);
+  void Pair(HeapObject* object, HeapEntry* entry);
+  void CountReference(HeapObject* from, HeapObject* to,
+                      int* prev_children_count = NULL,
+                      int* prev_retainers_count = NULL);
+  template<class Visitor>
+  void UpdateEntries(Visitor* visitor);
+
+  int entries_count() { return entries_count_; }
+  int total_children_count() { return total_children_count_; }
+  int total_retainers_count() { return total_retainers_count_; }
+
+ private:
+  struct EntryInfo {
+    explicit EntryInfo(HeapEntry* entry)
+        : entry(entry), children_count(0), retainers_count(0) { }
+    HeapEntry* entry;
+    int children_count;
+    int retainers_count;
+  };
+
+  uint32_t Hash(HeapObject* object) {
+    return static_cast<uint32_t>(reinterpret_cast<intptr_t>(object));
+  }
+  static bool HeapObjectsMatch(void* key1, void* key2) { return key1 == key2; }
+
+  bool IsAlias(void* ptr) {
+    return reinterpret_cast<intptr_t>(ptr) & kAliasTag;
+  }
+  void* MakeAlias(void* ptr) {
+    return reinterpret_cast<void*>(reinterpret_cast<intptr_t>(ptr) | kAliasTag);
+  }
+  void* Unalias(void* ptr) {
+    return reinterpret_cast<void*>(
+        reinterpret_cast<intptr_t>(ptr) & (~kAliasTag));
+  }
+
+  HashMap entries_;
+  int entries_count_;
+  int total_children_count_;
+  int total_retainers_count_;
+
+  static const intptr_t kAliasTag = 1;
+
+  DISALLOW_COPY_AND_ASSIGN(HeapEntriesMap);
+};
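
The alias tag works because EntryInfo pointers are at least two-byte aligned, leaving bit 0 free to mark a value as an alias rather than a real entry. A standalone illustration (a sketch, not V8 code):

  #include <cassert>
  #include <stdint.h>

  static const intptr_t kAliasTag = 1;

  void* MakeAlias(void* ptr) {
    return reinterpret_cast<void*>(
        reinterpret_cast<intptr_t>(ptr) | kAliasTag);
  }
  bool IsAlias(void* ptr) {
    return (reinterpret_cast<intptr_t>(ptr) & kAliasTag) != 0;
  }
  void* Unalias(void* ptr) {
    return reinterpret_cast<void*>(
        reinterpret_cast<intptr_t>(ptr) & ~kAliasTag);
  }

  int main() {
    int entry_info = 0;  // stands in for a heap-allocated EntryInfo
    void* alias = MakeAlias(&entry_info);
    assert(IsAlias(alias) && !IsAlias(&entry_info));
    assert(Unalias(alias) == &entry_info);
    return 0;
  }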
+
+
 class HeapSnapshotGenerator {
  public:
+  class SnapshotFillerInterface {
+   public:
+    virtual ~SnapshotFillerInterface() { }
+    virtual HeapEntry* AddEntry(HeapObject* obj) = 0;
+    virtual void SetElementReference(HeapObject* parent_obj,
+                                     HeapEntry* parent_entry,
+                                     int index,
+                                     Object* child_obj,
+                                     HeapEntry* child_entry) = 0;
+    virtual void SetNamedReference(HeapGraphEdge::Type type,
+                                   HeapObject* parent_obj,
+                                   HeapEntry* parent_entry,
+                                   const char* reference_name,
+                                   Object* child_obj,
+                                   HeapEntry* child_entry) = 0;
+    virtual void SetRootReference(Object* child_obj,
+                                  HeapEntry* child_entry) = 0;
+
+    static HeapEntry* const kHeapEntryPlaceholder;
+  };
+
   explicit HeapSnapshotGenerator(HeapSnapshot* snapshot);
   void GenerateSnapshot();
 
  private:
+  HeapEntry* GetEntry(Object* obj);
+  int GetGlobalSecurityToken();
+  int GetObjectSecurityToken(HeapObject* obj);
   void ExtractReferences(HeapObject* obj);
   void ExtractClosureReferences(JSObject* js_obj, HeapEntry* entry);
   void ExtractPropertyReferences(JSObject* js_obj, HeapEntry* entry);
   void ExtractElementReferences(JSObject* js_obj, HeapEntry* entry);
+  void SetClosureReference(HeapObject* parent_obj,
+                           HeapEntry* parent,
+                           String* reference_name,
+                           Object* child);
+  void SetElementReference(HeapObject* parent_obj,
+                           HeapEntry* parent,
+                           int index,
+                           Object* child);
+  void SetInternalReference(HeapObject* parent_obj,
+                            HeapEntry* parent,
+                            const char* reference_name,
+                            Object* child);
+  void SetPropertyReference(HeapObject* parent_obj,
+                            HeapEntry* parent,
+                            String* reference_name,
+                            Object* child);
+  void SetRootReference(Object* child);
 
   HeapSnapshot* snapshot_;
+  HeapSnapshotsCollection* collection_;
+  // Mapping from HeapObject* pointers to HeapEntry* pointers.
+  HeapEntriesMap entries_;
+  SnapshotFillerInterface* filler_;
+
+  friend class IndexedReferencesExtractor;
 
   DISALLOW_COPY_AND_ASSIGN(HeapSnapshotGenerator);
 };
diff --git a/src/runtime.cc b/src/runtime.cc
index c7d3ff7..fc6ca76 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -305,13 +305,14 @@
       }
       Handle<Object> result;
       uint32_t element_index = 0;
-      if (key->ToArrayIndex(&element_index)) {
+      if (key->IsSymbol()) {
+        // If the key is a symbol, it is not an array element.
+        Handle<String> name(String::cast(*key));
+        ASSERT(!name->AsArrayIndex(&element_index));
+        result = SetProperty(boilerplate, name, value, NONE);
+      } else if (key->ToArrayIndex(&element_index)) {
         // Array index (uint32).
         result = SetElement(boilerplate, element_index, value);
-      } else if (key->IsSymbol()) {
-        // The key is not an array index.
-        Handle<String> name(String::cast(*key));
-        result = SetProperty(boilerplate, name, value, NONE);
       } else {
         // Non-uint32 number.
         ASSERT(key->IsNumber());
@@ -1626,7 +1627,8 @@
     }
     // Set the code, scope info, formal parameter count,
     // and the length of the target function.
-    target->set_code(fun->code());
+    target->shared()->set_code(shared->code());
+    target->set_code(shared->code());
     target->shared()->set_scope_info(shared->scope_info());
     target->shared()->set_length(shared->length());
     target->shared()->set_formal_parameter_count(
@@ -6869,7 +6871,7 @@
 
   Handle<JSFunction> function = args.at<JSFunction>(0);
 #ifdef DEBUG
-  if (FLAG_trace_lazy) {
+  if (FLAG_trace_lazy && !function->shared()->is_compiled()) {
     PrintF("[lazy: ");
     function->shared()->name()->Print();
     PrintF("]\n");
diff --git a/src/serialize.h b/src/serialize.h
index 6a318f1..d1b668d 100644
--- a/src/serialize.h
+++ b/src/serialize.h
@@ -248,7 +248,7 @@
   }
 
   static int partial_snapshot_cache_length_;
-  static const int kPartialSnapshotCacheCapacity = 1300;
+  static const int kPartialSnapshotCacheCapacity = 1400;
   static Object* partial_snapshot_cache_[];
 };
 
diff --git a/src/top.cc b/src/top.cc
index 2887b76..1a4a948 100644
--- a/src/top.cc
+++ b/src/top.cc
@@ -107,16 +107,15 @@
 void Top::Iterate(ObjectVisitor* v, ThreadLocalTop* thread) {
   v->VisitPointer(&(thread->pending_exception_));
   v->VisitPointer(&(thread->pending_message_obj_));
-  v->VisitPointer(
-      BitCast<Object**, Script**>(&(thread->pending_message_script_)));
-  v->VisitPointer(BitCast<Object**, Context**>(&(thread->context_)));
+  v->VisitPointer(BitCast<Object**>(&(thread->pending_message_script_)));
+  v->VisitPointer(BitCast<Object**>(&(thread->context_)));
   v->VisitPointer(&(thread->scheduled_exception_));
 
   for (v8::TryCatch* block = thread->TryCatchHandler();
        block != NULL;
        block = TRY_CATCH_FROM_ADDRESS(block->next_)) {
-    v->VisitPointer(BitCast<Object**, void**>(&(block->exception_)));
-    v->VisitPointer(BitCast<Object**, void**>(&(block->message_)));
+    v->VisitPointer(BitCast<Object**>(&(block->exception_)));
+    v->VisitPointer(BitCast<Object**>(&(block->message_)));
   }
 
   // Iterate over pointers on native execution stack.
diff --git a/src/utils.h b/src/utils.h
index 236b85e..d15319c 100644
--- a/src/utils.h
+++ b/src/utils.h
@@ -739,7 +739,11 @@
   return dest;
 }
 
-} }  // namespace v8::internal
+template <class Dest, class Source>
+inline Dest BitCast(Source* const & source) {
+  return BitCast<Dest>(reinterpret_cast<uintptr_t>(source));
+}
 
+} }  // namespace v8::internal
 
 #endif  // V8_UTILS_H_
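
The new overload lets callers BitCast any pointer with a single explicit template argument, as the top.cc and assembler-x64.cc hunks above now do. A self-contained sketch of the idiom (the memcpy-based base template is an assumption here; its real definition is outside this hunk):

  #include <stdint.h>
  #include <string.h>

  template <class Dest, class Source>
  inline Dest BitCast(const Source& source) {
    // Copy the bits instead of reinterpreting the storage, which keeps
    // the conversion well-defined under strict aliasing. (The sizes of
    // Dest and Source must match; the compile-time check is omitted.)
    Dest dest;
    memcpy(&dest, &source, sizeof(dest));
    return dest;
  }

  template <class Dest, class Source>
  inline Dest BitCast(Source* const& source) {
    // The overload added above: route any pointer through uintptr_t so
    // the source pointer type need not be spelled out by the caller.
    return BitCast<Dest>(reinterpret_cast<uintptr_t>(source));
  }

  // Usage mirroring the top.cc change:
  //   v->VisitPointer(BitCast<Object**>(&(thread->context_)));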
diff --git a/src/version.cc b/src/version.cc
index c542aef..e501a7c 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,8 +34,8 @@
 // cannot be changed without changing the SCons build script.
 #define MAJOR_VERSION     2
 #define MINOR_VERSION     3
-#define BUILD_NUMBER      6
-#define PATCH_LEVEL       1
+#define BUILD_NUMBER      7
+#define PATCH_LEVEL       0
 #define CANDIDATE_VERSION false
 
 // Define SONAME to have the SCons build put a specific SONAME into the
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index d90655b..9ad94ce 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -253,7 +253,7 @@
   int32_t disp_value = 0;
   if (mode == 0x80 || is_baseless) {
     // Mode 2 or mode 0 with rbp/r13 as base: Word displacement.
-    disp_value = *reinterpret_cast<const int32_t*>(&operand.buf_[disp_offset]);
+    disp_value = *BitCast<const int32_t*>(&operand.buf_[disp_offset]);
   } else if (mode == 0x40) {
     // Mode 1: Byte displacement.
     disp_value = static_cast<signed char>(operand.buf_[disp_offset]);
diff --git a/src/x64/builtins-x64.cc b/src/x64/builtins-x64.cc
index 959b4b0..6b34a4f 100644
--- a/src/x64/builtins-x64.cc
+++ b/src/x64/builtins-x64.cc
@@ -310,7 +310,7 @@
   __ movsxlq(rbx,
              FieldOperand(rdx,
                           SharedFunctionInfo::kFormalParameterCountOffset));
-  __ movq(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
+  __ movq(rdx, FieldOperand(rdi, JSFunction::kCodeOffset));
   __ lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
   __ cmpq(rax, rbx);
   __ j(not_equal,
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index b6256fa..04078ef 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -2630,9 +2630,8 @@
       __ j(is_smi, &build_args);
       __ CmpObjectType(rax, JS_FUNCTION_TYPE, rcx);
       __ j(not_equal, &build_args);
-      __ movq(rax, FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset));
       Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply));
-      __ Cmp(FieldOperand(rax, SharedFunctionInfo::kCodeOffset), apply_code);
+      __ Cmp(FieldOperand(rax, JSFunction::kCodeOffset), apply_code);
       __ j(not_equal, &build_args);
 
       // Check that applicand is a function.
@@ -8635,6 +8634,12 @@
   __ movq(FieldOperand(rax, JSFunction::kContextOffset), rsi);
   __ movq(FieldOperand(rax, JSFunction::kLiteralsOffset), rbx);
 
+  // Initialize the code pointer in the function to be the one
+  // found in the shared function info object.
+  __ movq(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
+  __ movq(FieldOperand(rax, JSFunction::kCodeOffset), rdx);
+
   // Return and remove the on-stack parameter.
   __ ret(1 * kPointerSize);
 
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index bab0199..e744d53 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -582,8 +582,7 @@
     // Make sure the code objects in the builtins object and in the
     // builtin function are the same.
     push(target);
-    movq(target, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
-    movq(target, FieldOperand(target, SharedFunctionInfo::kCodeOffset));
+    movq(target, FieldOperand(rdi, JSFunction::kCodeOffset));
     cmpq(target, Operand(rsp, 0));
     Assert(equal, "Builtin code object changed");
     pop(target);
@@ -2290,7 +2289,7 @@
   movq(rsi, FieldOperand(function, JSFunction::kContextOffset));
   movsxlq(rbx,
           FieldOperand(rdx, SharedFunctionInfo::kFormalParameterCountOffset));
-  movq(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
+  movq(rdx, FieldOperand(rdi, JSFunction::kCodeOffset));
   // Advances rdx to the end of the Code object header, to the start of
   // the executable code.
   lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
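
Taken together, the x64 hunks replace a two-step load through the shared function info with a single load from the function's own code slot, which the codegen-x64.cc hunk above now initializes. The data flow in C++ terms (hypothetical struct shapes, a sketch only):

  #include <assert.h>

  struct Code { char header; };
  struct SharedFunctionInfo { Code* code; };
  struct JSFunction {
    SharedFunctionInfo* shared;
    Code* code;  // the new kCodeOffset slot, kept in sync with shared->code
  };

  int main() {
    Code code;
    SharedFunctionInfo shared = { &code };
    // Initialized the way the closure-allocation stub now does it.
    JSFunction fun = { &shared, shared.code };
    // Old path: fun.shared->code (two dependent loads).
    // New path: fun.code (one load).
    assert(fun.code == fun.shared->code);
    return 0;
  }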